10203040506070809010011012013014015016017018019020021022023024025026027028029030031032033034035036037038039040041042043044045046047048049050051052053054055056057058059060061062063064065066067068069070071072073074075076077078079080081082083084085086087088089090091092093094095096097098099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000501050205030504050505060507050805090510051105120513051405150516051705180519052005210522052305240525052605270
52805290530053105320533053405350536053705380539054005410542054305440545054605470548054905500551055205530554055505560557055805590560056105620563056405650566056705680569057005710572057305740575057605770578057905800581058205830584058505860587058805890590059105920593059405950596059705980599060006010602060306040605060606070608060906100611061219036130614061506160617061806190620062106220623062414562514562614562714562886298630863106320633063406350636063706380639064086418642064317506441750645175064606470648064906500651065206530654065506565562065706582748465906600661066227810663066406650666066706680669067006710672067394756749475675947567694756779475678119679122680122681122682122683122684068506860687068806890690069106920693069406950696069706981821369918213700070195977021825370318253704182537050706070707080709071082271107124117134117140715071607170718071907200721072207230724072507260727411728411729411730073107320733073407350736073707380739074007410742074307440745074607470748074907500751075207530754075507560757075807590760076107620763076407650766076707680769077007710772077307740775077607770778077907800781078207830784078507860787078807890790079107920793079407950796079707980799080008010802080308040805080608070808080908100811081208130814081508160817081808190820082108220823082408250826082708280829083008310832083308340835083608370838083908400841084208430844084508460847084808490850085108520853085408550856085708580859086008610862086308640865086608670868086908700871087208731843874087508760877087808792254880225488122548820883088408850886088747078880889470789008911828189223548930894089547088960897235289823528992352900090109022903090409050906235490709080909091009110912269132691426915091609170918091909200921149221492314924149251926192719281929210930630931359320933093409350936093717593809391759401759413509421529431529447694515294609479994809490950769510952095309540955999569995799958095909609996121029622102963096409652254966096709680969097009710972097309740975097609770978097909800981098209830
98409850986098709880989099009910992099309940995099609970998099901000010010100201003010040100501006010070100801009010100101101012010130101401015010160101740101801019010200102101022010231410241410251410261410271410282610292610300103180103201033261034261035261036010370103826103901040010410104201043010441104511046110474104801049410500105111052110531105401055010560105701058010591106011061110621510632106421065210662106701068010690107001071010720107311074110750107611077010780107901080010810108201083010840108501086010870108801089010900109101092010930109401095010960109701098010990110001101011020110301104011050110634110734110887110901110011110111217411138711140111501116871117011183411196811200112134112234112301124011250112653112701128341129341130011310113201133011340113501136011372411138241113963611400114101142011431272114401145636114601147011480114912721150241115139511520115324111542411155011560115701158011590116001161011621511631511641511651511664511670116801169011703011710117215117301174011750117615117701178151179301180011811511821511830118401185011860118701188011890119001191011920119301194011950119601197151198151199012000120101202012030120401205012062139120739951208012093995121001211012120121301214012150121601217799012180121939851220012210122201223399512240122521391226213912270122818561229012304441231012320123301234012350123601237141212380123921391240213912412139124201243012440124501246012470124827612492761250276125101252276125365512540125565512560125701258012590126001261012620126313101264012656511266012670126801269655127001271276127227612730127401275379127601277012780127901280012810128201283012840128501286012870128801289012900129101292012930129401295012960129701298012990130001301013020130301304013052761306276130701308013090131001311013122415131301314013150131601317013180131924151320013210132201323013247245132501326013270132801329013300133101332013330133401335013360133701338013390134001341013420134301344013450134601347013480134901350013510135201353013540135501356013570135
80135901360013610136201363013640136501366013670136801369013700137101372013730137401375013760137701378013790138001381013820138301384013850138601387013880138901390013910139201393013940139501396013970139801399014000140101402014030140401405014060140701408014090141001411014120141301414014150141601417014180141901420014210142201423014240142501426014270142801429014300143101432014330143401435014360143701438014390144001441014420144301444014450144601447014480144901450014510145201453014540145501456014570145801459014600146101462014630146401465014660146701468014690147001471014720147301474014750147601477014780147901480014810148201483014840148501486014870148801489014900149101492014930149401495014960149701498014990150001501015020150301504015050150601507015080150901510015110151201513015140151501516015170151801519015200152101522015230152401525015260152701528015290153001531015320153301534015350153601537015380153901540015410154201543015440154501546015470154801549015500155101552015535131554513155501556015570155801559513156001561015622156352156452156553156653156753156853156949515705011571509157250915735091574509157501576015770157801579015800158101582015830158401585015860158701588015890159001591015920159301594015950159601597415984159981600016010160201603016044160528160620160716160841609416100161101612016130161401615016160161701618016190162001621016220162301624016250162601627016280162901630016310163201633016340163501636016370163801639016400164101642016430164401645016460164701648016490165001651016520165301654016550165601657016580165901660016610166201663016640166501666016670166801669016700167101672016730167401675513167601677016780167901680016811281682128168301684128168569168659168701688016890169001691016925916930169401695016960169701698591699017002561701017021281703128170401705017060170701708017090171001711017120171301714017150171601717017181526117190172001721017220172301724017251576217260172701728017293152417300173115706173201733157061734157061735017360173715762173815261173901740501174101742
15261174301744017450174601747017480174965581750017510175201753017540175565581756017570175801759017600176101762017630176401765017660176701768017690177001771655817726558177365581774655817750177601777017780177901780017810178201783017840178501786017870178801789017900179101792017930179401795017960179701798017990180001801018020180301804018050180635509180701808355091809018100181101812018130181401815018160181701818018190182001821018220182301824018250182601827355091828355091829167041830188051831188051832018330183401835018360183701838018390184001841018420184301844018450184601847018480184901850018510185201853018540185501856018570185826185926186001861018620186301864018650186601867018680186901870018710187201873018740187501876018770187801879018800188101882018830188401885018860188701888018890189001891018920189301894018950189601897018980189901900019010190201903019040190501906019070190801909019100191101912019130191401915019160191701918019190192001921019220192301924019250192601927019280192901930019310193201933019340193501936019370193801939019400194101942019430194401945019460194743194801949019500195101952019534319544319551291956019570195801959019600196101962019630196401965431966019670196801969019704319718619720197343197443197543197601977528513197801979176128198018043119810198243031983019844303198543031986430319870198801989431990431991431992019930199401995019960199701998019997720220000200138601200202003020040200502006020070200802009020100201102012020130201402015020160201702018020190202002021386012022386012023386012024386012025342982026430320274303202843032029020300203143032032430320334303203443032035430320364303203743032038430320394303204002041020420204302044020453860120463860120473887820480204968873205034298205127720520205343032054020550205602057020580205938601206038601206102062020630206438601206538601206638601206776659206802069380582070020710207202073380582074380582075380582076380582077380582078380582079020803860120810208263182083631820846318208514073208614073208714073208834517208934
5172090345172091345172092408420934084209402095386012096386012097386012098386012099021000210102102021030210443032105021060210743032108430321090211002111021124303211343032114860621150211642602117426021184260211942602120021214260212285202123021240212502126021274321284321294321304321318621320213302134021350213602137021380213902140021410214202143021440214502146021470214802149021500215102152021530215402155021560215702158021590216002161021620216302164021650216602167021680216902170021710217202173021740217502176021770217802179021800218102182021830218402185021860218702188021890219002191021920219302194021950219602197021980219902200022010220202203022040220502206022070220802209022100221102212022130221402215022160221702218022190222002221022220222302224022250222602227022280222902230022310223202233022340223502236022370223802239022400224102242022430224402245022460224702248022490225002251022520225302254022550225602257022580225902260022610226202263022643285822653285822663285822670226802269022700227102272022730227402275022760227702278022790228002281180582282022830228402285022860228702288022890229002291022920229302294022950229602297022980229902300023010230202303023040230502306023070230802309023100231102312023130231402315023160231702318023190232002321023220232302324023250232602327023280232902330023310233202333023340233502336023370233802339023400234102342023430234402345023460234702348023490235002351023520235302354023550235602357023580235902360023610236202363023640236502366023670236802369023700237102372023730237402375023760237702378023790238002381023820238302384023850238602387023880238902390023910239202393023940239502396023970239802399024000240102402024030240402405024060240702408024090241002411024120241302414024150241602417024180241902420024210242202423024240242502426024270242802429024300243102432024330243402435024360243702438024390244002441024420244302444024450244602447024480244902450024510 module dparse.lexer; import std.typecons; import std.typetuple; import std.array; import 
std.algorithm; import std.range; import std.experimental.lexer; import std.traits; import core.cpuid : sse42; public import dparse.trivia; /// Operators private enum operators = [ ",", ".", "..", "...", "/", "/=", "!", "!<", "!<=", "!<>", "!<>=", "!=", "!>", "!>=", "$", "%", "%=", "&", "&&", "&=", "(", ")", "*", "*=", "+", "++", "+=", "-", "--", "-=", ":", ";", "<", "<<", "<<=", "<=", "<>", "<>=", "=", "==", "=>", ">", ">=", ">>", ">>=", ">>>", ">>>=", "?", "@", "[", "]", "^", "^=", "^^", "^^=", "{", "|", "|=", "||", "}", "~", "~=" ]; /// Kewords private enum keywords = [ "abstract", "alias", "align", "asm", "assert", "auto", "bool", "break", "byte", "case", "cast", "catch", "cdouble", "cent", "cfloat", "char", "class", "const", "continue", "creal", "dchar", "debug", "default", "delegate", "delete", "deprecated", "do", "double", "else", "enum", "export", "extern", "false", "final", "finally", "float", "for", "foreach", "foreach_reverse", "function", "goto", "idouble", "if", "ifloat", "immutable", "import", "in", "inout", "int", "interface", "invariant", "ireal", "is", "lazy", "long", "macro", "mixin", "module", "new", "nothrow", "null", "out", "override", "package", "pragma", "private", "protected", "public", "pure", "real", "ref", "return", "scope", "shared", "short", "static", "struct", "super", "switch", "synchronized", "template", "this", "throw", "true", "try", "typedef", "typeid", "typeof", "ubyte", "ucent", "uint", "ulong", "union", "unittest", "ushort", "version", "void", "wchar", "while", "with", "__DATE__", "__EOF__", "__FILE__", "__FILE_FULL_PATH__", "__FUNCTION__", "__gshared", "__LINE__", "__MODULE__", "__parameters", "__PRETTY_FUNCTION__", "__TIME__", "__TIMESTAMP__", "__traits", "__vector", "__VENDOR__", "__VERSION__" ]; /// Other tokens private enum dynamicTokens = [ "specialTokenSequence", "comment", "identifier", "scriptLine", "whitespace", "doubleLiteral", "floatLiteral", "idoubleLiteral", "ifloatLiteral", "intLiteral", "longLiteral", 
"realLiteral", "irealLiteral", "uintLiteral", "ulongLiteral", "characterLiteral", "dstringLiteral", "stringLiteral", "wstringLiteral" ]; private enum pseudoTokenHandlers = [ "\"", "lexStringLiteral", "`", "lexWysiwygString", "//", "lexSlashSlashComment", "/*", "lexSlashStarComment", "/+", "lexSlashPlusComment", ".", "lexDot", "'", "lexCharacterLiteral", "0", "lexNumber", "1", "lexDecimal", "2", "lexDecimal", "3", "lexDecimal", "4", "lexDecimal", "5", "lexDecimal", "6", "lexDecimal", "7", "lexDecimal", "8", "lexDecimal", "9", "lexDecimal", "q\"", "lexDelimitedString", "q{", "lexTokenString", "r\"", "lexWysiwygString", "x\"", "lexHexString", " ", "lexWhitespace", "\t", "lexWhitespace", "\r", "lexWhitespace", "\n", "lexWhitespace", "\v", "lexWhitespace", "\f", "lexWhitespace", "\u2028", "lexLongNewline", "\u2029", "lexLongNewline", "#!", "lexScriptLine", "#line", "lexSpecialTokenSequence" ]; /// Token ID type for the D lexer. public alias IdType = TokenIdType!(operators, dynamicTokens, keywords); /** * Function used for converting an IdType to a string. * * Examples: * --- * IdType c = tok!"case"; * assert (str(c) == "case"); * --- */ public alias str = tokenStringRepresentation!(IdType, operators, dynamicTokens, keywords); /** * Template used to refer to D token types. * * See the $(B operators), $(B keywords), and $(B dynamicTokens) enums for * values that can be passed to this template. * Example: * --- * import dparse.lexer; * IdType t = tok!"floatLiteral"; * --- */ public template tok(string token) { alias tok = TokenId!(IdType, operators, dynamicTokens, keywords, token); } mixin template TokenTriviaFields() { /** * Whitespace and comment tokens attached to this token. * * All trivia tokens must have the text property set to the text with * which they identify with. This means you can map all trivia tokens to * their .text property and join them together to get the source code back * without any loss of information. 
* * Trivia is only included when calling getTokensForParser. When iterating * over DLexer all tokens will be in their raw form and none will be * converted to trivia. * * Note: in the future you might need to explicitly pass * WhitespaceBehavior.include (or keep the default) as getTokensForParser * currently overrides it to include. * * Contains: `comment`, `whitespace`, `specialTokenSequence` */ immutable(typeof(this))[] leadingTrivia; /// ditto immutable(typeof(this))[] trailingTrivia; string memoizedLeadingComment = null; string memoizedTrailingComment = null; /// Legacy property to get documentation comments, with comment border /// stripped off, which is attached to this token. string comment() const pure nothrow @safe @property { import dparse.trivia : extractLeadingDdoc; if (memoizedLeadingComment !is null) return memoizedLeadingComment; return (cast()memoizedLeadingComment) = this.extractLeadingDdoc; } /// ditto string trailingComment() const pure nothrow @safe @property { import dparse.trivia : extractTrailingDdoc; if (memoizedTrailingComment !is null) return memoizedTrailingComment; return (cast()memoizedTrailingComment) = this.extractTrailingDdoc; } int opCmp(size_t i) const pure nothrow @safe @nogc { if (index < i) return -1; if (index > i) return 1; return 0; } int opCmp(ref const typeof(this) other) const pure nothrow @safe @nogc { return opCmp(other.index); } } // mixin in from dparse.lexer to make error messages more managable size as the // entire string is dumped when there is a type mismatch. private enum extraFields = "import dparse.lexer:TokenTriviaFields; mixin TokenTriviaFields;"; /// The token type in the D lexer public alias Token = std.experimental.lexer.TokenStructure!(IdType, extraFields); /** * Configure whitespace handling */ public enum WhitespaceBehavior : ubyte { include = 0b0000_0000, skip = 0b0000_0001, } private enum stringBehaviorNotWorking = "Automatic string parsing is not " ~ "supported and was previously not working. 
To unescape strings use the " ~ "`dparse.strings : unescapeString` function on the token texts instead."; /** * Configure string lexing behavior */ // was enum, but struct now for deprecations and support with old compilers public struct StringBehavior { /// Do not include quote characters, process escape sequences deprecated(stringBehaviorNotWorking) static immutable StringBehavior compiler = StringBehavior(0b0000_0000); /// Opening quotes, closing quotes, and string suffixes are included in /// the string token deprecated(stringBehaviorNotWorking) static immutable StringBehavior includeQuoteChars = StringBehavior(0b0000_0001); /// String escape sequences are not replaced deprecated(stringBehaviorNotWorking) static immutable StringBehavior notEscaped = StringBehavior(0b0000_0010); /// Not modified at all. Useful for formatters or highlighters static immutable StringBehavior source = StringBehavior(0b0000_0011); ubyte behavior; alias behavior this; } public enum CommentBehavior : bool { intern = true, noIntern = false } /** * Lexer configuration struct */ public struct LexerConfig { string fileName; StringBehavior stringBehavior; WhitespaceBehavior whitespaceBehavior; CommentBehavior commentBehavior = CommentBehavior.intern; } /** * Basic type token types. */ public alias BasicTypes = AliasSeq!(tok!"int", tok!"bool", tok!"byte", tok!"cdouble", tok!"cent", tok!"cfloat", tok!"char", tok!"creal", tok!"dchar", tok!"double", tok!"float", tok!"idouble", tok!"ifloat", tok!"ireal", tok!"long", tok!"real", tok!"short", tok!"ubyte", tok!"ucent", tok!"uint", tok!"ulong", tok!"ushort", tok!"void", tok!"wchar"); /** * Returns: true if the given ID is for a basic type. */ public bool isBasicType(IdType type) nothrow pure @safe @nogc { switch (type) { foreach (T; BasicTypes) { case T: return true; } default: return false; } } /** * Number literal token types. 
*/ public alias NumberLiterals = AliasSeq!(tok!"doubleLiteral", tok!"floatLiteral", tok!"idoubleLiteral", tok!"ifloatLiteral", tok!"intLiteral", tok!"longLiteral", tok!"realLiteral", tok!"irealLiteral", tok!"uintLiteral", tok!"ulongLiteral"); /** * Returns: true if the given ID type is for a number literal. */ public bool isNumberLiteral(IdType type) nothrow pure @safe @nogc { switch (type) { foreach (T; NumberLiterals) { case T: return true; } default: return false; } } /** * Number literal token types. */ public alias IntegerLiterals = AliasSeq!(tok!"intLiteral", tok!"longLiteral", tok!"uintLiteral", tok!"ulongLiteral"); /** * Returns: true if the given ID type is for a integer literal. */ public bool isIntegerLiteral(IdType type) nothrow pure @safe @nogc { switch (type) { foreach (T; IntegerLiterals) { case T: return true; } default: return false; } } /** * Operator token types. */ public alias Operators = AliasSeq!(tok!",", tok!".", tok!"..", tok!"...", tok!"/", tok!"/=", tok!"!", tok!"!<", tok!"!<=", tok!"!<>", tok!"!<>=", tok!"!=", tok!"!>", tok!"!>=", tok!"$", tok!"%", tok!"%=", tok!"&", tok!"&&", tok!"&=", tok!"(", tok!")", tok!"*", tok!"*=", tok!"+", tok!"++", tok!"+=", tok!"-", tok!"--", tok!"-=", tok!":", tok!";", tok!"<", tok!"<<", tok!"<<=", tok!"<=", tok!"<>", tok!"<>=", tok!"=", tok!"==", tok!"=>", tok!">", tok!">=", tok!">>", tok!">>=", tok!">>>", tok!">>>=", tok!"?", tok!"@", tok!"[", tok!"]", tok!"^", tok!"^=", tok!"^^", tok!"^^=", tok!"{", tok!"|", tok!"|=", tok!"||", tok!"}", tok!"~", tok!"~="); /** * Returns: true if the given ID type is for an operator. */ public bool isOperator(IdType type) nothrow pure @safe @nogc { switch (type) { foreach (T; Operators) { case T: return true; } default: return false; } } /** * Keyword token types. 
*/ public alias Keywords = AliasSeq!(tok!"abstract", tok!"alias", tok!"align", tok!"asm", tok!"assert", tok!"auto", tok!"break", tok!"case", tok!"cast", tok!"catch", tok!"class", tok!"const", tok!"continue", tok!"debug", tok!"default", tok!"delegate", tok!"delete", tok!"deprecated", tok!"do", tok!"else", tok!"enum", tok!"export", tok!"extern", tok!"false", tok!"final", tok!"finally", tok!"for", tok!"foreach", tok!"foreach_reverse", tok!"function", tok!"goto", tok!"if", tok!"immutable", tok!"import", tok!"in", tok!"inout", tok!"interface", tok!"invariant", tok!"is", tok!"lazy", tok!"macro", tok!"mixin", tok!"module", tok!"new", tok!"nothrow", tok!"null", tok!"out", tok!"override", tok!"package", tok!"pragma", tok!"private", tok!"protected", tok!"public", tok!"pure", tok!"ref", tok!"return", tok!"scope", tok!"shared", tok!"static", tok!"struct", tok!"super", tok!"switch", tok!"synchronized", tok!"template", tok!"this", tok!"throw", tok!"true", tok!"try", tok!"typedef", tok!"typeid", tok!"typeof", tok!"union", tok!"unittest", tok!"version", tok!"while", tok!"with", tok!"__DATE__", tok!"__EOF__", tok!"__FILE__", tok!"__FILE_FULL_PATH__", tok!"__FUNCTION__", tok!"__gshared", tok!"__LINE__", tok!"__MODULE__", tok!"__parameters", tok!"__PRETTY_FUNCTION__", tok!"__TIME__", tok!"__TIMESTAMP__", tok!"__traits", tok!"__vector", tok!"__VENDOR__", tok!"__VERSION__"); /** * Returns: true if the given ID type is for a keyword. */ public bool isKeyword(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; Keywords) { case T: return true; } default: return false; } } /** * String literal token types */ public alias StringLiterals = AliasSeq!(tok!"dstringLiteral", tok!"stringLiteral", tok!"wstringLiteral"); /** * Returns: true if the given ID type is for a string literal. */ public bool isStringLiteral(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; StringLiterals) { case T: return true; } default: return false; } } /** * Protection token types. 
*/
public alias Protections = AliasSeq!(tok!"export", tok!"package", tok!"private",
    tok!"public", tok!"protected");

/**
 * Returns: true if the given ID type is for a protection attribute.
 */
public bool isProtection(IdType type) pure nothrow @safe @nogc
{
    // foreach over an AliasSeq unrolls at compile time into one case per
    // protection token type.
    switch (type)
    {
    foreach (T; Protections)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/// Special token types such as `__FILE__` and `__LINE__`.
public alias SpecialTokens = AliasSeq!(tok!"__DATE__", tok!"__TIME__",
    tok!"__TIMESTAMP__", tok!"__VENDOR__", tok!"__VERSION__", tok!"__FILE__",
    tok!"__FILE_FULL_PATH__", tok!"__LINE__", tok!"__MODULE__",
    tok!"__FUNCTION__", tok!"__PRETTY_FUNCTION__");

/// Returns: true if the given ID type is one of the special tokens above.
public bool isSpecialToken(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; SpecialTokens)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/// All literal token types: strings, numbers, `characterLiteral`, the special
/// tokens, `true`, `false`, `null` and `$`.
public alias Literals = AliasSeq!(StringLiterals, NumberLiterals,
    tok!"characterLiteral", SpecialTokens, tok!"true", tok!"false",
    tok!"null", tok!"$");

/// Returns: true if the given ID type is for a literal.
public bool isLiteral(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; Literals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Returns: an array of tokens lexed from the given source code to the output
 * range. All whitespace, comment and specialTokenSequence tokens (trivia) are
 * attached to the token nearest to them.
 *
 * Trivia is put on the last token as `trailingTrivia` if it is on the same
 * line as the trivia, otherwise it will be attached to the next token in the
 * `leadingTrivia` until there is the EOF, where it will be attached as
 * `trailingTrivia` again.
*/
const(Token)[] getTokensForParser(R)(R sourceCode, LexerConfig config, StringCache* cache)
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    // Trivia attachment requires seeing the whitespace/comment tokens, so the
    // caller-supplied behavior for them is overridden here.
    config.whitespaceBehavior = WhitespaceBehavior.include;
    config.commentBehavior = CommentBehavior.noIntern;

    auto leadingTriviaAppender = appender!(Token[])();
    leadingTriviaAppender.reserve(128);
    auto trailingTriviaAppender = appender!(Token[])();
    trailingTriviaAppender.reserve(128);

    auto output = appender!(typeof(return))();
    auto lexer = DLexer(sourceCode, config, cache);
    loop: while (!lexer.empty) switch (lexer.front.type)
    {
    case tok!"specialTokenSequence":
    case tok!"whitespace":
    case tok!"comment":
        // Trivia on the same line as the previous emitted token trails that
        // token; otherwise it is buffered to lead the next token.
        if (!output.data.empty && lexer.front.line == output.data[$ - 1].line)
            trailingTriviaAppender.put(lexer.front);
        else
            leadingTriviaAppender.put(lexer.front);
        lexer.popFront();
        break;
    case tok!"__EOF__":
        break loop;
    default:
        Token t = lexer.front;
        lexer.popFront();
        // The trivia fields are immutable, so cast() is used to assign to the
        // already-stored previous token.
        if (!output.data.empty && !trailingTriviaAppender.data.empty)
            (cast() output.data[$ - 1].trailingTrivia) = trailingTriviaAppender.data.idup;
        t.leadingTrivia = leadingTriviaAppender.data.idup;
        leadingTriviaAppender.clear();
        trailingTriviaAppender.clear();
        output.put(t);
        break;
    }

    // Any trivia still buffered at EOF is attached to the last token as
    // trailing trivia.
    if (!output.data.empty)
    {
        trailingTriviaAppender.put(leadingTriviaAppender.data);
        (cast() output.data[$ - 1].trailingTrivia) = trailingTriviaAppender.data.idup;
    }

    return output.data;
}

/**
 * The D lexer struct.
 */
public struct DLexer
{
    mixin Lexer!(Token, lexIdentifier, isSeparating, operators,
        dynamicTokens, keywords, pseudoTokenHandlers);

    ///
    @disable this();

    /**
     * Params:
     * range = the bytes that compose the source code that will be lexed.
     * config = the lexer configuration to use.
     * cache = the string interning cache for de-duplicating identifiers and
     * other token text.
* haveSSE42 = Parse streaming SIMD Extensions 4.2 in inline assembly
 */
this(R)(R range, const LexerConfig config, StringCache* cache,
    bool haveSSE42 = sse42()) pure nothrow @safe
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    this.haveSSE42 = haveSSE42;
    // Skip a leading UTF-8 byte order mark (0xEF 0xBB 0xBF) if present.
    auto r = (range.length >= 3 && range[0] == 0xef && range[1] == 0xbb
        && range[2] == 0xbf) ? range[3 .. $] : range;
    // Immutable input can be aliased directly; mutable input is idup'd so the
    // lexer's view of the bytes cannot change underneath it.
    static if (is(ElementEncodingType!R == immutable))
        this.range = LexerRange(cast(const(ubyte)[]) r);
    else
        this.range = LexerRange(cast(const(ubyte)[]) r.idup);
    this.config = config;
    this.cache = cache;
    // Prime the first token so front is valid immediately.
    popFront();
}

///
public void popFront()() pure nothrow @safe
{
    // When whitespace is skipped, keep advancing until the front token is not
    // whitespace.
    do
        _popFront();
    while (config.whitespaceBehavior == WhitespaceBehavior.skip
        && _front.type == tok!"whitespace");
}

/**
 * Lexer error/warning message.
 */
static struct Message
{
    /// 1-based line number
    size_t line;
    /// 1-based byte offset
    size_t column;
    /// Text of the message
    string message;
    /// `true` for an error, `false` for a warning
    bool isError;
}

/**
 * Returns: An array of all of the warnings and errors generated so far
 * during lexing. It may make sense to only check this when `empty`
 * returns `true`.
*/
const(Message[]) messages() const @property
{
    return _messages;
}

private pure nothrow @safe:

// Returns: true if the byte at the current range position starts whitespace,
// including the UTF-8 encodings of U+2028/U+2029 (0xE2 0x80 0xA8/0xA9).
bool isWhitespace()
{
    switch (range.bytes[range.index])
    {
    case ' ':
    case '\r':
    case '\n':
    case '\t':
    case '\v':
    case '\f':
        return true;
    case 0xe2:
        // Possible line (U+2028) or paragraph (U+2029) separator.
        auto peek = range.peek(2);
        return peek.length == 2
            && peek[0] == 0x80
            && (peek[1] == 0xa8 || peek[1] == 0xa9);
    default:
        return false;
    }
}

// Advances past one whitespace element, updating the line counter for any of
// the recognized line endings.
void popFrontWhitespaceAware()
{
    switch (range.bytes[range.index])
    {
    case '\r':
        range.popFront();
        // Treat "\r\n" as a single line ending.
        if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n')
        {
            range.popFront();
            range.incrementLine();
        }
        else
            range.incrementLine();
        return;
    case '\n':
        range.popFront();
        range.incrementLine();
        return;
    case 0xe2:
        // U+2028 / U+2029 also count as line endings.
        auto lookahead = range.peek(3);
        if (lookahead.length == 3 && lookahead[1] == 0x80
            && (lookahead[2] == 0xa8 || lookahead[2] == 0xa9))
        {
            range.index+=3;
            range.column+=3;
            range.incrementLine();
            return;
        }
        else
        {
            range.popFront();
            return;
        }
    default:
        range.popFront();
        return;
    }
}

// Lexes a maximal run of whitespace into a single token. The token text is
// interned only when WhitespaceBehavior.include is configured.
void lexWhitespace(ref Token token) @trusted
{
    mixin (tokenStart);
    loop: do
    {
        version (X86_64)
        {
            // SSE4.2 fast path: bulk-skip runs of non-newline whitespace.
            if (haveSSE42 && range.index + 16 < range.bytes.length)
            {
                skip!(true, '\t', ' ', '\v', '\f')(range.bytes.ptr + range.index,
                    &range.index, &range.column);
            }
        }
        switch (range.bytes[range.index])
        {
        case '\r':
            range.popFront();
            // "\r\n" consumed as one line ending.
            if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n')
            {
                range.popFront();
            }
            range.column = 1;
            range.line += 1;
            break;
        case '\n':
            range.popFront();
            range.column = 1;
            range.line += 1;
            break;
        case ' ':
        case '\t':
        case '\v':
        case '\f':
            range.popFront();
            break;
        case 0xe2:
            // Only the full U+2028/U+2029 sequences continue the run; any
            // other byte sequence starting with 0xE2 terminates it.
            if (range.index + 2 >= range.bytes.length)
                break loop;
            if (range.bytes[range.index + 1] != 0x80)
                break loop;
            if (range.bytes[range.index + 2] == 0xa8 || range.bytes[range.index + 2] == 0xa9)
            {
                range.index += 3;
                range.column += 3;
                // NOTE(review): the "+= 3" above is a dead store here — it is
                // immediately overwritten by the reset to 1 below.
                range.column = 1;
                range.line += 1;
                break;
            }
            break loop;
        default:
            break loop;
        }
    } while (!(range.index >= range.bytes.length));
    string text = config.whitespaceBehavior ==
WhitespaceBehavior.include ? cache.intern(range.slice(mark)) : ""; token = Token(tok!"whitespace", text, line, column, index); } void lexNumber(ref Token token) { mixin (tokenStart); if (range.bytes[range.index] == '0' && range.index + 1 < range.bytes.length) { immutable ahead = range.bytes[range.index + 1]; switch (ahead) { case 'x': case 'X': range.index += 2; range.column += 2; lexHex(token, mark, line, column, index); return; case 'b': case 'B': range.index += 2; range.column += 2; lexBinary(token, mark, line, column, index); return; default: lexDecimal(token, mark, line, column, index); return; } } else lexDecimal(token, mark, line, column, index); } void lexHex(ref Token token) { mixin (tokenStart); lexHex(token, mark, line, column, index); } void lexHex(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { IdType type = tok!"intLiteral"; bool foundDot; hexLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case 'a': .. case 'f': case 'A': .. case 'F': case '0': .. case '9': case '_': version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '9', 'a', 'f', 'A', 'F', '_', '_') (range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': lexIntSuffix(type); break hexLoop; case 'i': if (foundDot) lexFloatSuffix(type); break hexLoop; case 'L': if (foundDot) lexFloatSuffix(type); else lexIntSuffix(type); break hexLoop; case 'p': case 'P': lexExponent(type); break hexLoop; case '.': if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.') break hexLoop; else { // The following bit of silliness tries to tell the // difference between "int dot identifier" and // "double identifier". if (range.index + 1 < range.bytes.length) { switch (range.peekAt(1)) { case '0': .. case '9': case 'A': .. case 'F': case 'a': .. 
case 'f': goto doubleLiteral; default: break hexLoop; } } else { doubleLiteral: range.popFront(); foundDot = true; type = tok!"doubleLiteral"; } } break; default: break hexLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexBinary(ref Token token) { mixin (tokenStart); return lexBinary(token, mark, line, column, index); } void lexBinary(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { IdType type = tok!"intLiteral"; binaryLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case '0': case '1': case '_': version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '1', '_', '_')( range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': case 'L': lexIntSuffix(type); break binaryLoop; default: break binaryLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexDecimal(ref Token token) { mixin (tokenStart); lexDecimal(token, mark, line, column, index); } void lexDecimal(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { bool foundDot = range.bytes[range.index] == '.'; IdType type = tok!"intLiteral"; if (foundDot) { range.popFront(); type = tok!"doubleLiteral"; } decimalLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case '0': .. 
case '9': case '_': version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '9', '_', '_')(range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': if (!foundDot) lexIntSuffix(type); break decimalLoop; case 'i': lexFloatSuffix(type); break decimalLoop; case 'L': if (foundDot) lexFloatSuffix(type); else lexIntSuffix(type); break decimalLoop; case 'f': case 'F': lexFloatSuffix(type); break decimalLoop; case 'e': case 'E': lexExponent(type); break decimalLoop; case '.': if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.') break decimalLoop; else { // The following bit of silliness tries to tell the // difference between "int dot identifier" and // "double identifier". if (range.index + 1 < range.bytes.length) { immutable ch = range.peekAt(1); if (ch <= 0x2f || (ch >= '0' && ch <= '9') || (ch >= ':' && ch <= '@') || (ch >= '[' && ch <= '^') || (ch >= '{' && ch <= '~') || ch == '`' || ch == '_') { goto doubleLiteral; } else break decimalLoop; } else { doubleLiteral: range.popFront(); foundDot = true; type = tok!"doubleLiteral"; } } break; default: break decimalLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexIntSuffix(ref IdType type) pure nothrow @safe { bool secondPass; if (range.bytes[range.index] == 'u' || range.bytes[range.index] == 'U') { U: if (type == tok!"intLiteral") type = tok!"uintLiteral"; else type = tok!"ulongLiteral"; range.popFront(); if (secondPass) return; if (range.index < range.bytes.length && (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l')) goto L; goto I; } if (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l') { L: if (type == tok!"uintLiteral") type = tok!"ulongLiteral"; else type = tok!"longLiteral"; range.popFront(); if (range.index < range.bytes.length && 
(range.bytes[range.index] == 'U' || range.bytes[range.index] == 'u')) { secondPass = true; goto U; } goto I; } I: if (range.index < range.bytes.length && range.bytes[range.index] == 'i') { warning("Complex number literals are deprecated"); range.popFront(); if (type == tok!"longLiteral" || type == tok!"ulongLiteral") type = tok!"idoubleLiteral"; else type = tok!"ifloatLiteral"; } } void lexFloatSuffix(ref IdType type) pure nothrow @safe { switch (range.bytes[range.index]) { case 'L': range.popFront(); type = tok!"doubleLiteral"; break; case 'f': case 'F': range.popFront(); type = tok!"floatLiteral"; break; default: break; } if (range.index < range.bytes.length && range.bytes[range.index] == 'i') { warning("Complex number literals are deprecated"); range.popFront(); if (type == tok!"floatLiteral") type = tok!"ifloatLiteral"; else type = tok!"idoubleLiteral"; } } void lexExponent(ref IdType type) pure nothrow @safe { range.popFront(); bool foundSign = false; bool foundDigit = false; while (range.index < range.bytes.length) { switch (range.bytes[range.index]) { case '-': case '+': if (foundSign) { if (!foundDigit) error("Expected an exponent"); return; } foundSign = true; range.popFront(); break; case '0': .. 
case '9': case '_': foundDigit = true; range.popFront(); break; case 'L': case 'f': case 'F': case 'i': lexFloatSuffix(type); return; default: if (!foundDigit) error("Expected an exponent"); return; } } } void lexScriptLine(ref Token token) { mixin (tokenStart); while (!(range.index >= range.bytes.length) && !isNewline) { range.popFront(); } token = Token(tok!"scriptLine", cache.intern(range.slice(mark)), line, column, index); } void lexSpecialTokenSequence(ref Token token) { mixin (tokenStart); while (!(range.index >= range.bytes.length) && !isNewline) { range.popFront(); } token = Token(tok!"specialTokenSequence", cache.intern(range.slice(mark)), line, column, index); } void lexSlashStarComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.popFrontN(2); while (range.index < range.bytes.length) { version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) skip!(false, '\r', '\n', '/', '*', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } if (range.bytes[range.index] == '*') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/') { range.popFront(); break; } } else popFrontWhitespaceAware(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexSlashSlashComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.popFrontN(2); while (range.index < range.bytes.length) { version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '\r' || range.bytes[range.index] == '\n') break; range.popFront(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, 
index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexSlashPlusComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.index += 2; range.column += 2; int depth = 1; while (depth > 0 && !(range.index >= range.bytes.length)) { version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '+', '/', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '+') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/') { range.popFront(); depth--; } } else if (range.bytes[range.index] == '/') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '+') { range.popFront(); depth++; } } else popFrontWhitespaceAware(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexStringLiteral(ref Token token) @trusted { mixin (tokenStart); range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '"', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '"') { range.popFront(); break; } else if (range.bytes[range.index] == '\\') { if (!lexEscapeSequence()) { token = Token.init; return; } } else popFrontWhitespaceAware(); } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexWysiwygString(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"stringLiteral"; immutable bool backtick = range.bytes[range.index] == '`'; if (backtick) 
{ range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '\r', '\n', 0xe2, '`')(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '`') { range.popFront(); break; } else popFrontWhitespaceAware(); } } else { range.popFront(); if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } else if (range.bytes[range.index] == '"') { range.popFront(); break; } else popFrontWhitespaceAware(); } } lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } private ubyte lexStringSuffix(ref IdType type) pure nothrow @safe { if (range.index >= range.bytes.length) { type = tok!"stringLiteral"; return 0; } else { switch (range.bytes[range.index]) { case 'w': range.popFront(); type = tok!"wstringLiteral"; return 'w'; case 'd': range.popFront(); type = tok!"dstringLiteral"; return 'd'; case 'c': range.popFront(); type = tok!"stringLiteral"; return 'c'; default: type = tok!"stringLiteral"; return 0; } } } void lexDelimitedString(ref Token token) { mixin (tokenStart); range.index += 2; range.column += 2; ubyte open; ubyte close; switch (range.bytes[range.index]) { case '<': open = '<'; close = '>'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '{': open = '{'; close = '}'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '[': open = '['; close = ']'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '(': open = '('; close = ')'; 
range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; default: lexHeredocString(token, mark, line, column, index); break; } } void lexNormalDelimitedString(ref Token token, size_t mark, size_t line, size_t column, size_t index, ubyte open, ubyte close) { int depth = 1; while (!(range.index >= range.bytes.length) && depth > 0) { if (range.bytes[range.index] == open) { depth++; range.popFront(); } else if (range.bytes[range.index] == close) { depth--; range.popFront(); if (depth <= 0) { if (range.bytes[range.index] == '"') { range.popFront(); } else { error("Error: `\"` expected to end delimited string literal"); token = Token(tok!""); return; } } } else popFrontWhitespaceAware(); } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexHeredocString(ref Token token, size_t mark, size_t line, size_t column, size_t index) { Token ident; lexIdentifier(ident); if (isNewline()) popFrontWhitespaceAware(); else error("Newline expected"); while (!(range.index >= range.bytes.length)) { if (isNewline()) { popFrontWhitespaceAware(); if (!range.canPeek(ident.text.length)) { error(ident.text ~ " expected"); break; } if (range.peek(ident.text.length - 1) == ident.text) { range.popFrontN(ident.text.length); break; } } else { range.popFront(); } } if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '"') { range.popFront(); } else error("`\"` expected"); IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexTokenString(ref Token token) { mixin (tokenStart); assert (range.bytes[range.index] == 'q'); range.popFront(); assert (range.bytes[range.index] == '{'); range.popFront(); auto app = appender!string(); app.put("q{"); int depth = 1; immutable WhitespaceBehavior oldWhitespace = config.whitespaceBehavior; immutable StringBehavior oldString = 
config.stringBehavior; config.whitespaceBehavior = WhitespaceBehavior.include; config.stringBehavior = StringBehavior.source; scope (exit) { config.whitespaceBehavior = oldWhitespace; config.stringBehavior = oldString; } advance(_front); while (depth > 0 && !empty) { auto t = front(); if (t.text is null) app.put(str(t.type)); else app.put(t.text); if (t.type == tok!"}") { depth--; if (depth > 0) popFront(); } else if (t.type == tok!"{") { depth++; popFront(); } else popFront(); } IdType type = tok!"stringLiteral"; auto b = lexStringSuffix(type); if (b != 0) app.put(b); token = Token(type, cache.intern(cast(const(ubyte)[]) app.data), line, column, index); } void lexHexString(ref Token token) { mixin (tokenStart); range.index += 2; range.column += 2; loop: while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated hex string literal"); token = Token(tok!""); return; } else if (isWhitespace()) popFrontWhitespaceAware(); else switch (range.bytes[range.index]) { case '0': .. case '9': case 'A': .. case 'F': case 'a': .. 
case 'f': range.popFront(); break; case '"': range.popFront(); break loop; default: error("Error: invalid character in hex string"); token = Token(tok!""); return; } } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } bool lexNamedEntity() in { assert (range.bytes[range.index] == '&'); } do { Token t; range.popFront(); lexIdentifier(t, true); if (t.type != tok!"identifier" || range.empty || range.bytes[range.index] != ';') { error("Error: invalid named character entity"); return false; } range.popFront(); return true; } bool lexEscapeSequence() { range.popFront(); if (range.index >= range.bytes.length) { error("Error: non-terminated character escape sequence."); return false; } switch (range.bytes[range.index]) { case '&': return lexNamedEntity(); case '\'': case '"': case '?': case '\\': case 'a': case 'b': case 'f': case 'n': case 'r': case 't': case 'v': range.popFront(); break; case 'x': range.popFront(); foreach (i; 0 .. 2) { if (range.index >= range.bytes.length) { error("Error: 2 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. case 'f': case 'A': .. case 'F': range.popFront(); break; default: error("Error: 2 hex digits expected."); return false; } } break; case '0': if (!(range.index + 1 < range.bytes.length) || ((range.index + 1 < range.bytes.length) && range.peekAt(1) == '\'')) { range.popFront(); break; } goto case; case '1': .. case '7': for (size_t i = 0; i < 3 && !(range.index >= range.bytes.length) && range.bytes[range.index] >= '0' && range.bytes[range.index] <= '7'; i++) range.popFront(); break; case 'u': range.popFront(); foreach (i; 0 .. 4) { if (range.index >= range.bytes.length) { error("Error: at least 4 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. case 'f': case 'A': .. 
case 'F': range.popFront(); break; default: error("Error: at least 4 hex digits expected."); return false; } } break; case 'U': range.popFront(); foreach (i; 0 .. 8) { if (range.index >= range.bytes.length) { error("Error: at least 8 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. case 'f': case 'A': .. case 'F': range.popFront(); break; default: error("Error: at least 8 hex digits expected."); return false; } } break; default: error("Invalid escape sequence"); while (true) { if (range.index >= range.bytes.length) { error("Error: non-terminated character escape sequence."); break; } if (range.bytes[range.index] == ';') { range.popFront(); break; } else { range.popFront(); } } return false; } return true; } void lexCharacterLiteral(ref Token token) { mixin (tokenStart); range.popFront(); if (range.empty) goto err; if (range.bytes[range.index] == '\\') lexEscapeSequence(); else if (range.bytes[range.index] == '\'') { range.popFront(); token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)), line, column, index); } else if (range.bytes[range.index] & 0x80) { while (range.bytes[range.index] & 0x80) range.popFront(); } else popFrontWhitespaceAware(); if (range.index < range.bytes.length && range.bytes[range.index] == '\'') { range.popFront(); token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)), line, column, index); } else { err: error("Error: Expected `'` to end character literal"); token = Token(tok!""); } } void lexIdentifier(ref Token token, const bool silent = false) @trusted { mixin (tokenStart); if (isSeparating(0)) { if (silent) return; error("Invalid identifier"); range.popFront(); } while (true) { version (X86_64) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, 'a', 'z', 'A', 'Z', '_', '_') (range.bytes.ptr + range.index); range.column += i; range.index += i; } } if (isSeparating(0)) break; else range.popFront(); } 
token = Token(tok!"identifier", cache.intern(range.slice(mark)), line, column, index); } void lexDot(ref Token token) { mixin (tokenStart); if (!(range.index + 1 < range.bytes.length)) { range.popFront(); token = Token(tok!".", null, line, column, index); return; } switch (range.peekAt(1)) { case '0': .. case '9': lexNumber(token); return; case '.': range.popFront(); range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '.') { range.popFront(); token = Token(tok!"...", null, line, column, index); } else token = Token(tok!"..", null, line, column, index); return; default: range.popFront(); token = Token(tok!".", null, line, column, index); return; } } void lexLongNewline(ref Token token) @nogc { mixin (tokenStart); range.popFront(); range.popFront(); range.popFront(); range.incrementLine(); string text = config.whitespaceBehavior == WhitespaceBehavior.include ? cache.intern(range.slice(mark)) : ""; token = Token(tok!"whitespace", text, line, column, index); } bool isNewline() @nogc { if (range.bytes[range.index] == '\n') return true; if (range.bytes[range.index] == '\r') return true; return (range.bytes[range.index] & 0x80) && (range.index + 2 < range.bytes.length) && (range.peek(2) == "\u2028" || range.peek(2) == "\u2029"); } bool isSeparating(size_t offset) @nogc { enum : ubyte { n, y, m // no, yes, maybe } if (range.index + offset >= range.bytes.length) return true; auto c = range.bytes[range.index + offset]; static immutable ubyte[256] LOOKUP_TABLE = [ y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, n, n, n, n, n, n, n, n, n, n, y, y, y, y, y, y, y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, n, y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, y, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, 
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m ]; immutable ubyte result = LOOKUP_TABLE[c]; if (result == n) return false; if (result == y) return true; if (result == m) { auto r = range; range.popFrontN(offset); return (r.canPeek(2) && (r.peek(2) == "\u2028" || r.peek(2) == "\u2029")); } assert (false); } enum tokenStart = q{ size_t index = range.index; size_t column = range.column; size_t line = range.line; auto mark = range.mark(); }; void error(string message) { _messages ~= Message(range.line, range.column, message, true); } void warning(string message) { _messages ~= Message(range.line, range.column, message, false); assert (_messages.length > 0); } Message[] _messages; StringCache* cache; LexerConfig config; bool haveSSE42; } /** * Creates a token range from the given source code. Creates a default lexer * configuration and a GC-managed string cache. */ public auto byToken(R)(R range) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { LexerConfig config; StringCache* cache = new StringCache(range.length.optimalBucketCount); return DLexer(range, config, cache); } /** * Creates a token range from the given source code. Uses the given string * cache. */ public auto byToken(R)(R range, StringCache* cache) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { LexerConfig config; return DLexer(range, config, cache); } /** * Creates a token range from the given source code. Uses the provided lexer * configuration and string cache. */ public auto byToken(R)(R range, const LexerConfig config, StringCache* cache) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { return DLexer(range, config, cache); } /** * Helper function used to avoid too much allocations while lexing. 
 *
 * Params:
 *     size = The length in bytes of the source file.
 *
 * Returns:
 *     The optimal initial bucket count a `StringCache` should have.
 */
size_t optimalBucketCount(size_t size)
{
    import std.math : nextPow2;
    // One bucket per ~32 bytes of source, rounded up to a power of two
    // (the cache masks hashes with bucketCount - 1) and capped at 2^30.
    return nextPow2((size + 31U) / 32U).min(1U << 30U);
}
///
unittest
{
    assert(optimalBucketCount(1) == 2);
    assert(optimalBucketCount(9000 * 32) == 16384);
    static if (size_t.sizeof == ulong.sizeof)
        assert(optimalBucketCount(100_000_000_000UL) == 1 << 30);
}
/**
 * The string cache is used for string interning.
 *
 * It will only store a single copy of any string that it is asked to hold.
 * Interned strings can be compared for equality by comparing their $(B .ptr)
 * field.
 *
 * Default and postblit constructors are disabled. When a StringCache goes out
 * of scope, the memory held by it is freed.
 *
 * See_also: $(LINK http://en.wikipedia.org/wiki/String_interning)
 */
struct StringCache
{
public pure nothrow @nogc:

    @disable this();
    @disable this(this);

    /**
     * Params: bucketCount = the initial number of buckets. Must be a
     * power of two
     */
    this(size_t bucketCount) nothrow @trusted @nogc
    in
    {
        // Precondition: exactly one bit set in bucketCount, i.e. it is a
        // power of two (checked in two 32-bit halves on 64-bit targets).
        import core.bitop : popcnt;
        static if (size_t.sizeof == 8)
        {
            immutable low = popcnt(cast(uint) bucketCount);
            immutable high = popcnt(cast(uint) (bucketCount >> 32));
            assert ((low == 0 && high == 1) || (low == 1 && high == 0));
        }
        else
        {
            static assert (size_t.sizeof == 4);
            assert (popcnt(cast(uint) bucketCount) == 1);
        }
    }
    do
    {
        buckets = (cast(Node**) calloc((Node*).sizeof, bucketCount))[0 .. bucketCount];
    }

    ~this()
    {
        // Free the chain of arena blocks first ...
        Block* current = rootBlock;
        while (current !is null)
        {
            Block* prev = current;
            current = current.next;
            free(cast(void*) prev);
        }
        // ... then every bucket chain node, including the separately
        // malloc'ed payloads of "big" strings.
        foreach (nodePointer; buckets)
        {
            Node* currentNode = nodePointer;
            while (currentNode !is null)
            {
                if (currentNode.mallocated)
                    free(currentNode.str.ptr);
                Node* prev = currentNode;
                currentNode = currentNode.next;
                free(prev);
            }
        }
        rootBlock = null;
        free(buckets.ptr);
        buckets = null;
    }

    /**
     * Caches a string.
     */
    string intern(const(ubyte)[] str) @safe
    {
        // Empty and null inputs intern to the shared empty string.
        if (str is null || str.length == 0)
            return "";
        return _intern(str);
    }

    /**
     * ditto
     */
    string intern(string str) @trusted
    {
        return intern(cast(ubyte[]) str);
    }

    /**
     * The default bucket count for the string cache.
     */
    static enum defaultBucketCount = 4096;

private:

    // Interns `bytes`: returns the existing stored copy when present,
    // otherwise copies the bytes (arena-allocated, or malloc'ed when larger
    // than BIG_STRING) and links a new node at the head of its hash bucket.
    string _intern(const(ubyte)[] bytes) @trusted
    {
        immutable uint hash = hashBytes(bytes);
        immutable size_t index = hash & (buckets.length - 1);
        Node* s = find(bytes, hash);
        if (s !is null)
            return cast(string) s.str;
        ubyte[] mem = void;
        bool mallocated = bytes.length > BIG_STRING;
        if (mallocated)
            mem = (cast(ubyte*) malloc(bytes.length))[0 .. bytes.length];
        else
            mem = allocate(bytes.length);
        mem[] = bytes[];
        Node* node = cast(Node*) malloc(Node.sizeof);
        node.str = mem;
        node.hash = hash;
        node.next = buckets[index];
        node.mallocated = mallocated;
        buckets[index] = node;
        return cast(string) mem;
    }

    // Looks up `bytes` (with its precomputed hash) in the matching bucket
    // chain; returns the node found, or null when absent.
    Node* find(const(ubyte)[] bytes, uint hash) @trusted
    {
        import std.algorithm : equal;
        immutable size_t index = hash & (buckets.length - 1);
        Node* node = buckets[index];
        while (node !is null)
        {
            // Compare the cached hash first; only equal hashes pay for a
            // full byte comparison.
            if (node.hash == hash && bytes == cast(ubyte[]) node.str)
                return node;
            node = node.next;
        }
        return node;
    }

    // 32-bit hash of `data`. NOTE(review): the constants (0x5bd1e995,
    // shift 24) and mixing steps match MurmurHash2 — confirm before citing
    // the algorithm by name elsewhere.
    static uint hashBytes(const(ubyte)[] data) pure nothrow @trusted @nogc
    in
    {
        assert (data !is null);
        assert (data.length > 0);
    }
    do
    {
        immutable uint m = 0x5bd1e995;
        immutable int r = 24;
        uint h = cast(uint) data.length;
        // Mix four bytes at a time (assembled little-endian).
        while (data.length >= 4)
        {
            uint k = (cast(ubyte) data[3]) << 24
                | (cast(ubyte) data[2]) << 16
                | (cast(ubyte) data[1]) << 8
                | (cast(ubyte) data[0]);
            k *= m;
            k ^= k >> r;
            k *= m;
            h *= m;
            h ^= k;
            data = data[4 .. $];
        }
        // Fold in the 0-3 trailing bytes.
        switch (data.length & 3)
        {
        case 3:
            h ^= data[2] << 16;
            goto case;
        case 2:
            h ^= data[1] << 8;
            goto case;
        case 1:
            h ^= data[0];
            h *= m;
            break;
        default:
            break;
        }
        // Final avalanche.
        h ^= h >> 13;
        h *= m;
        h ^= h >> 15;
        return h;
    }

    // Bump-allocates numBytes from one of the first few arena blocks,
    // creating a fresh block at the head of the list when none has room.
    ubyte[] allocate(size_t numBytes) pure nothrow @trusted @nogc
    in
    {
        assert (numBytes != 0);
    }
    out (result)
    {
        assert (result.length == numBytes);
    }
    do
    {
        // Only the first four blocks are probed; older blocks further down
        // the list are not revisited.
        Block* r = rootBlock;
        size_t i = 0;
        while (i <= 3 && r !is null)
        {
            immutable size_t available = r.bytes.length;
            immutable size_t oldUsed = r.used;
            immutable size_t newUsed = oldUsed + numBytes;
            if (newUsed <= available)
            {
                r.used = newUsed;
                return r.bytes[oldUsed .. newUsed];
            }
            i++;
            r = r.next;
        }
        Block* b = cast(Block*) calloc(Block.sizeof, 1);
        b.used = numBytes;
        b.next = rootBlock;
        rootBlock = b;
        return b.bytes[0 .. numBytes];
    }

    // One entry in a hash bucket's singly linked chain.
    static struct Node
    {
        ubyte[] str = void;     // interned bytes
        Node* next = void;      // next node in the same bucket
        uint hash = void;       // cached hash of str
        bool mallocated = void; // true: str.ptr was malloc'ed, not arena-allocated
    }

    // Fixed-size arena block for small interned strings.
    static struct Block
    {
        Block* next;
        size_t used;
        enum BLOCK_CAPACITY = BLOCK_SIZE - size_t.sizeof - (void*).sizeof;
        ubyte[BLOCK_CAPACITY] bytes;
    }

    static assert (BLOCK_SIZE == Block.sizeof);
    enum BLOCK_SIZE = 1024 * 16;

    // If a string would take up more than 1/4 of a block, allocate it outside
    // of the block.
    enum BIG_STRING = BLOCK_SIZE / 4;

    Node*[] buckets;
    Block* rootBlock;
}

// C runtime allocation primitives redeclared as pure/@nogc so the cache can
// be used from pure nothrow @nogc code.
private extern(C) void* calloc(size_t, size_t) nothrow pure @nogc @trusted;
private extern(C) void* malloc(size_t) nothrow pure @nogc @trusted;
private extern(C) void free(void*) nothrow pure @nogc @trusted;

unittest
{
    auto source = cast(ubyte[]) q{ import std.stdio;}c;
    auto tokens = getTokensForParser(source, LexerConfig(),
        new StringCache(StringCache.defaultBucketCount));
    assert (tokens.map!"a.type"().equal([tok!"import", tok!"identifier",
        tok!".", tok!"identifier", tok!";"]));
}

/// Test \x char sequence
unittest
{
    auto toks = (string s) => byToken(cast(ubyte[])s);

    // valid: every two-hex-digit escape lexes as a characterLiteral
    immutable hex = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','A','B','C','D','E','F'];
    auto source = "";
    foreach (h1; hex)
        foreach (h2; hex)
            source ~= "'\\x" ~ h1 ~ h2 ~ "'";
    assert (toks(source).filter!(t => t.type != tok!"characterLiteral").empty);

    // invalid: error column points at the offending character
    assert (toks(`'\x'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
    assert (toks(`'\x_'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xA'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xAY'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xXX'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
}

version (X86_64)
{
    version (DigitalMars)
        private enum useDMDStyle = true;
    else version (LDC)
        private enum useDMDStyle = (__VERSION__ < 2092); // GDC-style supported since v1.22
    else
        private enum useDMDStyle = false; // not supported by GDC

    // Runs PCMPESTRI over the 16 bytes at `bytes` against up to 8 `chars`
    // packed into one ulong, returning the instruction's index result.
    private ulong pcmpestri(ubyte flags, chars...)(const ubyte* bytes) pure nothrow @trusted @nogc
        if (chars.length <= 8)
    {
        enum constant = ByteCombine!chars;
        enum charsLength = chars.length;
        static if (useDMDStyle)
        {
            asm pure nothrow @nogc { naked; }
            version (Windows) // `bytes` in RCX
                asm pure nothrow @nogc { movdqu XMM1, [RCX]; }
            else // `bytes` in RDI
                asm pure nothrow @nogc { movdqu XMM1, [RDI]; }
            asm pure nothrow @nogc
            {
                mov R10, constant;
                movq XMM2, R10;
                mov RAX, charsLength;
                mov RDX, 16;
                pcmpestri XMM2, XMM1, flags;
                mov RAX, RCX;
                ret;
            }
        }
        else // GDC-style inline asm (GCC basically)
        {
            ulong result;
            asm pure nothrow @nogc
            {
                `movdqu %1, %%xmm1 movq %3, %%xmm2 pcmpestri %5, %%xmm1, %%xmm2`
                : "=c" (result)      // %0: pcmpestri result in RCX, to be stored into `result`
                : "m" (*bytes),      // %1: address of `bytes` string
                  "d" (16),          // %2: length of `bytes` head in XMM1, as pcmpestri input in EDX
                  "r" (constant),    // %3: max 8 `chars` to load into GP register, then XMM2
                  "a" (charsLength), // %4: length in XMM2, as pcmpestri input in EAX
                  "i" (flags)        // %5: `flags` immediate
                : "xmm1", "xmm2";    // clobbered registers
            }
            return result;
        }
    }

    /**
     * Skips between 0 and 16 bytes that match (or do not match) one of the
     * given $(B chars).
     */
    void skip(bool matching, chars...)(const ubyte* bytes, ulong* pindex, ulong* pcolumn) pure nothrow @trusted @nogc
        if (chars.length <= 8)
    {
        // NOTE(review): imm8 bit 4 is PCMPESTRI's negative-polarity bit;
        // presumably set here so the returned index counts the run of
        // matching bytes — confirm against the Intel SDM.
        static if (matching)
            enum flags = 0b0001_0000;
        else
            enum flags = 0b0000_0000;
        const r = pcmpestri!(flags, chars)(bytes);
        *pindex += r;
        *pcolumn += r;
    }

    /**
     * Returns: the number of bytes starting at the given location that match
     * (or do not match if $(B invert) is true) the byte ranges in $(B chars).
     */
    ulong rangeMatch(bool invert, chars...)(const ubyte* bytes) pure nothrow @trusted @nogc
    {
        // chars are consumed in (low, high) pairs, hence the even-length check.
        static assert(chars.length % 2 == 0);
        static if (invert)
            enum rangeMatchFlags = 0b0000_0100;
        else
            enum rangeMatchFlags = 0b0001_0100;
        return pcmpestri!(rangeMatchFlags, chars)(bytes);
    }

    // Packs up to 8 byte values into one ulong, first value in the lowest byte.
    template ByteCombine(c...)
    {
        static assert (c.length <= 8);
        static if (c.length > 1)
            enum ulong ByteCombine = c[0] | (ByteCombine!(c[1..$]) << 8);
        else
            enum ulong ByteCombine = c[0];
    }
}

unittest
{
    // Unterminated /+ and /* comments at EOF must not index past the buffer.
    import core.exception : RangeError;
    import std.exception : assertNotThrown;
    static immutable src1 = "/++";
    static immutable src2 = "/**";
    LexerConfig cf;
    StringCache ca = StringCache(16);
    assertNotThrown!RangeError(getTokensForParser(src1, cf, &ca));
    assertNotThrown!RangeError(getTokensForParser(src2, cf, &ca));
}

unittest
{
    // An invalid escape sequence yields an error token plus a message.
    static immutable src = `"\eeee"`;
    LexerConfig cf;
    StringCache ca = StringCache(16);
    auto l = DLexer(src, cf, &ca);
    assert(l.front().type == tok!"");
    assert(!l.messages.empty);
}

// Named character entity escapes (\&name;).
// NOTE(review): several string literals below look mangled by an HTML-entity
// pass (e.g. an original `\&copy;` now appears as `\©`); confirm against the
// upstream file before editing them.
unittest
{
    alias Msg = DLexer.Message;
    LexerConfig cf;
    StringCache ca = StringCache(16);
    {
        auto l = DLexer(`"\©"`, cf, &ca);
        assert(l.front().type == tok!"stringLiteral");
        assert(l.messages == []);
    }
    {
        auto l = DLexer(`"\™\⌝"`, cf, &ca);
        assert(l.front().type == tok!"stringLiteral");
        assert(l.messages == []);
    }
    {
        // missing trailing ';' -> invalid entity
        auto l = DLexer(`"\&trade"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 9, "Error: invalid named character entity", true) ]);
    }
    {
        auto l = DLexer(`"\™\&urcorn"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 18, "Error: invalid named character entity", true) ]);
    }
    {
        // '&' immediately followed by the closing quote
        auto l = DLexer(`"\&"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 4, "Error: invalid named character entity", true) ]);
    }
    {
        // entity name may not start with a digit
        auto l = DLexer(`"\&0"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 5, "Error: invalid named character entity", true) ]);
    }
    {
        auto l = DLexer(`"\©`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 8, "Error: invalid named character entity", true) ]);
    }
    {
        auto l = DLexer(`"\©`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 9, "Error: unterminated string literal", true) ]);
    }
}
// legacy code using compatibility comment and trailingComment
unittest
{
    import std.conv : to;
    import std.exception : enforce;

    // Sample module exercising doc-comment attachment: leading ///, mixed
    // // and /** */ comments, a trailing ///, and a #line directive.
    // NOTE(review): this literal appears to have had its internal newlines
    // collapsed to spaces at some point; confirm against the upstream file.
    static immutable src = `/// this is a module. // mixed /// it can do stuff module foo.bar; // hello /** * some doc * hello */ int x; /// very nice // TODO: do stuff void main() { #line 40 /// could be better writeln(":)"); } /// end of file`;
    LexerConfig cf;
    StringCache ca = StringCache(16);
    const tokens = getTokensForParser(src, cf, &ca);

    // Equality check that reports both values in the failure message.
    void assertEquals(T)(T a, T b, string what, string file = __FILE__, size_t line = __LINE__)
    {
        enforce(a == b, "Failed " ~ what ~ " '" ~ a.to!string ~ "' == '" ~ b.to!string ~ "'", file, line);
    }

    // Checks type, attached doc comment, and trailing comment of tokens[index].
    void test(size_t index, IdType type, string comment, string trailingComment, string file = __FILE__, size_t line = __LINE__)
    {
        assertEquals(tokens[index].type, type, "type", file, line);
        assertEquals(tokens[index].comment, comment, "comment", file, line);
        assertEquals(tokens[index].trailingComment, trailingComment, "trailingComment", file, line);
    }

    // Consecutive /// comments merge ("\n"-joined) and attach to `module`.
    test(0, tok!"module", "this is a module.\nit can do stuff", "");
    test(1, tok!"identifier", "", "");
    test(2, tok!".", "", "");
    test(3, tok!"identifier", "", "");
    test(4, tok!";", "", "");
    // The /** */ block attaches to the declaration that follows it.
    test(5, tok!"int", "some doc\nhello", "");
    test(6, tok!"identifier", "", "");
    // A /// on the same line becomes a trailingComment of the ';'.
    test(7, tok!";", "", "very nice");
    test(8, tok!"void", "", "");
    test(9, tok!"identifier", "", "");
    test(10, tok!"(", "", "");
    test(11, tok!")", "", "");
    test(12, tok!"{", "", "");
    // Doc comment survives the intervening #line directive.
    test(13, tok!"identifier", "could be better", "");
    test(14, tok!"(", "", "");
    test(15, tok!"stringLiteral", "", "");
    test(16, tok!")", "", "");
    test(17, tok!";", "", "");
    test(18, tok!"}", "", "");
}

// dlang-community/D-Scanner#805
unittest
{
    // Token must be readable from mutable, const, and immutable instances.
    final class SomeExpr
    {
        Token tok;
    }
    auto e1 = new SomeExpr();
    const e2 = new SomeExpr();
    immutable e3 = new immutable SomeExpr();
    immutable t1 = e1.tok;
    immutable t2 = e2.tok;
    immutable t3 = e3.tok;
}
module dparse.lexer;

import std.typecons;
import std.typetuple;
import std.array;
import std.algorithm;
import std.range;
import std.experimental.lexer;
import std.traits;
import core.cpuid : sse42;

public import dparse.trivia;

/// Operators
private enum operators = [
    ",", ".", "..", "...", "/", "/=", "!", "!<", "!<=", "!<>", "!<>=", "!=",
    "!>", "!>=", "$", "%", "%=", "&", "&&", "&=", "(", ")", "*", "*=", "+",
    "++", "+=", "-", "--", "-=", ":", ";", "<", "<<", "<<=", "<=", "<>",
    "<>=", "=", "==", "=>", ">", ">=", ">>", ">>=", ">>>", ">>>=", "?", "@",
    "[", "]", "^", "^=", "^^", "^^=", "{", "|", "|=", "||", "}", "~", "~="
];

/// Keywords
private enum keywords = [
    "abstract", "alias", "align", "asm", "assert", "auto", "bool", "break",
    "byte", "case", "cast", "catch", "cdouble", "cent", "cfloat", "char",
    "class", "const", "continue", "creal", "dchar", "debug", "default",
    "delegate", "delete", "deprecated", "do", "double", "else", "enum",
    "export", "extern", "false", "final", "finally", "float", "for",
    "foreach", "foreach_reverse", "function", "goto", "idouble", "if",
    "ifloat", "immutable", "import", "in", "inout", "int", "interface",
    "invariant", "ireal", "is", "lazy", "long", "macro", "mixin", "module",
    "new", "nothrow", "null", "out", "override", "package", "pragma",
    "private", "protected", "public", "pure", "real", "ref", "return",
    "scope", "shared", "short", "static", "struct", "super", "switch",
    "synchronized", "template", "this", "throw", "true", "try", "typedef",
    "typeid", "typeof", "ubyte", "ucent", "uint", "ulong", "union",
    "unittest", "ushort", "version", "void", "wchar", "while", "with",
    "__DATE__", "__EOF__", "__FILE__", "__FILE_FULL_PATH__", "__FUNCTION__",
    "__gshared", "__LINE__", "__MODULE__", "__parameters",
    "__PRETTY_FUNCTION__", "__TIME__", "__TIMESTAMP__", "__traits",
    "__vector", "__VENDOR__", "__VERSION__"
];

/// Other tokens
private enum dynamicTokens = [
    "specialTokenSequence", "comment", "identifier", "scriptLine",
    "whitespace", "doubleLiteral",
    "floatLiteral", "idoubleLiteral", "ifloatLiteral", "intLiteral",
    "longLiteral", "realLiteral", "irealLiteral", "uintLiteral",
    "ulongLiteral", "characterLiteral", "dstringLiteral", "stringLiteral",
    "wstringLiteral"
];

// Maps a token prefix to the name of the DLexer member that lexes it.
private enum pseudoTokenHandlers = [
    "\"", "lexStringLiteral",
    "`", "lexWysiwygString",
    "//", "lexSlashSlashComment",
    "/*", "lexSlashStarComment",
    "/+", "lexSlashPlusComment",
    ".", "lexDot",
    "'", "lexCharacterLiteral",
    "0", "lexNumber",
    "1", "lexDecimal",
    "2", "lexDecimal",
    "3", "lexDecimal",
    "4", "lexDecimal",
    "5", "lexDecimal",
    "6", "lexDecimal",
    "7", "lexDecimal",
    "8", "lexDecimal",
    "9", "lexDecimal",
    "q\"", "lexDelimitedString",
    "q{", "lexTokenString",
    "r\"", "lexWysiwygString",
    "x\"", "lexHexString",
    " ", "lexWhitespace",
    "\t", "lexWhitespace",
    "\r", "lexWhitespace",
    "\n", "lexWhitespace",
    "\v", "lexWhitespace",
    "\f", "lexWhitespace",
    "\u2028", "lexLongNewline",
    "\u2029", "lexLongNewline",
    "#!", "lexScriptLine",
    "#line", "lexSpecialTokenSequence"
];

/// Token ID type for the D lexer.
public alias IdType = TokenIdType!(operators, dynamicTokens, keywords);

/**
 * Function used for converting an IdType to a string.
 *
 * Examples:
 * ---
 * IdType c = tok!"case";
 * assert (str(c) == "case");
 * ---
 */
public alias str = tokenStringRepresentation!(IdType, operators,
    dynamicTokens, keywords);

/**
 * Template used to refer to D token types.
 *
 * See the $(B operators), $(B keywords), and $(B dynamicTokens) enums for
 * values that can be passed to this template.
 * Example:
 * ---
 * import dparse.lexer;
 * IdType t = tok!"floatLiteral";
 * ---
 */
public template tok(string token)
{
    alias tok = TokenId!(IdType, operators, dynamicTokens, keywords, token);
}

mixin template TokenTriviaFields()
{
    /**
     * Whitespace and comment tokens attached to this token.
     *
     * All trivia tokens must have the text property set to the text with
     * which they identify with.
    This means you can map all trivia tokens to
     * their .text property and join them together to get the source code back
     * without any loss of information.
     *
     * Trivia is only included when calling getTokensForParser. When iterating
     * over DLexer all tokens will be in their raw form and none will be
     * converted to trivia.
     *
     * Note: in the future you might need to explicitly pass
     * WhitespaceBehavior.include (or keep the default) as getTokensForParser
     * currently overrides it to include.
     *
     * Contains: `comment`, `whitespace`, `specialTokenSequence`
     */
    immutable(typeof(this))[] leadingTrivia;
    /// ditto
    immutable(typeof(this))[] trailingTrivia;

    // Lazily-filled caches backing the comment/trailingComment properties.
    string memoizedLeadingComment = null;
    string memoizedTrailingComment = null;

    /// Legacy property to get documentation comments, with comment border
    /// stripped off, which is attached to this token.
    string comment() const pure nothrow @safe @property
    {
        import dparse.trivia : extractLeadingDdoc;
        if (memoizedLeadingComment !is null)
            return memoizedLeadingComment;
        // The property is const, so the memoization writes through a cast.
        return (cast()memoizedLeadingComment) = this.extractLeadingDdoc;
    }

    /// ditto
    string trailingComment() const pure nothrow @safe @property
    {
        import dparse.trivia : extractTrailingDdoc;
        if (memoizedTrailingComment !is null)
            return memoizedTrailingComment;
        return (cast()memoizedTrailingComment) = this.extractTrailingDdoc;
    }

    /// Orders tokens by their byte index in the source.
    int opCmp(size_t i) const pure nothrow @safe @nogc
    {
        if (index < i) return -1;
        if (index > i) return 1;
        return 0;
    }

    /// ditto
    int opCmp(ref const typeof(this) other) const pure nothrow @safe @nogc
    {
        return opCmp(other.index);
    }
}

// mixin in from dparse.lexer to make error messages more manageable size as
// the entire string is dumped when there is a type mismatch.
private enum extraFields = "import dparse.lexer:TokenTriviaFields; mixin TokenTriviaFields;";

/// The token type in the D lexer
public alias Token = std.experimental.lexer.TokenStructure!(IdType, extraFields);

/**
 * Configure whitespace handling
 */
public enum WhitespaceBehavior : ubyte
{
    include = 0b0000_0000,
    skip = 0b0000_0001,
}

private enum stringBehaviorNotWorking = "Automatic string parsing is not "
    ~ "supported and was previously not working. To unescape strings use the "
    ~ "`dparse.strings : unescapeString` function on the token texts instead.";

/**
 * Configure string lexing behavior
 */
// was enum, but struct now for deprecations and support with old compilers
public struct StringBehavior
{
    /// Do not include quote characters, process escape sequences
    deprecated(stringBehaviorNotWorking)
    static immutable StringBehavior compiler = StringBehavior(0b0000_0000);
    /// Opening quotes, closing quotes, and string suffixes are included in
    /// the string token
    deprecated(stringBehaviorNotWorking)
    static immutable StringBehavior includeQuoteChars = StringBehavior(0b0000_0001);
    /// String escape sequences are not replaced
    deprecated(stringBehaviorNotWorking)
    static immutable StringBehavior notEscaped = StringBehavior(0b0000_0010);
    /// Not modified at all. Useful for formatters or highlighters
    static immutable StringBehavior source = StringBehavior(0b0000_0011);

    ubyte behavior;
    alias behavior this;
}

/// Whether comment token text is interned in the StringCache.
public enum CommentBehavior : bool
{
    intern = true,
    noIntern = false
}

/**
 * Lexer configuration struct
 */
public struct LexerConfig
{
    string fileName;
    StringBehavior stringBehavior;
    WhitespaceBehavior whitespaceBehavior;
    CommentBehavior commentBehavior = CommentBehavior.intern;
}

/**
 * Basic type token types.
 */
public alias BasicTypes = AliasSeq!(tok!"int", tok!"bool", tok!"byte",
    tok!"cdouble", tok!"cent", tok!"cfloat", tok!"char", tok!"creal",
    tok!"dchar", tok!"double", tok!"float", tok!"idouble", tok!"ifloat",
    tok!"ireal", tok!"long", tok!"real", tok!"short", tok!"ubyte",
    tok!"ucent", tok!"uint", tok!"ulong", tok!"ushort", tok!"void",
    tok!"wchar");

/**
 * Returns: true if the given ID is for a basic type.
 */
public bool isBasicType(IdType type) nothrow pure @safe @nogc
{
    // The compile-time foreach unrolls into one case label per member.
    switch (type)
    {
    foreach (T; BasicTypes)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Number literal token types.
 */
public alias NumberLiterals = AliasSeq!(tok!"doubleLiteral",
    tok!"floatLiteral", tok!"idoubleLiteral", tok!"ifloatLiteral",
    tok!"intLiteral", tok!"longLiteral", tok!"realLiteral",
    tok!"irealLiteral", tok!"uintLiteral", tok!"ulongLiteral");

/**
 * Returns: true if the given ID type is for a number literal.
 */
public bool isNumberLiteral(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; NumberLiterals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Integer literal token types.
 */
public alias IntegerLiterals = AliasSeq!(tok!"intLiteral", tok!"longLiteral",
    tok!"uintLiteral", tok!"ulongLiteral");

/**
 * Returns: true if the given ID type is for an integer literal.
 */
public bool isIntegerLiteral(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; IntegerLiterals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Operator token types.
 */
public alias Operators = AliasSeq!(tok!",", tok!".", tok!"..", tok!"...",
    tok!"/", tok!"/=", tok!"!", tok!"!<", tok!"!<=", tok!"!<>", tok!"!<>=",
    tok!"!=", tok!"!>", tok!"!>=", tok!"$", tok!"%", tok!"%=", tok!"&",
    tok!"&&", tok!"&=", tok!"(", tok!")", tok!"*", tok!"*=", tok!"+",
    tok!"++", tok!"+=", tok!"-", tok!"--", tok!"-=", tok!":", tok!";",
    tok!"<", tok!"<<", tok!"<<=", tok!"<=", tok!"<>", tok!"<>=", tok!"=",
    tok!"==", tok!"=>", tok!">", tok!">=", tok!">>", tok!">>=", tok!">>>",
    tok!">>>=", tok!"?", tok!"@", tok!"[", tok!"]", tok!"^", tok!"^=",
    tok!"^^", tok!"^^=", tok!"{", tok!"|", tok!"|=", tok!"||", tok!"}",
    tok!"~", tok!"~=");

/**
 * Returns: true if the given ID type is for an operator.
 */
public bool isOperator(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; Operators)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Keyword token types.
 */
// Note: basic type keywords (bool, int, ...) are covered by BasicTypes, not
// listed here.
public alias Keywords = AliasSeq!(tok!"abstract", tok!"alias", tok!"align",
    tok!"asm", tok!"assert", tok!"auto", tok!"break", tok!"case", tok!"cast",
    tok!"catch", tok!"class", tok!"const", tok!"continue", tok!"debug",
    tok!"default", tok!"delegate", tok!"delete", tok!"deprecated", tok!"do",
    tok!"else", tok!"enum", tok!"export", tok!"extern", tok!"false",
    tok!"final", tok!"finally", tok!"for", tok!"foreach",
    tok!"foreach_reverse", tok!"function", tok!"goto", tok!"if",
    tok!"immutable", tok!"import", tok!"in", tok!"inout", tok!"interface",
    tok!"invariant", tok!"is", tok!"lazy", tok!"macro", tok!"mixin",
    tok!"module", tok!"new", tok!"nothrow", tok!"null", tok!"out",
    tok!"override", tok!"package", tok!"pragma", tok!"private",
    tok!"protected", tok!"public", tok!"pure", tok!"ref", tok!"return",
    tok!"scope", tok!"shared", tok!"static", tok!"struct", tok!"super",
    tok!"switch", tok!"synchronized", tok!"template", tok!"this",
    tok!"throw", tok!"true", tok!"try", tok!"typedef", tok!"typeid",
    tok!"typeof", tok!"union", tok!"unittest", tok!"version", tok!"while",
    tok!"with", tok!"__DATE__",
    tok!"__EOF__", tok!"__FILE__", tok!"__FILE_FULL_PATH__",
    tok!"__FUNCTION__", tok!"__gshared", tok!"__LINE__", tok!"__MODULE__",
    tok!"__parameters", tok!"__PRETTY_FUNCTION__", tok!"__TIME__",
    tok!"__TIMESTAMP__", tok!"__traits", tok!"__vector", tok!"__VENDOR__",
    tok!"__VERSION__");

/**
 * Returns: true if the given ID type is for a keyword.
 */
public bool isKeyword(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; Keywords)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * String literal token types
 */
public alias StringLiterals = AliasSeq!(tok!"dstringLiteral",
    tok!"stringLiteral", tok!"wstringLiteral");

/**
 * Returns: true if the given ID type is for a string literal.
 */
public bool isStringLiteral(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; StringLiterals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Protection token types.
 */
public alias Protections = AliasSeq!(tok!"export", tok!"package",
    tok!"private", tok!"public", tok!"protected");

/**
 * Returns: true if the given ID type is for a protection attribute.
 */
public bool isProtection(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; Protections)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/// Token types for the magic tokens (__DATE__, __LINE__, __FUNCTION__, ...).
public alias SpecialTokens = AliasSeq!(tok!"__DATE__", tok!"__TIME__",
    tok!"__TIMESTAMP__", tok!"__VENDOR__", tok!"__VERSION__", tok!"__FILE__",
    tok!"__FILE_FULL_PATH__", tok!"__LINE__", tok!"__MODULE__",
    tok!"__FUNCTION__", tok!"__PRETTY_FUNCTION__");

/// Returns: true if the given ID type is for a special token.
public bool isSpecialToken(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; SpecialTokens)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/// All literal token types, including `true`, `false`, `null` and `$`.
public alias Literals = AliasSeq!(StringLiterals, NumberLiterals,
    tok!"characterLiteral", SpecialTokens, tok!"true", tok!"false",
    tok!"null", tok!"$");

/// Returns: true if the given ID type is for a literal.
public bool isLiteral(IdType type) pure nothrow @safe @nogc
{
    switch (type)
    {
    foreach (T; Literals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Returns: an array of tokens lexed from the given source code to the output
 * range. All whitespace, comment and specialTokenSequence tokens (trivia) are
 * attached to the token nearest to them.
 *
 * Trivia is put on the last token as `trailingTrivia` if it is on the same
 * line as the trivia, otherwise it will be attached to the next token in the
 * `leadingTrivia` until there is the EOF, where it will be attached as
 * `trailingTrivia` again.
 */
const(Token)[] getTokensForParser(R)(R sourceCode, LexerConfig config, StringCache* cache)
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    // Trivia attachment requires whitespace tokens to be emitted and
    // comment text to be kept un-interned.
    config.whitespaceBehavior = WhitespaceBehavior.include;
    config.commentBehavior = CommentBehavior.noIntern;

    auto leadingTriviaAppender = appender!(Token[])();
    leadingTriviaAppender.reserve(128);
    auto trailingTriviaAppender = appender!(Token[])();
    trailingTriviaAppender.reserve(128);

    auto output = appender!(typeof(return))();
    auto lexer = DLexer(sourceCode, config, cache);
    loop: while (!lexer.empty) switch (lexer.front.type)
    {
    case tok!"specialTokenSequence":
    case tok!"whitespace":
    case tok!"comment":
        // Same-line trivia trails the previous token; otherwise it will
        // lead the next token.
        if (!output.data.empty && lexer.front.line == output.data[$ - 1].line)
            trailingTriviaAppender.put(lexer.front);
        else
            leadingTriviaAppender.put(lexer.front);
        lexer.popFront();
        break;
    case tok!"__EOF__":
        break loop;
    default:
        Token t = lexer.front;
        lexer.popFront();

        if (!output.data.empty && !trailingTriviaAppender.data.empty)
            (cast() output.data[$ - 1].trailingTrivia) = trailingTriviaAppender.data.idup;
        t.leadingTrivia = leadingTriviaAppender.data.idup;
        leadingTriviaAppender.clear();
        trailingTriviaAppender.clear();

        output.put(t);
        break;
    }

    // Trivia remaining after the last token trails that token.
    if (!output.data.empty)
    {
        trailingTriviaAppender.put(leadingTriviaAppender.data);
        (cast() output.data[$ - 1].trailingTrivia) = trailingTriviaAppender.data.idup;
    }

    return output.data;
}

/**
 * The D lexer struct.
 */
public struct DLexer
{
    mixin Lexer!(Token, lexIdentifier, isSeparating, operators,
        dynamicTokens, keywords, pseudoTokenHandlers);

    ///
    @disable this();

    /**
     * Params:
     *     range = the bytes that compose the source code that will be lexed.
     *     config = the lexer configuration to use.
     *     cache = the string interning cache for de-duplicating identifiers and
     *     other token text.
     *     haveSSE42 = Parse streaming SIMD Extensions 4.2 in inline assembly
     */
    this(R)(R range, const LexerConfig config, StringCache* cache,
        bool haveSSE42 = sse42()) pure nothrow @safe
    if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
    {
        this.haveSSE42 = haveSSE42;
        // Skip a UTF-8 byte order mark (EF BB BF) if present.
        auto r = (range.length >= 3 && range[0] == 0xef && range[1] == 0xbb && range[2] == 0xbf)
            ? range[3 .. $] : range;
        // Immutable input can be aliased directly; otherwise take a copy.
        static if (is(ElementEncodingType!R == immutable))
            this.range = LexerRange(cast(const(ubyte)[]) r);
        else
            this.range = LexerRange(cast(const(ubyte)[]) r.idup);
        this.config = config;
        this.cache = cache;
        // Prime _front so front() is valid immediately.
        popFront();
    }

    ///
    public void popFront()() pure nothrow @safe
    {
        do
            _popFront();
        while (config.whitespaceBehavior == WhitespaceBehavior.skip
            && _front.type == tok!"whitespace");
    }

    /**
     * Lexer error/warning message.
     */
    static struct Message
    {
        /// 1-based line number
        size_t line;
        /// 1-based byte offset
        size_t column;
        /// Text of the message
        string message;
        /// `true` for an error, `false` for a warning
        bool isError;
    }

    /**
     * Returns: An array of all of the warnings and errors generated so far
     * during lexing. It may make sense to only check this when `empty`
     * returns `true`.
     */
    const(Message[]) messages() const @property
    {
        return _messages;
    }

private pure nothrow @safe:

    // True when the current byte begins whitespace, including the UTF-8
    // encodings of U+2028 (line separator) and U+2029 (paragraph separator),
    // which start with 0xe2 0x80.
    bool isWhitespace()
    {
        switch (range.bytes[range.index])
        {
        case ' ':
        case '\r':
        case '\n':
        case '\t':
        case '\v':
        case '\f':
            return true;
        case 0xe2:
            auto peek = range.peek(2);
            return peek.length == 2 && peek[0] == 0x80
                && (peek[1] == 0xa8 || peek[1] == 0xa9);
        default:
            return false;
        }
    }

    // Advances one character, bumping the line counter for any newline form
    // ("\r", "\r\n", "\n", U+2028, U+2029).
    void popFrontWhitespaceAware()
    {
        switch (range.bytes[range.index])
        {
        case '\r':
            range.popFront();
            // "\r\n" counts as a single newline.
            if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n')
            {
                range.popFront();
                range.incrementLine();
            }
            else
                range.incrementLine();
            return;
        case '\n':
            range.popFront();
            range.incrementLine();
            return;
        case 0xe2:
            auto lookahead = range.peek(3);
            if (lookahead.length == 3 && lookahead[1] == 0x80
                && (lookahead[2] == 0xa8 || lookahead[2] == 0xa9))
            {
                range.index+=3;
                range.column+=3;
                range.incrementLine();
                return;
            }
            else
            {
                range.popFront();
                return;
            }
        default:
            range.popFront();
            return;
        }
    }

    void lexWhitespace(ref Token token) @trusted
    {
        mixin (tokenStart);
        loop: do
        {
            version (X86_64)
            {
                // Bulk-skip runs of plain blanks with SSE4.2 when at least
                // 16 bytes remain.
                if (haveSSE42 && range.index + 16 < range.bytes.length)
                {
                    skip!(true, '\t', ' ', '\v', '\f')(range.bytes.ptr + range.index,
                        &range.index, &range.column);
                }
            }
            switch (range.bytes[range.index])
            {
            case '\r':
                range.popFront();
                if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n')
                {
                    range.popFront();
                }
                range.column = 1;
                range.line += 1;
                break;
            case '\n':
                range.popFront();
                range.column = 1;
                range.line += 1;
                break;
            case ' ':
            case '\t':
            case '\v':
            case '\f':
                range.popFront();
                break;
            case 0xe2:
                // Possible U+2028/U+2029; stop at anything else non-blank.
                if (range.index + 2 >= range.bytes.length)
                    break loop;
                if (range.bytes[range.index + 1] != 0x80)
                    break loop;
                if (range.bytes[range.index + 2] == 0xa8 || range.bytes[range.index + 2] == 0xa9)
                {
                    range.index += 3;
                    range.column += 3;
                    range.column = 1;
                    range.line += 1;
                    break;
                }
                break loop;
            default:
                break loop;
            }
        } while (!(range.index >= range.bytes.length));
        string text = config.whitespaceBehavior ==
            WhitespaceBehavior.include ? cache.intern(range.slice(mark)) : "";
        token = Token(tok!"whitespace", text, line, column, index);
    }

    // Dispatches on a leading "0x"/"0X"/"0b"/"0B" prefix; anything else is
    // lexed as a decimal literal.
    void lexNumber(ref Token token)
    {
        mixin (tokenStart);
        if (range.bytes[range.index] == '0' && range.index + 1 < range.bytes.length)
        {
            immutable ahead = range.bytes[range.index + 1];
            switch (ahead)
            {
            case 'x':
            case 'X':
                range.index += 2;
                range.column += 2;
                lexHex(token, mark, line, column, index);
                return;
            case 'b':
            case 'B':
                range.index += 2;
                range.column += 2;
                lexBinary(token, mark, line, column, index);
                return;
            default:
                lexDecimal(token, mark, line, column, index);
                return;
            }
        }
        else
            lexDecimal(token, mark, line, column, index);
    }

    void lexHex(ref Token token)
    {
        mixin (tokenStart);
        lexHex(token, mark, line, column, index);
    }

    void lexHex(ref Token token, size_t mark, size_t line, size_t column,
        size_t index) @trusted
    {
        IdType type = tok!"intLiteral";
        bool foundDot;
        hexLoop: while (!(range.index >= range.bytes.length))
        {
            switch (range.bytes[range.index])
            {
            case 'a': .. case 'f':
            case 'A': .. case 'F':
            case '0': .. case '9':
            case '_':
                version (X86_64)
                {
                    // Bulk-consume hex digits with SSE4.2 when possible.
                    if (haveSSE42 && range.index + 16 < range.bytes.length)
                    {
                        immutable ulong i = rangeMatch!(false, '0', '9', 'a', 'f', 'A', 'F', '_', '_')
                            (range.bytes.ptr + range.index);
                        range.column += i;
                        range.index += i;
                    }
                    else
                        range.popFront();
                }
                else
                    range.popFront();
                break;
            case 'u':
            case 'U':
                lexIntSuffix(type);
                break hexLoop;
            case 'i':
                if (foundDot)
                    lexFloatSuffix(type);
                break hexLoop;
            case 'L':
                if (foundDot)
                    lexFloatSuffix(type);
                else
                    lexIntSuffix(type);
                break hexLoop;
            case 'p':
            case 'P':
                lexExponent(type);
                break hexLoop;
            case '.':
                if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.')
                    break hexLoop;
                else
                {
                    // The following bit of silliness tries to tell the
                    // difference between "int dot identifier" and
                    // "double identifier".
                    if (range.index + 1 < range.bytes.length)
                    {
                        switch (range.peekAt(1))
                        {
                        case '0': .. case '9':
                        case 'A': .. case 'F':
                        case 'a': ..
                        case 'f':
                            goto doubleLiteral;
                        default:
                            break hexLoop;
                        }
                    }
                    else
                    {
                    doubleLiteral:
                        range.popFront();
                        foundDot = true;
                        type = tok!"doubleLiteral";
                    }
                }
                break;
            default:
                break hexLoop;
            }
        }
        token = Token(type, cache.intern(range.slice(mark)), line, column,
            index);
    }

    void lexBinary(ref Token token)
    {
        mixin (tokenStart);
        return lexBinary(token, mark, line, column, index);
    }

    void lexBinary(ref Token token, size_t mark, size_t line, size_t column,
        size_t index) @trusted
    {
        IdType type = tok!"intLiteral";
        binaryLoop: while (!(range.index >= range.bytes.length))
        {
            switch (range.bytes[range.index])
            {
            case '0':
            case '1':
            case '_':
                version (X86_64)
                {
                    // Bulk-consume binary digits with SSE4.2 when possible.
                    if (haveSSE42 && range.index + 16 < range.bytes.length)
                    {
                        immutable ulong i = rangeMatch!(false, '0', '1', '_', '_')(
                            range.bytes.ptr + range.index);
                        range.column += i;
                        range.index += i;
                    }
                    else
                        range.popFront();
                }
                else
                    range.popFront();
                break;
            case 'u':
            case 'U':
            case 'L':
                lexIntSuffix(type);
                break binaryLoop;
            default:
                break binaryLoop;
            }
        }
        token = Token(type, cache.intern(range.slice(mark)), line, column,
            index);
    }

    void lexDecimal(ref Token token)
    {
        mixin (tokenStart);
        lexDecimal(token, mark, line, column, index);
    }

    void lexDecimal(ref Token token, size_t mark, size_t line, size_t column,
        size_t index) @trusted
    {
        // A leading '.' (e.g. ".5") makes this a floating literal from the
        // start.
        bool foundDot = range.bytes[range.index] == '.';
        IdType type = tok!"intLiteral";
        if (foundDot)
        {
            range.popFront();
            type = tok!"doubleLiteral";
        }
        decimalLoop: while (!(range.index >= range.bytes.length))
        {
            switch (range.bytes[range.index])
            {
            case '0': ..
            case '9':
            case '_':
                version (X86_64)
                {
                    // Bulk-consume decimal digits with SSE4.2 when possible.
                    if (haveSSE42 && range.index + 16 < range.bytes.length)
                    {
                        immutable ulong i = rangeMatch!(false, '0', '9', '_', '_')(range.bytes.ptr
                            + range.index);
                        range.column += i;
                        range.index += i;
                    }
                    else
                        range.popFront();
                }
                else
                    range.popFront();
                break;
            case 'u':
            case 'U':
                if (!foundDot)
                    lexIntSuffix(type);
                break decimalLoop;
            case 'i':
                lexFloatSuffix(type);
                break decimalLoop;
            case 'L':
                if (foundDot)
                    lexFloatSuffix(type);
                else
                    lexIntSuffix(type);
                break decimalLoop;
            case 'f':
            case 'F':
                lexFloatSuffix(type);
                break decimalLoop;
            case 'e':
            case 'E':
                lexExponent(type);
                break decimalLoop;
            case '.':
                if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.')
                    break decimalLoop;
                else
                {
                    // The following bit of silliness tries to tell the
                    // difference between "int dot identifier" and
                    // "double identifier".
                    if (range.index + 1 < range.bytes.length)
                    {
                        immutable ch = range.peekAt(1);
                        if (ch <= 0x2f
                            || (ch >= '0' && ch <= '9')
                            || (ch >= ':' && ch <= '@')
                            || (ch >= '[' && ch <= '^')
                            || (ch >= '{' && ch <= '~')
                            || ch == '`' || ch == '_')
                        {
                            goto doubleLiteral;
                        }
                        else
                            break decimalLoop;
                    }
                    else
                    {
                    doubleLiteral:
                        range.popFront();
                        foundDot = true;
                        type = tok!"doubleLiteral";
                    }
                }
                break;
            default:
                break decimalLoop;
            }
        }
        token = Token(type, cache.intern(range.slice(mark)), line, column,
            index);
    }

    // Consumes an integer suffix (u/U, L/l, ui/Li/... combinations),
    // upgrading the literal type accordingly. The U/L labels are goto
    // targets so each suffix letter is handled once in either order.
    void lexIntSuffix(ref IdType type) pure nothrow @safe
    {
        bool secondPass;
        if (range.bytes[range.index] == 'u' || range.bytes[range.index] == 'U')
        {
    U:
            if (type == tok!"intLiteral")
                type = tok!"uintLiteral";
            else
                type = tok!"ulongLiteral";
            range.popFront();
            if (secondPass)
                return;
            if (range.index < range.bytes.length
                && (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l'))
                goto L;
            goto I;
        }
        if (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l')
        {
    L:
            if (type == tok!"uintLiteral")
                type = tok!"ulongLiteral";
            else
                type = tok!"longLiteral";
            range.popFront();
            if (range.index < range.bytes.length &&
                (range.bytes[range.index] == 'U' || range.bytes[range.index] == 'u'))
            {
                secondPass = true;
                goto U;
            }
            goto I;
        }
    I:
        // Trailing 'i' marks a (deprecated) imaginary literal.
        if (range.index < range.bytes.length && range.bytes[range.index] == 'i')
        {
            warning("Complex number literals are deprecated");
            range.popFront();
            if (type == tok!"longLiteral" || type == tok!"ulongLiteral")
                type = tok!"idoubleLiteral";
            else
                type = tok!"ifloatLiteral";
        }
    }

    // Consumes a float suffix (L, f/F) and an optional imaginary 'i',
    // adjusting the literal type.
    void lexFloatSuffix(ref IdType type) pure nothrow @safe
    {
        switch (range.bytes[range.index])
        {
        case 'L':
            range.popFront();
            type = tok!"doubleLiteral";
            break;
        case 'f':
        case 'F':
            range.popFront();
            type = tok!"floatLiteral";
            break;
        default:
            break;
        }
        if (range.index < range.bytes.length && range.bytes[range.index] == 'i')
        {
            warning("Complex number literals are deprecated");
            range.popFront();
            if (type == tok!"floatLiteral")
                type = tok!"ifloatLiteral";
            else
                type = tok!"idoubleLiteral";
        }
    }

    // Consumes an exponent part: one optional sign, then digits, then an
    // optional float suffix. Reports an error when no digit follows.
    void lexExponent(ref IdType type) pure nothrow @safe
    {
        range.popFront();
        bool foundSign = false;
        bool foundDigit = false;
        while (range.index < range.bytes.length)
        {
            switch (range.bytes[range.index])
            {
            case '-':
            case '+':
                if (foundSign)
                {
                    if (!foundDigit)
                        error("Expected an exponent");
                    return;
                }
                foundSign = true;
                range.popFront();
                break;
            case '0': ..
            case '9':
            case '_':
                foundDigit = true;
                range.popFront();
                break;
            case 'L':
            case 'f':
            case 'F':
            case 'i':
                lexFloatSuffix(type);
                return;
            default:
                if (!foundDigit)
                    error("Expected an exponent");
                return;
            }
        }
    }

    // Lexes a "#!" script line: everything up to the next newline.
    void lexScriptLine(ref Token token)
    {
        mixin (tokenStart);
        while (!(range.index >= range.bytes.length) && !isNewline)
        {
            range.popFront();
        }
        token = Token(tok!"scriptLine", cache.intern(range.slice(mark)),
            line, column, index);
    }

    // Lexes a "#line" special token sequence up to the next newline.
    void lexSpecialTokenSequence(ref Token token)
    {
        mixin (tokenStart);
        while (!(range.index >= range.bytes.length) && !isNewline)
        {
            range.popFront();
        }
        token = Token(tok!"specialTokenSequence", cache.intern(range.slice(mark)),
            line, column, index);
    }

    void lexSlashStarComment(ref Token token) @trusted
    {
        mixin (tokenStart);
        IdType type = tok!"comment";
        range.popFrontN(2);
        while (range.index < range.bytes.length)
        {
            version (X86_64)
            {
                // Bulk-skip bytes that cannot end the comment or a line.
                if (haveSSE42 && range.index + 16 < range.bytes.length)
                    skip!(false, '\r', '\n', '/', '*', 0xe2)(range.bytes.ptr + range.index,
                        &range.index, &range.column);
            }
            if (range.bytes[range.index] == '*')
            {
                range.popFront();
                if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/')
                {
                    range.popFront();
                    break;
                }
            }
            else
                popFrontWhitespaceAware();
        }
        if (config.commentBehavior == CommentBehavior.intern)
            token = Token(type, cache.intern(range.slice(mark)), line, column, index);
        else
            token = Token(type, cast(string) range.slice(mark), line, column, index);
    }

    void lexSlashSlashComment(ref Token token) @trusted
    {
        mixin (tokenStart);
        IdType type = tok!"comment";
        range.popFrontN(2);
        while (range.index < range.bytes.length)
        {
            version (X86_64)
            {
                if (haveSSE42 && range.index + 16 < range.bytes.length)
                {
                    skip!(false, '\r', '\n', 0xe2)(range.bytes.ptr + range.index,
                        &range.index, &range.column);
                }
            }
            // A line comment ends before the newline, which stays unconsumed.
            if (range.bytes[range.index] == '\r' || range.bytes[range.index] == '\n')
                break;
            range.popFront();
        }
        if (config.commentBehavior == CommentBehavior.intern)
            token = Token(type, cache.intern(range.slice(mark)), line, column,
                index);
        else
            token = Token(type, cast(string) range.slice(mark), line, column, index);
    }

    // Lexes a nesting "/+ ... +/" comment, tracking nesting depth.
    void lexSlashPlusComment(ref Token token) @trusted
    {
        mixin (tokenStart);
        IdType type = tok!"comment";
        range.index += 2;
        range.column += 2;
        int depth = 1;
        while (depth > 0 && !(range.index >= range.bytes.length))
        {
            version (X86_64)
            {
                if (haveSSE42 && range.index + 16 < range.bytes.length)
                {
                    skip!(false, '+', '/', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index,
                        &range.index, &range.column);
                }
            }
            if (range.bytes[range.index] == '+')
            {
                range.popFront();
                if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/')
                {
                    range.popFront();
                    depth--;
                }
            }
            else if (range.bytes[range.index] == '/')
            {
                range.popFront();
                if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '+')
                {
                    range.popFront();
                    depth++;
                }
            }
            else
                popFrontWhitespaceAware();
        }
        if (config.commentBehavior == CommentBehavior.intern)
            token = Token(type, cache.intern(range.slice(mark)), line, column, index);
        else
            token = Token(type, cast(string) range.slice(mark), line, column, index);
    }

    // Lexes a double-quoted string with escape sequences.
    void lexStringLiteral(ref Token token) @trusted
    {
        mixin (tokenStart);
        range.popFront();
        while (true)
        {
            if (range.index >= range.bytes.length)
            {
                error("Error: unterminated string literal");
                token = Token(tok!"");
                return;
            }
            version (X86_64)
            {
                if (haveSSE42 && range.index + 16 < range.bytes.length)
                {
                    skip!(false, '"', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index,
                        &range.index, &range.column);
                }
            }
            if (range.bytes[range.index] == '"')
            {
                range.popFront();
                break;
            }
            else if (range.bytes[range.index] == '\\')
            {
                if (!lexEscapeSequence())
                {
                    token = Token.init;
                    return;
                }
            }
            else
                popFrontWhitespaceAware();
        }
        IdType type = tok!"stringLiteral";
        lexStringSuffix(type);
        token = Token(type, cache.intern(range.slice(mark)), line, column,
            index);
    }

    // Lexes a wysiwyg string: either `...` or r"..." (both mapped here by
    // pseudoTokenHandlers); no escape processing takes place.
    void lexWysiwygString(ref Token token) @trusted
    {
        mixin (tokenStart);
        IdType type = tok!"stringLiteral";
        immutable bool backtick = range.bytes[range.index] == '`';
        if (backtick)
        {
            range.popFront();
            while (true)
            {
                if (range.index >= range.bytes.length)
                {
                    error("Error: unterminated string literal");
                    token = Token(tok!"");
                    return;
                }
                version (X86_64)
                {
                    if (haveSSE42 && range.index + 16 < range.bytes.length)
                    {
                        skip!(false, '\r', '\n', 0xe2, '`')(range.bytes.ptr + range.index,
                            &range.index, &range.column);
                    }
                }
                if (range.bytes[range.index] == '`')
                {
                    range.popFront();
                    break;
                }
                else
                    popFrontWhitespaceAware();
            }
        }
        else
        {
            // r"..." form: consume the 'r' and the opening quote.
            range.popFront();
            if (range.index >= range.bytes.length)
            {
                error("Error: unterminated string literal");
                token = Token(tok!"");
                return;
            }
            range.popFront();
            while (true)
            {
                if (range.index >= range.bytes.length)
                {
                    error("Error: unterminated string literal");
                    token = Token(tok!"");
                    return;
                }
                else if (range.bytes[range.index] == '"')
                {
                    range.popFront();
                    break;
                }
                else
                    popFrontWhitespaceAware();
            }
        }
        lexStringSuffix(type);
        token = Token(type, cache.intern(range.slice(mark)), line, column, index);
    }

    // Consumes an optional w/d/c string suffix, adjusting the literal type.
    // Returns the suffix character, or 0 when none is present.
    private ubyte lexStringSuffix(ref IdType type) pure nothrow @safe
    {
        if (range.index >= range.bytes.length)
        {
            type = tok!"stringLiteral";
            return 0;
        }
        else
        {
            switch (range.bytes[range.index])
            {
            case 'w':
                range.popFront();
                type = tok!"wstringLiteral";
                return 'w';
            case 'd':
                range.popFront();
                type = tok!"dstringLiteral";
                return 'd';
            case 'c':
                range.popFront();
                type = tok!"stringLiteral";
                return 'c';
            default:
                type = tok!"stringLiteral";
                return 0;
            }
        }
    }

    // Lexes a q"..." delimited string, dispatching on the delimiter kind.
    void lexDelimitedString(ref Token token)
    {
        mixin (tokenStart);
        range.index += 2;
        range.column += 2;
        ubyte open;
        ubyte close;
        switch (range.bytes[range.index])
        {
        case '<':
            open = '<';
            close = '>';
            range.popFront();
            lexNormalDelimitedString(token, mark, line, column, index, open, close);
            break;
        case '{':
            open = '{';
            close = '}';
            range.popFront();
            lexNormalDelimitedString(token, mark, line, column, index, open, close);
            break;
        case '[':
            open = '[';
            close = ']';
            range.popFront();
            lexNormalDelimitedString(token, mark, line, column, index, open, close);
            break;
        case '(':
            open = '(';
            close = ')';
            range.popFront();
            lexNormalDelimitedString(token, mark, line, column, index, open, close);
            break;
        default:
            // No bracket delimiter: identifier-delimited (heredoc) form.
            lexHeredocString(token, mark, line, column, index);
            break;
        }
    }

    // Lexes a bracket-delimited q"(...)"-style string, tracking nesting of
    // the open/close delimiter pair.
    void lexNormalDelimitedString(ref Token token, size_t mark, size_t line,
        size_t column, size_t index, ubyte open, ubyte close)
    {
        int depth = 1;
        while (!(range.index >= range.bytes.length) && depth > 0)
        {
            if (range.bytes[range.index] == open)
            {
                depth++;
                range.popFront();
            }
            else if (range.bytes[range.index] == close)
            {
                depth--;
                range.popFront();
                if (depth <= 0)
                {
                    // The closing delimiter must be followed by '"'.
                    if (range.bytes[range.index] == '"')
                    {
                        range.popFront();
                    }
                    else
                    {
                        error("Error: `\"` expected to end delimited string literal");
                        token = Token(tok!"");
                        return;
                    }
                }
            }
            else
                popFrontWhitespaceAware();
        }
        IdType type = tok!"stringLiteral";
        lexStringSuffix(type);
        token = Token(type, cache.intern(range.slice(mark)), line, column, index);
    }

    // Lexes an identifier-delimited q"IDENT ... IDENT" string: the closing
    // identifier must appear at the start of a line.
    void lexHeredocString(ref Token token, size_t mark, size_t line,
        size_t column, size_t index)
    {
        Token ident;
        lexIdentifier(ident);
        if (isNewline())
            popFrontWhitespaceAware();
        else
            error("Newline expected");
        while (!(range.index >= range.bytes.length))
        {
            if (isNewline())
            {
                popFrontWhitespaceAware();
                if (!range.canPeek(ident.text.length))
                {
                    error(ident.text ~ " expected");
                    break;
                }
                if (range.peek(ident.text.length - 1) == ident.text)
                {
                    range.popFrontN(ident.text.length);
                    break;
                }
            }
            else
            {
                range.popFront();
            }
        }
        if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '"')
        {
            range.popFront();
        }
        else
            error("`\"` expected");
        IdType type = tok!"stringLiteral";
        lexStringSuffix(type);
        token = Token(type, cache.intern(range.slice(mark)), line, column, index);
    }

    // Lexes a q{ ... } token string by re-entering the lexer on its
    // contents and concatenating the token texts.
    void lexTokenString(ref Token token)
    {
        mixin (tokenStart);
        assert (range.bytes[range.index] == 'q');
        range.popFront();
        assert (range.bytes[range.index] == '{');
        range.popFront();
        auto app = appender!string();
        app.put("q{");
        int depth = 1;
        // Temporarily include whitespace and leave strings untouched while
        // lexing the contents; the config is restored on scope exit.
        immutable WhitespaceBehavior oldWhitespace = config.whitespaceBehavior;
        immutable StringBehavior oldString =
config.stringBehavior;
    config.whitespaceBehavior = WhitespaceBehavior.include;
    config.stringBehavior = StringBehavior.source;
    scope (exit)
    {
        config.whitespaceBehavior = oldWhitespace;
        config.stringBehavior = oldString;
    }
    advance(_front);
    // Accumulate tokens until the brace matching the opening q{ is found.
    while (depth > 0 && !empty)
    {
        auto t = front();
        if (t.text is null)
            app.put(str(t.type));
        else
            app.put(t.text);
        if (t.type == tok!"}")
        {
            depth--;
            if (depth > 0)
                popFront();
        }
        else if (t.type == tok!"{")
        {
            depth++;
            popFront();
        }
        else
            popFront();
    }
    IdType type = tok!"stringLiteral";
    auto b = lexStringSuffix(type);
    if (b != 0)
        app.put(b);
    token = Token(type, cache.intern(cast(const(ubyte)[]) app.data), line, column, index);
}

/**
 * Lexes a hex string literal: x"0A FF ...". Whitespace between digits is
 * allowed; any other non-hex character is an error.
 */
void lexHexString(ref Token token)
{
    mixin (tokenStart);
    range.index += 2; // skip x"
    range.column += 2;
    loop: while (true)
    {
        if (range.index >= range.bytes.length)
        {
            error("Error: unterminated hex string literal");
            token = Token(tok!"");
            return;
        }
        else if (isWhitespace())
            popFrontWhitespaceAware();
        else switch (range.bytes[range.index])
        {
        case '0': .. case '9':
        case 'A': .. case 'F':
        case 'a': .. case 'f':
            range.popFront();
            break;
        case '"':
            range.popFront();
            break loop;
        default:
            error("Error: invalid character in hex string");
            token = Token(tok!"");
            return;
        }
    }
    IdType type = tok!"stringLiteral";
    lexStringSuffix(type);
    token = Token(type, cache.intern(range.slice(mark)), line, column, index);
}

/**
 * Lexes a named character entity (\&name;) with the cursor on the '&'.
 * Returns: true on success; false (after reporting an error) otherwise.
 */
bool lexNamedEntity()
in
{
    assert (range.bytes[range.index] == '&');
}
do
{
    Token t;
    range.popFront();
    lexIdentifier(t, true);
    if (t.type != tok!"identifier" || range.empty || range.bytes[range.index] != ';')
    {
        error("Error: invalid named character entity");
        return false;
    }
    range.popFront();
    return true;
}

// Consumes exactly `count` hex digits, reporting `msg` on EOF or a non-hex
// byte. Factored out of the \x, \u and \U cases of lexEscapeSequence, which
// previously repeated this loop three times.
private bool lexHexEscapeDigits(uint count, string msg)
{
    foreach (i; 0 .. count)
    {
        if (range.index >= range.bytes.length)
        {
            error(msg);
            return false;
        }
        switch (range.bytes[range.index])
        {
        case '0': .. case '9':
        case 'a': .. case 'f':
        case 'A': .. case 'F':
            range.popFront();
            break;
        default:
            error(msg);
            return false;
        }
    }
    return true;
}

/**
 * Lexes one escape sequence with the cursor on the backslash.
 * Returns: true if the sequence was well-formed; false (after reporting an
 * error) otherwise.
 */
bool lexEscapeSequence()
{
    range.popFront();
    if (range.index >= range.bytes.length)
    {
        error("Error: non-terminated character escape sequence.");
        return false;
    }
    switch (range.bytes[range.index])
    {
    case '&':
        return lexNamedEntity();
    case '\'':
    case '"':
    case '?':
    case '\\':
    case 'a':
    case 'b':
    case 'f':
    case 'n':
    case 'r':
    case 't':
    case 'v':
        range.popFront();
        break;
    case 'x':
        range.popFront();
        if (!lexHexEscapeDigits(2, "Error: 2 hex digits expected."))
            return false;
        break;
    case '0':
        // A lone \0 (end of input or followed by the closing quote) is the
        // NUL escape; otherwise fall through to the octal case. Simplified
        // from the original !(A) || (A && B) form, which is equivalent.
        if (range.index + 1 >= range.bytes.length || range.peekAt(1) == '\'')
        {
            range.popFront();
            break;
        }
        goto case;
    case '1': .. case '7':
        // Up to three octal digits.
        for (size_t i = 0; i < 3 && !(range.index >= range.bytes.length)
            && range.bytes[range.index] >= '0' && range.bytes[range.index] <= '7'; i++)
            range.popFront();
        break;
    case 'u':
        range.popFront();
        if (!lexHexEscapeDigits(4, "Error: at least 4 hex digits expected."))
            return false;
        break;
    case 'U':
        range.popFront();
        if (!lexHexEscapeDigits(8, "Error: at least 8 hex digits expected."))
            return false;
        break;
    default:
        error("Invalid escape sequence");
        // Skip forward to a ';' (or EOF) so lexing can resume afterwards.
        while (true)
        {
            if (range.index >= range.bytes.length)
            {
                error("Error: non-terminated character escape sequence.");
                break;
            }
            if (range.bytes[range.index] == ';')
            {
                range.popFront();
                break;
            }
            else
            {
                range.popFront();
            }
        }
        return false;
    }
    return true;
}

/**
 * Lexes a character literal with the cursor on the opening quote.
 */
void lexCharacterLiteral(ref Token token)
{
    mixin (tokenStart);
    range.popFront();
    if (range.empty)
        goto err;
    if (range.bytes[range.index] == '\\')
        lexEscapeSequence();
    else if (range.bytes[range.index] == '\'')
    {
        range.popFront();
        token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)),
            line, column, index);
    }
    else if (range.bytes[range.index] & 0x80)
    {
        // Multibyte (non-ASCII) character payload.
        // BUGFIX: bound the scan — previously this loop indexed past the end
        // of the input on a truncated multibyte sequence at EOF.
        while (range.index < range.bytes.length && (range.bytes[range.index] & 0x80))
            range.popFront();
    }
    else
        popFrontWhitespaceAware();
    if (range.index < range.bytes.length && range.bytes[range.index] == '\'')
    {
        range.popFront();
        token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)),
            line, column, index);
    }
    else
    {
err:
        error("Error: Expected `'` to end character literal");
        token = Token(tok!"");
    }
}

/**
 * Lexes an identifier.
 *
 * Params:
 *     token = output token
 *     silent = when true, suppresses the "Invalid identifier" diagnostic
 *              (used for speculative lexing, e.g. by lexNamedEntity)
 */
void lexIdentifier(ref Token token, const bool silent = false) @trusted
{
    mixin (tokenStart);
    if (isSeparating(0))
    {
        if (silent) return;
        error("Invalid identifier");
        range.popFront();
    }
    while (true)
    {
        version (X86_64)
        {
            // SSE4.2 fast path: bulk-skip runs of [a-zA-Z_] 16 bytes at a time.
            if (haveSSE42 && range.index + 16 < range.bytes.length)
            {
                immutable ulong i = rangeMatch!(false, 'a', 'z', 'A', 'Z', '_', '_')
                    (range.bytes.ptr + range.index);
                range.column += i;
                range.index += i;
            }
        }
        if (isSeparating(0))
            break;
        else
            range.popFront();
    }
token = Token(tok!"identifier", cache.intern(range.slice(mark)), line, column, index);
}

/**
 * Lexes a token that starts with '.': a floating literal (.5), the range /
 * variadic operators (.. and ...), or the plain member-access dot.
 */
void lexDot(ref Token token)
{
    mixin (tokenStart);
    if (!(range.index + 1 < range.bytes.length))
    {
        range.popFront();
        token = Token(tok!".", null, line, column, index);
        return;
    }
    switch (range.peekAt(1))
    {
    case '0': .. case '9':
        lexNumber(token);
        return;
    case '.':
        range.popFront();
        range.popFront();
        if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '.')
        {
            range.popFront();
            token = Token(tok!"...", null, line, column, index);
        }
        else
            token = Token(tok!"..", null, line, column, index);
        return;
    default:
        range.popFront();
        token = Token(tok!".", null, line, column, index);
        return;
    }
}

/**
 * Lexes a 3-byte line terminator (U+2028/U+2029) as a whitespace token.
 */
void lexLongNewline(ref Token token) @nogc
{
    mixin (tokenStart);
    range.popFront();
    range.popFront();
    range.popFront();
    range.incrementLine();
    string text = config.whitespaceBehavior == WhitespaceBehavior.include
        ? cache.intern(range.slice(mark))
        : "";
    token = Token(tok!"whitespace", text, line, column, index);
}

/// Returns: true if the current byte begins a newline (\n, \r, or U+2028/29).
bool isNewline() @nogc
{
    if (range.bytes[range.index] == '\n') return true;
    if (range.bytes[range.index] == '\r') return true;
    return (range.bytes[range.index] & 0x80) && (range.index + 2 < range.bytes.length)
        && (range.peek(2) == "\u2028" || range.peek(2) == "\u2029");
}

/**
 * Returns: true if the byte at the given offset from the cursor terminates
 * an identifier (i.e. is not [0-9A-Za-z_] and not a non-separating multibyte
 * character).
 */
bool isSeparating(size_t offset) @nogc
{
    enum : ubyte
    {
        n, y, m // no, yes, maybe
    }
    if (range.index + offset >= range.bytes.length)
        return true;
    auto c = range.bytes[range.index + offset];
    // One entry per byte value: identifier bytes ([0-9A-Za-z_]) are 'n',
    // plain ASCII separators are 'y', and bytes >= 0x80 are 'm' (maybe) —
    // resolved below by checking for the U+2028/U+2029 line separators.
    static immutable ubyte[256] LOOKUP_TABLE = [
        y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y,
        y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y,
        y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y,
        n, n, n, n, n, n, n, n, n, n, y, y, y, y, y, y,
        y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n,
        n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, n,
        y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n,
        n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, y,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m,
        m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m
    ];
    immutable ubyte result = LOOKUP_TABLE[c];
    if (result == n) return false;
    if (result == y) return true;
    if (result == m)
    {
        auto r = range;
        // BUGFIX: advance the *copy*, not the lexer's live range — the
        // original called range.popFrontN(offset), mutating lexer state as a
        // side effect of this query and then peeking the un-advanced copy.
        r.popFrontN(offset);
        return (r.canPeek(2) && (r.peek(2) == "\u2028" || r.peek(2) == "\u2029"));
    }
    assert (false);
}

// Mixed into each lex* function: captures the token's start position.
enum tokenStart = q{
    size_t index = range.index;
    size_t column = range.column;
    size_t line = range.line;
    auto mark = range.mark();
};

// Appends an error diagnostic at the current lexer position.
void error(string message)
{
    _messages ~= Message(range.line, range.column, message, true);
}

// Appends a warning diagnostic at the current lexer position.
void warning(string message)
{
    _messages ~= Message(range.line, range.column, message, false);
    assert (_messages.length > 0);
}

Message[] _messages;
StringCache* cache;
LexerConfig config;
bool haveSSE42;
}

/**
 * Creates a token range from the given source code. Creates a default lexer
 * configuration and a GC-managed string cache.
 */
public auto byToken(R)(R range)
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    LexerConfig config;
    StringCache* cache = new StringCache(range.length.optimalBucketCount);
    return DLexer(range, config, cache);
}

/**
 * Creates a token range from the given source code. Uses the given string
 * cache.
 */
public auto byToken(R)(R range, StringCache* cache)
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    LexerConfig config;
    return DLexer(range, config, cache);
}

/**
 * Creates a token range from the given source code. Uses the provided lexer
 * configuration and string cache.
 */
public auto byToken(R)(R range, const LexerConfig config, StringCache* cache)
if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R)
{
    return DLexer(range, config, cache);
}

/**
 * Helper function used to avoid too much allocations while lexing.
*
 * Params:
 *     size = The length in bytes of the source file.
 *
 * Returns:
 *     The optimal initial bucket count a `StringCache` should have.
 */
size_t optimalBucketCount(size_t size)
{
    import std.math : nextPow2;
    // One bucket per ~32 source bytes, rounded up to a power of two and
    // capped at 2^30.
    return nextPow2((size + 31U) / 32U).min(1U << 30U);
}
///
unittest
{
    assert(optimalBucketCount(1) == 2);
    assert(optimalBucketCount(9000 * 32) == 16384);
    static if (size_t.sizeof == ulong.sizeof)
        assert(optimalBucketCount(100_000_000_000UL) == 1 << 30);
}

/**
 * The string cache is used for string interning.
 *
 * It will only store a single copy of any string that it is asked to hold.
 * Interned strings can be compared for equality by comparing their $(B .ptr)
 * field.
 *
 * Default and postblit constructors are disabled. When a StringCache goes out
 * of scope, the memory held by it is freed.
 *
 * See_also: $(LINK http://en.wikipedia.org/wiki/String_interning)
 */
struct StringCache
{
public pure nothrow @nogc:

    @disable this();
    @disable this(this);

    /**
     * Params: bucketCount = the initial number of buckets. Must be a
     * power of two
     */
    this(size_t bucketCount) nothrow @trusted @nogc
    in
    {
        import core.bitop : popcnt;
        // Exactly one set bit <=> power of two; checked in two 32-bit halves
        // on 64-bit targets because popcnt here takes a uint.
        static if (size_t.sizeof == 8)
        {
            immutable low = popcnt(cast(uint) bucketCount);
            immutable high = popcnt(cast(uint) (bucketCount >> 32));
            assert ((low == 0 && high == 1) || (low == 1 && high == 0));
        }
        else
        {
            static assert (size_t.sizeof == 4);
            assert (popcnt(cast(uint) bucketCount) == 1);
        }
    }
    do
    {
        buckets = (cast(Node**) calloc((Node*).sizeof, bucketCount))[0 .. bucketCount];
    }

    // Frees the block chain, then every node (plus its malloc'ed payload, if
    // any), then the bucket array itself.
    ~this()
    {
        Block* current = rootBlock;
        while (current !is null)
        {
            Block* prev = current;
            current = current.next;
            free(cast(void*) prev);
        }
        foreach (nodePointer; buckets)
        {
            Node* currentNode = nodePointer;
            while (currentNode !is null)
            {
                if (currentNode.mallocated)
                    free(currentNode.str.ptr);
                Node* prev = currentNode;
                currentNode = currentNode.next;
                free(prev);
            }
        }
        rootBlock = null;
        free(buckets.ptr);
        buckets = null;
    }

    /**
     * Caches a string.
     */
    string intern(const(ubyte)[] str) @safe
    {
        if (str is null || str.length == 0)
            return "";
        return _intern(str);
    }

    /**
     * ditto
     */
    string intern(string str) @trusted
    {
        return intern(cast(ubyte[]) str);
    }

    /**
     * The default bucket count for the string cache.
     */
    static enum defaultBucketCount = 4096;

private:

    // Looks up (or inserts) the bytes in the hash table and returns the
    // canonical copy. Large strings get their own malloc'ed buffer; small
    // ones are packed into shared blocks.
    string _intern(const(ubyte)[] bytes) @trusted
    {
        immutable uint hash = hashBytes(bytes);
        immutable size_t index = hash & (buckets.length - 1);
        Node* s = find(bytes, hash);
        if (s !is null)
            return cast(string) s.str;
        ubyte[] mem = void;
        bool mallocated = bytes.length > BIG_STRING;
        if (mallocated)
            mem = (cast(ubyte*) malloc(bytes.length))[0 .. bytes.length];
        else
            mem = allocate(bytes.length);
        mem[] = bytes[];
        Node* node = cast(Node*) malloc(Node.sizeof);
        node.str = mem;
        node.hash = hash;
        node.next = buckets[index];
        node.mallocated = mallocated;
        buckets[index] = node;
        return cast(string) mem;
    }

    // Walks the bucket chain for `hash` and returns the matching node, or
    // null when the string has not been interned yet.
    Node* find(const(ubyte)[] bytes, uint hash) @trusted
    {
        import std.algorithm : equal;
        immutable size_t index = hash & (buckets.length - 1);
        Node* node = buckets[index];
        while (node !is null)
        {
            if (node.hash == hash && bytes == cast(ubyte[]) node.str)
                return node;
            node = node.next;
        }
        return node;
    }

    // MurmurHash2-style 32-bit mix (constants 0x5bd1e995 / r = 24).
    static uint hashBytes(const(ubyte)[] data) pure nothrow @trusted @nogc
    in
    {
        assert (data !is null);
        assert (data.length > 0);
    }
    do
    {
        immutable uint m = 0x5bd1e995;
        immutable int r = 24;
        uint h = cast(uint) data.length;
        while (data.length >= 4)
        {
            uint k = (cast(ubyte) data[3]) << 24
                | (cast(ubyte) data[2]) << 16
                | (cast(ubyte) data[1]) << 8
                | (cast(ubyte) data[0]);
            k *= m;
            k ^= k >> r;
            k *= m;
            h *= m;
            h ^= k;
            data = data[4 .. $];
        }
        // Fold in the 0-3 trailing bytes.
        switch (data.length & 3)
        {
        case 3:
            h ^= data[2] << 16;
            goto case;
        case 2:
            h ^= data[1] << 8;
            goto case;
        case 1:
            h ^= data[0];
            h *= m;
            break;
        default:
            break;
        }
        h ^= h >> 13;
        h *= m;
        h ^= h >> 15;
        return h;
    }

    // Bump-allocates numBytes from one of the first few blocks, appending a
    // fresh block when none has room. Only called for sizes <= BIG_STRING.
    ubyte[] allocate(size_t numBytes) pure nothrow @trusted @nogc
    in
    {
        assert (numBytes != 0);
    }
    out (result)
    {
        assert (result.length == numBytes);
    }
    do
    {
        Block* r = rootBlock;
        size_t i = 0;
        // Only probe the first four blocks; older blocks are assumed full.
        while (i <= 3 && r !is null)
        {
            immutable size_t available = r.bytes.length;
            immutable size_t oldUsed = r.used;
            immutable size_t newUsed = oldUsed + numBytes;
            if (newUsed <= available)
            {
                r.used = newUsed;
                return r.bytes[oldUsed .. newUsed];
            }
            i++;
            r = r.next;
        }
        Block* b = cast(Block*) calloc(Block.sizeof, 1);
        b.used = numBytes;
        b.next = rootBlock;
        rootBlock = b;
        return b.bytes[0 .. numBytes];
    }

    // One hash-table entry; str points either into a Block or at a
    // malloc'ed buffer (mallocated == true).
    static struct Node
    {
        ubyte[] str = void;
        Node* next = void;
        uint hash = void;
        bool mallocated = void;
    }

    // Fixed-size arena page; sized so that Block.sizeof == BLOCK_SIZE.
    static struct Block
    {
        Block* next;
        size_t used;
        enum BLOCK_CAPACITY = BLOCK_SIZE - size_t.sizeof - (void*).sizeof;
        ubyte[BLOCK_CAPACITY] bytes;
    }

    static assert (BLOCK_SIZE == Block.sizeof);

    enum BLOCK_SIZE = 1024 * 16;

    // If a string would take up more than 1/4 of a block, allocate it outside
    // of the block.
enum BIG_STRING = BLOCK_SIZE / 4; Node*[] buckets; Block* rootBlock; } private extern(C) void* calloc(size_t, size_t) nothrow pure @nogc @trusted; private extern(C) void* malloc(size_t) nothrow pure @nogc @trusted; private extern(C) void free(void*) nothrow pure @nogc @trusted; unittest { auto source = cast(ubyte[]) q{ import std.stdio;}c; auto tokens = getTokensForParser(source, LexerConfig(), new StringCache(StringCache.defaultBucketCount)); assert (tokens.map!"a.type"().equal([tok!"import", tok!"identifier", tok!".", tok!"identifier", tok!";"])); } /// Test \x char sequence unittest { auto toks = (string s) => byToken(cast(ubyte[])s); // valid immutable hex = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','A','B','C','D','E','F']; auto source = ""; foreach (h1; hex) foreach (h2; hex) source ~= "'\\x" ~ h1 ~ h2 ~ "'"; assert (toks(source).filter!(t => t.type != tok!"characterLiteral").empty); // invalid assert (toks(`'\x'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true)); assert (toks(`'\x_'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true)); assert (toks(`'\xA'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true)); assert (toks(`'\xAY'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true)); assert (toks(`'\xXX'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true)); } version (X86_64) { version (DigitalMars) private enum useDMDStyle = true; else version (LDC) private enum useDMDStyle = (__VERSION__ < 2092); // GDC-style supported since v1.22 else private enum useDMDStyle = false; // not supported by GDC private ulong pcmpestri(ubyte flags, chars...)(const ubyte* bytes) pure nothrow @trusted @nogc if (chars.length <= 8) { enum constant = ByteCombine!chars; enum charsLength = chars.length; static if (useDMDStyle) { asm pure nothrow @nogc { naked; } version (Windows) // `bytes` in RCX asm pure nothrow @nogc { movdqu XMM1, [RCX]; } else // 
`bytes` in RDI asm pure nothrow @nogc { movdqu XMM1, [RDI]; } asm pure nothrow @nogc { mov R10, constant; movq XMM2, R10; mov RAX, charsLength; mov RDX, 16; pcmpestri XMM2, XMM1, flags; mov RAX, RCX; ret; } } else // GDC-style inline asm (GCC basically) { ulong result; asm pure nothrow @nogc { `movdqu %1, %%xmm1 movq %3, %%xmm2 pcmpestri %5, %%xmm1, %%xmm2` : "=c" (result) // %0: pcmpestri result in RCX, to be stored into `result` : "m" (*bytes), // %1: address of `bytes` string "d" (16), // %2: length of `bytes` head in XMM1, as pcmpestri input in EDX "r" (constant), // %3: max 8 `chars` to load into GP register, then XMM2 "a" (charsLength), // %4: length in XMM2, as pcmpestri input in EAX "i" (flags) // %5: `flags` immediate : "xmm1", "xmm2"; // clobbered registers } return result; } } /** * Skips between 0 and 16 bytes that match (or do not match) one of the * given $(B chars). */ void skip(bool matching, chars...)(const ubyte* bytes, ulong* pindex, ulong* pcolumn) pure nothrow @trusted @nogc if (chars.length <= 8) { static if (matching) enum flags = 0b0001_0000; else enum flags = 0b0000_0000; const r = pcmpestri!(flags, chars)(bytes); *pindex += r; *pcolumn += r; } /** * Returns: the number of bytes starting at the given location that match * (or do not match if $(B invert) is true) the byte ranges in $(B chars). */ ulong rangeMatch(bool invert, chars...)(const ubyte* bytes) pure nothrow @trusted @nogc { static assert(chars.length % 2 == 0); static if (invert) enum rangeMatchFlags = 0b0000_0100; else enum rangeMatchFlags = 0b0001_0100; return pcmpestri!(rangeMatchFlags, chars)(bytes); } template ByteCombine(c...) 
{
    static assert (c.length <= 8);
    // Folds up to 8 byte values into a single ulong, first char in the low
    // byte, for loading into an XMM register.
    static if (c.length > 1)
        enum ulong ByteCombine = c[0] | (ByteCombine!(c[1..$]) << 8);
    else
        enum ulong ByteCombine = c[0];
}

}

// Lexing "/++" or "/**" right at EOF must not read past the end of input.
unittest
{
    import core.exception : RangeError;
    import std.exception : assertNotThrown;
    static immutable src1 = "/++";
    static immutable src2 = "/**";
    LexerConfig cf;
    StringCache ca = StringCache(16);
    assertNotThrown!RangeError(getTokensForParser(src1, cf, &ca));
    assertNotThrown!RangeError(getTokensForParser(src2, cf, &ca));
}

// An invalid escape sequence produces an error token plus a diagnostic.
unittest
{
    static immutable src = `"\eeee"`;
    LexerConfig cf;
    StringCache ca = StringCache(16);
    auto l = DLexer(src, cf, &ca);
    assert(l.front().type == tok!"");
    assert(!l.messages.empty);
}

// Named character entities (\&name;) inside string literals.
unittest
{
    alias Msg = DLexer.Message;
    LexerConfig cf;
    StringCache ca = StringCache(16);
    {
        auto l = DLexer(`"\&copy;"`, cf, &ca);
        assert(l.front().type == tok!"stringLiteral");
        assert(l.messages == []);
    }
    {
        auto l = DLexer(`"\&trade;\&urcorner;"`, cf, &ca);
        assert(l.front().type == tok!"stringLiteral");
        assert(l.messages == []);
    }
    {
        // Missing the terminating ';'.
        auto l = DLexer(`"\&trade"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 9, "Error: invalid named character entity", true) ]);
    }
    {
        auto l = DLexer(`"\&trade;\&urcorn"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 18, "Error: invalid named character entity", true) ]);
    }
    {
        // Empty entity name.
        auto l = DLexer(`"\&"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 4, "Error: invalid named character entity", true) ]);
    }
    {
        // Entity name must start with an identifier character.
        auto l = DLexer(`"\&0"`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 5, "Error: invalid named character entity", true) ]);
    }
    {
        // EOF before the ';'.
        auto l = DLexer(`"\&copy`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 8, "Error: invalid named character entity", true) ]);
    }
    {
        // Valid entity, but the string itself is never closed.
        auto l = DLexer(`"\&copy;`, cf, &ca);
        assert(l.front().type == tok!"");
        assert(l.messages == [ Msg(1, 9, "Error: unterminated string literal", true) ]);
    }
}

// legacy code using compatibility comment and trailingComment
unittest
{
    import std.conv : to;
    import std.exception : enforce;

    // NOTE(review): this source string's internal line structure was
    // reconstructed from the comment/trailingComment expectations asserted
    // below; `#line 40` must start its own line.
    static immutable src = `/// this is a module.
// mixed
/// it can do stuff
module foo.bar; // hello
/**
 * some doc
 * hello
 */
int x; /// very nice
// TODO: do stuff
void main() {
#line 40
/// could be better
writeln(":)");
}
/// end of file`;

    LexerConfig cf;
    StringCache ca = StringCache(16);
    const tokens = getTokensForParser(src, cf, &ca);

    void assertEquals(T)(T a, T b, string what, string file = __FILE__,
        size_t line = __LINE__)
    {
        enforce(a == b, "Failed " ~ what ~ " '" ~ a.to!string ~ "' == '"
            ~ b.to!string ~ "'", file, line);
    }

    void test(size_t index, IdType type, string comment, string trailingComment,
        string file = __FILE__, size_t line = __LINE__)
    {
        assertEquals(tokens[index].type, type, "type", file, line);
        assertEquals(tokens[index].comment, comment, "comment", file, line);
        assertEquals(tokens[index].trailingComment, trailingComment,
            "trailingComment", file, line);
    }

    test(0, tok!"module", "this is a module.\nit can do stuff", "");
    test(1, tok!"identifier", "", "");
    test(2, tok!".", "", "");
    test(3, tok!"identifier", "", "");
    test(4, tok!";", "", "");
    test(5, tok!"int", "some doc\nhello", "");
    test(6, tok!"identifier", "", "");
    test(7, tok!";", "", "very nice");
    test(8, tok!"void", "", "");
    test(9, tok!"identifier", "", "");
    test(10, tok!"(", "", "");
    test(11, tok!")", "", "");
    test(12, tok!"{", "", "");
    test(13, tok!"identifier", "could be better", "");
    test(14, tok!"(", "", "");
    test(15, tok!"stringLiteral", "", "");
    test(16, tok!")", "", "");
    test(17, tok!";", "", "");
    test(18, tok!"}", "", "");
}

// dlang-community/D-Scanner#805
unittest
{
    // Token must remain copyable into const/immutable aggregates.
    final class SomeExpr
    {
        Token tok;
    }

    auto e1 = new SomeExpr();
    const e2 = new SomeExpr();
    immutable e3 = new immutable SomeExpr();

    immutable t1 = e1.tok;
    immutable t2 = e2.tok;
    immutable t3 = e3.tok;
}