dparse.lexer: 338/855 (39%) line coverage

      
(Per-line execution counts omitted: the coverage tool's hit-count gutter was fused with the source line numbers during extraction and is not individually recoverable here. The overall result is the figure reported above: 338 of 855 coverable lines executed, 39%.)
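Before the source listing, a minimal usage sketch of the module's public entry points (byToken, getTokensForParser, StringCache, optimalBucketCount). It is illustrative only and not part of the covered module: the sample source string and the expected identifier count are assumptions, while the calls themselves follow the declarations in the listing below.

unittest
{
    import dparse.lexer;

    // A small D source as a mutable ubyte slice (the lexer range must be a
    // dynamic array whose elements convert to ubyte).
    auto source = cast(ubyte[]) "module demo;\nvoid main() { int x = 42; }\n".dup;

    // String cache sized with the optimalBucketCount helper from this module.
    StringCache cache = StringCache(optimalBucketCount(source.length));

    // Token range with an explicit configuration.
    LexerConfig config;
    config.whitespaceBehavior = WhitespaceBehavior.skip;
    size_t identifiers;
    foreach (token; byToken(source, config, &cache))
        if (token.type == tok!"identifier")
            ++identifiers;
    assert(identifiers == 3); // demo, main, x (assumed count for this sample)

    // Parser-oriented variant: whitespace dropped, comments attached to tokens.
    const(Token)[] tokens = getTokensForParser(source, config, &cache);
    assert(tokens.length > 0);
}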
module dparse.lexer;

import std.typecons;
import std.typetuple;
import std.array;
import std.algorithm;
import std.range;
import std.experimental.lexer;
import std.traits;

import core.cpuid : sse42;

version (D_InlineAsm_X86_64)
{
    version (Windows) {}
    else version = iasm64NotWindows;
}

/// Operators
private enum operators = [
    ",", ".", "..", "...", "/", "/=", "!", "!<", "!<=", "!<>", "!<>=", "!=",
    "!>", "!>=", "$", "%", "%=", "&", "&&", "&=", "(", ")", "*", "*=", "+",
    "++", "+=", "-", "--", "-=", ":", ";", "<", "<<", "<<=", "<=", "<>",
    "<>=", "=", "==", "=>", ">", ">=", ">>", ">>=", ">>>", ">>>=", "?", "@",
    "[", "]", "^", "^=", "^^", "^^=", "{", "|", "|=", "||", "}", "~", "~="
];

/// Keywords
private enum keywords = [
    "abstract", "alias", "align", "asm", "assert", "auto", "bool", "break",
    "byte", "case", "cast", "catch", "cdouble", "cent", "cfloat", "char",
    "class", "const", "continue", "creal", "dchar", "debug", "default",
    "delegate", "delete", "deprecated", "do", "double", "else", "enum",
    "export", "extern", "false", "final", "finally", "float", "for",
    "foreach", "foreach_reverse", "function", "goto", "idouble", "if",
    "ifloat", "immutable", "import", "in", "inout", "int", "interface",
    "invariant", "ireal", "is", "lazy", "long", "macro", "mixin", "module",
    "new", "nothrow", "null", "out", "override", "package", "pragma",
    "private", "protected", "public", "pure", "real", "ref", "return",
    "scope", "shared", "short", "static", "struct", "super", "switch",
    "synchronized", "template", "this", "throw", "true", "try", "typedef",
    "typeid", "typeof", "ubyte", "ucent", "uint", "ulong", "union",
    "unittest", "ushort", "version", "void", "wchar", "while", "with",
    "__DATE__", "__EOF__", "__FILE__", "__FILE_FULL_PATH__", "__FUNCTION__",
    "__gshared", "__LINE__", "__MODULE__", "__parameters",
    "__PRETTY_FUNCTION__", "__TIME__", "__TIMESTAMP__", "__traits",
    "__vector", "__VENDOR__", "__VERSION__"
];

/// Other tokens
private enum dynamicTokens = [
    "specialTokenSequence", "comment", "identifier", "scriptLine",
    "whitespace", "doubleLiteral", "floatLiteral", "idoubleLiteral",
    "ifloatLiteral", "intLiteral", "longLiteral", "realLiteral",
    "irealLiteral", "uintLiteral", "ulongLiteral", "characterLiteral",
    "dstringLiteral", "stringLiteral", "wstringLiteral"
];

private enum pseudoTokenHandlers = [
    "\"", "lexStringLiteral",
    "`", "lexWysiwygString",
    "//", "lexSlashSlashComment",
    "/*", "lexSlashStarComment",
    "/+", "lexSlashPlusComment",
    ".", "lexDot",
    "'", "lexCharacterLiteral",
    "0", "lexNumber",
    "1", "lexDecimal",
    "2", "lexDecimal",
    "3", "lexDecimal",
    "4", "lexDecimal",
    "5", "lexDecimal",
    "6", "lexDecimal",
    "7", "lexDecimal",
    "8", "lexDecimal",
    "9", "lexDecimal",
    "q\"", "lexDelimitedString",
    "q{", "lexTokenString",
    "r\"", "lexWysiwygString",
    "x\"", "lexHexString",
    " ", "lexWhitespace",
    "\t", "lexWhitespace",
    "\r", "lexWhitespace",
    "\n", "lexWhitespace",
    "\v", "lexWhitespace",
    "\f", "lexWhitespace",
    "\u2028", "lexLongNewline",
    "\u2029", "lexLongNewline",
    "#!", "lexScriptLine",
    "#line", "lexSpecialTokenSequence"
];

/// Token ID type for the D lexer.
public alias IdType = TokenIdType!(operators, dynamicTokens, keywords);

/**
 * Function used for converting an IdType to a string.
 *
 * Examples:
 * ---
 * IdType c = tok!"case";
 * assert (str(c) == "case");
 * ---
 */
public alias str = tokenStringRepresentation!(IdType, operators,
    dynamicTokens, keywords);

/**
 * Template used to refer to D token types.
 *
 * See the $(B operators), $(B keywords), and $(B dynamicTokens) enums for
 * values that can be passed to this template.
 * Example:
 * ---
 * import dparse.lexer;
 * IdType t = tok!"floatLiteral";
 * ---
 */
public template tok(string token)
{
    alias tok = TokenId!(IdType, operators, dynamicTokens, keywords, token);
}

private enum extraFields = q{
    string comment;
    string trailingComment;

    int opCmp(size_t i) const pure nothrow @safe
    {
        if (index < i) return -1;
        if (index > i) return 1;
        return 0;
    }

    int opCmp(ref const typeof(this) other) const pure nothrow @safe
    {
        return opCmp(other.index);
    }
};

/// The token type in the D lexer
public alias Token = std.experimental.lexer.TokenStructure!(IdType, extraFields);

/**
 * Configure whitespace handling
 */
public enum WhitespaceBehavior : ubyte
{
    include = 0b0000_0000,
    skip = 0b0000_0001,
}

/**
 * Configure string lexing behavior
 */
public enum StringBehavior : ubyte
{
    /// Do not include quote characters, process escape sequences
    compiler = 0b0000_0000,
    /// Opening quotes, closing quotes, and string suffixes are included in the
    /// string token
    includeQuoteChars = 0b0000_0001,
    /// String escape sequences are not replaced
    notEscaped = 0b0000_0010,
    /// Not modified at all. Useful for formatters or highlighters
    source = includeQuoteChars | notEscaped
}

public enum CommentBehavior : bool
{
    intern = true,
    noIntern = false
}

/**
 * Lexer configuration struct
 */
public struct LexerConfig
{
    string fileName;
    StringBehavior stringBehavior;
    WhitespaceBehavior whitespaceBehavior;
    CommentBehavior commentBehavior = CommentBehavior.intern;
}

/**
 * Basic type token types.
 */
public alias BasicTypes = AliasSeq!(tok!"int", tok!"bool", tok!"byte",
    tok!"cdouble", tok!"cent", tok!"cfloat", tok!"char", tok!"creal",
    tok!"dchar", tok!"double", tok!"float", tok!"idouble", tok!"ifloat",
    tok!"ireal", tok!"long", tok!"real", tok!"short", tok!"ubyte",
    tok!"ucent", tok!"uint", tok!"ulong", tok!"ushort", tok!"void",
    tok!"wchar");

/**
 * Returns: true if the given ID is for a basic type.
 */
public bool isBasicType(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; BasicTypes)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Number literal token types.
 */
public alias NumberLiterals = AliasSeq!(tok!"doubleLiteral",
    tok!"floatLiteral", tok!"idoubleLiteral", tok!"ifloatLiteral",
    tok!"intLiteral", tok!"longLiteral", tok!"realLiteral",
    tok!"irealLiteral", tok!"uintLiteral", tok!"ulongLiteral");

/**
 * Returns: true if the given ID type is for a number literal.
 */
public bool isNumberLiteral(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; NumberLiterals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Integer literal token types.
 */
public alias IntegerLiterals = AliasSeq!(tok!"intLiteral", tok!"longLiteral",
    tok!"uintLiteral", tok!"ulongLiteral");

/**
 * Returns: true if the given ID type is for an integer literal.
 */
public bool isIntegerLiteral(IdType type) nothrow pure @safe @nogc
{
    switch (type)
    {
    foreach (T; IntegerLiterals)
    {
    case T:
        return true;
    }
    default:
        return false;
    }
}

/**
 * Operator token types.
*/ public alias Operators = AliasSeq!(tok!",", tok!".", tok!"..", tok!"...", tok!"/", tok!"/=", tok!"!", tok!"!<", tok!"!<=", tok!"!<>", tok!"!<>=", tok!"!=", tok!"!>", tok!"!>=", tok!"$", tok!"%", tok!"%=", tok!"&", tok!"&&", tok!"&=", tok!"(", tok!")", tok!"*", tok!"*=", tok!"+", tok!"++", tok!"+=", tok!"-", tok!"--", tok!"-=", tok!":", tok!";", tok!"<", tok!"<<", tok!"<<=", tok!"<=", tok!"<>", tok!"<>=", tok!"=", tok!"==", tok!"=>", tok!">", tok!">=", tok!">>", tok!">>=", tok!">>>", tok!">>>=", tok!"?", tok!"@", tok!"[", tok!"]", tok!"^", tok!"^=", tok!"^^", tok!"^^=", tok!"{", tok!"|", tok!"|=", tok!"||", tok!"}", tok!"~", tok!"~="); /** * Returns: true if the given ID type is for an operator. */ public bool isOperator(IdType type) nothrow pure @safe @nogc { switch (type) { foreach (T; Operators) { case T: return true; } default: return false; } } /** * Keyword token types. */ public alias Keywords = AliasSeq!(tok!"abstract", tok!"alias", tok!"align", tok!"asm", tok!"assert", tok!"auto", tok!"break", tok!"case", tok!"cast", tok!"catch", tok!"class", tok!"const", tok!"continue", tok!"debug", tok!"default", tok!"delegate", tok!"delete", tok!"deprecated", tok!"do", tok!"else", tok!"enum", tok!"export", tok!"extern", tok!"false", tok!"final", tok!"finally", tok!"for", tok!"foreach", tok!"foreach_reverse", tok!"function", tok!"goto", tok!"if", tok!"immutable", tok!"import", tok!"in", tok!"inout", tok!"interface", tok!"invariant", tok!"is", tok!"lazy", tok!"macro", tok!"mixin", tok!"module", tok!"new", tok!"nothrow", tok!"null", tok!"out", tok!"override", tok!"package", tok!"pragma", tok!"private", tok!"protected", tok!"public", tok!"pure", tok!"ref", tok!"return", tok!"scope", tok!"shared", tok!"static", tok!"struct", tok!"super", tok!"switch", tok!"synchronized", tok!"template", tok!"this", tok!"throw", tok!"true", tok!"try", tok!"typedef", tok!"typeid", tok!"typeof", tok!"union", tok!"unittest", tok!"version", tok!"while", tok!"with", tok!"__DATE__", tok!"__EOF__", tok!"__FILE__", tok!"__FILE_FULL_PATH__", tok!"__FUNCTION__", tok!"__gshared", tok!"__LINE__", tok!"__MODULE__", tok!"__parameters", tok!"__PRETTY_FUNCTION__", tok!"__TIME__", tok!"__TIMESTAMP__", tok!"__traits", tok!"__vector", tok!"__VENDOR__", tok!"__VERSION__"); /** * Returns: true if the given ID type is for a keyword. */ public bool isKeyword(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; Keywords) { case T: return true; } default: return false; } } /** * String literal token types */ public alias StringLiterals = AliasSeq!(tok!"dstringLiteral", tok!"stringLiteral", tok!"wstringLiteral"); /** * Returns: true if the given ID type is for a string literal. */ public bool isStringLiteral(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; StringLiterals) { case T: return true; } default: return false; } } /** * Protection token types. */ public alias Protections = AliasSeq!(tok!"export", tok!"package", tok!"private", tok!"public", tok!"protected"); /** * Returns: true if the given ID type is for a protection attribute. 
*/ public bool isProtection(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; Protections) { case T: return true; } default: return false; } } public alias SpecialTokens = AliasSeq!(tok!"__DATE__", tok!"__TIME__", tok!"__TIMESTAMP__", tok!"__VENDOR__", tok!"__VERSION__", tok!"__FILE__", tok!"__FILE_FULL_PATH__", tok!"__LINE__", tok!"__MODULE__", tok!"__FUNCTION__", tok!"__PRETTY_FUNCTION__"); public bool isSpecialToken(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; SpecialTokens) { case T: return true; } default: return false; } } public alias Literals = AliasSeq!(StringLiterals, NumberLiterals, tok!"characterLiteral", SpecialTokens, tok!"true", tok!"false", tok!"null", tok!"$"); public bool isLiteral(IdType type) pure nothrow @safe @nogc { switch (type) { foreach (T; Literals) { case T: return true; } default: return false; } } /** * Returns: an array of tokens lexed from the given source code to the output range. All * whitespace tokens are skipped and comments are attached to the token nearest * to them. */ const(Token)[] getTokensForParser(R)(R sourceCode, LexerConfig config, StringCache* cache) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { enum CommentType : ubyte { notDoc, line, block } static CommentType commentType(string comment) pure nothrow @safe { if (comment.length < 3) return CommentType.notDoc; if (comment[0 ..3] == "///") return CommentType.line; if (comment[0 ..3] == "/++" || comment[0 ..3] == "/**") return CommentType.block; return CommentType.notDoc; } config.whitespaceBehavior = WhitespaceBehavior.skip; config.commentBehavior = CommentBehavior.noIntern; auto leadingCommentAppender = appender!(char[])(); leadingCommentAppender.reserve(1024); auto trailingCommentAppender = appender!(char[])(); trailingCommentAppender.reserve(1024); bool hadDdoc; string empty = cache.intern(""); auto output = appender!(typeof(return))(); auto lexer = DLexer(sourceCode, config, cache); size_t tokenCount; loop: while (!lexer.empty) switch (lexer.front.type) { case tok!"specialTokenSequence": case tok!"whitespace": lexer.popFront(); break; case tok!"comment": final switch (commentType(lexer.front.text)) { case CommentType.block: case CommentType.line: if (tokenCount > 0 && lexer.front.line == output.data[tokenCount - 1].line) { if (!trailingCommentAppender.data.empty) trailingCommentAppender.put('\n'); unDecorateComment(lexer.front.text, trailingCommentAppender); hadDdoc = true; } else { if (!leadingCommentAppender.data.empty) leadingCommentAppender.put('\n'); unDecorateComment(lexer.front.text, leadingCommentAppender); hadDdoc = true; } lexer.popFront(); break; case CommentType.notDoc: lexer.popFront(); break; } break; case tok!"__EOF__": if (!trailingCommentAppender.data.empty) (cast() output.data[$ - 1].trailingComment) = cache.intern(cast(string) trailingCommentAppender.data); break loop; default: Token t = lexer.front; lexer.popFront(); tokenCount++; if (!output.data.empty && !trailingCommentAppender.data.empty) { (cast() output.data[$ - 1].trailingComment) = cache.intern(cast(string) trailingCommentAppender.data); hadDdoc = false; } t.comment = leadingCommentAppender.data.length > 0 ? cache.intern(cast(string) leadingCommentAppender.data) : (hadDdoc ? empty : null); leadingCommentAppender.clear(); trailingCommentAppender.clear(); hadDdoc = false; output.put(t); break; } return output.data; } /** * The D lexer struct. 
*/ public struct DLexer { mixin Lexer!(Token, lexIdentifier, isSeparating, operators, dynamicTokens, keywords, pseudoTokenHandlers); /// @disable this(); /** * Params: * range = the bytes that compose the source code that will be lexed. * config = the lexer configuration to use. * cache = the string interning cache for de-duplicating identifiers and * other token text. * haveSSE42 = Parse streaming SIMD Extensions 4.2 in inline assembly */ this(R)(R range, const LexerConfig config, StringCache* cache, bool haveSSE42 = sse42()) pure nothrow @safe if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { this.haveSSE42 = haveSSE42; auto r = (range.length >= 3 && range[0] == 0xef && range[1] == 0xbb && range[2] == 0xbf) ? range[3 .. $] : range; this.range = LexerRange(cast(const(ubyte)[]) r); this.config = config; this.cache = cache; popFront(); } /// public void popFront()() pure nothrow @safe { do _popFront(); while (config.whitespaceBehavior == WhitespaceBehavior.skip && _front.type == tok!"whitespace"); } /** * Lexer error/warning message. */ static struct Message { /// 1-based line number size_t line; /// 1-based byte offset size_t column; /// Text of the message string message; /// `true` for an error, `false` for a warning bool isError; } /** * Returns: An array of all of the warnings and errors generated so far * during lexing. It may make sense to only check this when `empty` * returns `true`. */ const(Message[]) messages() const @property { return _messages; } private pure nothrow @safe: bool isWhitespace() { switch (range.bytes[range.index]) { case ' ': case '\r': case '\n': case '\t': case '\v': case '\f': return true; case 0xe2: auto peek = range.peek(2); return peek.length == 2 && peek[0] == 0x80 && (peek[1] == 0xa8 || peek[1] == 0xa9); default: return false; } } void popFrontWhitespaceAware() { switch (range.bytes[range.index]) { case '\r': range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n') { range.popFront(); range.incrementLine(); } else range.incrementLine(); return; case '\n': range.popFront(); range.incrementLine(); return; case 0xe2: auto lookahead = range.peek(3); if (lookahead.length == 3 && lookahead[1] == 0x80 && (lookahead[2] == 0xa8 || lookahead[2] == 0xa9)) { range.index+=3; range.column+=3; range.incrementLine(); return; } else { range.popFront(); return; } default: range.popFront(); return; } } void lexWhitespace(ref Token token) @trusted { mixin (tokenStart); loop: do { version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(true, '\t', ' ', '\v', '\f')(range.bytes.ptr + range.index, &range.index, &range.column); } } switch (range.bytes[range.index]) { case '\r': range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '\n') { range.popFront(); } range.column = 1; range.line += 1; break; case '\n': range.popFront(); range.column = 1; range.line += 1; break; case ' ': case '\t': case '\v': case '\f': range.popFront(); break; case 0xe2: if (range.index + 2 >= range.bytes.length) break loop; if (range.bytes[range.index + 1] != 0x80) break loop; if (range.bytes[range.index + 2] == 0xa8 || range.bytes[range.index + 2] == 0xa9) { range.index += 3; range.column += 3; range.column = 1; range.line += 1; break; } break loop; default: break loop; } } while (!(range.index >= range.bytes.length)); string text = config.whitespaceBehavior == WhitespaceBehavior.include ? 
cache.intern(range.slice(mark)) : ""; token = Token(tok!"whitespace", text, line, column, index); } void lexNumber(ref Token token) { mixin (tokenStart); if (range.bytes[range.index] == '0' && range.index + 1 < range.bytes.length) { immutable ahead = range.bytes[range.index + 1]; switch (ahead) { case 'x': case 'X': range.index += 2; range.column += 2; lexHex(token, mark, line, column, index); return; case 'b': case 'B': range.index += 2; range.column += 2; lexBinary(token, mark, line, column, index); return; default: lexDecimal(token, mark, line, column, index); return; } } else lexDecimal(token, mark, line, column, index); } void lexHex(ref Token token) { mixin (tokenStart); lexHex(token, mark, line, column, index); } void lexHex(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { IdType type = tok!"intLiteral"; bool foundDot; hexLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case 'a': .. case 'f': case 'A': .. case 'F': case '0': .. case '9': case '_': version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '9', 'a', 'f', 'A', 'F', '_', '_') (range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': lexIntSuffix(type); break hexLoop; case 'i': if (foundDot) lexFloatSuffix(type); break hexLoop; case 'L': if (foundDot) lexFloatSuffix(type); else lexIntSuffix(type); break hexLoop; case 'p': case 'P': lexExponent(type); break hexLoop; case '.': if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.') break hexLoop; else { // The following bit of silliness tries to tell the // difference between "int dot identifier" and // "double identifier". if (range.index + 1 < range.bytes.length) { switch (range.peekAt(1)) { case '0': .. case '9': case 'A': .. case 'F': case 'a': .. case 'f': goto doubleLiteral; default: break hexLoop; } } else { doubleLiteral: range.popFront(); foundDot = true; type = tok!"doubleLiteral"; } } break; default: break hexLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexBinary(ref Token token) { mixin (tokenStart); return lexBinary(token, mark, line, column, index); } void lexBinary(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { IdType type = tok!"intLiteral"; binaryLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case '0': case '1': case '_': version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '1', '_', '_')( range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': case 'L': lexIntSuffix(type); break binaryLoop; default: break binaryLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexDecimal(ref Token token) { mixin (tokenStart); lexDecimal(token, mark, line, column, index); } void lexDecimal(ref Token token, size_t mark, size_t line, size_t column, size_t index) @trusted { bool foundDot = range.bytes[range.index] == '.'; IdType type = tok!"intLiteral"; if (foundDot) { range.popFront(); type = tok!"doubleLiteral"; } decimalLoop: while (!(range.index >= range.bytes.length)) { switch (range.bytes[range.index]) { case '0': .. 
case '9': case '_': version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, '0', '9', '_', '_')(range.bytes.ptr + range.index); range.column += i; range.index += i; } else range.popFront(); } else range.popFront(); break; case 'u': case 'U': if (!foundDot) lexIntSuffix(type); break decimalLoop; case 'i': lexFloatSuffix(type); break decimalLoop; case 'L': if (foundDot) lexFloatSuffix(type); else lexIntSuffix(type); break decimalLoop; case 'f': case 'F': lexFloatSuffix(type); break decimalLoop; case 'e': case 'E': lexExponent(type); break decimalLoop; case '.': if (foundDot || !(range.index + 1 < range.bytes.length) || range.peekAt(1) == '.') break decimalLoop; else { // The following bit of silliness tries to tell the // difference between "int dot identifier" and // "double identifier". if (range.index + 1 < range.bytes.length) { immutable ch = range.peekAt(1); if (ch <= 0x2f || (ch >= '0' && ch <= '9') || (ch >= ':' && ch <= '@') || (ch >= '[' && ch <= '^') || (ch >= '{' && ch <= '~') || ch == '`' || ch == '_') { goto doubleLiteral; } else break decimalLoop; } else { doubleLiteral: range.popFront(); foundDot = true; type = tok!"doubleLiteral"; } } break; default: break decimalLoop; } } token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexIntSuffix(ref IdType type) pure nothrow @safe { bool secondPass; if (range.bytes[range.index] == 'u' || range.bytes[range.index] == 'U') { U: if (type == tok!"intLiteral") type = tok!"uintLiteral"; else type = tok!"ulongLiteral"; range.popFront(); if (secondPass) return; if (range.index < range.bytes.length && (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l')) goto L; goto I; } if (range.bytes[range.index] == 'L' || range.bytes[range.index] == 'l') { L: if (type == tok!"uintLiteral") type = tok!"ulongLiteral"; else type = tok!"longLiteral"; range.popFront(); if (range.index < range.bytes.length && (range.bytes[range.index] == 'U' || range.bytes[range.index] == 'u')) { secondPass = true; goto U; } goto I; } I: if (range.index < range.bytes.length && range.bytes[range.index] == 'i') { warning("Complex number literals are deprecated"); range.popFront(); if (type == tok!"longLiteral" || type == tok!"ulongLiteral") type = tok!"idoubleLiteral"; else type = tok!"ifloatLiteral"; } } void lexFloatSuffix(ref IdType type) pure nothrow @safe { switch (range.bytes[range.index]) { case 'L': range.popFront(); type = tok!"doubleLiteral"; break; case 'f': case 'F': range.popFront(); type = tok!"floatLiteral"; break; default: break; } if (range.index < range.bytes.length && range.bytes[range.index] == 'i') { warning("Complex number literals are deprecated"); range.popFront(); if (type == tok!"floatLiteral") type = tok!"ifloatLiteral"; else type = tok!"idoubleLiteral"; } } void lexExponent(ref IdType type) pure nothrow @safe { range.popFront(); bool foundSign = false; bool foundDigit = false; while (range.index < range.bytes.length) { switch (range.bytes[range.index]) { case '-': case '+': if (foundSign) { if (!foundDigit) error("Expected an exponent"); return; } foundSign = true; range.popFront(); break; case '0': .. 
case '9': case '_': foundDigit = true; range.popFront(); break; case 'L': case 'f': case 'F': case 'i': lexFloatSuffix(type); return; default: if (!foundDigit) error("Expected an exponent"); return; } } } void lexScriptLine(ref Token token) { mixin (tokenStart); while (!(range.index >= range.bytes.length) && !isNewline) { range.popFront(); } token = Token(tok!"scriptLine", cache.intern(range.slice(mark)), line, column, index); } void lexSpecialTokenSequence(ref Token token) { mixin (tokenStart); while (!(range.index >= range.bytes.length) && !isNewline) { range.popFront(); } token = Token(tok!"specialTokenSequence", cache.intern(range.slice(mark)), line, column, index); } void lexSlashStarComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.popFrontN(2); while (range.index < range.bytes.length) { version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) skip!(false, '\r', '\n', '/', '*', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } if (range.bytes[range.index] == '*') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/') { range.popFront(); break; } } else popFrontWhitespaceAware(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexSlashSlashComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.popFrontN(2); while (range.index < range.bytes.length) { version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '\r' || range.bytes[range.index] == '\n') break; range.popFront(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexSlashPlusComment(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"comment"; range.index += 2; range.column += 2; int depth = 1; while (depth > 0 && !(range.index >= range.bytes.length)) { version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '+', '/', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '+') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '/') { range.popFront(); depth--; } } else if (range.bytes[range.index] == '/') { range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '+') { range.popFront(); depth++; } } else popFrontWhitespaceAware(); } if (config.commentBehavior == CommentBehavior.intern) token = Token(type, cache.intern(range.slice(mark)), line, column, index); else token = Token(type, cast(string) range.slice(mark), line, column, index); } void lexStringLiteral(ref Token token) @trusted { mixin (tokenStart); range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '"', '\\', '\r', '\n', 0xe2)(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '"') { 
range.popFront(); break; } else if (range.bytes[range.index] == '\\') { if (!lexEscapeSequence()) { token = Token.init; return; } } else popFrontWhitespaceAware(); } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexWysiwygString(ref Token token) @trusted { mixin (tokenStart); IdType type = tok!"stringLiteral"; immutable bool backtick = range.bytes[range.index] == '`'; if (backtick) { range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { skip!(false, '\r', '\n', 0xe2, '`')(range.bytes.ptr + range.index, &range.index, &range.column); } } if (range.bytes[range.index] == '`') { range.popFront(); break; } else popFrontWhitespaceAware(); } } else { range.popFront(); if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } range.popFront(); while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated string literal"); token = Token(tok!""); return; } else if (range.bytes[range.index] == '"') { range.popFront(); break; } else popFrontWhitespaceAware(); } } lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } private ubyte lexStringSuffix(ref IdType type) pure nothrow @safe { if (range.index >= range.bytes.length) { type = tok!"stringLiteral"; return 0; } else { switch (range.bytes[range.index]) { case 'w': range.popFront(); type = tok!"wstringLiteral"; return 'w'; case 'd': range.popFront(); type = tok!"dstringLiteral"; return 'd'; case 'c': range.popFront(); type = tok!"stringLiteral"; return 'c'; default: type = tok!"stringLiteral"; return 0; } } } void lexDelimitedString(ref Token token) { mixin (tokenStart); range.index += 2; range.column += 2; ubyte open; ubyte close; switch (range.bytes[range.index]) { case '<': open = '<'; close = '>'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '{': open = '{'; close = '}'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '[': open = '['; close = ']'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; case '(': open = '('; close = ')'; range.popFront(); lexNormalDelimitedString(token, mark, line, column, index, open, close); break; default: lexHeredocString(token, mark, line, column, index); break; } } void lexNormalDelimitedString(ref Token token, size_t mark, size_t line, size_t column, size_t index, ubyte open, ubyte close) { int depth = 1; while (!(range.index >= range.bytes.length) && depth > 0) { if (range.bytes[range.index] == open) { depth++; range.popFront(); } else if (range.bytes[range.index] == close) { depth--; range.popFront(); if (depth <= 0) { if (range.bytes[range.index] == '"') { range.popFront(); } else { error("Error: `\"` expected to end delimited string literal"); token = Token(tok!""); return; } } } else popFrontWhitespaceAware(); } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexHeredocString(ref Token token, size_t mark, size_t line, size_t column, size_t index) { Token ident; lexIdentifier(ident); if (isNewline()) popFrontWhitespaceAware(); else error("Newline expected"); while 
(!(range.index >= range.bytes.length)) { if (isNewline()) { popFrontWhitespaceAware(); if (!range.canPeek(ident.text.length)) { error(ident.text ~ " expected"); break; } if (range.peek(ident.text.length - 1) == ident.text) { range.popFrontN(ident.text.length); break; } } else { range.popFront(); } } if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '"') { range.popFront(); } else error("`\"` expected"); IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } void lexTokenString(ref Token token) { mixin (tokenStart); assert (range.bytes[range.index] == 'q'); range.popFront(); assert (range.bytes[range.index] == '{'); range.popFront(); auto app = appender!string(); app.put("q{"); int depth = 1; immutable WhitespaceBehavior oldWhitespace = config.whitespaceBehavior; immutable StringBehavior oldString = config.stringBehavior; config.whitespaceBehavior = WhitespaceBehavior.include; config.stringBehavior = StringBehavior.source; scope (exit) { config.whitespaceBehavior = oldWhitespace; config.stringBehavior = oldString; } advance(_front); while (depth > 0 && !empty) { auto t = front(); if (t.text is null) app.put(str(t.type)); else app.put(t.text); if (t.type == tok!"}") { depth--; if (depth > 0) popFront(); } else if (t.type == tok!"{") { depth++; popFront(); } else popFront(); } IdType type = tok!"stringLiteral"; auto b = lexStringSuffix(type); if (b != 0) app.put(b); token = Token(type, cache.intern(cast(const(ubyte)[]) app.data), line, column, index); } void lexHexString(ref Token token) { mixin (tokenStart); range.index += 2; range.column += 2; loop: while (true) { if (range.index >= range.bytes.length) { error("Error: unterminated hex string literal"); token = Token(tok!""); return; } else if (isWhitespace()) popFrontWhitespaceAware(); else switch (range.bytes[range.index]) { case '0': .. case '9': case 'A': .. case 'F': case 'a': .. case 'f': range.popFront(); break; case '"': range.popFront(); break loop; default: error("Error: invalid character in hex string"); token = Token(tok!""); return; } } IdType type = tok!"stringLiteral"; lexStringSuffix(type); token = Token(type, cache.intern(range.slice(mark)), line, column, index); } bool lexEscapeSequence() { range.popFront(); if (range.index >= range.bytes.length) { error("Error: non-terminated character escape sequence."); return false; } switch (range.bytes[range.index]) { case '\'': case '"': case '?': case '\\': case 'a': case 'b': case 'f': case 'n': case 'r': case 't': case 'v': range.popFront(); break; case 'x': range.popFront(); foreach (i; 0 .. 2) { if (range.index >= range.bytes.length) { error("Error: 2 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. case 'f': case 'A': .. case 'F': range.popFront(); break; default: error("Error: 2 hex digits expected."); return false; } } break; case '0': if (!(range.index + 1 < range.bytes.length) || ((range.index + 1 < range.bytes.length) && range.peekAt(1) == '\'')) { range.popFront(); break; } goto case; case '1': .. case '7': for (size_t i = 0; i < 3 && !(range.index >= range.bytes.length) && range.bytes[range.index] >= '0' && range.bytes[range.index] <= '7'; i++) range.popFront(); break; case 'u': range.popFront(); foreach (i; 0 .. 4) { if (range.index >= range.bytes.length) { error("Error: at least 4 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. 
case 'f': case 'A': .. case 'F': range.popFront(); break; default: error("Error: at least 4 hex digits expected."); return false; } } break; case 'U': range.popFront(); foreach (i; 0 .. 8) { if (range.index >= range.bytes.length) { error("Error: at least 8 hex digits expected."); return false; } switch (range.bytes[range.index]) { case '0': .. case '9': case 'a': .. case 'f': case 'A': .. case 'F': range.popFront(); break; default: error("Error: at least 8 hex digits expected."); return false; } } break; default: error("Invalid escape sequence"); while (true) { if (range.index >= range.bytes.length) { error("Error: non-terminated character escape sequence."); break; } if (range.bytes[range.index] == ';') { range.popFront(); break; } else { range.popFront(); } } return false; } return true; } void lexCharacterLiteral(ref Token token) { mixin (tokenStart); range.popFront(); if (range.empty) goto err; if (range.bytes[range.index] == '\\') lexEscapeSequence(); else if (range.bytes[range.index] == '\'') { range.popFront(); token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)), line, column, index); } else if (range.bytes[range.index] & 0x80) { while (range.bytes[range.index] & 0x80) range.popFront(); } else popFrontWhitespaceAware(); if (range.index < range.bytes.length && range.bytes[range.index] == '\'') { range.popFront(); token = Token(tok!"characterLiteral", cache.intern(range.slice(mark)), line, column, index); } else { err: error("Error: Expected `'` to end character literal"); token = Token(tok!""); } } void lexIdentifier(ref Token token) @trusted { mixin (tokenStart); if (isSeparating(0)) { error("Invalid identifier"); range.popFront(); } while (true) { version (iasm64NotWindows) { if (haveSSE42 && range.index + 16 < range.bytes.length) { immutable ulong i = rangeMatch!(false, 'a', 'z', 'A', 'Z', '_', '_') (range.bytes.ptr + range.index); range.column += i; range.index += i; } } if (isSeparating(0)) break; else range.popFront(); } token = Token(tok!"identifier", cache.intern(range.slice(mark)), line, column, index); } void lexDot(ref Token token) { mixin (tokenStart); if (!(range.index + 1 < range.bytes.length)) { range.popFront(); token = Token(tok!".", null, line, column, index); return; } switch (range.peekAt(1)) { case '0': .. case '9': lexNumber(token); return; case '.': range.popFront(); range.popFront(); if (!(range.index >= range.bytes.length) && range.bytes[range.index] == '.') { range.popFront(); token = Token(tok!"...", null, line, column, index); } else token = Token(tok!"..", null, line, column, index); return; default: range.popFront(); token = Token(tok!".", null, line, column, index); return; } } void lexLongNewline(ref Token token) @nogc { mixin (tokenStart); range.popFront(); range.popFront(); range.popFront(); range.incrementLine(); string text = config.whitespaceBehavior == WhitespaceBehavior.include ? 
cache.intern(range.slice(mark)) : ""; token = Token(tok!"whitespace", text, line, column, index); } bool isNewline() @nogc { if (range.bytes[range.index] == '\n') return true; if (range.bytes[range.index] == '\r') return true; return (range.bytes[range.index] & 0x80) && (range.index + 2 < range.bytes.length) && (range.peek(2) == "\u2028" || range.peek(2) == "\u2029"); } bool isSeparating(size_t offset) @nogc { enum : ubyte { n, y, m // no, yes, maybe } if (range.index + offset >= range.bytes.length) return true; auto c = range.bytes[range.index + offset]; static immutable ubyte[256] LOOKUP_TABLE = [ y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, n, n, n, n, n, n, n, n, n, n, y, y, y, y, y, y, y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, n, y, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, y, y, y, y, y, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m ]; immutable ubyte result = LOOKUP_TABLE[c]; if (result == n) return false; if (result == y) return true; if (result == m) { auto r = range; range.popFrontN(offset); return (r.canPeek(2) && (r.peek(2) == "\u2028" || r.peek(2) == "\u2029")); } assert (false); } enum tokenStart = q{ size_t index = range.index; size_t column = range.column; size_t line = range.line; auto mark = range.mark(); }; void error(string message) { _messages ~= Message(range.line, range.column, message, true); } void warning(string message) { _messages ~= Message(range.line, range.column, message, false); assert (_messages.length > 0); } Message[] _messages; StringCache* cache; LexerConfig config; bool haveSSE42; } /** * Creates a token range from the given source code. Creates a default lexer * configuration and a GC-managed string cache. */ public auto byToken(R)(R range) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { LexerConfig config; StringCache* cache = new StringCache(range.length.optimalBucketCount); return DLexer(range, config, cache); } /** * Creates a token range from the given source code. Uses the given string * cache. */ public auto byToken(R)(R range, StringCache* cache) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { LexerConfig config; return DLexer(range, config, cache); } /** * Creates a token range from the given source code. Uses the provided lexer * configuration and string cache. */ public auto byToken(R)(R range, const LexerConfig config, StringCache* cache) if (is(Unqual!(ElementEncodingType!R) : ubyte) && isDynamicArray!R) { return DLexer(range, config, cache); } /** * Removes "decoration" such as leading whitespace, leading + and * characters, * and places the result into the given output range */ public void unDecorateComment(T)(string comment, auto ref T outputRange) if (isOutputRange!(T, string)) in { assert (comment.length >= 3); } do { import std.string : chompPrefix, KeepTerminator, lineSplitter, stripRight; string leadingChars; enum LineType { none, normal, strange } LineType prevLineType; switch (comment[0 .. 
3]) { case "///": foreach (line; lineSplitter!(KeepTerminator.yes)(comment)) { if (leadingChars.empty) { size_t k = 3; while (k < line.length && (line[k] == ' ' || line[k] == '\t')) k++; leadingChars = line[0 .. k]; } outputRange.put(line.chompPrefix(leadingChars)); } break; case "/++": case "/**": alias CL = MultiLineCommentHelper!(ElementEncodingType!(typeof(comment))); CL cl = CL(comment); cl.process(outputRange); break; default: outputRange.put(comment); } } /// unittest { import std.array:array, appender; import std.stdio:stderr; stderr.writeln("Running unittest for unDecorateComment..."); string[] inputs = [ "/***************\n*******************/", "/***************\n *\n ******************/", "/**\n*/", "/** */", "/***/", "/******/", "/** abcde1 */", "/// abcde2\n/// abcde2", "/**\n * stuff1\n */", "/**\n *\n * stuff2\n */", "/**\n *\n * stuff3\n *\n */", "/**\n *\n * stuff4\n *\n*/", "/**\n * abcde3\n * abcde3 \n */", "/**\n * abcde4\n *\n * abcde4\n */", "/**abcde5\n*abcde5\n*/", "/** abcde6\n * abcde6\n*/", "/**\n1\n\n\n\n*/", "/**\r\n1\r\n\r\n\r\n\r\n*/", "/**\na1\n\na2\n\n*/", "/**b1\n*b2\n*b3*/", "/**c1\n *c2\n *c3*/", "/**d1\n *d2\n *d3\n*/", "///a\fbc\n///def" ]; string[] outputs = [ "", "", "", "", "", "", "abcde1", "abcde2\nabcde2", "stuff1", "stuff2", "stuff3", "stuff4", "abcde3\n abcde3", "abcde4\n\nabcde4", "abcde5\nabcde5", "abcde6\nabcde6", "1", "1", "a1\n\na2", "b1\nb2\nb3", "c1\nc2\nc3", "d1\nd2\nd3", "a\fbc\ndef" ]; // tests where * and + are not interchangeable string[2][] np = [ ["/**\n * d1\n d2\n */", "* d1\nd2"], ["/**\n + d1\n d2\n */", "+ d1\nd2"], ["/**d1\n\n\n*d2\n*/", "d1\n\n*d2"], ]; assert(inputs.length == outputs.length); foreach (pair; zip(inputs, outputs)) { foreach (b; [true, false]) { auto app = appender!string(); unDecorateComment(b ? pair[0] : pair[0].replace("*", "+"), app); assert(pair[1] == app.data, "[[" ~ pair[0] ~ "]] => [[" ~ app.data ~ "]]"); } } foreach (pair; np) { auto app = appender!string(); unDecorateComment(pair[0], app); assert(pair[1] == app.data, "[[" ~ pair[0] ~ "]] => [[" ~ app.data ~ "]]"); } stderr.writeln("Unittest for unDecorateComment passed."); } /** Gives a line per line view on DDOC comments of type `/++` and `/**` which * makes easier to remove the decoration and in an almost 100% nogc way. */ private struct MultiLineCommentHelper(CharType : const(char)) { // this struct is more used as a 'function with nested functions' would. this() @disable; this(this) @disable; auto opAssign(T)(T t) @disable; private: char[][] lines; // either lines.length or lines.length-1, depending on if last line only closes size_t lastLineInBlockPlusOne; // either '*' or '+' const(char) commentChar; // either 0 or 1, depending on if first line only opens ubyte firstLineInBlock; import std.ascii : isWhite; void stripIndent() @safe @nogc pure nothrow { if (lines.length < 2) return; size_t count; foreach (const j; 0 .. lines[1].length) if (!(lines[1][j]).isWhite) { count = j; break; } if (count < 2) return; foreach (ref line; lines[1 .. $]) { foreach (const j; 0 .. line.length) { if (!(line[j]).isWhite) break; if (j == count - 1) { line = line[j .. 
///
unittest
{
    import std.array : array, appender;
    import std.stdio : stderr;

    stderr.writeln("Running unittest for unDecorateComment...");

    string[] inputs = [
        "/***************\n*******************/",
        "/***************\n *\n ******************/",
        "/**\n*/",
        "/** */",
        "/***/",
        "/******/",
        "/** abcde1 */",
        "/// abcde2\n/// abcde2",
        "/**\n * stuff1\n */",
        "/**\n *\n * stuff2\n */",
        "/**\n *\n * stuff3\n *\n */",
        "/**\n *\n * stuff4\n *\n*/",
        "/**\n * abcde3\n * abcde3 \n */",
        "/**\n * abcde4\n *\n * abcde4\n */",
        "/**abcde5\n*abcde5\n*/",
        "/** abcde6\n * abcde6\n*/",
        "/**\n1\n\n\n\n*/",
        "/**\r\n1\r\n\r\n\r\n\r\n*/",
        "/**\na1\n\na2\n\n*/",
        "/**b1\n*b2\n*b3*/",
        "/**c1\n *c2\n *c3*/",
        "/**d1\n *d2\n *d3\n*/",
        "///a\fbc\n///def"
    ];
    string[] outputs = [
        "",
        "",
        "",
        "",
        "",
        "",
        "abcde1",
        "abcde2\nabcde2",
        "stuff1",
        "stuff2",
        "stuff3",
        "stuff4",
        "abcde3\n abcde3",
        "abcde4\n\nabcde4",
        "abcde5\nabcde5",
        "abcde6\nabcde6",
        "1",
        "1",
        "a1\n\na2",
        "b1\nb2\nb3",
        "c1\nc2\nc3",
        "d1\nd2\nd3",
        "a\fbc\ndef"
    ];

    // tests where * and + are not interchangeable
    string[2][] np = [
        ["/**\n * d1\n d2\n */", "* d1\nd2"],
        ["/**\n + d1\n d2\n */", "+ d1\nd2"],
        ["/**d1\n\n\n*d2\n*/", "d1\n\n*d2"],
    ];

    assert(inputs.length == outputs.length);
    foreach (pair; zip(inputs, outputs))
    {
        foreach (b; [true, false])
        {
            auto app = appender!string();
            unDecorateComment(b ? pair[0] : pair[0].replace("*", "+"), app);
            assert(pair[1] == app.data, "[[" ~ pair[0] ~ "]] => [[" ~ app.data ~ "]]");
        }
    }

    foreach (pair; np)
    {
        auto app = appender!string();
        unDecorateComment(pair[0], app);
        assert(pair[1] == app.data, "[[" ~ pair[0] ~ "]] => [[" ~ app.data ~ "]]");
    }
    stderr.writeln("Unittest for unDecorateComment passed.");
}

/**
 * Gives a line-by-line view on DDOC comments of type `/++` and `/**`, which
 * makes it easier to remove the decoration, in an almost 100% @nogc way.
 */
private struct MultiLineCommentHelper(CharType : const(char))
{
    // This struct is used more like a "function with nested functions" would be.
    this() @disable;
    this(this) @disable;
    auto opAssign(T)(T t) @disable;

private:

    char[][] lines;
    // either lines.length or lines.length-1, depending on if last line only closes
    size_t lastLineInBlockPlusOne;
    // either '*' or '+'
    const(char) commentChar;
    // either 0 or 1, depending on if first line only opens
    ubyte firstLineInBlock;

    import std.ascii : isWhite;

    void stripIndent() @safe @nogc pure nothrow
    {
        if (lines.length < 2)
            return;
        size_t count;
        foreach (const j; 0 .. lines[1].length)
            if (!(lines[1][j]).isWhite)
            {
                count = j;
                break;
            }
        if (count < 2)
            return;
        foreach (ref line; lines[1 .. $])
        {
            foreach (const j; 0 .. line.length)
            {
                if (!(line[j]).isWhite)
                    break;
                if (j == count - 1)
                {
                    line = line[j .. $];
                    break;
                }
            }
        }
    }

    void processFirstLine() @safe @nogc pure nothrow
    {
        assert(lines.length);
        if (lines[0].length > 3)
        {
            foreach (const i; 1 .. lines[0].length)
            {
                if (lines[0][i] == commentChar)
                {
                    if (i < lines[0].length - 2)
                        continue;
                    if (i == lines[0].length - 2 && lines[0][i + 1] == '/')
                    {
                        lines[0][] = ' ';
                        break;
                    }
                    if (i == lines[0].length - 1)
                    {
                        lines[0][] = ' ';
                        break;
                    }
                }
                else
                {
                    lines[0][0 .. i] = ' ';
                    break;
                }
            }
        }
        lines[0][0 .. 3] = "   ";
        if (lines.length == 1 && lines[0][$-2] == commentChar && lines[0][$-1] == '/')
        {
            lines[0][$-2 .. $] = "  ";
        }
        foreach (const i; 0 .. lines[0].length)
            if (!(lines[0][i].isWhite))
                return;
        firstLineInBlock = 1;
    }

    void processLastLine() @safe @nogc pure nothrow
    {
        lastLineInBlockPlusOne = lines.length;
        if (lines.length == 1)
            return;
        size_t closeStartIndex = size_t.max;
        foreach (const i; 0 .. lines[$-1].length)
        {
            if (lines[$-1][i] == commentChar)
            {
                if (closeStartIndex == size_t.max)
                    closeStartIndex = i;
                if (i == lines[$-1].length - 2)
                {
                    // see the FIXME note in unDecorate()
                    lastLineInBlockPlusOne = closeStartIndex == 0 ? lines.length - 1 : lines.length;
                    lines[$-1][closeStartIndex .. $] = ' ';
                    break;
                }
            }
            else
            {
                closeStartIndex = size_t.max;
                lastLineInBlockPlusOne = lines.length;
            }
        }
    }

    void unDecorate() @safe @nogc pure nothrow
    {
        if (lines.length == 1 || lines.length == 2 && lines[$-1].length == 0)
            return;
        bool allDecorated;
        static immutable char[2][2] pattern = [[' ', '*'], [' ', '+']];
        const ubyte patternIndex = commentChar == '+';
        // first line is never decorated
        const size_t lo = 1;
        // although very uncommon, the last line can be decorated, e.g. in `* lastline */`:
        // the first '*' is a deco if all prev lines are also decorated.
        // FIXME: `hi` should be set to `lastLineInBlockPlusOne`...
        const size_t hi = (lines[$-1].length > 1 && (lines[$-1][0] == commentChar ||
            lines[$-1][0 .. 2] == pattern[patternIndex])) ? lines.length : lines.length - 1;
        // deco with a leading white
        foreach (const i; lo .. hi)
        {
            if (lines[i].length < 2)
                break;
            else if (lines[i][0 .. 2] != pattern[patternIndex])
                break;
            else if (i == hi - 1)
                allDecorated = true;
        }
        // deco w/o leading white
        if (!allDecorated)
            foreach (const i; lo .. hi)
            {
                if (lines[i].length == 0)
                    break;
                if (lines[i][0] != commentChar)
                    break;
                else if (i == hi - 1)
                    allDecorated = true;
            }
        if (!allDecorated)
            return;

        const size_t indexToChange = (lines[lo][0] == commentChar) ? 0 : 1;
        foreach (ref line; lines[lo .. hi])
            line[indexToChange] = ' ';
    }

    void stripLeft() @safe @nogc pure nothrow
    {
        foreach (const i; 0 .. lines[0].length)
            if (!(lines[0][i]).isWhite)
            {
                lines[0] = lines[0][i .. $];
                break;
            }
        if (lines.length == 1)
            return;
        while (true)
        {
            bool processColumn;
            foreach (ref line; lines[1 .. lastLineInBlockPlusOne])
            {
                if (line.length == 0)
                    continue;
                if (!(line[0]).isWhite)
                    return;
                processColumn = true;
            }
            if (!processColumn)
                return;
            foreach (ref line; lines[1 .. lastLineInBlockPlusOne])
            {
                if (line.length == 0)
                    continue;
                line = line[1 .. $];
            }
        }
    }
    void stripRight() @safe @nogc pure nothrow
    {
        foreach (ref line; lines[0 .. lines.length])
        {
            if (line.length == 0)
                continue;
            if ((line[$-1]).isWhite)
            {
                size_t firstWhite = line.length;
                while (firstWhite > 0 && (line[firstWhite-1]).isWhite)
                    firstWhite--;
                line = line[0 .. firstWhite];
            }
        }
    }

    void run() @safe @nogc pure nothrow
    {
        stripIndent();
        processFirstLine();
        processLastLine();
        unDecorate();
        stripLeft();
        stripRight();
    }

public:

    this(CharType[] text) @safe pure nothrow
    {
        commentChar = text[1];
        size_t startIndex, i;
        Appender!(char[][]) linesApp;
        linesApp.reserve(512);

        void storeLine(size_t endIndexPlusOne)
        {
            static if (isMutable!CharType)
                linesApp ~= text[startIndex .. endIndexPlusOne];
            else
                linesApp ~= text[startIndex .. endIndexPlusOne].dup;
        }

        while (true)
        {
            if (i == text.length - 1)
            {
                storeLine(text.length);
                break;
            }
            if (text[i] == '\n')
            {
                storeLine(i);
                startIndex = i + 1;
            }
            else if (text[i .. i+2] == "\r\n")
            {
                storeLine(i);
                i++;
                startIndex = i + 1;
            }
            i++;
        }
        lines = linesApp.data;
    }

    void process(T)(ref T outbuffer)
    {
        run();
        outbuffer.reserve(lines.length * 90);
        bool prevWritten, empties;
        foreach (ref line; lines[firstLineInBlock .. lines.length])
        {
            if (line.length != 0)
            {
                // close preceding line
                if (prevWritten)
                    outbuffer ~= "\n";
                // insert new empty line
                if (prevWritten && empties)
                    outbuffer ~= "\n";

                outbuffer ~= line;
                prevWritten = true;
                empties = false;
            }
            else empties = true;
        }
    }
}

/**
 * Helper function used to avoid too many allocations while lexing.
 *
 * Params:
 *      size = The length in bytes of the source file.
 *
 * Returns:
 *      The optimal initial bucket count a `StringCache` should have.
 */
size_t optimalBucketCount(size_t size)
{
    import std.math : nextPow2;
    return nextPow2((size + 31U) / 32U).min(1U << 30U);
}

///
unittest
{
    assert(optimalBucketCount(1) == 2);
    assert(optimalBucketCount(9000 * 32) == 16384);
    assert(optimalBucketCount(100_000_000_000UL) == 1 << 30);
}
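// Editor's note: a hedged sketch added for illustration; it is not part of the
// original sources. It mirrors how `byToken` itself sizes a StringCache from
// the source length, and why the result must be a power of two: the
// StringCache constructor below asserts it. The sample source text is an
// assumption.
unittest
{
    auto source = cast(ubyte[]) "void main() {}";
    StringCache* cache = new StringCache(source.length.optimalBucketCount);
    auto tokens = byToken(source, cache);
    assert(!tokens.empty);
}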
/**
 * The string cache is used for string interning.
 *
 * It will only store a single copy of any string that it is asked to hold.
 * Interned strings can be compared for equality by comparing their $(B .ptr)
 * field.
 *
 * Default and postblit constructors are disabled. When a StringCache goes out
 * of scope, the memory held by it is freed.
 *
 * See_also: $(LINK http://en.wikipedia.org/wiki/String_interning)
 */
struct StringCache
{
public pure nothrow @nogc:

    @disable this();
    @disable this(this);

    /**
     * Params: bucketCount = the initial number of buckets. Must be a
     * power of two
     */
    this(size_t bucketCount) nothrow @trusted @nogc
    in
    {
        import core.bitop : popcnt;
        static if (size_t.sizeof == 8)
        {
            immutable low = popcnt(cast(uint) bucketCount);
            immutable high = popcnt(cast(uint) (bucketCount >> 32));
            assert ((low == 0 && high == 1) || (low == 1 && high == 0));
        }
        else
        {
            static assert (size_t.sizeof == 4);
            assert (popcnt(cast(uint) bucketCount) == 1);
        }
    }
    do
    {
        buckets = (cast(Node**) calloc((Node*).sizeof, bucketCount))[0 .. bucketCount];
    }

    ~this()
    {
        Block* current = rootBlock;
        while (current !is null)
        {
            Block* prev = current;
            current = current.next;
            free(cast(void*) prev);
        }
        foreach (nodePointer; buckets)
        {
            Node* currentNode = nodePointer;
            while (currentNode !is null)
            {
                if (currentNode.mallocated)
                    free(currentNode.str.ptr);
                Node* prev = currentNode;
                currentNode = currentNode.next;
                free(prev);
            }
        }
        rootBlock = null;
        free(buckets.ptr);
        buckets = null;
    }

    /**
     * Caches a string.
     */
    string intern(const(ubyte)[] str) @safe
    {
        if (str is null || str.length == 0)
            return "";
        return _intern(str);
    }

    /**
     * ditto
     */
    string intern(string str) @trusted
    {
        return intern(cast(ubyte[]) str);
    }

    /**
     * The default bucket count for the string cache.
     */
    static enum defaultBucketCount = 4096;

private:

    string _intern(const(ubyte)[] bytes) @trusted
    {
        immutable uint hash = hashBytes(bytes);
        immutable size_t index = hash & (buckets.length - 1);
        Node* s = find(bytes, hash);
        if (s !is null)
            return cast(string) s.str;
        ubyte[] mem = void;
        bool mallocated = bytes.length > BIG_STRING;
        if (mallocated)
            mem = (cast(ubyte*) malloc(bytes.length))[0 .. bytes.length];
        else
            mem = allocate(bytes.length);
        mem[] = bytes[];
        Node* node = cast(Node*) malloc(Node.sizeof);
        node.str = mem;
        node.hash = hash;
        node.next = buckets[index];
        node.mallocated = mallocated;
        buckets[index] = node;
        return cast(string) mem;
    }

    Node* find(const(ubyte)[] bytes, uint hash) @trusted
    {
        import std.algorithm : equal;
        immutable size_t index = hash & (buckets.length - 1);
        Node* node = buckets[index];
        while (node !is null)
        {
            if (node.hash == hash && bytes == cast(ubyte[]) node.str)
                return node;
            node = node.next;
        }
        return node;
    }

    static uint hashBytes(const(ubyte)[] data) pure nothrow @trusted @nogc
    in
    {
        assert (data !is null);
        assert (data.length > 0);
    }
    do
    {
        immutable uint m = 0x5bd1e995;
        immutable int r = 24;
        uint h = cast(uint) data.length;
        while (data.length >= 4)
        {
            uint k = (cast(ubyte) data[3]) << 24
                | (cast(ubyte) data[2]) << 16
                | (cast(ubyte) data[1]) << 8
                | (cast(ubyte) data[0]);
            k *= m;
            k ^= k >> r;
            k *= m;
            h *= m;
            h ^= k;
            data = data[4 .. $];
        }
        switch (data.length & 3)
        {
        case 3:
            h ^= data[2] << 16;
            goto case;
        case 2:
            h ^= data[1] << 8;
            goto case;
        case 1:
            h ^= data[0];
            h *= m;
            break;
        default:
            break;
        }
        h ^= h >> 13;
        h *= m;
        h ^= h >> 15;
        return h;
    }

    ubyte[] allocate(size_t numBytes) pure nothrow @trusted @nogc
    in
    {
        assert (numBytes != 0);
    }
    out (result)
    {
        assert (result.length == numBytes);
    }
    do
    {
        Block* r = rootBlock;
        size_t i = 0;
        while (i <= 3 && r !is null)
        {
            immutable size_t available = r.bytes.length;
            immutable size_t oldUsed = r.used;
            immutable size_t newUsed = oldUsed + numBytes;
            if (newUsed <= available)
            {
                r.used = newUsed;
                return r.bytes[oldUsed .. newUsed];
            }
            i++;
            r = r.next;
        }
        Block* b = cast(Block*) calloc(Block.sizeof, 1);
        b.used = numBytes;
        b.next = rootBlock;
        rootBlock = b;
        return b.bytes[0 .. numBytes];
    }

    static struct Node
    {
        ubyte[] str = void;
        Node* next = void;
        uint hash = void;
        bool mallocated = void;
    }

    static struct Block
    {
        Block* next;
        size_t used;
        enum BLOCK_CAPACITY = BLOCK_SIZE - size_t.sizeof - (void*).sizeof;
        ubyte[BLOCK_CAPACITY] bytes;
    }

    static assert (BLOCK_SIZE == Block.sizeof);
    enum BLOCK_SIZE = 1024 * 16;

    // If a string would take up more than 1/4 of a block, allocate it outside
    // of the block.
    enum BIG_STRING = BLOCK_SIZE / 4;

    Node*[] buckets;
    Block* rootBlock;
}
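// Editor's note: a hedged sketch added for illustration; it is not part of the
// original sources. Because the cache stores a single copy of each distinct
// string, two interned copies of equal text can be compared by pointer, as the
// StringCache documentation above states. The sample text is an assumption.
unittest
{
    StringCache cache = StringCache(StringCache.defaultBucketCount);
    string a = cache.intern("identifier");
    string b = cache.intern("identifier".idup);
    assert(a.ptr is b.ptr);
}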
private extern(C) void* calloc(size_t, size_t) nothrow pure @nogc @trusted;
private extern(C) void* malloc(size_t) nothrow pure @nogc @trusted;
private extern(C) void free(void*) nothrow pure @nogc @trusted;

unittest
{
    auto source = cast(ubyte[]) q{ import std.stdio;}c;
    auto tokens = getTokensForParser(source, LexerConfig(),
        new StringCache(StringCache.defaultBucketCount));
    assert (tokens.map!"a.type"().equal([tok!"import", tok!"identifier", tok!".",
        tok!"identifier", tok!";"]));
}

/// Test \x char sequence
unittest
{
    auto toks = (string s) => byToken(cast(ubyte[])s);

    // valid
    immutable hex = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f',
        'A','B','C','D','E','F'];
    auto source = "";
    foreach (h1; hex)
        foreach (h2; hex)
            source ~= "'\\x" ~ h1 ~ h2 ~ "'";
    assert (toks(source).filter!(t => t.type != tok!"characterLiteral").empty);

    // invalid
    assert (toks(`'\x'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
    assert (toks(`'\x_'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xA'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xAY'`).messages[0] == DLexer.Message(1,5,"Error: 2 hex digits expected.",true));
    assert (toks(`'\xXX'`).messages[0] == DLexer.Message(1,4,"Error: 2 hex digits expected.",true));
}

version (iasm64NotWindows)
{
    /**
     * Skips between 0 and 16 bytes that match (or do not match) one of the
     * given $(B chars).
     */
    void skip(bool matching, chars...)(const ubyte*, ulong*, ulong*) pure nothrow
        @trusted @nogc if (chars.length <= 8)
    {
        enum constant = ByteCombine!chars;
        enum charsLength = chars.length;
        static if (matching)
            enum flags = 0b0001_0000;
        else
            enum flags = 0b0000_0000;
        asm pure nothrow @nogc
        {
            naked;
            movdqu XMM1, [RDX];
            mov R10, constant;
            movq XMM2, R10;
            mov RAX, charsLength;
            mov RDX, 16;
            pcmpestri XMM2, XMM1, flags;
            add [RSI], RCX;
            add [RDI], RCX;
            ret;
        }
    }

    /**
     * Returns: the number of bytes starting at the given location that match
     * (or do not match if $(B invert) is true) the byte ranges in $(B chars).
     */
    ulong rangeMatch(bool invert, chars...)(const ubyte*) pure nothrow @trusted @nogc
    {
        static assert (chars.length % 2 == 0);
        enum constant = ByteCombine!chars;
        static if (invert)
            enum rangeMatchFlags = 0b0000_0100;
        else
            enum rangeMatchFlags = 0b0001_0100;
        enum charsLength = chars.length;
        asm pure nothrow @nogc
        {
            naked;
            movdqu XMM1, [RDI];
            mov R10, constant;
            movq XMM2, R10;
            mov RAX, charsLength;
            mov RDX, 16;
            pcmpestri XMM2, XMM1, rangeMatchFlags;
            mov RAX, RCX;
            ret;
        }
    }

    template ByteCombine(c...)
    {
        static assert (c.length <= 8);
        static if (c.length > 1)
            enum ulong ByteCombine = c[0] | (ByteCombine!(c[1..$]) << 8);
        else
            enum ulong ByteCombine = c[0];
    }
}

unittest
{
    import core.exception : RangeError;
    import std.exception : assertNotThrown;

    static immutable src1 = "/++";
    static immutable src2 = "/**";

    LexerConfig cf;
    StringCache ca = StringCache(16);

    assertNotThrown!RangeError(getTokensForParser(src1, cf, &ca));
    assertNotThrown!RangeError(getTokensForParser(src2, cf, &ca));
}

unittest
{
    static immutable src = `"\eeee"`;

    LexerConfig cf;
    StringCache ca = StringCache(16);

    auto l = DLexer(src, cf, &ca);
    assert(l.front().type == tok!"");
    assert(!l.messages.empty);
}
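// Editor's note: a small illustration added by the editor; it is not part of
// the original sources. ByteCombine (defined in the iasm64NotWindows block
// above) packs up to eight byte values into one ulong, least-significant byte
// first, which is the constant loaded into XMM2 for the pcmpestri-based
// helpers. The concrete values below are assumptions derived from that
// formula.
version (iasm64NotWindows) unittest
{
    static assert(ByteCombine!('a') == 0x61);
    static assert(ByteCombine!('a', 'b') == 0x6261);
    static assert(ByteCombine!('0', '9') == 0x3930);
}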