vibe.internal.utilallocator 4/81(4%) line coverage

      
10
20
30
40
50
60
70
80
90
100
110
126
136
142
156
160
170
180
190
200
210
220
230
240
250
260
270
280
290
300
310
320
330
340
350
360
370
380
390
400
410
420
430
440
450
460
470
480
490
500
510
520
530
540
550
560
570
580
590
600
610
620
630
640
650
660
670
680
690
700
710
720
730
740
750
760
770
780
790
800
810
820
830
840
850
860
870
880
890
900
910
920
930
940
950
960
970
980
990
1000
1010
1020
1030
1040
1050
1060
1070
1080
1090
1100
1110
1120
1130
1140
1150
1160
1170
1180
1190
1200
1210
1220
1230
1240
1250
1260
1270
1280
1290
1300
1310
1320
1330
1340
1350
1360
1370
1380
1390
1400
1410
1420
1430
1440
1450
1460
1470
1480
1490
1500
1510
1520
1530
1540
1550
1560
1570
1580
1590
1600
1610
1620
1630
1640
1650
1660
1670
1680
1690
1700
1710
1720
1730
1740
1750
1760
1770
1780
1790
1800
1810
1820
1830
1840
1850
1860
module vibe.internal.utilallocator;

public import std.experimental.allocator : allocatorObject, CAllocatorImpl, dispose,
	expandArray, IAllocator, make, makeArray, shrinkArray, theAllocator;
public import std.experimental.allocator.mallocator;
public import std.experimental.allocator.building_blocks.affix_allocator;

// NOTE: this needs to be used instead of theAllocator due to Phobos issue 17564
@property IAllocator vibeThreadAllocator()
@safe nothrow @nogc {
	import std.experimental.allocator.gc_allocator;
	// One lazily created allocator object per thread (static = TLS in D).
	static IAllocator s_threadAllocator;
	if (!s_threadAllocator)
		s_threadAllocator = () @trusted { return allocatorObject(GCAllocator.instance); } ();
	return s_threadAllocator;
}

/** Region/arena style allocator backed by a linked list of fixed-size pools.

	Allocations are bump-pointer slices carved out of the current pool;
	individual deallocation is not supported (`deallocate` always returns
	false) — memory is reclaimed only wholesale via `deallocateAll` (also
	invoked by the destructor).

	Params:
		Allocator = base allocator used to obtain the pool memory
		leak = when true, `deallocateAll` drops the pool list without
			returning the memory to the base allocator
*/
final class RegionListAllocator(Allocator, bool leak = false) : IAllocator {
	import vibe.internal.memory_legacy : AllocSize, alignedSize;
	import std.algorithm.comparison : min, max;
	import std.conv : emplace;

	static if (__VERSION__ < 2072)
		import std.experimental.allocator.common : Ternary;
	else
		import std.typecons : Ternary;

	// Singly linked pool node: `data` is the full buffer, `remaining` the
	// still-unused tail of it.
	static struct Pool { Pool* next; void[] data; void[] remaining; }

	private {
		Allocator m_baseAllocator;
		Pool* m_freePools; // pools with remaining capacity
		Pool* m_fullPools; // exhausted pools
		size_t m_poolSize; // minimum size of each newly allocated pool
	}

	this(size_t pool_size, Allocator base) @safe nothrow
	{
		m_poolSize = pool_size;
		m_baseAllocator = base;
	}

	~this() { deallocateAll(); }

	override @property uint alignment() const { return 0x10; }

	/// Total bytes held in pool buffers (used and unused).
	@property size_t totalSize() @safe nothrow @nogc
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length;
		return amt;
	}

	/// Bytes currently handed out to callers (full pools count entirely).
	@property size_t allocatedSize() @safe nothrow @nogc
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length - p.remaining.length;
		return amt;
	}

	/** Bump-pointer allocation from the first free pool with enough room.

		A new pool of at least `m_poolSize` bytes is created on demand; a
		pool whose space is exhausted is moved to the full list.
	*/
	override void[] allocate(size_t sz, TypeInfo ti = null)
	{
		auto aligned_sz = alignedSize(sz);

		// find a free pool that can hold the request
		Pool* pprev = null;
		Pool* p = cast(Pool*)m_freePools;
		while( p && p.remaining.length < aligned_sz ){
			pprev = p;
			p = p.next;
		}

		if( !p ){
			// no fitting pool — create one sized for the larger of the
			// request and the configured pool size
			auto pmem = m_baseAllocator.allocate(AllocSize!Pool);

			p = emplace!Pool(cast(Pool*)pmem.ptr);
			p.data = m_baseAllocator.allocate(max(aligned_sz, m_poolSize));
			p.remaining = p.data;
			p.next = cast(Pool*)m_freePools;
			m_freePools = p;
			pprev = null;
		}

		auto ret = p.remaining[0 .. aligned_sz];
		p.remaining = p.remaining[aligned_sz .. $];
		if( !p.remaining.length ){
			// pool exhausted — unlink from the free list, push onto full list
			if( pprev ){
				pprev.next = p.next;
			} else {
				m_freePools = p.next;
			}
			p.next = cast(Pool*)m_fullPools;
			m_fullPools = p;
		}

		// hand back the requested (unaligned) size
		return ret[0 .. sz];
	}

	// Unsupported operations per the IAllocator contract (null/false result).
	override void[] alignedAllocate(size_t n, uint a) { return null; }
	override bool alignedReallocate(ref void[] b, size_t size, uint alignment) { return false; }
	override void[] allocateAll() { return null; }

	// NOTE(review): reports "empty" whenever no pool is completely full —
	// partially used free pools still count as empty; confirm intended.
	override @property Ternary empty() const { return m_fullPools !is null ? Ternary.no : Ternary.yes; }

	override size_t goodAllocSize(size_t s) { return alignedSize(s); }

	import std.traits : Parameters;
	// Signature of these IAllocator methods changed across Phobos versions
	// (const vs. mutable parameter) — pick the matching override at compile time.
	static if (is(Parameters!(IAllocator.resolveInternalPointer)[0] == const(void*))) {
		override Ternary resolveInternalPointer(const void* p, ref void[] result) { return Ternary.unknown; }
	} else {
		override Ternary resolveInternalPointer(void* p, ref void[] result) { return Ternary.unknown; }
	}
	static if (is(Parameters!(IAllocator.owns)[0] == const(void[]))) {
		override Ternary owns(const void[] b) { return Ternary.unknown; }
	} else {
		override Ternary owns(void[] b) { return Ternary.unknown; }
	}

	override bool reallocate(ref void[] arr, size_t newsize)
	{
		return expand(arr, newsize);
	}

	/** Grows (or shrinks) `arr` in place when it is the most recent
		allocation of the head free pool; otherwise allocates a new block
		and copies the data over. Always reports success.
	*/
	override bool expand(ref void[] arr, size_t newsize)
	{
		auto aligned_sz = alignedSize(arr.length);
		auto aligned_newsz = alignedSize(newsize);

		if (aligned_newsz <= aligned_sz) {
			// shrinking within the already aligned span is trivial
			arr = arr[0 .. newsize]; // TODO: back up remaining
			return true;
		}

		// in-place growth only works for the last block of the head pool
		auto pool = m_freePools;
		bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr;
		if (last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz) {
			pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $];
			arr = arr.ptr[0 .. aligned_newsz];
			assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?");
			arr = arr[0 .. newsize];
		} else {
			// move: fresh allocation + copy of the old contents
			auto ret = allocate(newsize);
			assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?");
			ret[0 .. min(arr.length, newsize)] = arr[0 .. min(arr.length, newsize)];
			arr = ret;
		}

		return true;
	}

	/// Individual deallocation is not supported by this region allocator.
	override bool deallocate(void[] mem)
	{
		return false;
	}

	/** Releases every pool back to the base allocator (unless `leak`).

		Used blocks become invalid; the allocator is reusable afterwards.
	*/
	override bool deallocateAll()
	{
		// put all full Pools into the free pools list
		for (Pool* p = cast(Pool*)m_fullPools, pnext; p; p = pnext) {
			pnext = p.next;
			p.next = cast(Pool*)m_freePools;
			m_freePools = cast(Pool*)p;
		}
		// FIX: the full list head must be cleared here — previously it kept
		// pointing at pools that are freed below, so empty()/totalSize/
		// allocatedSize would walk freed memory after deallocateAll().
		m_fullPools = null;

		// free up all pools
		for (Pool* p = cast(Pool*)m_freePools; p; p = p.next)
			p.remaining = p.data;

		Pool* pnext;
		for (auto p = cast(Pool*)m_freePools; p; p = pnext) {
			pnext = p.next;
			static if (!leak) {
				m_baseAllocator.deallocate(p.data);
				m_baseAllocator.deallocate((cast(void*)p)[0 .. AllocSize!Pool]);
			}
		}
		m_freePools = null;

		return true;
	}
}