/** Memory management. The code in this module is borrowed from David Simcha's
    $(LINK2 http://www.dsource.org/projects/dstats/,dstats) project,
    specifically the dstats.alloc module.

    Authors: David Simcha
    License: Boost License 1.0
*/
// Adapted for SciD by Lars T. Kyllingstad
module scid.core.memory;


/* Permission is hereby granted, free of charge, to any person or organization
 * obtaining a copy of the software and accompanying documentation covered by
 * this license (the "Software") to use, reproduce, display, distribute,
 * execute, and transmit the Software, and to prepare derivative works of the
 * Software, and to permit third-parties to whom the Software is furnished to
 * do so, all subject to the following:
 *
 * The copyright notices in the Software and this entire statement, including
 * the above license grant, this restriction and the following disclaimer,
 * must be included in all copies of the Software, in whole or in part, and
 * all derivative works of the Software, unless such copies or derivative
 * works are solely in the form of machine-executable object code generated by
 * a source language processor.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
 * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


import core.memory;
import std.range;
import std.traits;
static import core.stdc.stdio;


version(unittest)
{
    import std.conv;
}




/**Compile-time predicate: true iff T is exactly one of Types.*/
template IsType(T, Types...)
{
    // Original idea by Burton Radons, modified.
    // Recursion over the type list.  `enum` is the modern manifest-constant
    // idiom, consistent with blockAttribute below (was `const bool`).
    static if (Types.length == 0)
        enum bool IsType = false;
    else
        enum bool IsType = is(T == Types[0]) || IsType!(T, Types[1 .. $]);
}

/**Extracts the element type T from an array type T[].*/
template ArrayType1(T: T[]) {
    alias T ArrayType1;
}

/**True iff any of Types may contain data the GC must scan for pointers.
 * Primitive value types are pointer-free; structs and static arrays are
 * checked recursively over their fields/elements; anything else (classes,
 * pointers, dynamic arrays, ...) is conservatively treated as referencing.*/
template isReferenceType(Types...) { //Thanks to Bearophile.
    static if (Types.length == 0) {
        enum bool isReferenceType = false;
    } else static if (Types.length == 1) {
        static if (IsType!(Unqual!(Types[0]), bool, byte, ubyte, short, ushort,
                           int, uint, long, ulong, float, double, real, ifloat,
                           idouble, ireal, cfloat, cdouble, creal, char, dchar,
                           wchar) ) {
            enum bool isReferenceType = false;
        } else static if ( is(Types[0] == struct) ) {
            // A struct is pointer-free iff every field is.
            enum bool isReferenceType =
                isReferenceType!(FieldTypeTuple!(Types[0]));
        } else static if (isStaticArray!(Types[0])) {
            enum bool isReferenceType = isReferenceType!(ArrayType1!(Types[0]));
        } else
            enum bool isReferenceType = true;
    } else
        // Logical || short-circuits at compile time, so the tail of the
        // list is not instantiated once a reference type is found (the
        // original used bitwise |, which always instantiated both sides).
        enum bool isReferenceType = isReferenceType!(Types[0]) ||
                                    isReferenceType!(Types[1 .. $]);
}  // end isReferenceType!()

unittest {
    static assert(!isReferenceType!(typeof("Foo"[0])));
    static assert(isReferenceType!(uint*));
    //static assert(!isReferenceType!(typeof([0,1,2])));
    struct noPtrs {
        uint f;
        uint b;
    }
    struct ptrs {
        uint* f;
        uint b;
    }
    static assert(!isReferenceType!(noPtrs));
    static assert(isReferenceType!(ptrs));
    //pragma(msg, "Passed isReferenceType unittest");
}

/**GC block attribute for an allocation holding T's:  NO_SCAN when T is
 * provably pointer-free, 0 (scanned) otherwise.*/
template blockAttribute(T) {
    static if (isReferenceType!(T))
        enum blockAttribute = 0;
    else enum blockAttribute = GC.BlkAttr.NO_SCAN;
}

///Returns a new array of type T w/o initializing elements.
T[] newVoid(T)(size_t length) {
    // Raw GC allocation: elements are deliberately left uninitialized.
    T* ptr = cast(T*) GC.malloc(length * T.sizeof, blockAttribute!(T));
    return ptr[0..length];
}

///Convenience overload: forwards an int length to the size_t version.
void lengthVoid(T)(ref T[] input, int newLength) {
    input.lengthVoid(cast(size_t) newLength);
}

///Lengthens an array w/o initializing new elements.
void lengthVoid(T)(ref T[] input, size_t newLength) {
    if (newLength <= input.length ||
        GC.sizeOf(input.ptr) >= newLength * T.sizeof) {
        input = input.ptr[0..newLength];  //Don't realloc if I don't have to.
    } else {
        T* newPtr = cast(T*) GC.realloc(input.ptr,
            T.sizeof * newLength, blockAttribute!(T));
        input = newPtr[0..newLength];
    }
}

///Convenience overload: forwards an int length to the size_t version.
void reserve(T)(ref T[] input, int newLength) {
    input.reserve(cast(size_t) newLength);
}

/**Reserves more space for an array w/o changing its length or initializing
 * the space.*/
void reserve(T)(ref T[] input, size_t newLength) {
    // Fixed: the original queried an undefined free function capacity(ptr);
    // use GC.sizeOf on the block, exactly as lengthVoid does above.
    if (newLength <= input.length ||
        GC.sizeOf(input.ptr) >= newLength * T.sizeof)
        return;
    // Fixed: pass blockAttribute!(T) so pointer-free arrays keep NO_SCAN
    // (the original omitted it and then called the undefined
    // staticSetTypeInfo!(T), a stale API with no definition in this module).
    T* newPtr = cast(T*) GC.realloc(input.ptr, T.sizeof * newLength,
                                    blockAttribute!(T));
    // Length is preserved; only the underlying capacity grows.
    input = newPtr[0..input.length];
}

///True iff `T[] arr; arr ~= U.init;` would compile.
private template Appends(T, U) {
    enum bool Appends = AppendsImpl!(T, U).ret;
}

private template AppendsImpl(T, U) {
    T[] a;
    U b;
    enum bool ret = is(typeof(a ~= b));
}

///Appends to an array, deleting the old array if it has to be realloced.
void appendDelOld(T, U)(ref T[] to, U from)
if(Appends!(T, U)) {
    auto oldPtr = to.ptr;
    to ~= from;
    // If ~= moved the array, reclaim the old block immediately instead of
    // waiting for a GC cycle.
    if (oldPtr != to.ptr)
    {
        static if (__VERSION__ >= 2079)
        {
            // `delete` was deprecated; __delete is its druntime replacement.
            import core.memory : __delete;
            __delete(oldPtr);
        }
        else
            delete oldPtr;
    }
}

unittest {
    uint[] foo;
    foo.appendDelOld(5);
    foo.appendDelOld(4);
    foo.appendDelOld(3);
    foo.appendDelOld(2);
    foo.appendDelOld(1);
    assert (foo == cast(uint[]) [5,4,3,2,1]);
    //writefln("Passed appendDelOld test.");
}

// C functions, marked w/ nothrow.
// Declared by hand (rather than via core.stdc.stdio) so they can be called
// from nothrow code in this module.
extern(C) nothrow int fprintf(shared(void*), in char *,...);
extern(C) nothrow void exit(int);

/**A struct to allocate memory in a strictly first-in last-out order for
 * things like scratch space.  Technically, memory can safely escape the
 * scope in which it was allocated.  However, this is a very bad idea
 * unless being done within the private API of a class, struct or nested
 * function, where it can be guaranteed that LIFO will not be violated.
 *
 * Under the hood, this works by allocating large blocks (currently 4 MB)
 * from the GC, and sub-allocating these as a stack.  Very large allocations
 * (currently > 4MB) are simply performed on the heap.  There are two ways to
 * free memory:  Calling TempAlloc.free() frees the last allocated block.
 * Calling TempAlloc.frameFree() frees all memory allocated since the last
 * call to TempAlloc.frameInit().
 *
 * All allocations are aligned on 16-byte boundaries using padding, since on x86,
 * 16-byte alignment is necessary to make SSE2 work.  Note, however, that this
 * is implemented based on the assumption that the GC allocates using 16-byte
 * alignment (which appears to be true in druntime.)
 */
struct TempAlloc {
private:
    // Simple, fast stack w/o error checking.  Backed by GC.realloc'd
    // storage; used for the inUse and freelist stacks below.
    struct Stack(T) {
        private size_t capacity;   // Allocated slots in `data`.
        private size_t index;      // Number of live elements.
        private T* data;
        private enum sz = T.sizeof;

        private static size_t max(size_t lhs, size_t rhs) pure nothrow {
            return (rhs > lhs) ? rhs : lhs;
        }

        void push(T elem) nothrow {
            if (capacity == index) {
                // Grow geometrically, starting at 16 slots.
                capacity = max(16, capacity * 2);
                data = cast(T*) GC.realloc(data, capacity * sz, cast(GC.BlkAttr) 0);
                data[index..capacity] = T.init;  // Prevent false ptrs.
            }
            data[index++] = elem;
        }

        T pop() nothrow {
            // NOTE: no underflow check, by design (see struct comment).
            index--;
            auto ret = data[index];
            data[index] = T.init;  // Prevent false ptrs.
            return ret;
        }
    }

    // A suspended allocation block: how much of it was used, and its base.
    struct Block {
        size_t used = 0;
        void* space = null;
    }

    // Per-thread allocator state (the `state` variable below is thread-local).
    final class State {
        size_t used;          // Bytes consumed in the current block.
        void* space;          // Base of the current block.
        size_t totalAllocs;   // Live entries in lastAlloc.
        void*[] lastAlloc;    // LIFO record of every allocation's pointer.
        uint nblocks;         // Blocks currently in use (incl. current).
        uint nfree;           // Blocks sitting on the freelist.
        size_t frameIndex;    // totalAllocs value at the last frameInit().

        // inUse holds info for all blocks except the one currently being
        // allocated from.  freelist holds space ptrs for all free blocks.
        Stack!(Block) inUse;
        Stack!(void*) freelist;

        void putLast(void* last) nothrow {
            // Add an element to lastAlloc, checking length first.
            if (totalAllocs == lastAlloc.length)
                doubleSize(lastAlloc);
            lastAlloc[totalAllocs++] = cast(void*) last;
        }
    }

    enum size_t alignBytes = 16U;                 // SSE2-friendly alignment.
    enum blockSize = 4U * 1024U * 1024U;          // 4 MB per stack block.
    enum nBookKeep = 1_048_576;  // How many bytes to allocate upfront for bookkeeping.
    static State state;          // Thread-local; lazily created by getState().

    // Grows lastAlloc to twice its length.  If realloc moved the data,
    // the old block is freed eagerly rather than waiting for a GC cycle.
    static void doubleSize(ref void*[] lastAlloc) nothrow {
        size_t newSize = lastAlloc.length * 2;
        void** ptr = cast(void**)
            GC.realloc(lastAlloc.ptr, newSize * (void*).sizeof, GC.BlkAttr.NO_SCAN);

        if (lastAlloc.ptr != ptr) {
            GC.free(lastAlloc.ptr);
        }

        lastAlloc = ptr[0..newSize];
    }

    // Creates and installs a fresh State with one empty block and the
    // bookkeeping array pre-allocated.
    static State stateInit() nothrow {
        State stateCopy = new State;

        with(stateCopy) {
            space = GC.malloc(blockSize, GC.BlkAttr.NO_SCAN);
            lastAlloc = (cast(void**) GC.malloc(nBookKeep, GC.BlkAttr.NO_SCAN))
                        [0..nBookKeep / (void*).sizeof];
            nblocks++;
        }

        state = stateCopy;
        return stateCopy;
    }

    // Rounds nbytes up to the next multiple of alignBytes (16).
    static size_t getAligned(size_t nbytes) pure nothrow {
        size_t rem = nbytes % alignBytes;
        return (rem == 0) ? nbytes : nbytes - rem + alignBytes;
    }

public:
    /**Allows caller to cache the state class on the stack and pass it in as a
     * parameter.  This is ugly, but results in a speed boost that can be
     * significant in some cases because it avoids a thread-local storage
     * lookup.  Also used internally.*/
    static State getState() nothrow {
        State stateCopy = state;
        return (stateCopy is null) ? stateInit() : stateCopy;
    }

    /**Initializes a frame, i.e. marks the current allocation position.
     * Memory past the position at which this was last called will be
     * freed when frameFree() is called.  Returns a reference to the
     * State class in case the caller wants to cache it for speed.*/
    static State frameInit() nothrow {
        return frameInit(getState());
    }

    /**Same as frameInit() but uses stateCopy cached on stack by caller
     * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static State frameInit(State stateCopy) nothrow {
        with(stateCopy) {
            // The previous frameIndex is pushed onto lastAlloc disguised as
            // a pointer, so frameFree() can restore it when unwinding.
            putLast( cast(void*) frameIndex );
            frameIndex = totalAllocs;
        }
        return stateCopy;
    }

    /**Frees all memory allocated by TempAlloc since the last call to
     * frameInit().*/
    static void frameFree() nothrow {
        frameFree(getState());
    }

    /**Same as frameFree() but uses stateCopy cached on stack by caller
     * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void frameFree(State stateCopy) nothrow {
        with(stateCopy) {
            // Pop every allocation made inside this frame, then restore
            // the enclosing frame's index (stored by frameInit()).
            while (totalAllocs > frameIndex) {
                free(stateCopy);
            }
            frameIndex = cast(size_t) lastAlloc[--totalAllocs];
        }
    }

    /**Purely a convenience overload, forwards arguments to TempAlloc.malloc().*/
    static void* opCall(T...)(T args) nothrow {
        return TempAlloc.malloc(args);
    }

    /**Allocates nbytes bytes on the TempAlloc stack.  NOT safe for real-time
     * programming, since if there's not enough space on the current block,
     * a new one will automatically be created.  Also, very large objects
     * (currently over 4MB) will simply be heap-allocated.
     *
     * Bugs:  Memory allocated by TempAlloc is not scanned by the GC.
     * This is necessary for performance and to avoid false pointer issues.
     * Do not store the only reference to a GC-allocated object in
     * TempAlloc-allocated memory.*/
    static void* malloc(size_t nbytes) nothrow {
        return malloc(nbytes, getState());
    }

    /**Same as malloc() but uses stateCopy cached on stack by caller
     * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void* malloc(size_t nbytes, State stateCopy) nothrow {
        nbytes = getAligned(nbytes);
        with(stateCopy) {
            void* ret;
            if (blockSize - used >= nbytes) {
                // Fast path: fits in the current block.
                ret = space + used;
                used += nbytes;
            } else if (nbytes > blockSize) {
                // Oversized request: bypass the stack, go straight to the GC
                // heap.  free() recognizes these by pointer range.
                ret = GC.malloc(nbytes, GC.BlkAttr.NO_SCAN);
            } else if (nfree > 0) {
                // Reuse a previously freed block from the freelist.
                inUse.push(Block(used, space));
                space = freelist.pop();
                used = nbytes;
                nfree--;
                nblocks++;
                ret = space;
            } else {  // Allocate more space.
                inUse.push(Block(used, space));
                space = GC.malloc(blockSize, GC.BlkAttr.NO_SCAN);
                nblocks++;
                used = nbytes;
                ret = space;
            }
            putLast(ret);
            return ret;
        }
    }

    /**Frees the last piece of memory allocated by TempAlloc.  Since
     * all memory must be allocated and freed in strict LIFO order,
     * there's no need to pass a pointer in.  All bookkeeping for figuring
     * out what to free is done internally.*/
    static void free() nothrow {
        free(getState());
    }

    /**Same as free() but uses stateCopy cached on stack by caller
     * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void free(State stateCopy) nothrow {
        with(stateCopy) {
            void* lastPos = lastAlloc[--totalAllocs];

            // Handle large blocks.  Anything outside the current block's
            // address range must have been heap-allocated by malloc().
            if (lastPos > space + blockSize || lastPos < space) {
                GC.free(lastPos);
                return;
            }

            // Rewind the stack top to the freed allocation's start.
            used = (cast(size_t) lastPos) - (cast(size_t) space);
            if (nblocks > 1 && used == 0) {
                // Current block is now empty: park it on the freelist and
                // resume allocating from the previous block.
                freelist.push(space);
                Block newHead = inUse.pop();
                space = newHead.space;
                used = newHead.used;
                nblocks--;
                nfree++;

                // Keep the freelist from hoarding: if it holds at least
                // twice as many blocks as are in use, release half of it.
                if (nfree >= nblocks * 2) {
                    foreach(i; 0..nfree / 2) {
                        GC.free(freelist.pop());
                        nfree--;
                    }
                }
            }
        }
    }
}

/**Allocates an array of type T and size size using TempAlloc.
 * Note that appending to this array using the ~= operator,
 * or enlarging it using the .length property, will result in
 * undefined behavior.  This is because, if the array is located
 * at the beginning of a TempAlloc block, the GC will think the
 * capacity is as large as a TempAlloc block, and will overwrite
 * adjacent TempAlloc-allocated data, instead of reallocating it.
 *
 * Bugs: Do not store the only reference to a GC-allocated reference object
 * in an array allocated by newStack because this memory is not
 * scanned by the GC.*/
T[] newStack(T)(size_t size, TempAlloc.State state = null) nothrow {
    if(state is null) {
        state = TempAlloc.getState();
    }

    size_t bytes = size * T.sizeof;
    T* ptr = cast(T*) TempAlloc.malloc(bytes, state);
    return ptr[0..size];
}

///**Same as newStack(size_t) but uses stateCopy cached on stack by caller
//* to avoid a thread-local storage lookup.  Strictly a speed hack.*/
//T[] newStack(T)(size_t size, TempAlloc.State state) nothrow {
//    size_t bytes = size * T.sizeof;
//    T* ptr = cast(T*) TempAlloc.malloc(bytes, state);
//    return ptr[0..size];
//}

/**Concatenate any number of arrays of the same type, placing results on
 * the TempAlloc stack.*/
T[0] stackCat(T...)(T data) {
    // All arguments must share one array type.
    foreach(array; data) {
        static assert(is(typeof(array) == typeof(data[0])));
    }

    size_t totalLen = 0;
    foreach(array; data) {
        totalLen += array.length;
    }
    auto ret = newStack!(Unqual!(typeof(T[0][0])))(totalLen);

    size_t offset = 0;
    foreach(array; data) {
        ret[offset..offset + array.length] = array[0..$];
        offset += array.length;
    }
    return cast(T[0]) ret;
}

// Copies `from` into `to`, using slice assignment when the types support
// it and element-by-element copy otherwise.
void rangeCopy(T, U)(T to, U from) {
    static if(is(typeof(to[] = from[]))) {
        to[] = from[];
    } else static if(isRandomAccessRange!(T)) {
        size_t i = 0;
        foreach(elem; from) {
            to[i++] = elem;
        }
    }
}

/**Creates a duplicate of a range for temporary use within a function in the
 * best way that can be done safely.  If ElementType!(T) is a value type
 * or T is an array, the results can safely be placed in TempAlloc because
 * either it doesn't need to be scanned by the GC or there's guaranteed to be
 * another reference to the contents somewhere.  Otherwise, the results
 * are placed on the GC heap.
 *
 * This function is much faster if T has a length, but works even if it doesn't.
 */
Unqual!(ElementType!(T))[] tempdup(T)(T data)
if(isInputRange!(T) && (isArray!(T) || !isReferenceType!(ElementType!(T)))) {
    alias ElementType!(T) E;
    alias Unqual!(E) U;
    static if(hasLength!(T)) {
        // Known length: allocate once on the TempAlloc stack and copy.
        U[] ret = newStack!(U)(data.length);
        rangeCopy(ret, data);
        return ret;
    } else {
        // Unknown length: claim a zero-byte marker on the TempAlloc stack
        // and grow the allocation in place element by element.
        auto state = TempAlloc.getState();
        auto startPtr = TempAlloc(0, state);
        size_t bytesCopied = 0;

        while(!data.empty) {  // Make sure range interface is being used.
            auto elem = data.front;
            if(state.used + U.sizeof <= TempAlloc.blockSize) {
                // Room left in the current block: append directly.
                data.popFront;
                *(cast(U*) (startPtr + bytesCopied)) = elem;
                bytesCopied += U.sizeof;
                state.used += U.sizeof;
            } else {
                if(bytesCopied + U.sizeof >= TempAlloc.blockSize / 2) {
                    // Then just heap-allocate.  Copy what we have so far,
                    // finish the range via an appender, release the stack
                    // allocation and record the heap pointer in its place.
                    U[] result = newVoid!(U)(bytesCopied / U.sizeof);
                    result[] = (cast(U*) startPtr)[0..result.length];
                    finishCopy(result, data);
                    TempAlloc.free;
                    state.putLast(result.ptr);
                    return result;
                } else {
                    // Still small: roll back the in-progress stack
                    // allocation and redo it (one element larger) on a
                    // fresh block, copying the old contents across.
                    U[] oldData = (cast(U*) startPtr)[0..bytesCopied / U.sizeof];
                    state.used -= bytesCopied;
                    state.totalAllocs--;
                    U[] newArray = newStack!(U)(bytesCopied / U.sizeof + 1, state);
                    newArray[0..oldData.length] = oldData[];
                    startPtr = state.space;
                    newArray[$ - 1] = elem;
                    bytesCopied += U.sizeof;
                    data.popFront;
                }
            }
        }
        // Pad the allocation so the stack top stays 16-byte aligned,
        // clamping at the block boundary.
        auto rem = bytesCopied % TempAlloc.alignBytes;
        if(rem != 0) {
            auto toAdd = 16 - rem;
            if(state.used + toAdd < TempAlloc.blockSize) {
                state.used += toAdd;
            } else {
                state.used = TempAlloc.blockSize;
            }
        }
        return (cast(U*) startPtr)[0..bytesCopied / U.sizeof];
    }
}

// Overload for ranges of reference-containing, non-array element types:
// these must live on the GC heap so the GC can scan them.
Unqual!(ElementType!(T))[] tempdup(T)(T data)
if(isInputRange!(T) && !(isArray!(T) || !isReferenceType!(ElementType!(T)))) {
    auto ret = toArray(data);
    // Record the pointer so TempAlloc.free()/frameFree() stay in LIFO sync.
    TempAlloc.getState().putLast(ret.ptr);
    return ret;
}

// Drains `range` into `result` by appending.  Used by tempdup when a
// lengthless range outgrows the stack-growth strategy.
private void finishCopy(T, U)(ref T[] result, U range) {
    auto app = appender(&result);
    foreach(elem; range) {
        app.put(elem);
    }
}

// See Bugzilla 2873.  This can be removed once that's fixed.
template hasLength(R) {
    enum bool hasLength = is(typeof(R.init.length) : ulong) ||
                          is(typeof(R.init.length()) : ulong);
}

/**Converts any range to an array on the GC heap by the most efficient means
 * available.  If it is already an array, duplicates the range.*/
// NOTE(review): IterType is not defined in this module — presumably declared
// elsewhere in SciD; confirm before moving this code.
Unqual!(IterType!(T))[] toArray(T)(T range) if(isIterable!(T)) {
    static if(isArray!(T)) {
        // Allow fast copying by assuming that the input is an array.
        return range.dup;
    } else static if(hasLength!(T)) {
        // Preallocate array, then copy.
        auto ret = newVoid!(Unqual!(IterType!(T)))(range.length);
        static if(is(typeof(ret[] = range[]))) {
            ret[] = range[];
        } else {
            size_t pos = 0;
            foreach(elem; range) {
                ret[pos++] = elem;
            }
        }
        return ret;
    } else {
        // Don't have length, have to use appending.
        Unqual!(IterType!(T))[] ret;
        auto app = appender(&ret);
        foreach(elem; range) {
            app.put(elem);
        }
        return ret;
    }
}

version (TempAllocUnittest) unittest {
    // Create quick and dirty finite but lengthless range.
    struct Count {
        uint num;
        uint upTo;
        uint front() {
            return num;
        }
        void popFront() {
            num++;
        }
        bool empty() {
            return num >= upTo;
        }
    }

    TempAlloc(1024 * 1024 * 3);
    Count count;
    count.upTo = 1024 * 1025;
    auto asArray = tempdup(count);
    foreach(i, elem; asArray) {
        assert (i == elem, to!(string)(i) ~ "\t" ~ to!(string)(elem));
    }
    assert (asArray.length == 1024 * 1025);
    TempAlloc.free;
    TempAlloc.free;
    while(TempAlloc.getState().freelist.index > 0) {
        TempAlloc.getState().freelist.pop;
    }
}

/**A string to mixin at the beginning of a scope, purely for
 * convenience.  Initializes a TempAlloc frame using frameInit(),
 * and inserts a scope statement to delete this frame at the end
 * of the current scope.
 *
 * Slower than calling free() manually when only a few pieces
 * of memory will be allocated in the current scope, due to the
 * extra bookkeeping involved.  Can be faster, however, when
 * large amounts of allocations, such as arrays of arrays,
 * are allocated, due to caching of data stored in thread-local
 * storage.*/
immutable char[] newFrame =
    "TempAlloc.frameInit(); scope(exit) TempAlloc.frameFree();";

version (TempAllocUnittest) unittest {
    /* Not a particularly good unittest in that it depends on knowing the
     * internals of TempAlloc, but it's the best I could come up w/.  This
     * is really more of a stress test/sanity check than a normal unittest.*/

    // First test to make sure a large number of allocations does what it's
    // supposed to in terms of reallocing lastAlloc[], etc.
    enum nIter = TempAlloc.blockSize * 5 / TempAlloc.alignBytes;
    foreach(i; 0..nIter) {
        TempAlloc(TempAlloc.alignBytes);
    }
    assert(TempAlloc.getState().nblocks == 5, to!string(TempAlloc.getState().nblocks));
    assert(TempAlloc.getState().nfree == 0);
    foreach(i; 0..nIter) {
        TempAlloc.free;
    }
    assert(TempAlloc.getState().nblocks == 1);
    assert(TempAlloc.getState().nfree == 2);

    // Make sure logic for freeing excess blocks works.  If it doesn't this
    // test will run out of memory.
    enum allocSize = TempAlloc.blockSize / 2;
    void*[] oldStates;
    foreach(i; 0..50) {
        foreach(j; 0..50) {
            TempAlloc(allocSize);
        }
        foreach(j; 0..50) {
            TempAlloc.free;
        }
        oldStates ~= cast(void*) TempAlloc.state;
        TempAlloc.state = null;
    }
    oldStates = null;

    // Make sure data is stored properly.
    foreach(i; 0..10) {
        TempAlloc(allocSize);
    }
    foreach(i; 0..5) {
        TempAlloc.free;
    }
    GC.collect;  // Make sure nothing that shouldn't is getting GC'd.
    void* space = TempAlloc.state.space;
    size_t used = TempAlloc.state.used;

    TempAlloc.frameInit;
    // This array of arrays should not be scanned by the GC because otherwise
    // bugs caused by not having the GC scan certain internal things in
    // TempAlloc that it should would not be exposed.
    uint[][] arrays = (cast(uint[]*) GC.malloc((uint[]).sizeof * 10,
                       GC.BlkAttr.NO_SCAN))[0..10];
    foreach(i; 0..10) {
        uint[] data = newStack!(uint)(250_000);
        foreach(j, ref e; data) {
            e = j * (i + 1);  // Arbitrary values that can be read back later.
        }
        arrays[i] = data;
    }

    // Make stuff get overwritten if blocks are getting GC'd when they're not
    // supposed to.
    GC.minimize;  // Free up all excess pools.
    uint[][] foo;
    foreach(i; 0..40) {
        foo ~= new uint[1_048_576];
    }
    foo = null;

    for(size_t i = 9; i != size_t.max; i--) {
        foreach(j, e; arrays[i]) {
            assert (e == j * (i + 1));
        }
    }
    TempAlloc.frameFree;
    assert (space == TempAlloc.state.space);
    assert (used == TempAlloc.state.used);
    while(TempAlloc.state.nblocks > 1 || TempAlloc.state.used > 0) {
        TempAlloc.free;
    }
}