stdx-allocator-3.1.0~beta.2/.gitignore

.dub
docs.json
__dummy.html
docs/
allocator.so
allocator.dylib
allocator.dll
allocator.lib
allocator-test-*
stdx-allocator-test-*
*.a
*.exe
*.o
*.obj
*.lst
dub.selections.json
subprojects/mir-core
build

stdx-allocator-3.1.0~beta.2/.travis.yml

sudo: false
language: d
os:
  - linux
  - osx
d:
  - ldc
  - ldc-beta
  - dmd
  - dmd-beta
env:
  - ARCH="x86_64"
branches:
  only:
    - master
    - stable
matrix:
  include:
    - {os: linux, d: ldc-beta, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}}
    - {os: linux, d: ldc, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}}
    - {os: linux, d: dmd-beta, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}}
    - {os: linux, d: dmd, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}}
install:
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      curl -L "https://github.com/ninja-build/ninja/releases/download/v1.7.2/ninja-linux.zip" -o ninja-linux.zip
      sudo unzip ninja-linux.zip -d /usr/local/bin
      sudo chmod 755 /usr/local/bin/ninja
      sudo add-apt-repository -y ppa:deadsnakes/ppa
      sudo apt-get -y update
      sudo apt-get -y install python3.6
      curl https://bootstrap.pypa.io/get-pip.py | sudo python3.6
      sudo pip3 install meson
    fi
packages:
  - pkg-config
script:
  - echo "$ARCH"
  - dub test --arch "$ARCH"
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then meson build --default-library=static && cd build && ninja -j4 && ninja -j4 test -v && cd ..; fi
  # TODO: 32bit meson test

stdx-allocator-3.1.0~beta.2/README.md

stdx.allocator
===============

[Build Status](https://travis-ci.org/dlang-community/stdx-allocator/)

Extracted std.experimental.allocator for usage via DUB

stdx-allocator-3.1.0~beta.2/dub.sdl

name "stdx-allocator"
description "Extracted std.experimental.allocator"
authors "Team Phobos"
copyright "Copyright © 2017, Team Phobos"
license "BSL-1.0"
dependency "mir-core" version=">=0.0.5 <0.3.0"
configuration "unittest" {
}
configuration "wasm" {
    dflags "-mtriple=wasm32-unknown-unknown-wasm" "-betterC"
    targetType "sourceLibrary"
}

stdx-allocator-3.1.0~beta.2/meson.build

project('stdx-allocator', 'd',
    meson_version: '>=0.45',
    license: 'BSL-1.0',
    version: '3.0.1'
)

project_soversion = '0'

mir_core_dep = dependency('mir-core', fallback : ['mir-core', 'mir_core_dep'])
required_deps = [mir_core_dep]

pkgc = import('pkgconfig')

#
# Sources
#
allocator_src = [
    'source/stdx/allocator/internal.d',
    'source/stdx/allocator/building_blocks/null_allocator.d',
    'source/stdx/allocator/building_blocks/fallback_allocator.d',
    'source/stdx/allocator/building_blocks/bitmapped_block.d',
    'source/stdx/allocator/building_blocks/stats_collector.d',
    'source/stdx/allocator/building_blocks/package.d',
    'source/stdx/allocator/building_blocks/affix_allocator.d',
    'source/stdx/allocator/building_blocks/free_list.d',
    'source/stdx/allocator/building_blocks/bucketizer.d',
    'source/stdx/allocator/building_blocks/free_tree.d',
    'source/stdx/allocator/building_blocks/kernighan_ritchie.d',
    'source/stdx/allocator/building_blocks/allocator_list.d',
    'source/stdx/allocator/building_blocks/segregator.d',
    'source/stdx/allocator/building_blocks/scoped_allocator.d',
    'source/stdx/allocator/building_blocks/region.d',
    'source/stdx/allocator/building_blocks/quantizer.d',
    'source/stdx/allocator/typed.d',
    'source/stdx/allocator/mmap_allocator.d',
    'source/stdx/allocator/showcase.d',
    'source/stdx/allocator/mallocator.d',
    'source/stdx/allocator/package.d',
    'source/stdx/allocator/common.d',
    'source/stdx/allocator/gc_allocator.d',
]

src_dir = include_directories('source/')

#
# Targets
#
allocator_lib = library('stdx-allocator',
    [allocator_src],
    include_directories: [src_dir],
    install: true,
    version: meson.project_version(),
    soversion: project_soversion,
    dependencies: required_deps,
)

#
# Tests
#
allocator_test_exe = executable(meson.project_name() + '-test',
    allocator_src,
    include_directories: src_dir,
    d_unittest: true,
    link_args: '-main',
    dependencies: required_deps,
)

# for use by Vibe.d and others which embed this as subproject
allocator_dep = declare_dependency(
    link_with: allocator_lib,
    include_directories: src_dir,
    dependencies: required_deps,
)

#
# Install
#
install_subdir('source/stdx/', install_dir: 'include/d/stdx-allocator/')

pkgc.generate(name: 'stdx-allocator',
    libraries: allocator_lib,
    requires: ['mir-core'],
    subdirs: 'd/' + meson.project_name(),
    version: meson.project_version(),
    description: 'High-level interface for allocators for D, extracted from Phobos.'
)

test(meson.project_name() + '-test', allocator_test_exe)

stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/affix_allocator.d

///
module stdx.allocator.building_blocks.affix_allocator;

/**
Allocator that adds some extra data before (of type $(D Prefix)) and/or after
(of type $(D Suffix)) any allocation made with its parent allocator. This is
useful when additional allocation-related information is needed, such as
mutexes, reference counts, or walls for debugging memory corruption errors.

If $(D Prefix) is not $(D void), $(D Allocator) must guarantee an alignment at
least as large as $(D Prefix.alignof).

Suffixes are slower to get at because of alignment rounding, so prefixes should
be preferred. However, small prefixes blunt the alignment, so if a large
alignment with a small affix is needed, suffixes should be chosen.

The following methods are defined if $(D Allocator) defines them, and forward
to it: $(D deallocateAll), $(D empty), $(D owns).
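For instance, a reference count can be attached to every allocation by using a
`size_t` prefix (a sketch; the payload size and initial count are
illustrative):
---
import stdx.allocator.mallocator : Mallocator;

alias Counted = AffixAllocator!(Mallocator, size_t);
auto b = Counted.instance.allocate(64); // 64 payload bytes, count stored before them
Counted.instance.prefix(b) = 1;         // initialize the reference count
assert(Counted.instance.prefix(b) == 1);
---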
*/ struct AffixAllocator(Allocator, Prefix, Suffix = void) { import mir.utility : min; import mir.conv : emplace; import stdx.allocator : IAllocator, theAllocator; import stdx.allocator.common : stateSize, forwardToMember, roundUpToMultipleOf, alignedAt, alignDownTo, roundUpToMultipleOf, hasStaticallyKnownAlignment; import stdx.allocator.internal : isPowerOf2; import stdx.allocator.internal : Ternary; static if (hasStaticallyKnownAlignment!Allocator) { static assert( !stateSize!Prefix || Allocator.alignment >= Prefix.alignof, "AffixAllocator does not work with allocators offering a smaller" ~ " alignment than the prefix alignment."); } static assert(alignment % Suffix.alignof == 0, "This restriction could be relaxed in the future."); /** If $(D Prefix) is $(D void), the alignment is that of the parent. Otherwise, the alignment is the same as the $(D Prefix)'s alignment. */ static if (hasStaticallyKnownAlignment!Allocator) { enum uint alignment = isPowerOf2(stateSize!Prefix) ? min(stateSize!Prefix, Allocator.alignment) : (stateSize!Prefix ? Prefix.alignof : Allocator.alignment); } else static if (is(Prefix == void)) { enum uint alignment = platformAlignment; } else { enum uint alignment = Prefix.alignof; } /** If the parent allocator $(D Allocator) is stateful, an instance of it is stored as a member. Otherwise, $(D AffixAllocator) uses `Allocator.instance`. In either case, the name $(D _parent) is uniformly used for accessing the parent allocator. */ static if (stateSize!Allocator) { Allocator _parent; static if (is(Allocator == IAllocator)) { Allocator parent() { if (_parent is null) _parent = theAllocator; assert(alignment <= _parent.alignment); return _parent; } } else { alias parent = _parent; } } else { alias parent = Allocator.instance; } private template Impl(bool isStatic) { size_t goodAllocSize(size_t s) { import stdx.allocator.common : goodAllocSize; auto a = actualAllocationSize(s); return roundUpToMultipleOf(parent.goodAllocSize(a) - stateSize!Prefix - stateSize!Suffix, this.alignment); } static if (isStatic) private size_t actualAllocationSize(size_t s) { assert(s > 0); static if (!stateSize!Suffix) { return s + stateSize!Prefix; } else { return roundUpToMultipleOf(s + stateSize!Prefix, Suffix.alignof) + stateSize!Suffix; } } else private size_t actualAllocationSize(size_t s) const { assert(s > 0); static if (!stateSize!Suffix) { return s + stateSize!Prefix; } else { return roundUpToMultipleOf(s + stateSize!Prefix, Suffix.alignof) + stateSize!Suffix; } } static if (isStatic) private void[] actualAllocation(void[] b) { assert(b !is null); return (b.ptr - stateSize!Prefix) [0 .. actualAllocationSize(b.length)]; } else private void[] actualAllocation(void[] b) const { assert(b !is null); return (b.ptr - stateSize!Prefix) [0 .. actualAllocationSize(b.length)]; } void[] allocate(size_t bytes) { if (!bytes) return null; auto result = parent.allocate(actualAllocationSize(bytes)); if (result is null) return null; static if (stateSize!Prefix) { assert(result.ptr.alignedAt(Prefix.alignof)); emplace!Prefix(cast(Prefix*) result.ptr); } static if (stateSize!Suffix) { auto suffixP = result.ptr + result.length - Suffix.sizeof; assert(suffixP.alignedAt(Suffix.alignof)); emplace!Suffix(cast(Suffix*)(suffixP)); } return result[stateSize!Prefix .. 
stateSize!Prefix + bytes]; } static if (__traits(hasMember, Allocator, "allocateAll")) void[] allocateAll() { auto result = parent.allocateAll(); if (result is null) return null; if (result.length < actualAllocationSize(1)) { deallocate(result); return null; } static if (stateSize!Prefix) { assert(result.length > stateSize!Prefix); emplace!Prefix(cast(Prefix*) result.ptr); result = result[stateSize!Prefix .. $]; } static if (stateSize!Suffix) { assert(result.length > stateSize!Suffix); // Ehm, find a properly aligned place for the suffix auto p = (result.ptr + result.length - stateSize!Suffix) .alignDownTo(Suffix.alignof); assert(p > result.ptr); emplace!Suffix(cast(Suffix*) p); result = result[0 .. p - result.ptr]; } return result; } static if (__traits(hasMember, Allocator, "owns")) Ternary owns(void[] b) { if (b is null) return Ternary.no; return parent.owns(actualAllocation(b)); } static if (__traits(hasMember, Allocator, "resolveInternalPointer")) Ternary resolveInternalPointer(const void* p, ref void[] result) { void[] p1; Ternary r = parent.resolveInternalPointer(p, p1); if (r != Ternary.yes || p1 is null) return r; p1 = p1[stateSize!Prefix .. $]; auto p2 = (p1.ptr + p1.length - stateSize!Suffix) .alignDownTo(Suffix.alignof); result = p1[0 .. p2 - p1.ptr]; return Ternary.yes; } static if (!stateSize!Suffix && __traits(hasMember, Allocator, "expand")) bool expand(ref void[] b, size_t delta) { if (!b.ptr) return delta == 0; auto t = actualAllocation(b); const result = parent.expand(t, delta); if (!result) return false; b = b.ptr[0 .. b.length + delta]; return true; } static if (__traits(hasMember, Allocator, "reallocate")) bool reallocate(ref void[] b, size_t s) { if (b is null) { b = allocate(s); return b.length == s; } auto t = actualAllocation(b); const result = parent.reallocate(t, actualAllocationSize(s)); if (!result) return false; // no harm done b = t.ptr[stateSize!Prefix .. stateSize!Prefix + s]; return true; } static if (__traits(hasMember, Allocator, "deallocate")) bool deallocate(void[] b) { if (!b.ptr) return true; return parent.deallocate(actualAllocation(b)); } /* The following methods are defined if $(D ParentAllocator) defines them, and forward to it: $(D deallocateAll), $(D empty).*/ mixin(forwardToMember("parent", "deallocateAll", "empty")); // Computes suffix type given buffer type private template Payload2Affix(Payload, Affix) { static if (is(Payload[] : void[])) alias Payload2Affix = Affix; else static if (is(Payload[] : shared(void)[])) alias Payload2Affix = shared Affix; else static if (is(Payload[] : immutable(void)[])) alias Payload2Affix = shared Affix; else static if (is(Payload[] : const(shared(void))[])) alias Payload2Affix = shared Affix; else static if (is(Payload[] : const(void)[])) alias Payload2Affix = const Affix; else static assert(0, "Internal error for type " ~ Payload.stringof); } // Extra functions static if (stateSize!Prefix) { static auto ref prefix(T)(T[] b) { assert(b.ptr && b.ptr.alignedAt(Prefix.alignof)); return (cast(Payload2Affix!(T, Prefix)*) b.ptr)[-1]; } } static if (stateSize!Suffix) auto ref suffix(T)(T[] b) { assert(b.ptr); auto p = b.ptr - stateSize!Prefix + actualAllocationSize(b.length); assert(p && p.alignedAt(Suffix.alignof)); return (cast(Payload2Affix!(T, Suffix)*) p)[-1]; } } version (StdDdoc) { /** Standard allocator methods. Each is defined if and only if the parent allocator defines the homonym method (except for $(D goodAllocSize), which may use the global default). 
Also, the methods will be $(D shared) if the parent allocator defines them as
such.
*/
size_t goodAllocSize(size_t);
/// Ditto
void[] allocate(size_t);
/// Ditto
Ternary owns(void[]);
/// Ditto
bool expand(ref void[] b, size_t delta);
/// Ditto
bool reallocate(ref void[] b, size_t s);
/// Ditto
bool deallocate(void[] b);
/// Ditto
bool deallocateAll();
/// Ditto
Ternary empty();

/**
The `instance` singleton is defined if and only if the parent allocator has no
state and defines its own `instance` object.
*/
static AffixAllocator instance;

/**
Affix access functions offering references to the affixes of a block `b`
previously allocated with this allocator. `b` may not be null. They are
defined if and only if the corresponding affix is not `void`.

The qualifiers of the affix are not always the same as the qualifiers of the
argument. This is because the affixes are not part of the data itself, but
instead are just $(I associated) with the data and known to the allocator.
The table below documents the type of `prefix(b)` and `suffix(b)` depending on
the type of `b`.

$(BOOKTABLE Result of `prefix`/`suffix` depending on argument (`U` is any
unqualified type, `Affix` is `Prefix` or `Suffix`),
$(TR $(TH Argument$(NBSP)Type) $(TH Return) $(TH Comments))
$(TR $(TD `shared(U)[]`) $(TD `ref shared Affix`)
$(TD Data is shared across threads and the affix follows suit.))
$(TR $(TD `immutable(U)[]`) $(TD `ref shared Affix`)
$(TD Although the data is immutable, the allocator "knows" the underlying
memory is mutable, so `immutable` is elided for the affix, which is independent
from the data itself. However, the result is `shared` because `immutable` is
implicitly shareable, so multiple threads may access and manipulate the affix
for the same data.))
$(TR $(TD `const(shared(U))[]`) $(TD `ref shared Affix`)
$(TD The data is always shareable across threads. Even if the data is `const`,
the affix is modifiable by the same reasoning as for `immutable`.))
$(TR $(TD `const(U)[]`) $(TD `ref const Affix`)
$(TD The input may have originated from `U[]` or `immutable(U)[]`, so it may be
actually shared or not. Returning an unqualified affix may result in race
conditions, whereas returning a `shared` affix may result in inadvertent
sharing of mutable thread-local data across multiple threads. So the returned
type is conservatively `ref const`.))
$(TR $(TD `U[]`) $(TD `ref Affix`)
$(TD Unqualified data has unqualified affixes.))
)

Precondition: `b !is null` and `b` must have been allocated with this
allocator.
*/
static ref auto prefix(T)(T[] b);
/// Ditto
ref auto suffix(T)(T[] b);
}
else static if (is(typeof(Allocator.instance) == shared))
{ // for backward compatibility
enum shared AffixAllocator instance = AffixAllocator();
static { mixin Impl!true; }
}
else
{
static if (stateSize!Allocator == 0)
{
enum AffixAllocator instance = AffixAllocator();
static { mixin Impl!true; }
}
else
{
mixin Impl!false;
}
}
}

///
@system unittest
{
import stdx.allocator.mallocator : Mallocator;
// One word before and after each allocation.
alias A = AffixAllocator!(Mallocator, size_t, size_t);
auto b = A.instance.allocate(11);
A.instance.prefix(b) = 0xCAFE_BABE;
A.instance.suffix(b) = 0xDEAD_BEEF;
assert(A.instance.prefix(b) == 0xCAFE_BABE
&& A.instance.suffix(b) == 0xDEAD_BEEF);
}

@system unittest
{
import stdx.allocator.gc_allocator : GCAllocator;
import stdx.allocator : theAllocator, IAllocator;
// One word before and after each allocation.
auto A = AffixAllocator!(IAllocator, size_t, size_t)(theAllocator);
auto a = A.allocate(11);
A.prefix(a) = 0xCAFE_BABE;
A.suffix(a) = 0xDEAD_BEEF;
assert(A.prefix(a) == 0xCAFE_BABE && A.suffix(a) == 0xDEAD_BEEF);

// One word before and after each allocation.
auto B = AffixAllocator!(IAllocator, size_t, size_t)();
auto b = B.allocate(11);
B.prefix(b) = 0xCAFE_BABE;
B.suffix(b) = 0xDEAD_BEEF;
assert(B.prefix(b) == 0xCAFE_BABE && B.suffix(b) == 0xDEAD_BEEF);
}

@system unittest
{
import stdx.allocator.building_blocks.bitmapped_block : BitmappedBlock;
import stdx.allocator.common : testAllocator;
testAllocator!({
auto a = AffixAllocator!(BitmappedBlock!128, ulong, ulong)
(BitmappedBlock!128(new ubyte[128 * 4096]));
return a;
});
}

@system unittest
{
import stdx.allocator.mallocator : Mallocator;
alias A = AffixAllocator!(Mallocator, size_t);
auto b = A.instance.allocate(10);
A.instance.prefix(b) = 10;
assert(A.instance.prefix(b) == 10);

import stdx.allocator.building_blocks.null_allocator : NullAllocator;
alias B = AffixAllocator!(NullAllocator, size_t);
b = B.instance.allocate(100);
assert(b is null);
}

@system unittest
{
import stdx.allocator;
import stdx.allocator.gc_allocator;
import stdx.allocator.internal : Ternary;

alias MyAllocator = AffixAllocator!(GCAllocator, uint);

auto a = MyAllocator.instance.makeArray!(shared int)(100);
static assert(is(typeof(&MyAllocator.instance.prefix(a)) == shared(uint)*));

auto b = MyAllocator.instance.makeArray!(shared const int)(100);
static assert(is(typeof(&MyAllocator.instance.prefix(b)) == shared(uint)*));

auto c = MyAllocator.instance.makeArray!(immutable int)(100);
static assert(is(typeof(&MyAllocator.instance.prefix(c)) == shared(uint)*));

auto d = MyAllocator.instance.makeArray!(int)(100);
static assert(is(typeof(&MyAllocator.instance.prefix(d)) == uint*));

auto e = MyAllocator.instance.makeArray!(const int)(100);
static assert(is(typeof(&MyAllocator.instance.prefix(e)) == const(uint)*));

void[] p;
assert(MyAllocator.instance.resolveInternalPointer(null, p) == Ternary.no);
Ternary r = MyAllocator.instance.resolveInternalPointer(d.ptr, p);
assert(p.ptr is d.ptr && p.length >= d.length);
}

stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/allocator_list.d

///
module stdx.allocator.building_blocks.allocator_list;

import stdx.allocator.building_blocks.null_allocator;
import stdx.allocator.common;
import stdx.allocator.gc_allocator;
version(unittest) import std.stdio;

// Turn this on for debugging
// debug = allocator_list;

/**
Given an $(LINK2 https://en.wikipedia.org/wiki/Factory_(object-oriented_programming),
object factory) of type `Factory` or a factory function `factoryFunction`, and
optionally also `BookkeepingAllocator` as a supplemental allocator for
bookkeeping, `AllocatorList` creates an allocator that lazily creates as many
allocators as needed to satisfy client allocation requests.

An embedded list builds a most-recently-used strategy: the most recent
allocators used in calls to either `allocate`, `owns` (successful calls only),
or `deallocate` are tried for new allocations in order of their most recent
use. Thus, although core operations take in theory $(BIGOH k) time for $(D k)
allocators in current use, in many workloads the factor is sublinear. Details
of the actual strategy may change in future releases.

`AllocatorList` is primarily intended for coarse-grained handling of
allocators, i.e.
the number of allocators in the list is expected to be
relatively small compared to the number of allocations handled by each
allocator. However, the per-allocator overhead is small, so using
`AllocatorList` with a large number of allocators should be satisfactory as
long as the most-recently-used strategy is fast enough for the application.

`AllocatorList` makes an effort to return allocated memory back when no longer
used. It does so by destroying empty allocators. However, in order to avoid
thrashing (excessive creation/destruction of allocators under certain use
patterns), it keeps unused allocators for a while.

Params:
factoryFunction = A function or template function (including function
literals). New allocators are created by calling `factoryFunction(n)` with
strictly positive numbers `n`. Delegates that capture their environment are
not accepted amid concerns regarding garbage creation for the environment.
When the factory needs state, a `Factory` object should be used.

BookkeepingAllocator = Allocator used for storing bookkeeping data. The size
of bookkeeping data is proportional to the number of allocators. If
$(D BookkeepingAllocator) is $(D NullAllocator), then $(D AllocatorList) is
"ouroboros-style", i.e. it keeps the bookkeeping data in memory obtained from
the allocators themselves. Note that for ouroboros-style management, the size
$(D n) passed to $(D make) will be occasionally different from the size
requested by client code.

Factory = Type of a factory object that returns new allocators on a need
basis. For an object $(D sweatshop) of type $(D Factory), `sweatshop(n)` should
return an allocator able to allocate at least `n` bytes (i.e. `Factory` must
define `opCall(size_t)` to return an allocator object). Usually the capacity of
allocators created should be much larger than $(D n) such that an allocator can
be used for many subsequent allocations. $(D n) is passed only to ensure the
minimum necessary for the next allocation. The factory object is allowed to
hold state, which will be stored inside `AllocatorList` as a direct `public`
member called `factory`.
*/
struct AllocatorList(Factory, BookkeepingAllocator = GCAllocator)
{
import mir.conv : emplace;
import stdx.allocator.building_blocks.stats_collector : StatsCollector, Options;
import stdx.allocator.internal : Ternary;

private enum ouroboros = is(BookkeepingAllocator == NullAllocator);

/**
Alias for `typeof(Factory()(1))`, i.e. the type of the individual
allocators.
*/
alias Allocator = typeof(Factory.init(size_t(1)));
// Allocator used internally
private alias SAllocator = StatsCollector!(Allocator, Options.bytesUsed);

private static struct Node
{
// Allocator in this node
SAllocator a;
Node* next;

@disable this(this);

// Is this node unused?
void setUnused() { next = &this; }
bool unused() const { return next is &this; }

// Just forward everything to the allocator
alias a this;
}

/**
If $(D BookkeepingAllocator) is not $(D NullAllocator), $(D bkalloc) is
defined and accessible.
*/

// State is stored in an array, but it has a list threaded through it by
// means of the nodes' `next` pointers.
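// An illustrative sketch of the bring-to-front discipline (hypothetical
// state with three live nodes):
//   root -> allocators[2] -> allocators[0] -> allocators[1]
// After allocators[0] satisfies an allocate/owns/deallocate call, it is
// moved to the front:
//   root -> allocators[0] -> allocators[2] -> allocators[1]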
// state static if (!ouroboros) { static if (stateSize!BookkeepingAllocator) BookkeepingAllocator bkalloc; else alias bkalloc = BookkeepingAllocator.instance; } static if (stateSize!Factory) { Factory factory; } private Node[] allocators; private Node* root; static if (stateSize!Factory) { private auto make(size_t n) { return factory(n); } } else { private auto make(size_t n) { Factory f; return f(n); } } /** Constructs an `AllocatorList` given a factory object. This constructor is defined only if `Factory` has state. */ static if (stateSize!Factory) this(ref Factory plant) { factory = plant; } /// Ditto static if (stateSize!Factory) this(Factory plant) { factory = plant; } static if (__traits(hasMember, Allocator, "deallocateAll") && __traits(hasMember, Allocator, "owns")) ~this() { deallocateAll; } /** The alignment offered. */ enum uint alignment = Allocator.alignment; /** Allocate a block of size $(D s). First tries to allocate from the existing list of already-created allocators. If neither can satisfy the request, creates a new allocator by calling $(D make(s)) and delegates the request to it. However, if the allocation fresh off a newly created allocator fails, subsequent calls to $(D allocate) will not cause more calls to $(D make). */ void[] allocate(size_t s) { for (auto p = &root, n = *p; n; p = &n.next, n = *p) { auto result = n.allocate(s); if (result.length != s) continue; // Bring to front if not already if (root != n) { *p = n.next; n.next = root; root = n; } return result; } // Can't allocate from the current pool. Check if we just added a new // allocator, in that case it won't do any good to add yet another. if (root && root.empty == Ternary.yes) { // no can do return null; } // Add a new allocator if (auto a = addAllocator(s)) { auto result = a.allocate(s); assert(owns(result) == Ternary.yes || !result.ptr); return result; } return null; } private void moveAllocators(void[] newPlace) { assert(newPlace.ptr.alignedAt(Node.alignof)); assert(newPlace.length % Node.sizeof == 0); auto newAllocators = cast(Node[]) newPlace; assert(allocators.length <= newAllocators.length); // Move allocators foreach (i, ref e; allocators) { if (e.unused) { newAllocators[i].setUnused; continue; } import core.stdc.string : memcpy; memcpy(&newAllocators[i].a, &e.a, e.a.sizeof); if (e.next) { newAllocators[i].next = newAllocators.ptr + (e.next - allocators.ptr); } else { newAllocators[i].next = null; } } // Mark the unused portion as unused foreach (i; allocators.length .. 
newAllocators.length) { newAllocators[i].setUnused; } auto toFree = allocators; // Change state root = newAllocators.ptr + (root - allocators.ptr); allocators = newAllocators; // Free the olden buffer static if (ouroboros) { static if (__traits(hasMember, Allocator, "deallocate") && __traits(hasMember, Allocator, "owns")) deallocate(toFree); } else { bkalloc.deallocate(toFree); } } static if (ouroboros) private Node* addAllocator(size_t atLeastBytes) { void[] t = allocators; static if (__traits(hasMember, Allocator, "expand") && __traits(hasMember, Allocator, "owns")) { immutable bool expanded = t && this.expand(t, Node.sizeof); } else { enum expanded = false; } if (expanded) { import core.stdc.string : memcpy; assert(t.length % Node.sizeof == 0); assert(t.ptr.alignedAt(Node.alignof)); allocators = cast(Node[]) t; allocators[$ - 1].setUnused; auto newAlloc = SAllocator(make(atLeastBytes)); memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); emplace(&newAlloc); } else { immutable toAlloc = (allocators.length + 1) * Node.sizeof + atLeastBytes + 128; auto newAlloc = SAllocator(make(toAlloc)); auto newPlace = newAlloc.allocate( (allocators.length + 1) * Node.sizeof); if (!newPlace) return null; moveAllocators(newPlace); import core.stdc.string : memcpy; memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); emplace(&newAlloc); assert(allocators[$ - 1].owns(allocators) == Ternary.yes); } // Insert as new root if (root != &allocators[$ - 1]) { allocators[$ - 1].next = root; root = &allocators[$ - 1]; } else { // This is the first one root.next = null; } assert(!root.unused); return root; } static if (!ouroboros) private Node* addAllocator(size_t atLeastBytes) { void[] t = allocators; static if (__traits(hasMember, BookkeepingAllocator, "expand")) immutable bool expanded = bkalloc.expand(t, Node.sizeof); else immutable bool expanded = false; if (expanded) { assert(t.length % Node.sizeof == 0); assert(t.ptr.alignedAt(Node.alignof)); allocators = cast(Node[]) t; allocators[$ - 1].setUnused; } else { // Could not expand, create a new block t = bkalloc.allocate((allocators.length + 1) * Node.sizeof); assert(t.length % Node.sizeof == 0); if (!t.ptr) return null; moveAllocators(t); } assert(allocators[$ - 1].unused); auto newAlloc = SAllocator(make(atLeastBytes)); import core.stdc.string : memcpy; memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); emplace(&newAlloc); // Creation succeeded, insert as root if (allocators.length == 1) allocators[$ - 1].next = null; else allocators[$ - 1].next = root; assert(allocators[$ - 1].a.bytesUsed == 0); root = &allocators[$ - 1]; return root; } /** Defined only if `Allocator` defines `owns`. Tries each allocator in turn, in most-recently-used order. If the owner is found, it is moved to the front of the list as a side effect under the assumption it will be used soon. Returns: `Ternary.yes` if one allocator was found to return `Ternary.yes`, `Ternary.no` if all component allocators returned `Ternary.no`, and `Ternary.unknown` if no allocator returned `Ternary.yes` and at least one returned `Ternary.unknown`. 
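For instance (a sketch; assumes components, such as `Region`, whose `owns`
answers precisely):
---
auto b = a.allocate(100);
assert(a.owns(b) == Ternary.yes);  // b came from one of the components
ubyte[64] stackBuf;
assert(a.owns(stackBuf[]) == Ternary.no);  // foreign memory
---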
*/
static if (__traits(hasMember, Allocator, "owns"))
Ternary owns(void[] b)
{
auto result = Ternary.no;
for (auto p = &root, n = *p; n; p = &n.next, n = *p)
{
immutable t = n.owns(b);
if (t != Ternary.yes)
{
if (t == Ternary.unknown) result = t;
continue;
}
// Move the owner to front, speculating it'll be used
if (n != root)
{
*p = n.next;
n.next = root;
root = n;
}
return Ternary.yes;
}
return result;
}

/**
Defined only if $(D Allocator.expand) is defined. Finds the owner of $(D b)
and calls $(D expand) for it. The owner is not brought to the head of the
list.
*/
static if (__traits(hasMember, Allocator, "expand")
&& __traits(hasMember, Allocator, "owns"))
bool expand(ref void[] b, size_t delta)
{
if (!b.ptr) return delta == 0;
for (auto p = &root, n = *p; n; p = &n.next, n = *p)
{
if (n.owns(b) == Ternary.yes) return n.expand(b, delta);
}
return false;
}

/**
Defined only if $(D Allocator.reallocate) is defined. Finds the owner of
$(D b) and calls $(D reallocate) for it. If that fails, calls the global
$(D reallocate), which allocates a new block and moves memory.
*/
static if (__traits(hasMember, Allocator, "reallocate"))
bool reallocate(ref void[] b, size_t s)
{
// First attempt to reallocate within the existing node
if (!b.ptr)
{
b = allocate(s);
return b.length == s;
}
for (auto p = &root, n = *p; n; p = &n.next, n = *p)
{
if (n.owns(b) == Ternary.yes) return n.reallocate(b, s);
}
// Failed, but we may find new memory in a new node.
return .reallocate(this, b, s);
}

/**
Defined if $(D Allocator.deallocate) and $(D Allocator.owns) are defined.
*/
static if (__traits(hasMember, Allocator, "deallocate")
&& __traits(hasMember, Allocator, "owns"))
bool deallocate(void[] b)
{
if (!b.ptr) return true;
assert(allocators.length);
assert(owns(b) == Ternary.yes);
bool result;
for (auto p = &root, n = *p; ; p = &n.next, n = *p)
{
assert(n);
if (n.owns(b) != Ternary.yes) continue;
result = n.deallocate(b);
// Bring to front
if (n != root)
{
*p = n.next;
n.next = root;
root = n;
}
if (n.empty != Ternary.yes) return result;
break;
}
// Should we return this allocator back to the wild? To avoid thrashing,
// only release ONE empty allocator, and only when there are at least TWO
// of them.
// Note that the loop starts from the second element.
for (auto p = &root.next, n = *p; n; p = &n.next, n = *p)
{
if (n.unused || n.empty != Ternary.yes) continue;
// Used and empty: release it.
n.a.destroy;
*p = n.next;
n.setUnused;
break;
}
return result;
}

/**
Defined only if $(D Allocator.owns) and $(D Allocator.deallocateAll) are
defined.
*/
static if (ouroboros && __traits(hasMember, Allocator, "deallocateAll")
&& __traits(hasMember, Allocator, "owns"))
bool deallocateAll()
{
Node* special;
foreach (ref n; allocators)
{
if (n.unused) continue;
if (n.owns(allocators) == Ternary.yes)
{
special = &n;
continue;
}
n.a.deallocateAll;
n.a.destroy;
}
assert(special || !allocators.ptr);
if (special)
{
special.deallocate(allocators);
}
allocators = null;
root = null;
return true;
}

static if (!ouroboros && __traits(hasMember, Allocator, "deallocateAll")
&& __traits(hasMember, Allocator, "owns"))
bool deallocateAll()
{
foreach (ref n; allocators)
{
if (n.unused) continue;
n.a.deallocateAll;
n.a.destroy;
}
bkalloc.deallocate(allocators);
allocators = null;
root = null;
return true;
}

/**
Returns `Ternary.yes` if no allocators are currently active, `Ternary.no`
otherwise. This method never returns `Ternary.unknown`.
*/ Ternary empty() const { return Ternary(!allocators.length); } } /// Ditto template AllocatorList(alias factoryFunction, BookkeepingAllocator = GCAllocator) { alias A = typeof(factoryFunction(size_t(1))); static assert( // is a template function (including literals) is(typeof({A function(size_t) @system x = factoryFunction!size_t;})) || // or a function (including literals) is(typeof({A function(size_t) @system x = factoryFunction;})) , "Only function names and function literals that take size_t" ~ " and return an allocator are accepted, not " ~ typeof(factoryFunction).stringof ); static struct Factory { A opCall(size_t n) { return factoryFunction(n); } } alias AllocatorList = .AllocatorList!(Factory, BookkeepingAllocator); } /// version(Posix) @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.free_list : ContiguousFreeList; import stdx.allocator.building_blocks.null_allocator : NullAllocator; import stdx.allocator.building_blocks.region : Region; import stdx.allocator.building_blocks.segregator : Segregator; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mmap_allocator : MmapAllocator; // Ouroboros allocator list based upon 4MB regions, fetched directly from // mmap. All memory is released upon destruction. alias A1 = AllocatorList!((n) => Region!MmapAllocator(max(n, 1024u * 4096u)), NullAllocator); // Allocator list based upon 4MB regions, fetched from the garbage // collector. All memory is released upon destruction. alias A2 = AllocatorList!((n) => Region!GCAllocator(max(n, 1024u * 4096u))); // Ouroboros allocator list based upon 4MB regions, fetched from the garbage // collector. Memory is left to the collector. alias A3 = AllocatorList!( (n) => Region!NullAllocator(new ubyte[max(n, 1024u * 4096u)]), NullAllocator); // Allocator list that creates one freelist for all objects alias A4 = Segregator!( 64, AllocatorList!( (n) => ContiguousFreeList!(NullAllocator, 0, 64)( cast(ubyte[])(GCAllocator.instance.allocate(4096)))), GCAllocator); A4 a; auto small = a.allocate(64); assert(small); a.deallocate(small); auto b1 = a.allocate(1024 * 8192); assert(b1 !is null); // still works due to overdimensioning b1 = a.allocate(1024 * 10); assert(b1.length == 1024 * 10); } @system unittest { // Create an allocator based upon 4MB regions, fetched from the GC heap. import mir.utility : max; import stdx.allocator.building_blocks.region : Region; AllocatorList!((n) => Region!GCAllocator(new ubyte[max(n, 1024u * 4096u)]), NullAllocator) a; const b1 = a.allocate(1024 * 8192); assert(b1 !is null); // still works due to overdimensioning const b2 = a.allocate(1024 * 10); assert(b2.length == 1024 * 10); a.deallocateAll(); } @system unittest { // Create an allocator based upon 4MB regions, fetched from the GC heap. 
import mir.utility : max;
import stdx.allocator.building_blocks.region : Region;
AllocatorList!((n) => Region!()(new ubyte[max(n, 1024u * 4096u)])) a;
auto b1 = a.allocate(1024 * 8192);
assert(b1 !is null); // still works due to overdimensioning
b1 = a.allocate(1024 * 10);
assert(b1.length == 1024 * 10);
a.deallocateAll();
}

@system unittest
{
import mir.utility : max;
import stdx.allocator.building_blocks.region : Region;
import stdx.allocator.internal : Ternary;
AllocatorList!((n) => Region!()(new ubyte[max(n, 1024u * 4096u)])) a;
auto b1 = a.allocate(1024 * 8192);
assert(b1 !is null);
b1 = a.allocate(1024 * 10);
assert(b1.length == 1024 * 10);
a.allocate(1024 * 4095);
a.deallocateAll();
assert(a.empty == Ternary.yes);
}

@system unittest
{
import stdx.allocator.building_blocks.region : Region;
enum bs = GCAllocator.alignment;
AllocatorList!((n) => Region!GCAllocator(256 * bs)) a;
auto b1 = a.allocate(192 * bs);
assert(b1.length == 192 * bs);
assert(a.allocators.length == 1);
auto b2 = a.allocate(64 * bs);
assert(b2.length == 64 * bs);
assert(a.allocators.length == 1);
auto b3 = a.allocate(192 * bs);
assert(b3.length == 192 * bs);
assert(a.allocators.length == 2);
a.deallocate(b1);
b1 = a.allocate(64 * bs);
assert(b1.length == 64 * bs);
assert(a.allocators.length == 2);
a.deallocateAll();
}

stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/bitmapped_block.d

///
module stdx.allocator.building_blocks.bitmapped_block;

import stdx.allocator.building_blocks.null_allocator;
import stdx.allocator.common;

/**
$(D BitmappedBlock) implements a simple heap consisting of one contiguous area
of memory organized in blocks, each of size $(D theBlockSize). A block is a
unit of allocation. A bitmap serves as bookkeeping data, more precisely one
bit per block indicating whether that block is currently allocated or not.

Passing $(D NullAllocator) as $(D ParentAllocator) (the default) means user
code manages allocation of the memory block from the outside; in that case
$(D BitmappedBlock) must be constructed with a $(D ubyte[]) preallocated block
and has no responsibility regarding the lifetime of its underlying storage.
If another allocator type is passed, $(D BitmappedBlock) defines a destructor
that uses the parent allocator to release the memory block. That makes the
combination of $(D AllocatorList), $(D BitmappedBlock), and a back-end
allocator such as $(D MmapAllocator) a simple and scalable solution for memory
allocation.

There are advantages to storing bookkeeping data separated from the payload
(as opposed to e.g. using $(D AffixAllocator) to store metadata together with
each allocation). The layout is more compact (overhead is one bit per block),
searching for a free block during allocation enjoys better cache locality, and
deallocation does not touch memory around the payload being deallocated (which
is often cold).

Allocation requests are handled on a first-fit basis. Although linear in
complexity, allocation is in practice fast because of the compact bookkeeping
representation, use of simple and fast bitwise routines, and caching of the
first available block position. A known issue with this general approach is
fragmentation, partially mitigated by coalescing. Since $(D BitmappedBlock)
does not need to maintain the allocated size, freeing memory implicitly
coalesces free blocks together.
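For example, with a statically chosen block size of 64 bytes, request sizes
are rounded up to whole blocks (a sketch; the buffer length is arbitrary):
---
auto a = BitmappedBlock!(64, 64)(new ubyte[64 * 1024]);
assert(a.goodAllocSize(1) == 64);   // one block minimum
assert(a.goodAllocSize(65) == 128); // two blocks
---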
Also, tuning $(D blockSize) has a considerable impact on both internal and
external fragmentation.

The size of each block can be selected either during compilation or at run
time. Statically-known block sizes are frequent in practice and yield slightly
better performance. To choose a block size statically, pass it as the
$(D blockSize) parameter, as in $(D BitmappedBlock!4096). To choose the block
size at run time, use $(D BitmappedBlock!chooseAtRuntime) and pass the block
size to the constructor.
*/
struct BitmappedBlock(size_t theBlockSize, uint theAlignment = platformAlignment,
ParentAllocator = NullAllocator)
{
import stdx.allocator.internal : Ternary;
import mir.functional : RefTuple;

// for internal API only
private alias Tuple = RefTuple!(size_t, uint);

@system unittest
{
import mir.utility : max;
import stdx.allocator.mallocator : AlignedMallocator;
auto m = cast(ubyte[])(AlignedMallocator.instance.alignedAllocate(1024 * 64,
max(theAlignment, cast(uint) size_t.sizeof)));
scope(exit) AlignedMallocator.instance.deallocate(m);
static if (theBlockSize == chooseAtRuntime)
{
testAllocator!(() => BitmappedBlock!(theBlockSize, theAlignment,
NullAllocator)(m, 64));
}
else
{
testAllocator!(() => BitmappedBlock!(theBlockSize, theAlignment,
NullAllocator)(m));
}
}

static assert(theBlockSize > 0 && theAlignment.isGoodStaticAlignment);
static assert(theBlockSize == chooseAtRuntime
|| theBlockSize % theAlignment == 0,
"Block size must be a multiple of the alignment");

/**
If $(D blockSize == chooseAtRuntime), $(D BitmappedBlock) offers a read/write
property $(D blockSize). It must be set before any use of the allocator.
Otherwise (i.e. $(D theBlockSize) is a compile-time constant), $(D blockSize)
is an alias for $(D theBlockSize). Whether constant or variable, it must also
be a multiple of $(D alignment). This constraint is $(D assert)ed statically
and dynamically.
*/
static if (theBlockSize != chooseAtRuntime)
{
alias blockSize = theBlockSize;
}
else
{
@property uint blockSize() { return _blockSize; }
@property void blockSize(uint s)
{
assert(_control.allAre0() && s % alignment == 0);
_blockSize = s;
}
private uint _blockSize;
}

static if (is(ParentAllocator == NullAllocator))
{
private enum parentAlignment = platformAlignment;
}
else
{
private alias parentAlignment = ParentAllocator.alignment;
static assert(parentAlignment >= ulong.alignof);
}

/**
The _alignment offered is user-configurable statically through parameter
$(D theAlignment), defaulted to $(D platformAlignment).
*/
alias alignment = theAlignment;

// state {
/**
The _parent allocator. Depending on whether $(D ParentAllocator) holds state
or not, this is a member variable or an alias for `ParentAllocator.instance`.
*/
static if (stateSize!ParentAllocator)
{
ParentAllocator parent;
}
else
{
alias parent = ParentAllocator.instance;
}
private size_t _blocks;
private BitVector _control;
private void[] _payload;
private size_t _startIdx;
// }

private size_t totalAllocation(size_t capacity)
{
auto blocks = capacity.divideRoundUp(blockSize);
auto leadingUlongs = blocks.divideRoundUp(64);
import mir.utility : min;
immutable initialAlignment = min(parentAlignment,
1U << trailingZeros(leadingUlongs * 8));
auto maxSlack = alignment <= initialAlignment
? 0
: alignment - initialAlignment;
//writeln(maxSlack);
return leadingUlongs * 8 + maxSlack + blockSize * blocks;
}

/**
Constructs a block allocator given a hunk of memory, or a desired capacity
in bytes.
$(UL $(LI If $(D ParentAllocator) is $(D NullAllocator), only the constructor taking $(D data) is defined and the user is responsible for freeing $(D data) if desired.) $(LI Otherwise, both constructors are defined. The $(D data)-based constructor assumes memory has been allocated with the parent allocator. The $(D capacity)-based constructor uses $(D ParentAllocator) to allocate an appropriate contiguous hunk of memory. Regardless of the constructor used, the destructor releases the memory by using $(D ParentAllocator.deallocate).) ) */ this(ubyte[] data) { immutable a = data.ptr.effectiveAlignment; assert(a >= size_t.alignof || !data.ptr, "Data must be aligned properly"); immutable ulong totalBits = data.length * 8; immutable ulong bitsPerBlock = blockSize * 8 + 1; // Get a first estimate _blocks = cast(size_t)(totalBits / bitsPerBlock); // Reality is a bit more complicated, iterate until a good number of // blocks found. for (; _blocks; --_blocks) { immutable controlWords = _blocks.divideRoundUp(64); auto payload = data[controlWords * 8 .. $].roundStartToMultipleOf( alignment); if (payload.length < _blocks * blockSize) { // Overestimated continue; } _control = BitVector((cast(ulong*) data.ptr)[0 .. controlWords]); _control[] = 0; _payload = payload; break; } } /// Ditto static if (!is(ParentAllocator == NullAllocator)) this(size_t capacity) { size_t toAllocate = totalAllocation(capacity); auto data = cast(ubyte[])(parent.allocate(toAllocate)); this(data); assert(_blocks * blockSize >= capacity); } static if (chooseAtRuntime == theBlockSize) this(ubyte[] data, uint blockSize) { this._blockSize = blockSize; this(data); } /** If $(D ParentAllocator) is not $(D NullAllocator) and defines $(D deallocate), the destructor is defined to deallocate the block held. */ static if (!is(ParentAllocator == NullAllocator) && __traits(hasMember, ParentAllocator, "deallocate")) ~this() { auto start = _control.rep.ptr, end = cast(ulong*)(_payload.ptr + _payload.length); parent.deallocate(start[0 .. end - start]); } /* Adjusts the memoized _startIdx to the leftmost control word that has at least one zero bit. Assumes all control words to the left of $(D _control[_startIdx]) are already occupied. */ private void adjustStartIdx() { while (_startIdx < _control.rep.length && _control.rep[_startIdx] == ulong.max) { ++_startIdx; } } /* Returns the blocks corresponding to the control bits starting at word index wordIdx and bit index msbIdx (MSB=0) for a total of howManyBlocks. */ private void[] blocksFor(size_t wordIdx, uint msbIdx, size_t howManyBlocks) { assert(msbIdx <= 63); const start = (wordIdx * 64 + msbIdx) * blockSize; const end = start + blockSize * howManyBlocks; if (end <= _payload.length) return _payload[start .. end]; // This could happen if we have more control bits than available memory. // That's possible because the control bits are rounded up to fit in // 64-bit words. return null; } /** Returns the actual bytes allocated when $(D n) bytes are requested, i.e. $(D n.roundUpToMultipleOf(blockSize)). */ size_t goodAllocSize(size_t n) { return n.roundUpToMultipleOf(blockSize); } /** Allocates $(D s) bytes of memory and returns it, or $(D null) if memory could not be allocated. The following information might be of help with choosing the appropriate block size. Actual allocation occurs in sizes multiple of the block size. Allocating one block is the fastest because only one 0 bit needs to be found in the metadata. 
Allocating 2 through 64 blocks is the next cheapest because it affects a
maximum of two $(D ulong)s in the metadata. Allocations greater than 64 blocks
require a multiword search through the metadata.
*/
@trusted void[] allocate(const size_t s)
{
const blocks = s.divideRoundUp(blockSize);
void[] result = void;

switcharoo:
switch (blocks)
{
case 1:
// inline code here for speed
// find the next available block
foreach (i; _startIdx .. _control.rep.length)
{
const w = _control.rep[i];
if (w == ulong.max) continue;
uint j = leadingOnes(w);
assert(j < 64);
assert((_control.rep[i] & ((1UL << 63) >> j)) == 0);
_control.rep[i] |= (1UL << 63) >> j;
if (i == _startIdx)
{
adjustStartIdx();
}
result = blocksFor(i, j, 1);
break switcharoo;
}
goto case 0; // fall through
case 0:
return null;
case 2: .. case 64:
result = smallAlloc(cast(uint) blocks);
break;
default:
result = hugeAlloc(blocks);
break;
}
return result.ptr ? result.ptr[0 .. s] : null;
}

/**
Allocates a block with specified alignment $(D a). The alignment must be a
power of 2. If $(D a <= alignment), the function forwards to $(D allocate).
Otherwise, it attempts to overallocate and then adjust the result for proper
alignment. In the worst case the slack memory is around two blocks.
*/
void[] alignedAllocate(size_t n, uint a)
{
import stdx.allocator.internal : isPowerOf2;
assert(a.isPowerOf2);
if (a <= alignment) return allocate(n);

// Overallocate to make sure we can get an aligned block
auto b = allocate((n + a - alignment).roundUpToMultipleOf(blockSize));
if (!b.ptr) return null;
auto result = b.roundStartToMultipleOf(a);
assert(result.length >= n);
result = result.ptr[0 .. n]; // final result

// Free any blocks that might be slack at the beginning
auto slackHeadingBlocks = (result.ptr - b.ptr) / blockSize;
if (slackHeadingBlocks)
{
deallocate(b[0 .. slackHeadingBlocks * blockSize]);
}

// Free any blocks that might be slack at the end
auto slackTrailingBlocks = ((b.ptr + b.length)
- (result.ptr + result.length)) / blockSize;
if (slackTrailingBlocks)
{
deallocate(b[$ - slackTrailingBlocks * blockSize .. $]);
}

return result;
}

/**
If the $(D BitmappedBlock) object is empty (has no active allocation),
allocates all memory within and returns a slice to it. Otherwise, returns
$(D null) (i.e. no attempt is made to allocate the largest available block).
*/
void[] allocateAll()
{
if (empty != Ternary.yes) return null;
_control[] = 1;
return _payload;
}

/**
Returns `Ternary.yes` if `b` belongs to the `BitmappedBlock` object,
`Ternary.no` otherwise. Never returns `Ternary.unknown`. (This method is
somewhat tolerant in that it accepts an interior slice.)
*/
Ternary owns(void[] b) const
{
//if (!b.ptr) return Ternary.no;
assert(b.ptr !is null || b.length == 0, "Corrupt block.");
return Ternary(b.ptr >= _payload.ptr
&& b.ptr + b.length <= _payload.ptr + _payload.length);
}

/*
Tries to allocate "blocks" blocks at the exact position indicated by
wordIdx/msbIdx (msbIdx counts from the MSB, i.e. the MSB has index 0). If it
succeeds, fills "result" with the result and returns tuple(size_t.max, 0).
Otherwise, returns a tuple with the next position to search.
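For example, a request for 3 blocks at wordIdx = 0, msbIdx = 62 does not fit
in the two bits remaining in word 0, so it must span into word 1; if either of
word 0's two trailing bits is already set, the result is tuple(1, 0), telling
the caller to resume the search at word 1.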
*/ private Tuple allocateAt(size_t wordIdx, uint msbIdx, size_t blocks, ref void[] result) { assert(blocks > 0); assert(wordIdx < _control.rep.length); assert(msbIdx <= 63); if (msbIdx + blocks <= 64) { // Allocation should fit this control word if (setBitsIfZero(_control.rep[wordIdx], cast(uint) (64 - msbIdx - blocks), 63 - msbIdx)) { // Success result = blocksFor(wordIdx, msbIdx, blocks); return Tuple(size_t.max, 0u); } // Can't allocate, make a suggestion return msbIdx + blocks == 64 ? Tuple(wordIdx + 1, 0u) : Tuple(wordIdx, cast(uint) (msbIdx + blocks)); } // Allocation spans two control words or more immutable mask = ulong.max >> msbIdx; if (_control.rep[wordIdx] & mask) { // We can't allocate the rest of this control word, // return a suggestion. return Tuple(wordIdx + 1, 0u); } // We can allocate the rest of this control word, but we first need to // make sure we can allocate the tail. if (wordIdx + 1 == _control.rep.length) { // No more memory return Tuple(_control.rep.length, 0u); } auto hint = allocateAt(wordIdx + 1, 0, blocks - 64 + msbIdx, result); if (hint[0] == size_t.max) { // We did it! _control.rep[wordIdx] |= mask; result = blocksFor(wordIdx, msbIdx, blocks); return Tuple(size_t.max, 0u); } // Failed, return a suggestion that skips this whole run. return hint; } /* Allocates as many blocks as possible at the end of the blocks indicated by wordIdx. Returns the number of blocks allocated. */ private uint allocateAtTail(size_t wordIdx) { assert(wordIdx < _control.rep.length); const available = trailingZeros(_control.rep[wordIdx]); _control.rep[wordIdx] |= ulong.max >> available; return available; } private void[] smallAlloc(uint blocks) { assert(blocks >= 2 && blocks <= 64); foreach (i; _startIdx .. _control.rep.length) { // Test within the current 64-bit word const v = _control.rep[i]; if (v == ulong.max) continue; auto j = findContigOnes(~v, blocks); if (j < 64) { // yay, found stuff setBits(_control.rep[i], 64 - j - blocks, 63 - j); return blocksFor(i, j, blocks); } // Next, try allocations that cross a word auto available = trailingZeros(v); if (available == 0) continue; if (i + 1 >= _control.rep.length) break; assert(available < blocks); // otherwise we should have found it auto needed = blocks - available; assert(needed > 0 && needed < 64); if (allocateAtFront(i + 1, needed)) { // yay, found a block crossing two words _control.rep[i] |= (1UL << available) - 1; return blocksFor(i, 64 - available, blocks); } } return null; } private void[] hugeAlloc(size_t blocks) { assert(blocks > 64); if (_startIdx == _control._rep.length) { assert(_control.allAre1); return null; } auto i = _control.findZeros(blocks, _startIdx * 64); if (i == i.max) return null; // Allocate those bits _control[i .. i + blocks] = 1; return _payload[cast(size_t) (i * blockSize) .. cast(size_t) ((i + blocks) * blockSize)]; } // Rounds sizeInBytes to a multiple of blockSize. private size_t bytes2blocks(size_t sizeInBytes) { return (sizeInBytes + blockSize - 1) / blockSize; } /* Allocates given blocks at the beginning blocks indicated by wordIdx. Returns true if allocation was possible, false otherwise. */ private bool allocateAtFront(size_t wordIdx, uint blocks) { assert(wordIdx < _control.rep.length && blocks >= 1 && blocks <= 64); const mask = (1UL << (64 - blocks)) - 1; if (_control.rep[wordIdx] > mask) return false; // yay, works _control.rep[wordIdx] |= ~mask; return true; } /** Expands an allocated block in place. 
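A usage sketch (whether the call succeeds depends on whether the blocks
immediately following `b` happen to be free):
---
auto b = a.allocate(100);
if (a.expand(b, 28))
    assert(b.length == 128); // grown in place, no bytes moved
---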
*/ @trusted bool expand(ref void[] b, immutable size_t delta) { // Dispose with trivial corner cases if (delta == 0) return true; if (b is null) return false; /* To simplify matters, refuse to expand buffers that don't start at a block start (this may be the case for blocks allocated with alignedAllocate). */ if ((b.ptr - _payload.ptr) % blockSize) return false; const blocksOld = bytes2blocks(b.length); const blocksNew = bytes2blocks(b.length + delta); assert(blocksOld <= blocksNew); // Possibly we have enough slack at the end of the block! if (blocksOld == blocksNew) { b = b.ptr[0 .. b.length + delta]; return true; } assert((b.ptr - _payload.ptr) % blockSize == 0); const blockIdx = (b.ptr - _payload.ptr) / blockSize; const blockIdxAfter = blockIdx + blocksOld; // Try the maximum const wordIdx = blockIdxAfter / 64, msbIdx = cast(uint) (blockIdxAfter % 64); void[] p; auto hint = allocateAt(wordIdx, msbIdx, blocksNew - blocksOld, p); if (hint[0] != size_t.max) { return false; } // Expansion successful assert(p.ptr == b.ptr + blocksOld * blockSize); b = b.ptr[0 .. b.length + delta]; return true; } /** Reallocates a previously-allocated block. Contractions occur in place. */ @system bool reallocate(ref void[] b, size_t newSize) { if (!b.ptr) { b = allocate(newSize); return b.length == newSize; } if (newSize == 0) { deallocate(b); b = null; return true; } if (newSize < b.length) { // Shrink. Will shrink in place by deallocating the trailing part. auto newCapacity = bytes2blocks(newSize) * blockSize; deallocate(b[newCapacity .. $]); b = b[0 .. newSize]; return true; } // Go the slow route return .reallocate(this, b, newSize); } /** Reallocates a block previously allocated with $(D alignedAllocate). Contractions do not occur in place. */ @system bool alignedReallocate(ref void[] b, size_t newSize, uint a) { if (newSize == 0) { deallocate(b); b = null; return true; } // Go the slow route return .alignedReallocate(this, b, newSize, a); } /** Deallocates a block previously allocated with this allocator. */ bool deallocate(void[] b) { if (b is null) return true; // Locate position immutable pos = b.ptr - _payload.ptr; immutable blockIdx = pos / blockSize; // Adjust pointer, might be inside a block due to alignedAllocate auto begin = _payload.ptr + blockIdx * blockSize, end = b.ptr + b.length; b = begin[0 .. end - begin]; // Round up size to multiple of block size auto blocks = b.length.divideRoundUp(blockSize); // Get into details auto wordIdx = blockIdx / 64, msbIdx = cast(uint) (blockIdx % 64); if (_startIdx > wordIdx) _startIdx = wordIdx; // Three stages: heading bits, full words, leftover bits if (msbIdx) { if (blocks + msbIdx <= 64) { resetBits(_control.rep[wordIdx], cast(uint) (64 - msbIdx - blocks), 63 - msbIdx); return true; } else { _control.rep[wordIdx] &= ulong.max << 64 - msbIdx; blocks -= 64 - msbIdx; ++wordIdx; msbIdx = 0; } } // Stage 2: reset one word at a time for (; blocks >= 64; blocks -= 64) { _control.rep[wordIdx++] = 0; } // Stage 3: deal with leftover bits, if any assert(wordIdx <= _control.rep.length); if (blocks) { _control.rep[wordIdx] &= ulong.max >> blocks; } return true; } /** Forcibly deallocates all memory allocated by this allocator, making it available for further allocations. Does not return memory to $(D ParentAllocator). */ bool deallocateAll() { _control[] = 0; _startIdx = 0; return true; } /** Returns `Ternary.yes` if no memory is currently allocated with this allocator, otherwise `Ternary.no`. This method never returns `Ternary.unknown`. 
*/ Ternary empty() { return Ternary(_control.allAre0()); } debug(std_experimental_allocator_bitmapped_block) void dump()() { import std.stdio : writefln, writeln; writefln("%s @ %s {", typeid(this), cast(void*) _control._rep.ptr); scope(exit) writeln("}"); assert(_payload.length == blockSize * _blocks); assert(_control.length >= _blocks); writefln(" _startIdx=%s; blockSize=%s; blocks=%s", _startIdx, blockSize, _blocks); if (!_control.length) return; uint blockCount = 1; bool inAllocatedStore = _control[0]; void* start = _payload.ptr; for (size_t i = 1;; ++i) { if (i >= _blocks || _control[i] != inAllocatedStore) { writefln(" %s block at 0x%s, length: %s (%s*%s)", inAllocatedStore ? "Busy" : "Free", cast(void*) start, blockCount * blockSize, blockCount, blockSize); if (i >= _blocks) break; assert(i < _control.length); inAllocatedStore = _control[i]; start = _payload.ptr + blockCount * blockSize; blockCount = 1; } else { ++blockCount; } } } } /// @nogc @system unittest { // Create a block allocator on top of a 10KB stack region. import stdx.allocator.building_blocks.region : InSituRegion; InSituRegion!(10_240, 64) r; auto a = BitmappedBlock!(64, 64)(cast(ubyte[])(r.allocateAll())); static assert(__traits(hasMember, InSituRegion!(10_240, 64), "allocateAll")); const b = a.allocate(100); assert(b.length == 100); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; testAllocator!(() => BitmappedBlock!(64, 8, GCAllocator)(1024 * 64)); } @system unittest { static void testAllocateAll(size_t bs)(size_t blocks, uint blocksAtATime) { import mir.utility : min; assert(bs); import stdx.allocator.gc_allocator : GCAllocator; auto a = BitmappedBlock!(bs, min(bs, platformAlignment))( cast(ubyte[])(GCAllocator.instance.allocate((blocks * bs * 8 + blocks) / 8)) ); import std.conv : text; assert(blocks >= a._blocks, text(blocks, " < ", a._blocks)); blocks = a._blocks; // test allocation of 0 bytes auto x = a.allocate(0); assert(x is null); // test allocation of 1 byte x = a.allocate(1); assert(x.length == 1 || blocks == 0, text(x.ptr, " ", x.length, " ", a)); a.deallocateAll(); bool twice = true; begin: foreach (i; 0 .. blocks / blocksAtATime) { auto b = a.allocate(bs * blocksAtATime); assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); } assert(a.allocate(bs * blocksAtATime) is null); assert(a.allocate(1) is null); // Now deallocate all and do it again! a.deallocateAll(); // Test deallocation auto v = new void[][blocks / blocksAtATime]; foreach (i; 0 .. blocks / blocksAtATime) { auto b = a.allocate(bs * blocksAtATime); assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); v[i] = b; } assert(a.allocate(bs * blocksAtATime) is null); assert(a.allocate(1) is null); foreach (i; 0 .. blocks / blocksAtATime) { a.deallocate(v[i]); } foreach (i; 0 .. blocks / blocksAtATime) { auto b = a.allocate(bs * blocksAtATime); assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); v[i] = b; } foreach (i; 0 .. v.length) { a.deallocate(v[i]); } if (twice) { twice = false; goto begin; } a.deallocateAll; // test expansion if (blocks >= blocksAtATime) { foreach (i; 0 .. 
blocks / blocksAtATime - 1) { auto b = a.allocate(bs * blocksAtATime); assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); (cast(ubyte[]) b)[] = 0xff; a.expand(b, blocksAtATime * bs) || assert(0, text(i)); (cast(ubyte[]) b)[] = 0xfe; assert(b.length == bs * blocksAtATime * 2, text(i, ": ", b.length)); a.reallocate(b, blocksAtATime * bs) || assert(0); assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); } } } testAllocateAll!(1)(0, 1); testAllocateAll!(1)(8, 1); testAllocateAll!(4096)(128, 1); testAllocateAll!(1)(0, 2); testAllocateAll!(1)(128, 2); testAllocateAll!(4096)(128, 2); testAllocateAll!(1)(0, 4); testAllocateAll!(1)(128, 4); testAllocateAll!(4096)(128, 4); testAllocateAll!(1)(0, 3); testAllocateAll!(1)(24, 3); testAllocateAll!(3008)(100, 1); testAllocateAll!(3008)(100, 3); testAllocateAll!(1)(0, 128); testAllocateAll!(1)(128 * 1, 128); testAllocateAll!(128 * 20)(13 * 128, 128); } // Test totalAllocation @safe unittest { BitmappedBlock!(8, 8, NullAllocator) h1; assert(h1.totalAllocation(1) >= 8); assert(h1.totalAllocation(64) >= 64); assert(h1.totalAllocation(8 * 64) >= 8 * 64); assert(h1.totalAllocation(8 * 63) >= 8 * 63); assert(h1.totalAllocation(8 * 64 + 1) >= 8 * 65); BitmappedBlock!(64, 8, NullAllocator) h2; assert(h2.totalAllocation(1) >= 64); assert(h2.totalAllocation(64 * 64) >= 64 * 64); BitmappedBlock!(4096, 4096, NullAllocator) h3; assert(h3.totalAllocation(1) >= 4096); assert(h3.totalAllocation(64 * 4096) >= 64 * 4096); assert(h3.totalAllocation(64 * 4096 + 1) >= 65 * 4096); } // BitmappedBlockWithInternalPointers /** A $(D BitmappedBlock) with additional structure for supporting $(D resolveInternalPointer). To that end, $(D BitmappedBlockWithInternalPointers) adds a bitmap (one bit per block) that marks object starts. The bitmap itself has variable size and is allocated together with regular allocations. The time complexity of $(D resolveInternalPointer) is $(BIGOH k), where $(D k) is the size of the object within which the internal pointer is looked up. */ struct BitmappedBlockWithInternalPointers( size_t theBlockSize, uint theAlignment = platformAlignment, ParentAllocator = NullAllocator) { import stdx.allocator.internal : Ternary; @system unittest { import stdx.allocator.mallocator : AlignedMallocator; auto m = cast(ubyte[])(AlignedMallocator.instance.alignedAllocate(1024 * 64, theAlignment)); scope(exit) AlignedMallocator.instance.deallocate(m); testAllocator!(() => BitmappedBlockWithInternalPointers(m)); } // state { private BitmappedBlock!(theBlockSize, theAlignment, NullAllocator) _heap; private BitVector _allocStart; // } /** Constructors accepting desired capacity or a preallocated buffer, similar in semantics to those of $(D BitmappedBlock). 
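A minimal construction sketch (block size and buffer length here are illustrative, not prescriptive):
---
// Manage a GC-allocated buffer in 4 KB blocks, with internal-pointer support.
auto h = BitmappedBlockWithInternalPointers!(4096)(new ubyte[4096 * 64]);
auto b = h.allocate(123);
void[] whole;
h.resolveInternalPointer(b.ptr + 17, whole); // recovers the enclosing block
assert(whole.ptr is b.ptr);
---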
*/ this(ubyte[] data) { _heap = BitmappedBlock!(theBlockSize, theAlignment, ParentAllocator)(data); } /// Ditto static if (!is(ParentAllocator == NullAllocator)) this(size_t capacity) { // Add room for the _allocStart vector _heap = BitmappedBlock!(theBlockSize, theAlignment, ParentAllocator) (capacity + capacity.divideRoundUp(64)); } // Makes sure there's enough room for _allocStart private bool ensureRoomForAllocStart(size_t len) { if (_allocStart.length >= len) return true; // Must ensure there's room immutable oldLength = _allocStart.rep.length; immutable bits = len.roundUpToMultipleOf(64); void[] b = _allocStart.rep; if (!_heap.reallocate(b, bits / 8)) return false; assert(b.length * 8 == bits); _allocStart = BitVector(cast(ulong[]) b); assert(_allocStart.rep.length * 64 == bits); _allocStart.rep[oldLength .. $] = ulong.max; return true; } /** Allocator primitives. */ alias alignment = theAlignment; /// Ditto size_t goodAllocSize(size_t n) { return n.roundUpToMultipleOf(_heap.blockSize); } /// Ditto void[] allocate(size_t bytes) { auto r = _heap.allocate(bytes); if (!r.ptr) return r; immutable block = (r.ptr - _heap._payload.ptr) / _heap.blockSize; immutable blocks = (r.length + _heap.blockSize - 1) / _heap.blockSize; if (!ensureRoomForAllocStart(block + blocks)) { // Failed, free r and bailout _heap.deallocate(r); return null; } assert(block < _allocStart.length); assert(block + blocks <= _allocStart.length); // Mark the _allocStart bits assert(blocks > 0); _allocStart[block] = 1; _allocStart[block + 1 .. block + blocks] = 0; assert(block + blocks == _allocStart.length || _allocStart[block + blocks] == 1); return r; } /// Ditto void[] allocateAll() { auto r = _heap.allocateAll(); if (!r.ptr) return r; // Carve space at the end for _allocStart auto p = alignDownTo(r.ptr + r.length - 8, ulong.alignof); r = r[0 .. p - r.ptr]; // Initialize _allocStart _allocStart = BitVector(cast(ulong[]) p[0 .. 8]); _allocStart[] = 0; immutable block = (r.ptr - _heap._payload.ptr) / _heap.blockSize; assert(block < _allocStart.length); _allocStart[block] = 1; return r; } /// Ditto bool expand(ref void[] b, size_t bytes) { if (!bytes) return true; if (b is null) return false; immutable oldBlocks = (b.length + _heap.blockSize - 1) / _heap.blockSize; assert(oldBlocks); immutable newBlocks = (b.length + bytes + _heap.blockSize - 1) / _heap.blockSize; assert(newBlocks >= oldBlocks); immutable block = (b.ptr - _heap._payload.ptr) / _heap.blockSize; assert(_allocStart[block]); if (!ensureRoomForAllocStart(block + newBlocks) || !_heap.expand(b, bytes)) { return false; } // Zero only the expanded bits _allocStart[block + oldBlocks .. block + newBlocks] = 0; assert(_allocStart[block]); return true; } /// Ditto bool deallocate(void[] b) { // No need to touch _allocStart here - except for the first bit, it's // meaningless in freed memory. The first bit is already 1. return _heap.deallocate(b); // TODO: one smart thing to do is reduce memory occupied by // _allocStart if we're freeing the rightmost block. 
} /// Ditto Ternary resolveInternalPointer(const void* p, ref void[] result) { if (p < _heap._payload.ptr || p >= _heap._payload.ptr + _heap._payload.length) { return Ternary.no; } // Find block start auto block = (p - _heap._payload.ptr) / _heap.blockSize; if (block >= _allocStart.length) return Ternary.no; // Within an allocation, must find the 1 just to the left of it auto i = _allocStart.find1Backward(block); if (i == i.max) return Ternary.no; auto j = _allocStart.find1(i + 1); result = _heap._payload.ptr[cast(size_t) (_heap.blockSize * i) .. cast(size_t) (_heap.blockSize * j)]; return Ternary.yes; } /// Ditto Ternary empty() { return _heap.empty; } // Currently unused private void markAllAsUnused() { // Mark all deallocated memory with 1 so we minimize damage created by // false pointers. TODO: improve speed. foreach (i, ref e; _allocStart.rep) { // Set to 1 all bits in _allocStart[i] that were 0 in control, and // leave the others unchanged. // (0, 0) => 1; (0, 1) => 0; (1, 0) => 1; (1, 1) => 1 e |= ~_heap._control.rep[i]; } // Now zero all control bits _heap._control[] = 0; // EXCEPT for the _allocStart block itself markAsUsed(_allocStart.rep); } // Currently unused private bool markAsUsed(void[] b) { // Locate position immutable pos = b.ptr - _heap._payload.ptr; assert(pos % _heap.blockSize == 0); auto blockIdx = pos / _heap.blockSize; if (_heap._control[blockIdx]) return false; // Round up size to multiple of block size auto blocks = b.length.divideRoundUp(_heap.blockSize); _heap._control[blockIdx .. blockIdx + blocks] = 1; return true; } // Currently unused private void doneMarking() { // Nothing to do, what's free stays free. } } @system unittest { import stdx.allocator.internal : Ternary; auto h = BitmappedBlockWithInternalPointers!(4096)(new ubyte[4096 * 1024]); auto b = h.allocate(123); assert(b.length == 123); void[] p; Ternary r = h.resolveInternalPointer(b.ptr + 17, p); assert(p.ptr is b.ptr); assert(p.length >= b.length); b = h.allocate(4096); h.resolveInternalPointer(b.ptr, p); assert(p is b); h.resolveInternalPointer(b.ptr + 11, p); assert(p is b); void[] unchanged = p; h.resolveInternalPointer(b.ptr - 40_970, p); assert(p is unchanged); assert(h.expand(b, 1)); assert(b.length == 4097); h.resolveInternalPointer(b.ptr + 4096, p); assert(p.ptr is b.ptr); } /** Returns the number of most significant ones before a zero can be found in $(D x). If $(D x) contains no zeros (i.e. is equal to $(D ulong.max)), returns 64. */ private uint leadingOnes()(ulong x) { import mir.bitop: ctlz; x = ~x; if (x) return cast(uint) x.ctlz; return 64; } @system unittest { assert(leadingOnes(0) == 0); assert(leadingOnes(~0UL) == 64); assert(leadingOnes(0xF000_0000_0000_0000) == 4); assert(leadingOnes(0xE400_0000_0000_0000) == 3); assert(leadingOnes(0xC700_0200_0000_0000) == 2); assert(leadingOnes(0x8000_0030_0000_0000) == 1); assert(leadingOnes(0x2000_0000_0000_0000) == 0); } /** Finds a run of contiguous ones in $(D x) of length at least $(D n). 
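The search is bit-parallel rather than a per-bit scan: each $(D x &= x << s) step ANDs the word with a shifted copy of itself, so a bit survives only if it begins a sufficiently long run of ones, and roughly $(D log n) iterations suffice. A small illustration, with values taken from the unit tests below (indices are MSB-first):
---
assert(findContigOnes(~0UL, 64) == 0);                  // all ones: the run starts at index 0
assert(findContigOnes(0x0000_0F00_0000_0000, 4) == 20); // a run of four ones starts at index 20
---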
*/ private uint findContigOnes()(ulong x, uint n) { while (n > 1) { immutable s = n >> 1; x &= x << s; n -= s; } return leadingOnes(~x); } @system unittest { assert(findContigOnes(0x0000_0000_0000_0300, 2) == 54); assert(findContigOnes(~0UL, 1) == 0); assert(findContigOnes(~0UL, 2) == 0); assert(findContigOnes(~0UL, 32) == 0); assert(findContigOnes(~0UL, 64) == 0); assert(findContigOnes(0UL, 1) == 64); assert(findContigOnes(0x4000_0000_0000_0000, 1) == 1); assert(findContigOnes(0x0000_0F00_0000_0000, 4) == 20); } /* Unconditionally sets the bits from lsb through msb in w to one. */ private void setBits()(ref ulong w, uint lsb, uint msb) { assert(lsb <= msb && msb < 64); const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); w |= mask; } @system unittest { ulong w; w = 0; setBits(w, 0, 63); assert(w == ulong.max); w = 0; setBits(w, 1, 63); assert(w == ulong.max - 1); w = 6; setBits(w, 0, 1); assert(w == 7); w = 6; setBits(w, 3, 3); assert(w == 14); } /* Are the bits from lsb through msb in w zero? If so, set them to 1 and return true. Otherwise, return false. */ private bool setBitsIfZero()(ref ulong w, uint lsb, uint msb) { assert(lsb <= msb && msb < 64); const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); if (w & mask) return false; w |= mask; return true; } // Resets the bits in w from lsb through msb to zero. private void resetBits()(ref ulong w, uint lsb, uint msb) { assert(lsb <= msb && msb < 64); const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); w &= ~mask; } /* Bit disposition is MSB=0 (leftmost, big endian). */ struct BitVector { ulong[] _rep; @safe pure nothrow @nogc: auto rep() { return _rep; } this(ulong[] data) { _rep = data; } void opSliceAssign(bool b) { _rep[] = b ? ulong.max : 0; } void opSliceAssign(bool b, ulong x, ulong y) { assert(x <= y && y <= _rep.length * 64); if (x == y) return; --y; assert(x / 64 <= size_t.max); immutable i1 = cast(size_t) (x / 64); immutable uint b1 = 63 - x % 64; assert(y / 64 <= size_t.max); immutable i2 = cast(size_t) (y / 64); immutable uint b2 = 63 - y % 64; assert(i1 <= i2 && i2 < _rep.length); if (i1 == i2) { // Inside the same word assert(b1 >= b2); if (b) setBits(_rep[i1], b2, b1); else resetBits(_rep[i1], b2, b1); } else { // Spans multiple words assert(i1 < i2); if (b) setBits(_rep[i1], 0, b1); else resetBits(_rep[i1], 0, b1); _rep[i1 + 1 .. i2] = b ? ulong.max : 0; if (b) setBits(_rep[i2], b2, 63); else resetBits(_rep[i2], b2, 63); } } bool opIndex(ulong x) { assert(x < length); return (_rep[cast(size_t) (x / 64)] & (0x8000_0000_0000_0000UL >> (x % 64))) != 0; } void opIndexAssign(bool b, ulong x) { assert(x / 64 <= size_t.max); immutable i = cast(size_t) (x / 64); immutable j = 0x8000_0000_0000_0000UL >> (x % 64); if (b) _rep[i] |= j; else _rep[i] &= ~j; } ulong length() const { return _rep.length * 64; } /* Returns the index of the first 1 to the right of i (including i itself), or length if not found. */ ulong find1(ulong i) { assert(i < length); assert(i / 64 <= size_t.max); auto w = cast(size_t) (i / 64); immutable b = i % 64; // 0 through 63, 0 when i == 0 immutable mask = ulong.max >> b; if (auto current = _rep[w] & mask) { // Great, found return w * 64 + leadingOnes(~current); } // The current word doesn't have the solution, find the leftmost 1 // going to the right. for (++w; w < _rep.length; ++w) { if (auto current = _rep[w]) { return w * 64 + leadingOnes(~current); } } return length; } /* Returns the index of the first 1 to the left of i (including i itself), or ulong.max if not found.
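For instance (a sketch; indices are MSB-first):
---
auto v = BitVector(new ulong[2]);
v[5] = 1;
assert(v.find1Backward(70) == 5);        // scans left from index 70
assert(v.find1Backward(3) == ulong.max); // no 1 at or to the left of index 3
---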
*/ ulong find1Backward(ulong i) { assert(i < length); auto w = cast(size_t) (i / 64); immutable b = 63 - (i % 64); // 0 through 63, 63 when i == 0 immutable mask = ~((1UL << b) - 1); assert(mask != 0); // First, let's see if the current word has a bit larger than ours. if (auto currentWord = _rep[w] & mask) { // Great, this word contains the result. return w * 64 + 63 - currentWord.trailingZeros; } // The current word doesn't have the solution, find the rightmost 1 // going to the left. while (w >= 1) { --w; if (auto currentWord = _rep[w]) return w * 64 + (63 - currentWord.trailingZeros); } return ulong.max; } /// Are all bits zero? bool allAre0() const { foreach (w; _rep) if (w) return false; return true; } /// Are all bits one? bool allAre1() const { foreach (w; _rep) if (w != ulong.max) return false; return true; } ulong findZeros(immutable size_t howMany, ulong start) { assert(start < length); assert(howMany > 64); auto i = cast(size_t) (start / 64); while (_rep[i] & 1) { // No trailing zeros in this word, try the next one if (++i == _rep.length) return ulong.max; start = i * 64; } // Adjust start to have only trailing zeros after it auto prefixLength = 64; while (_rep[i] & (ulong.max >> (64 - prefixLength))) { assert(prefixLength > 0); --prefixLength; ++start; } assert(howMany > prefixLength); auto needed = howMany - prefixLength; for (++i; needed >= 64; needed -= 64, ++i) { if (i >= _rep.length) return ulong.max; if (_rep[i] != 0) return findZeros(howMany, i * 64); } // Leftover < 64 bits assert(needed < 64); if (!needed) return start; if (i >= _rep.length) return ulong.max; if (leadingOnes(~_rep[i]) >= needed) return start; return findZeros(howMany, i * 64); } } @system unittest { auto v = BitVector(new ulong[10]); assert(v.length == 640); v[] = 0; v[53] = 1; assert(v[52] == 0); assert(v[53] == 1); assert(v[54] == 0); v[] = 0; v[53 .. 55] = 1; assert(v[52] == 0); assert(v[53] == 1); assert(v[54] == 1); assert(v[55] == 0); v[] = 0; v[2 .. 65] = 1; assert(v.rep[0] == 0x3FFF_FFFF_FFFF_FFFF); assert(v.rep[1] == 0x8000_0000_0000_0000); assert(v.rep[2] == 0); v[] = 0; assert(v.find1Backward(0) == ulong.max); assert(v.find1Backward(43) == ulong.max); assert(v.find1Backward(83) == ulong.max); v[0] = 1; assert(v.find1Backward(0) == 0); assert(v.find1Backward(43) == 0); import std.conv : text; assert(v.find1Backward(83) == 0, text(v.find1Backward(83))); v[0] = 0; v[101] = 1; assert(v.find1Backward(0) == ulong.max); assert(v.find1Backward(43) == ulong.max); assert(v.find1Backward(83) == ulong.max); assert(v.find1Backward(100) == ulong.max); assert(v.find1Backward(101) == 101); assert(v.find1Backward(553) == 101); v[0 .. v.length] = 0; v[v.length .. v.length] = 0; v[0 .. 0] = 0; v[] = 0; assert(v.find1(0) == v.length); v[139] = 1; assert(v.find1(0) == 139); assert(v.find1(100) == 139); assert(v.find1(138) == 139); assert(v.find1(139) == 139); assert(v.find1(140) == v.length); v[] = 0; assert(v.findZeros(100, 0) == 0); foreach (i; 0 .. 
500) assert(v.findZeros(100, i) == i, text(v.findZeros(100, i), " != ", i)); assert(v.findZeros(540, 99) == 99); assert(v.findZeros(99, 540) == 540); assert(v.findZeros(540, 100) == 100); assert(v.findZeros(640, 0) == 0); assert(v.findZeros(641, 1) == ulong.max); assert(v.findZeros(641, 100) == ulong.max); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/bucketizer.d 0000664 0000000 0000000 00000016273 13535263154 0027573 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.bucketizer; /** A $(D Bucketizer) uses distinct allocators for handling allocations of sizes in the intervals $(D [min, min + step - 1]), $(D [min + step, min + 2 * step - 1]), $(D [min + 2 * step, min + 3 * step - 1]), $(D ...), $(D [max - step + 1, max]). $(D Bucketizer) holds a fixed-size array of allocators and dispatches calls to them appropriately. The size of the array is $(D (max + 1 - min) / step), which must be an exact division. Allocations for sizes smaller than $(D min) or larger than $(D max) are illegal for $(D Bucketizer). To handle them separately, $(D Segregator) may be of use. */ struct Bucketizer(Allocator, size_t min, size_t max, size_t step) { import common = stdx.allocator.common : roundUpToMultipleOf; import stdx.allocator.internal : Ternary; static assert((max - (min - 1)) % step == 0, "Invalid limits when instantiating " ~ Bucketizer.stringof); // state /** The array of allocators is publicly available for e.g. initialization and inspection. */ Allocator[(max + 1 - min) / step] buckets; private Allocator* allocatorFor(size_t n) { const i = (n - min) / step; return i < buckets.length ? buckets.ptr + i : null; } /** The alignment offered is the same as $(D Allocator.alignment). */ enum uint alignment = Allocator.alignment; /** Rounds up to the maximum size of the bucket in which $(D bytes) falls. */ size_t goodAllocSize(size_t bytes) const { // round up bytes such that bytes - min + 1 is a multiple of step assert(bytes >= min); const min_1 = min - 1; return min_1 + roundUpToMultipleOf(bytes - min_1, step); } /** Directs the call to either one of the $(D buckets) allocators. */ void[] allocate(size_t bytes) { if (!bytes) return null; if (auto a = allocatorFor(bytes)) { const actual = goodAllocSize(bytes); auto result = a.allocate(actual); return result.ptr ? result.ptr[0 .. bytes] : null; } return null; } /** Directs the call to either one of the $(D buckets) allocators. Defined only if `Allocator` defines `alignedAllocate`. */ static if (__traits(hasMember, Allocator, "alignedAllocate")) void[] alignedAllocate(size_t bytes, uint a) { if (!bytes) return null; if (auto chosen = allocatorFor(bytes)) { const actual = goodAllocSize(bytes); auto result = chosen.alignedAllocate(actual, a); return result.ptr ? result.ptr[0 .. bytes] : null; } return null; } /** This method allows expansion within the respective bucket range. It succeeds if both $(D b.length) and $(D b.length + delta) fall in a range of the form $(D [min + k * step, min + (k + 1) * step - 1]). */ bool expand(ref void[] b, size_t delta) { if (!b.ptr) return delta == 0; assert(b.length >= min && b.length <= max); const available = goodAllocSize(b.length); const desired = b.length + delta; if (available < desired) return false; b = b.ptr[0 .. desired]; return true; } /** This method allows reallocation within the respective bucket range. If both $(D b.length) and $(D size) fall in a range of the form $(D [min + k * step, min + (k + 1) * step - 1]), then reallocation is in place.
Otherwise, reallocation with moving is attempted. */ bool reallocate(ref void[] b, size_t size) { if (size == 0) { deallocate(b); b = null; return true; } if (size >= b.length) { return expand(b, size - b.length); } assert(b.length >= min && b.length <= max); if (goodAllocSize(size) == goodAllocSize(b.length)) { b = b.ptr[0 .. size]; return true; } // Move cross buckets return common.reallocate(this, b, size); } /** Similar to `reallocate`, with alignment. Defined only if `Allocator` defines `alignedReallocate`. */ static if (__traits(hasMember, Allocator, "alignedReallocate")) bool alignedReallocate(ref void[] b, size_t size, uint a) { if (size == 0) { deallocate(b); b = null; return true; } if (size >= b.length) { return expand(b, size - b.length); } assert(b.length >= min && b.length <= max); if (goodAllocSize(size) == goodAllocSize(b.length)) { b = b.ptr[0 .. size]; return true; } // Move cross buckets return common.alignedReallocate(this, b, size, a); } /** Defined only if `Allocator` defines `owns`. Finds the owner of `b` and forwards the call to it. */ static if (__traits(hasMember, Allocator, "owns")) Ternary owns(void[] b) { if (!b.ptr) return Ternary.no; if (auto a = allocatorFor(b.length)) { const actual = goodAllocSize(b.length); return a.owns(b.ptr[0 .. actual]); } return Ternary.no; } /** This method is only defined if $(D Allocator) defines $(D deallocate). */ static if (__traits(hasMember, Allocator, "deallocate")) bool deallocate(void[] b) { if (!b.ptr) return true; if (auto a = allocatorFor(b.length)) { a.deallocate(b.ptr[0 .. goodAllocSize(b.length)]); } return true; } /** This method is only defined if all allocators involved define $(D deallocateAll), and calls it for each bucket in turn. Returns `true` if all allocators could deallocate all. */ static if (__traits(hasMember, Allocator, "deallocateAll")) bool deallocateAll() { bool result = true; foreach (ref a; buckets) { if (!a.deallocateAll()) result = false; } return result; } /** This method is only defined if all allocators involved define $(D resolveInternalPointer), and tries it for each bucket in turn. */ static if (__traits(hasMember, Allocator, "resolveInternalPointer")) Ternary resolveInternalPointer(const void* p, ref void[] result) { foreach (ref a; buckets) { Ternary r = a.resolveInternalPointer(p, result); if (r == Ternary.yes) return r; } return Ternary.no; } } /// @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.building_blocks.region : Region; import stdx.allocator.common : unbounded; import stdx.allocator.mallocator : Mallocator; import stdx.allocator.internal : Ternary; Bucketizer!( FreeList!( AllocatorList!( (size_t n) => Region!Mallocator(max(n, 1024u * 1024))), 0, unbounded), 65, 512, 64) a; auto b = a.allocate(400); assert(b.length == 400); assert(a.owns(b) == Ternary.yes); void[] p; a.deallocate(b); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/fallback_allocator.d 0000664 0000000 0000000 00000024642 13535263154 0031224 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.fallback_allocator; import stdx.allocator.common; /** $(D FallbackAllocator) is the allocator equivalent of an "or" operator in algebra. An allocation request is first attempted with the $(D Primary) allocator. If that returns $(D null), the request is forwarded to the $(D Fallback) allocator.
All other requests are dispatched appropriately to one of the two allocators. In order to work, $(D FallbackAllocator) requires that $(D Primary) defines the $(D owns) method. This is needed in order to decide which allocator was responsible for a given allocation. $(D FallbackAllocator) is useful for fast, special-purpose allocators backed up by general-purpose allocators. The example below features a stack region backed up by the $(D GCAllocator). */ struct FallbackAllocator(Primary, Fallback) { import mir.utility : min; import stdx.allocator.internal : Ternary; @system unittest { testAllocator!(() => FallbackAllocator()); } /// The primary allocator. static if (stateSize!Primary) Primary primary; else alias primary = Primary.instance; /// The fallback allocator. static if (stateSize!Fallback) Fallback fallback; else alias fallback = Fallback.instance; /** If both $(D Primary) and $(D Fallback) are stateless, $(D FallbackAllocator) defines a static instance called `instance`. */ static if (!stateSize!Primary && !stateSize!Fallback) { enum FallbackAllocator instance = FallbackAllocator(); } /** The alignment offered is the minimum of the two allocators' alignment. */ enum uint alignment = min(Primary.alignment, Fallback.alignment); /** Allocates memory trying the primary allocator first. If it returns $(D null), the fallback allocator is tried. */ void[] allocate(size_t s) { auto result = primary.allocate(s); return result.length == s ? result : fallback.allocate(s); } /** $(D FallbackAllocator) offers $(D alignedAllocate) iff at least one of the allocators also offers it. It attempts to allocate using either or both. */ static if (__traits(hasMember, Primary, "alignedAllocate") || __traits(hasMember, Fallback, "alignedAllocate")) void[] alignedAllocate(size_t s, uint a) { static if (__traits(hasMember, Primary, "alignedAllocate")) {{ auto result = primary.alignedAllocate(s, a); if (result.length == s) return result; }} static if (__traits(hasMember, Fallback, "alignedAllocate")) {{ auto result = fallback.alignedAllocate(s, a); if (result.length == s) return result; }} return null; } /** $(D expand) is defined if and only if at least one of the allocators defines $(D expand). It works as follows. If $(D primary.owns(b)), then the request is forwarded to $(D primary.expand) if it is defined, or fails (returning $(D false)) otherwise. If $(D primary) does not own $(D b), then the request is forwarded to $(D fallback.expand) if it is defined, or fails (returning $(D false)) otherwise. */ static if (__traits(hasMember, Primary, "owns") && (__traits(hasMember, Primary, "expand") || __traits(hasMember, Fallback, "expand"))) bool expand(ref void[] b, size_t delta) { if (!delta) return true; if (!b.ptr) return false; if (primary.owns(b) == Ternary.yes) { static if (__traits(hasMember, Primary, "expand")) return primary.expand(b, delta); else return false; } static if (__traits(hasMember, Fallback, "expand")) return fallback.expand(b, delta); else return false; } /** $(D reallocate) works as follows. If $(D primary.owns(b)), then $(D primary.reallocate(b, newSize)) is attempted. If it fails, an attempt is made to move the allocation from $(D primary) to $(D fallback). If $(D primary) does not own $(D b), then $(D fallback.reallocate(b, newSize)) is attempted. If that fails, an attempt is made to move the allocation from $(D fallback) to $(D primary). 
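The move itself is the usual allocate/copy/deallocate sequence; a minimal standalone sketch of the pattern (the helper name is hypothetical, mirroring the private $(D crossAllocatorMove) helper below):
---
static bool moveBlock(From, To)(ref From from, ref To to,
    ref void[] b, size_t newSize)
{
    auto b1 = to.allocate(newSize);          // space from the destination
    if (b1.length != newSize) return false;  // destination failed as well
    immutable n = b.length < newSize ? b.length : newSize;
    b1[0 .. n] = b[0 .. n];                  // copy the part that fits
    static if (__traits(hasMember, From, "deallocate"))
        from.deallocate(b);                  // release the old block if possible
    b = b1;                                  // publish the new location
    return true;
}
---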
*/ static if (__traits(hasMember, Primary, "owns")) bool reallocate(ref void[] b, size_t newSize) { bool crossAllocatorMove(From, To)(auto ref From from, auto ref To to) { auto b1 = to.allocate(newSize); if (b1.length != newSize) return false; if (b.length < newSize) b1[0 .. b.length] = b[]; else b1[] = b[0 .. newSize]; static if (__traits(hasMember, From, "deallocate")) from.deallocate(b); b = b1; return true; } if (b is null || primary.owns(b) == Ternary.yes) { return primary.reallocate(b, newSize) // Move from primary to fallback || crossAllocatorMove(primary, fallback); } return fallback.reallocate(b, newSize) // Interesting. Move from fallback to primary. || crossAllocatorMove(fallback, primary); } static if (__traits(hasMember, Primary, "owns") && (__traits(hasMember, Primary, "alignedAllocate") || __traits(hasMember, Fallback, "alignedAllocate"))) bool alignedReallocate(ref void[] b, size_t newSize, uint a) { bool crossAllocatorMove(From, To)(auto ref From from, auto ref To to) { static if (!__traits(hasMember, To, "alignedAllocate")) { return false; } else { auto b1 = to.alignedAllocate(newSize, a); if (b1.length != newSize) return false; if (b.length < newSize) b1[0 .. b.length] = b[]; else b1[] = b[0 .. newSize]; static if (__traits(hasMember, From, "deallocate")) from.deallocate(b); b = b1; return true; } } static if (__traits(hasMember, Primary, "alignedAllocate")) { if (b is null || primary.owns(b) == Ternary.yes) { return primary.alignedReallocate(b, newSize, a) || crossAllocatorMove(primary, fallback); } } static if (__traits(hasMember, Fallback, "alignedAllocate")) { return fallback.alignedReallocate(b, newSize, a) || crossAllocatorMove(fallback, primary); } else { return false; } } /** $(D owns) is defined if and only if both allocators define $(D owns). Returns $(D primary.owns(b) | fallback.owns(b)). */ static if (__traits(hasMember, Primary, "owns") && __traits(hasMember, Fallback, "owns")) Ternary owns(void[] b) { return primary.owns(b) | fallback.owns(b); } /** $(D resolveInternalPointer) is defined if and only if both allocators define it. */ static if (__traits(hasMember, Primary, "resolveInternalPointer") && __traits(hasMember, Fallback, "resolveInternalPointer")) Ternary resolveInternalPointer(const void* p, ref void[] result) { Ternary r = primary.resolveInternalPointer(p, result); return r == Ternary.no ? fallback.resolveInternalPointer(p, result) : r; } /** $(D deallocate) is defined if and only if at least one of the allocators define $(D deallocate). It works as follows. If $(D primary.owns(b)), then the request is forwarded to $(D primary.deallocate) if it is defined, or is a no-op otherwise. If $(D primary) does not own $(D b), then the request is forwarded to $(D fallback.deallocate) if it is defined, or is a no-op otherwise. */ static if (__traits(hasMember, Primary, "owns") && (__traits(hasMember, Primary, "deallocate") || __traits(hasMember, Fallback, "deallocate"))) bool deallocate(void[] b) { if (primary.owns(b) == Ternary.yes) { static if (__traits(hasMember, Primary, "deallocate")) return primary.deallocate(b); else return false; } else { static if (__traits(hasMember, Fallback, "deallocate")) return fallback.deallocate(b); else return false; } } /** $(D empty) is defined if both allocators also define it. 
Returns: $(D primary.empty & fallback.empty) */ static if (__traits(hasMember, Primary, "empty") && __traits(hasMember, Fallback, "empty")) Ternary empty() { return primary.empty & fallback.empty; } } @system unittest { import std.conv : text; import stdx.allocator.building_blocks.region : InSituRegion; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.internal : Ternary; FallbackAllocator!(InSituRegion!16_384, GCAllocator) a; // This allocation uses the stack auto b1 = a.allocate(1024); assert(b1.length == 1024, text(b1.length)); assert(a.primary.owns(b1) == Ternary.yes); // This large allocation does not fit the region and goes to the GCAllocator auto b2 = a.allocate(1024 * 1024); assert(a.primary.owns(b2) == Ternary.no); a.deallocate(b1); a.deallocate(b2); } /** Convenience function that uses type deduction to return the appropriate $(D FallbackAllocator) instance. To initialize with allocators that don't have state, use their $(D instance) static member. */ FallbackAllocator!(Primary, Fallback) fallbackAllocator(Primary, Fallback)(auto ref Primary p, auto ref Fallback f) { import mir.functional: forward; alias R = FallbackAllocator!(Primary, Fallback); static if (stateSize!Primary) static if (stateSize!Fallback) return R(forward!p, forward!f); else return R(forward!p); else static if (stateSize!Fallback) return R(forward!f); else return R(); } /// @system unittest { import stdx.allocator.building_blocks.region : Region; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.internal : Ternary; auto a = fallbackAllocator(Region!GCAllocator(1024), GCAllocator.instance); auto b1 = a.allocate(1020); assert(b1.length == 1020); assert(a.primary.owns(b1) == Ternary.yes); auto b2 = a.allocate(10); assert(b2.length == 10); assert(a.primary.owns(b2) == Ternary.no); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/free_list.d 0000664 0000000 0000000 00000106202 13535263154 0027372 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.free_list; import stdx.allocator.common; import std.typecons : Flag, Yes, No; /** $(HTTP en.wikipedia.org/wiki/Free_list, Free list allocator), stackable on top of another allocator. Allocation requests between $(D min) and $(D max) bytes are rounded up to $(D max) and served from a singly-linked list of buffers deallocated in the past. All other allocations are directed to $(D ParentAllocator). Due to the simplicity of free list management, allocations from the free list are fast. One instantiation is of particular interest: $(D FreeList!(0, unbounded)) puts every deallocation in the freelist, and subsequently serves any allocation from the freelist (if not empty). There is no checking of size matching, which would be incorrect for a freestanding allocator but is both correct and fast when an owning allocator on top of the free list allocator (such as $(D Segregator)) is already in charge of handling size checking. The following methods are defined if $(D ParentAllocator) defines them, and forward to it: $(D expand), $(D owns), $(D reallocate).
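A minimal usage sketch (the bounds 17 and 32 are illustrative):
---
import stdx.allocator.mallocator : Mallocator;

// Requests of 17 through 32 bytes are rounded up to 32 bytes.
FreeList!(Mallocator, 17, 32) fl;
auto a = fl.allocate(24); // miss: a 32-byte block is obtained from Mallocator
fl.deallocate(a);         // the block is parked on the free list
auto b = fl.allocate(17); // hit: served from the free list, no parent call
---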
*/ struct FreeList(ParentAllocator, size_t minSize, size_t maxSize = minSize, Flag!"adaptive" adaptive = No.adaptive) { import stdx.allocator.internal : Ternary; static assert(minSize != unbounded, "Use minSize = 0 for no low bound."); static assert(maxSize >= (void*).sizeof, "Maximum size must accommodate a pointer."); private enum unchecked = minSize == 0 && maxSize == unbounded; private enum hasTolerance = !unchecked && (minSize != maxSize || maxSize == chooseAtRuntime); static if (minSize == chooseAtRuntime) { /** Returns the smallest allocation size eligible for allocation from the freelist. (If $(D minSize != chooseAtRuntime), this is simply an alias for $(D minSize).) */ @property size_t min() const { assert(_min != chooseAtRuntime); return _min; } /** If $(D FreeList) has been instantiated with $(D minSize == chooseAtRuntime), then the $(D min) property is writable. Setting it must precede any allocation. Params: low = new value for $(D min) Precondition: $(D low <= max), or $(D maxSize == chooseAtRuntime) and $(D max) has not yet been initialized. Also, no allocation has been yet done with this allocator. Postcondition: $(D min == low) */ @property void min(size_t low) { assert(low <= max || max == chooseAtRuntime); minimize; _min = low; } } else { alias min = minSize; } static if (maxSize == chooseAtRuntime) { /** Returns the largest allocation size eligible for allocation from the freelist. (If $(D maxSize != chooseAtRuntime), this is simply an alias for $(D maxSize).) All allocation requests for sizes greater than or equal to $(D min) and less than or equal to $(D max) are rounded to $(D max) and forwarded to the parent allocator. When the block fitting the same constraint gets deallocated, it is put in the freelist with the allocated size assumed to be $(D max). */ @property size_t max() const { return _max; } /** If $(D FreeList) has been instantiated with $(D maxSize == chooseAtRuntime), then the $(D max) property is writable. Setting it must precede any allocation. Params: high = new value for $(D max) Precondition: $(D high >= min), or $(D minSize == chooseAtRuntime) and $(D min) has not yet been initialized. Also $(D high >= (void*).sizeof). Also, no allocation has been yet done with this allocator. Postcondition: $(D max == high) */ @property void max(size_t high) { assert((high >= min || min == chooseAtRuntime) && high >= (void*).sizeof); minimize; _max = high; } @system unittest { import stdx.allocator.common : chooseAtRuntime; import stdx.allocator.mallocator : Mallocator; FreeList!(Mallocator, chooseAtRuntime, chooseAtRuntime) a; a.min = 64; a.max = 128; assert(a.min == 64); assert(a.max == 128); } } else { alias max = maxSize; } private bool tooSmall(size_t n) const { static if (minSize == 0) return false; else return n < min; } private bool tooLarge(size_t n) const { static if (maxSize == unbounded) return false; else return n > max; } private bool freeListEligible(size_t n) const { static if (unchecked) { return true; } else { static if (minSize == 0) { if (!n) return false; } static if (minSize == maxSize && minSize != chooseAtRuntime) return n == maxSize; else return !tooSmall(n) && !tooLarge(n); } } static if (!unchecked) private void[] blockFor(Node* p) { assert(p); return (cast(void*) p)[0 .. 
max]; } // statistics static if (adaptive == Yes.adaptive) { private enum double windowLength = 1000.0; private enum double tooFewMisses = 0.01; private double probMiss = 1.0; // start with a high miss probability private uint accumSamples, accumMisses; void updateStats() { assert(accumSamples >= accumMisses); /* Given that for the past windowLength samples we saw misses with estimated probability probMiss, and assuming the new sample wasMiss or not, what's the new estimated probMiss? */ probMiss = (probMiss * windowLength + accumMisses) / (windowLength + accumSamples); assert(probMiss <= 1.0); accumSamples = 0; accumMisses = 0; // If probability to miss is under x%, yank one off the freelist static if (!unchecked) { if (probMiss < tooFewMisses && _root) { auto b = blockFor(_root); _root = _root.next; parent.deallocate(b); } } } } private struct Node { Node* next; } static assert(ParentAllocator.alignment >= Node.alignof); // state /** The parent allocator. Depending on whether $(D ParentAllocator) holds state or not, this is a member variable or an alias for `ParentAllocator.instance`. */ static if (stateSize!ParentAllocator) ParentAllocator parent; else alias parent = ParentAllocator.instance; private Node* root; static if (minSize == chooseAtRuntime) private size_t _min = chooseAtRuntime; static if (maxSize == chooseAtRuntime) private size_t _max = chooseAtRuntime; /** Alignment offered. */ alias alignment = ParentAllocator.alignment; /** If $(D maxSize == unbounded), returns $(D parent.goodAllocSize(bytes)). Otherwise, returns $(D max) for sizes in the interval $(D [min, max]), and $(D parent.goodAllocSize(bytes)) otherwise. Precondition: If set at runtime, $(D min) and/or $(D max) must be initialized appropriately. Postcondition: $(D result >= bytes) */ size_t goodAllocSize(size_t bytes) { assert(minSize != chooseAtRuntime && maxSize != chooseAtRuntime); static if (maxSize != unbounded) { if (freeListEligible(bytes)) { assert(parent.goodAllocSize(max) == max, "Wrongly configured freelist maximum value"); return max; } } return parent.goodAllocSize(bytes); } private void[] allocateEligible(size_t bytes) { assert(bytes); if (root) { // faster auto result = (cast(ubyte*) root)[0 .. bytes]; root = root.next; return result; } // slower static if (hasTolerance) { immutable toAllocate = max; } else { alias toAllocate = bytes; } assert(toAllocate == max || max == unbounded); auto result = parent.allocate(toAllocate); static if (hasTolerance) { if (result) result = result.ptr[0 .. bytes]; } static if (adaptive == Yes.adaptive) { ++accumMisses; updateStats; } return result; } /** Allocates memory either off of the free list or from the parent allocator. If $(D n) is within $(D [min, max]) or if the free list is unchecked ($(D minSize == 0 && maxSize == size_t.max)), then the free list is consulted first. If not empty (hit), the block at the front of the free list is removed from the list and returned. Otherwise (miss), a new block of $(D max) bytes is allocated, truncated to $(D n) bytes, and returned. Params: n = number of bytes to allocate Returns: The allocated block, or $(D null). Precondition: If set at runtime, $(D min) and/or $(D max) must be initialized appropriately. 
Postcondition: $(D result.length == n || result is null) */ void[] allocate(size_t n) { static if (adaptive == Yes.adaptive) ++accumSamples; assert(n < size_t.max / 2); // fast path if (freeListEligible(n)) { return allocateEligible(n); } // slower static if (adaptive == Yes.adaptive) { updateStats; } return parent.allocate(n); } // Forwarding methods mixin(forwardToMember("parent", "expand", "owns", "reallocate")); /** If $(D block.length) is within $(D [min, max]) or if the free list is unchecked ($(D minSize == 0 && maxSize == size_t.max)), then inserts the block at the front of the free list. For all others, forwards to $(D parent.deallocate) if $(D Parent.deallocate) is defined. Params: block = Block to deallocate. Precondition: If set at runtime, $(D min) and/or $(D max) must be initialized appropriately. The block must have been allocated with this freelist, and no dynamic changing of $(D min) or $(D max) is allowed to occur between allocation and deallocation. */ bool deallocate(void[] block) { if (freeListEligible(block.length)) { if (min == 0) { // In this case a null pointer might have made it this far. if (block is null) return true; } auto t = root; root = cast(Node*) block.ptr; root.next = t; return true; } static if (__traits(hasMember, ParentAllocator, "deallocate")) return parent.deallocate(block); else return false; } /** Defined only if $(D ParentAllocator) defines $(D deallocateAll). If so, forwards to it and resets the freelist. */ static if (__traits(hasMember, ParentAllocator, "deallocateAll")) bool deallocateAll() { root = null; return parent.deallocateAll(); } /** Nonstandard function that minimizes the memory usage of the freelist by freeing each element in turn. Defined only if $(D ParentAllocator) defines $(D deallocate). */ static if (__traits(hasMember, ParentAllocator, "deallocate") && !unchecked) void minimize() { while (root) { auto nuke = blockFor(root); root = root.next; parent.deallocate(nuke); } } } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; FreeList!(GCAllocator, 0, 8) fl; assert(fl.root is null); auto b1 = fl.allocate(7); fl.allocate(8); assert(fl.root is null); fl.deallocate(b1); assert(fl.root !is null); fl.allocate(8); assert(fl.root is null); } /** Free list built on top of exactly one contiguous block of memory. The block is assumed to have been allocated with $(D ParentAllocator), and is released in $(D ContiguousFreeList)'s destructor (unless $(D ParentAllocator) is $(D NullAllocator)). $(D ContiguousFreeList) has most of the advantages of $(D FreeList) but fewer disadvantages. It has better cache locality because items are closer to one another. It imposes less fragmentation on its parent allocator. The disadvantages of $(D ContiguousFreeList) over $(D FreeList) are its pay-upfront model (as opposed to $(D FreeList)'s pay-as-you-go approach), and a hard limit on the number of nodes in the list. Thus, a large number of long-lived objects may occupy the entire block, making it unavailable for serving allocations from the free list. However, an absolute cap on the free list size may be beneficial. The options $(D minSize == unbounded) and $(D maxSize == unbounded) are not available for $(D ContiguousFreeList).
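A minimal sketch over a caller-supplied buffer (sizes illustrative):
---
import stdx.allocator.building_blocks.null_allocator : NullAllocator;

// Structure a 4 KB buffer as a free list of 64-byte nodes; with
// NullAllocator as parent, nothing outside the buffer is ever allocated.
auto fl = ContiguousFreeList!(NullAllocator, 0, 64)(new ubyte[4096]);
auto b = fl.allocate(48); // one 64-byte node backs this 48-byte request
fl.deallocate(b);         // the node goes back on the in-buffer free list
---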
*/ struct ContiguousFreeList(ParentAllocator, size_t minSize, size_t maxSize = minSize) { import stdx.allocator.building_blocks.null_allocator : NullAllocator; import stdx.allocator.building_blocks.stats_collector : StatsCollector, Options; import stdx.allocator.internal : Ternary; alias Impl = FreeList!(NullAllocator, minSize, maxSize); enum unchecked = minSize == 0 && maxSize == unbounded; alias Node = Impl.Node; alias SParent = StatsCollector!(ParentAllocator, Options.bytesUsed); // state /** The parent allocator. Depending on whether $(D ParentAllocator) holds state or not, this is a member variable or an alias for `ParentAllocator.instance`. */ SParent parent; FreeList!(NullAllocator, minSize, maxSize) fl; void[] support; size_t allocated; /// Alignment offered. enum uint alignment = (void*).alignof; private void initialize(ubyte[] buffer, size_t itemSize = fl.max) { assert(itemSize != unbounded && itemSize != chooseAtRuntime); assert(buffer.ptr.alignedAt(alignment)); immutable available = buffer.length / itemSize; if (available == 0) return; support = buffer; fl.root = cast(Node*) buffer.ptr; auto past = cast(Node*) (buffer.ptr + available * itemSize); for (auto n = fl.root; ; ) { auto next = cast(Node*) (cast(ubyte*) n + itemSize); if (next == past) { n.next = null; break; } assert(next < past); assert(n < next); n.next = next; n = next; } } /** Constructors setting up the memory structured as a free list. Params: buffer = Buffer to structure as a free list. If $(D ParentAllocator) is not $(D NullAllocator), the buffer is assumed to be allocated by $(D parent) and will be freed in the destructor. parent = Parent allocator. For construction from stateless allocators, use their `instance` static member. bytes = Bytes (not items) to be allocated for the free list. Memory will be allocated during construction and deallocated in the destructor. max = Maximum size eligible for freelisting. Construction with this parameter is defined only if $(D maxSize == chooseAtRuntime) or $(D maxSize == unbounded). min = Minimum size eligible for freelisting. Construction with this parameter is defined only if $(D minSize == chooseAtRuntime). If this condition is met and no $(D min) parameter is present, $(D min) is initialized with $(D max). 
*/ static if (!stateSize!ParentAllocator) this(ubyte[] buffer) { initialize(buffer); } /// ditto static if (stateSize!ParentAllocator) this(ParentAllocator parent, ubyte[] buffer) { initialize(buffer); this.parent = SParent(parent); } /// ditto static if (!stateSize!ParentAllocator) this(size_t bytes) { initialize(cast(ubyte[])(ParentAllocator.instance.allocate(bytes))); } /// ditto static if (stateSize!ParentAllocator) this(ParentAllocator parent, size_t bytes) { initialize(cast(ubyte[])(parent.allocate(bytes))); this.parent = SParent(parent); } /// ditto static if (!stateSize!ParentAllocator && (maxSize == chooseAtRuntime || maxSize == unbounded)) this(size_t bytes, size_t max) { static if (maxSize == chooseAtRuntime) fl.max = max; static if (minSize == chooseAtRuntime) fl.min = max; initialize(cast(ubyte[])(parent.allocate(bytes)), max); } /// ditto static if (stateSize!ParentAllocator && (maxSize == chooseAtRuntime || maxSize == unbounded)) this(ParentAllocator parent, size_t bytes, size_t max) { static if (maxSize == chooseAtRuntime) fl.max = max; static if (minSize == chooseAtRuntime) fl.min = max; initialize(cast(ubyte[])(parent.allocate(bytes)), max); this.parent = SParent(parent); } /// ditto static if (!stateSize!ParentAllocator && (maxSize == chooseAtRuntime || maxSize == unbounded) && minSize == chooseAtRuntime) this(size_t bytes, size_t min, size_t max) { static if (maxSize == chooseAtRuntime) fl.max = max; fl.min = min; initialize(cast(ubyte[])(parent.allocate(bytes)), max); static if (stateSize!ParentAllocator) this.parent = SParent(parent); } /// ditto static if (stateSize!ParentAllocator && (maxSize == chooseAtRuntime || maxSize == unbounded) && minSize == chooseAtRuntime) this(ParentAllocator parent, size_t bytes, size_t min, size_t max) { static if (maxSize == chooseAtRuntime) fl.max = max; fl.min = min; initialize(cast(ubyte[])(parent.allocate(bytes)), max); static if (stateSize!ParentAllocator) this.parent = SParent(parent); } /** If $(D n) is eligible for freelisting, returns $(D max). Otherwise, returns $(D parent.goodAllocSize(n)). Precondition: If set at runtime, $(D min) and/or $(D max) must be initialized appropriately. Postcondition: $(D result >= bytes) */ size_t goodAllocSize(size_t n) { if (fl.freeListEligible(n)) return fl.max; return parent.goodAllocSize(n); } /** Allocate $(D n) bytes of memory. If $(D n) is eligible for freelist and the freelist is not empty, pops the memory off the free list. In all other cases, uses the parent allocator. */ void[] allocate(size_t n) { auto result = fl.allocate(n); if (result) { // Only case we care about: eligible sizes allocated from us ++allocated; return result; } // All others, allocate from parent return parent.allocate(n); } /** Defined if `ParentAllocator` defines it. Checks whether the block belongs to this allocator. */ static if (__traits(hasMember, SParent, "owns") || unchecked) Ternary owns(void[] b) { if (support.ptr <= b.ptr && b.ptr < support.ptr + support.length) return Ternary.yes; static if (unchecked) return Ternary.no; else return parent.owns(b); } /** Deallocates $(D b). If it's of eligible size, it's put on the free list. Otherwise, it's returned to $(D parent). Precondition: $(D b) has been allocated with this allocator, or is $(D null). 
*/ bool deallocate(void[] b) { if (support.ptr <= b.ptr && b.ptr < support.ptr + support.length) { // we own this guy assert(fl.freeListEligible(b.length)); assert(allocated); --allocated; // Put manually in the freelist auto t = fl.root; fl.root = cast(Node*) b.ptr; fl.root.next = t; return true; } return parent.deallocate(b); } /** Deallocates everything from the parent. */ static if (__traits(hasMember, ParentAllocator, "deallocateAll") && stateSize!ParentAllocator) bool deallocateAll() { bool result = fl.deallocateAll && parent.deallocateAll; allocated = 0; return result; } /** Returns `Ternary.yes` if no memory is currently allocated with this allocator, `Ternary.no` otherwise. This method never returns `Ternary.unknown`. */ Ternary empty() { return Ternary(allocated == 0 && parent.bytesUsed == 0); } } /// @nogc @safe unittest { import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.mallocator : Mallocator; import stdx.allocator.common : unbounded; alias ScalableFreeList = AllocatorList!((n) => ContiguousFreeList!(Mallocator, 0, unbounded)(4096) ); } @system unittest { import stdx.allocator.building_blocks.null_allocator : NullAllocator; import stdx.allocator.internal : Ternary; alias A = ContiguousFreeList!(NullAllocator, 0, 64); auto a = A(new ubyte[1024]); assert(a.empty == Ternary.yes); assert(a.goodAllocSize(15) == 64); assert(a.goodAllocSize(65) == NullAllocator.instance.goodAllocSize(65)); auto b = a.allocate(100); assert(a.empty == Ternary.yes); assert(b.length == 0); a.deallocate(b); b = a.allocate(64); assert(a.empty == Ternary.no); assert(b.length == 64); assert(a.owns(b) == Ternary.yes); assert(a.owns(null) == Ternary.no); a.deallocate(b); } @system unittest { import stdx.allocator.building_blocks.region : Region; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.internal : Ternary; alias A = ContiguousFreeList!(Region!GCAllocator, 0, 64); auto a = A(Region!GCAllocator(1024 * 4), 1024); assert(a.empty == Ternary.yes); assert(a.goodAllocSize(15) == 64); assert(a.goodAllocSize(65) == a.parent.goodAllocSize(65)); auto b = a.allocate(100); assert(a.empty == Ternary.no); assert(a.allocated == 0); assert(b.length == 100); a.deallocate(b); assert(a.empty == Ternary.yes); b = a.allocate(64); assert(a.empty == Ternary.no); assert(b.length == 64); assert(a.owns(b) == Ternary.yes); assert(a.owns(null) == Ternary.no); a.deallocate(b); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; alias A = ContiguousFreeList!(GCAllocator, 64, 64); auto a = A(1024); const b = a.allocate(100); assert(b.length == 100); } // Single @nogc exception instances shared by all template instantiations. private static immutable excMin = new Exception("SharedFreeList.min must be initialized exactly once."); private static immutable excMax = new Exception("SharedFreeList.max must be initialized exactly once."); private static immutable excBounds = new Exception("Wrong shared free list bounds."); private static immutable excX = new Exception("x should be positive."); /** FreeList shared across threads. Allocation and deallocation are synchronized with a spin lock. The parameters have the same semantics as for $(D FreeList). $(D expand) is defined to forward to $(D ParentAllocator.expand) (it must also be $(D shared)).
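A minimal sketch of a process-wide pool (bounds and node cap are illustrative):
---
import stdx.allocator.mallocator : Mallocator;

// One pool of 64- to 128-byte blocks shared by all threads, keeping
// at most about 10 blocks parked on the list at a time.
static shared SharedFreeList!(Mallocator, 64, 128, 10) pool;
auto b = pool.allocate(96); // eligible: rounded up to 128 bytes
pool.deallocate(b);         // pushed onto the shared free list
---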
*/ struct SharedFreeList(ParentAllocator, size_t minSize, size_t maxSize = minSize, size_t approxMaxNodes = unbounded) { static assert(approxMaxNodes, "approxMaxNodes must not be zero."); static assert(minSize != unbounded, "Use minSize = 0 for no low bound."); static assert(maxSize >= (void*).sizeof, "Maximum size must accommodate a pointer."); import core.atomic : atomicOp, cas; import core.internal.spinlock : SpinLock; private enum unchecked = minSize == 0 && maxSize == unbounded; static if (minSize != chooseAtRuntime) { alias min = minSize; } else { private shared size_t _min = chooseAtRuntime; @property size_t min() const shared { assert(_min != chooseAtRuntime); return _min; } @property void min(size_t x) shared { if (!(x <= max)) throw excBounds; if (!(cas(&_min, chooseAtRuntime, x))) throw excMin; } static if (maxSize == chooseAtRuntime) { // Both bounds can be set, provide one function for setting both in // one shot. void setBounds(size_t low, size_t high) shared { if (!(low <= high && high >= (void*).sizeof)) throw excBounds; if (!(cas(&_min, chooseAtRuntime, low))) throw excMin; if (!(cas(&_max, chooseAtRuntime, high))) throw excMax; } } } private bool tooSmall(size_t n) const shared { static if (minSize == 0) return false; else static if (minSize == chooseAtRuntime) return n < _min; else return n < minSize; } static if (maxSize != chooseAtRuntime) { alias max = maxSize; } else { private shared size_t _max = chooseAtRuntime; @property size_t max() const shared { return _max; } @property void max(size_t x) shared { if (!(x >= min && x >= (void*).sizeof)) throw excBounds; if (!(cas(&_max, chooseAtRuntime, x))) throw excMax; } } private bool tooLarge(size_t n) const shared { static if (maxSize == unbounded) return false; else static if (maxSize == chooseAtRuntime) return n > _max; else return n > maxSize; } private bool freeListEligible(size_t n) const shared { static if (minSize == maxSize && minSize != chooseAtRuntime) return n == maxSize; else return !tooSmall(n) && !tooLarge(n); } static if (approxMaxNodes != chooseAtRuntime) { alias approxMaxLength = approxMaxNodes; } else { private shared size_t _approxMaxLength = chooseAtRuntime; @property size_t approxMaxLength() const shared { return _approxMaxLength; } @property void approxMaxLength(size_t x) shared { if (x == 0) throw excX; _approxMaxLength = x; } } static if (approxMaxNodes != unbounded) { private shared size_t nodes; private void incNodes() shared { atomicOp!("+=")(nodes, 1); } private void decNodes() shared { assert(nodes); atomicOp!("-=")(nodes, 1); } private void resetNodes() shared { nodes = 0; } private bool nodesFull() shared { return nodes >= approxMaxLength; } } else { private static void incNodes() { } private static void decNodes() { } private static void resetNodes() { } private enum bool nodesFull = false; } version (StdDdoc) { /** Properties for getting (and possibly setting) the bounds. Setting bounds is allowed only once, and before any allocation takes place. Otherwise, the primitives have the same semantics as those of $(D FreeList). */ @property size_t min(); /// Ditto @property void min(size_t newMinSize); /// Ditto @property size_t max(); /// Ditto @property void max(size_t newMaxSize); /// Ditto void setBounds(size_t newMin, size_t newMax); /** Properties for getting (and possibly setting) the approximate maximum length of a shared freelist. */ @property size_t approxMaxLength() const shared; /// ditto @property void approxMaxLength(size_t x) shared; } /** The parent allocator.
Depending on whether $(D ParentAllocator) holds state or not, this is a member variable or an alias for `ParentAllocator.instance`. */ static if (stateSize!ParentAllocator) shared ParentAllocator parent; else alias parent = ParentAllocator.instance; mixin(forwardToMember("parent", "expand")); private SpinLock lock; private struct Node { Node* next; } static assert(ParentAllocator.alignment >= Node.alignof); private Node* _root; /// Standard primitives. enum uint alignment = ParentAllocator.alignment; /// Ditto size_t goodAllocSize(size_t bytes) shared { if (freeListEligible(bytes)) return maxSize == unbounded ? bytes : max; return parent.goodAllocSize(bytes); } /// Ditto static if (__traits(hasMember, ParentAllocator, "owns")) Ternary owns(void[] b) shared const { return parent.owns(b); } /// Ditto static if (__traits(hasMember, ParentAllocator, "reallocate")) bool reallocate(ref void[] b, size_t s) shared { return parent.reallocate(b, s); } /// Ditto void[] allocate(size_t bytes) shared { assert(bytes < size_t.max / 2); if (!freeListEligible(bytes)) return parent.allocate(bytes); if (maxSize != unbounded) bytes = max; // Try to pop off the freelist lock.lock(); if (!_root) { lock.unlock(); return allocateFresh(bytes); } else { auto oldRoot = _root; _root = _root.next; decNodes(); lock.unlock(); return (cast(ubyte*) oldRoot)[0 .. bytes]; } } private void[] allocateFresh(const size_t bytes) shared { assert(bytes == max || max == unbounded); return parent.allocate(bytes); } /// Ditto bool deallocate(void[] b) shared { if (!nodesFull && freeListEligible(b.length)) { auto newRoot = cast(shared Node*) b.ptr; lock.lock(); newRoot.next = _root; _root = newRoot; incNodes(); lock.unlock(); return true; } static if (__traits(hasMember, ParentAllocator, "deallocate")) return parent.deallocate(b); else return false; } /// Ditto bool deallocateAll() shared { bool result = false; lock.lock(); scope(exit) lock.unlock(); static if (__traits(hasMember, ParentAllocator, "deallocateAll")) { result = parent.deallocateAll(); } else static if (__traits(hasMember, ParentAllocator, "deallocate")) { result = true; for (auto n = _root; n;) { auto tmp = n.next; if (!parent.deallocate((cast(ubyte*) n)[0 .. max])) result = false; n = tmp; } } _root = null; resetNodes(); return result; } /** Nonstandard function that minimizes the memory usage of the freelist by freeing each element in turn. Defined only if $(D ParentAllocator) defines $(D deallocate). */ static if (__traits(hasMember, ParentAllocator, "deallocate") && !unchecked) void minimize() shared { lock.lock(); scope(exit) lock.unlock(); for (auto n = _root; n;) { auto tmp = n.next; parent.deallocate((cast(ubyte*) n)[0 .. 
max]); n = tmp; } _root = null; resetNodes(); } } /// @nogc @safe unittest { import stdx.allocator.common : chooseAtRuntime; import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, chooseAtRuntime, chooseAtRuntime) a; a.setBounds(64, 128); assert(a.max == 128); assert(a.min == 64); } /// @nogc @safe unittest { import stdx.allocator.common : chooseAtRuntime; import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, 50, 50, chooseAtRuntime) a; // Set the maxSize first so setting the minSize doesn't throw a.approxMaxLength = 128; assert(a.approxMaxLength == 128); a.approxMaxLength = 1024; assert(a.approxMaxLength == 1024); a.approxMaxLength = 1; assert(a.approxMaxLength == 1); } @system unittest { import core.thread : ThreadGroup; import std.algorithm.comparison : equal; import stdx.allocator.mallocator : Mallocator; import std.range : repeat; static shared SharedFreeList!(Mallocator, 64, 128, 10) a; assert(a.goodAllocSize(1) == platformAlignment); auto b = a.allocate(96); a.deallocate(b); void fun() @nogc { auto b = cast(size_t[]) a.allocate(96); b[] = cast(size_t) &b; assert(b.equal(repeat(cast(size_t) &b, b.length))); a.deallocate(b); } auto tg = new ThreadGroup; foreach (i; 0 .. 20) { tg.create(&fun); } tg.joinAll(); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; static shared SharedFreeList!(Mallocator, 64, 128, 10) a; auto b = a.allocate(100); a.deallocate(b); assert(a.nodes == 1); b = []; a.deallocateAll(); assert(a.nodes == 0); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; static shared SharedFreeList!(Mallocator, 64, 128, 10) a; auto b = a.allocate(100); auto c = a.allocate(100); a.deallocate(c); assert(a.nodes == 1); c = []; a.minimize(); assert(a.nodes == 0); a.deallocate(b); assert(a.nodes == 1); b = []; a.minimize(); assert(a.nodes == 0); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; static shared SharedFreeList!(Mallocator, 64, 128, 10) a; auto b = a.allocate(100); auto c = a.allocate(100); assert(a.nodes == 0); a.deallocate(b); a.deallocate(c); assert(a.nodes == 2); b = []; c = []; a.minimize(); assert(a.nodes == 0); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, chooseAtRuntime, chooseAtRuntime) a; scope(exit) a.deallocateAll(); auto c = a.allocate(64); assert(a.reallocate(c, 96)); assert(c.length == 96); a.deallocate(c); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, chooseAtRuntime, chooseAtRuntime, chooseAtRuntime) a; scope(exit) a.deallocateAll; a.allocate(64); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, 30, 40) a; scope(exit) a.deallocateAll; a.allocate(64); } @nogc @system unittest { import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, 30, 40, chooseAtRuntime) a; scope(exit) a.deallocateAll; a.allocate(64); } @nogc @system unittest { // Pull request #5556 import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, 0, chooseAtRuntime) a; scope(exit) a.deallocateAll; a.max = 64; a.allocate(64); } @nogc @system unittest { // Pull request #5556 import stdx.allocator.mallocator : Mallocator; shared SharedFreeList!(Mallocator, chooseAtRuntime, 64) a; scope(exit) a.deallocateAll; a.min = 32; a.allocate(64); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/free_tree.d 0000664 0000000 0000000 
00000031754 13535263154 0027367 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.free_tree; import stdx.allocator.common; //debug = std_experimental_allocator_free_tree; /** The Free Tree allocator, stackable on top of any other allocator, bears similarity with the free list allocator. Instead of a singly-linked list of previously freed blocks, it maintains a binary search tree. This allows the Free Tree allocator to manage blocks of arbitrary lengths and search them efficiently. Common uses of $(D FreeTree) include: $(UL $(LI Adding $(D deallocate) capability to an allocator that lacks it (such as simple regions).) $(LI Getting the benefits of multiple adaptable freelists that do not need to be tuned for one specific size but instead automatically adapt themselves to frequently used sizes.) ) The free tree has special handling of duplicates (a singly-linked list per node) in anticipation of a large number of duplicates. Allocation time from the free tree is expected to be $(BIGOH log n) where $(D n) is the number of distinct sizes (not total nodes) kept in the free tree. Allocation requests first search the tree for a buffer of suitable size deallocated in the past. If a match is found, the node is removed from the tree and the memory is returned. Otherwise, the allocation is directed to $(D ParentAllocator). If at this point $(D ParentAllocator) also fails to allocate, $(D FreeTree) frees everything and then tries the parent allocator again. Upon deallocation, the deallocated block is inserted in the internally maintained free tree (not returned to the parent). The free tree is not kept balanced. Instead, it has a last-in-first-out flavor because newly inserted blocks are rotated to the root of the tree. That way allocations are cache friendly and also frequently used sizes are more likely to be found quickly, whereas seldom used sizes migrate to the leaves of the tree. $(D FreeTree) rounds up small allocations to at least $(D 4 * size_t.sizeof), which on 64-bit systems is one cache line size. If very small objects need to be efficiently allocated, the $(D FreeTree) should be fronted with an appropriate small object allocator. The following methods are defined if $(D ParentAllocator) defines them, and forward to it: $(D allocateAll), $(D expand), $(D owns), $(D reallocate). */ struct FreeTree(ParentAllocator) { static assert(ParentAllocator.alignment % size_t.alignof == 0, "FreeTree must be on top of a word-aligned allocator"); import mir.utility : min, max; // State static if (stateSize!ParentAllocator) private ParentAllocator parent; else private alias parent = ParentAllocator.instance; private Node* root; // that's the entire added state private struct Node { Node*[2] kid; Node* sibling; size_t size; ref Node* left() { return kid[0]; } ref Node* right() { return kid[1]; } } // Removes "which" from the tree, returns the memory it occupied private void[] remove(ref Node* which) { assert(which); assert(!which.sibling); auto result = (cast(ubyte*) which)[0 ..
which.size]; if (!which.right) which = which.left; else if (!which.left) which = which.right; else { // result has two kids static bool toggler; // Crude randomization: alternate left/right choices toggler = !toggler; auto newRoot = which.kid[toggler], orphan = which.kid[!toggler]; which = newRoot; for (Node* n = void; (n = newRoot.kid[!toggler]) !is null; ) { newRoot = n; } newRoot.kid[!toggler] = orphan; } return result; } private void[] findAndRemove(ref Node* n, size_t s) { if (!n) return null; if (s == n.size) { if (auto sis = n.sibling) { // Nice, give away one from the freelist auto result = (cast(ubyte*) sis)[0 .. sis.size]; n.sibling = sis.sibling; return result; } return remove(n); } return findAndRemove(n.kid[s > n.size], s); } debug(std_experimental_allocator_free_tree) private void dump()() { import std.stdio : writef, writefln, writeln; writeln(typeof(this).stringof, "@", &this, " {"); scope(exit) writeln("}"); if (!root) return; static void recurse(Node* n, uint indent = 4) { if (!n) { writefln("%*s(null)", indent, ""); return; } for (auto sis = n; sis; sis = sis.sibling) { writef("%*s%x (%s bytes) ", indent, "", cast(void*) n, n.size); } writeln; if (!n.left && !n.right) return; recurse(n.left, indent + 4); recurse(n.right, indent + 4); } recurse(root); } private static void rotate(ref Node* parent, bool toRight) { assert(parent); auto opposing = parent.kid[!toRight]; if (!opposing) return; parent.kid[!toRight] = opposing.kid[toRight]; opposing.kid[toRight] = parent; parent = opposing; } // Inserts which into the tree, making it the new root private void insertAsRoot(Node* which) { assert(which); debug(std_experimental_allocator_free_tree) { assertValid; scope(exit) assertValid; } static void recurse(ref Node* where, Node* which) { if (!where) { where = which; which.left = null; which.right = null; which.sibling = null; return; } if (which.size == where.size) { // Special handling of duplicates which.sibling = where.sibling; where.sibling = which; which.left = null; which.right = null; return; } bool goRight = which.size > where.size; recurse(where.kid[goRight], which); rotate(where, !goRight); } recurse(root, which); } private void assertValid() { debug(std_experimental_allocator_free_tree) { static bool isBST(Node* n, size_t lb = 0, size_t ub = size_t.max) { if (!n) return true; for (auto sis = n.sibling; sis; sis = sis.sibling) { assert(n.size == sis.size); assert(sis.left is null); assert(sis.right is null); } return lb < n.size && n.size <= ub && isBST(n.left, lb, min(ub, n.size)) && isBST(n.right, max(lb, n.size), ub); } if (isBST(root)) return; dump; assert(0); } } /** The $(D FreeTree) is word aligned. */ enum uint alignment = size_t.alignof; /** The $(D FreeTree) allocator is noncopyable. */ this(this) @disable; /** The destructor of $(D FreeTree) releases all memory back to the parent allocator. */ static if (__traits(hasMember, ParentAllocator, "deallocate")) ~this() { clear; } /** Returns $(D parent.goodAllocSize(max(Node.sizeof, s))). */ static if (stateSize!ParentAllocator) size_t goodAllocSize(size_t s) { return parent.goodAllocSize(max(Node.sizeof, s)); } else static size_t goodAllocSize(size_t s) { return parent.goodAllocSize(max(Node.sizeof, s)); } /** Allocates $(D n) bytes of memory. First consults the free tree, and returns from it if a suitably sized block is found. Otherwise, the parent allocator is tried. If allocation from the parent succeeds, the allocated block is returned. 
Otherwise, the free tree tries an alternate strategy: If $(D ParentAllocator) defines $(D deallocate), $(D FreeTree) releases all of its contents and tries again. TODO: Splitting and coalescing should be implemented if $(D ParentAllocator) does not define $(D deallocate). */ void[] allocate(size_t n) { assertValid; if (n == 0) return null; immutable s = goodAllocSize(n); // Consult the free tree. auto result = findAndRemove(root, s); if (result.ptr) return result.ptr[0 .. n]; // No block found, try the parent allocator. result = parent.allocate(s); if (result.ptr) return result.ptr[0 .. n]; // Parent ran out of juice, desperation mode on static if (__traits(hasMember, ParentAllocator, "deallocate")) { clear; // Try parent allocator again. result = parent.allocate(s); if (result.ptr) return result.ptr[0 .. n]; return null; } else { // TODO: get smart here return null; } } // Forwarding methods mixin(forwardToMember("parent", "allocateAll", "expand", "owns", "reallocate")); /** Places $(D b) into the free tree. */ bool deallocate(void[] b) { if (!b.ptr) return true; auto which = cast(Node*) b.ptr; which.size = goodAllocSize(b.length); // deliberately don't initialize which.left and which.right assert(which.size >= Node.sizeof); insertAsRoot(which); return true; } @system unittest // build a complex free tree { import stdx.allocator.gc_allocator, std.range; FreeTree!GCAllocator a; uint[] sizes = [3008,704,1856,576,1632,672,832,1856,1120,2656,1216,672, 448,992,2400,1376,2688,2656,736,1440]; void[][] allocs; foreach (s; sizes) allocs ~= a.allocate(s); foreach_reverse (b; allocs) { assert(b.ptr); a.deallocate(b); } a.assertValid; allocs = null; foreach (s; sizes) allocs ~= a.allocate(s); assert(a.root is null); a.assertValid; } /** Defined if $(D ParentAllocator.deallocate) exists, and returns to it all memory held in the free tree. */ static if (__traits(hasMember, ParentAllocator, "deallocate")) void clear() { void recurse(Node* n) { if (!n) return; recurse(n.left); recurse(n.right); parent.deallocate((cast(ubyte*) n)[0 .. n.size]); } recurse(root); root = null; } /** Defined if $(D ParentAllocator.deallocateAll) exists, and forwards to it. Also nullifies the free tree (it's assumed the parent frees all memory still managed by the free tree).
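A minimal sketch of the forwarding behavior, using a hypothetical `StubParent` (not part of this package):

----
static struct StubParent
{
    byte dummy; // give the parent state so FreeTree embeds an instance
    enum alignment = size_t.sizeof;
    size_t goodAllocSize(size_t s) { return (s + 15) & ~size_t(15); }
    void[] allocate(size_t s) { return new ubyte[](s); }
    bool deallocateAll() { return true; } // pretend to release everything
}
FreeTree!StubParent a;
auto b = a.allocate(64);
a.deallocate(b);         // b is parked in the free tree
assert(a.deallocateAll); // forwards to the parent; the tree root is nulled
----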
*/ static if (__traits(hasMember, ParentAllocator, "deallocateAll")) bool deallocateAll() { // This is easy, just nuke the root and deallocate all from the // parent root = null; return parent.deallocateAll; } } @system unittest { import stdx.allocator.gc_allocator; testAllocator!(() => FreeTree!GCAllocator()); } @system unittest // issue 16506 { import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mallocator : Mallocator; static void f(ParentAllocator)(size_t sz) { static FreeTree!ParentAllocator myAlloc; byte[] _payload = cast(byte[]) myAlloc.allocate(sz); assert(_payload, "_payload is null"); _payload[] = 0; myAlloc.deallocate(_payload); } f!Mallocator(33); f!Mallocator(43); f!GCAllocator(1); } @system unittest // issue 16507 { static struct MyAllocator { byte dummy; static bool alive = true; void[] allocate(size_t s) { return new byte[](s); } bool deallocate(void[] ) { if (alive) assert(false); return true; } enum alignment = size_t.sizeof; } FreeTree!MyAllocator ft; void[] x = ft.allocate(1); ft.deallocate(x); ft.allocate(1000); MyAllocator.alive = false; } @system unittest // "desperation mode" { uint myDeallocCounter = 0; struct MyAllocator { byte[] allocation; void[] allocate(size_t s) { if (allocation.ptr) return null; allocation = new byte[](s); return allocation; } bool deallocate(void[] ) { ++myDeallocCounter; allocation = null; return true; } enum alignment = size_t.sizeof; } FreeTree!MyAllocator ft; void[] x = ft.allocate(1); ft.deallocate(x); assert(myDeallocCounter == 0); x = ft.allocate(1000); // Triggers "desperation mode". assert(myDeallocCounter == 1); assert(x.ptr); void[] y = ft.allocate(1000); /* Triggers "desperation mode" but there's nothing to deallocate so MyAllocator can't deliver. */ assert(myDeallocCounter == 1); assert(y.ptr is null); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/kernighan_ritchie.d 0000664 0000000 0000000 00000067267 13535263154 0031114 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.kernighan_ritchie; import stdx.allocator.building_blocks.null_allocator; //debug = KRRegion; debug(KRRegion) import std.stdio; // KRRegion /** $(D KRRegion) draws inspiration from the $(MREF_ALTTEXT region allocation strategy, std,experimental,allocator,building_blocks,region) and also the $(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book, famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7 of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C Programming Language"), Second Edition, Prentice Hall, 1988. $(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator) Initially, `KRRegion` starts in "region" mode: allocations are served from the memory chunk in a region fashion. Thus, as long as there is enough memory left, $(D KRRegion.allocate) has the performance profile of a region allocator. Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks in an unstructured freelist, which is not read in region mode. Once the region cannot serve an $(D allocate) request, $(D KRRegion) switches to "free list" mode. It sorts the list of previously deallocated blocks by address and serves allocation requests off that free list. The allocation and deallocation follow the pattern described by Kernighan and Ritchie. The recommended use of `KRRegion` is as a $(I region with deallocation). If the `KRRegion` is dimensioned appropriately, it could often not enter free list mode during its lifetime. 
Thus it is as fast as a simple region, whilst offering deallocation at a small cost. When the region memory is exhausted, the previously deallocated memory is still usable, at a performance cost. If the region is not excessively large and fragmented, the linear allocation and deallocation cost may still be compensated for by the good locality characteristics. If the chunk of memory managed is large, it may be desirable to switch management to free list from the beginning. That way, memory may be used in a more compact manner than region mode. To force free list mode, call $(D switchToFreeList) shortly after construction or when deemed appropriate. The smallest size that can be allocated is two words (16 bytes on 64-bit systems, 8 bytes on 32-bit systems). This is because the free list management needs two words (one for the length, the other for the next pointer in the singly-linked list). The $(D ParentAllocator) type parameter is the type of the allocator used to allocate the memory chunk underlying the $(D KRRegion) object. Choosing the default ($(D NullAllocator)) means the user is responsible for passing a buffer at construction (and for deallocating it if necessary). Otherwise, $(D KRRegion) automatically deallocates the buffer during destruction. For that reason, if $(D ParentAllocator) is not $(D NullAllocator), then $(D KRRegion) is not copyable. $(H4 Implementation Details) In free list mode, $(D KRRegion) embeds a free blocks list onto the chunk of memory. The free list is circular, coalesced, and sorted by address at all times. Allocations and deallocations take time proportional to the number of previously deallocated blocks. (In practice the cost may be lower, e.g. if memory is deallocated in reverse order of allocation, all operations take constant time.) Memory utilization is good (small control structure and no per-allocation overhead). The disadvantages of freelist mode include proneness to fragmentation, a minimum allocation size of two words, and linear worst-case allocation and deallocation times. Similarities of `KRRegion` (in free list mode) with the Kernighan-Ritchie allocator: $(UL $(LI Free blocks have variable size and are linked in a singly-linked list.) $(LI The freelist is maintained in increasing address order, which makes coalescing easy.) $(LI The strategy for finding the next available block is first fit.) $(LI The free list is circular, with the last node pointing back to the first.) $(LI Coalescing is carried during deallocation.) ) Differences from the Kernighan-Ritchie allocator: $(UL $(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates another chunk using operating system primitives. For better composability, $(D KRRegion) just gets full (returns $(D null) on new allocation requests). The decision to allocate more blocks is deferred to a higher-level entity. For an example, see the example below using $(D AllocatorList) in conjunction with $(D KRRegion).) $(LI Allocated blocks do not hold a size prefix. This is because in D the size information is available in client code at deallocation time.) ) */ struct KRRegion(ParentAllocator = NullAllocator) { import stdx.allocator.common : stateSize, alignedAt; import stdx.allocator.internal : Ternary; private static struct Node { import mir.functional : RefTuple; alias Tuple = RefTuple!(void[], Node*); Node* next; size_t size; this(this) @disable; void[] payload() inout { return (cast(ubyte*) &this)[0 .. 
size]; } bool adjacent(in Node* right) const { assert(right); auto p = payload; return p.ptr < right && right < p.ptr + p.length + Node.sizeof; } bool coalesce(void* memoryEnd = null) { // Coalesce the last node before the memory end with any possible gap if (memoryEnd && memoryEnd < payload.ptr + payload.length + Node.sizeof) { size += memoryEnd - (payload.ptr + payload.length); return true; } if (!adjacent(next)) return false; size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this; next = next.next; return true; } Tuple allocateHere(size_t bytes) { assert(bytes >= Node.sizeof); assert(bytes % Node.alignof == 0); assert(next); assert(!adjacent(next)); if (size < bytes) return typeof(return)(); assert(size >= bytes); immutable leftover = size - bytes; if (leftover >= Node.sizeof) { // There's room for another node auto newNode = cast(Node*) ((cast(ubyte*) &this) + bytes); newNode.size = leftover; newNode.next = next == &this ? newNode : next; assert(next); return Tuple(payload, newNode); } // No slack space, just return next node return Tuple(payload, next == &this ? null : next); } } // state /** If $(D ParentAllocator) holds state, $(D parent) is a public member of type $(D KRRegion). Otherwise, $(D parent) is an $(D alias) for `ParentAllocator.instance`. */ static if (stateSize!ParentAllocator) ParentAllocator parent; else alias parent = ParentAllocator.instance; private void[] payload; private Node* root; private bool regionMode = true; auto byNodePtr() { static struct Range { Node* start, current; @property bool empty() { return !current; } @property Node* front() { return current; } void popFront() { assert(current && current.next); current = current.next; if (current == start) current = null; } @property Range save() { return this; } } import std.range : isForwardRange; static assert(isForwardRange!Range); return Range(root, root); } string toString()() { import std.format : format; string s = "KRRegion@"; s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1, payload.ptr, payload.length, regionMode ? "(region)" : "(freelist)"); Node* lastNode = null; if (!regionMode) { foreach (node; byNodePtr) { s ~= format(", %sfree(0x%s[%s])", lastNode && lastNode.adjacent(node) ? "+" : "", cast(void*) node, node.size); lastNode = node; } } else { for (auto node = root; node; node = node.next) { s ~= format(", %sfree(0x%s[%s])", lastNode && lastNode.adjacent(node) ? 
"+" : "", cast(void*) node, node.size); lastNode = node; } } s ~= ')'; return s; } private void assertValid(string s) { assert(!regionMode); if (!payload.ptr) { assert(!root, s); return; } if (!root) { return; } assert(root >= payload.ptr, s); assert(root < payload.ptr + payload.length, s); // Check that the list terminates size_t n; foreach (node; byNodePtr) { assert(node.next); assert(!node.adjacent(node.next)); assert(n++ < payload.length / Node.sizeof, s); } } private Node* sortFreelist(Node* root) { // Find a monotonic run auto last = root; for (;;) { if (!last.next) return root; if (last > last.next) break; assert(last < last.next); last = last.next; } auto tail = last.next; last.next = null; tail = sortFreelist(tail); return merge(root, tail); } private Node* merge(Node* left, Node* right) { assert(left != right); if (!left) return right; if (!right) return left; if (left < right) { auto result = left; result.next = merge(left.next, right); return result; } auto result = right; result.next = merge(left, right.next); return result; } private void coalesceAndMakeCircular() { for (auto n = root;;) { assert(!n.next || n < n.next); if (!n.next) { // Convert to circular n.next = root; break; } if (n.coalesce) continue; // possibly another coalesce n = n.next; } } /** Create a $(D KRRegion). If $(D ParentAllocator) is not $(D NullAllocator), $(D KRRegion)'s destructor will call $(D parent.deallocate). Params: b = Block of memory to serve as support for the allocator. Memory must be larger than two words and word-aligned. n = Capacity desired. This constructor is defined only if $(D ParentAllocator) is not $(D NullAllocator). */ this(ubyte[] b) { if (b.length < Node.sizeof) { // Init as empty assert(root is null); assert(payload is null); return; } assert(b.length >= Node.sizeof); assert(b.ptr.alignedAt(Node.alignof)); assert(b.length >= 2 * Node.sizeof); payload = b; root = cast(Node*) b.ptr; // Initialize the free list with all list assert(regionMode); root.next = null; root.size = b.length; debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this, b.ptr, b.length); } /// Ditto static if (!is(ParentAllocator == NullAllocator)) this(size_t n) { assert(n > Node.sizeof); this(cast(ubyte[])(parent.allocate(n))); } /// Ditto static if (!is(ParentAllocator == NullAllocator) && __traits(hasMember, ParentAllocator, "deallocate")) ~this() { parent.deallocate(payload); } /** Forces free list mode. If already in free list mode, does nothing. Otherwise, sorts the free list accumulated so far and switches strategy for future allocations to KR style. */ void switchToFreeList() { if (!regionMode) return; regionMode = false; if (!root) return; root = sortFreelist(root); coalesceAndMakeCircular; } /* Noncopyable */ @disable this(this); /** Word-level alignment. */ enum alignment = Node.alignof; /** Allocates $(D n) bytes. Allocation searches the list of available blocks until a free block with $(D n) or more bytes is found (first fit strategy). The block is split (if larger) and returned. Params: n = number of bytes to _allocate Returns: A word-aligned buffer of $(D n) bytes, or $(D null). 
*/ void[] allocate(size_t n) { if (!n || !root) return null; const actualBytes = goodAllocSize(n); // Try the region first if (regionMode) { // Only look at the head of the freelist if (root.size >= actualBytes) { // Enough room for allocation void* result = root; immutable balance = root.size - actualBytes; if (balance >= Node.sizeof) { auto newRoot = cast(Node*) (result + actualBytes); newRoot.next = root.next; newRoot.size = balance; root = newRoot; } else { root = null; switchToFreeList; } return result[0 .. n]; } // Not enough memory, switch to freelist mode and fall through switchToFreeList; } // Try to allocate from next after the iterating node for (auto pnode = root;;) { assert(!pnode.adjacent(pnode.next)); auto k = pnode.next.allocateHere(actualBytes); if (k[0] !is null) { // awes assert(k[0].length >= n); if (root == pnode.next) root = k[1]; pnode.next = k[1]; return k[0][0 .. n]; } pnode = pnode.next; if (pnode == root) break; } return null; } /** Deallocates $(D b), which is assumed to have been previously allocated with this allocator. Deallocation performs a linear search in the free list to preserve its sorting order. It follows that blocks with higher addresses in allocators with many free blocks are slower to deallocate. Params: b = block to be deallocated */ bool deallocate(void[] b) { debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this, b.ptr, b.length); if (!b.ptr) return true; assert(owns(b) == Ternary.yes); assert(b.ptr.alignedAt(Node.alignof)); // Insert back in the freelist, keeping it sorted by address. Do not // coalesce at this time. Instead, do it lazily during allocation. auto n = cast(Node*) b.ptr; n.size = goodAllocSize(b.length); auto memoryEnd = payload.ptr + payload.length; if (regionMode) { assert(root); // Insert right after root n.next = root.next; root.next = n; return true; } if (!root) { // What a sight for sore eyes root = n; root.next = root; // If the first block freed is the last one allocated, // maybe there's a gap after it. root.coalesce(memoryEnd); return true; } version(assert) foreach (test; byNodePtr) { assert(test != n); } // Linear search auto pnode = root; do { assert(pnode && pnode.next); assert(pnode != n); assert(pnode.next != n); if (pnode < pnode.next) { if (pnode >= n || n >= pnode.next) continue; // Insert in between pnode and pnode.next n.next = pnode.next; pnode.next = n; n.coalesce; pnode.coalesce; root = pnode; return true; } else if (pnode < n) { // Insert at the end of the list // Add any possible gap at the end of n to the length of n n.next = pnode.next; pnode.next = n; n.coalesce(memoryEnd); pnode.coalesce; root = pnode; return true; } else if (n < pnode.next) { // Insert at the front of the list n.next = pnode.next; pnode.next = n; n.coalesce; root = n; return true; } } while ((pnode = pnode.next) != root); assert(0, "Wrong parameter passed to deallocate"); } /** Allocates all memory available to this allocator. If the allocator is empty, returns the entire available block of memory. Otherwise, it still performs a best-effort allocation: if there is no fragmentation (e.g. $(D allocate) has been used but not $(D deallocate)), allocates and returns the only available block of memory. The operation takes time proportional to the number of adjacent free blocks at the front of the free list. These blocks get coalesced, whether $(D allocateAll) succeeds or fails due to fragmentation. 
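A short sketch of the best-effort behavior under fragmentation (buffer and request sizes are arbitrary):

----
align(size_t.sizeof) ubyte[4096] buf;
auto r = KRRegion!()(buf);
auto a = r.allocate(64), b = r.allocate(64), c = r.allocate(64);
r.deallocate(a);
r.deallocate(c); // two non-adjacent free runs: the store is fragmented
assert(r.allocateAll() is null); // best effort cannot return a single block
----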
*/ void[] allocateAll() { if (regionMode) switchToFreeList; if (root && root.next == root) return allocate(root.size); return null; } /// @system unittest { import stdx.allocator.gc_allocator : GCAllocator; auto alloc = KRRegion!GCAllocator(1024 * 64); const b1 = alloc.allocate(2048); assert(b1.length == 2048); const b2 = alloc.allocateAll; assert(b2.length == 1024 * 62); } /** Deallocates all memory currently allocated, making the allocator ready for other allocations. This is a $(BIGOH 1) operation. */ bool deallocateAll() { debug(KRRegion) assertValid("deallocateAll"); debug(KRRegion) scope(exit) assertValid("deallocateAll"); root = cast(Node*) payload.ptr; // Initialize the free list with all list if (root) { root.next = root; root.size = payload.length; } return true; } /** Checks whether the allocator is responsible for the allocation of $(D b). It does a simple $(BIGOH 1) range check. $(D b) should be a buffer either allocated with $(D this) or obtained through other means. */ Ternary owns(void[] b) { debug(KRRegion) assertValid("owns"); debug(KRRegion) scope(exit) assertValid("owns"); return Ternary(b.ptr >= payload.ptr && b.ptr < payload.ptr + payload.length); } /** Adjusts $(D n) to a size suitable for allocation (two words or larger, word-aligned). */ static size_t goodAllocSize(size_t n) { import stdx.allocator.common : roundUpToMultipleOf; return n <= Node.sizeof ? Node.sizeof : n.roundUpToMultipleOf(alignment); } /** Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise. Never returns `Ternary.unknown`. */ Ternary empty() { return Ternary(root && root.size == payload.length); } } /** $(D KRRegion) is preferable to $(D Region) as a front for a general-purpose allocator if $(D deallocate) is needed, yet the actual deallocation traffic is relatively low. The example below shows a $(D KRRegion) using stack storage fronting the GC allocator. */ @system unittest { import stdx.allocator.building_blocks.fallback_allocator : fallbackAllocator; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.internal : Ternary; // KRRegion fronting a general-purpose allocator ubyte[1024 * 128] buf; auto alloc = fallbackAllocator(KRRegion!()(buf), GCAllocator.instance); auto b = alloc.allocate(100); assert(b.length == 100); assert(alloc.primary.owns(b) == Ternary.yes); } /** The code below defines a scalable allocator consisting of 1 MB (or larger) blocks fetched from the garbage-collected heap. Each block is organized as a KR-style heap. More blocks are allocated and freed on a need basis. This is the closest example to the allocator introduced in the K$(AMP)R book. It should perform slightly better because instead of searching through one large free list, it searches through several shorter lists in LRU order. Also, it actually returns memory to the operating system when possible. */ @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mmap_allocator : MmapAllocator; AllocatorList!(n => KRRegion!MmapAllocator(max(n * 16, 1024u * 1024))) alloc; } @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mallocator : Mallocator; import stdx.allocator.internal : Ternary; /* Create a scalable allocator consisting of 1 MB (or larger) blocks fetched from the garbage-collected heap. 
Each block is organized as a KR-style heap. More blocks are allocated and freed on a need basis. */ AllocatorList!(n => KRRegion!Mallocator(max(n * 16, 1024u * 1024)), NullAllocator) alloc; void[][50] array; foreach (i; 0 .. array.length) { auto length = i * 10_000 + 1; array[i] = alloc.allocate(length); assert(array[i].ptr); assert(array[i].length == length); } import std.random : randomShuffle; randomShuffle(array[]); foreach (i; 0 .. array.length) { assert(array[i].ptr); assert(alloc.owns(array[i]) == Ternary.yes); alloc.deallocate(array[i]); } } @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mmap_allocator : MmapAllocator; import stdx.allocator.internal : Ternary; /* Create a scalable allocator consisting of 1 MB (or larger) blocks fetched from the garbage-collected heap. Each block is organized as a KR-style heap. More blocks are allocated and freed on a need basis. */ AllocatorList!((n) { auto result = KRRegion!MmapAllocator(max(n * 2, 1024u * 1024)); return result; }) alloc; void[][99] array; foreach (i; 0 .. array.length) { auto length = i * 10_000 + 1; array[i] = alloc.allocate(length); assert(array[i].ptr); foreach (j; 0 .. i) { assert(array[i].ptr != array[j].ptr); } assert(array[i].length == length); } import std.random : randomShuffle; randomShuffle(array[]); foreach (i; 0 .. array.length) { assert(alloc.owns(array[i]) == Ternary.yes); alloc.deallocate(array[i]); } } @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.common : testAllocator; import stdx.allocator.gc_allocator : GCAllocator; testAllocator!(() => AllocatorList!( n => KRRegion!GCAllocator(max(n * 16, 1024u * 1024)))()); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; auto alloc = KRRegion!GCAllocator(1024u * 1024); void[][] array; foreach (i; 1 .. 4) { array ~= alloc.allocate(i); assert(array[$ - 1].length == i); } alloc.deallocate(array[1]); alloc.deallocate(array[0]); alloc.deallocate(array[2]); assert(alloc.allocateAll().length == 1024u * 1024); } @system unittest { import std.conv : text; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.internal : Ternary; auto alloc = KRRegion!()( cast(ubyte[])(GCAllocator.instance.allocate(1024u * 1024))); const store = alloc.allocate(KRRegion!().sizeof); auto p = cast(KRRegion!()* ) store.ptr; import core.stdc.string : memcpy; import std.algorithm.mutation : move; import mir.conv : emplace; memcpy(p, &alloc, alloc.sizeof); emplace(&alloc); void[][100] array; foreach (i; 0 .. array.length) { auto length = 100 * i + 1; array[i] = p.allocate(length); assert(array[i].length == length, text(array[i].length)); assert(p.owns(array[i]) == Ternary.yes); } import std.random : randomShuffle; randomShuffle(array[]); foreach (i; 0 .. 
array.length) { assert(p.owns(array[i]) == Ternary.yes); p.deallocate(array[i]); } auto b = p.allocateAll(); assert(b.length == 1024u * 1024 - KRRegion!().sizeof, text(b.length)); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; auto alloc = KRRegion!()( cast(ubyte[])(GCAllocator.instance.allocate(1024u * 1024))); auto p = alloc.allocateAll(); assert(p.length == 1024u * 1024); alloc.deallocateAll(); p = alloc.allocateAll(); assert(p.length == 1024u * 1024); } @system unittest { import stdx.allocator.building_blocks; import std.random; import stdx.allocator.internal : Ternary; // Both sequences must work on either system // A sequence of allocs which generates the error described in issue 16564 // that is a gap at the end of buf from the perspective of the allocator // for 64 bit systems (leftover balance = 8 bytes < 16) int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8]; // for 32 bit systems (leftover balance < 8) int[] sizes32 = [81412, 107068, 49892, 23768]; static if (__VERSION__ >= 2072) { mixin(` void test(int[] sizes) { align(size_t.sizeof) ubyte[256 * 1024] buf; auto a = KRRegion!()(buf); void[][] bufs; foreach (size; sizes) { bufs ~= a.allocate(size); } foreach (b; bufs.randomCover) { a.deallocate(b); } assert(a.empty == Ternary.yes); } test(sizes64); test(sizes32); `); } } @system unittest { import stdx.allocator.building_blocks; import std.random; import stdx.allocator.internal : Ternary; // For 64 bits, we allocate in multiples of 8, but the minimum alloc size is 16. // This can create gaps. // This test is an example of such a case. The gap is formed between the block // allocated for the second value in sizes and the third. There is also a gap // at the very end. (total lost 2 * word) int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8]; int[] sizes32 = [81412, 107068, 49892, 23768]; int word64 = 8; int word32 = 4; static if (__VERSION__ >= 2072) { mixin(` void test(int[] sizes, int word) { align(size_t.sizeof) ubyte[256 * 1024] buf; auto a = KRRegion!()(buf); void[][] bufs; foreach (size; sizes) { bufs ~= a.allocate(size); } a.deallocate(bufs[1]); bufs ~= a.allocate(sizes[1] - word); a.deallocate(bufs[0]); foreach (i; 2 .. bufs.length) { a.deallocate(bufs[i]); } assert(a.empty == Ternary.yes); } test(sizes64, word64); test(sizes32, word32); `); } } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/null_allocator.d 0000664 0000000 0000000 00000005640 13535263154 0030434 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.null_allocator; /** $(D NullAllocator) is an emphatically empty implementation of the allocator interface. Although it has no direct use, it is useful as a "terminator" in composite allocators. */ struct NullAllocator { import stdx.allocator.internal : Ternary; /** $(D NullAllocator) advertises a relatively large _alignment equal to 64 KB. This is because $(D NullAllocator) never actually needs to honor this alignment and because composite allocators using $(D NullAllocator) shouldn't be unnecessarily constrained. */ enum uint alignment = 64 * 1024; // /// Returns $(D n). //size_t goodAllocSize(size_t n) shared const //{ return .goodAllocSize(this, n); } /// Always returns $(D null). static void[] allocate()(size_t) { return null; } /// Always returns $(D null). static void[] alignedAllocate()(size_t, uint) { return null; } /// Always returns $(D null). static void[] allocateAll()() { return null; } /** These methods return $(D false). Precondition: $(D b is null). 
This is because there is no other possible legitimate input. */ static bool expand()(ref void[] b, size_t s) { assert(b is null); return s == 0; } /// Ditto static bool reallocate()(ref void[] b, size_t) { assert(b is null); return false; } /// Ditto static bool alignedReallocate()(ref void[] b, size_t, uint) { assert(b is null); return false; } /// Returns $(D Ternary.no). static Ternary owns()(void[]) { return Ternary.no; } /** Returns $(D Ternary.no). */ static Ternary resolveInternalPointer()(const void*, ref void[]) { return Ternary.no; } /** No-op. Precondition: $(D b is null) */ static bool deallocate()(void[] b) { assert(b is null); return true; } /** No-op. */ static bool deallocateAll()() { return true; } /** Returns $(D Ternary.yes). */ static Ternary empty()() { return Ternary.yes; } /** Returns the $(D static) global instance of the $(D NullAllocator). */ enum NullAllocator instance = NullAllocator(); } @system unittest { assert(NullAllocator.instance.alignedAllocate(100, 0) is null); assert(NullAllocator.instance.allocateAll() is null); auto b = NullAllocator.instance.allocate(100); assert(b is null); assert(NullAllocator.instance.expand(b, 0)); assert(!NullAllocator.instance.expand(b, 42)); assert(!NullAllocator.instance.reallocate(b, 42)); assert(!NullAllocator.instance.alignedReallocate(b, 42, 0)); NullAllocator.instance.deallocate(b); assert(NullAllocator.instance.deallocateAll() == true); import stdx.allocator.internal : Ternary; assert(NullAllocator.instance.empty() == Ternary.yes); assert(NullAllocator.instance.owns(null) == Ternary.no); void[] p; assert(NullAllocator.instance.resolveInternalPointer(null, p) == Ternary.no); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/package.d 0000664 0000000 0000000 00000037621 13535263154 0027011 0 ustar 00root root 0000000 0000000 /** $(H2 Assembling Your Own Allocator) In addition to defining the interfaces above, this package also implements untyped composable memory allocators. They are $(I untyped) because they deal exclusively in $(D void[]) and have no notion of what type the memory allocated would be destined for. They are $(I composable) because the included allocators are building blocks that can be assembled in complex nontrivial allocators. $(P Unlike the allocators for the C and C++ programming languages, which manage the allocated size internally, these allocators require that the client maintains (or knows $(I a priori)) the allocation size for each piece of memory allocated. Put simply, the client must pass the allocated size upon deallocation. Storing the size in the _allocator has significant negative performance implications, and is virtually always redundant because client code needs knowledge of the allocated size in order to avoid buffer overruns. (See more discussion in a $(HTTP open-std.org/JTC1/SC22/WG21/docs/papers/2013/n3536.html, proposal) for sized deallocation in C++.) For this reason, allocators herein traffic in $(D void[]) as opposed to $(D void*).) $(P In order to be usable as an _allocator, a type should implement the following methods with their respective semantics. Only $(D alignment) and $(D allocate) are required. If any of the other methods is missing, the _allocator is assumed to not have that capability (for example some allocators do not offer manual deallocation of memory). Allocators should NOT implement unsupported methods to always fail.
For example, an allocator that lacks the capability to implement `alignedAllocate` should not define it at all (as opposed to defining it to always return `null` or throw an exception). The missing implementation statically informs other components about the allocator's capabilities and allows them to make design decisions accordingly.) $(BOOKTABLE , $(TR $(TH Method name) $(TH Semantics)) $(TR $(TDC uint alignment;, $(POST $(RES) > 0)) $(TD Returns the minimum alignment of all data returned by the allocator. An allocator may implement $(D alignment) as a statically-known $(D enum) value only. Applications that need dynamically-chosen alignment values should use the $(D alignedAllocate) and $(D alignedReallocate) APIs.)) $(TR $(TDC size_t goodAllocSize(size_t n);, $(POST $(RES) >= n)) $(TD Allocators customarily allocate memory in discretely-sized chunks. Therefore, a request for $(D n) bytes may result in a larger allocation. The extra memory allocated goes unused and adds to the so-called $(HTTP goo.gl/YoKffF,internal fragmentation). The function $(D goodAllocSize(n)) returns the actual number of bytes that would be allocated upon a request for $(D n) bytes. This module defines a default implementation that returns $(D n) rounded up to a multiple of the allocator's alignment.)) $(TR $(TDC void[] allocate(size_t s);, $(POST $(RES) is null || $(RES).length == s)) $(TD If $(D s == 0), the call may return any empty slice (including $(D null)). Otherwise, the call allocates $(D s) bytes of memory and returns the allocated block, or $(D null) if the request could not be satisfied.)) $(TR $(TDC void[] alignedAllocate(size_t s, uint a);, $(POST $(RES) is null || $(RES).length == s)) $(TD Similar to `allocate`, with the additional guarantee that the memory returned is aligned to at least `a` bytes. `a` must be a power of 2.)) $(TR $(TDC void[] allocateAll();) $(TD Offers all of allocator's memory to the caller, so it's usually defined by fixed-size allocators. If the allocator is currently NOT managing any memory, then $(D allocateAll()) shall allocate and return all memory available to the allocator, and subsequent calls to all allocation primitives should not succeed (e.g. $(D allocate) shall return $(D null) etc). Otherwise, $(D allocateAll) only works on a best-effort basis, and the allocator is allowed to return $(D null) even if does have available memory. Memory allocated with $(D allocateAll) is not otherwise special (e.g. can be reallocated or deallocated with the usual primitives, if defined).)) $(TR $(TDC bool expand(ref void[] b, size_t delta);, $(POST !$(RES) || b.length == $(I old)(b).length + delta)) $(TD Expands $(D b) by $(D delta) bytes. If $(D delta == 0), succeeds without changing $(D b). If $(D b is null), returns `false` (the null pointer cannot be expanded in place). Otherwise, $(D b) must be a buffer previously allocated with the same allocator. If expansion was successful, $(D expand) changes $(D b)'s length to $(D b.length + delta) and returns $(D true). Upon failure, the call effects no change upon the allocator object, leaves $(D b) unchanged, and returns $(D false).)) $(TR $(TDC bool reallocate(ref void[] b, size_t s);, $(POST !$(RES) || b.length == s)) $(TD Reallocates $(D b) to size $(D s), possibly moving memory around. $(D b) must be $(D null) or a buffer allocated with the same allocator. If reallocation was successful, $(D reallocate) changes $(D b) appropriately and returns $(D true). 
Upon failure, the call effects no change upon the allocator object, leaves $(D b) unchanged, and returns $(D false). An allocator should implement $(D reallocate) if it can derive some advantage from doing so; otherwise, this module defines a $(D reallocate) free function implemented in terms of $(D expand), $(D allocate), and $(D deallocate).)) $(TR $(TDC bool alignedReallocate(ref void[] b,$(BR) size_t s, uint a);, $(POST !$(RES) || b.length == s)) $(TD Similar to $(D reallocate), but guarantees the reallocated memory is aligned at $(D a) bytes. The buffer must have been originated with a call to $(D alignedAllocate). $(D a) must be a power of 2 greater than $(D (void*).sizeof). An allocator should implement $(D alignedReallocate) if it can derive some advantage from doing so; otherwise, this module defines a $(D alignedReallocate) free function implemented in terms of $(D expand), $(D alignedAllocate), and $(D deallocate).)) $(TR $(TDC Ternary owns(void[] b);) $(TD Returns `Ternary.yes` if `b` has been allocated with this allocator. An allocator should define this method only if it can decide on ownership precisely and fast (in constant time, logarithmic time, or linear time with a low multiplication factor). Traditional allocators such as the C heap do not define such functionality. If $(D b is null), the allocator shall return `Ternary.no`, i.e. no allocator owns the `null` slice.)) $(TR $(TDC Ternary resolveInternalPointer(void* p, ref void[] result);) $(TD If `p` is a pointer somewhere inside a block allocated with this allocator, `result` holds a pointer to the beginning of the allocated block and returns `Ternary.yes`. Otherwise, `result` holds `null` and returns `Ternary.no`. If the pointer points immediately after an allocated block, the result is implementation defined.)) $(TR $(TDC bool deallocate(void[] b);) $(TD If $(D b is null), does nothing and returns `true`. Otherwise, deallocates memory previously allocated with this allocator and returns `true` if successful, `false` otherwise. An implementation that would not support deallocation (i.e. would always return `false`) should not define this primitive at all.)) $(TR $(TDC bool deallocateAll();, $(POST empty)) $(TD Deallocates all memory allocated with this allocator. If an allocator implements this method, it must specify whether its destructor calls it, too.)) $(TR $(TDC Ternary empty();) $(TD Returns `Ternary.yes` if and only if the allocator holds no memory (i.e. no allocation has occurred, or all allocations have been deallocated).)) $(TR $(TDC static Allocator instance;, $(POST instance $(I is a valid) Allocator $(I object))) $(TD Some allocators are $(I monostate), i.e. have only an instance and hold only global state. (Notable examples are C's own `malloc`-based allocator and D's garbage-collected heap.) Such allocators must define a static $(D instance) member that serves as the symbolic placeholder for the global instance of the allocator. An allocator should not hold state and define `instance` simultaneously. Depending on whether the allocator is thread-safe or not, this instance may be $(D shared).)) ) $(H2 Sample Assembly) The example below features an _allocator modeled after $(HTTP goo.gl/m7329l, jemalloc), which uses a battery of free-list allocators spaced so as to keep internal fragmentation to a minimum. The $(D FList) definitions specify no bounds for the freelist because the $(D Segregator) does all size selection in advance. Sizes through 3584 bytes are handled via freelists of staggered sizes.
Sizes from 3585 bytes through 4072 KB are handled by a $(D BitmappedBlock) with a block size of 4 KB. Sizes above that are passed direct to the $(D GCAllocator). ---- alias FList = FreeList!(GCAllocator, 0, unbounded); alias A = Segregator!( 8, FreeList!(GCAllocator, 0, 8), 128, Bucketizer!(FList, 1, 128, 16), 256, Bucketizer!(FList, 129, 256, 32), 512, Bucketizer!(FList, 257, 512, 64), 1024, Bucketizer!(FList, 513, 1024, 128), 2048, Bucketizer!(FList, 1025, 2048, 256), 3584, Bucketizer!(FList, 2049, 3584, 512), 4072 * 1024, AllocatorList!( () => BitmappedBlock!(GCAllocator, 4096)(4072 * 1024)), GCAllocator ); A tuMalloc; auto b = tuMalloc.allocate(500); assert(b.length == 500); auto c = tuMalloc.allocate(113); assert(c.length == 113); assert(tuMalloc.expand(c, 14)); tuMalloc.deallocate(b); tuMalloc.deallocate(c); ---- $(H2 Allocating memory for sharing across threads) One allocation pattern used in multithreaded applications is to share memory across threads, and to deallocate blocks in a different thread than the one that allocated it. All allocators in this module accept and return $(D void[]) (as opposed to $(D shared void[])). This is because at the time of allocation, deallocation, or reallocation, the memory is effectively not $(D shared) (if it were, it would reveal a bug at the application level). The issue remains of calling $(D a.deallocate(b)) from a different thread than the one that allocated $(D b). It follows that both threads must have access to the same instance $(D a) of the respective allocator type. By definition of D, this is possible only if $(D a) has the $(D shared) qualifier. It follows that the allocator type must implement $(D allocate) and $(D deallocate) as $(D shared) methods. That way, the allocator commits to allowing usable $(D shared) instances. Conversely, allocating memory with one non-$(D shared) allocator, passing it across threads (by casting the obtained buffer to $(D shared)), and later deallocating it in a different thread (either with a different allocator object or with the same allocator object after casting it to $(D shared)) is illegal. $(H2 Building Blocks) $(P The table below gives a synopsis of predefined allocator building blocks, with their respective modules. Either `import` the needed modules individually, or `import` `stdx.building_blocks`, which imports them all `public`ly. The building blocks can be assembled in unbounded ways and also combined with your own. For a collection of typical and useful preassembled allocators and for inspiration in defining more such assemblies, refer to $(MREF std,experimental,allocator,showcase).) $(BOOKTABLE, $(TR $(TH Allocator$(BR)) $(TH Description)) $(TR $(TDC2 NullAllocator, null_allocator) $(TD Very good at doing absolutely nothing. A good starting point for defining other allocators or for studying the API.)) $(TR $(TDC3 GCAllocator, gc_allocator) $(TD The system-provided garbage-collector allocator. This should be the default fallback allocator tapping into system memory. It offers manual $(D free) and dutifully collects litter.)) $(TR $(TDC3 Mallocator, mallocator) $(TD The C heap _allocator, a.k.a. $(D malloc)/$(D realloc)/$(D free). 
Use sparingly and only for code that is unlikely to leak.)) $(TR $(TDC3 AlignedMallocator, mallocator) $(TD Interface to OS-specific _allocators that support specifying alignment: $(HTTP man7.org/linux/man-pages/man3/posix_memalign.3.html, $(D posix_memalign)) on Posix and $(HTTP msdn.microsoft.com/en-us/library/fs9stz4e(v=vs.80).aspx, $(D __aligned_xxx)) on Windows.)) $(TR $(TDC2 AffixAllocator, affix_allocator) $(TD Allocator that allows and manages allocating extra prefix and/or a suffix bytes for each block allocated.)) $(TR $(TDC2 BitmappedBlock, bitmapped_block) $(TD Organizes one contiguous chunk of memory in equal-size blocks and tracks allocation status at the cost of one bit per block.)) $(TR $(TDC2 FallbackAllocator, fallback_allocator) $(TD Allocator that combines two other allocators - primary and fallback. Allocation requests are first tried with primary, and upon failure are passed to the fallback. Useful for small and fast allocators fronting general-purpose ones.)) $(TR $(TDC2 FreeList, free_list) $(TD Allocator that implements a $(HTTP wikipedia.org/wiki/Free_list, free list) on top of any other allocator. The preferred size, tolerance, and maximum elements are configurable at compile- and run time.)) $(TR $(TDC2 SharedFreeList, free_list) $(TD Same features as $(D FreeList), but packaged as a $(D shared) structure that is accessible to several threads.)) $(TR $(TDC2 FreeTree, free_tree) $(TD Allocator similar to $(D FreeList) that uses a binary search tree to adaptively store not one, but many free lists.)) $(TR $(TDC2 Region, region) $(TD Region allocator organizes a chunk of memory as a simple bump-the-pointer allocator.)) $(TR $(TDC2 InSituRegion, region) $(TD Region holding its own allocation, most often on the stack. Has statically-determined size.)) $(TR $(TDC2 SbrkRegion, region) $(TD Region using $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk)) for allocating memory.)) $(TR $(TDC3 MmapAllocator, mmap_allocator) $(TD Allocator using $(D $(LINK2 https://en.wikipedia.org/wiki/Mmap, mmap)) directly.)) $(TR $(TDC2 StatsCollector, stats_collector) $(TD Collect statistics about any other allocator.)) $(TR $(TDC2 Quantizer, quantizer) $(TD Allocates in coarse-grained quantas, thus improving performance of reallocations by often reallocating in place. The drawback is higher memory consumption because of allocated and unused memory.)) $(TR $(TDC2 AllocatorList, allocator_list) $(TD Given an allocator factory, lazily creates as many allocators as needed to satisfy allocation requests. The allocators are stored in a linked list. 
Requests for allocation are satisfied by searching the list in a linear manner.)) $(TR $(TDC2 Segregator, segregator) $(TD Segregates allocation requests by size and dispatches them to distinct allocators.)) $(TR $(TDC2 Bucketizer, bucketizer) $(TD Divides allocation sizes in discrete buckets and uses an array of allocators, one per bucket, to satisfy requests.)) $(COMMENT $(TR $(TDC2 InternalPointersTree) $(TD Adds support for resolving internal pointers on top of another allocator.))) ) Macros: MYREF2 = $(REF_SHORT $1, std,experimental,allocator,building_blocks,$2) MYREF3 = $(REF_SHORT $1, std,experimental,allocator,$2) TDC = $(TDNW $(D $1)$+) TDC2 = $(TDNW $(D $(MYREF2 $1,$+))$(BR)$(SMALL $(D stdx.allocator.building_blocks.$2))) TDC3 = $(TDNW $(D $(MYREF3 $1,$+))$(BR)$(SMALL $(D stdx.allocator.$2))) RES = $(I result) POST = $(BR)$(SMALL $(I Post:) $(BLUE $(D $0))) */ module stdx.allocator.building_blocks; public import stdx.allocator.building_blocks.affix_allocator, stdx.allocator.building_blocks.allocator_list, stdx.allocator.building_blocks.bucketizer, stdx.allocator.building_blocks.fallback_allocator, stdx.allocator.building_blocks.free_list, stdx.allocator.building_blocks.free_tree, stdx.allocator.gc_allocator, stdx.allocator.building_blocks.bitmapped_block, stdx.allocator.building_blocks.kernighan_ritchie, stdx.allocator.mallocator, stdx.allocator.mmap_allocator, stdx.allocator.building_blocks.null_allocator, stdx.allocator.building_blocks.quantizer, stdx.allocator.building_blocks.region, stdx.allocator.building_blocks.segregator, stdx.allocator.building_blocks.stats_collector; stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/quantizer.d 0000664 0000000 0000000 00000017710 13535263154 0027445 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.quantizer; import stdx.allocator.common; /** This allocator sits on top of $(D ParentAllocator) and quantizes allocation sizes, usually from arbitrary positive numbers to a small set of round numbers (e.g. powers of two, page sizes etc). This technique is commonly used to: $(UL $(LI Preallocate more memory than requested such that later on, when reallocation is needed (e.g. to grow an array), expansion can be done quickly in place. Reallocation to smaller sizes is also fast (in-place) when the new size requested is within the same quantum as the existing size. Code that's reallocation-heavy can therefore benefit from fronting a generic allocator with a $(D Quantizer). These advantages are present even if $(D ParentAllocator) does not support reallocation at all.) $(LI Improve behavior of allocators sensitive to allocation sizes, such as $(D FreeList) and $(D FreeTree). Rounding allocation requests up makes for smaller free lists/trees at the cost of slack memory (internal fragmentation).) ) The following methods are forwarded to the parent allocator if present: $(D allocateAll), $(D owns), $(D deallocateAll), $(D empty). Preconditions: $(D roundingFunction) must satisfy three constraints. These are not enforced (save for the use of $(D assert)) for the sake of efficiency. $(OL $(LI $(D roundingFunction(n) >= n) for all $(D n) of type $(D size_t);) $(LI $(D roundingFunction) must be monotonically increasing, i.e. $(D roundingFunction(n1) <= roundingFunction(n2)) for all $(D n1 < n2);) $(LI $(D roundingFunction) must be $(D pure), i.e. always return the same value for a given $(D n).) ) */ struct Quantizer(ParentAllocator, alias roundingFunction) { /** The parent allocator. 
Depending on whether $(D ParentAllocator) holds state or not, this is a member variable or an alias for `ParentAllocator.instance`. */ static if (stateSize!ParentAllocator) { ParentAllocator parent; } else { alias parent = ParentAllocator.instance; enum Quantizer instance = Quantizer(); } /** Returns $(D roundingFunction(n)). */ size_t goodAllocSize(size_t n) { auto result = roundingFunction(n); assert(result >= n); return result; } /** Alignment is identical to that of the parent. */ enum alignment = ParentAllocator.alignment; /** Gets a larger buffer $(D buf) by calling $(D parent.allocate(goodAllocSize(n))). If $(D buf) is $(D null), returns $(D null). Otherwise, returns $(D buf[0 .. n]). */ void[] allocate(size_t n) { auto result = parent.allocate(goodAllocSize(n)); return result.ptr ? result.ptr[0 .. n] : null; } /** Defined only if $(D parent.alignedAllocate) exists and works similarly to $(D allocate) by forwarding to $(D parent.alignedAllocate(goodAllocSize(n), a)). */ static if (__traits(hasMember, ParentAllocator, "alignedAllocate")) void[] alignedAllocate(size_t n, uint a) { auto result = parent.alignedAllocate(goodAllocSize(n), a); return result.ptr ? result.ptr[0 .. n] : null; } /** First checks whether there's enough slack memory preallocated for $(D b) by evaluating $(D b.length + delta <= goodAllocSize(b.length)). If that's the case, expands $(D b) in place. Otherwise, attempts to use $(D parent.expand) appropriately if present. */ bool expand(ref void[] b, size_t delta) { if (!b.ptr) return delta == 0; immutable allocated = goodAllocSize(b.length), needed = b.length + delta, neededAllocation = goodAllocSize(needed); assert(b.length <= allocated); assert(needed <= neededAllocation); assert(allocated <= neededAllocation); // Second test needed because expand must work for null pointers, too. if (allocated == neededAllocation) { // Nice! b = b.ptr[0 .. needed]; return true; } // Hail Mary static if (__traits(hasMember, ParentAllocator, "expand")) { // Expand to the appropriate quantum auto original = b.ptr[0 .. allocated]; assert(goodAllocSize(needed) >= allocated); if (!parent.expand(original, neededAllocation - allocated)) return false; // Dial back the size b = original.ptr[0 .. needed]; return true; } else { return false; } } /** Expands or shrinks allocated block to an allocated size of $(D goodAllocSize(s)). Expansion occurs in place under the conditions required by $(D expand). Shrinking occurs in place if $(D goodAllocSize(b.length) == goodAllocSize(s)).
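Example (an illustrative addition, not from the upstream documentation): the sketch assumes a hypothetical parent allocator `MyAlignedAlloc` that defines both $(D alignedAllocate) and $(D alignedReallocate), and shows the in-place path within a single quantum.
----
alias Q = Quantizer!(MyAlignedAlloc, n => n.roundUpToMultipleOf(4096));
Q q;
auto b = q.alignedAllocate(100, 64);
// 100 and 200 round up to the same 4096-byte quantum, so the
// reallocation is resolved in place without touching the parent:
assert(q.alignedReallocate(b, 200, 64));
assert(b.length == 200);
----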
*/ static if (__traits(hasMember, ParentAllocator, "alignedAllocate")) bool alignedReallocate(ref void[] b, size_t s, uint a) { if (!b.ptr) { b = alignedAllocate(s, a); return b.length == s; } if (s >= b.length && expand(b, s - b.length)) return true; immutable toAllocate = goodAllocSize(s), allocated = goodAllocSize(b.length); // Are the lengths within the same quantum? if (allocated == toAllocate) { assert(b.ptr); // code above must have caught this // Reallocation (whether up or down) will be done in place b = b.ptr[0 .. s]; return true; } // Defer to parent (or global) with quantized size auto original = b.ptr[0 .. allocated]; if (!parent.alignedReallocate(original, toAllocate, a)) return false; b = original.ptr[0 .. s]; return true; } /** Defined if $(D ParentAllocator.deallocate) exists and forwards to $(D parent.deallocate(b.ptr[0 .. goodAllocSize(b.length)])). */ static if (__traits(hasMember, ParentAllocator, "deallocate")) bool deallocate(void[] b) { if (!b.ptr) return true; return parent.deallocate(b.ptr[0 .. goodAllocSize(b.length)]); } // Forwarding methods mixin(forwardToMember("parent", "allocateAll", "owns", "deallocateAll", "empty")); } /// @system unittest { import stdx.allocator.building_blocks.free_tree : FreeTree; import stdx.allocator.gc_allocator : GCAllocator; size_t roundUpToMultipleOf(size_t s, uint base) { auto rem = s % base; return rem ? s + base - rem : s; } // Quantize small allocations to a multiple of cache line, large ones to a // multiple of page size alias MyAlloc = Quantizer!( FreeTree!GCAllocator, n => roundUpToMultipleOf(n, n <= 16_384 ? 64 : 4096)); MyAlloc alloc; const buf = alloc.allocate(256); assert(buf.ptr); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; alias MyAlloc = Quantizer!(GCAllocator, (size_t n) => n.roundUpToMultipleOf(64)); testAllocator!(() => MyAlloc()); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/region.d 0000664 0000000 0000000 00000060403 13535263154 0026703 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.region; import stdx.allocator.building_blocks.null_allocator; import stdx.allocator.common; import std.typecons : Flag, Yes, No; /** A $(D Region) allocator allocates memory straight from one contiguous chunk. Deallocation is not supported (save for the most recently allocated block, see $(D deallocate)), and once the region is full, allocation requests return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with more sophisticated allocators; or (b) for batch-style very fast allocations that deallocate everything at once. The region only stores three pointers, corresponding to the current position in the store and the limits. One allocation entails rounding up the allocation size for alignment purposes, bumping the current pointer, and comparing it against the limit. If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region) deallocates the chunk of memory during destruction. The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the sizes of all allocation requests are rounded up to a multiple of $(D minAlign). Applications aiming at maximum speed may want to choose $(D minAlign = 1) and control alignment externally. */ struct Region(ParentAllocator = NullAllocator, uint minAlign = platformAlignment, Flag!"growDownwards" growDownwards = No.growDownwards) { static assert(minAlign.isGoodStaticAlignment); static assert(ParentAllocator.alignment >= minAlign); import stdx.allocator.internal : Ternary; // state /** The _parent allocator.
Depending on whether $(D ParentAllocator) holds state or not, this is a member variable or an alias for `ParentAllocator.instance`. */ static if (stateSize!ParentAllocator) { ParentAllocator parent; } else { alias parent = ParentAllocator.instance; } private void* _current, _begin, _end; /** Constructs a region backed by a user-provided store. Assumes $(D store) is aligned at $(D minAlign). Also assumes the memory was allocated with $(D ParentAllocator) (if different from $(D NullAllocator)). Params: store = User-provided store backing the region. $(D store) must be aligned at $(D minAlign) (enforced with $(D assert)). If $(D ParentAllocator) is different from $(D NullAllocator), memory is assumed to have been allocated with $(D ParentAllocator). n = Bytes to allocate using $(D ParentAllocator). This constructor is only defined if $(D ParentAllocator) is different from $(D NullAllocator). If $(D parent.allocate(n)) returns $(D null), the region will be initialized as empty (correctly initialized but unable to allocate). */ this(ubyte[] store) { store = cast(ubyte[])(store.roundUpToAlignment(alignment)); store = store[0 .. $.roundDownToAlignment(alignment)]; assert(store.ptr.alignedAt(minAlign)); assert(store.length % minAlign == 0); _begin = store.ptr; _end = store.ptr + store.length; static if (growDownwards) _current = _end; else _current = store.ptr; } /// Ditto static if (!is(ParentAllocator == NullAllocator)) this(size_t n) { this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment)))); } /* TODO: The postblit of $(D Region) should be disabled because such objects should not be copied around naively. */ /** If `ParentAllocator` is not `NullAllocator` and defines `deallocate`, the region defines a destructor that uses `ParentAllocator.deallocate` to free the memory chunk. */ static if (!is(ParentAllocator == NullAllocator) && __traits(hasMember, ParentAllocator, "deallocate")) ~this() { parent.deallocate(_begin[0 .. _end - _begin]); } /** Alignment offered. */ alias alignment = minAlign; /** Allocates $(D n) bytes of memory. The shortest path involves an alignment adjustment (if $(D alignment > 1)), an increment, and a comparison. Params: n = number of bytes to allocate Returns: A properly-aligned buffer of size $(D n) or $(D null) if request could not be satisfied. */ void[] allocate(size_t n) { static if (growDownwards) { if (available < n) return null; static if (minAlign > 1) const rounded = n.roundUpToAlignment(alignment); else alias rounded = n; assert(available >= rounded); auto result = (_current - rounded)[0 .. n]; assert(result.ptr >= _begin); _current = result.ptr; assert(owns(result) == Ternary.yes); return result; } else { auto result = _current[0 .. n]; static if (minAlign > 1) const rounded = n.roundUpToAlignment(alignment); else alias rounded = n; _current += rounded; if (_current <= _end) return result; // Slow path, backtrack _current -= rounded; return null; } } /** Allocates $(D n) bytes of memory aligned at alignment $(D a). Params: n = number of bytes to allocate a = alignment for the allocated block Returns: Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null). */ void[] alignedAllocate(size_t n, uint a) { import stdx.allocator.internal : isPowerOf2; assert(a.isPowerOf2); static if (growDownwards) { const available = _current - _begin; if (available < n) return null; auto result = (_current - n).alignDownTo(a)[0 .. 
n]; if (result.ptr >= _begin) { _current = result.ptr; return result; } } else { // Just bump the pointer to the next good allocation auto save = _current; _current = _current.alignUpTo(a); auto result = allocate(n); if (result.ptr) { assert(result.length == n); return result; } // Failed, rollback _current = save; } return null; } /// Allocates and returns all memory available to this region. void[] allocateAll() { static if (growDownwards) { auto result = _begin[0 .. available]; _current = _begin; } else { auto result = _current[0 .. available]; _current = _end; } return result; } /** Expands an allocated block in place. Expansion will succeed only if the block is the last allocated. Defined only if `growDownwards` is `No.growDownwards`. */ static if (growDownwards == No.growDownwards) bool expand(ref void[] b, size_t delta) { assert(owns(b) == Ternary.yes || b.ptr is null); assert(b.ptr + b.length <= _current || b.ptr is null); if (!b.ptr) return delta == 0; auto newLength = b.length + delta; if (_current < b.ptr + b.length + alignment) { // This was the last allocation! Allocate some more and we're done. if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength) || allocate(delta).length == delta) { b = b.ptr[0 .. newLength]; assert(_current < b.ptr + b.length + alignment); return true; } } return false; } /** Deallocates $(D b). This works only if $(D b) was obtained as the last call to $(D allocate); otherwise (i.e. another allocation has occurred since) it does nothing. These semantics are tricky, so rely on $(D deallocate) only when blocks are freed strictly in reverse order of allocation. Params: b = Block previously obtained by a call to $(D allocate) against this allocator ($(D null) is allowed). */ bool deallocate(void[] b) { assert(owns(b) == Ternary.yes || b.ptr is null); static if (growDownwards) { if (b.ptr == _current) { _current += this.goodAllocSize(b.length); return true; } } else { if (b.ptr + this.goodAllocSize(b.length) == _current) { assert(b.ptr !is null || _current is null); _current = b.ptr; return true; } } return false; } /** Deallocates all memory allocated by this region, which can be subsequently reused for new allocations. */ bool deallocateAll() { static if (growDownwards) { _current = _end; } else { _current = _begin; } return true; } /** Queries whether $(D b) has been allocated with this region. Params: b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null)) returns $(D Ternary.no)). Returns: $(D Ternary.yes) if $(D b) has been allocated with this region, $(D Ternary.no) otherwise. */ Ternary owns(void[] b) const { return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end); } /** Returns `Ternary.yes` if no memory has been allocated in this region, `Ternary.no` otherwise. (Never returns `Ternary.unknown`.) */ Ternary empty() const { static if (growDownwards) return Ternary(_current == _end); else return Ternary(_current == _begin); } /// Nonstandard property that returns bytes available for allocation. size_t available() const { static if (growDownwards) { return _current - _begin; } else { return _end - _current; } } } /// @system unittest { import mir.utility : max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.mallocator : Mallocator; // Create a scalable list of regions. Each gets at least 1MB at a time by // using malloc. 
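// AllocatorList calls the factory lambda below to create a fresh Region
// whenever no existing region can satisfy a request, so capacity grows on
// demand while each Region stays a simple bump-the-pointer store.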
auto batchAllocator = AllocatorList!( (size_t n) => Region!Mallocator(max(n, 1024u * 1024)) )(); auto b = batchAllocator.allocate(101); assert(b.length == 101); // This will cause a second allocation b = batchAllocator.allocate(2 * 1024 * 1024); assert(b.length == 2 * 1024 * 1024); // Destructor will free the memory } @system unittest { import stdx.allocator.mallocator : Mallocator; // Create a 64 KB region allocated with malloc auto reg = Region!(Mallocator, Mallocator.alignment, Yes.growDownwards)(1024 * 64); const b = reg.allocate(101); assert(b.length == 101); // Destructor will free the memory } /** $(D InSituRegion) is a convenient region that carries its storage within itself (in the form of a statically-sized array). The first template argument is the size of the region and the second is the needed alignment. Depending on the alignment requested and platform details, the actual available storage may be smaller than the compile-time parameter. To make sure that at least $(D n) bytes are available in the region, use $(D InSituRegion!(n + a - 1, a)). Given that the most frequent use of `InSituRegion` is as a stack allocator, it allocates starting at the end on systems where stack grows downwards, such that hot memory is used first. */ struct InSituRegion(size_t size, size_t minAlign = platformAlignment) { import mir.utility : max; import stdx.allocator.internal : Ternary; static assert(minAlign.isGoodStaticAlignment); static assert(size >= minAlign); version (X86) enum growDownwards = Yes.growDownwards; else version (X86_64) enum growDownwards = Yes.growDownwards; else version (ARM) enum growDownwards = Yes.growDownwards; else version (AArch64) enum growDownwards = Yes.growDownwards; else version (PPC) enum growDownwards = Yes.growDownwards; else version (PPC64) enum growDownwards = Yes.growDownwards; else version (MIPS32) enum growDownwards = Yes.growDownwards; else version (MIPS64) enum growDownwards = Yes.growDownwards; else version (SPARC) enum growDownwards = Yes.growDownwards; else version (SystemZ) enum growDownwards = Yes.growDownwards; else version (WebAssembly) enum growDownwards = Yes.growDownwards; else static assert(0, "Dunno how the stack grows on this architecture."); @disable this(this); // state { private Region!(NullAllocator, minAlign, growDownwards) _impl; union { private ubyte[size] _store = void; private double _forAlignmentOnly1 = void; } // } /** An alias for $(D minAlign), which must be a valid alignment (nonzero power of 2). The start of the region and all allocation requests will be rounded up to a multiple of the alignment. ---- InSituRegion!(4096) a1; assert(a1.alignment == platformAlignment); InSituRegion!(4096, 64) a2; assert(a2.alignment == 64); ---- */ alias alignment = minAlign; private void lazyInit() { assert(!_impl._current); _impl = typeof(_impl)(_store); assert(_impl._current.alignedAt(alignment)); } /** Allocates $(D bytes) and returns them, or $(D null) if the region cannot accommodate the request. For efficiency reasons, if $(D bytes == 0) the function returns an empty non-null slice. */ void[] allocate(size_t n) { // Fast path entry: auto result = _impl.allocate(n); if (result.length == n) return result; // Slow path if (_impl._current) return null; // no more room lazyInit; assert(_impl._current); goto entry; } /** As above, but the memory allocated is aligned at $(D a) bytes. 
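Example (an added illustration; it requests a 64-byte-aligned block from a stack-backed region):
----
InSituRegion!(4096) stackAlloc;
auto b = stackAlloc.alignedAllocate(100, 64);
assert(b.length == 100);
assert(cast(size_t) b.ptr % 64 == 0);
----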
*/ void[] alignedAllocate(size_t n, uint a) { // Fast path entry: auto result = _impl.alignedAllocate(n, a); if (result.length == n) return result; // Slow path if (_impl._current) return null; // no more room lazyInit; assert(_impl._current); goto entry; } /** Deallocates $(D b). This works only if $(D b) was obtained as the last call to $(D allocate); otherwise (i.e. another allocation has occurred since) it does nothing. These semantics are tricky, so rely on $(D deallocate) only when blocks are freed strictly in reverse order of allocation. Params: b = Block previously obtained by a call to $(D allocate) against this allocator ($(D null) is allowed). */ bool deallocate(void[] b) { if (!_impl._current) return b is null; return _impl.deallocate(b); } /** Returns `Ternary.yes` if `b` is the result of a previous allocation, `Ternary.no` otherwise. */ Ternary owns(void[] b) { if (!_impl._current) return Ternary.no; return _impl.owns(b); } /** Expands an allocated block in place. Expansion will succeed only if the block is the last allocated. */ static if (__traits(hasMember, typeof(_impl), "expand")) bool expand(ref void[] b, size_t delta) { if (!_impl._current) lazyInit; return _impl.expand(b, delta); } /** Deallocates all memory allocated with this allocator. */ bool deallocateAll() { // We don't care to lazily init the region return _impl.deallocateAll; } /** Allocates all memory available with this allocator. */ void[] allocateAll() { if (!_impl._current) lazyInit; return _impl.allocateAll; } /** Nonstandard function that returns the bytes available for allocation. */ size_t available() { if (!_impl._current) lazyInit; return _impl.available; } } /// @system unittest { // 128KB region with 16-byte alignment InSituRegion!(128 * 1024, 16) r1; auto a1 = r1.allocate(101); assert(a1.length == 101); // 128KB region, with fallback to the garbage collector. import stdx.allocator.building_blocks.fallback_allocator : FallbackAllocator; import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.building_blocks.bitmapped_block : BitmappedBlock; import stdx.allocator.gc_allocator : GCAllocator; FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2; const a2 = r2.allocate(102); assert(a2.length == 102); // Reap with GC fallback. InSituRegion!(128 * 1024, 8) tmp3; FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3; r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll())); const a3 = r3.allocate(103); assert(a3.length == 103); // Reap/GC with a freelist for small objects up to 16 bytes. InSituRegion!(128 * 1024, 64) tmp4; FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4; r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll())); const a4 = r4.allocate(104); assert(a4.length == 104); } @system unittest { InSituRegion!(4096, 1) r1; auto a = r1.allocate(2001); assert(a.length == 2001); import std.conv : to; assert(r1.available == 2095, r1.available.to!string); InSituRegion!(65_536, 1024*4) r2; assert(r2.available <= 65_536); a = r2.allocate(2001); assert(a.length == 2001); } private extern(C) void* sbrk(long); private extern(C) int brk(shared void*); /** Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk)) for Posix systems. Because $(D sbrk) is not thread-safe $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design), $(D SbrkRegion) uses a mutex internally. 
This implies that uncontrolled calls to $(D brk) and $(D sbrk) may affect the workings of $(D SbrkRegion) adversely. */ version(Posix) struct SbrkRegion(uint minAlign = platformAlignment) { import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy, pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock, PTHREAD_MUTEX_INITIALIZER; private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER; import stdx.allocator.internal : Ternary; static assert(minAlign.isGoodStaticAlignment); static assert(size_t.sizeof == (void*).sizeof); private static shared void* _brkInitial, _brkCurrent; /** Instance shared by all callers. */ enum SbrkRegion instance = SbrkRegion(); /** Standard allocator primitives. */ enum uint alignment = minAlign; /// Ditto static void[] allocate(size_t bytes) { static if (minAlign > 1) const rounded = bytes.roundUpToMultipleOf(alignment); else alias rounded = bytes; pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); // Assume sbrk returns the old break. Most online documentation confirms // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf, // which claims the returned value is not portable. auto p = sbrk(rounded); if (p == cast(void*) -1) { return null; } if (!_brkInitial) { _brkInitial = cast(shared) p; assert(cast(size_t) _brkInitial % minAlign == 0, "Too large alignment chosen for " ~ typeof(this).stringof); } _brkCurrent = cast(shared) (p + rounded); return p[0 .. bytes]; } /// Ditto static void[] alignedAllocate(size_t bytes, uint a) { pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); if (!_brkInitial) { // This is one extra call, but it'll happen only once. _brkInitial = cast(shared) sbrk(0); assert(cast(size_t) _brkInitial % minAlign == 0, "Too large alignment chosen for " ~ typeof(this).stringof); (_brkInitial != cast(void*) -1) || assert(0); _brkCurrent = _brkInitial; } immutable size_t delta = cast(shared void*) roundUpToMultipleOf( cast(size_t) _brkCurrent, a) - _brkCurrent; // Still must make sure the total size is aligned to the allocator's // alignment. immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment); auto p = sbrk(rounded); if (p == cast(void*) -1) { return null; } _brkCurrent = cast(shared) (p + rounded); return p[delta .. delta + bytes]; } /** The $(D expand) method may only succeed if the argument is the last block allocated. In that case, $(D expand) attempts to push the break pointer to the right. */ static bool expand(ref void[] b, size_t delta) { if (b is null) return delta == 0; assert(_brkInitial && _brkCurrent); // otherwise where did b come from? pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); if (_brkCurrent != b.ptr + b.length) return false; // Great, can expand the last block static if (minAlign > 1) const rounded = delta.roundUpToMultipleOf(alignment); else alias rounded = delta; auto p = sbrk(rounded); if (p == cast(void*) -1) { return false; } _brkCurrent = cast(shared) (p + rounded); b = b.ptr[0 .. b.length + delta]; return true; } /// Ditto static Ternary owns(void[] b) { // No need to lock here. 
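// _brkInitial is written at most once (under sbrkMutex), and any block
// passed to owns() originates from an allocation that already published
// it, so an unsynchronized read is sufficient.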
assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent); return Ternary(_brkInitial && b.ptr >= _brkInitial); } /** The $(D deallocate) method only works (and returns $(D true)) on systems that support reducing the break address (i.e. accept calls to $(D sbrk) with negative offsets). OSX does not accept such calls. In addition, the argument must be the last block allocated. */ static bool deallocate(void[] b) { static if (minAlign > 1) const rounded = b.length.roundUpToMultipleOf(alignment); else const rounded = b.length; pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); if (_brkCurrent != b.ptr + rounded) return false; assert(b.ptr >= _brkInitial); if (sbrk(-rounded) == cast(void*) -1) return false; _brkCurrent = cast(shared) b.ptr; return true; } /** The $(D deallocateAll) method only works (and returns $(D true)) on systems that support reducing the break address (i.e. accept calls to $(D sbrk) with negative offsets). OSX does not accept such calls. */ static bool deallocateAll() { pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0); return !_brkInitial || brk(_brkInitial) == 0; } /// Standard allocator API. Ternary empty() { // Also works when they're both null. return Ternary(_brkCurrent == _brkInitial); } } version(Posix) @system unittest { // Let's test the assumption that sbrk(n) returns the old address const p1 = sbrk(0); const p2 = sbrk(4096); assert(p1 == p2); const p3 = sbrk(0); assert(p3 == p2 + 4096); // Try to reset brk, but don't make a fuss if it doesn't work sbrk(-4096); } version(Posix) @system unittest { import stdx.allocator.internal : Ternary; alias alloc = SbrkRegion!(8).instance; auto a = alloc.alignedAllocate(2001, 4096); assert(a.length == 2001); auto b = alloc.allocate(2001); assert(b.length == 2001); assert(alloc.owns(a) == Ternary.yes); assert(alloc.owns(b) == Ternary.yes); // reducing the brk does not work on OSX version(OSX) {} else { assert(alloc.deallocate(b)); assert(alloc.deallocateAll); } } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/scoped_allocator.d 0000664 0000000 0000000 00000012460 13535263154 0030735 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.scoped_allocator; import stdx.allocator.common; /** $(D ScopedAllocator) delegates all allocation requests to $(D ParentAllocator). When destroyed, the $(D ScopedAllocator) object automatically calls $(D deallocate) for all memory allocated through its lifetime. (The $(D deallocateAll) function is also implemented with the same semantics.) $(D deallocate) is also supported, which is where most implementation effort and overhead of $(D ScopedAllocator) go. If $(D deallocate) is not needed, a simpler design combining $(D AllocatorList) with $(D Region) is recommended. */ struct ScopedAllocator(ParentAllocator) { @system unittest { testAllocator!(() => ScopedAllocator()); } import stdx.allocator.building_blocks.affix_allocator : AffixAllocator; import stdx.allocator.internal : Ternary; private struct Node { Node* prev; Node* next; size_t length; } alias Allocator = AffixAllocator!(ParentAllocator, Node); // state /** If $(D ParentAllocator) is stateful, $(D parent) is a property giving access to an $(D AffixAllocator!ParentAllocator). Otherwise, $(D parent) is an alias for `AffixAllocator!ParentAllocator.instance`. 
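Example (an added sketch of the typical usage pattern; the wrapper exists so that blocks need not be freed individually):
----
// inside some function:
{
    import stdx.allocator.mallocator : Mallocator;
    ScopedAllocator!Mallocator a;
    auto p = a.allocate(32);
    auto q = a.allocate(64);
    // no explicit deallocation: a's destructor frees both blocks
}
----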
*/ static if (stateSize!ParentAllocator) { Allocator parent; } else { alias parent = Allocator.instance; } private Node* root; /** $(D ScopedAllocator) is not copyable. */ @disable this(this); /** $(D ScopedAllocator)'s destructor releases all memory allocated during its lifetime. */ ~this() { deallocateAll; } /// Alignment offered enum alignment = Allocator.alignment; /** Forwards to $(D parent.goodAllocSize) (which accounts for the management overhead). */ size_t goodAllocSize(size_t n) { return parent.goodAllocSize(n); } /** Allocates memory. For management it actually allocates extra memory from the parent. */ void[] allocate(size_t n) { auto b = parent.allocate(n); if (!b.ptr) return b; Node* toInsert = & parent.prefix(b); toInsert.prev = null; toInsert.next = root; toInsert.length = n; assert(!root || !root.prev); if (root) root.prev = toInsert; root = toInsert; return b; } /** Forwards to $(D parent.expand(b, delta)). */ static if (__traits(hasMember, Allocator, "expand")) bool expand(ref void[] b, size_t delta) { auto result = parent.expand(b, delta); if (result && b.ptr) { parent.prefix(b).length = b.length; } return result; } /** Reallocates $(D b) to new size $(D s). */ bool reallocate(ref void[] b, size_t s) { // Remove from list if (b.ptr) { Node* n = & parent.prefix(b); if (n.prev) n.prev.next = n.next; else root = n.next; if (n.next) n.next.prev = n.prev; } auto result = parent.reallocate(b, s); // Add back to list if (b.ptr) { Node* n = & parent.prefix(b); n.prev = null; n.next = root; n.length = s; if (root) root.prev = n; root = n; } return result; } /** Forwards to $(D parent.owns(b)). */ static if (__traits(hasMember, Allocator, "owns")) Ternary owns(void[] b) { return parent.owns(b); } /** Deallocates $(D b). */ static if (__traits(hasMember, Allocator, "deallocate")) bool deallocate(void[] b) { // Remove from list if (b.ptr) { Node* n = & parent.prefix(b); if (n.prev) n.prev.next = n.next; else root = n.next; if (n.next) n.next.prev = n.prev; } return parent.deallocate(b); } /** Deallocates all memory allocated. */ bool deallocateAll() { bool result = true; for (auto n = root; n; ) { void* p = n + 1; auto length = n.length; n = n.next; if (!parent.deallocate(p[0 .. length])) result = false; } root = null; return result; } /** Returns `Ternary.yes` if this allocator is not responsible for any memory, `Ternary.no` otherwise. (Never returns `Ternary.unknown`.) 
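Example (added for illustration; deallocating the only outstanding block makes the allocator empty again):
----
import stdx.allocator.mallocator : Mallocator;
import stdx.allocator.internal : Ternary;
ScopedAllocator!Mallocator a;
assert(a.empty == Ternary.yes);
auto b = a.allocate(8);
assert(a.empty == Ternary.no);
a.deallocate(b);
assert(a.empty == Ternary.yes);
----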
*/ Ternary empty() const { return Ternary(root is null); } } /// @system unittest { import stdx.allocator.mallocator : Mallocator; import stdx.allocator.internal : Ternary; ScopedAllocator!Mallocator alloc; assert(alloc.empty == Ternary.yes); const b = alloc.allocate(10); assert(b.length == 10); assert(alloc.empty == Ternary.no); } @system unittest { import stdx.allocator.gc_allocator : GCAllocator; testAllocator!(() => ScopedAllocator!GCAllocator()); } @system unittest // https://issues.dlang.org/show_bug.cgi?id=16046 { import stdx.allocator; import stdx.allocator.mallocator; ScopedAllocator!Mallocator alloc; auto foo = alloc.make!int(1); auto bar = alloc.make!int(2); assert(foo); assert(bar); alloc.dispose(foo); alloc.dispose(bar); // segfault here } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/segregator.d 0000664 0000000 0000000 00000031603 13535263154 0027562 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.building_blocks.segregator; import stdx.allocator.common; /** Dispatches allocations (and deallocations) between two allocators ($(D SmallAllocator) and $(D LargeAllocator)) depending on the size allocated, as follows. All allocations smaller than or equal to $(D threshold) will be dispatched to $(D SmallAllocator). The others will go to $(D LargeAllocator). If both allocators are $(D shared), the $(D Segregator) will also offer $(D shared) methods. */ struct Segregator(size_t threshold, SmallAllocator, LargeAllocator) { import mir.utility : min; import stdx.allocator.internal : Ternary; static if (stateSize!SmallAllocator) private SmallAllocator _small; else private alias _small = SmallAllocator.instance; static if (stateSize!LargeAllocator) private LargeAllocator _large; else private alias _large = LargeAllocator.instance; version (StdDdoc) { /** The alignment offered is the minimum of the two allocators' alignment. */ enum uint alignment; /** This method is defined only if at least one of the allocators defines it. The good allocation size is obtained from $(D SmallAllocator) if $(D s <= threshold), or $(D LargeAllocator) otherwise. (If one of the allocators does not define $(D goodAllocSize), the default implementation in this module applies.) */ static size_t goodAllocSize(size_t s); /** The memory is obtained from $(D SmallAllocator) if $(D s <= threshold), or $(D LargeAllocator) otherwise. */ void[] allocate(size_t); /** This method is defined if both allocators define it, and forwards to $(D SmallAllocator) or $(D LargeAllocator) appropriately. */ void[] alignedAllocate(size_t, uint); /** This method is defined only if at least one of the allocators defines it. If $(D SmallAllocator) defines $(D expand) and $(D b.length + delta <= threshold), the call is forwarded to $(D SmallAllocator). If $(D LargeAllocator) defines $(D expand) and $(D b.length > threshold), the call is forwarded to $(D LargeAllocator). Otherwise, the call returns $(D false). */ bool expand(ref void[] b, size_t delta); /** This method is defined only if at least one of the allocators defines it. If $(D SmallAllocator) defines $(D reallocate) and $(D b.length <= threshold && s <= threshold), the call is forwarded to $(D SmallAllocator). If $(D LargeAllocator) defines $(D reallocate) and $(D b.length > threshold && s > threshold), the call is forwarded to $(D LargeAllocator). Otherwise, the call returns $(D false). */ bool reallocate(ref void[] b, size_t s); /** This method is defined only if at least one of the allocators defines it, and works similarly to $(D reallocate). 
*/ bool alignedReallocate(ref void[] b, size_t s); /** This method is defined only if both allocators define it. The call is forwarded to $(D SmallAllocator) if $(D b.length <= threshold), or $(D LargeAllocator) otherwise. */ Ternary owns(void[] b); /** This function is defined only if both allocators define it, and forwards appropriately depending on $(D b.length). */ bool deallocate(void[] b); /** This function is defined only if both allocators define it, and calls $(D deallocateAll) for them in turn. */ bool deallocateAll(); /** This function is defined only if both allocators define it, and returns the conjunction of $(D empty) calls for the two. */ Ternary empty(); } /** Composite allocators involving nested instantiations of $(D Segregator) make it difficult to access individual sub-allocators stored within. $(D allocatorForSize) simplifies the task by supplying the allocator nested inside a $(D Segregator) that is responsible for a specific size $(D s). Example: ---- alias A = Segregator!(300, Segregator!(200, A1, A2), A3); A a; static assert(is(typeof(a.allocatorForSize!10) == A1)); static assert(is(typeof(a.allocatorForSize!250) == A2)); static assert(is(typeof(a.allocatorForSize!301) == A3)); ---- */ ref auto allocatorForSize(size_t s)() { static if (s <= threshold) static if (is(SmallAllocator == Segregator!(Args), Args...)) return _small.allocatorForSize!s; else return _small; else static if (is(LargeAllocator == Segregator!(Args), Args...)) return _large.allocatorForSize!s; else return _large; } enum uint alignment = min(SmallAllocator.alignment, LargeAllocator.alignment); private template Impl() { size_t goodAllocSize(size_t s) { return s <= threshold ? _small.goodAllocSize(s) : _large.goodAllocSize(s); } void[] allocate(size_t s) { return s <= threshold ? _small.allocate(s) : _large.allocate(s); } static if (__traits(hasMember, SmallAllocator, "alignedAllocate") && __traits(hasMember, LargeAllocator, "alignedAllocate")) void[] alignedAllocate(size_t s, uint a) { return s <= threshold ? 
_small.alignedAllocate(s, a) : _large.alignedAllocate(s, a); } static if (__traits(hasMember, SmallAllocator, "expand") || __traits(hasMember, LargeAllocator, "expand")) bool expand(ref void[] b, size_t delta) { if (!delta) return true; if (b.length + delta <= threshold) { // Old and new allocations handled by _small static if (__traits(hasMember, SmallAllocator, "expand")) return _small.expand(b, delta); else return false; } if (b.length > threshold) { // Old and new allocations handled by _large static if (__traits(hasMember, LargeAllocator, "expand")) return _large.expand(b, delta); else return false; } // Oops, cross-allocator transgression return false; } static if (__traits(hasMember, SmallAllocator, "reallocate") || __traits(hasMember, LargeAllocator, "reallocate")) bool reallocate(ref void[] b, size_t s) { static if (__traits(hasMember, SmallAllocator, "reallocate")) if (b.length <= threshold && s <= threshold) { // Old and new allocations handled by _small return _small.reallocate(b, s); } static if (__traits(hasMember, LargeAllocator, "reallocate")) if (b.length > threshold && s > threshold) { // Old and new allocations handled by _large return _large.reallocate(b, s); } // Cross-allocator transgression static if (!__traits(hasMember, typeof(this), "instance")) return .reallocate(this, b, s); else return .reallocate(instance, b, s); } static if (__traits(hasMember, SmallAllocator, "alignedReallocate") || __traits(hasMember, LargeAllocator, "alignedReallocate")) bool alignedReallocate(ref void[] b, size_t s) { static if (__traits(hasMember, SmallAllocator, "alignedReallocate")) if (b.length <= threshold && s <= threshold) { // Old and new allocations handled by _small return _small.alignedReallocate(b, s); } static if (__traits(hasMember, LargeAllocator, "alignedReallocate")) if (b.length > threshold && s > threshold) { // Old and new allocations handled by _large return _large.alignedReallocate(b, s); } // Cross-allocator transgression static if (!__traits(hasMember, typeof(this), "instance")) return .alignedReallocate(this, b, s); else return .alignedReallocate(instance, b, s); } static if (__traits(hasMember, SmallAllocator, "owns") && __traits(hasMember, LargeAllocator, "owns")) Ternary owns(void[] b) { return Ternary(b.length <= threshold ? _small.owns(b) : _large.owns(b)); } static if (__traits(hasMember, SmallAllocator, "deallocate") && __traits(hasMember, LargeAllocator, "deallocate")) bool deallocate(void[] data) { return data.length <= threshold ? _small.deallocate(data) : _large.deallocate(data); } static if (__traits(hasMember, SmallAllocator, "deallocateAll") && __traits(hasMember, LargeAllocator, "deallocateAll")) bool deallocateAll() { // Use & instead of && to evaluate both return _small.deallocateAll() & _large.deallocateAll(); } static if (__traits(hasMember, SmallAllocator, "empty") && __traits(hasMember, LargeAllocator, "empty")) Ternary empty() { return _small.empty & _large.empty; } static if (__traits(hasMember, SmallAllocator, "resolveInternalPointer") && __traits(hasMember, LargeAllocator, "resolveInternalPointer")) Ternary resolveInternalPointer(const void* p, ref void[] result) { Ternary r = _small.resolveInternalPointer(p, result); return r == Ternary.no ? 
_large.resolveInternalPointer(p, result) : r; } } private enum sharedMethods = !stateSize!SmallAllocator && !stateSize!LargeAllocator && is(typeof(SmallAllocator.instance) == shared) && is(typeof(LargeAllocator.instance) == shared); static if (sharedMethods) { // for backward compatibility enum shared Segregator instance = Segregator(); static { mixin Impl!(); } } else { static if (!stateSize!SmallAllocator && !stateSize!LargeAllocator) { enum shared Segregator instance = Segregator(); static { mixin Impl!(); } } else { mixin Impl!(); } } } /// @system unittest { import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mallocator : Mallocator; alias A = Segregator!( 1024 * 4, Segregator!( 128, FreeList!(Mallocator, 0, 128), GCAllocator), Segregator!( 1024 * 1024, Mallocator, GCAllocator) ); A a; auto b = a.allocate(200); assert(b.length == 200); a.deallocate(b); } /** A $(D Segregator) with more than three arguments expands to a composition of elemental $(D Segregator)s, as illustrated by the following example: ---- alias A = Segregator!( n1, A1, n2, A2, n3, A3, A4 ); ---- With this definition, allocation requests for $(D n1) bytes or less are directed to $(D A1); requests between $(D n1 + 1) and $(D n2) bytes (inclusive) are directed to $(D A2); requests between $(D n2 + 1) and $(D n3) bytes (inclusive) are directed to $(D A3); and requests for more than $(D n3) bytes are directed to $(D A4). If some particular range should not be handled, $(D NullAllocator) may be used appropriately. */ template Segregator(Args...) if (Args.length > 3) { // Binary search private enum cutPoint = ((Args.length - 2) / 4) * 2; static if (cutPoint >= 2) { alias Segregator = .Segregator!( Args[cutPoint], .Segregator!(Args[0 .. cutPoint], Args[cutPoint + 1]), .Segregator!(Args[cutPoint + 2 .. $]) ); } else { // Favor small sizes alias Segregator = .Segregator!( Args[0], Args[1], .Segregator!(Args[2 .. $]) ); } } /// @system unittest { import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mallocator : Mallocator; alias A = Segregator!( 128, FreeList!(Mallocator, 0, 128), 1024 * 4, GCAllocator, 1024 * 1024, Mallocator, GCAllocator ); A a; auto b = a.allocate(201); assert(b.length == 201); a.deallocate(b); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/building_blocks/stats_collector.d 0000664 0000000 0000000 00000056744 13535263154 0030631 0 ustar 00root root 0000000 0000000 // Written in the D programming language. /** Allocator that collects useful statistics about allocations, both global and per calling point. The statistics collected can be configured statically by choosing combinations of `Options` appropriately. Example: ---- import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.building_blocks.free_list : FreeList; alias Allocator = StatsCollector!(GCAllocator, Options.bytesUsed); ---- */ module stdx.allocator.building_blocks.stats_collector; import stdx.allocator.common; /** _Options for $(D StatsCollector) defined below. Each enables during compilation one specific counter, statistic, or other piece of information. */ enum Options : ulong { /** Counts the number of calls to $(D owns). */ numOwns = 1u << 0, /** Counts the number of calls to $(D allocate). All calls are counted, including requests for zero bytes or failed requests. */ numAllocate = 1u << 1, /** Counts the number of calls to $(D allocate) that succeeded, i.e. 
they returned a block as large as requested. (N.B. requests for zero bytes count as successful.) */ numAllocateOK = 1u << 2, /** Counts the number of calls to $(D expand), regardless of arguments or result. */ numExpand = 1u << 3, /** Counts the number of calls to $(D expand) that resulted in a successful expansion. */ numExpandOK = 1u << 4, /** Counts the number of calls to $(D reallocate), regardless of arguments or result. */ numReallocate = 1u << 5, /** Counts the number of calls to $(D reallocate) that succeeded. (Reallocations to zero bytes count as successful.) */ numReallocateOK = 1u << 6, /** Counts the number of calls to $(D reallocate) that resulted in an in-place reallocation (no memory moved). If this number is close to the total number of reallocations, that indicates the allocator finds room at the current block's end in a large fraction of the cases, but also that internal fragmentation may be high (the size of the unit of allocation is large compared to the typical allocation size of the application). */ numReallocateInPlace = 1u << 7, /** Counts the number of calls to $(D deallocate). */ numDeallocate = 1u << 8, /** Counts the number of calls to $(D deallocateAll). */ numDeallocateAll = 1u << 9, /** Chooses all $(D numXxx) flags. */ numAll = (1u << 10) - 1, /** Tracks bytes currently allocated by this allocator. This number goes up and down as memory is allocated and deallocated, and is zero if the allocator currently has no active allocation. */ bytesUsed = 1u << 10, /** Tracks total cumulative bytes allocated by means of $(D allocate), $(D expand), and $(D reallocate) (when resulting in an expansion). This number always grows and indicates allocation traffic. To compute bytes deallocated cumulatively, subtract $(D bytesUsed) from $(D bytesAllocated). */ bytesAllocated = 1u << 11, /** Tracks the sum of all $(D delta) values in calls of the form $(D expand(b, delta)) that succeed (return $(D true)). */ bytesExpanded = 1u << 12, /** Tracks the sum of all $(D b.length - s) with $(D b.length > s) in calls of the form $(D realloc(b, s)) that succeed (return $(D true)). In per-call statistics, also unambiguously counts the bytes deallocated with $(D deallocate). */ bytesContracted = 1u << 13, /** Tracks the sum of all bytes moved as a result of calls to $(D realloc) that were unable to reallocate in place. A large number (relative to $(D bytesAllocated)) indicates that the application should use larger preallocations. */ bytesMoved = 1u << 14, /** Tracks the sum of all bytes NOT moved as a result of calls to $(D realloc) that managed to reallocate in place. A large number (relative to $(D bytesAllocated)) indicates that the application is expansion-intensive and is saving a good amount of moves. However, if this number is relatively small and $(D bytesSlack) is high, it means the application is overallocating for little benefit. */ bytesNotMoved = 1u << 15, /** Measures the sum of extra bytes allocated beyond the bytes requested, i.e. the $(HTTP goo.gl/YoKffF, internal fragmentation). This is the current effective number of slack bytes, and it goes up and down with time. */ bytesSlack = 1u << 16, /** Measures the maximum bytes allocated over time. This is useful for dimensioning allocators. */ bytesHighTide = 1u << 17, /** Chooses all $(D bytesXxx) flags. */ bytesAll = ((1u << 18) - 1) & ~numAll, /** Combines all flags above. */ all = (1u << 18) - 1 } /** Allocator that collects extra data about allocations. 
Since each piece of information adds size and time overhead, statistics can be individually enabled or disabled through compile-time $(D flags). All stats of the form $(D numXxx) record counts of events occurring, such as calls to functions and specific results. The stats of the form $(D bytesXxx) collect cumulative sizes. In addition, the data $(D callerSize), $(D callerModule), $(D callerFile), $(D callerLine), and $(D callerTime) is associated with each specific allocation. This data prefixes each allocation. */ struct StatsCollector(Allocator, ulong flags = Options.all, ulong perCallFlags = 0) { private: import stdx.allocator.internal : Ternary; enum define = (string type, string[] names...) { string result; foreach (v; names) result ~= "static if (flags & Options."~v~") {" ~ "private "~type~" _"~v~";" ~ "public const("~type~") "~v~"() const { return _"~v~"; }" ~ "}"; return result; }; void add(string counter)(sizediff_t n) { mixin("static if (flags & Options." ~ counter ~ ") _" ~ counter ~ " += n;"); static if (counter == "bytesUsed" && (flags & Options.bytesHighTide)) { if (bytesHighTide < bytesUsed) _bytesHighTide = bytesUsed; } } void up(string counter)() { add!counter(1); } void down(string counter)() { add!counter(-1); } version (StdDdoc) { /** Read-only properties enabled by the homonym $(D flags) chosen by the user. Example: ---- StatsCollector!(Mallocator, Options.bytesUsed | Options.bytesAllocated) a; auto d1 = a.allocate(10); auto d2 = a.allocate(11); a.deallocate(d1); assert(a.bytesAllocated == 21); assert(a.bytesUsed == 11); a.deallocate(d2); assert(a.bytesAllocated == 21); assert(a.bytesUsed == 0); ---- */ @property ulong numOwns() const; /// Ditto @property ulong numAllocate() const; /// Ditto @property ulong numAllocateOK() const; /// Ditto @property ulong numExpand() const; /// Ditto @property ulong numExpandOK() const; /// Ditto @property ulong numReallocate() const; /// Ditto @property ulong numReallocateOK() const; /// Ditto @property ulong numReallocateInPlace() const; /// Ditto @property ulong numDeallocate() const; /// Ditto @property ulong numDeallocateAll() const; /// Ditto @property ulong bytesUsed() const; /// Ditto @property ulong bytesAllocated() const; /// Ditto @property ulong bytesExpanded() const; /// Ditto @property ulong bytesContracted() const; /// Ditto @property ulong bytesMoved() const; /// Ditto @property ulong bytesNotMoved() const; /// Ditto @property ulong bytesSlack() const; /// Ditto @property ulong bytesHighTide() const; } public: /** The parent allocator is publicly accessible either as a direct member if it holds state, or as an alias to `Allocator.instance` otherwise. One may use it for making calls that won't count toward statistics collection. */ static if (stateSize!Allocator) Allocator parent; else alias parent = Allocator.instance; private: // Per-allocator state mixin(define("ulong", "numOwns", "numAllocate", "numAllocateOK", "numExpand", "numExpandOK", "numReallocate", "numReallocateOK", "numReallocateInPlace", "numDeallocate", "numDeallocateAll", "bytesUsed", "bytesAllocated", "bytesExpanded", "bytesContracted", "bytesMoved", "bytesNotMoved", "bytesSlack", "bytesHighTide", )); public: /// Alignment offered is equal to $(D Allocator.alignment). alias alignment = Allocator.alignment; /** Increments $(D numOwns) (per instance and per call) and forwards to $(D parent.owns(b)). 
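Example (an added sketch; it pairs the collector with a region parent because $(D owns) is only generated when the parent defines it):
----
import stdx.allocator.building_blocks.region : InSituRegion;
import stdx.allocator.internal : Ternary;
StatsCollector!(InSituRegion!1024, Options.numOwns) a;
auto b = a.allocate(10);
assert(a.owns(b) == Ternary.yes);
assert(a.numOwns == 1);
----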
*/ static if (__traits(hasMember, Allocator, "owns")) { static if ((perCallFlags & Options.numOwns) == 0) Ternary owns(void[] b) { return ownsImpl(b); } else Ternary owns(string f = __FILE__, uint n = __LINE__)(void[] b) { return ownsImpl!(f, n)(b); } } private Ternary ownsImpl(string f = null, uint n = 0)(void[] b) { up!"numOwns"; addPerCall!(f, n, "numOwns")(1); return parent.owns(b); } /** Forwards to $(D parent.allocate). Affects per instance: $(D numAllocate), $(D bytesUsed), $(D bytesAllocated), $(D bytesSlack), $(D numAllocateOK), and $(D bytesHighTide). Affects per call: $(D numAllocate), $(D numAllocateOK), and $(D bytesAllocated). */ static if (!(perCallFlags & (Options.numAllocate | Options.numAllocateOK | Options.bytesAllocated))) { void[] allocate(size_t n) { return allocateImpl(n); } } else { void[] allocate(string f = __FILE__, ulong n = __LINE__) (size_t bytes) { return allocateImpl!(f, n)(bytes); } } private void[] allocateImpl(string f = null, ulong n = 0)(size_t bytes) { auto result = parent.allocate(bytes); add!"bytesUsed"(result.length); add!"bytesAllocated"(result.length); immutable slack = this.goodAllocSize(result.length) - result.length; add!"bytesSlack"(slack); up!"numAllocate"; add!"numAllocateOK"(result.length == bytes); // allocating 0 bytes is OK addPerCall!(f, n, "numAllocate", "numAllocateOK", "bytesAllocated") (1, result.length == bytes, result.length); return result; } /** Defined whether or not $(D Allocator.expand) is defined. Affects per instance: $(D numExpand), $(D numExpandOK), $(D bytesExpanded), $(D bytesSlack), $(D bytesAllocated), and $(D bytesUsed). Affects per call: $(D numExpand), $(D numExpandOK), $(D bytesExpanded), and $(D bytesAllocated). */ static if (!(perCallFlags & (Options.numExpand | Options.numExpandOK | Options.bytesExpanded))) { bool expand(ref void[] b, size_t delta) { return expandImpl(b, delta); } } else { bool expand(string f = __FILE__, uint n = __LINE__) (ref void[] b, size_t delta) { return expandImpl!(f, n)(b, delta); } } private bool expandImpl(string f = null, uint n = 0)(ref void[] b, size_t s) { up!"numExpand"; sizediff_t slack = 0; static if (!__traits(hasMember, Allocator, "expand")) { auto result = s == 0; } else { immutable bytesSlackB4 = this.goodAllocSize(b.length) - b.length; auto result = parent.expand(b, s); if (result) { up!"numExpandOK"; add!"bytesUsed"(s); add!"bytesAllocated"(s); add!"bytesExpanded"(s); slack = sizediff_t(this.goodAllocSize(b.length) - b.length - bytesSlackB4); add!"bytesSlack"(slack); } } immutable xtra = result ? s : 0; addPerCall!(f, n, "numExpand", "numExpandOK", "bytesExpanded", "bytesAllocated") (1, result, xtra, xtra); return result; } /** Defined whether or not $(D Allocator.reallocate) is defined. Affects per instance: $(D numReallocate), $(D numReallocateOK), $(D numReallocateInPlace), $(D bytesNotMoved), $(D bytesAllocated), $(D bytesSlack), $(D bytesExpanded), and $(D bytesContracted). Affects per call: $(D numReallocate), $(D numReallocateOK), $(D numReallocateInPlace), $(D bytesNotMoved), $(D bytesExpanded), $(D bytesContracted), and $(D bytesMoved). 
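Example (an added sketch; not part of the upstream documentation):
----
import stdx.allocator.gc_allocator : GCAllocator;
StatsCollector!(GCAllocator, Options.all) a;
auto b = a.allocate(10);
a.reallocate(b, 100);
assert(a.numReallocate == 1 && a.numReallocateOK == 1);
// bytesUsed reflects the new size whether or not the block moved
assert(a.bytesUsed == 100);
----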
*/ static if (!(perCallFlags & (Options.numReallocate | Options.numReallocateOK | Options.numReallocateInPlace | Options.bytesNotMoved | Options.bytesExpanded | Options.bytesContracted | Options.bytesMoved))) { bool reallocate(ref void[] b, size_t s) { return reallocateImpl(b, s); } } else { bool reallocate(string f = __FILE__, ulong n = __LINE__) (ref void[] b, size_t s) { return reallocateImpl!(f, n)(b, s); } } private bool reallocateImpl(string f = null, uint n = 0) (ref void[] b, size_t s) { up!"numReallocate"; const bytesSlackB4 = this.goodAllocSize(b.length) - b.length; const oldB = b.ptr; const oldLength = b.length; const result = parent.reallocate(b, s); sizediff_t slack = 0; bool wasInPlace = false; sizediff_t delta = 0; if (result) { up!"numReallocateOK"; slack = (this.goodAllocSize(b.length) - b.length) - bytesSlackB4; add!"bytesSlack"(slack); add!"bytesUsed"(sizediff_t(b.length - oldLength)); if (oldB == b.ptr) { // This was an in-place reallocation, yay wasInPlace = true; up!"numReallocateInPlace"; add!"bytesNotMoved"(oldLength); delta = b.length - oldLength; if (delta >= 0) { // Expansion add!"bytesAllocated"(delta); add!"bytesExpanded"(delta); } else { // Contraction add!"bytesContracted"(-delta); } } else { // This was an allocate-move-deallocate cycle add!"bytesAllocated"(b.length); add!"bytesMoved"(oldLength); } } addPerCall!(f, n, "numReallocate", "numReallocateOK", "numReallocateInPlace", "bytesNotMoved", "bytesExpanded", "bytesContracted", "bytesMoved") (1, result, wasInPlace, wasInPlace ? oldLength : 0, delta >= 0 ? delta : 0, delta < 0 ? -delta : 0, wasInPlace ? 0 : oldLength); return result; } /** Defined whether or not $(D Allocator.deallocate) is defined. Affects per instance: $(D numDeallocate), $(D bytesUsed), and $(D bytesSlack). Affects per call: $(D numDeallocate) and $(D bytesContracted). */ static if (!(perCallFlags & (Options.numDeallocate | Options.bytesContracted))) bool deallocate(void[] b) { return deallocateImpl(b); } else bool deallocate(string f = __FILE__, uint n = __LINE__)(void[] b) { return deallocateImpl!(f, n)(b); } private bool deallocateImpl(string f = null, uint n = 0)(void[] b) { up!"numDeallocate"; add!"bytesUsed"(-sizediff_t(b.length)); add!"bytesSlack"(-(this.goodAllocSize(b.length) - b.length)); addPerCall!(f, n, "numDeallocate", "bytesContracted")(1, b.length); static if (__traits(hasMember, Allocator, "deallocate")) return parent.deallocate(b); else return false; } static if (__traits(hasMember, Allocator, "deallocateAll")) { /** Defined only if $(D Allocator.deallocateAll) is defined. Affects per instance and per call $(D numDeallocateAll). */ static if (!(perCallFlags & Options.numDeallocateAll)) bool deallocateAll() { return deallocateAllImpl(); } else bool deallocateAll(string f = __FILE__, uint n = __LINE__)() { return deallocateAllImpl!(f, n)(); } private bool deallocateAllImpl(string f = null, uint n = 0)() { up!"numDeallocateAll"; addPerCall!(f, n, "numDeallocateAll")(1); static if ((flags & Options.bytesUsed)) _bytesUsed = 0; return parent.deallocateAll(); } } /** Defined only if $(D Options.bytesUsed) is defined. Returns $(D bytesUsed == 0). */ static if (flags & Options.bytesUsed) Ternary empty() { return Ternary(_bytesUsed == 0); } /** Reports per instance statistics to $(D output) (e.g. $(D stdout)). The format is simple: one kind and value per line, separated by a colon, e.g. 
$(D bytesAllocated:7395404) */ void reportStatistics(R)(auto ref R output) { foreach (member; __traits(allMembers, Options)) {{ enum e = __traits(getMember, Options, member); static if ((flags & e) && e != Options.numAll && e != Options.bytesAll && e != Options.all) output.write(member, ":", mixin(member), '\n'); }} } static if (perCallFlags) { /** Defined if $(D perCallFlags) is nonzero. */ struct PerCallStatistics { /// The file and line of the call. string file; /// Ditto uint line; /// The options corresponding to the statistics collected. Options[] opts; /// The values of the statistics. Has the same length as $(D opts). ulong[] values; // Next in the chain. private PerCallStatistics* next; /** Format to a string such as: $(D mymodule.d(655): [numAllocate:21, numAllocateOK:21, bytesAllocated:324202]). */ string toString()() const { import std.conv : text, to; auto result = text(file, "(", line, "): ["); foreach (i, opt; opts) { if (i) result ~= ", "; result ~= opt.to!string; result ~= ':'; result ~= values[i].to!string; } return result ~= "]"; } } private static PerCallStatistics* root; /** Defined if $(D perCallFlags) is nonzero. Iterates all monitored file/line instances. The order of iteration is not meaningful (items are inserted at the front of a list upon the first call), so preprocessing the statistics after collection might be appropriate. */ static auto byFileLine() { static struct Voldemort { PerCallStatistics* current; bool empty() { return !current; } ref PerCallStatistics front() { return *current; } void popFront() { current = current.next; } auto save() { return this; } } return Voldemort(root); } /** Defined if $(D perCallFlags) is nonzero. Outputs (e.g. to a $(D File)) a simple report of the collected per-call statistics. */ static void reportPerCallStatistics(R)(auto ref R output) { output.write("Stats for: ", StatsCollector.stringof, '\n'); foreach (ref stat; byFileLine) { output.write(stat, '\n'); } } private PerCallStatistics* statsAt(string f, uint n, opts...)() { static PerCallStatistics s = { f, n, [ opts ], new ulong[opts.length] }; static bool inserted; if (!inserted) { // Insert as root s.next = root; root = &s; inserted = true; } return &s; } private void addPerCall(string f, uint n, names...)(ulong[] values...) { import std.array : join; enum uint mask = mixin("Options."~[names].join("|Options.")); static if (perCallFlags & mask) { // Per allocation info auto ps = mixin("statsAt!(f, n," ~ "Options."~[names].join(", Options.") ~")"); foreach (i; 0 .. names.length) { ps.values[i] += values[i]; } } } } else { private void addPerCall(string f, uint n, names...)(ulong[]...) 
{ } } } /// @system unittest { import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; alias Allocator = StatsCollector!(GCAllocator, Options.all, Options.all); Allocator alloc; auto b = alloc.allocate(10); alloc.reallocate(b, 20); alloc.deallocate(b); static if (__VERSION__ >= 2073) { import std.file : deleteme, remove; import std.range : walkLength; import std.stdio : File; auto f = deleteme ~ "-dlang.stdx.allocator.stats_collector.txt"; scope(exit) remove(f); Allocator.reportPerCallStatistics(File(f, "w")); alloc.reportStatistics(File(f, "a")); assert(File(f).byLine.walkLength == 22); } } @system unittest { void test(Allocator)() { import std.range : walkLength; import std.stdio : writeln; Allocator a; auto b1 = a.allocate(100); assert(a.numAllocate == 1); assert(a.expand(b1, 0)); assert(a.reallocate(b1, b1.length + 1)); auto b2 = a.allocate(101); assert(a.numAllocate == 2); assert(a.bytesAllocated == 202); assert(a.bytesUsed == 202); auto b3 = a.allocate(202); assert(a.numAllocate == 3); assert(a.bytesAllocated == 404); a.deallocate(b2); assert(a.numDeallocate == 1); a.deallocate(b1); assert(a.numDeallocate == 2); a.deallocate(b3); assert(a.numDeallocate == 3); assert(a.numAllocate == a.numDeallocate); assert(a.bytesUsed == 0); } import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; test!(StatsCollector!(GCAllocator, Options.all, Options.all)); test!(StatsCollector!(FreeList!(GCAllocator, 128), Options.all, Options.all)); } @system unittest { void test(Allocator)() { import std.range : walkLength; import std.stdio : writeln; Allocator a; auto b1 = a.allocate(100); assert(a.expand(b1, 0)); assert(a.reallocate(b1, b1.length + 1)); auto b2 = a.allocate(101); auto b3 = a.allocate(202); a.deallocate(b2); a.deallocate(b1); a.deallocate(b3); } import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; test!(StatsCollector!(GCAllocator, 0, 0)); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/common.d 0000664 0000000 0000000 00000052341 13535263154 0023560 0 ustar 00root root 0000000 0000000 /** Utility and ancillary artifacts of `stdx.allocator`. This module shouldn't be used directly; its functionality will be migrated into more appropriate parts of `std`. Authors: $(HTTP erdani.com, Andrei Alexandrescu), Timon Gehr (`Ternary`) */ module stdx.allocator.common; import mir.utility; import std.traits; /** Returns the size in bytes of the state that needs to be allocated to hold an object of type $(D T). $(D stateSize!T) is zero for $(D struct)s that are not nested and have no nonstatic member variables. */ template stateSize(T) { static if (is(T == class) || is(T == interface)) enum stateSize = __traits(classInstanceSize, T); else static if (is(T == struct) || is(T == union)) enum stateSize = Fields!T.length || isNested!T ? 
T.sizeof : 0; else static if (is(T == void)) enum size_t stateSize = 0; else enum stateSize = T.sizeof; } @safe @nogc nothrow pure unittest { static assert(stateSize!void == 0); struct A {} static assert(stateSize!A == 0); struct B { int x; } static assert(stateSize!B == 4); interface I1 {} //static assert(stateSize!I1 == 2 * size_t.sizeof); class C1 {} static assert(stateSize!C1 == 3 * size_t.sizeof); class C2 { char c; } static assert(stateSize!C2 == 4 * size_t.sizeof); static class C3 { char c; } static assert(stateSize!C3 == 2 * size_t.sizeof + char.sizeof); } /** Returns `true` if the `Allocator` has the alignment known at compile time; otherwise it returns `false`. */ template hasStaticallyKnownAlignment(Allocator) { enum hasStaticallyKnownAlignment = __traits(compiles, {enum x = Allocator.alignment;}); } /** $(D chooseAtRuntime) is a compile-time constant of type $(D size_t) that several parameterized structures in this module recognize to mean deferral to runtime of the exact value. For example, $(D BitmappedBlock!(Allocator, 4096)) (described in detail below) defines a block allocator with block size of 4096 bytes, whereas $(D BitmappedBlock!(Allocator, chooseAtRuntime)) defines a block allocator that has a field storing the block size, initialized by the user. */ enum chooseAtRuntime = size_t.max - 1; /** $(D unbounded) is a compile-time constant of type $(D size_t) that several parameterized structures in this module recognize to mean "infinite" bounds for the parameter. For example, $(D Freelist) (described in detail below) accepts a $(D maxNodes) parameter limiting the number of freelist items. If $(D unbounded) is passed for $(D maxNodes), then there is no limit and no checking for the number of nodes. */ enum unbounded = size_t.max; /** The alignment that is guaranteed to accommodate any D object allocation on the current platform. */ enum uint platformAlignment = mir.utility.max(double.alignof, real.alignof); /** The default good size allocation is deduced as $(D n) rounded up to the allocator's alignment. */ size_t goodAllocSize(A)(auto ref A a, size_t n) { return n.roundUpToMultipleOf(a.alignment); } /** Returns s rounded up to a multiple of base. */ @safe @nogc nothrow pure size_t roundUpToMultipleOf()(size_t s, uint base) { assert(base); auto rem = s % base; return rem ? s + base - rem : s; } @safe @nogc nothrow pure unittest { assert(10.roundUpToMultipleOf(11) == 11); assert(11.roundUpToMultipleOf(11) == 11); assert(12.roundUpToMultipleOf(11) == 22); assert(118.roundUpToMultipleOf(11) == 121); } /** Returns `n` rounded up to a multiple of alignment, which must be a power of 2. */ @safe @nogc nothrow pure size_t roundUpToAlignment()(size_t n, uint alignment) { import stdx.allocator.internal : isPowerOf2; assert(alignment.isPowerOf2); immutable uint slack = cast(uint) n & (alignment - 1); const result = slack ? n + alignment - slack : n; assert(result >= n); return result; } @safe @nogc nothrow pure unittest { assert(10.roundUpToAlignment(4) == 12); assert(11.roundUpToAlignment(2) == 12); assert(12.roundUpToAlignment(8) == 16); assert(118.roundUpToAlignment(64) == 128); } /** Returns `n` rounded down to a multiple of alignment, which must be a power of 2. 
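For illustration, a hedged sketch of the mask arithmetic used by the implementation below (values chosen arbitrarily):
---
// Clearing the low bits rounds down: 13 = 0b1101 becomes 8 = 0b1000.
assert(roundDownToAlignment(13, 8) == 8);
assert(roundDownToAlignment(16, 8) == 16);
---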
*/ @safe @nogc nothrow pure size_t roundDownToAlignment()(size_t n, uint alignment) { import stdx.allocator.internal : isPowerOf2; assert(alignment.isPowerOf2); return n & ~size_t(alignment - 1); } @safe @nogc nothrow pure unittest { assert(10.roundDownToAlignment(4) == 8); assert(11.roundDownToAlignment(2) == 10); assert(12.roundDownToAlignment(8) == 8); assert(63.roundDownToAlignment(64) == 0); } /** Advances the beginning of `b` to start at alignment `a`. The resulting buffer may therefore be shorter. Returns the adjusted buffer, or null if obtaining a non-empty buffer is impossible. */ @nogc nothrow pure void[] roundUpToAlignment()(void[] b, uint a) { auto e = b.ptr + b.length; auto p = cast(void*) roundUpToAlignment(cast(size_t) b.ptr, a); if (e <= p) return null; return p[0 .. e - p]; } @nogc nothrow pure @system unittest { void[] empty; assert(roundUpToAlignment(empty, 4) == null); char[128] buf; // At least one pointer inside buf is 128-aligned assert(roundUpToAlignment(buf, 128) !is null); } /** Like `a / b` but rounds the result up, not down. */ @safe @nogc nothrow pure size_t divideRoundUp()(size_t a, size_t b) { assert(b); return (a + b - 1) / b; } /** Returns `s` rounded up to a multiple of `base`. */ @nogc nothrow pure void[] roundStartToMultipleOf()(void[] s, uint base) { assert(base); auto p = cast(void*) roundUpToMultipleOf( cast(size_t) s.ptr, base); auto end = s.ptr + s.length; return p[0 .. end - p]; } nothrow pure @system unittest { void[] p; assert(roundStartToMultipleOf(p, 16) is null); p = new ulong[10]; assert(roundStartToMultipleOf(p, 16) is p); } /** Returns $(D s) rounded up to the nearest power of 2. */ @safe @nogc nothrow pure size_t roundUpToPowerOf2()(size_t s) { import std.meta : AliasSeq; assert(s <= (size_t.max >> 1) + 1); --s; static if (size_t.sizeof == 4) alias Shifts = AliasSeq!(1, 2, 4, 8, 16); else alias Shifts = AliasSeq!(1, 2, 4, 8, 16, 32); foreach (i; Shifts) { s |= s >> i; } return s + 1; } @safe @nogc nothrow pure unittest { assert(0.roundUpToPowerOf2 == 0); assert(1.roundUpToPowerOf2 == 1); assert(2.roundUpToPowerOf2 == 2); assert(3.roundUpToPowerOf2 == 4); assert(7.roundUpToPowerOf2 == 8); assert(8.roundUpToPowerOf2 == 8); assert(10.roundUpToPowerOf2 == 16); assert(11.roundUpToPowerOf2 == 16); assert(12.roundUpToPowerOf2 == 16); assert(118.roundUpToPowerOf2 == 128); assert((size_t.max >> 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1); assert(((size_t.max >> 1) + 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1); } /** Returns the number of trailing zeros of $(D x). */ @safe @nogc nothrow pure uint trailingZeros()(ulong x) { uint result; while (result < 64 && !(x & (1UL << result))) { ++result; } return result; } @safe @nogc nothrow pure unittest { assert(trailingZeros(0) == 64); assert(trailingZeros(1) == 0); assert(trailingZeros(2) == 1); assert(trailingZeros(3) == 0); assert(trailingZeros(4) == 2); } /** Returns `true` if `ptr` is aligned at `alignment`. */ @nogc nothrow pure bool alignedAt(T)(T* ptr, uint alignment) { return cast(size_t) ptr % alignment == 0; } /** Returns the effective alignment of `ptr`, i.e. the largest power of two that is a divisor of `ptr`. */ @nogc nothrow pure uint effectiveAlignment()(void* ptr) { return 1U << trailingZeros(cast(size_t) ptr); } @nogc nothrow pure @system unittest { int x; assert(effectiveAlignment(&x) >= int.alignof); } /** Aligns a pointer down to a specified alignment. The resulting pointer is less than or equal to the given pointer. 
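A hedged sketch with an arbitrary address value:
---
// 0x1007 masked down to a 16-byte boundary yields 0x1000.
void* p = cast(void*) 0x1007;
assert(alignDownTo(p, 0x10) is cast(void*) 0x1000);
---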
*/ @nogc nothrow pure void* alignDownTo()(void* ptr, uint alignment) { import stdx.allocator.internal : isPowerOf2; assert(alignment.isPowerOf2); return cast(void*) (cast(size_t) ptr & ~(alignment - 1UL)); } /** Aligns a pointer up to a specified alignment. The resulting pointer is greater than or equal to the given pointer. */ @nogc nothrow pure void* alignUpTo()(void* ptr, uint alignment) { import stdx.allocator.internal : isPowerOf2; assert(alignment.isPowerOf2); immutable uint slack = cast(size_t) ptr & (alignment - 1U); return slack ? ptr + alignment - slack : ptr; } @safe @nogc nothrow pure bool isGoodStaticAlignment()(uint x) { import stdx.allocator.internal : isPowerOf2; return x.isPowerOf2; } @safe @nogc nothrow pure bool isGoodDynamicAlignment()(uint x) { import stdx.allocator.internal : isPowerOf2; return x.isPowerOf2 && x >= (void*).sizeof; } /** The default $(D reallocate) function first attempts to use $(D expand). If $(D Allocator.expand) is not defined or returns $(D false), $(D reallocate) allocates a new block of memory of appropriate size and copies data from the old block to the new block. Finally, if $(D Allocator) defines $(D deallocate), $(D reallocate) uses it to free the old memory block. $(D reallocate) does not attempt to use $(D Allocator.reallocate) even if defined. This is deliberate so allocators may use it internally within their own implementation of $(D reallocate). */ bool reallocate(Allocator)(auto ref Allocator a, ref void[] b, size_t s) { if (b.length == s) return true; static if (__traits(hasMember, Allocator, "expand")) { if (b.length <= s && a.expand(b, s - b.length)) return true; } auto newB = a.allocate(s); if (newB.length != s) return false; if (newB.length <= b.length) newB[] = b[0 .. newB.length]; else newB[0 .. b.length] = b[]; static if (__traits(hasMember, Allocator, "deallocate")) a.deallocate(b); b = newB; return true; } /** The default $(D alignedReallocate) function first attempts to use $(D expand). If $(D Allocator.expand) is not defined or returns $(D false), $(D alignedReallocate) allocates a new block of memory of appropriate size and copies data from the old block to the new block. Finally, if $(D Allocator) defines $(D deallocate), $(D alignedReallocate) uses it to free the old memory block. $(D alignedReallocate) does not attempt to use $(D Allocator.reallocate) even if defined. This is deliberate so allocators may use it internally within their own implementation of $(D reallocate). */ bool alignedReallocate(Allocator)(auto ref Allocator alloc, ref void[] b, size_t s, uint a) { static if (__traits(hasMember, Allocator, "expand")) { if (b.length <= s && b.ptr.alignedAt(a) && alloc.expand(b, s - b.length)) return true; } else { if (b.length == s) return true; } auto newB = alloc.alignedAllocate(s, a); if (newB.length <= b.length) newB[] = b[0 .. newB.length]; else newB[0 .. b.length] = b[]; static if (__traits(hasMember, Allocator, "deallocate")) alloc.deallocate(b); b = newB; return true; } /** Forwards each of the methods in `funs` (if defined) to `member`. */ enum forwardToMember = (string member, string[] funs...) 
{ string result = " import std.traits : Parameters;\n"; foreach (fun; funs) { result ~= " static if (__traits(hasMember, typeof("~member~"), `"~fun~"`)) { static if (__traits(isTemplate, "~member~"."~fun~")) auto ref "~fun~"(Parameters!(typeof("~member~"."~fun~"!())) args) { return "~member~"."~fun~"(args); } else auto ref "~fun~"(Parameters!(typeof("~member~"."~fun~")) args) { return "~member~"."~fun~"(args); } }\n"; } return result; }; version(unittest) { import stdx.allocator : IAllocator, ISharedAllocator; package void testAllocator(alias make)() { import std.conv : text; import stdx.allocator.internal : isPowerOf2; import std.stdio : writeln, stderr; import stdx.allocator.internal : Ternary; alias A = typeof(make()); scope(failure) stderr.writeln("testAllocator failed for ", A.stringof); auto a = make(); // Test alignment static assert(A.alignment.isPowerOf2); // Test goodAllocSize assert(a.goodAllocSize(1) >= A.alignment, text(a.goodAllocSize(1), " < ", A.alignment)); assert(a.goodAllocSize(11) >= 11.roundUpToMultipleOf(A.alignment)); assert(a.goodAllocSize(111) >= 111.roundUpToMultipleOf(A.alignment)); // Test allocate assert(a.allocate(0) is null); auto b1 = a.allocate(1); assert(b1.length == 1); auto b2 = a.allocate(2); assert(b2.length == 2); assert(b2.ptr + b2.length <= b1.ptr || b1.ptr + b1.length <= b2.ptr); // Test alignedAllocate static if (__traits(hasMember, A, "alignedAllocate")) {{ auto b3 = a.alignedAllocate(1, 256); assert(b3.length <= 1); assert(b3.ptr.alignedAt(256)); assert(a.alignedReallocate(b3, 2, 512)); assert(b3.ptr.alignedAt(512)); static if (__traits(hasMember, A, "alignedDeallocate")) { a.alignedDeallocate(b3); } }} else { static assert(!__traits(hasMember, A, "alignedDeallocate")); // This seems to be a bug in the compiler: //static assert(!__traits(hasMember, A, "alignedReallocate"), A.stringof); } static if (__traits(hasMember, A, "allocateAll")) {{ auto aa = make(); if (aa.allocateAll().ptr) { // Can't get any more memory assert(!aa.allocate(1).ptr); } auto ab = make(); const b4 = ab.allocateAll(); assert(b4.length); // Can't get any more memory assert(!ab.allocate(1).ptr); }} static if (__traits(hasMember, A, "expand")) {{ assert(a.expand(b1, 0)); auto len = b1.length; if (a.expand(b1, 102)) { assert(b1.length == len + 102, text(b1.length, " != ", len + 102)); } auto aa = make(); void[] b5 = null; assert(aa.expand(b5, 0)); assert(b5 is null); assert(!aa.expand(b5, 1)); assert(b5.length == 0); }} void[] b6 = null; assert(a.reallocate(b6, 0)); assert(b6.length == 0); assert(a.reallocate(b6, 1)); assert(b6.length == 1, text(b6.length)); assert(a.reallocate(b6, 2)); assert(b6.length == 2); // Test owns static if (__traits(hasMember, A, "owns")) {{ assert(a.owns(null) == Ternary.no); assert(a.owns(b1) == Ternary.yes); assert(a.owns(b2) == Ternary.yes); assert(a.owns(b6) == Ternary.yes); }} static if (__traits(hasMember, A, "resolveInternalPointer")) {{ void[] p; assert(a.resolveInternalPointer(null, p) == Ternary.no); Ternary r = a.resolveInternalPointer(b1.ptr, p); assert(p.ptr is b1.ptr && p.length >= b1.length); r = a.resolveInternalPointer(b1.ptr + b1.length / 2, p); assert(p.ptr is b1.ptr && p.length >= b1.length); r = a.resolveInternalPointer(b2.ptr, p); assert(p.ptr is b2.ptr && p.length >= b2.length); r = a.resolveInternalPointer(b2.ptr + b2.length / 2, p); assert(p.ptr is b2.ptr && p.length >= b2.length); r = a.resolveInternalPointer(b6.ptr, p); assert(p.ptr is b6.ptr && p.length >= b6.length); r = a.resolveInternalPointer(b6.ptr + b6.length / 2, 
p); assert(p.ptr is b6.ptr && p.length >= b6.length); static int[10] b7 = [ 1, 2, 3 ]; assert(a.resolveInternalPointer(b7.ptr, p) == Ternary.no); assert(a.resolveInternalPointer(b7.ptr + b7.length / 2, p) == Ternary.no); assert(a.resolveInternalPointer(b7.ptr + b7.length, p) == Ternary.no); int[3] b8 = [ 1, 2, 3 ]; assert(a.resolveInternalPointer(b8.ptr, p) == Ternary.no); assert(a.resolveInternalPointer(b8.ptr + b8.length / 2, p) == Ternary.no); assert(a.resolveInternalPointer(b8.ptr + b8.length, p) == Ternary.no); }} } package void testAllocatorObject(AllocInterface)(AllocInterface a) if (is(AllocInterface : IAllocator) || is (AllocInterface : shared ISharedAllocator)) { import std.conv : text; import stdx.allocator.internal : isPowerOf2; import std.stdio : writeln, stderr; import stdx.allocator.internal : Ternary; scope(failure) stderr.writeln("testAllocatorObject failed for ", AllocInterface.stringof); assert(a); // Test alignment assert(a.alignment.isPowerOf2); // Test goodAllocSize assert(a.goodAllocSize(1) >= a.alignment, text(a.goodAllocSize(1), " < ", a.alignment)); assert(a.goodAllocSize(11) >= 11.roundUpToMultipleOf(a.alignment)); assert(a.goodAllocSize(111) >= 111.roundUpToMultipleOf(a.alignment)); // Test empty assert(a.empty != Ternary.no); // Test allocate assert(a.allocate(0) is null); auto b1 = a.allocate(1); assert(b1.length == 1); auto b2 = a.allocate(2); assert(b2.length == 2); assert(b2.ptr + b2.length <= b1.ptr || b1.ptr + b1.length <= b2.ptr); // Test alignedAllocate { // If not implemented it will return null, so those should pass auto b3 = a.alignedAllocate(1, 256); assert(b3.length <= 1); assert(b3.ptr.alignedAt(256)); if (a.alignedReallocate(b3, 1, 256)) { // If it is false, then the wrapped allocator did not implement // this assert(a.alignedReallocate(b3, 2, 512)); assert(b3.ptr.alignedAt(512)); } } // Test allocateAll { auto aa = a.allocateAll(); if (aa.ptr) { // Can't get any more memory assert(!a.allocate(1).ptr); a.deallocate(aa); } const b4 = a.allocateAll(); if (b4.ptr) { // Can't get any more memory assert(!a.allocate(1).ptr); } } // Test expand { assert(a.expand(b1, 0)); auto len = b1.length; if (a.expand(b1, 102)) { assert(b1.length == len + 102, text(b1.length, " != ", len + 102)); } } void[] b6 = null; assert(a.reallocate(b6, 0)); assert(b6.length == 0); assert(a.reallocate(b6, 1)); assert(b6.length == 1, text(b6.length)); assert(a.reallocate(b6, 2)); assert(b6.length == 2); // Test owns { if (a.owns(null) != Ternary.unknown) { assert(a.owns(null) == Ternary.no); assert(a.owns(b1) == Ternary.yes); assert(a.owns(b2) == Ternary.yes); assert(a.owns(b6) == Ternary.yes); } } // Test resolveInternalPointer { void[] p; if (a.resolveInternalPointer(null, p) != Ternary.unknown) { assert(a.resolveInternalPointer(null, p) == Ternary.no); Ternary r = a.resolveInternalPointer(b1.ptr, p); assert(p.ptr is b1.ptr && p.length >= b1.length); r = a.resolveInternalPointer(b1.ptr + b1.length / 2, p); assert(p.ptr is b1.ptr && p.length >= b1.length); r = a.resolveInternalPointer(b2.ptr, p); assert(p.ptr is b2.ptr && p.length >= b2.length); r = a.resolveInternalPointer(b2.ptr + b2.length / 2, p); assert(p.ptr is b2.ptr && p.length >= b2.length); r = a.resolveInternalPointer(b6.ptr, p); assert(p.ptr is b6.ptr && p.length >= b6.length); r = a.resolveInternalPointer(b6.ptr + b6.length / 2, p); assert(p.ptr is b6.ptr && p.length >= b6.length); static int[10] b7 = [ 1, 2, 3 ]; assert(a.resolveInternalPointer(b7.ptr, p) == Ternary.no); assert(a.resolveInternalPointer(b7.ptr + 
b7.length / 2, p) == Ternary.no); assert(a.resolveInternalPointer(b7.ptr + b7.length, p) == Ternary.no); int[3] b8 = [ 1, 2, 3 ]; assert(a.resolveInternalPointer(b8.ptr, p) == Ternary.no); assert(a.resolveInternalPointer(b8.ptr + b8.length / 2, p) == Ternary.no); assert(a.resolveInternalPointer(b8.ptr + b8.length, p) == Ternary.no); } } // Test deallocateAll { if (a.deallocateAll()) { if (a.empty != Ternary.unknown) { assert(a.empty == Ternary.yes); } } } } } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/gc_allocator.d 0000664 0000000 0000000 00000011743 13535263154 0024722 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.gc_allocator; import stdx.allocator.common; version (D_BetterC) { import stdx.allocator.building_blocks.null_allocator; alias GCAllocator = NullAllocator; } else version = HasDRuntime; version (HasDRuntime): /** D's built-in garbage-collected allocator. */ struct GCAllocator { import core.memory : GC; import stdx.allocator.internal : Ternary; @system unittest { testAllocator!(() => GCAllocator.instance); } /** The alignment is a static constant equal to $(D platformAlignment), which ensures proper alignment for any D data type. */ enum uint alignment = platformAlignment; /** Standard allocator methods per the semantics defined above. The $(D deallocate) and $(D reallocate) methods are $(D @system) because they may move memory around, leaving dangling pointers in user code. */ static pure nothrow @trusted void[] allocate()(size_t bytes) { if (!bytes) return null; auto p = GC.malloc(bytes); return p ? p[0 .. bytes] : null; } /// Ditto static @system bool expand()(ref void[] b, size_t delta) { if (delta == 0) return true; if (b is null) return false; immutable curLength = GC.sizeOf(b.ptr); assert(curLength != 0); // we have a valid GC pointer here immutable desired = b.length + delta; if (desired > curLength) // check to see if the current block can't hold the data { immutable sizeRequest = desired - curLength; immutable newSize = GC.extend(b.ptr, sizeRequest, sizeRequest); if (newSize == 0) { // expansion unsuccessful return false; } assert(newSize >= desired); } b = b.ptr[0 .. desired]; return true; } /// Ditto static pure nothrow @system bool reallocate()(ref void[] b, size_t newSize) { import core.exception : OutOfMemoryError; try { auto p = cast(ubyte*) GC.realloc(b.ptr, newSize); b = p[0 .. newSize]; } catch (OutOfMemoryError) { // leave the block in place, tell caller return false; } return true; } /// Ditto pure nothrow static Ternary resolveInternalPointer()(const void* p, ref void[] result) { auto r = GC.addrOf(cast(void*) p); if (!r) return Ternary.no; result = r[0 .. GC.sizeOf(r)]; return Ternary.yes; } /// Ditto static pure nothrow @system bool deallocate()(void[] b) { GC.free(b.ptr); return true; } /// Ditto static size_t goodAllocSize()(size_t n) { if (n == 0) return 0; if (n <= 16) return 16; import core.bitop : bsr; auto largestBit = bsr(n-1) + 1; if (largestBit <= 12) // 4096 or less return size_t(1) << largestBit; // larger, we use a multiple of 4096. return ((n + 4095) / 4096) * 4096; } /** Returns the global instance of this allocator type. The garbage collected allocator is thread-safe, therefore all of its methods are $(D static) and `instance` itself is $(D shared). */ enum GCAllocator instance = GCAllocator(); // Leave it undocumented for now.
static nothrow @trusted void collect()() { GC.collect(); } } /// @system unittest { auto buffer = GCAllocator.instance.allocate(1024 * 1024 * 4); // deallocate upon scope's end (alternatively: leave it to collection) scope(exit) GCAllocator.instance.deallocate(buffer); //... } @system unittest { auto b = GCAllocator.instance.allocate(10_000); assert(GCAllocator.instance.expand(b, 1)); } @system unittest { import core.memory : GC; import stdx.allocator.internal : Ternary; // test allocation sizes assert(GCAllocator.instance.goodAllocSize(1) == 16); for (size_t s = 16; s <= 8192; s *= 2) { assert(GCAllocator.instance.goodAllocSize(s) == s); assert(GCAllocator.instance.goodAllocSize(s - (s / 2) + 1) == s); auto buffer = GCAllocator.instance.allocate(s); scope(exit) GCAllocator.instance.deallocate(buffer); void[] p; assert(GCAllocator.instance.resolveInternalPointer(null, p) == Ternary.no); Ternary r = GCAllocator.instance.resolveInternalPointer(buffer.ptr, p); assert(p.ptr is buffer.ptr && p.length >= buffer.length); assert(GC.sizeOf(buffer.ptr) == s); // the GC should provide power of 2 as "good" sizes, but other sizes are allowed, too version(none) { auto buffer2 = GCAllocator.instance.allocate(s - (s / 2) + 1); scope(exit) GCAllocator.instance.deallocate(buffer2); assert(GC.sizeOf(buffer2.ptr) == s); } } // anything above a page is simply rounded up to next page assert(GCAllocator.instance.goodAllocSize(4096 * 4 + 1) == 4096 * 5); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/internal.d 0000664 0000000 0000000 00000006105 13535263154 0024101 0 ustar 00root root 0000000 0000000 // Private parts of Phobos module stdx.allocator.internal; import std.traits; // Bulk of emplace unittests ends here static if (is(typeof({ import std.typecons : Ternary; }))) { public import std.typecons : Ternary; } else static if (is(typeof({ import std.experimental.allocator.common : Ternary; }))) { public import std.experimental.allocator.common : Ternary; } else static assert(0, "Oops, don't know how to find Ternary"); /** Check whether a number is an integer power of two. Note that only positive numbers can be integer powers of two. This function always returns `false` if `x` is negative or zero. Params: x = the number to test Returns: `true` if `x` is an integer power of two.
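The integral case below relies on the classic bit trick: a positive `x` is a power of two exactly when it has a single bit set, that is, when `x & (x - 1) == 0`. A sketch:
---
assert((8 & (8 - 1)) == 0); // 0b1000 & 0b0111
assert((6 & (6 - 1)) != 0); // 0b0110 & 0b0101
---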
*/ bool isPowerOf2(X)(const X x) pure @safe nothrow @nogc if (isNumeric!X) { static if (isFloatingPoint!X) { import std.math : frexp; int exp; const X sig = frexp(x, exp); return (exp != int.min) && (sig is cast(X) 0.5L); } else { static if (isSigned!X) { auto y = cast(typeof(x + 0))x; return y > 0 && !(y & (y - 1)); } else { auto y = cast(typeof(x + 0u))x; return (y & -y) > (y - 1); } } } /// @safe unittest { import std.math : pow; assert( isPowerOf2(1.0L)); assert( isPowerOf2(2.0L)); assert( isPowerOf2(0.5L)); assert( isPowerOf2(pow(2.0L, 96))); assert( isPowerOf2(pow(2.0L, -77))); assert(!isPowerOf2(-2.0L)); assert(!isPowerOf2(-0.5L)); assert(!isPowerOf2(0.0L)); assert(!isPowerOf2(4.315)); assert(!isPowerOf2(1.0L / 3.0L)); assert(!isPowerOf2(real.nan)); assert(!isPowerOf2(real.infinity)); } /// @safe unittest { assert( isPowerOf2(1)); assert( isPowerOf2(2)); assert( isPowerOf2(1uL << 63)); assert(!isPowerOf2(-4)); assert(!isPowerOf2(0)); assert(!isPowerOf2(1337u)); } @safe unittest { import std.meta : AliasSeq; import std.math : pow; immutable smallP2 = pow(2.0L, -62); immutable bigP2 = pow(2.0L, 50); immutable smallP7 = pow(7.0L, -35); immutable bigP7 = pow(7.0L, 30); foreach (X; AliasSeq!(float, double, real)) { immutable min_sub = X.min_normal * X.epsilon; foreach (x; [smallP2, min_sub, X.min_normal, .25L, 0.5L, 1.0L, 2.0L, 8.0L, pow(2.0L, X.max_exp - 1), bigP2]) { assert( isPowerOf2(cast(X) x)); assert(!isPowerOf2(cast(X)-x)); } foreach (x; [0.0L, 3 * min_sub, smallP7, 0.1L, 1337.0L, bigP7, X.max, real.nan, real.infinity]) { assert(!isPowerOf2(cast(X) x)); assert(!isPowerOf2(cast(X)-x)); } } foreach (X; AliasSeq!(byte, ubyte, short, ushort, int, uint, long, ulong)) { foreach (x; [1, 2, 4, 8, (X.max >>> 1) + 1]) { assert( isPowerOf2(cast(X) x)); static if (isSigned!X) assert(!isPowerOf2(cast(X)-x)); } foreach (x; [0, 3, 5, 13, 77, X.min, X.max]) assert(!isPowerOf2(cast(X) x)); } } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/mallocator.d 0000664 0000000 0000000 00000024364 13535263154 0024431 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.mallocator; import stdx.allocator.common; /** The C heap allocator. */ struct Mallocator { @system unittest { testAllocator!(() => Mallocator.instance); } /** The alignment is a static constant equal to $(D platformAlignment), which ensures proper alignment for any D data type. */ enum uint alignment = platformAlignment; /** Standard allocator methods per the semantics defined above. The $(D deallocate) and $(D reallocate) methods are $(D @system) because they may move memory around, leaving dangling pointers in user code. Somewhat paradoxically, $(D malloc) is $(D @safe) but that's only useful to safe programs that can afford to leak memory allocated. */ @trusted @nogc nothrow static void[] allocate()(size_t bytes) { import core.stdc.stdlib : malloc; if (!bytes) return null; auto p = malloc(bytes); return p ? p[0 .. bytes] : null; } /// Ditto @system @nogc nothrow static bool deallocate()(void[] b) { import core.stdc.stdlib : free; free(b.ptr); return true; } /// Ditto @system @nogc nothrow static bool reallocate()(ref void[] b, size_t s) { import core.stdc.stdlib : realloc; if (!s) { // fuzzy area in the C standard, see http://goo.gl/ZpWeSE // so just deallocate and nullify the pointer deallocate(b); b = null; return true; } auto p = cast(ubyte*) realloc(b.ptr, s); if (!p) return false; b = p[0 .. s]; return true; } /** Returns the global instance of this allocator type. 
The C heap allocator is thread-safe, therefore all of its methods are $(D static) and `instance` itself is $(D shared). */ enum Mallocator instance = Mallocator(); } /// @nogc nothrow @system unittest { auto buffer = Mallocator.instance.allocate(1024 * 1024 * 4); scope(exit) Mallocator.instance.deallocate(buffer); //... } @nogc nothrow @system unittest { @nogc nothrow static void test(A)() { int* p = null; p = cast(int*) A.instance.allocate(int.sizeof); scope(exit) A.instance.deallocate(p[0 .. int.sizeof]); *p = 42; assert(*p == 42); } test!Mallocator(); } @nogc nothrow @system unittest { static void test(A)() { import stdx.allocator : make; Object p = null; p = A.instance.make!Object(); assert(p !is null); } test!Mallocator(); } version (Posix) @nogc nothrow private extern(C) int posix_memalign(void**, size_t, size_t); version (Windows) { // DMD Win 32 bit, DigitalMars C standard library lacks the _aligned_xxx // functions family (snn.lib) version(CRuntime_DigitalMars) { // Helper to cast the info written before the aligned pointer // this header keeps track of the size (required to realloc) and of // the base ptr (required to free). private struct AlignInfo { void* basePtr; size_t size; @nogc nothrow static AlignInfo* opCall()(void* ptr) { return cast(AlignInfo*) (ptr - AlignInfo.sizeof); } } @nogc nothrow private void* _aligned_malloc()(size_t size, size_t alignment) { import core.stdc.stdlib : malloc; size_t offset = alignment + size_t.sizeof * 2 - 1; // unaligned chunk void* basePtr = malloc(size + offset); if (!basePtr) return null; // get aligned location within the chunk void* alignedPtr = cast(void**)((cast(size_t)(basePtr) + offset) & ~(alignment - 1)); // write the header before the aligned pointer AlignInfo* head = AlignInfo(alignedPtr); head.basePtr = basePtr; head.size = size; return alignedPtr; } @nogc nothrow private void* _aligned_realloc()(void* ptr, size_t size, size_t alignment) { import core.stdc.stdlib : free; import core.stdc.string : memcpy; if (!ptr) return _aligned_malloc(size, alignment); // gets the header from the existing pointer AlignInfo* head = AlignInfo(ptr); // gets a new aligned pointer void* alignedPtr = _aligned_malloc(size, alignment); if (!alignedPtr) { // per https://msdn.microsoft.com/en-us/library/ms235462.aspx // (see Return value): in this case the original block is unchanged return null; } // copy existing data memcpy(alignedPtr, ptr, head.size); free(head.basePtr); return alignedPtr; } @nogc nothrow private void _aligned_free()(void *ptr) { import core.stdc.stdlib : free; if (!ptr) return; AlignInfo* head = AlignInfo(ptr); free(head.basePtr); } } // DMD Win 64 bit, uses the Microsoft standard C library, which implements them else { @nogc nothrow private extern(C) void* _aligned_malloc(size_t, size_t); @nogc nothrow private extern(C) void _aligned_free(void *memblock); @nogc nothrow private extern(C) void* _aligned_realloc(void *, size_t, size_t); } } /** Aligned allocator using OS-specific primitives, under a uniform API. */ version (WebAssembly) {} else version = HasMemAlign; version (HasMemAlign) struct AlignedMallocator { @system unittest { testAllocator!(() => typeof(this).instance); } /** The default alignment is $(D platformAlignment). */ enum uint alignment = platformAlignment; /** Forwards to $(D alignedAllocate(bytes, platformAlignment)).
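A minimal sketch of the equivalence (the default alignment is `platformAlignment` from `stdx.allocator.common`):
---
auto a = AlignedMallocator.instance.allocate(64);
// behaves like:
auto b = AlignedMallocator.instance.alignedAllocate(64, AlignedMallocator.alignment);
---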
*/ @trusted @nogc nothrow static void[] allocate()(size_t bytes) { if (!bytes) return null; return alignedAllocate(bytes, alignment); } /** Uses $(HTTP man7.org/linux/man-pages/man3/posix_memalign.3.html, $(D posix_memalign)) on Posix and $(HTTP msdn.microsoft.com/en-us/library/8z34s9c6(v=vs.80).aspx, $(D __aligned_malloc)) on Windows. */ version(Posix) @trusted @nogc nothrow static void[] alignedAllocate()(size_t bytes, uint a) { import core.stdc.errno : ENOMEM, EINVAL; assert(a.isGoodDynamicAlignment); void* result; auto code = posix_memalign(&result, a, bytes); if (code == ENOMEM) return null; else if (code == EINVAL) { assert(0, "AlignedMallocator.alignment is not a power of two " ~"multiple of (void*).sizeof, according to posix_memalign!"); } else if (code != 0) assert(0, "posix_memalign returned an unknown code!"); else return result[0 .. bytes]; } else version(Windows) @trusted @nogc nothrow static void[] alignedAllocate()(size_t bytes, uint a) { auto result = _aligned_malloc(bytes, a); return result ? result[0 .. bytes] : null; } else static assert(0); /** Calls $(D free(b.ptr)) on Posix and $(HTTP msdn.microsoft.com/en-US/library/17b5h8td(v=vs.80).aspx, $(D __aligned_free(b.ptr))) on Windows. */ version (Posix) @system @nogc nothrow static bool deallocate()(void[] b) { import core.stdc.stdlib : free; free(b.ptr); return true; } else version (Windows) @system @nogc nothrow static bool deallocate()(void[] b) { _aligned_free(b.ptr); return true; } else static assert(0); /** On Posix, forwards to $(D realloc). On Windows, forwards to $(D alignedReallocate(b, newSize, platformAlignment)). */ version (Posix) @system @nogc nothrow static bool reallocate()(ref void[] b, size_t newSize) { return Mallocator.instance.reallocate(b, newSize); } version (Windows) @system @nogc nothrow static bool reallocate()(ref void[] b, size_t newSize) { return alignedReallocate(b, newSize, alignment); } /** On Posix, uses $(D alignedAllocate) and copies data around because there is no realloc for aligned memory. On Windows, calls $(HTTP msdn.microsoft.com/en-US/library/y69db7sx(v=vs.80).aspx, $(D __aligned_realloc(b.ptr, newSize, a))). */ version (Windows) @system @nogc nothrow static bool alignedReallocate()(ref void[] b, size_t s, uint a) { if (!s) { deallocate(b); b = null; return true; } auto p = cast(ubyte*) _aligned_realloc(b.ptr, s, a); if (!p) return false; b = p[0 .. s]; return true; } /** Returns the global instance of this allocator type. The C heap allocator is thread-safe, therefore all of its methods are $(D static) and `instance` itself is $(D shared). */ enum AlignedMallocator instance = AlignedMallocator(); } /// @nogc nothrow @system unittest { auto buffer = AlignedMallocator.instance.alignedAllocate(1024 * 1024 * 4, 128); scope(exit) AlignedMallocator.instance.deallocate(buffer); //... 
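// A hedged addition to the example: the returned block should honor the
// requested 128-byte alignment (alignedAt comes from stdx.allocator.common).
assert(buffer.ptr.alignedAt(128));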
} version(unittest) version(CRuntime_DigitalMars) @nogc nothrow size_t addr(ref void* ptr) { return cast(size_t) ptr; } version(CRuntime_DigitalMars) @nogc nothrow @system unittest { void* m; m = _aligned_malloc(16, 0x10); if (m) { assert((m.addr & 0xF) == 0); _aligned_free(m); } m = _aligned_malloc(16, 0x100); if (m) { assert((m.addr & 0xFF) == 0); _aligned_free(m); } m = _aligned_malloc(16, 0x1000); if (m) { assert((m.addr & 0xFFF) == 0); _aligned_free(m); } m = _aligned_malloc(16, 0x10); if (m) { assert((cast(size_t) m & 0xF) == 0); m = _aligned_realloc(m, 32, 0x10000); if (m) assert((m.addr & 0xFFFF) == 0); _aligned_free(m); } m = _aligned_malloc(8, 0x10); if (m) { *cast(ulong*) m = 0X01234567_89ABCDEF; m = _aligned_realloc(m, 0x800, 0x1000); if (m) assert(*cast(ulong*) m == 0X01234567_89ABCDEF); _aligned_free(m); } } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/mmap_allocator.d 0000664 0000000 0000000 00000004443 13535263154 0025262 0 ustar 00root root 0000000 0000000 /// module stdx.allocator.mmap_allocator; // MmapAllocator /** Allocator (currently defined only for Posix and Windows) using $(D $(LINK2 https://en.wikipedia.org/wiki/Mmap, mmap)) and $(D $(LUCKY munmap)) directly (or their Windows equivalents). There is no additional structure: each call to $(D allocate(s)) issues a call to $(D mmap(null, s, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)), and each call to $(D deallocate(b)) issues $(D munmap(b.ptr, b.length)). So $(D MmapAllocator) is usually intended for allocating large chunks to be managed by fine-granular allocators. */ struct MmapAllocator { /// The one shared instance. enum MmapAllocator instance = MmapAllocator(); /** Alignment is page-size and hardcoded to 4096 (even though on certain systems it could be larger). */ enum size_t alignment = 4096; version(Posix) { /// Allocator API. static void[] allocate()(size_t bytes) { import core.sys.posix.sys.mman : mmap, MAP_ANON, PROT_READ, PROT_WRITE, MAP_PRIVATE, MAP_FAILED; if (!bytes) return null; auto p = mmap(null, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (p is MAP_FAILED) return null; return p[0 .. bytes]; } /// Ditto static bool deallocate()(void[] b) { import core.sys.posix.sys.mman : munmap; if (b.ptr) munmap(b.ptr, b.length) == 0 || assert(0); return true; } } else version(Windows) { import core.sys.windows.windows : VirtualAlloc, VirtualFree, MEM_COMMIT, PAGE_READWRITE, MEM_RELEASE; /// Allocator API. static void[] allocate()(size_t bytes) { if (!bytes) return null; auto p = VirtualAlloc(null, bytes, MEM_COMMIT, PAGE_READWRITE); if (p == null) return null; return p[0 .. bytes]; } /// Ditto static bool deallocate()(void[] b) { return b.ptr is null || VirtualFree(b.ptr, 0, MEM_RELEASE) != 0; } } } @system unittest { alias alloc = MmapAllocator.instance; auto p = alloc.allocate(100); assert(p.length == 100); alloc.deallocate(p); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/package.d 0000664 0000000 0000000 00000250360 13535263154 0023664 0 ustar 00root root 0000000 0000000 // Written in the D programming language. /** High-level interface for allocators. Implements bundled allocation/creation and destruction/deallocation of data including `struct`s and `class`es, and also array primitives related to allocation. This module is the entry point for both making use of allocators and for their documentation. 
$(SCRIPT inhibitQuickIndex = 1;) $(BOOKTABLE, $(TR $(TH Category) $(TH Functions)) $(TR $(TD Make) $(TD $(LREF make) $(LREF makeArray) $(LREF makeMultidimensionalArray) )) $(TR $(TD Dispose) $(TD $(LREF dispose) $(LREF disposeMultidimensionalArray) )) $(TR $(TD Modify) $(TD $(LREF expandArray) $(LREF shrinkArray) )) $(TR $(TD Global) $(TD $(LREF processAllocator) $(LREF theAllocator) )) $(TR $(TD Class interface) $(TD $(LREF allocatorObject) $(LREF CAllocatorImpl) $(LREF IAllocator) )) ) Synopsis: --- // Allocate an int, initialize it with 42 int* p = theAllocator.make!int(42); assert(*p == 42); // Destroy and deallocate it theAllocator.dispose(p); // Allocate using the global process allocator p = processAllocator.make!int(100); assert(*p == 100); // Destroy and deallocate processAllocator.dispose(p); // Create an array of 50 doubles initialized to -1.0 double[] arr = theAllocator.makeArray!double(50, -1.0); // Append two zeros to it theAllocator.expandArray(arr, 2, 0.0); // On second thought, take that back theAllocator.shrinkArray(arr, 2); // Destroy and deallocate theAllocator.dispose(arr); --- $(H2 Layered Structure) D's allocators have a layered structure in both implementation and documentation: $(OL $(LI A high-level, dynamically-typed layer (described further down in this module). It consists of an interface called $(LREF IAllocator), which concrete allocators need to implement. The interface primitives themselves are oblivious to the type of the objects being allocated; they only deal in `void[]`, by necessity of the interface being dynamic (as opposed to type-parameterized). Each thread has a current allocator it uses by default, which is a thread-local variable $(LREF theAllocator) of type $(LREF IAllocator). The process has a global _allocator called $(LREF processAllocator), also of type $(LREF IAllocator). When a new thread is created, $(LREF processAllocator) is copied into $(LREF theAllocator). An application can change the objects to which these references point. By default, at application startup, $(LREF processAllocator) refers to an object that uses D's garbage collected heap. This layer also includes high-level functions such as $(LREF make) and $(LREF dispose) that comfortably allocate/create and respectively destroy/deallocate objects. This layer is all that's needed for most casual uses of allocation primitives.) $(LI A mid-level, statically-typed layer for assembling several allocators into one. It uses properties of the type of the objects being created to route allocation requests to possibly specialized allocators. This layer is relatively thin and implemented and documented in the $(MREF std,experimental,_allocator,typed) module. It allows an interested user to e.g. use different allocators for arrays versus fixed-sized objects, to the end of better overall performance.) $(LI A low-level collection of highly generic $(I heap building blocks)$(MDASH) Lego-like pieces that can be used to assemble application-specific allocators. The real allocation smarts are occurring at this level. This layer is of interest to advanced applications that want to configure their own allocators. A good illustration of typical uses of these building blocks is module $(MREF std,experimental,_allocator,showcase) which defines a collection of frequently-used preassembled allocator objects. The implementation and documentation entry point is $(MREF std,experimental,_allocator,building_blocks).
By design, the primitives of the static interface have the same signatures as the $(LREF IAllocator) primitives but are for the most part optional and driven by static introspection. The parameterized class $(LREF CAllocatorImpl) offers an immediate and useful means to package a static low-level _allocator into an implementation of $(LREF IAllocator).) $(LI Core _allocator objects that interface with D's garbage collected heap ($(MREF std,experimental,_allocator,gc_allocator)), the C `malloc` family ($(MREF std,experimental,_allocator,mallocator)), and the OS ($(MREF std,experimental,_allocator,mmap_allocator)). Most custom allocators would ultimately obtain memory from one of these core allocators.) ) $(H2 Idiomatic Use of $(D stdx._allocator)) As of this time, $(D stdx._allocator) is not integrated with D's built-in operators that allocate memory, such as `new`, array literals, or array concatenation operators. That means $(D stdx._allocator) is opt-in$(MDASH)applications need to make explicit use of it. For casual creation and disposal of dynamically-allocated objects, use $(LREF make), $(LREF dispose), and the array-specific functions $(LREF makeArray), $(LREF expandArray), and $(LREF shrinkArray). These use by default D's garbage collected heap, but open the application to better configuration options. These primitives work not only with `theAllocator` but also with any allocator obtained by combining heap building blocks. For example: ---- void fun(size_t n) { // Use the current allocator int[] a1 = theAllocator.makeArray!int(n); scope(exit) theAllocator.dispose(a1); ... } ---- To experiment with alternative allocators, set $(LREF theAllocator) for the current thread. For example, consider an application that allocates many 8-byte objects. These are not well supported by the default _allocator, so a $(MREF_ALTTEXT free list _allocator, std,experimental,_allocator,building_blocks,free_list) would be recommended. To install one in `main`, the application would use: ---- void main() { import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; theAllocator = allocatorObject(FreeList!(GCAllocator, 8)()); ... } ---- $(H3 Saving the `IAllocator` Reference For Later Use) As with any global resource, setting `theAllocator` and `processAllocator` should not be done often and casually. In particular, allocating memory with one allocator and deallocating with another causes undefined behavior. Typically, these variables are set during the application initialization phase and last through the application. To avoid such mishaps, long-lived objects that need to perform allocations, reallocations, and deallocations relatively often may want to store a reference to the _allocator object they use throughout their lifetime. Then, instead of using `theAllocator` for internal allocation-related tasks, they'd use the internally held reference. For example, consider a user-defined hash table: ---- struct HashTable { private IAllocator _allocator; this(size_t buckets, IAllocator allocator = theAllocator) { this._allocator = allocator; ... } // Getter and setter IAllocator allocator() { return _allocator; } void allocator(IAllocator a) { assert(empty); _allocator = a; } } ---- Following initialization, the `HashTable` object would consistently use its $(D _allocator) object for acquiring memory. Furthermore, setting $(D HashTable._allocator) to point to a different _allocator should be legal but only if the object is empty; otherwise, the object wouldn't be able to deallocate its existing state.
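For instance, a hypothetical use of the `HashTable` sketched above with a custom allocator (the names are illustrative only):

----
import stdx.allocator.mallocator : Mallocator;
auto table = HashTable(16, allocatorObject(Mallocator.instance));
----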
$(H3 Using Allocators without `IAllocator`) Allocators assembled from the heap building blocks don't need to go through `IAllocator` to be usable. They have the same primitives as `IAllocator` and they work with $(LREF make), $(LREF makeArray), $(LREF dispose) etc. So it suffices to create allocator objects wherever they fit and use them appropriately: ---- void fun(size_t n) { // Use a stack-installed allocator for up to 64KB StackFront!65536 myAllocator; int[] a2 = myAllocator.makeArray!int(n); scope(exit) myAllocator.dispose(a2); ... } ---- In this case, `myAllocator` does not obey the `IAllocator` interface, but implements its primitives so it can work with `makeArray` by means of duck typing. One important thing to note about this setup is that statically-typed assembled allocators are almost always faster than allocators that go through `IAllocator`. An important rule of thumb is: "assemble allocator first, adapt to `IAllocator` after". A good allocator implements intricate logic by means of template assembly, and gets wrapped with `IAllocator` (usually by means of $(LREF allocatorObject)) only once, at client level. Copyright: Andrei Alexandrescu 2013-. License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). Authors: $(HTTP erdani.com, Andrei Alexandrescu) Source: $(PHOBOSSRC std/experimental/_allocator) */ module stdx.allocator; version (D_BetterC) {} else version = HasDRuntime; version (HasDRuntime): public import stdx.allocator.common, stdx.allocator.typed; // Example in the synopsis above @system unittest { import mir.utility : min, max; import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.building_blocks.bitmapped_block : BitmappedBlock; import stdx.allocator.building_blocks.bucketizer : Bucketizer; import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.building_blocks.segregator : Segregator; import stdx.allocator.gc_allocator : GCAllocator; alias FList = FreeList!(GCAllocator, 0, unbounded); alias A = Segregator!( 8, FreeList!(GCAllocator, 0, 8), 128, Bucketizer!(FList, 1, 128, 16), 256, Bucketizer!(FList, 129, 256, 32), 512, Bucketizer!(FList, 257, 512, 64), 1024, Bucketizer!(FList, 513, 1024, 128), 2048, Bucketizer!(FList, 1025, 2048, 256), 3584, Bucketizer!(FList, 2049, 3584, 512), 4072u * 1024, AllocatorList!( (n) => BitmappedBlock!(4096)( cast(ubyte[])(GCAllocator.instance.allocate( max(n, 4072u * 1024))))), GCAllocator ); A tuMalloc; auto b = tuMalloc.allocate(500); assert(b.length == 500); auto c = tuMalloc.allocate(113); assert(c.length == 113); assert(tuMalloc.expand(c, 14)); tuMalloc.deallocate(b); tuMalloc.deallocate(c); } import std.range.primitives; import std.traits; import stdx.allocator.internal : Ternary; import std.typecons : Flag, Yes, No; /** Dynamic allocator interface. Code that defines allocators ultimately implements this interface. This should be used wherever a uniform type is required for encapsulating various allocator implementations. Composition of allocators is not recommended at this level due to inflexibility of dynamic interfaces and inefficiencies caused by cascaded multiple calls. Instead, compose allocators using the static interface defined in $(A std_experimental_allocator_building_blocks.html, `stdx.allocator.building_blocks`), then adapt the composed allocator to `IAllocator` (possibly by using $(LREF CAllocatorImpl) below).
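A sketch of that adaptation, wrapping a statically-typed allocator once at client level:

---
import stdx.allocator.mallocator : Mallocator;
IAllocator a = allocatorObject(Mallocator.instance);
auto buf = a.allocate(64);
scope(exit) a.deallocate(buf);
---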
Methods returning $(D Ternary) return $(D Ternary.yes) upon success, $(D Ternary.no) upon failure, and $(D Ternary.unknown) if the primitive is not implemented by the allocator instance. */ interface IAllocator { /** Returns the alignment offered. */ @property uint alignment(); /** Returns the good allocation size that guarantees zero internal fragmentation. */ size_t goodAllocSize(size_t s); /** Allocates `n` bytes of memory. */ void[] allocate(size_t, TypeInfo ti = null); /** Allocates `n` bytes of memory with specified alignment `a`. Implementations that do not support this primitive should always return `null`. */ void[] alignedAllocate(size_t n, uint a); /** Allocates and returns all memory available to this allocator. Implementations that do not support this primitive should always return `null`. */ void[] allocateAll(); /** Expands a memory block in place and returns `true` if successful. Implementations that don't support this primitive should always return `false`. */ bool expand(ref void[], size_t); /// Reallocates a memory block. bool reallocate(ref void[], size_t); /// Reallocates a memory block with specified alignment. bool alignedReallocate(ref void[] b, size_t size, uint alignment); /** Returns $(D Ternary.yes) if the allocator owns $(D b), $(D Ternary.no) if the allocator doesn't own $(D b), and $(D Ternary.unknown) if ownership cannot be determined. Implementations that don't support this primitive should always return `Ternary.unknown`. */ Ternary owns(void[] b); /** Resolves an internal pointer to the full block allocated. Implementations that don't support this primitive should always return `Ternary.unknown`. */ Ternary resolveInternalPointer(const void* p, ref void[] result); /** Deallocates a memory block. Implementations that don't support this primitive should always return `false`. A simple way to check that an allocator supports deallocation is to call $(D deallocate(null)). */ bool deallocate(void[] b); /** Deallocates all memory. Implementations that don't support this primitive should always return `false`. */ bool deallocateAll(); /** Returns $(D Ternary.yes) if no memory is currently allocated from this allocator, $(D Ternary.no) if some allocations are currently active, or $(D Ternary.unknown) if not supported. */ Ternary empty(); } /** Dynamic shared allocator interface. Code that defines allocators shareable across threads ultimately implements this interface. This should be used wherever a uniform type is required for encapsulating various allocator implementations. Composition of allocators is not recommended at this level due to inflexibility of dynamic interfaces and inefficiencies caused by cascaded multiple calls. Instead, compose allocators using the static interface defined in $(A std_experimental_allocator_building_blocks.html, `stdx.allocator.building_blocks`), then adapt the composed allocator to `ISharedAllocator` (possibly by using $(LREF CSharedAllocatorImpl) below). Methods returning $(D Ternary) return $(D Ternary.yes) upon success, $(D Ternary.no) upon failure, and $(D Ternary.unknown) if the primitive is not implemented by the allocator instance. */ interface ISharedAllocator { /** Returns the alignment offered. */ @property uint alignment() shared; /** Returns the good allocation size that guarantees zero internal fragmentation. */ size_t goodAllocSize(size_t s) shared; /** Allocates `n` bytes of memory. */ void[] allocate(size_t, TypeInfo ti = null) shared; /** Allocates `n` bytes of memory with specified alignment `a`. 
Implementations that do not support this primitive should always return `null`. */ void[] alignedAllocate(size_t n, uint a) shared; /** Allocates and returns all memory available to this allocator. Implementations that do not support this primitive should always return `null`. */ void[] allocateAll() shared; /** Expands a memory block in place and returns `true` if successful. Implementations that don't support this primitive should always return `false`. */ bool expand(ref void[], size_t) shared; /// Reallocates a memory block. bool reallocate(ref void[], size_t) shared; /// Reallocates a memory block with specified alignment. bool alignedReallocate(ref void[] b, size_t size, uint alignment) shared; /** Returns $(D Ternary.yes) if the allocator owns $(D b), $(D Ternary.no) if the allocator doesn't own $(D b), and $(D Ternary.unknown) if ownership cannot be determined. Implementations that don't support this primitive should always return `Ternary.unknown`. */ Ternary owns(void[] b) shared; /** Resolves an internal pointer to the full block allocated. Implementations that don't support this primitive should always return `Ternary.unknown`. */ Ternary resolveInternalPointer(const void* p, ref void[] result) shared; /** Deallocates a memory block. Implementations that don't support this primitive should always return `false`. A simple way to check that an allocator supports deallocation is to call $(D deallocate(null)). */ bool deallocate(void[] b) shared; /** Deallocates all memory. Implementations that don't support this primitive should always return `false`. */ bool deallocateAll() shared; /** Returns $(D Ternary.yes) if no memory is currently allocated from this allocator, $(D Ternary.no) if some allocations are currently active, or $(D Ternary.unknown) if not supported. 
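A sketch of a three-way check through the dynamic interface:

---
auto e = processAllocator.empty;
if (e == Ternary.yes) { /* nothing outstanding */ }
else if (e == Ternary.no) { /* live allocations exist */ }
else { /* Ternary.unknown: the allocator cannot tell */ }
---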
*/ Ternary empty() shared; } private shared ISharedAllocator _processAllocator; private IAllocator _threadAllocator; private IAllocator setupThreadAllocator()() nothrow @nogc @safe { /* Forwards the `_threadAllocator` calls to the `processAllocator` */ static class ThreadAllocator : IAllocator { override @property uint alignment() { return processAllocator.alignment(); } override size_t goodAllocSize(size_t s) { return processAllocator.goodAllocSize(s); } override void[] allocate(size_t n, TypeInfo ti = null) { return processAllocator.allocate(n, ti); } override void[] alignedAllocate(size_t n, uint a) { return processAllocator.alignedAllocate(n, a); } override void[] allocateAll() { return processAllocator.allocateAll(); } override bool expand(ref void[] b, size_t size) { return processAllocator.expand(b, size); } override bool reallocate(ref void[] b, size_t size) { return processAllocator.reallocate(b, size); } override bool alignedReallocate(ref void[] b, size_t size, uint alignment) { return processAllocator.alignedReallocate(b, size, alignment); } override Ternary owns(void[] b) { return processAllocator.owns(b); } override Ternary resolveInternalPointer(const void* p, ref void[] result) { return processAllocator.resolveInternalPointer(p, result); } override bool deallocate(void[] b) { return processAllocator.deallocate(b); } override bool deallocateAll() { return processAllocator.deallocateAll(); } override Ternary empty() { return processAllocator.empty(); } } assert(!_threadAllocator); import mir.conv : emplace; static ulong[stateSize!(ThreadAllocator).divideRoundUp(ulong.sizeof)] _threadAllocatorState; _threadAllocator = () @trusted { return emplace!(ThreadAllocator)(_threadAllocatorState[]); } (); return _threadAllocator; } /** Gets/sets the allocator for the current thread. This is the default allocator that should be used for allocating thread-local memory. For allocating memory to be shared across threads, use $(D processAllocator) (below). By default, $(D theAllocator) ultimately fetches memory from $(D processAllocator), which in turn uses the garbage collected heap. */ nothrow @safe @nogc @property IAllocator theAllocator() { auto p = _threadAllocator; return p !is null ? p : setupThreadAllocator(); } /// Ditto nothrow @safe @nogc @property void theAllocator(IAllocator a) { assert(a); _threadAllocator = a; } /// @system unittest { // Install a new allocator that is faster for 128-byte allocations. import stdx.allocator.building_blocks.free_list : FreeList; import stdx.allocator.gc_allocator : GCAllocator; auto oldAllocator = theAllocator; scope(exit) theAllocator = oldAllocator; theAllocator = allocatorObject(FreeList!(GCAllocator, 128)()); // Use the now changed allocator to allocate an array const ubyte[] arr = theAllocator.makeArray!ubyte(128); assert(arr.ptr); //... } /** Gets/sets the allocator for the current process. This allocator must be used for allocating memory shared across threads. Objects created using this allocator can be cast to $(D shared). 
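A hedged sketch mirroring the synopsis at the top of this module:

---
auto p = processAllocator.make!int(100);
assert(*p == 100);
processAllocator.dispose(p);
---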
*/ @property shared(ISharedAllocator) processAllocator() { import stdx.allocator.gc_allocator : GCAllocator; import std.concurrency : initOnce; return initOnce!_processAllocator( sharedAllocatorObject(GCAllocator.instance)); } /// Ditto @property void processAllocator(shared ISharedAllocator a) { assert(a); _processAllocator = a; } @system unittest { import core.exception : AssertError; import std.exception : assertThrown; import stdx.allocator.building_blocks.free_list : SharedFreeList; import stdx.allocator.mallocator : Mallocator; assert(processAllocator); assert(theAllocator); testAllocatorObject(processAllocator); testAllocatorObject(theAllocator); shared SharedFreeList!(Mallocator, chooseAtRuntime, chooseAtRuntime) sharedFL; shared ISharedAllocator sharedFLObj = sharedAllocatorObject(sharedFL); assert(sharedFLObj); testAllocatorObject(sharedFLObj); // Test processAllocator setter shared ISharedAllocator oldProcessAllocator = processAllocator; processAllocator = sharedFLObj; assert(processAllocator is sharedFLObj); testAllocatorObject(processAllocator); testAllocatorObject(theAllocator); assertThrown!AssertError(processAllocator = null); // Restore initial processAllocator state processAllocator = oldProcessAllocator; assert(processAllocator is oldProcessAllocator); shared ISharedAllocator indirectShFLObj = sharedAllocatorObject(&sharedFL); testAllocatorObject(indirectShFLObj); IAllocator indirectMallocator = allocatorObject(Mallocator.instance); testAllocatorObject(indirectMallocator); } /** Dynamically allocates (using $(D alloc)) and then creates in the memory allocated an object of type $(D T), using $(D args) (if any) for its initialization. Initialization occurs in the memory allocated and is otherwise semantically the same as $(D T(args)). (Note that using $(D alloc.make!(T[])) creates a pointer to an (empty) array of $(D T)s, not an array. To use an allocator to allocate and initialize an array, use $(D alloc.makeArray!T) described below.) Params: T = Type of the object being created. alloc = The allocator used for getting the needed memory. It may be an object implementing the static interface for allocators, or an $(D IAllocator) reference. args = Optional arguments used for initializing the created object. If not present, the object is default constructed. Returns: If $(D T) is a class type, returns a reference to the created $(D T) object. Otherwise, returns a $(D T*) pointing to the created object. In all cases, returns $(D null) if allocation failed. Throws: If $(D T)'s constructor throws, deallocates the allocated memory and propagates the exception. 
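For instance, if the constructor throws, the memory is reclaimed before the exception leaves $(D make) (a sketch; `Fails` is a hypothetical type):
----
static struct Fails { this(int) { throw new Exception("ctor failed"); } }
import std.exception : assertThrown;
// No leak: make deallocates the block before rethrowing.
assertThrown(theAllocator.make!Fails(42));
----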
*/ auto make(T, Allocator, A...)(auto ref Allocator alloc, auto ref A args) { import mir.utility : max; import mir.conv : emplace, emplaceRef; auto m = alloc.allocate(max(stateSize!T, size_t(1))); if (!m.ptr) return null; // make can only be @safe if emplace or emplaceRef is `pure` auto construct() { static if (is(T == class)) { return emplace!T(m, args); } else { // Assume cast is safe as allocation succeeded for `stateSize!T` auto p = () @trusted { return cast(T*) m.ptr; }(); emplaceRef(*p, args); return p; } } scope(failure) { static if (is(typeof(() pure { return construct(); }))) { // Assume deallocation is safe because: // 1) in case of failure, `m` is the only reference to this memory // 2) `m` is known to originate from `alloc` () @trusted { alloc.deallocate(m); }(); } else { alloc.deallocate(m); } } return construct(); } /// @system unittest { // Dynamically allocate one integer const int* p1 = theAllocator.make!int; // It's implicitly initialized with its .init value assert(*p1 == 0); // Dynamically allocate one double, initialize to 42.5 const double* p2 = theAllocator.make!double(42.5); assert(*p2 == 42.5); // Dynamically allocate a struct static struct Point { int x, y, z; } // Use the generated constructor taking field values in order const Point* p = theAllocator.make!Point(1, 2); assert(p.x == 1 && p.y == 2 && p.z == 0); // Dynamically allocate a class object static class Customer { uint id = uint.max; this() {} this(uint id) { this.id = id; } // ... } Customer cust = theAllocator.make!Customer; assert(cust.id == uint.max); // default initialized cust = theAllocator.make!Customer(42); assert(cust.id == 42); // explicit passing of outer pointer static class Outer { int x = 3; class Inner { auto getX() { return x; } } } auto outer = theAllocator.make!Outer(); auto inner = theAllocator.make!(Outer.Inner)(outer); assert(outer.x == inner.getX); } @system unittest // bugzilla 15639 & 15772 { abstract class Foo {} class Bar: Foo {} static assert(!is(typeof(theAllocator.make!Foo))); static assert( is(typeof(theAllocator.make!Bar))); } @system unittest { void test(Allocator)(auto ref Allocator alloc) { const int* a = alloc.make!int(10); assert(*a == 10); struct A { int x; string y; double z; } A* b = alloc.make!A(42); assert(b.x == 42); assert(b.y is null); import std.math : isNaN; assert(b.z.isNaN); b = alloc.make!A(43, "44", 45); assert(b.x == 43); assert(b.y == "44"); assert(b.z == 45); static class B { int x; string y; double z; this(int _x, string _y = null, double _z = double.init) { x = _x; y = _y; z = _z; } } B c = alloc.make!B(42); assert(c.x == 42); assert(c.y is null); assert(c.z.isNaN); c = alloc.make!B(43, "44", 45); assert(c.x == 43); assert(c.y == "44"); assert(c.z == 45); const parray = alloc.make!(int[]); assert((*parray).empty); } import stdx.allocator.gc_allocator : GCAllocator; test(GCAllocator.instance); test(theAllocator); } // Attribute propagation nothrow @safe @nogc unittest { import stdx.allocator.mallocator : Mallocator; alias alloc = Mallocator.instance; void test(T, Args...)(auto ref Args args) { auto k = alloc.make!T(args); () @trusted { alloc.dispose(k); }(); } test!int; test!(int*); test!int(0); test!(int*)(null); } // should be pure with the GCAllocator /*pure nothrow*/ @safe unittest { import stdx.allocator.gc_allocator : GCAllocator; alias alloc = GCAllocator.instance; void test(T, Args...)(auto ref Args args) { auto k = alloc.make!T(args); (a) @trusted { a.dispose(k); }(alloc); } test!int(); test!(int*); test!int(0); test!(int*)(null); } // Verify 
that making an object by calling an impure constructor is not @safe nothrow @safe @nogc unittest { import stdx.allocator.mallocator : Mallocator; static struct Pure { this(int) pure nothrow @nogc @safe {} } cast(void) Mallocator.instance.make!Pure(0); static int g = 0; static struct Impure { this(int) nothrow @nogc @safe { g++; } } static assert(!__traits(compiles, cast(void) Mallocator.instance.make!Impure(0))); } // test failure with a pure, failing struct @safe unittest { import std.exception : assertThrown, enforce; // this struct can't be initialized struct InvalidStruct { this(int b) { enforce(1 == 2); } } import stdx.allocator.mallocator : Mallocator; assertThrown(make!InvalidStruct(Mallocator.instance, 42)); } // test failure with an impure, failing struct @system unittest { import std.exception : assertThrown, enforce; static int g; struct InvalidImpureStruct { this(int b) { g++; enforce(1 == 2); } } import stdx.allocator.mallocator : Mallocator; assertThrown(make!InvalidImpureStruct(Mallocator.instance, 42)); } /++ +/ T[] uninitializedFillDefault(T)(T[] array) nothrow @nogc { static if (__VERSION__ < 2083) { static if (is(Unqual!T == char) || is(Unqual!T == wchar)) { import core.stdc.string : memset; if (array !is null) memset(array.ptr, 0xff, T.sizeof * array.length); return array; } else { pragma(inline, false); import mir.conv : emplaceInitializer; foreach(ref e; array) emplaceInitializer(e); return array; } } else { static if (__traits(isZeroInit, T)) { import core.stdc.string : memset; if (array !is null) memset(array.ptr, 0, T.sizeof * array.length); return array; } else static if (is(Unqual!T == char) || is(Unqual!T == wchar)) { import core.stdc.string : memset; if (array !is null) memset(array.ptr, 0xff, T.sizeof * array.length); return array; } else { pragma(inline, false); import mir.conv : emplaceInitializer; foreach(ref e; array) emplaceInitializer(e); return array; } } } /// pure nothrow @nogc @system unittest { static struct S { int x = 42; @disable this(this); } int[5] expected = [42, 42, 42, 42, 42]; S[5] arr = void; uninitializedFillDefault(arr); assert((cast(int*) arr.ptr)[0 .. arr.length] == expected); } @system unittest { int[] a = [1, 2, 4]; uninitializedFillDefault(a); assert(a == [0, 0, 0]); } /** Create an array of $(D T) with $(D length) elements using $(D alloc). The array is either default-initialized, filled with copies of $(D init), or initialized with values fetched from `range`. Params: T = element type of the array being created alloc = the allocator used for getting memory length = length of the newly created array init = element used for filling the array range = range used for initializing the array elements Returns: The newly-created array, or $(D null) if either $(D length) was $(D 0) or allocation failed. Throws: The first two overloads throw only if `alloc`'s primitives do. The overloads that involve copy initialization deallocate memory and propagate the exception if the copy operation throws. 
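The three initialization styles side by side (an illustrative sketch):
----
int[] a = theAllocator.makeArray!int(3);     // default-initialized: [0, 0, 0]
int[] b = theAllocator.makeArray!int(3, 7);  // filled with copies of 7
import std.range : iota;
int[] c = theAllocator.makeArray!int(iota(1, 4)); // from a range: [1, 2, 3]
----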
*/ T[] makeArray(T, Allocator)(auto ref Allocator alloc, size_t length) { if (!length) return null; auto m = alloc.allocate(T.sizeof * length); if (!m.ptr) return null; alias U = Unqual!T; return () @trusted { return cast(T[]) uninitializedFillDefault(cast(U[]) m); }(); } @system unittest { void test1(A)(auto ref A alloc) { int[] a = alloc.makeArray!int(0); assert(a.length == 0 && a.ptr is null); a = alloc.makeArray!int(5); assert(a.length == 5); static immutable cheatsheet = [0, 0, 0, 0, 0]; assert(a == cheatsheet); } void test2(A)(auto ref A alloc) { static struct S { int x = 42; @disable this(this); } S[] arr = alloc.makeArray!S(5); assert(arr.length == 5); int[] arrInt = () @trusted { return (cast(int*) arr.ptr)[0 .. 5]; }(); static immutable res = [42, 42, 42, 42, 42]; assert(arrInt == res); } import stdx.allocator.gc_allocator : GCAllocator; import stdx.allocator.mallocator : Mallocator; (alloc) /*pure nothrow*/ @safe { test1(alloc); test2(alloc);} (GCAllocator.instance); (alloc) nothrow @safe @nogc { test1(alloc); test2(alloc);} (Mallocator.instance); test2(theAllocator); } @system unittest { auto a = theAllocator.makeArray!(shared int)(5); static assert(is(typeof(a) == shared(int)[])); assert(a.length == 5); assert(cast(int[])a == [0, 0, 0, 0, 0]); auto b = theAllocator.makeArray!(const int)(5); static assert(is(typeof(b) == const(int)[])); assert(b.length == 5); assert(cast(int[])b == [0, 0, 0, 0, 0]); auto c = theAllocator.makeArray!(immutable int)(5); static assert(is(typeof(c) == immutable(int)[])); assert(c.length == 5); assert(cast(int[])c == [0, 0, 0, 0, 0]); } private enum hasPurePostblit(T) = !hasElaborateCopyConstructor!T || is(typeof(() pure { T.init.__xpostblit(); })); private enum hasPureDtor(T) = !hasElaborateDestructor!T || is(typeof(() pure { T.init.__xdtor(); })); // `true` when postblit and destructor of T cannot escape references to itself private enum canSafelyDeallocPostRewind(T) = hasPurePostblit!T && hasPureDtor!T; /// Ditto T[] makeArray(T, Allocator)(auto ref Allocator alloc, size_t length, auto ref T init) { if (!length) return null; auto m = alloc.allocate(T.sizeof * length); if (!m.ptr) return null; auto result = () @trusted { return cast(T[]) m; } (); import std.traits : hasElaborateCopyConstructor; size_t i = 0; static if (hasElaborateCopyConstructor!T) { scope(failure) { static if (canSafelyDeallocPostRewind!T) () @trusted { alloc.deallocate(m); } (); else alloc.deallocate(m); } static if (hasElaborateDestructor!T) { scope (failure) { foreach (j; 0 .. 
i) { destroy(result[j]); } } } } import mir.conv: emplaceRef; for (; i < length; ++i) { emplaceRef!T(result[i], init); } return result; } /// @system unittest { static void test(T)() { T[] a = theAllocator.makeArray!T(2); assert(cast(int[])a == [0, 0]); a = theAllocator.makeArray!T(3, 42); assert(cast(int[])a == [42, 42, 42]); import std.range : only; a = theAllocator.makeArray!T(only(42, 43, 44)); assert(cast(int[])a == [42, 43, 44]); } test!int(); test!(shared int)(); test!(const int)(); test!(immutable int)(); } @system unittest { void test(A)(auto ref A alloc) { long[] a = alloc.makeArray!long(0, 42); assert(a.length == 0 && a.ptr is null); a = alloc.makeArray!long(5, 42); assert(a.length == 5); assert(a == [ 42, 42, 42, 42, 42 ]); } import stdx.allocator.gc_allocator : GCAllocator; (alloc) /*pure nothrow*/ @safe { test(alloc); } (GCAllocator.instance); test(theAllocator); } // test failure with a pure, failing struct @safe unittest { import std.exception : assertThrown, enforce; struct NoCopy { @disable this(); this(int b){} // can't be copied this(this) { enforce(1 == 2); } } import stdx.allocator.mallocator : Mallocator; assertThrown(makeArray!NoCopy(Mallocator.instance, 10, NoCopy(42))); } // test failure with an impure, failing struct @system unittest { import std.exception : assertThrown, enforce; static int i = 0; struct Singleton { @disable this(); this(int b){} // can't be copied this(this) { enforce(i++ == 0); } ~this() { i--; } } import stdx.allocator.mallocator : Mallocator; assertThrown(makeArray!Singleton(Mallocator.instance, 10, Singleton(42))); } /// Ditto Unqual!(ElementEncodingType!R)[] makeArray(Allocator, R)(auto ref Allocator alloc, R range) if (isInputRange!R && !isInfinite!R) { alias T = Unqual!(ElementEncodingType!R); return makeArray!(T, Allocator, R)(alloc, range); } /// Ditto T[] makeArray(T, Allocator, R)(auto ref Allocator alloc, R range) if (isInputRange!R && !isInfinite!R) { static if (isForwardRange!R || hasLength!R) { static if (hasLength!R || isNarrowString!R) immutable length = range.length; else immutable length = range.save.walkLength; if (!length) return null; auto m = alloc.allocate(T.sizeof * length); if (!m.ptr) return null; auto result = () @trusted { return cast(T[]) m; } (); size_t i = 0; scope (failure) { foreach (j; 0 .. i) { auto p = () @trusted { return cast(Unqual!T*) &result[j]; }(); destroy(p); } static if (canSafelyDeallocPostRewind!T) () @trusted { alloc.deallocate(m); } (); else alloc.deallocate(m); } import mir.conv : emplaceRef; static if (isNarrowString!R || isRandomAccessRange!R) { foreach (j; 0 .. range.length) { emplaceRef!T(result[i++], range[j]); } } else { for (; !range.empty; range.popFront, ++i) { emplaceRef!T(result[i], range.front); } } return result; } else { // Estimated size size_t estimated = 8; auto m = alloc.allocate(T.sizeof * estimated); if (!m.ptr) return null; auto result = () @trusted { return cast(T[]) m; } (); size_t initialized = 0; void bailout() { foreach (i; 0 .. 
initialized + 1) { destroy(result[i]); } static if (canSafelyDeallocPostRewind!T) () @trusted { alloc.deallocate(m); } (); else alloc.deallocate(m); } scope (failure) bailout; for (; !range.empty; range.popFront, ++initialized) { if (initialized == estimated) { // Need to reallocate static if (hasPurePostblit!T) auto success = () @trusted { return alloc.reallocate(m, T.sizeof * (estimated *= 2)); } (); else auto success = alloc.reallocate(m, T.sizeof * (estimated *= 2)); if (!success) { bailout; return null; } result = () @trusted { return cast(T[]) m; } (); } import mir.conv : emplaceRef; emplaceRef(result[initialized], range.front); } if (initialized < estimated) { // Try to shrink memory, no harm if not possible static if (hasPurePostblit!T) auto success = () @trusted { return alloc.reallocate(m, T.sizeof * initialized); } (); else auto success = alloc.reallocate(m, T.sizeof * initialized); if (success) result = () @trusted { return cast(T[]) m; } (); } return result[0 .. initialized]; } } @system unittest { void test(A)(auto ref A alloc) { long[] a = alloc.makeArray!long((int[]).init); assert(a.length == 0 && a.ptr is null); a = alloc.makeArray!long([5, 42]); assert(a.length == 2); assert(a == [ 5, 42]); // we can also infer the type auto b = alloc.makeArray([4.0, 2.0]); static assert(is(typeof(b) == double[])); assert(b == [4.0, 2.0]); } import stdx.allocator.gc_allocator : GCAllocator; (alloc) pure nothrow @safe { test(alloc); } (GCAllocator.instance); test(theAllocator); } // infer types for strings @system unittest { void test(A)(auto ref A alloc) { auto c = alloc.makeArray("fooπ😜"); static assert(is(typeof(c) == char[])); assert(c == "fooπ😜"); auto d = alloc.makeArray("fooπ😜"d); static assert(is(typeof(d) == dchar[])); assert(d == "fooπ😜"); auto w = alloc.makeArray("fooπ😜"w); static assert(is(typeof(w) == wchar[])); assert(w == "fooπ😜"); } import stdx.allocator.gc_allocator : GCAllocator; (alloc) pure nothrow @safe { test(alloc); } (GCAllocator.instance); test(theAllocator); } /*pure*/ nothrow @safe unittest { import std.algorithm.comparison : equal; import stdx.allocator.gc_allocator : GCAllocator; import std.internal.test.dummyrange; import std.range : iota; foreach (DummyType; AllDummyRanges) { (alloc) pure nothrow @safe { DummyType d; auto arr = alloc.makeArray(d); assert(arr.length == 10); assert(arr.equal(iota(1, 11))); } (GCAllocator.instance); } } // test failure with a pure, failing struct @safe unittest { import std.exception : assertThrown, enforce; struct NoCopy { int b; @disable this(); this(int b) { this.b = b; } // can't be copied this(this) { enforce(b < 3, "there can only be three elements"); } } import stdx.allocator.mallocator : Mallocator; auto arr = [NoCopy(1), NoCopy(2), NoCopy(3)]; assertThrown(makeArray!NoCopy(Mallocator.instance, arr)); struct NoCopyRange { static j = 0; bool empty() { return j > 5; } auto front() { return NoCopy(j); } void popFront() { j++; } } assertThrown(makeArray!NoCopy(Mallocator.instance, NoCopyRange())); } // test failure with an impure, failing struct @system unittest { import std.exception : assertThrown, enforce; static i = 0; static maxElements = 2; struct NoCopy { int val; @disable this(); this(int b){ this.val = i++; } // can't be copied this(this) { enforce(i++ < maxElements, "there can only be four elements"); } } import stdx.allocator.mallocator : Mallocator; auto arr = [NoCopy(1), NoCopy(2)]; assertThrown(makeArray!NoCopy(Mallocator.instance, arr)); // allow more copies and thus force reallocation i = 0; maxElements = 30; 
static j = 0; struct NoCopyRange { bool empty() { return j > 100; } auto front() { return NoCopy(1); } void popFront() { j++; } } assertThrown(makeArray!NoCopy(Mallocator.instance, NoCopyRange())); maxElements = 300; auto arr2 = makeArray!NoCopy(Mallocator.instance, NoCopyRange()); import std.algorithm.comparison : equal; import std.algorithm.iteration : map; import std.range : iota; assert(arr2.map!`a.val`.equal(iota(32, 204, 2))); } version(unittest) { private struct ForcedInputRange { int[]* array; pure nothrow @safe @nogc: bool empty() { return !array || (*array).empty; } ref int front() { return (*array)[0]; } void popFront() { *array = (*array)[1 .. $]; } } } @system unittest { import std.array : array; import std.range : iota; int[] arr = iota(10).array; void test(A)(auto ref A alloc) { ForcedInputRange r; long[] a = alloc.makeArray!long(r); assert(a.length == 0 && a.ptr is null); auto arr2 = arr; r.array = () @trusted { return &arr2; } (); a = alloc.makeArray!long(r); assert(a.length == 10); assert(a == iota(10).array); } import stdx.allocator.gc_allocator : GCAllocator; (alloc) pure nothrow @safe { test(alloc); } (GCAllocator.instance); test(theAllocator); } /** Grows $(D array) by appending $(D delta) more elements. The needed memory is allocated using $(D alloc). The extra elements added are either default- initialized, filled with copies of $(D init), or initialized with values fetched from `range`. Params: T = element type of the array being created alloc = the allocator used for getting memory array = a reference to the array being grown delta = number of elements to add (upon success the new length of $(D array) is $(D array.length + delta)) init = element used for filling the array range = range used for initializing the array elements Returns: $(D true) upon success, $(D false) if memory could not be allocated. In the latter case $(D array) is left unaffected. Throws: The first two overloads throw only if `alloc`'s primitives do. The overloads that involve copy initialization deallocate memory and propagate the exception if the copy operation throws. */ bool expandArray(T, Allocator)(auto ref Allocator alloc, ref T[] array, size_t delta) { if (!delta) return true; if (array is null) return false; immutable oldLength = array.length; void[] buf = array; if (!alloc.reallocate(buf, buf.length + T.sizeof * delta)) return false; array = cast(T[]) buf; array[oldLength .. $].uninitializedFillDefault; return true; } @system unittest { void test(A)(auto ref A alloc) { auto arr = alloc.makeArray!int([1, 2, 3]); assert(alloc.expandArray(arr, 3)); assert(arr == [1, 2, 3, 0, 0, 0]); } import stdx.allocator.gc_allocator : GCAllocator; test(GCAllocator.instance); test(theAllocator); } /// Ditto bool expandArray(T, Allocator)(auto ref Allocator alloc, ref T[] array, size_t delta, auto ref T init) { if (!delta) return true; if (array is null) return false; void[] buf = array; if (!alloc.reallocate(buf, buf.length + T.sizeof * delta)) return false; immutable oldLength = array.length; array = cast(T[]) buf; scope(failure) array[oldLength .. $].uninitializedFillDefault; import std.algorithm.mutation : uninitializedFill; array[oldLength .. 
$].uninitializedFill(init); return true; } @system unittest { void test(A)(auto ref A alloc) { auto arr = alloc.makeArray!int([1, 2, 3]); assert(alloc.expandArray(arr, 3, 1)); assert(arr == [1, 2, 3, 1, 1, 1]); } import stdx.allocator.gc_allocator : GCAllocator; test(GCAllocator.instance); test(theAllocator); } /// Ditto bool expandArray(T, Allocator, R)(auto ref Allocator alloc, ref T[] array, R range) if (isInputRange!R) { if (array is null) return false; static if (isForwardRange!R) { immutable delta = walkLength(range.save); if (!delta) return true; immutable oldLength = array.length; // Reallocate support memory void[] buf = array; if (!alloc.reallocate(buf, buf.length + T.sizeof * delta)) { return false; } array = cast(T[]) buf; // At this point we're committed to the new length. auto toFill = array[oldLength .. $]; scope (failure) { // Fill the remainder with default-constructed data toFill.uninitializedFillDefault; } for (; !range.empty; range.popFront, toFill.popFront) { assert(!toFill.empty); import mir.conv : emplace; emplace!T(&toFill.front, range.front); } assert(toFill.empty); } else { scope(failure) { // The last element didn't make it, fill with default array[$ - 1 .. $].uninitializedFillDefault; } void[] buf = array; for (; !range.empty; range.popFront) { if (!alloc.reallocate(buf, buf.length + T.sizeof)) { array = cast(T[]) buf; return false; } import mir.conv : emplace; emplace!T(buf[$ - T.sizeof .. $], range.front); } array = cast(T[]) buf; } return true; } /// @system unittest { auto arr = theAllocator.makeArray!int([1, 2, 3]); assert(theAllocator.expandArray(arr, 2)); assert(arr == [1, 2, 3, 0, 0]); import std.range : only; assert(theAllocator.expandArray(arr, only(4, 5))); assert(arr == [1, 2, 3, 0, 0, 4, 5]); } @system unittest { auto arr = theAllocator.makeArray!int([1, 2, 3]); ForcedInputRange r; int[] b = [ 1, 2, 3, 4 ]; auto temp = b; r.array = &temp; assert(theAllocator.expandArray(arr, r)); assert(arr == [1, 2, 3, 1, 2, 3, 4]); } /** Shrinks an array by $(D delta) elements. If $(D array.length < delta), does nothing and returns `false`. Otherwise, destroys the last $(D delta) elements in the array and then reallocates the array's buffer. If reallocation fails, fills the array with default-initialized data. Params: T = element type of the array being shrunk alloc = the allocator used for getting memory array = a reference to the array being shrunk delta = number of elements to remove (upon success the new length of $(D array) is $(D array.length - delta)) Returns: `true` upon success, `false` if memory could not be reallocated. In the latter case, the slice $(D array[$ - delta .. $]) is left with default-initialized elements. Throws: Propagates any exception thrown by `alloc`'s primitives or by an element's destructor; if a destructor throws, the elements already destroyed are reset to their default initializer before the exception escapes. */ bool shrinkArray(T, Allocator)(auto ref Allocator alloc, ref T[] array, size_t delta) { if (delta > array.length) return false; // Destroy elements. If a destructor throws, fill the already destroyed // stuff with the default initializer. { size_t destroyed; scope(failure) { array[$ - delta .. $][0 .. destroyed].uninitializedFillDefault; } foreach (ref e; array[$ - delta .. $]) { e.destroy; ++destroyed; } } if (delta == array.length) { alloc.deallocate(array); array = null; return true; } void[] buf = array; if (!alloc.reallocate(buf, buf.length - T.sizeof * delta)) { // Reallocation failed; at least restore the tail to its default state array[$ - delta .. 
$].uninitializedFillDefault; return false; } array = cast(T[]) buf; return true; } /// @system unittest { int[] a = theAllocator.makeArray!int(100, 42); assert(a.length == 100); assert(theAllocator.shrinkArray(a, 98)); assert(a.length == 2); assert(a == [42, 42]); } @system unittest { void test(A)(auto ref A alloc) { long[] a = alloc.makeArray!long((int[]).init); assert(a.length == 0 && a.ptr is null); a = alloc.makeArray!long(100, 42); assert(alloc.shrinkArray(a, 98)); assert(a.length == 2); assert(a == [ 42, 42]); } import stdx.allocator.gc_allocator : GCAllocator; test(GCAllocator.instance); test(theAllocator); } /** Destroys and then deallocates (using $(D alloc)) the object pointed to by a pointer, the class object referred to by a $(D class) or $(D interface) reference, or an entire array. It is assumed the respective entities had been allocated with the same allocator. */ void dispose(A, T)(auto ref A alloc, auto ref T* p) { static if (hasElaborateDestructor!T) { destroy(*p); } alloc.deallocate((cast(void*) p)[0 .. T.sizeof]); static if (__traits(isRef, p)) p = null; } /// Ditto void dispose(A, T)(auto ref A alloc, auto ref T p) if (is(T == class) || is(T == interface)) { if (!p) return; static if (is(T == interface)) { version(Windows) { import core.sys.windows.unknwn : IUnknown; static assert(!is(T: IUnknown), "COM interfaces can't be destroyed in " ~ __PRETTY_FUNCTION__); } auto ob = cast(Object) p; } else alias ob = p; auto support = (cast(void*) ob)[0 .. typeid(ob).initializer.length]; destroy(p); alloc.deallocate(support); static if (__traits(isRef, p)) p = null; } /// Ditto void dispose(A, T)(auto ref A alloc, auto ref T[] array) { static if (hasElaborateDestructor!(typeof(array[0]))) { foreach (ref e; array) { destroy(e); } } alloc.deallocate(array); static if (__traits(isRef, array)) array = null; } @system unittest { static int x; static interface I { void method(); } static class A : I { int y; override void method() { x = 21; } ~this() { x = 42; } } static class B : A { } auto a = theAllocator.make!A; a.method(); assert(x == 21); theAllocator.dispose(a); assert(x == 42); B b = theAllocator.make!B; b.method(); assert(x == 21); theAllocator.dispose(b); assert(x == 42); I i = theAllocator.make!B; i.method(); assert(x == 21); theAllocator.dispose(i); assert(x == 42); int[] arr = theAllocator.makeArray!int(43); theAllocator.dispose(arr); } @system unittest //bugzilla 16512 { import stdx.allocator.mallocator : Mallocator; int* i = Mallocator.instance.make!int(0); Mallocator.instance.dispose(i); assert(i is null); Object o = Mallocator.instance.make!Object(); Mallocator.instance.dispose(o); assert(o is null); uint* u = Mallocator.instance.make!uint(0); Mallocator.instance.dispose((){return u;}()); assert(u !is null); uint[] ua = Mallocator.instance.makeArray!uint([0,1,2]); Mallocator.instance.dispose(ua); assert(ua is null); } @system unittest //bugzilla 15721 { import stdx.allocator.mallocator : Mallocator; interface Foo {} class Bar: Foo {} Bar bar; Foo foo; bar = Mallocator.instance.make!Bar; foo = cast(Foo) bar; Mallocator.instance.dispose(foo); } /** Allocates a multidimensional array of elements of type T. Params: N = number of dimensions T = element type of an element of the multidimensional arrat alloc = the allocator used for getting memory lengths = static array containing the size of each dimension Returns: An N-dimensional array with individual elements of type T. 
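The result is a jagged array of arrays: every dimension except the innermost is an array of separately allocated sub-arrays. An illustrative sketch:
----
auto m = theAllocator.makeMultidimensionalArray!int(2, 3);
static assert(is(typeof(m) == int[][])); // two rows, each its own allocation
assert(m.length == 2 && m[0].length == 3);
theAllocator.disposeMultidimensionalArray(m);
----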
*/ auto makeMultidimensionalArray(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...) { static if (N == 1) { return makeArray!T(alloc, lengths[0]); } else { alias E = typeof(makeMultidimensionalArray!(T, Allocator, N - 1)(alloc, lengths[1 .. $])); auto ret = makeArray!E(alloc, lengths[0]); foreach (ref e; ret) e = makeMultidimensionalArray!(T, Allocator, N - 1)(alloc, lengths[1 .. $]); return ret; } } /// @system unittest { import stdx.allocator.mallocator : Mallocator; auto mArray = Mallocator.instance.makeMultidimensionalArray!int(2, 3, 6); // deallocate when exiting scope scope(exit) { Mallocator.instance.disposeMultidimensionalArray(mArray); } assert(mArray.length == 2); foreach (lvl2Array; mArray) { assert(lvl2Array.length == 3); foreach (lvl3Array; lvl2Array) assert(lvl3Array.length == 6); } } /** Destroys and then deallocates a multidimensional array, assuming it was created with makeMultidimensionalArray and the same allocator was used. Params: T = element type of an element of the multidimensional array alloc = the allocator used for getting memory array = the multidimensional array that is to be deallocated */ void disposeMultidimensionalArray(T, Allocator)(auto ref Allocator alloc, auto ref T[] array) { static if (isArray!T) { foreach (ref e; array) disposeMultidimensionalArray(alloc, e); } dispose(alloc, array); static if (__traits(isRef, array)) array = null; } /// @system unittest { struct TestAllocator { import stdx.allocator.common : platformAlignment; import stdx.allocator.mallocator : Mallocator; alias allocator = Mallocator.instance; private static struct ByteRange { void* ptr; size_t length; } private ByteRange[] _allocations; enum uint alignment = platformAlignment; void[] allocate(size_t numBytes) { auto ret = allocator.allocate(numBytes); _allocations ~= ByteRange(ret.ptr, ret.length); return ret; } bool deallocate(void[] bytes) { import std.algorithm.mutation : remove; import std.algorithm.searching : canFind; bool pred(ByteRange other) { return other.ptr == bytes.ptr && other.length == bytes.length; } assert(_allocations.canFind!pred); _allocations = _allocations.remove!pred; return allocator.deallocate(bytes); } ~this() { assert(!_allocations.length); } } TestAllocator allocator; auto mArray = allocator.makeMultidimensionalArray!int(2, 3, 5, 6, 7, 2); allocator.disposeMultidimensionalArray(mArray); } /** Returns a dynamically-typed $(D CAllocator) built around a given statically- typed allocator $(D a) of type $(D A). Passing a pointer to the allocator creates a dynamic allocator around the allocator pointed to by the pointer, without attempting to copy or move it. Passing the allocator by value or reference behaves as follows. $(UL $(LI If $(D A) has no state, the resulting object is allocated in static shared storage.) $(LI If $(D A) has state and is copyable, the result will store a copy of it within. The result itself is allocated in its own statically-typed allocator.) $(LI If $(D A) has state and is not copyable, the result will move the passed-in argument into the result. The result itself is allocated in its own statically-typed allocator.) 
) */ CAllocatorImpl!A allocatorObject(A)(auto ref A a) if (!isPointer!A) { import mir.conv : emplace; static if (stateSize!A == 0) { enum s = stateSize!(CAllocatorImpl!A).divideRoundUp(ulong.sizeof); static __gshared ulong[s] state; static __gshared CAllocatorImpl!A result; if (!result) { // Don't care about a few races result = emplace!(CAllocatorImpl!A)(state[]); } assert(result); return result; } else static if (is(typeof({ A b = a; A c = b; }))) // copyable { auto state = a.allocate(stateSize!(CAllocatorImpl!A)); static if (__traits(hasMember, A, "deallocate")) { scope(failure) a.deallocate(state); } return cast(CAllocatorImpl!A) emplace!(CAllocatorImpl!A)(state); } else // the allocator object is not copyable { // This is sensitive... create on the stack and then move enum s = stateSize!(CAllocatorImpl!A).divideRoundUp(ulong.sizeof); ulong[s] state; import std.algorithm.mutation : move; emplace!(CAllocatorImpl!A)(state[], move(a)); auto dynState = a.allocate(stateSize!(CAllocatorImpl!A)); // Bitblast the object in its final destination dynState[] = state[]; return cast(CAllocatorImpl!A) dynState.ptr; } } /// Ditto CAllocatorImpl!(A, Yes.indirect) allocatorObject(A)(A* pa) { assert(pa); import mir.conv : emplace; auto state = pa.allocate(stateSize!(CAllocatorImpl!(A, Yes.indirect))); static if (__traits(hasMember, A, "deallocate")) { scope(failure) pa.deallocate(state); } return emplace!(CAllocatorImpl!(A, Yes.indirect)) (state, pa); } /// @system unittest { import stdx.allocator.mallocator : Mallocator; IAllocator a = allocatorObject(Mallocator.instance); auto b = a.allocate(100); assert(b.length == 100); assert(a.deallocate(b)); // The in-situ region must be used by pointer import stdx.allocator.building_blocks.region : InSituRegion; auto r = InSituRegion!1024(); a = allocatorObject(&r); b = a.allocate(200); assert(b.length == 200); // In-situ regions can deallocate the last allocation assert(a.deallocate(b)); } /** Returns a dynamically-typed $(D CSharedAllocator) built around a given statically- typed allocator $(D a) of type $(D A). Passing a pointer to the allocator creates a dynamic allocator around the allocator pointed to by the pointer, without attempting to copy or move it. Passing the allocator by value or reference behaves as follows. $(UL $(LI If $(D A) has no state, the resulting object is allocated in static shared storage.) $(LI If $(D A) has state and is copyable, the result will store a copy of it within. The result itself is allocated in its own statically-typed allocator.) $(LI If $(D A) has state and is not copyable, the result will move the passed-in argument into the result. The result itself is allocated in its own statically-typed allocator.) 
) */ shared(CSharedAllocatorImpl!A) sharedAllocatorObject(A)(auto ref A a) if (!isPointer!A) { import mir.conv : emplace; static if (stateSize!A == 0) { enum s = stateSize!(CSharedAllocatorImpl!A).divideRoundUp(ulong.sizeof); static __gshared ulong[s] state; static shared CSharedAllocatorImpl!A result; if (!result) { // Don't care about a few races result = cast(shared CSharedAllocatorImpl!A)(emplace!(CSharedAllocatorImpl!A)(state[])); } assert(result); return result; } else static if (is(typeof({ shared A b = a; shared A c = b; }))) // copyable { auto state = a.allocate(stateSize!(CSharedAllocatorImpl!A)); static if (__traits(hasMember, A, "deallocate")) { scope(failure) a.deallocate(state); } return emplace!(shared CSharedAllocatorImpl!A)(state); } else // the allocator object is not copyable { assert(0, "Not yet implemented"); } } /// Ditto shared(CSharedAllocatorImpl!(A, Yes.indirect)) sharedAllocatorObject(A)(A* pa) { assert(pa); import mir.conv : emplace; auto state = pa.allocate(stateSize!(CSharedAllocatorImpl!(A, Yes.indirect))); static if (__traits(hasMember, A, "deallocate")) { scope(failure) pa.deallocate(state); } return emplace!(shared CSharedAllocatorImpl!(A, Yes.indirect))(state, pa); } /** Implementation of `IAllocator` using `Allocator`. This adapts a statically-built allocator type to `IAllocator` that is directly usable by non-templated code. Usually `CAllocatorImpl` is used indirectly by calling $(LREF theAllocator). */ class CAllocatorImpl(Allocator, Flag!"indirect" indirect = No.indirect) : IAllocator { /** The implementation is available as a public member. */ static if (indirect) { private Allocator* pimpl; ref Allocator impl() { return *pimpl; } this(Allocator* pa) { pimpl = pa; } } else { static if (stateSize!Allocator) Allocator impl; else alias impl = Allocator.instance; } /// Returns `impl.alignment`. override @property uint alignment() { return impl.alignment; } /** Returns `impl.goodAllocSize(s)`. */ override size_t goodAllocSize(size_t s) { return impl.goodAllocSize(s); } /** Returns `impl.allocate(s)`. */ override void[] allocate(size_t s, TypeInfo ti = null) { return impl.allocate(s); } /** If `impl.alignedAllocate` exists, calls it and returns the result. Otherwise, always returns `null`. */ override void[] alignedAllocate(size_t s, uint a) { static if (__traits(hasMember, Allocator, "alignedAllocate")) return impl.alignedAllocate(s, a); else return null; } /** If `Allocator` implements `owns`, forwards to it. Otherwise, returns `Ternary.unknown`. */ override Ternary owns(void[] b) { static if (__traits(hasMember, Allocator, "owns")) return impl.owns(b); else return Ternary.unknown; } /// Returns $(D impl.expand(b, s)) if defined, `false` otherwise. override bool expand(ref void[] b, size_t s) { static if (__traits(hasMember, Allocator, "expand")) return impl.expand(b, s); else return s == 0; } /// Returns $(D impl.reallocate(b, s)). override bool reallocate(ref void[] b, size_t s) { return impl.reallocate(b, s); } /// Forwards to `impl.alignedReallocate` if defined, `false` otherwise. 
override bool alignedReallocate(ref void[] b, size_t s, uint a) { static if (!__traits(hasMember, Allocator, "alignedReallocate")) { return false; } else { return impl.alignedReallocate(b, s, a); } } // Undocumented for now Ternary resolveInternalPointer(const void* p, ref void[] result) { static if (__traits(hasMember, Allocator, "resolveInternalPointer")) { return impl.resolveInternalPointer(p, result); } else { return Ternary.unknown; } } /** If `impl.deallocate` is not defined, returns `false`. Otherwise it forwards the call. */ override bool deallocate(void[] b) { static if (__traits(hasMember, Allocator, "deallocate")) { return impl.deallocate(b); } else { return false; } } /** Calls `impl.deallocateAll()` and returns the result if defined, otherwise returns `false`. */ override bool deallocateAll() { static if (__traits(hasMember, Allocator, "deallocateAll")) { return impl.deallocateAll(); } else { return false; } } /** Forwards to `impl.empty()` if defined, otherwise returns `Ternary.unknown`. */ override Ternary empty() { static if (__traits(hasMember, Allocator, "empty")) { return Ternary(impl.empty); } else { return Ternary.unknown; } } /** Returns `impl.allocateAll()` if present, `null` otherwise. */ override void[] allocateAll() { static if (__traits(hasMember, Allocator, "allocateAll")) { return impl.allocateAll(); } else { return null; } } } /** Implementation of `ISharedAllocator` using `Allocator`. This adapts a statically-built allocator type that is shareable across threads to `ISharedAllocator`, making it directly usable by non-templated code. Usually `CSharedAllocatorImpl` is used indirectly by calling $(LREF processAllocator). */ class CSharedAllocatorImpl(Allocator, Flag!"indirect" indirect = No.indirect) : ISharedAllocator { /** The implementation is available as a public member. */ static if (indirect) { private shared Allocator* pimpl; ref Allocator impl() shared { return *pimpl; } this(Allocator* pa) shared { pimpl = pa; } } else { static if (stateSize!Allocator) shared Allocator impl; else alias impl = Allocator.instance; } /// Returns `impl.alignment`. override @property uint alignment() shared { return impl.alignment; } /** Returns `impl.goodAllocSize(s)`. */ override size_t goodAllocSize(size_t s) shared { return impl.goodAllocSize(s); } /** Returns `impl.allocate(s)`. */ override void[] allocate(size_t s, TypeInfo ti = null) shared { return impl.allocate(s); } /** If `impl.alignedAllocate` exists, calls it and returns the result. Otherwise, always returns `null`. */ override void[] alignedAllocate(size_t s, uint a) shared { static if (__traits(hasMember, Allocator, "alignedAllocate")) return impl.alignedAllocate(s, a); else return null; } /** If `Allocator` implements `owns`, forwards to it. Otherwise, returns `Ternary.unknown`. */ override Ternary owns(void[] b) shared { static if (__traits(hasMember, Allocator, "owns")) return impl.owns(b); else return Ternary.unknown; } /// Returns $(D impl.expand(b, s)) if defined, `false` otherwise. override bool expand(ref void[] b, size_t s) shared { static if (__traits(hasMember, Allocator, "expand")) return impl.expand(b, s); else return s == 0; } /// Returns $(D impl.reallocate(b, s)). override bool reallocate(ref void[] b, size_t s) shared { return impl.reallocate(b, s); } /// Forwards to `impl.alignedReallocate` if defined, `false` otherwise. 
override bool alignedReallocate(ref void[] b, size_t s, uint a) shared { static if (!__traits(hasMember, Allocator, "alignedReallocate")) { return false; } else { return impl.alignedReallocate(b, s, a); } } // Undocumented for now Ternary resolveInternalPointer(const void* p, ref void[] result) shared { static if (__traits(hasMember, Allocator, "resolveInternalPointer")) { return impl.resolveInternalPointer(p, result); } else { return Ternary.unknown; } } /** If `impl.deallocate` is not defined, returns `false`. Otherwise it forwards the call. */ override bool deallocate(void[] b) shared { static if (__traits(hasMember, Allocator, "deallocate")) { return impl.deallocate(b); } else { return false; } } /** Calls `impl.deallocateAll()` and returns the result if defined, otherwise returns `false`. */ override bool deallocateAll() shared { static if (__traits(hasMember, Allocator, "deallocateAll")) { return impl.deallocateAll(); } else { return false; } } /** Forwards to `impl.empty()` if defined, otherwise returns `Ternary.unknown`. */ override Ternary empty() shared { static if (__traits(hasMember, Allocator, "empty")) { return Ternary(impl.empty); } else { return Ternary.unknown; } } /** Returns `impl.allocateAll()` if present, `null` otherwise. */ override void[] allocateAll() shared { static if (__traits(hasMember, Allocator, "allocateAll")) { return impl.allocateAll(); } else { return null; } } } // Example in intro above @system unittest { // Allocate an int, initialize it with 42 int* p = theAllocator.make!int(42); assert(*p == 42); // Destroy and deallocate it theAllocator.dispose(p); // Allocate using the global process allocator p = processAllocator.make!int(100); assert(*p == 100); // Destroy and deallocate processAllocator.dispose(p); // Create an array of 50 doubles initialized to -1.0 double[] arr = theAllocator.makeArray!double(50, -1.0); // Check internal pointer void[] result; assert(theAllocator.resolveInternalPointer(null, result) == Ternary.no); Ternary r = theAllocator.resolveInternalPointer(arr.ptr, result); assert(result.ptr is arr.ptr && result.length >= arr.length); // Append two zeros to it theAllocator.expandArray(arr, 2, 0.0); // On second thought, take that back theAllocator.shrinkArray(arr, 2); // Destroy and deallocate theAllocator.dispose(arr); } __EOF__ /** Stores an allocator object in thread-local storage (i.e. non-$(D shared) D global). $(D ThreadLocal!A) is a subtype of $(D A) so it appears to implement $(D A)'s allocator primitives. $(D A) must hold state, otherwise $(D ThreadLocal!A) refuses instantiation. This means e.g. $(D ThreadLocal!Mallocator) does not work because $(D Mallocator)'s state is not stored as members of $(D Mallocator), but instead is hidden in the C library implementation. */ struct ThreadLocal(A) { static assert(stateSize!A, A.stringof ~ " does not have state so it cannot be used with ThreadLocal"); /** The allocator instance. */ static A instance; /** `ThreadLocal!A` is a subtype of `A` so it appears to implement `A`'s allocator primitives. */ alias instance this; /** `ThreadLocal` disables all constructors. The intended usage is `ThreadLocal!A.instance`. */ @disable this(); /// Ditto @disable this(this); } /// unittest { static assert(!is(ThreadLocal!Mallocator)); static assert(!is(ThreadLocal!GCAllocator)); alias Allocator = ThreadLocal!(FreeList!(GCAllocator, 0, 8)); auto b = Allocator.instance.allocate(5); static assert(__traits(hasMember, Allocator, "allocate")); } /* (Not public.) 
A binary search tree that uses no allocation of its own. Instead, it relies on user code to allocate nodes externally. Then $(D EmbeddedTree)'s primitives wire the nodes appropriately. Warning: currently $(D EmbeddedTree) is not using rebalancing, so it may degenerate. A red-black tree implementation storing the color with one of the pointers is planned for the future. */ private struct EmbeddedTree(T, alias less) { static struct Node { T payload; Node* left, right; } private Node* root; private Node* insert(Node* n, ref Node* backref) { backref = n; n.left = n.right = null; return n; } Node* find(Node* data) { for (auto n = root; n; ) { if (less(data, n)) { n = n.left; } else if (less(n, data)) { n = n.right; } else { return n; } } return null; } Node* insert(Node* data) { if (!root) { root = data; data.left = data.right = null; return root; } auto n = root; for (;;) { if (less(data, n)) { if (!n.left) { // Found insertion point return insert(data, n.left); } n = n.left; } else if (less(n, data)) { if (!n.right) { // Found insertion point return insert(data, n.right); } n = n.right; } else { // Found return n; } if (!n) return null; } } Node* remove(Node* data) { auto n = root; Node* parent = null; for (;;) { if (!n) return null; if (less(data, n)) { parent = n; n = n.left; } else if (less(n, data)) { parent = n; n = n.right; } else { // Found remove(n, parent); return n; } } } private void remove(Node* n, Node* parent) { assert(n); assert(!parent || parent.left == n || parent.right == n); Node** referrer = parent ? (parent.left == n ? &parent.left : &parent.right) : &root; if (!n.left) { *referrer = n.right; } else if (!n.right) { *referrer = n.left; } else { // Find the leftmost child in the right subtree auto leftmost = n.right; Node** leftmostReferrer = &n.right; while (leftmost.left) { leftmostReferrer = &leftmost.left; leftmost = leftmost.left; } // Unlink leftmost from there *leftmostReferrer = leftmost.right; // Link leftmost in lieu of n leftmost.left = n.left; leftmost.right = n.right; *referrer = leftmost; } } Ternary empty() const { return Ternary(!root); } void dump() { writeln(typeid(this), " @ ", cast(void*) &this); dump(root, 3); } void dump(Node* r, uint indent) { write(repeat(' ', indent).array); if (!r) { writeln("(null)"); return; } writeln(r.payload, " @ ", cast(void*) r); dump(r.left, indent + 3); dump(r.right, indent + 3); } void assertSane() { static bool isBST(Node* r, Node* lb, Node* ub) { if (!r) return true; if (lb && !less(lb, r)) return false; if (ub && !less(r, ub)) return false; return isBST(r.left, lb, r) && isBST(r.right, r, ub); } if (isBST(root, null, null)) return; dump; assert(0); } } unittest { alias a = GCAllocator.instance; alias Tree = EmbeddedTree!(int, (a, b) => a.payload < b.payload); Tree t; assert(t.empty); int[] vals = [ 6, 3, 9, 1, 0, 2, 8, 11 ]; foreach (v; vals) { auto n = new Tree.Node(v, null, null); assert(t.insert(n)); assert(n); t.assertSane; } assert(!t.empty); foreach (v; vals) { Tree.Node n = { v }; assert(t.remove(&n)); t.assertSane; } assert(t.empty); } /* $(D InternalPointersTree) adds a primitive on top of another allocator: calling $(D resolveInternalPointer(p)) returns the block within which the internal pointer $(D p) lies. Pointers right after the end of allocated blocks are also considered internal. The implementation stores three additional words with each allocation (one for the block size and two for search management). 
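For example (an illustrative sketch mirroring the unit test below), any interior pointer resolves to its enclosing block:
----
InternalPointersTree!Mallocator a;
auto b = a.allocate(42);
void[] blk;
assert(a.resolveInternalPointer(b.ptr + 10, blk) == Ternary.yes);
assert(blk.ptr is b.ptr && blk.length >= b.length);
----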
*/ private struct InternalPointersTree(Allocator) { alias Tree = EmbeddedTree!(size_t, (a, b) => cast(void*) a + a.payload < cast(void*) b); alias Parent = AffixAllocator!(Allocator, Tree.Node); // Own state private Tree blockMap; alias alignment = Parent.alignment; /** The implementation is available as a public member. */ static if (stateSize!Parent) Parent parent; else alias parent = Parent.instance; /// Allocator API. void[] allocate(size_t bytes) { auto r = parent.allocate(bytes); if (!r.ptr) return r; Tree.Node* n = &parent.prefix(r); n.payload = bytes; blockMap.insert(n) || assert(0); return r; } /// Ditto bool deallocate(void[] b) { if (!b.ptr) return true; Tree.Node* n = &parent.prefix(b); blockMap.remove(n) || assert(false); parent.deallocate(b); return true; } /// Ditto static if (__traits(hasMember, Allocator, "reallocate")) bool reallocate(ref void[] b, size_t s) { auto n = &parent.prefix(b); assert(n.payload == b.length); blockMap.remove(n) || assert(0); if (!parent.reallocate(b, s)) { // Failed, must reinsert the same node in the tree assert(n.payload == b.length); blockMap.insert(n) || assert(0); return false; } // Insert the new node n = &parent.prefix(b); n.payload = s; blockMap.insert(n) || assert(0); return true; } /// Ditto Ternary owns(void[] b) { void[] result; return resolveInternalPointer(b.ptr, result); } /// Ditto Ternary empty() { return Ternary(blockMap.empty); } /** Sets `result` to the block inside which $(D p) resides and returns $(D Ternary.yes), or returns $(D Ternary.no) if the pointer does not belong to any allocation. */ Ternary resolveInternalPointer(const void* p, ref void[] result) { // Must define a custom find Tree.Node* find() { for (auto n = blockMap.root; n; ) { if (p < n) { n = n.left; } else if (p > (cast(void*) (n + 1)) + n.payload) { n = n.right; } else { return n; } } return null; } auto n = find(); if (!n) return Ternary.no; result = (cast(void*) (n + 1))[0 .. n.payload]; return Ternary.yes; } } unittest { InternalPointersTree!(Mallocator) a; int[] vals = [ 6, 3, 9, 1, 2, 8, 11 ]; void[][] allox; foreach (v; vals) { allox ~= a.allocate(v); } a.blockMap.assertSane; foreach (b; allox) { void[] p; Ternary r = a.resolveInternalPointer(b.ptr, p); assert(p.ptr is b.ptr && p.length >= b.length); r = a.resolveInternalPointer(b.ptr + b.length, p); assert(p.ptr is b.ptr && p.length >= b.length); r = a.resolveInternalPointer(b.ptr + b.length / 2, p); assert(p.ptr is b.ptr && p.length >= b.length); auto bogus = new void[b.length]; assert(a.resolveInternalPointer(bogus.ptr, p) == Ternary.no); } foreach (b; allox.randomCover) { a.deallocate(b); } assert(a.empty); } //version (std_allocator_benchmark) unittest { static void testSpeed(A)() { static if (stateSize!A) A a; else alias a = A.instance; void[][128] bufs; import std.random; foreach (i; 0 .. 
100_000) { auto j = uniform(0, bufs.length); switch (uniform(0, 2)) { case 0: a.deallocate(bufs[j]); bufs[j] = a.allocate(uniform(0, 4096)); break; case 1: a.deallocate(bufs[j]); bufs[j] = null; break; default: assert(0); } } } alias FList = FreeList!(GCAllocator, 0, unbounded); alias A = Segregator!( 8, FreeList!(GCAllocator, 0, 8), 128, Bucketizer!(FList, 1, 128, 16), 256, Bucketizer!(FList, 129, 256, 32), 512, Bucketizer!(FList, 257, 512, 64), 1024, Bucketizer!(FList, 513, 1024, 128), 2048, Bucketizer!(FList, 1025, 2048, 256), 3584, Bucketizer!(FList, 2049, 3584, 512), 4072u * 1024, AllocatorList!( (size_t n) => BitmappedBlock!(4096)(GCAllocator.instance.allocate( max(n, 4072u * 1024)))), GCAllocator ); import std.datetime, stdx.allocator.null_allocator; if (false) writeln(benchmark!( testSpeed!NullAllocator, testSpeed!Mallocator, testSpeed!GCAllocator, testSpeed!(ThreadLocal!A), testSpeed!(A), )(20)[].map!(t => t.to!("seconds", double))); } unittest { auto a = allocatorObject(Mallocator.instance); auto b = a.allocate(100); assert(b.length == 100); FreeList!(GCAllocator, 0, 8) fl; auto sa = allocatorObject(fl); b = a.allocate(101); assert(b.length == 101); FallbackAllocator!(InSituRegion!(10240, 64), GCAllocator) fb; // Doesn't work yet... //a = allocatorObject(fb); //b = a.allocate(102); //assert(b.length == 102); } /// unittest { // Define an allocator bound to the built-in GC. IAllocator alloc = allocatorObject(GCAllocator.instance); auto b = alloc.allocate(42); assert(b.length == 42); assert(alloc.deallocate(b)); // Define an elaborate allocator and bind it to the class API. // Note that the same variable "alloc" is used. alias FList = FreeList!(GCAllocator, 0, unbounded); alias A = ThreadLocal!( Segregator!( 8, FreeList!(GCAllocator, 0, 8), 128, Bucketizer!(FList, 1, 128, 16), 256, Bucketizer!(FList, 129, 256, 32), 512, Bucketizer!(FList, 257, 512, 64), 1024, Bucketizer!(FList, 513, 1024, 128), 2048, Bucketizer!(FList, 1025, 2048, 256), 3584, Bucketizer!(FList, 2049, 3584, 512), 4072u * 1024, AllocatorList!( (n) => BitmappedBlock!(4096)(GCAllocator.instance.allocate( max(n, 4072u * 1024)))), GCAllocator ) ); auto alloc2 = allocatorObject(A.instance); b = alloc.allocate(101); assert(alloc.deallocate(b)); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/showcase.d 0000664 0000000 0000000 00000005316 13535263154 0024104 0 ustar 00root root 0000000 0000000 /** Collection of typical and useful prebuilt allocators using the given components. User code would typically import this module and use its facilities, or import individual heap building blocks and assemble them. */ module stdx.allocator.showcase; version (D_BetterC) {} else version = HasDRuntime; version (HasDRuntime): import stdx.allocator.building_blocks.fallback_allocator, stdx.allocator.gc_allocator, stdx.allocator.building_blocks.region; /** Allocator that uses stack allocation for up to $(D stackSize) bytes and then falls back to $(D Allocator). Defined as: ---- alias StackFront(size_t stackSize, Allocator = GCAllocator) = FallbackAllocator!( InSituRegion!(stackSize, Allocator.alignment), Allocator); ---- Choosing `stackSize` is as always a compromise. Too small a size exhausts the stack storage after a few allocations, after which there are no gains over the backup allocator. Too large a size increases the stack consumed by the thread and may end up worse off because it explores cold portions of the stack. 
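As an illustrative sketch, requests that fit in the in-situ region are served from the stack, while larger ones spill over to the backup allocator:
----
StackFront!4096 a;                     // 4 KB stack buffer, GC fallback
auto small = a.allocate(1000);         // served from the stack region
auto large = a.allocate(1024 * 1024);  // exceeds the region; served by the GC
assert(small.length == 1000 && large.length == 1024 * 1024);
----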
*/ alias StackFront(size_t stackSize, Allocator = GCAllocator) = FallbackAllocator!( InSituRegion!(stackSize, Allocator.alignment), Allocator); /// @system unittest { StackFront!4096 a; auto b = a.allocate(4000); assert(b.length == 4000); auto c = a.allocate(4000); assert(c.length == 4000); a.deallocate(b); a.deallocate(c); } /** Creates a scalable `AllocatorList` of `Regions`, each having at least `bytesPerRegion` bytes. Allocation is very fast. This allocator does not offer `deallocate` but does free all regions in its destructor. It is recommended for short-lived batch applications that count on never running out of memory. */ auto mmapRegionList(size_t bytesPerRegion) { static struct Factory { size_t bytesPerRegion; import mir.utility : max; import stdx.allocator.building_blocks.region : Region; import stdx.allocator.mmap_allocator : MmapAllocator; this(size_t n) { bytesPerRegion = n; } auto opCall(size_t n) { return Region!MmapAllocator(max(n, bytesPerRegion)); } } import stdx.allocator.building_blocks.allocator_list : AllocatorList; import stdx.allocator.building_blocks.null_allocator : NullAllocator; auto shop = Factory(bytesPerRegion); return AllocatorList!(Factory, NullAllocator)(shop); } /// @system unittest { auto alloc = mmapRegionList(1024 * 1024); const b = alloc.allocate(100); assert(b.length == 100); } stdx-allocator-3.1.0~beta.2/source/stdx/allocator/typed.d 0000664 0000000 0000000 00000034651 13535263154 0023421 0 ustar 00root root 0000000 0000000 /** This module defines `TypedAllocator`, a statically-typed allocator that aggregates multiple untyped allocators and uses them depending on the static properties of the types allocated. For example, distinct allocators may be used for thread-local vs. thread-shared data, or for fixed-size data (`struct`, `class` objects) vs. resizable data (arrays). Macros: T2=$(TR