2016-08-24 18:15:21 +02:00
parent c0df3c9330
commit a3efee6d7f
21 changed files with 4193 additions and 0 deletions


@@ -0,0 +1,58 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Copyright: Eugene Wissner 2016.
* License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
* Mozilla Public License, v. 2.0).
* Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
*/
module tanya.memory.allocator;
/**
* This interface is similar to $(D_PSYMBOL
* std.experimental.allocator.IAllocator), but is usable in
* $(D_KEYWORD @nogc) code.
*/
interface Allocator
{
@nogc:
/**
* Allocates $(D_PARAM s) bytes of memory.
*
* Params:
* s = Amount of memory to allocate.
*
* Returns: A pointer to the newly allocated memory.
*/
void[] allocate(size_t s) @safe;
/**
* Deallocates a memory block.
*
* Params:
* p = The memory block to be freed.
*
* Returns: Whether the deallocation was successful.
*/
bool deallocate(void[] p) @safe;
/**
* Increases or decreases the size of a memory block.
*
* Params:
* p = The memory block.
* s = The new size of the block.
*
* Returns: Whether the reallocation was successful.
*/
bool reallocate(ref void[] p, size_t s) @safe;
/**
* Static allocator instance and initializer.
*
* Returns: An $(D_PSYMBOL Allocator) instance.
*/
static @property Allocator instance() @safe;
}
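/* An illustrative sketch (not part of the library itself): a minimal
 * malloc-backed implementation of this interface. The name MallocAllocator
 * is made up for the example; the static instance() property is omitted
 * because static interface methods need no override. */
version (unittest)
{
private final class MallocAllocator : Allocator
{
import core.stdc.stdlib : malloc, free, realloc;
void[] allocate(size_t s) @trusted @nogc nothrow
{
auto p = malloc(s);
return p is null ? null : p[0 .. s];
}
bool deallocate(void[] p) @trusted @nogc nothrow
{
free(p.ptr);
return true;
}
bool reallocate(ref void[] p, size_t s) @trusted @nogc nothrow
{
auto r = realloc(p.ptr, s);
if (r is null)
{
return false;
}
p = r[0 .. s];
return true;
}
}
}
unittest
{
Allocator a = new MallocAllocator;
auto p = a.allocate(16);
assert(p.length == 16);
assert(a.reallocate(p, 32));
assert(p.length == 32);
assert(a.deallocate(p));
}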


@@ -0,0 +1,207 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Copyright: Eugene Wissner 2016.
* License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
* Mozilla Public License, v. 2.0).
* Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
*/
module tanya.memory;
public import tanya.memory.allocator;
public import std.experimental.allocator : make, makeArray, expandArray, shrinkArray, IAllocator;
import core.atomic;
import core.stdc.stdlib;
import std.traits;
version (Windows)
{
import core.sys.windows.windows;
}
else version (Posix)
{
public import tanya.memory.ullocator;
import core.sys.posix.pthread;
}
@nogc:
version (Windows)
{
package alias Mutex = CRITICAL_SECTION;
package alias destroyMutex = DeleteCriticalSection;
}
else version (Posix)
{
package alias Mutex = pthread_mutex_t;
package void destroyMutex(pthread_mutex_t* mtx)
{
// Destroying the mutex is expected to succeed; abort otherwise.
pthread_mutex_destroy(mtx) && assert(0);
}
}
/**
 * Sets the default allocator.
 *
 * Params:
 * allocator = The allocator to be used as the default one.
 */
@property void defaultAllocator(Allocator allocator) @safe nothrow
{
_defaultAllocator = allocator;
}
/**
 * Returns: The default allocator.
 */
@property Allocator defaultAllocator() @safe nothrow
{
return _defaultAllocator;
}
static this() @safe nothrow
{
defaultAllocator = Ullocator.instance;
}
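/* The module constructor above installs Ullocator.instance as the default
 * allocator, so user code can rely on one being available without further
 * setup. A small sanity check of that assumption: */
unittest
{
assert(defaultAllocator !is null);
}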
package struct Monitor
{
Object.Monitor impl; // for user-level monitors
void delegate(Object) @nogc[] devt; // for internal monitors
size_t refs; // reference count
version (Posix)
{
Mutex mtx;
}
}
package @property ref shared(Monitor*) monitor(Object h) pure nothrow
{
return *cast(shared Monitor**)&h.__monitor;
}
/**
* Destroys and then deallocates (using $(D_PARAM allocator)) the class
* object referred to by a $(D_KEYWORD class) or $(D_KEYWORD interface)
* reference. It is assumed the respective entities had been allocated with
* the same allocator.
*
* Params:
* A = The type of the allocator used for the object allocation.
* T = The type of the object that should be destroyed.
* allocator = The allocator used for the object allocation.
* p = The object to be destroyed.
*/
void finalize(A, T)(auto ref A allocator, ref T p)
if (is(T == class) || is(T == interface))
{
static if (is(T == interface))
{
auto ob = cast(Object) p;
}
else
{
alias ob = p;
}
auto pp = cast(void*) ob;
auto ppv = cast(void**) pp;
if (!pp || !*ppv)
{
return;
}
auto support = (cast(void*) ob)[0 .. typeid(ob).initializer.length];
auto pc = cast(ClassInfo*) *ppv;
auto c = *pc;
do
{
if (c.destructor)
{
(cast(void function(Object)) c.destructor)(ob);
}
} while ((c = c.base) !is null);
// Take care of monitors for synchronized blocks
if (ppv[1])
{
shared(Monitor)* m = atomicLoad!(MemoryOrder.acq)(ob.monitor);
if (m !is null)
{
auto mc = cast(Monitor*) m;
if (!atomicOp!("-=")(m.refs, cast(size_t) 1))
{
foreach (v; mc.devt)
{
if (v)
{
v(ob);
}
}
if (mc.devt.ptr)
{
free(mc.devt.ptr);
}
destroyMutex(&mc.mtx);
free(mc);
atomicStore!(MemoryOrder.rel)(ob.monitor, null);
}
}
}
*ppv = null;
allocator.deallocate(support);
p = null;
}
/// ditto
void finalize(A, T)(auto ref A allocator, ref T *p)
if (is(T == struct))
{
if (p is null)
{
return;
}
static if (hasElaborateDestructor!T)
{
(*p).__xdtor();
}
allocator.deallocate((cast(void*)p)[0 .. T.sizeof]);
p = null;
}
/// ditto
void finalize(A, T)(auto ref A allocator, ref T[] p)
{
static if (hasElaborateDestructor!T)
{
foreach (ref e; p)
{
// Call the element's destructor; the storage itself is freed below.
destroy(e);
}
}
allocator.deallocate(p);
p = null;
}
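/* A usage sketch for finalize(): allocating a class instance by hand with the
 * default allocator and destroying it again. In real code the instance would
 * normally be created with make() from std.experimental.allocator; the class
 * A below exists only for this example. */
unittest
{
static class A
{
int x = 5;
}
immutable size = __traits(classInstanceSize, A);
// Allocate raw memory and copy the class initializer into it.
auto memory = defaultAllocator.allocate(size);
memory[] = typeid(A).initializer[];
auto a = cast(A) memory.ptr;
assert(a.x == 5);
// finalize() runs the destructors (none here) and deallocates the memory.
defaultAllocator.finalize(a);
assert(a is null);
}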
/**
 * Resizes $(D_PARAM array) to $(D_PARAM length) elements, allocating,
 * reallocating or deallocating the memory with $(D_PARAM allocator) as
 * needed.
 *
 * Params:
 * T = Element type.
 * A = The type of the allocator.
 * allocator = The allocator used for the array.
 * array = The array to resize.
 * length = The requested new length.
 *
 * Returns: Whether the resizing was successful.
 */
bool resizeArray(T, A)(auto ref A allocator, ref T[] array, in size_t length)
@trusted
{
if (length == array.length)
{
return true;
}
if (array is null && length > 0)
{
array = makeArray!T(allocator, length);
return array !is null;
}
if (length == 0)
{
finalize(allocator, array);
return true;
}
void[] buf = array;
if (!allocator.reallocate(buf, length * T.sizeof))
{
return false;
}
array = cast(T[]) buf;
return true;
}
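/* A usage sketch for resizeArray(): growing and shrinking an int array with
 * the default allocator. Resizing to 0 deallocates the array entirely. */
unittest
{
int[] array;
assert(defaultAllocator.resizeArray(array, 8));
assert(array.length == 8);
array[7] = 42;
assert(defaultAllocator.resizeArray(array, 16));
assert(array.length == 16);
assert(array[7] == 42);
assert(defaultAllocator.resizeArray(array, 0));
assert(array is null);
}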
/**
 * Evaluates to $(D_KEYWORD true) if $(D_PARAM T) is a type that
 * $(D_PSYMBOL finalize) can handle: a $(D_KEYWORD class), an
 * $(D_KEYWORD interface), a dynamic array or a type with an elaborate
 * destructor.
 */
enum bool isFinalizable(T) = is(T == class) || is(T == interface)
|| hasElaborateDestructor!T || isDynamicArray!T;
private Allocator _defaultAllocator;


@@ -0,0 +1,423 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Copyright: Eugene Wissner 2016.
* License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
* Mozilla Public License, v. 2.0).
* Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
*/
module tanya.memory.ullocator;
import tanya.memory.allocator;
@nogc:
version (Posix):
import core.sys.posix.sys.mman;
import core.sys.posix.unistd;
/**
* Allocator for Posix systems with mmap/munmap support.
*
* This allocator requests memory from the operating system in regions (each a
* multiple of the page size, 4 KB for example). Each region is then split
* into blocks, so not every allocation has to go to the operating system, but
* only those for which no sufficiently large free block exists in the
* available regions.
* Deallocation works the same way: it doesn't immediately give the memory
* back to the operating system, but marks the appropriate block as free; only
* when all blocks in a region are free is the complete region deallocated.
*
* ----------------------------------------------------------------------------
* | | | | | || | | |
* | |prev <----------- | || | | |
* | R | B | | B | || R | B | |
* | E | L | | L | next E | L | |
* | G | O | DATA | O | FREE ---> G | O | DATA |
* | I | C | | C | <--- I | C | |
* | O | K | | K | prev O | K | |
* | N | -----------> next| || N | | |
* | | | | | || | | |
* --------------------------------------------------- ------------------------
*/
class Ullocator : Allocator
{
@nogc:
@disable this();
shared static this() @trusted nothrow
{
pageSize = sysconf(_SC_PAGE_SIZE);
}
/**
* Allocates $(D_PARAM size) bytes of memory.
*
* Params:
* size = Amount of memory to allocate.
*
* Returns: A pointer to the newly allocated memory.
*/
void[] allocate(size_t size) @trusted nothrow
{
immutable dataSize = addAlignment(size);
void* data = findBlock(dataSize);
if (data is null)
{
data = initializeRegion(dataSize);
}
return data is null ? null : data[0..size];
}
///
unittest
{
auto p = Ullocator.instance.allocate(20);
assert(p);
Ullocator.instance.deallocate(p);
}
/**
* Searches for a block large enough to hold $(D_PARAM size) bytes and splits
* it into two blocks if the match is considerably larger.
*
* Params:
* size = Minimum size the block should have.
*
* Returns: A pointer to the block's data or $(D_KEYWORD null) if no suitable
* block was found.
*/
private void* findBlock(size_t size) nothrow
{
Block block1;
RegionLoop: for (auto r = head; r !is null; r = r.next)
{
block1 = cast(Block) (cast(void*) r + regionEntrySize);
do
{
if (block1.free && block1.size >= size)
{
break RegionLoop;
}
}
while ((block1 = block1.next) !is null);
}
if (block1 is null)
{
return null;
}
else if (block1.size >= size + alignment + blockEntrySize)
{ // Split the block if needed
Block block2 = cast(Block) (cast(void*) block1 + blockEntrySize + size);
block2.prev = block1;
// Insert the new block between block1 and its old successor.
block2.next = block1.next;
if (block1.next !is null)
{
block1.next.prev = block2;
}
block1.next = block2;
block1.free = false;
block2.free = true;
block2.size = block1.size - blockEntrySize - size;
block1.size = size;
block2.region = block1.region;
++block1.region.blocks;
}
else
{
block1.free = false;
++block1.region.blocks;
}
return cast(void*) block1 + blockEntrySize;
}
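/* An illustrative check of the behaviour described above: once a block has
 * been freed, an allocation of the same size is served from that block again,
 * so the same address comes back (assuming, as is the case here, that a
 * sufficiently large free block already exists in the current region). */
unittest
{
auto p1 = Ullocator.instance.allocate(16);
Ullocator.instance.deallocate(p1);
auto p2 = Ullocator.instance.allocate(16);
assert(p1.ptr is p2.ptr);
Ullocator.instance.deallocate(p2);
}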
/**
* Deallocates a memory block.
*
* Params:
* p = The memory block to be freed.
*
* Returns: Whether the deallocation was successful.
*/
bool deallocate(void[] p) @trusted nothrow
{
if (p is null)
{
return true;
}
Block block = cast(Block) (p.ptr - blockEntrySize);
if (block.region.blocks <= 1)
{
if (block.region.prev !is null)
{
block.region.prev.next = block.region.next;
}
else // Replace the list head. It is being deallocated
{
head = block.region.next;
}
if (block.region.next !is null)
{
block.region.next.prev = block.region.prev;
}
return munmap(block.region, block.region.size) == 0;
}
else
{
block.free = true;
--block.region.blocks;
return true;
}
}
///
unittest
{
auto p = Ullocator.instance.allocate(20);
assert(Ullocator.instance.deallocate(p));
}
/**
* Increases or decreases the size of a memory block.
*
* Params:
* p = The memory block.
* size = The new size of the block.
*
* Returns: Whether the reallocation was successful.
*/
bool reallocate(ref void[] p, size_t size) @trusted nothrow
{
if (size == p.length)
{
return true;
}
auto reallocP = allocate(size);
if (reallocP is null)
{
return false;
}
if (p !is null)
{
if (size > p.length)
{
reallocP[0..p.length] = p[0..$];
}
else
{
reallocP[0..size] = p[0..size];
}
deallocate(p);
}
p = reallocP;
return true;
}
///
unittest
{
void[] p;
Ullocator.instance.reallocate(p, 10 * int.sizeof);
(cast(int[]) p)[7] = 123;
assert(p.length == 40);
Ullocator.instance.reallocate(p, 8 * int.sizeof);
assert(p.length == 32);
assert((cast(int[]) p)[7] == 123);
Ullocator.instance.reallocate(p, 20 * int.sizeof);
(cast(int[]) p)[15] = 8;
assert(p.length == 80);
assert((cast(int[]) p)[15] == 8);
assert((cast(int[]) p)[7] == 123);
Ullocator.instance.reallocate(p, 8 * int.sizeof);
assert(p.length == 32);
assert((cast(int[]) p)[7] == 123);
Ullocator.instance.deallocate(p);
}
/**
* Static allocator instance and initializer.
*
* Returns: The global $(D_PSYMBOL Ullocator) instance.
*/
static @property Ullocator instance() @trusted nothrow
{
if (instance_ is null)
{
immutable instanceSize = addAlignment(__traits(classInstanceSize, Ullocator));
Region head; // Will soon become the region list head
void* data = initializeRegion(instanceSize, head);
if (data is null)
{
return null;
}
data[0..instanceSize] = typeid(Ullocator).initializer[];
instance_ = cast(Ullocator) data;
instance_.head = head;
}
return instance_;
}
///
unittest
{
assert(instance is instance);
}
/**
* Initializes a new region large enough for the first data block.
*
* Params:
* size = Aligned size of the first data block in the region.
* head = Region list head.
*
* Returns: A pointer to the data.
*/
pragma(inline)
private static void* initializeRegion(size_t size,
ref Region head) nothrow
{
immutable regionSize = calculateRegionSize(size);
void* p = mmap(null,
regionSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON,
-1,
0);
if (p is MAP_FAILED)
{
return null;
}
Region region = cast(Region) p;
region.blocks = 1;
region.size = regionSize;
// Set the pointer to the head of the region list
if (head !is null)
{
head.prev = region;
}
region.next = head;
region.prev = null;
head = region;
// Initialize the data block
void* memoryPointer = p + regionEntrySize;
Block block1 = cast(Block) memoryPointer;
block1.size = size;
block1.free = false;
// It is what we want to return
void* data = memoryPointer + blockEntrySize;
// Free block after data
memoryPointer = data + size;
Block block2 = cast(Block) memoryPointer;
block1.prev = block2.next = null;
block1.next = block2;
block2.prev = block1;
block2.size = regionSize - size - regionEntrySize - blockEntrySize * 2;
block2.free = true;
block1.region = block2.region = region;
return data;
}
/// ditto
private void* initializeRegion(size_t size) nothrow
{
return initializeRegion(size, head);
}
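/* An illustrative test of initializeRegion(): a fresh region is mapped, its
 * header is linked into the given list head and it initially contains a
 * single used block. The region is unmapped again at the end of the test so
 * the allocator's own state is untouched. */
unittest
{
Region head;
auto data = initializeRegion(1024, head);
assert(data !is null);
assert(head !is null);
assert(head.blocks == 1);
assert(head.size >= 1024 + regionEntrySize + blockEntrySize * 2);
assert(munmap(head, head.size) == 0);
}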
/**
* Params:
* x = Space to be aligned.
*
* Returns: Aligned size of $(D_PARAM x).
*/
pragma(inline)
private static immutable(size_t) addAlignment(size_t x) @safe pure nothrow
out (result)
{
assert(result > 0);
}
body
{
return (x - 1) / alignment * alignment + alignment;
}
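/* A few worked examples of the calculation above: sizes are always rounded
 * up to the next positive multiple of alignment (8). */
unittest
{
assert(addAlignment(1) == 8);
assert(addAlignment(8) == 8);
assert(addAlignment(9) == 16);
}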
/**
* Params:
* x = Required space.
*
* Returns: Minimum region size (a multiple of $(D_PSYMBOL pageSize)).
*/
pragma(inline)
private static immutable(size_t) calculateRegionSize(size_t x)
@safe pure nothrow
out (result)
{
assert(result > 0);
}
body
{
x += regionEntrySize + blockEntrySize * 2;
return x / pageSize * pageSize + pageSize;
}
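/* A worked example for calculateRegionSize(): the requested space plus the
 * region header and two block headers is rounded up to a whole number of
 * pages (assuming the usual case of a page being larger than 132 bytes). */
unittest
{
assert(calculateRegionSize(20) == pageSize);
assert(calculateRegionSize(pageSize) == pageSize * 2);
}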
enum alignment = 8;
private static Ullocator instance_;
private shared static immutable long pageSize;
private struct RegionEntry
{
Region prev;
Region next;
uint blocks;
ulong size;
}
private alias Region = RegionEntry*;
private enum regionEntrySize = 32;
private Region head;
private struct BlockEntry
{
Block prev;
Block next;
bool free;
ulong size;
Region region;
}
private alias Block = BlockEntry*;
private enum blockEntrySize = 40;
}