15 Commits

11 changed files with 1576 additions and 1024 deletions

View File

@@ -6,7 +6,8 @@ os:
language: d
d:
- dmd-2.072.1
- dmd-2.072.2
- dmd-2.071.2
env:
matrix:

View File

@@ -1,7 +1,8 @@
# Tanya
[![Build Status](https://travis-ci.org/caraus-ecms/tanya.svg?branch=master)](https://travis-ci.org/caraus-ecms/tanya)
[![Dub Version](https://img.shields.io/dub/v/tanya.svg)](https://code.dlang.org/packages/tanya)
[![Dub version](https://img.shields.io/dub/v/tanya.svg)](https://code.dlang.org/packages/tanya)
[![Dub downloads](https://img.shields.io/dub/dt/tanya.svg)](https://code.dlang.org/packages/tanya)
[![License](https://img.shields.io/badge/license-MPL_2.0-blue.svg)](https://raw.githubusercontent.com/caraus-ecms/tanya/master/LICENSE)
Tanya is a general purpose library for the D programming language that doesn't
@@ -17,13 +18,19 @@ data structures and utilities that depend on the Garbage Collector in Phobos.
Tanya consists of the following packages:
* `async`: Event loop.
* `container`: Queue, Vector, Singly linked list.
* `async`: Event loop (epoll, kqueue and IOCP).
* `container`: Queue, Vector, Singly linked list, buffers.
* `crypto`: Work in progress TLS implementation.
* `math`: Multiple precision integer and a set of functions.
* `memory`: Tools for manual memory management (allocator, reference counting, helper functions).
* `memory`: Tools for manual memory management (allocator, reference counting,
helper functions).
* `network`: URL parsing, sockets.
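For a taste of the `memory` package, here is a minimal sketch of manual memory
management (it assumes only the `MmapPool` allocator from
`tanya.memory.mmappool`, whose `allocate`, `reallocate` and `deallocate` methods
appear further down in this changeset):

```d
import tanya.memory.mmappool;

void main()
{
    // Request memory from the allocator instead of the GC.
    void[] buffer = MmapPool.instance.allocate(64);
    scope (exit)
    {
        MmapPool.instance.deallocate(buffer);
    }

    // Grow the buffer; the pool tries to extend the block in place first.
    MmapPool.instance.reallocate(buffer, 128);
}
```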
### Supported compilers
* dmd 2.072.2
* dmd 2.071.2
### Current status
The library is currently under development, but some parts of it can already be
@@ -42,23 +49,18 @@ testing and work on its performance.
I'm currently mostly working on `crypto`, which is not a complete cryptographic
suite but contains (will contain) the algorithm implementations required by TLS.
### Other properties
### Further characteristics
* Tanya is a native D library (only D and Assembler are tolerated).
* Tanya is a native D library.
* It is important for me to document the code and attach at least a few unit
tests where possible. So the documentation and usage examples can be found in
the source code.
* Documentation and usage examples can be found in the source code.
Online documentation will be published soon.
* Tanya is mostly tested on a 64-bit Linux and some features are
platform-dependent, but not because it is a Linux-only library. Therefore any
help to bring better support for Windows and BSD systems would be accepted.
* Tanya is cross-platform. The development happens on a 64-bit Linux, but it
is being tested on Windows and FreeBSD as well.
* The library isn't thread-safe. Thread-safety should be added later.
* I'm working with the latest dmd version, but will be looking to support other
D compilers and keep compatibility with older dmd versions in the future.
## Contributing
Since I'm mostly busy writing new code and implementing new features, I would

View File

@@ -16,7 +16,7 @@ version (unittest)
{
package struct ConstEqualsStruct
{
int opEquals(typeof(this) that) const
int opEquals(typeof(this) that) const @nogc
{
return true;
}
@@ -24,7 +24,7 @@ version (unittest)
package struct MutableEqualsStruct
{
int opEquals(typeof(this) that)
int opEquals(typeof(this) that) @nogc
{
return true;
}

File diff suppressed because it is too large.

View File

@@ -391,7 +391,7 @@ struct Integer
{
auto tmp = Integer(h);
tmp.subtract(rep);
rep = tmp.rep;
swap(rep, tmp.rep);
sign = length == 0 ? false : h.sign;
}
}
@@ -408,7 +408,7 @@ struct Integer
{
auto tmp = Integer(h);
tmp.subtract(rep);
rep = tmp.rep;
swap(rep, tmp.rep);
sign = length == 0 ? false : !sign;
}
}

View File

@@ -16,7 +16,7 @@ module tanya.memory.allocator;
interface Allocator
{
/**
* Returns: Alignment.
* Returns: Alignment offered.
*/
@property uint alignment() const shared pure nothrow @safe @nogc;
@@ -28,7 +28,7 @@ interface Allocator
*
* Returns: Pointer to the new allocated memory.
*/
void[] allocate(size_t size) shared nothrow @nogc;
void[] allocate(in size_t size) shared nothrow @nogc;
/**
* Deallocates a memory block.
@@ -49,7 +49,18 @@
*
* Returns: Pointer to the allocated memory.
*/
bool reallocate(ref void[] p, size_t size) shared nothrow @nogc;
bool reallocate(ref void[] p, in size_t size) shared nothrow @nogc;
/**
* Expands a memory block in place.
*
* Params:
* p = A pointer to the memory block.
* size = Size of the reallocated block.
*
* Returns: $(D_KEYWORD true) if successful, $(D_KEYWORD false) otherwise.
*/
bool expand(ref void[] p, in size_t size) shared nothrow @nogc;
}
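As a rough usage sketch (not a hunk from the diff), a consumer of this interface can try the new `expand` first and fall back to a full `reallocate` only when in-place growth fails. The `grow` helper below is hypothetical; `MmapPool`, changed later in this changeset, serves as the concrete `Allocator` implementation:

import tanya.memory.allocator;
import tanya.memory.mmappool;

// Grows p to newSize, preferring in-place expansion over moving the data.
bool grow(shared Allocator allocator, ref void[] p, in size_t newSize)
nothrow @nogc
{
    if (allocator.expand(p, newSize))
    {
        return true; // Extended in place, the pointer did not change.
    }
    return allocator.reallocate(p, newSize); // May allocate and copy.
}

nothrow @nogc unittest
{
    void[] p = MmapPool.instance.allocate(8);
    assert(grow(MmapPool.instance, p, 16));
    assert(p.length == 16);
    MmapPool.instance.deallocate(p);
}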
/**

View File

@@ -10,9 +10,9 @@
*/
module tanya.memory.mmappool;
import core.stdc.string;
import std.algorithm.comparison;
import tanya.memory.allocator;
import core.atomic;
import core.exception;
version (Posix)
{
@@ -27,7 +27,7 @@ else version (Windows)
}
/**
* This allocator allocates memory in regions (multiple of 4 KB for example).
* This allocator allocates memory in regions (multiple of 64 KB for example).
* Each region is then split into blocks. So it doesn't request the memory
* from the operating system on each call, but only if there are no large
* enough free blocks in the available regions.
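For instance (an illustrative sketch, not a hunk from the diff), a handful of small allocations is served out of one already mapped region, and freed blocks are recycled before any new region is requested:

nothrow @nogc unittest
{
    import tanya.memory.mmappool;

    // Both blocks are carved out of the same region, so at most one
    // mmap/VirtualAlloc call is made for the two allocations.
    void[] a = MmapPool.instance.allocate(16);
    void[] b = MmapPool.instance.allocate(16);

    // Freeing marks the blocks as free inside the region; the region itself
    // is only returned to the operating system once all of its blocks have
    // been deallocated.
    MmapPool.instance.deallocate(a);
    MmapPool.instance.deallocate(b);
}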
@@ -50,21 +50,6 @@ else version (Windows)
*/
final class MmapPool : Allocator
{
@nogc:
shared static this()
{
version (Posix)
{
pageSize = sysconf(_SC_PAGE_SIZE);
}
else version (Windows)
{
SYSTEM_INFO si;
GetSystemInfo(&si);
pageSize = si.dwPageSize;
}
}
/**
* Allocates $(D_PARAM size) bytes of memory.
*
@@ -73,7 +58,7 @@ final class MmapPool : Allocator
*
* Returns: Pointer to the new allocated memory.
*/
void[] allocate(size_t size) shared nothrow
void[] allocate(in size_t size) shared nothrow @nogc
{
if (!size)
{
@@ -87,7 +72,7 @@ final class MmapPool : Allocator
data = initializeRegion(dataSize);
}
return data is null ? null : data[0..size];
return data is null ? null : data[0 .. size];
}
///
@@ -105,16 +90,16 @@ final class MmapPool : Allocator
* into two blocks if the block is too large.
*
* Params:
* size = Minimum size the block should have.
* size = Minimum size the block should have (aligned).
*
* Returns: Data the block points to or $(D_KEYWORD null).
*/
private void* findBlock(size_t size) shared nothrow
private void* findBlock(in ref size_t size) shared nothrow @nogc
{
Block block1;
RegionLoop: for (auto r = head; r !is null; r = r.next)
{
block1 = cast(Block) (cast(void*) r + regionEntrySize);
block1 = cast(Block) (cast(void*) r + RegionEntry.sizeof);
do
{
if (block1.free && block1.size >= size)
@@ -128,35 +113,37 @@ final class MmapPool : Allocator
{
return null;
}
else if (block1.size >= size + alignment + blockEntrySize)
else if (block1.size >= size + alignment_ + BlockEntry.sizeof)
{ // Split the block if needed
Block block2 = cast(Block) (cast(void*) block1 + blockEntrySize + size);
Block block2 = cast(Block) (cast(void*) block1 + BlockEntry.sizeof + size);
block2.prev = block1;
if (block1.next is null)
block2.next = block1.next;
block2.free = true;
block2.size = block1.size - BlockEntry.sizeof - size;
block2.region = block1.region;
if (block1.next !is null)
{
block2.next = null;
}
else
{
block2.next = block1.next.next;
block1.next.prev = block2;
}
block1.next = block2;
block1.free = false;
block2.free = true;
block2.size = block1.size - blockEntrySize - size;
block1.size = size;
}
block1.free = false;
block1.region.blocks = block1.region.blocks + 1;
block2.region = block1.region;
atomicOp!"+="(block1.region.blocks, 1);
}
else
return cast(void*) block1 + BlockEntry.sizeof;
}
// Merge block with the next one.
private void mergeNext(Block block) shared const pure nothrow @safe @nogc
{
block.size = block.size + BlockEntry.sizeof + block.next.size;
if (block.next.next !is null)
{
block1.free = false;
atomicOp!"+="(block1.region.blocks, 1);
block.next.next.prev = block;
}
return cast(void*) block1 + blockEntrySize;
block.next = block.next.next;
}
/**
@@ -167,14 +154,14 @@ final class MmapPool : Allocator
*
* Returns: Whether the deallocation was successful.
*/
bool deallocate(void[] p) shared nothrow
bool deallocate(void[] p) shared nothrow @nogc
{
if (p is null)
{
return true;
}
Block block = cast(Block) (p.ptr - blockEntrySize);
Block block = cast(Block) (p.ptr - BlockEntry.sizeof);
if (block.region.blocks <= 1)
{
if (block.region.prev !is null)
@@ -198,12 +185,26 @@ final class MmapPool : Allocator
return VirtualFree(cast(void*) block.region, 0, MEM_RELEASE) == 0;
}
}
// Merge blocks if neighbours are free.
if (block.next !is null && block.next.free)
{
mergeNext(block);
}
if (block.prev !is null && block.prev.free)
{
block.prev.size = block.prev.size + BlockEntry.sizeof + block.size;
if (block.next !is null)
{
block.next.prev = block.prev;
}
block.prev.next = block.next;
}
else
{
block.free = true;
atomicOp!"-="(block.region.blocks, 1);
return true;
}
block.region.blocks = block.region.blocks - 1;
return true;
}
///
@@ -214,6 +215,99 @@ final class MmapPool : Allocator
assert(MmapPool.instance.deallocate(p));
}
/**
* Expands a memory block in place.
*
* Params:
* p = A pointer to the memory block.
* size = Size of the reallocated block.
*
* Returns: $(D_KEYWORD true) if successful, $(D_KEYWORD false) otherwise.
*/
bool expand(ref void[] p, in size_t size) shared nothrow @nogc
{
if (size <= p.length)
{
return true;
}
if (p is null)
{
return false;
}
Block block1 = cast(Block) (p.ptr - BlockEntry.sizeof);
if (block1.size >= size)
{
// Enough space in the current block. Can happen because of the alignment.
p = p.ptr[0 .. size];
return true;
}
immutable dataSize = addAlignment(size);
immutable delta = dataSize - p.length;
if (block1.next is null
|| !block1.next.free
|| block1.next.size + BlockEntry.sizeof < delta)
{
// It is the last block in the region or the next block is too small or not
// free.
return false;
}
if (block1.next.size >= delta + alignment_)
{
// We should move the start position of the next block. The order may be
// important because the old block and the new one can overlap.
auto block2 = cast(Block) (p.ptr + dataSize);
block2.size = block1.next.size - delta;
block2.free = true;
block2.region = block1.region;
block2.next = block1.next.next;
block2.prev = block1;
block1.size = block1.size + delta;
if (block1.next.next !is null)
{
block1.next.next.prev = block2;
}
block1.next = block2;
}
else
{
// The next block has enough space, but is too small for further
// allocations. Merge it with the current block.
mergeNext(block1);
}
p = p.ptr[0 .. size];
return true;
}
///
nothrow unittest
{
void[] p;
assert(!MmapPool.instance.expand(p, 5));
assert(p is null);
p = MmapPool.instance.allocate(1);
auto orig = p.ptr;
assert(MmapPool.instance.expand(p, 2));
assert(p.length == 2);
assert(p.ptr == orig);
assert(MmapPool.instance.expand(p, 4));
assert(p.length == 4);
assert(p.ptr == orig);
assert(MmapPool.instance.expand(p, 2));
assert(p.length == 4);
assert(p.ptr == orig);
MmapPool.instance.deallocate(p);
}
/**
* Increases or decreases the size of a memory block.
*
@@ -223,33 +317,36 @@ final class MmapPool : Allocator
*
* Returns: Whether the reallocation was successful.
*/
bool reallocate(ref void[] p, size_t size) shared nothrow
bool reallocate(ref void[] p, in size_t size) shared nothrow @nogc
{
void[] reallocP;
if (size == p.length)
if (size == 0)
{
if (deallocate(p))
{
p = null;
return true;
}
return false;
}
else if (size <= p.length)
{
// Leave the block as is.
p = p.ptr[0 .. size];
return true;
}
else if (expand(p, size))
{
return true;
}
else if (size > 0)
// Can't extend, allocate a new block, copy and delete the previous.
void[] reallocP = allocate(size);
if (reallocP is null)
{
reallocP = allocate(size);
if (reallocP is null)
{
return false;
}
return false;
}
if (p !is null)
{
if (size > p.length)
{
reallocP[0..p.length] = p[0..$];
}
else if (size > 0)
{
reallocP[0..size] = p[0..size];
}
memcpy(reallocP.ptr, p.ptr, min(p.length, size));
deallocate(p);
}
p = reallocP;
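Seen from the caller's side, the branches above give the following behaviour (a small standalone sketch, assuming the allocations succeed):

nothrow @nogc unittest
{
    import tanya.memory.mmappool;

    void[] p = MmapPool.instance.allocate(8);
    auto ptr = p.ptr;

    // Shrinking never moves the block, only the slice gets shorter.
    MmapPool.instance.reallocate(p, 4);
    assert(p.length == 4);
    assert(p.ptr is ptr);

    // Growing tries expand() first and only then allocates, copies and frees.
    MmapPool.instance.reallocate(p, 16);
    assert(p.length == 16);

    // Size 0 is equivalent to deallocation.
    MmapPool.instance.reallocate(p, 0);
    assert(p is null);
}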
@@ -291,17 +388,33 @@ final class MmapPool : Allocator
*
* Returns: Global $(D_PSYMBOL MmapPool) instance.
*/
static @property ref shared(MmapPool) instance() nothrow
static @property ref shared(MmapPool) instance() nothrow @nogc
{
if (instance_ is null)
{
// Get the system-dependent page size.
version (Posix)
{
pageSize = sysconf(_SC_PAGE_SIZE);
if (pageSize < 65536)
{
pageSize = pageSize * 65536 / pageSize;
}
}
else version (Windows)
{
SYSTEM_INFO si;
GetSystemInfo(&si);
pageSize = si.dwPageSize;
}
immutable instanceSize = addAlignment(__traits(classInstanceSize, MmapPool));
Region head; // Will soon become the head of our region list
void* data = initializeRegion(instanceSize, head);
if (data !is null)
{
data[0..instanceSize] = typeid(MmapPool).initializer[];
memcpy(data, typeid(MmapPool).initializer.ptr, instanceSize);
instance_ = cast(shared MmapPool) data;
instance_.head = head;
}
@@ -315,7 +428,7 @@ final class MmapPool : Allocator
assert(instance is instance);
}
/**
/*
* Initializes a region for one element.
*
* Params:
@@ -324,40 +437,32 @@ final class MmapPool : Allocator
*
* Returns: A pointer to the data.
*/
private static void* initializeRegion(size_t size,
ref Region head) nothrow
private static void* initializeRegion(size_t size, ref Region head)
nothrow @nogc
{
immutable regionSize = calculateRegionSize(size);
version (Posix)
{
void* p = mmap(null,
regionSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON,
-1,
0);
regionSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON,
-1,
0);
if (p is MAP_FAILED)
{
if (errno == ENOMEM)
{
onOutOfMemoryError();
}
return null;
}
}
else version (Windows)
{
void* p = VirtualAlloc(null,
regionSize,
MEM_COMMIT,
PAGE_READWRITE);
regionSize,
MEM_COMMIT,
PAGE_READWRITE);
if (p is null)
{
if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY)
{
onOutOfMemoryError();
}
return null;
}
}
@@ -376,13 +481,13 @@ final class MmapPool : Allocator
head = region;
// Initialize the data block
void* memoryPointer = p + regionEntrySize;
void* memoryPointer = p + RegionEntry.sizeof;
Block block1 = cast(Block) memoryPointer;
block1.size = size;
block1.free = false;
// It is what we want to return
void* data = memoryPointer + blockEntrySize;
void* data = memoryPointer + BlockEntry.sizeof;
// Free block after data
memoryPointer = data + size;
@@ -390,28 +495,26 @@ final class MmapPool : Allocator
block1.prev = block2.next = null;
block1.next = block2;
block2.prev = block1;
block2.size = regionSize - size - regionEntrySize - blockEntrySize * 2;
block2.size = regionSize - size - RegionEntry.sizeof - BlockEntry.sizeof * 2;
block2.free = true;
block1.region = block2.region = region;
return data;
}
/// Ditto.
private void* initializeRegion(size_t size) shared nothrow
private void* initializeRegion(size_t size) shared nothrow @nogc
{
return initializeRegion(size, head);
}
/**
/*
* Params:
* x = Space to be aligned.
*
* Returns: Aligned size of $(D_PARAM x).
*/
pragma(inline)
private static immutable(size_t) addAlignment(size_t x)
@safe pure nothrow
pure nothrow @safe @nogc
out (result)
{
assert(result > 0);
@@ -421,34 +524,35 @@ final class MmapPool : Allocator
return (x - 1) / alignment_ * alignment_ + alignment_;
}
/**
/*
* Params:
* x = Required space.
*
* Returns: Minimum region size (a multiple of $(D_PSYMBOL pageSize)).
*/
pragma(inline)
private static immutable(size_t) calculateRegionSize(size_t x)
@safe pure nothrow
nothrow @safe @nogc
out (result)
{
assert(result > 0);
}
body
{
x += regionEntrySize + blockEntrySize * 2;
x += RegionEntry.sizeof + BlockEntry.sizeof * 2;
return x / pageSize * pageSize + pageSize;
}
@property uint alignment() shared const pure nothrow @safe
/**
* Returns: Alignment offered.
*/
@property uint alignment() shared const pure nothrow @safe @nogc
{
return alignment_;
}
private enum alignment_ = 8;
private static shared MmapPool instance_;
private shared static immutable size_t pageSize;
private shared static MmapPool instance_;
private shared static size_t pageSize;
private shared struct RegionEntry
{
@@ -458,18 +562,15 @@ final class MmapPool : Allocator
size_t size;
}
private alias Region = shared RegionEntry*;
private enum regionEntrySize = 32;
private shared Region head;
private shared struct BlockEntry
{
Block prev;
Block next;
bool free;
size_t size;
Region region;
size_t size;
bool free;
}
private alias Block = shared BlockEntry*;
private enum blockEntrySize = 40;
}
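To make the size arithmetic above concrete, here is a standalone sketch of the two formulas. `addAlignment` and `calculateRegionSize` are private, so the helpers below simply restate them; the 112-byte header overhead and 64 KB page are illustrative values, not taken from the diff:

unittest
{
    enum alignment = 8; // Matches alignment_ above.

    // addAlignment rounds a request up to the next multiple of the alignment.
    static size_t addAlignment(size_t x)
    {
        return (x - 1) / alignment * alignment + alignment;
    }

    assert(addAlignment(1) == 8);
    assert(addAlignment(8) == 8);
    assert(addAlignment(9) == 16);

    // calculateRegionSize adds the bookkeeping overhead (one RegionEntry and
    // two BlockEntry headers) and rounds the result up to whole pages, so the
    // size handed to mmap/VirtualAlloc is always page-aligned.
    static size_t calculateRegionSize(size_t x, size_t overhead, size_t pageSize)
    {
        x += overhead;
        return x / pageSize * pageSize + pageSize;
    }

    // Say the headers take 112 bytes and a page is 64 KB: a 100-byte request
    // still maps one whole 64 KB region.
    assert(calculateRegionSize(100, 112, 65536) == 65536);
}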

View File

@@ -78,8 +78,8 @@ size_t alignedSize(in size_t size, in size_t alignment = 8) pure nothrow @safe @
*
* Params:
* T = Element type of the array being created.
* Init = If should be initialized.
* Throws = If $(D_PSYMBOL OutOfMemoryError) should be thrown.
* Init = If should be initialized.
* Throws = If $(D_PSYMBOL OutOfMemoryError) should be thrown.
* allocator = The allocator used for getting memory.
* array = A reference to the array being changed.
* length = New array length.

View File

@@ -303,7 +303,7 @@ unittest
version (unittest)
{
class A
private class A
{
uint *destroyed;
@@ -318,7 +318,7 @@ version (unittest)
}
}
struct B
private struct B
{
int prop;
@disable this();

View File

@@ -3,14 +3,14 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Generic enum templates.
* Templates that generate values.
*
* Copyright: Eugene Wissner 2016.
* License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
* Mozilla Public License, v. 2.0).
* Authors: $(LINK2 mailto:belka@caraus.de, Eugene Wissner)
* Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
*/
module tanya.enums;
module tanya.meta.gen;
import std.traits;

View File

@@ -0,0 +1,15 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Metaprogramming.
*
* Copyright: Eugene Wissner 2016.
* License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
* Mozilla Public License, v. 2.0).
* Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
*/
module tanya.meta;
public import tanya.meta.gen;