mirror of https://github.com/fluencelabs/assemblyscript, synced 2025-06-20 02:11:31 +00:00

Commit: unsafe, stub
@@ -12,28 +12,30 @@ import { AL_MASK, MAX_SIZE_32 } from "../internal/allocator";
 var startOffset: usize = (HEAP_BASE + AL_MASK) & ~AL_MASK;
 var offset: usize = startOffset;

-// Memory allocator interface
-@global namespace memory {
-
-  @global export function __memory_allocate(size: usize): usize {
-    if (size > MAX_SIZE_32) unreachable();
-    var ptr = offset;
-    var newPtr = (ptr + max<usize>(size, 1) + AL_MASK) & ~AL_MASK;
-    var pagesBefore = memory.size();
-    if (newPtr > <usize>pagesBefore << 16) {
-      let pagesNeeded = ((newPtr - ptr + 0xffff) & ~0xffff) >>> 16;
-      let pagesWanted = max(pagesBefore, pagesNeeded); // double memory
-      if (memory.grow(pagesWanted) < 0) {
-        if (memory.grow(pagesNeeded) < 0) {
-          unreachable(); // out of memory
-        }
-      }
-    }
-    offset = newPtr;
-    return ptr;
-  }
-
-  @global export function __memory_free(ptr: usize): void { /* nop */ }
-
-  @global export function __memory_reset(): void {
-    offset = startOffset;
-  }
-}
+// Memory allocator implementation
+export function allocate(size: usize): usize {
+  if (size > MAX_SIZE_32) unreachable();
+  var ptr = offset;
+  var newPtr = (ptr + max<usize>(size, 1) + AL_MASK) & ~AL_MASK;
+  var pagesBefore = memory.size();
+  if (newPtr > <usize>pagesBefore << 16) {
+    let pagesNeeded = ((newPtr - ptr + 0xffff) & ~0xffff) >>> 16;
+    let pagesWanted = max(pagesBefore, pagesNeeded); // double memory
+    if (memory.grow(pagesWanted) < 0) {
+      if (memory.grow(pagesNeeded) < 0) {
+        unreachable(); // out of memory
+      }
+    }
+  }
+  offset = newPtr;
+  return ptr;
+}
+
+export function free(ptr: usize): void { /* nop */ }
+
+export function reset(): void {
+  offset = startOffset;
+}
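Reviewer note: the arena allocator above never frees; it only bumps `offset` and grows linear memory, reserving at least double the current pages when it runs out. A minimal TypeScript sketch of that growth policy, with WebAssembly pages simulated by a plain counter (all names here are illustrative, not part of the diff):

    const AL_MASK = 7;         // align to 8 bytes, as in the AssemblyScript source
    const PAGE_SIZE = 0x10000; // one 64 KiB WebAssembly page

    let pages = 1;             // stands in for memory.size()
    let offset = 8;            // stands in for (HEAP_BASE + AL_MASK) & ~AL_MASK

    function allocate(size: number): number {
      const ptr = offset;
      // bump the offset to the next 8-byte boundary past the requested size
      const newPtr = (ptr + Math.max(size, 1) + AL_MASK) & ~AL_MASK;
      if (newPtr > pages * PAGE_SIZE) {
        const pagesNeeded = Math.ceil((newPtr - ptr) / PAGE_SIZE);
        pages += Math.max(pages, pagesNeeded); // grow by max(current, needed): doubles memory
      }
      offset = newPtr;
      return ptr;
    }

    console.log(allocate(3), allocate(3)); // 8 16 -- every block starts 8-byte aligned

The nested memory.grow calls in the real code retry with the exact page count when the doubled request fails, and only trap when even that cannot be satisfied.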
@@ -338,203 +338,205 @@ function lower_bucket_limit(bucket: usize): u32 {
   return 1;
 }

-// Memory allocator interface
-@global namespace memory {
-
-  @global export function __memory_allocate(request: usize): usize {
-    var original_bucket: usize, bucket: usize;
-
-    /*
-     * Make sure it's possible for an allocation of this size to succeed. There's
-     * a hard-coded limit on the maximum allocation size because of the way this
-     * allocator works.
-     */
-    if (request > MAX_ALLOC - HEADER_SIZE) unreachable();
-
-    /*
-     * Initialize our global state if this is the first call to "malloc". At the
-     * beginning, the tree has a single node that represents the smallest
-     * possible allocation size. More memory will be reserved later as needed.
-     */
-    if (base_ptr == 0) {
-      // base_ptr = max_ptr = (uint8_t *)sbrk(0);
-      base_ptr = (NODE_IS_SPLIT_END + 7) & ~7; // must be aligned
-      max_ptr = <usize>memory.size() << 16; // must grow first
-      bucket_limit = BUCKET_COUNT - 1;
-      if (!update_max_ptr(base_ptr + List.SIZE)) {
-        return 0;
-      }
-      list_init(buckets$get(BUCKET_COUNT - 1));
-      list_push(buckets$get(BUCKET_COUNT - 1), changetype<List>(base_ptr));
-    }
-
-    /*
-     * Find the smallest bucket that will fit this request. This doesn't check
-     * that there's space for the request yet.
-     */
-    bucket = bucket_for_request(request + HEADER_SIZE);
-    original_bucket = bucket;
-
-    /*
-     * Search for a bucket with a non-empty free list that's as large or larger
-     * than what we need. If there isn't an exact match, we'll need to split a
-     * larger one to get a match.
-     */
-    while (bucket + 1 != 0) {
-      let size: usize, bytes_needed: usize, i: usize;
-      let ptr: usize;
-
-      /*
-       * We may need to grow the tree to be able to fit an allocation of this
-       * size. Try to grow the tree and stop here if we can't.
-       */
-      if (!lower_bucket_limit(bucket)) {
-        return 0;
-      }
-
-      /*
-       * Try to pop a block off the free list for this bucket. If the free list
-       * is empty, we're going to have to split a larger block instead.
-       */
-      ptr = changetype<usize>(list_pop(buckets$get(bucket)));
-      if (!ptr) {
-        /*
-         * If we're not at the root of the tree or it's impossible to grow the
-         * tree any more, continue on to the next bucket.
-         */
-        if (bucket != bucket_limit || bucket == 0) {
-          bucket--;
-          continue;
-        }
-
-        /*
-         * Otherwise, grow the tree one more level and then pop a block off the
-         * free list again. Since we know the root of the tree is used (because
-         * the free list was empty), this will add a parent above this node in
-         * the SPLIT state and then add the new right child node to the free list
-         * for this bucket. Popping the free list will give us this right child.
-         */
-        if (!lower_bucket_limit(bucket - 1)) {
-          return 0;
-        }
-        ptr = changetype<usize>(list_pop(buckets$get(bucket)));
-      }
-
-      /*
-       * Try to expand the address space first before going any further. If we
-       * have run out of space, put this block back on the free list and fail.
-       */
-      size = 1 << (MAX_ALLOC_LOG2 - bucket);
-      bytes_needed = bucket < original_bucket ? size / 2 + List.SIZE : size;
-      if (!update_max_ptr(ptr + bytes_needed)) {
-        list_push(buckets$get(bucket), changetype<List>(ptr));
-        return 0;
-      }
-
-      /*
-       * If we got a node off the free list, change the node from UNUSED to USED.
-       * This involves flipping our parent's "is split" bit because that bit is
-       * the exclusive-or of the UNUSED flags of both children, and our UNUSED
-       * flag (which isn't ever stored explicitly) has just changed.
-       *
-       * Note that we shouldn't ever need to flip the "is split" bit of our
-       * grandparent because we know our buddy is USED so it's impossible for our
-       * grandparent to be UNUSED (if our buddy chunk was UNUSED, our parent
-       * wouldn't ever have been split in the first place).
-       */
-      i = node_for_ptr(ptr, bucket);
-      if (i != 0) {
-        flip_parent_is_split(i);
-      }
-
-      /*
-       * If the node we got is larger than we need, split it down to the correct
-       * size and put the new unused child nodes on the free list in the
-       * corresponding bucket. This is done by repeatedly moving to the left
-       * child, splitting the parent, and then adding the right child to the free
-       * list.
-       */
-      while (bucket < original_bucket) {
-        i = i * 2 + 1;
-        bucket++;
-        flip_parent_is_split(i);
-        list_push(
-          buckets$get(bucket),
-          changetype<List>(ptr_for_node(i + 1, bucket))
-        );
-      }
-
-      /*
-       * Now that we have a memory address, write the block header (just the size
-       * of the allocation) and return the address immediately after the header.
-       */
-      store<usize>(ptr, request);
-      return ptr + HEADER_SIZE;
-    }
-
-    return 0;
-  }
-
-  @global export function __memory_free(ptr: usize): void {
-    var bucket: usize, i: usize;
-
-    /*
-     * Ignore any attempts to free a NULL pointer.
-     */
-    if (!ptr) {
-      return;
-    }
-
-    /*
-     * We were given the address returned by "malloc" so get back to the actual
-     * address of the node by subtracting off the size of the block header. Then
-     * look up the index of the node corresponding to this address.
-     */
-    ptr = ptr - HEADER_SIZE;
-    bucket = bucket_for_request(load<usize>(ptr) + HEADER_SIZE);
-    i = node_for_ptr(ptr, bucket);
-
-    /*
-     * Traverse up to the root node, flipping USED blocks to UNUSED and merging
-     * UNUSED buddies together into a single UNUSED parent.
-     */
-    while (i != 0) {
-      /*
-       * Change this node from USED to UNUSED. This involves flipping our
-       * parent's "is split" bit because that bit is the exclusive-or of the
-       * UNUSED flags of both children, and our UNUSED flag (which isn't ever
-       * stored explicitly) has just changed.
-       */
-      flip_parent_is_split(i);
-
-      /*
-       * If the parent is now SPLIT, that means our buddy is USED, so don't merge
-       * with it. Instead, stop the iteration here and add ourselves to the free
-       * list for our bucket.
-       *
-       * Also stop here if we're at the current root node, even if that root node
-       * is now UNUSED. Root nodes don't have a buddy so we can't merge with one.
-       */
-      if (parent_is_split(i) || bucket == bucket_limit) {
-        break;
-      }
-
-      /*
-       * If we get here, we know our buddy is UNUSED. In this case we should
-       * merge with that buddy and continue traversing up to the root node. We
-       * need to remove the buddy from its free list here but we don't need to
-       * add the merged parent to its free list yet. That will be done once after
-       * this loop is finished.
-       */
-      list_remove(changetype<List>(ptr_for_node(((i - 1) ^ 1) + 1, bucket)));
-      i = (i - 1) / 2;
-      bucket--;
-    }
-
-    /*
-     * Add ourselves to the free list for our bucket. We add to the back of the
-     * list because "malloc" takes from the back of the list and we want a "free"
-     * followed by a "malloc" of the same size to ideally use the same address
-     * for better memory locality.
-     */
-    list_push(buckets$get(bucket), changetype<List>(ptr_for_node(i, bucket)));
-  }
-}
+// Memory allocator implementation
+export function allocate(request: usize): usize {
+  var original_bucket: usize, bucket: usize;
+
+  /*
+   * Make sure it's possible for an allocation of this size to succeed. There's
+   * a hard-coded limit on the maximum allocation size because of the way this
+   * allocator works.
+   */
+  if (request > MAX_ALLOC - HEADER_SIZE) unreachable();
+
+  /*
+   * Initialize our global state if this is the first call to "malloc". At the
+   * beginning, the tree has a single node that represents the smallest
+   * possible allocation size. More memory will be reserved later as needed.
+   */
+  if (base_ptr == 0) {
+    // base_ptr = max_ptr = (uint8_t *)sbrk(0);
+    base_ptr = (NODE_IS_SPLIT_END + 7) & ~7; // must be aligned
+    max_ptr = <usize>memory.size() << 16; // must grow first
+    bucket_limit = BUCKET_COUNT - 1;
+    if (!update_max_ptr(base_ptr + List.SIZE)) {
+      return 0;
+    }
+    list_init(buckets$get(BUCKET_COUNT - 1));
+    list_push(buckets$get(BUCKET_COUNT - 1), changetype<List>(base_ptr));
+  }
+
+  /*
+   * Find the smallest bucket that will fit this request. This doesn't check
+   * that there's space for the request yet.
+   */
+  bucket = bucket_for_request(request + HEADER_SIZE);
+  original_bucket = bucket;
+
+  /*
+   * Search for a bucket with a non-empty free list that's as large or larger
+   * than what we need. If there isn't an exact match, we'll need to split a
+   * larger one to get a match.
+   */
+  while (bucket + 1 != 0) {
+    let size: usize, bytes_needed: usize, i: usize;
+    let ptr: usize;
+
+    /*
+     * We may need to grow the tree to be able to fit an allocation of this
+     * size. Try to grow the tree and stop here if we can't.
+     */
+    if (!lower_bucket_limit(bucket)) {
+      return 0;
+    }
+
+    /*
+     * Try to pop a block off the free list for this bucket. If the free list
+     * is empty, we're going to have to split a larger block instead.
+     */
+    ptr = changetype<usize>(list_pop(buckets$get(bucket)));
+    if (!ptr) {
+      /*
+       * If we're not at the root of the tree or it's impossible to grow the
+       * tree any more, continue on to the next bucket.
+       */
+      if (bucket != bucket_limit || bucket == 0) {
+        bucket--;
+        continue;
+      }
+
+      /*
+       * Otherwise, grow the tree one more level and then pop a block off the
+       * free list again. Since we know the root of the tree is used (because
+       * the free list was empty), this will add a parent above this node in
+       * the SPLIT state and then add the new right child node to the free list
+       * for this bucket. Popping the free list will give us this right child.
+       */
+      if (!lower_bucket_limit(bucket - 1)) {
+        return 0;
+      }
+      ptr = changetype<usize>(list_pop(buckets$get(bucket)));
+    }
+
+    /*
+     * Try to expand the address space first before going any further. If we
+     * have run out of space, put this block back on the free list and fail.
+     */
+    size = 1 << (MAX_ALLOC_LOG2 - bucket);
+    bytes_needed = bucket < original_bucket ? size / 2 + List.SIZE : size;
+    if (!update_max_ptr(ptr + bytes_needed)) {
+      list_push(buckets$get(bucket), changetype<List>(ptr));
+      return 0;
+    }
+
+    /*
+     * If we got a node off the free list, change the node from UNUSED to USED.
+     * This involves flipping our parent's "is split" bit because that bit is
+     * the exclusive-or of the UNUSED flags of both children, and our UNUSED
+     * flag (which isn't ever stored explicitly) has just changed.
+     *
+     * Note that we shouldn't ever need to flip the "is split" bit of our
+     * grandparent because we know our buddy is USED so it's impossible for our
+     * grandparent to be UNUSED (if our buddy chunk was UNUSED, our parent
+     * wouldn't ever have been split in the first place).
+     */
+    i = node_for_ptr(ptr, bucket);
+    if (i != 0) {
+      flip_parent_is_split(i);
+    }
+
+    /*
+     * If the node we got is larger than we need, split it down to the correct
+     * size and put the new unused child nodes on the free list in the
+     * corresponding bucket. This is done by repeatedly moving to the left
+     * child, splitting the parent, and then adding the right child to the free
+     * list.
+     */
+    while (bucket < original_bucket) {
+      i = i * 2 + 1;
+      bucket++;
+      flip_parent_is_split(i);
+      list_push(
+        buckets$get(bucket),
+        changetype<List>(ptr_for_node(i + 1, bucket))
+      );
+    }
+
+    /*
+     * Now that we have a memory address, write the block header (just the size
+     * of the allocation) and return the address immediately after the header.
+     */
+    store<usize>(ptr, request);
+    return ptr + HEADER_SIZE;
+  }
+
+  return 0;
+}
+
+export function free(ptr: usize): void {
+  var bucket: usize, i: usize;
+
+  /*
+   * Ignore any attempts to free a NULL pointer.
+   */
+  if (!ptr) {
+    return;
+  }
+
+  /*
+   * We were given the address returned by "malloc" so get back to the actual
+   * address of the node by subtracting off the size of the block header. Then
+   * look up the index of the node corresponding to this address.
+   */
+  ptr = ptr - HEADER_SIZE;
+  bucket = bucket_for_request(load<usize>(ptr) + HEADER_SIZE);
+  i = node_for_ptr(ptr, bucket);
+
+  /*
+   * Traverse up to the root node, flipping USED blocks to UNUSED and merging
+   * UNUSED buddies together into a single UNUSED parent.
+   */
+  while (i != 0) {
+    /*
+     * Change this node from USED to UNUSED. This involves flipping our
+     * parent's "is split" bit because that bit is the exclusive-or of the
+     * UNUSED flags of both children, and our UNUSED flag (which isn't ever
+     * stored explicitly) has just changed.
+     */
+    flip_parent_is_split(i);
+
+    /*
+     * If the parent is now SPLIT, that means our buddy is USED, so don't merge
+     * with it. Instead, stop the iteration here and add ourselves to the free
+     * list for our bucket.
+     *
+     * Also stop here if we're at the current root node, even if that root node
+     * is now UNUSED. Root nodes don't have a buddy so we can't merge with one.
+     */
+    if (parent_is_split(i) || bucket == bucket_limit) {
+      break;
+    }
+
+    /*
+     * If we get here, we know our buddy is UNUSED. In this case we should
+     * merge with that buddy and continue traversing up to the root node. We
+     * need to remove the buddy from its free list here but we don't need to
+     * add the merged parent to its free list yet. That will be done once after
+     * this loop is finished.
+     */
+    list_remove(changetype<List>(ptr_for_node(((i - 1) ^ 1) + 1, bucket)));
+    i = (i - 1) / 2;
+    bucket--;
+  }
+
+  /*
+   * Add ourselves to the free list for our bucket. We add to the back of the
+   * list because "malloc" takes from the back of the list and we want a "free"
+   * followed by a "malloc" of the same size to ideally use the same address
+   * for better memory locality.
+   */
+  list_push(buckets$get(bucket), changetype<List>(ptr_for_node(i, bucket)));
+}
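Reviewer note: both functions above lean on implicit binary-tree index arithmetic: node i has children 2i+1 and 2i+2, parent (i - 1) / 2, and buddy ((i - 1) ^ 1) + 1. A small TypeScript sketch of just that arithmetic (illustrative only):

    // Implicit binary tree used by the buddy allocator: index 0 is the whole
    // arena, and each level down halves the block size (one bucket smaller).
    const leftChild = (i: number) => i * 2 + 1;
    const parent = (i: number) => (i - 1) >> 1;      // integer (i - 1) / 2
    const buddy = (i: number) => ((i - 1) ^ 1) + 1;  // flips left <-> right sibling

    console.log(buddy(1), buddy(2));             // 2 1 -- siblings are each other's buddy
    console.log(parent(buddy(5)) === parent(5)); // true -- a buddy shares its parent

    // Splitting repeatedly walks to the left child, one bucket per level:
    let i = 0;
    for (let bucket = 0; bucket < 3; bucket++) i = leftChild(i);
    console.log(i); // 7 -- leftmost node three levels down

This is why merging in free only needs list_remove on ptr_for_node(((i - 1) ^ 1) + 1, bucket) followed by a step up to (i - 1) / 2.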
@@ -11,12 +11,14 @@
 declare function _malloc(size: usize): usize;
 declare function _free(ptr: usize): void;

-// Memory allocator interface
-@global namespace memory {
-
-  @global export function __memory_allocate(size: usize): usize {
-    return _malloc(size);
-  }
-
-  @global export function __memory_free(ptr: usize): void {
-    _free(ptr);
-  }
-}
+// Memory allocator implementation
+@inline export function allocate(size: usize): usize {
+  return _malloc(size);
+}
+
+@inline export function free(ptr: usize): void {
+  _free(ptr);
+}
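Reviewer note: with this shim the allocator lives on the host side; `_malloc` and `_free` must be supplied as imports when the module is instantiated. A hypothetical JavaScript/TypeScript embedding (the `env` import module name and the trivial bump allocator are assumptions for illustration, not taken from the diff):

    declare const wasmBytes: BufferSource; // compiled module bytes, provided elsewhere

    async function instantiate(): Promise<WebAssembly.Instance> {
      let next = 8; // stand-in bump allocator playing the role of emscripten's _malloc
      const { instance } = await WebAssembly.instantiate(wasmBytes, {
        env: {
          _malloc: (size: number): number => { const ptr = next; next += size; return ptr; },
          _free: (_ptr: number): void => { /* nop in this stand-in */ },
        },
      });
      return instance;
    }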
@@ -11,11 +11,13 @@ declare function malloc(size: usize): usize;
 declare function free(ptr: usize): void;

 // Memory allocator interface
-@global namespace memory {
-
-  @global export function __memory_allocate(size: usize): usize {
-    return malloc(size);
-  }
-
-  @global export function __memory_free(ptr: usize): void {
-    free(ptr);
-  }
-}
+@inline export function allocate(size: usize): usize {
+  return malloc(size);
+}
+
+@inline export function free(ptr: usize): void {
+  free(ptr);
+}
@@ -434,70 +434,67 @@ function fls<T>(word: T): T {
 var ROOT: Root = changetype<Root>(0);

 // Memory allocator interface
-@global namespace memory {
-
-  /** Allocates a chunk of memory. */
-  @global export function __memory_allocate(size: usize): usize {
-
-    // initialize if necessary
-    var root = ROOT;
-    if (!root) {
-      let rootOffset = (HEAP_BASE + AL_MASK) & ~AL_MASK;
-      let pagesBefore = memory.size();
-      let pagesNeeded = <i32>((((rootOffset + Root.SIZE) + 0xffff) & ~0xffff) >>> 16);
-      if (pagesNeeded > pagesBefore && memory.grow(pagesNeeded - pagesBefore) < 0) unreachable();
-      ROOT = root = changetype<Root>(rootOffset);
-      root.tailRef = 0;
-      root.flMap = 0;
-      for (let fl: usize = 0; fl < FL_BITS; ++fl) {
-        root.setSLMap(fl, 0);
-        for (let sl: u32 = 0; sl < SL_SIZE; ++sl) {
-          root.setHead(fl, sl, null);
-        }
-      }
-      root.addMemory((rootOffset + Root.SIZE + AL_MASK) & ~AL_MASK, memory.size() << 16);
-    }
-
-    // search for a suitable block
-    if (size > Block.MAX_SIZE) unreachable();
-
-    // 32-bit MAX_SIZE is 1 << 30 and itself aligned, hence the following can't overflow MAX_SIZE
-    size = max<usize>((size + AL_MASK) & ~AL_MASK, Block.MIN_SIZE);
-
-    var block = root.search(size);
-    if (!block) {
-
-      // request more memory
-      let pagesBefore = memory.size();
-      let pagesNeeded = <i32>(((size + 0xffff) & ~0xffff) >>> 16);
-      let pagesWanted = max(pagesBefore, pagesNeeded); // double memory
-      if (memory.grow(pagesWanted) < 0) {
-        if (memory.grow(pagesNeeded) < 0) {
-          unreachable(); // out of memory
-        }
-      }
-      let pagesAfter = memory.size();
-      root.addMemory(<usize>pagesBefore << 16, <usize>pagesAfter << 16);
-      block = assert(root.search(size)); // must be found now
-    }
-
-    assert((block.info & ~TAGS) >= size);
-    return root.use(<Block>block, size);
-  }
-
-  /** Frees the chunk of memory at the specified address. */
-  @global export function __memory_free(data: usize): void {
-    if (data) {
-      let root = ROOT;
-      if (root) {
-        let block = changetype<Block>(data - Block.INFO);
-        let blockInfo = block.info;
-        assert(!(blockInfo & FREE)); // must be used
-        block.info = blockInfo | FREE;
-        root.insert(changetype<Block>(data - Block.INFO));
-      }
-    }
-  }
-
-  @global export function __memory_reset(): void {
-    unreachable();
-  }
-}
+/** Allocates a chunk of memory. */
+export function allocate(size: usize): usize {
+
+  // initialize if necessary
+  var root = ROOT;
+  if (!root) {
+    let rootOffset = (HEAP_BASE + AL_MASK) & ~AL_MASK;
+    let pagesBefore = memory.size();
+    let pagesNeeded = <i32>((((rootOffset + Root.SIZE) + 0xffff) & ~0xffff) >>> 16);
+    if (pagesNeeded > pagesBefore && memory.grow(pagesNeeded - pagesBefore) < 0) unreachable();
+    ROOT = root = changetype<Root>(rootOffset);
+    root.tailRef = 0;
+    root.flMap = 0;
+    for (let fl: usize = 0; fl < FL_BITS; ++fl) {
+      root.setSLMap(fl, 0);
+      for (let sl: u32 = 0; sl < SL_SIZE; ++sl) {
+        root.setHead(fl, sl, null);
+      }
+    }
+    root.addMemory((rootOffset + Root.SIZE + AL_MASK) & ~AL_MASK, memory.size() << 16);
+  }
+
+  // search for a suitable block
+  if (size > Block.MAX_SIZE) unreachable();
+
+  // 32-bit MAX_SIZE is 1 << 30 and itself aligned, hence the following can't overflow MAX_SIZE
+  size = max<usize>((size + AL_MASK) & ~AL_MASK, Block.MIN_SIZE);
+
+  var block = root.search(size);
+  if (!block) {
+
+    // request more memory
+    let pagesBefore = memory.size();
+    let pagesNeeded = <i32>(((size + 0xffff) & ~0xffff) >>> 16);
+    let pagesWanted = max(pagesBefore, pagesNeeded); // double memory
+    if (memory.grow(pagesWanted) < 0) {
+      if (memory.grow(pagesNeeded) < 0) {
+        unreachable(); // out of memory
+      }
+    }
+    let pagesAfter = memory.size();
+    root.addMemory(<usize>pagesBefore << 16, <usize>pagesAfter << 16);
+    block = assert(root.search(size)); // must be found now
+  }
+
+  assert((block.info & ~TAGS) >= size);
+  return root.use(<Block>block, size);
+}
+
+/** Frees the chunk of memory at the specified address. */
+export function free(data: usize): void {
+  if (data) {
+    let root = ROOT;
+    if (root) {
+      let block = changetype<Block>(data - Block.INFO);
+      let blockInfo = block.info;
+      assert(!(blockInfo & FREE)); // must be used
+      block.info = blockInfo | FREE;
+      root.insert(changetype<Block>(data - Block.INFO));
+    }
+  }
+}
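Reviewer note: the TLSF structure behind Root.search/insert keys its free lists by a two-level size class: a first-level index that is roughly floor(log2(size)) (the flMap bits) and a second-level index taken from the next SL_BITS bits below it (the per-level slMap bits). A TypeScript sketch of that mapping under assumed constants (not the literal constants of this file):

    const SL_BITS = 4;
    const SL_SIZE = 1 << SL_BITS;

    // Maps a size to (first level, second level); valid for size >= SL_SIZE.
    function mapping(size: number): [number, number] {
      const fl = 31 - Math.clz32(size);               // position of the highest set bit
      const sl = (size >>> (fl - SL_BITS)) ^ SL_SIZE; // next SL_BITS bits below it
      return [fl, sl];
    }

    console.log(mapping(1024)); // [ 10, 0 ] -- exactly 2^10, first slab of that level
    console.log(mapping(1090)); // [ 10, 1 ] -- one 64-byte slab step above 1024

Searching then becomes two bitmap scans (flMap, then that level's slMap), which is what keeps allocation and freeing O(1).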
@@ -13,6 +13,7 @@
 @builtin export declare function isFunction<T>(value?: T): bool;
 @builtin export declare function isNullable<T>(value?: T): bool;
 @builtin export declare function isDefined(expression: void): bool;
+@builtin export declare function isImplemented(expression: void): bool;
 @builtin export declare function isConstant(expression: void): bool;
 @builtin export declare function isManaged<T>(value?: T): bool;
 @inline export function isNaN<T>(value: T): bool { return value != value; }
@@ -501,6 +502,3 @@ export namespace v8x16 {
 }

 @builtin export declare function start(): void;
-
-@builtin export declare function __rt_classid<T>(): u32;
-@builtin export declare function __rt_iterateroots(fn: (ref: usize) => void): void;
@@ -1,10 +0,0 @@
-/* tslint:disable */
-
-export namespace gc {
-
-  export function collect(): void {
-    if (isDefined(__gc_collect)) { __gc_collect(); return; }
-    WARNING("Calling 'gc.collect' requires a garbage collector to be present.");
-    unreachable();
-  }
-}
std/assembly/index.d.ts (vendored, 4 changes)
@@ -144,6 +144,8 @@ declare function isFunction<T>(value?: any): value is (...args: any) => any;
 declare function isNullable<T>(value?: any): bool;
 /** Tests if the specified expression resolves to a defined element. Compiles to a constant. */
 declare function isDefined(expression: any): bool;
+/** Tests if the specified expression resolves to an implemented (non-stub) element. Compiles to a constant. */
+declare function isImplemented(expression: any): bool;
 /** Tests if the specified expression evaluates to a constant value. Compiles to a constant. */
 declare function isConstant(expression: any): bool;
 /** Tests if the specified type *or* expression is of a managed type. Compiles to a constant. */
@@ -980,6 +982,8 @@ declare namespace memory {
   export function fill(dst: usize, value: u8, count: usize): void;
   /** Copies n bytes from the specified source to the specified destination in memory. These regions may overlap. */
   export function copy(dst: usize, src: usize, n: usize): void;
+  /** Repeats `src` of length `srcLength` `count` times at `dst`. */
+  export function repeat(dst: usize, src: usize, srcLength: usize, count: usize): void;
   /** Copies elements from a passive element segment to a table. */
   // export function init(segmentIndex: u32, srcOffset: usize, dstOffset: usize, n: usize): void;
   /** Prevents further use of a passive element segment. */
@@ -1,55 +0,0 @@
-import { memcmp, memmove, memset } from "./internal/memory";
-
-@builtin export declare const HEAP_BASE: usize; // tslint:disable-line
-
-/* tslint:disable */
-
-export namespace memory {
-
-  @builtin export declare function size(): i32;
-
-  @builtin export declare function grow(pages: i32): i32;
-
-  @builtin @inline
-  export function fill(dest: usize, c: u8, n: usize): void { // see: musl/src/string/memset
-    memset(dest, c, n); // fallback if "bulk-memory" isn't enabled
-  }
-
-  @builtin @inline
-  export function copy(dest: usize, src: usize, n: usize): void { // see: musl/src/string/memmove.c
-    memmove(dest, src, n); // fallback if "bulk-memory" isn't enabled
-  }
-
-  @inline export function compare(vl: usize, vr: usize, n: usize): i32 { // see: musl/src/string/memcmp.c
-    return memcmp(vl, vr, n);
-  }
-
-  // Passive segments
-
-  // export function init(segmentIndex: u32, srcOffset: usize, dstOffset: usize, n: usize): void {
-  //   __memory_init(segmentIndex, srcOffset, dstOffset);
-  // }
-
-  // export function drop(segmentIndex: u32): void {
-  //   __memory_drop(segmentIndex);
-  // }
-
-  // Allocator
-
-  @inline export function allocate(size: usize): usize {
-    if (isDefined(__memory_allocate)) return __memory_allocate(size);
-    WARNING("Calling 'memory.allocate' requires a memory manager to be present.");
-    return <usize>unreachable();
-  }
-
-  @inline export function free(ptr: usize): void {
-    if (isDefined(__memory_free)) { __memory_free(ptr); return; }
-    WARNING("Calling 'memory.free' requires a memory manager to be present.");
-    unreachable();
-  }
-
-  @inline export function reset(): void {
-    if (isDefined(__memory_reset)) { __memory_reset(); return; }
-    unreachable();
-  }
-}
@@ -1,5 +1,9 @@
-import { AL_MASK, MAX_SIZE_32 } from "../internal/allocator";
-import { __rt_classid } from "../builtins";
+import {
+  AL_MASK,
+  MAX_SIZE_32
+} from "./internal/allocator";
+
+@builtin export declare const HEAP_BASE: usize;

 /** Common runtime header of all objects. */
 @unmanaged export class HEADER {
@@ -18,7 +22,7 @@ import { __rt_classid } from "../builtins";
 // runtime will most likely change significantly once reftypes and WASM GC are a thing.

 /** Whether a GC is present or not. */
-@inline export const GC = isDefined(gc);
+@inline export const GC = isImplemented(gc.register) && isImplemented(gc.link);

 /** Size of the common runtime header. */
 @inline export const HEADER_SIZE: usize = GC
@@ -28,8 +32,8 @@
 /** Magic value used to validate common runtime headers. */
 @inline export const HEADER_MAGIC: u32 = 0xA55E4B17;

-/** Aligns an allocation to actual block size. Primarily targets TLSF. */
-export function ALIGN(payloadSize: usize): usize {
+/** Adjusts an allocation to actual block size. Primarily targets TLSF. */
+export function ADJUST(payloadSize: usize): usize {
   // round up to power of 2, e.g. with HEADER_SIZE=8:
   // 0 -> 2^3 = 8
   // 1..8 -> 2^4 = 16
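Reviewer note: ADJUST rounds payload plus header up to the next power of two, matching the examples in the comment above. One way to express that rounding in plain TypeScript, with HEADER_SIZE = 8 as in those examples:

    const HEADER_SIZE = 8;

    // Next power of two >= payloadSize + HEADER_SIZE.
    function adjust(payloadSize: number): number {
      return 1 << (32 - Math.clz32(payloadSize + HEADER_SIZE - 1));
    }

    console.log(adjust(0)); // 8  (2^3)
    console.log(adjust(8)); // 16 (2^4) -- payloads 1..8 all map here
    console.log(adjust(9)); // 32 (2^5) -- payloads 9..24 all map here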
@@ -40,8 +44,8 @@ export function ALIGN(payloadSize: usize): usize {
 }

 /** Allocates a new object and returns a pointer to its payload. */
-export function ALLOC(payloadSize: u32): usize {
-  var header = changetype<HEADER>(memory.allocate(ALIGN(payloadSize)));
+@unsafe export function ALLOC(payloadSize: u32): usize {
+  var header = changetype<HEADER>(memory.allocate(ADJUST(payloadSize)));
   header.classId = HEADER_MAGIC;
   header.payloadSize = payloadSize;
   if (GC) {
@@ -54,14 +58,18 @@ export function ALLOC(payloadSize: u32): usize {
 }

 /** Reallocates an object if necessary. Returns a pointer to its (moved) payload. */
-export function REALLOC(ref: usize, newPayloadSize: u32): usize {
+@unsafe export function REALLOC(ref: usize, newPayloadSize: u32): usize {
+  // Background: When managed objects are allocated these aren't immediately registered with GC
+  // but can be used as scratch objects while unregistered. This is useful in situations where
+  // the object must be reallocated multiple times because its final size isn't known beforehand,
+  // e.g. in Array#filter, with only the final object making it into GC'ed userland.
   var header = changetype<HEADER>(ref - HEADER_SIZE);
   var payloadSize = header.payloadSize;
   if (payloadSize < newPayloadSize) {
-    let newAlignedSize = ALIGN(newPayloadSize);
-    if (ALIGN(payloadSize) < newAlignedSize) {
-      // move if the allocation isn't large enough to hold the new payload
-      let newHeader = changetype<HEADER>(memory.allocate(newAlignedSize));
+    let newAdjustedSize = ADJUST(newPayloadSize);
+    if (select(ADJUST(payloadSize), 0, ref > HEAP_BASE) < newAdjustedSize) {
+      // move if the allocation isn't large enough or not a heap object
+      let newHeader = changetype<HEADER>(memory.allocate(newAdjustedSize));
       newHeader.classId = HEADER_MAGIC;
       if (GC) {
         newHeader.gc1 = 0;
@@ -72,6 +80,7 @@ export function REALLOC(ref: usize, newPayloadSize: u32): usize {
       memory.fill(newRef + payloadSize, 0, newPayloadSize - payloadSize);
       if (header.classId == HEADER_MAGIC) {
         // free right away if not registered yet
+        assert(ref > HEAP_BASE); // static objects aren't scratch objects
         memory.free(changetype<usize>(header));
       }
       header = newHeader;
@@ -82,8 +91,7 @@
     }
   } else {
-    // if the size is the same or less, just update the header accordingly.
-    // it is not necessary to free unused space here because it is cleared
-    // when grown again anyway.
+    // unused space is cleared when grown, so no need to do this here.
   }
   header.payloadSize = newPayloadSize;
   return ref;
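Reviewer note: taken together, the REALLOC branches above split into three cases: move to a bigger block (freeing the old one right away if it was never registered with the GC), grow within the same block size class, or shrink in place. A condensed TypeScript model of just that decision (structure only, no real memory; `adjusted` plays the role of the ADJUST rounding sketched earlier):

    interface Header { classId: number; payloadSize: number; }
    const HEADER_MAGIC = 0xa55e4b17;

    function reallocAction(
      header: Header,
      newPayloadSize: number,
      adjusted: (n: number) => number
    ): string {
      if (header.payloadSize < newPayloadSize) {
        if (adjusted(header.payloadSize) < adjusted(newPayloadSize)) {
          // block too small: copy out, then free right away if still a scratch object
          return header.classId === HEADER_MAGIC ? "move and free scratch" : "move";
        }
        return "grow in place"; // same block size class: just zero the grown tail
      }
      return "shrink in place"; // header update only; unused space is cleared on regrow
    }

    const adj = (n: number) => 1 << (32 - Math.clz32(n + 7));
    console.log(reallocAction({ classId: HEADER_MAGIC, payloadSize: 8 }, 100, adj)); // move and free scratch

Note the new-side condition also forces a move when ref <= HEAP_BASE (a static object), which this simplified model omits.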
@@ -97,20 +105,21 @@ function unref(ref: usize): HEADER {
 }

 /** Frees an object. Must not have been registered with GC yet. */
-export function FREE(ref: usize): void {
+@unsafe export function FREE(ref: usize): void {
   memory.free(changetype<usize>(unref(ref)));
 }

 /** Registers a managed object. Cannot be free'd anymore afterwards. */
-@inline export function REGISTER<T>(ref: usize): T {
-  // inline this because it's generic so we don't get a bunch of functions
-  unref(ref).classId = __rt_classid<T>();
+@unsafe @inline export function REGISTER<T>(ref: usize): T {
+  // see comment in REALLOC why this is useful. also inline this because
+  // it's generic so we don't get a bunch of functions.
+  unref(ref).classId = gc.classId<T>();
   if (GC) gc.register(ref);
   return changetype<T>(ref);
 }

 /** Links a managed object with its managed parent. */
-export function LINK(ref: usize, parentRef: usize): void {
+@unsafe export function LINK(ref: usize, parentRef: usize): void {
   assert(ref >= HEAP_BASE + HEADER_SIZE); // must be a heap object
   var header = changetype<HEADER>(ref - HEADER_SIZE);
   assert(header.classId != HEADER_MAGIC && header.gc1 != 0 && header.gc2 != 0); // must be registered
@@ -119,7 +128,7 @@ export function LINK(ref: usize, parentRef: usize): void {

 /** ArrayBuffer base class. */
 export abstract class ArrayBufferBase {
-  static readonly MAX_BYTELENGTH: i32 = MAX_SIZE_32 - HEADER_SIZE;
+  @lazy static readonly MAX_BYTELENGTH: i32 = MAX_SIZE_32 - HEADER_SIZE;
   get byteLength(): i32 {
     return changetype<HEADER>(changetype<usize>(this) - HEADER_SIZE).payloadSize;
   }
@@ -127,8 +136,62 @@ export abstract class ArrayBufferBase {

 /** String base class. */
 export abstract class StringBase {
-  static readonly MAX_LENGTH: i32 = (MAX_SIZE_32 - HEADER_SIZE) >> 1;
+  @lazy static readonly MAX_LENGTH: i32 = (MAX_SIZE_32 - HEADER_SIZE) >> 1;
   get length(): i32 {
     return changetype<HEADER>(changetype<usize>(this) - HEADER_SIZE).payloadSize >> 1;
   }
 }
+
+import { memcmp, memmove, memset } from "./internal/memory";
+
+/** Memory manager interface. */
+export namespace memory {
+  @builtin export declare function size(): i32;
+  @builtin @unsafe export declare function grow(pages: i32): i32;
+  @builtin @unsafe @inline export function fill(dst: usize, c: u8, n: usize): void {
+    memset(dst, c, n); // fallback if "bulk-memory" isn't enabled
+  }
+  @builtin @unsafe @inline export function copy(dst: usize, src: usize, n: usize): void {
+    memmove(dst, src, n); // fallback if "bulk-memory" isn't enabled
+  }
+  @unsafe export function init(segmentIndex: u32, srcOffset: usize, dstOffset: usize, n: usize): void {
+    ERROR("not implemented");
+  }
+  @unsafe export function drop(segmentIndex: u32): void {
+    ERROR("not implemented");
+  }
+  @stub @inline export function allocate(size: usize): usize {
+    ERROR("stub: missing memory manager");
+    return <usize>unreachable();
+  }
+  @stub @unsafe @inline export function free(ptr: usize): void {
+    ERROR("stub: missing memory manager");
+  }
+  @stub @unsafe @inline export function reset(): void {
+    ERROR("stub: not supported by memory manager");
+  }
+  @inline export function compare(vl: usize, vr: usize, n: usize): i32 {
+    return memcmp(vl, vr, n);
+  }
+  @unsafe export function repeat(dst: usize, src: usize, srcLength: usize, count: usize): void {
+    var index: usize = 0;
+    var total = srcLength * count;
+    while (index < total) {
+      memory.copy(dst + index, src, srcLength);
+      index += srcLength;
+    }
+  }
+}
+
+/** Garbage collector interface. */
+export namespace gc {
+  @builtin @unsafe export declare function classId<T>(): u32;
+  @builtin @unsafe export declare function iterateRoots(fn: (ref: usize) => void): void;
+  @stub @unsafe export function register(ref: usize): void {
+    ERROR("stub: missing garbage collector");
+  }
+  @stub @unsafe export function link(ref: usize, parentRef: usize): void {
+    ERROR("stub: missing garbage collector");
+  }
+  @stub export function collect(): void {}
+}
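Reviewer note: memory.repeat above tiles one source block end to end `count` times, which is exactly what the string padding and String#repeat call sites later in this diff rely on. Its behavior, modeled on a plain byte array in TypeScript:

    // Tile src (srcLength bytes) count times at dst, as memory.repeat does.
    function repeat(mem: Uint8Array, dst: number, src: number, srcLength: number, count: number): void {
      const total = srcLength * count;
      for (let index = 0; index < total; index += srcLength) {
        mem.copyWithin(dst + index, src, src + srcLength);
      }
    }

    const mem = new Uint8Array(16);
    mem.set([1, 2, 3]);
    repeat(mem, 3, 0, 3, 3);
    console.log(mem.join(",")); // 1,2,3,1,2,3,1,2,3,1,2,3,0,0,0,0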
@@ -1,8 +0,0 @@
-@global namespace gc {
-  @unsafe export function register(ref: usize): void {
-  }
-  @unsafe export function link(ref: usize, parentRef: usize): void {
-  }
-  export function collect(): void {
-  }
-}
@@ -13,75 +13,16 @@ import {
   STORE
 } from "./internal/arraybuffer";

-function compareImpl(str1: String, offset1: usize, str2: String, offset2: usize, len: usize): i32 {
+function compareImpl(str1: String, index1: usize, str2: String, index2: usize, len: usize): i32 {
   var result: i32 = 0;
-  var ptr1 = changetype<usize>(str1) + (offset1 << 1);
-  var ptr2 = changetype<usize>(str2) + (offset2 << 1);
+  var ptr1 = changetype<usize>(str1) + (index1 << 1);
+  var ptr2 = changetype<usize>(str2) + (index2 << 1);
   while (len && !(result = <i32>load<u16>(ptr1) - <i32>load<u16>(ptr2))) {
     --len, ptr1 += 2, ptr2 += 2;
   }
   return result;
 }

-function repeatImpl(dst: usize, dstIndex: usize, src: String, count: i32): void {
-  var length = src.length;
-  if (ASC_SHRINK_LEVEL > 1) {
-    let strLen = length << 1;
-    let to = changetype<usize>(dst) + (dstIndex << 1);
-    let from = changetype<usize>(src);
-    for (let i = 0, len = strLen * count; i < len; i += strLen) {
-      memory.copy(to + i, from, strLen);
-    }
-  } else {
-    switch (length) {
-      case 0: break;
-      case 1: {
-        let cc = load<u16>(changetype<usize>(src));
-        let out = changetype<usize>(dst) + (dstIndex << 1);
-        for (let i = 0; i < count; ++i) {
-          store<u16>(out + (i << 1), cc);
-        }
-        break;
-      }
-      case 2: {
-        let cc = load<u32>(changetype<usize>(src));
-        let out = changetype<usize>(dst) + (dstIndex << 1);
-        for (let i = 0; i < count; ++i) {
-          store<u32>(out + (i << 2), cc);
-        }
-        break;
-      }
-      case 3: {
-        let cc1 = load<u32>(changetype<usize>(src));
-        let cc2 = load<u16>(changetype<usize>(src), 4);
-        let out = changetype<usize>(dst) + (dstIndex << 1);
-        for (let i = 0; i < count; ++i) {
-          store<u32>(out + (i << 2), cc1);
-          store<u16>(out + (i << 1), cc2, 4);
-        }
-        break;
-      }
-      case 4: {
-        let cc = load<u64>(changetype<usize>(src));
-        let out = changetype<usize>(dst) + (dstIndex << 1);
-        for (let i = 0; i < count; ++i) {
-          store<u64>(out + (i << 3), cc);
-        }
-        break;
-      }
-      default: {
-        let strLen = length << 1;
-        let to = changetype<usize>(dst) + (dstIndex << 1);
-        let from = changetype<usize>(src);
-        for (let i = 0, len = strLen * count; i < len; i += strLen) {
-          memory.copy(to + i, from, strLen);
-        }
-        break;
-      }
-    }
-  }
-}
-
 function isWhiteSpaceOrLineTerminator(c: u16): bool {
   switch (c) {
     case 9: // <TAB>
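Reviewer note: compareImpl is effectively a 16-bit memcmp over UTF-16 code units starting at the given indices, returning the first nonzero code-unit difference. An equivalent sketch in plain TypeScript over Uint16Array (illustrative only):

    function compareImpl(a: Uint16Array, i1: number, b: Uint16Array, i2: number, len: number): number {
      let result = 0;
      while (len && !(result = a[i1] - b[i2])) {
        --len, ++i1, ++i2;
      }
      return result;
    }

    const s1 = Uint16Array.from("abcd", c => c.charCodeAt(0));
    const s2 = Uint16Array.from("abcz", c => c.charCodeAt(0));
    console.log(compareImpl(s1, 0, s2, 0, 4)); // -22, i.e. "abcd" < "abcz"

The rename from offset1/offset2 to index1/index2 fits, since the parameters count code units rather than bytes.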
@@ -373,7 +314,7 @@ export class String extends StringBase {
       let count = (len - 1) / padLen;
       let base = count * padLen;
       let rest = len - base;
-      repeatImpl(out, 0, padString, count);
+      memory.repeat(out, changetype<usize>(padString), <usize>padString.length << 1, count);
       if (rest) {
         memory.copy(out + (<usize>base << 1), changetype<usize>(padString), <usize>rest << 1);
       }
@@ -400,7 +341,7 @@ export class String extends StringBase {
       let count = (len - 1) / padLen;
       let base = count * padLen;
       let rest = len - base;
-      repeatImpl(out, length, padString, count);
+      memory.repeat(out + (<usize>length << 1), changetype<usize>(padString), <usize>padString.length << 1, count);
       if (rest) {
         memory.copy(out + ((<usize>base + <usize>length) << 1), changetype<usize>(padString), <usize>rest << 1);
       }
@@ -422,7 +363,7 @@ export class String extends StringBase {
     if (count == 0 || !length) return changetype<String>("");
     if (count == 1) return this;
     var out = ALLOC(length * count);
-    repeatImpl(out, 0, this, count);
+    memory.repeat(out, changetype<usize>(this), <usize>length << 1, count);
     return REGISTER<String>(out);
   }

@@ -468,7 +409,7 @@ export class String extends StringBase {
     }
     var result = new Array<String>();
     var end = 0, start = 0, i = 0;
-    while ((end = this.indexOf(separator, start)) != -1) {
+    while ((end = this.indexOf(separator!, start)) != -1) {
       let len = end - start;
       if (len > 0) {
         let out = ALLOC(<usize>len << 1);