slim down runtime

dcode
2019-04-04 02:25:22 +02:00
parent 85f3fc54a7
commit 25c5dfddad
94 changed files with 22219 additions and 34648 deletions

View File

@@ -1,13 +1,12 @@
 /// <reference path="./collector/index.d.ts" />
-import { MAX_BYTELENGTH } from "./util/runtime";
+import { MAX_BYTELENGTH, reallocate } from "./util/runtime";
 import { COMPARATOR, SORT } from "./util/sort";
-import { runtime, __runtime_id } from "./runtime";
+import { runtime, __runtime_id, __gc_mark_members } from "./runtime";
 import { ArrayBuffer, ArrayBufferView } from "./arraybuffer";
 import { itoa, dtoa, itoa_stream, dtoa_stream, MAX_DOUBLE_LENGTH } from "./util/number";
 import { isArray as builtin_isArray } from "./builtins";
 import { E_INDEXOUTOFRANGE, E_INVALIDLENGTH, E_EMPTYARRAY, E_HOLEYARRAY } from "./util/error";
-import { __gc_mark_members } from "./gc";
 /** Ensures that the given array has _at least_ the specified capacity. */
 function ensureCapacity(array: ArrayBufferView, minCapacity: i32, alignLog2: u32): void {
@@ -15,7 +14,7 @@ function ensureCapacity(array: ArrayBufferView, minCapacity: i32, alignLog2: u32
 if (<u32>minCapacity > <u32>(MAX_BYTELENGTH >>> alignLog2)) throw new RangeError(E_INVALIDLENGTH);
 let oldData = array.data;
 let newByteLength = minCapacity << alignLog2;
-let newData = runtime.reallocate(changetype<usize>(oldData), <usize>newByteLength); // registers on move
+let newData = reallocate(changetype<usize>(oldData), <usize>newByteLength); // registers on move
 if (newData !== changetype<usize>(oldData)) {
 array.data = changetype<ArrayBuffer>(newData); // links
 array.dataStart = newData;
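For orientation, ensureCapacity above is the growth path behind mutating array operations. A minimal sketch of how a push-like helper might drive it, assuming u16 elements (alignLog2 = 1); the helper name pushU16 and the length_ field access are illustrative, not the exact stdlib code:

// Hypothetical sketch, not stdlib code: grow first, then store the element.
function pushU16(array: Array<u16>, value: u16): i32 {
  var newLength = array.length_ + 1;
  ensureCapacity(changetype<ArrayBufferView>(array), newLength, /* alignLog2 */ 1);
  store<u16>(changetype<ArrayBufferView>(array).dataStart + (<usize>(newLength - 1) << 1), value);
  array.length_ = newLength;
  return newLength;
}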

View File

@@ -5,7 +5,7 @@
 const TRACE = isDefined(GC_TRACE);
 import { HEADER_SIZE } from "../util/runtime";
-import { __gc_mark_roots, __gc_mark_members } from "../gc";
+import { __gc_mark_roots, __gc_mark_members } from "../runtime";
 /** Collector states. */
 const enum State {

View File

@@ -1,7 +1,6 @@
 import { HEADER, HEADER_SIZE, MAX_BYTELENGTH } from "./util/runtime";
-import { runtime, __runtime_id } from "./runtime";
+import { runtime, __runtime_id, __gc_mark_members } from "./runtime";
 import { E_INDEXOUTOFRANGE, E_INVALIDLENGTH, E_HOLEYARRAY } from "./util/error";
-import { __gc_mark_members } from "./gc";
 // NOTE: DO NOT USE YET!

View File

@ -1,60 +0,0 @@
/// <reference path="./collector/index.d.ts" />
import { E_NOTIMPLEMENTED } from "./util/error";
/** Marks root objects. */
// @ts-ignore: decorator
@unsafe @builtin
export declare function __gc_mark_roots(): void;
/** Marks class members. */
// @ts-ignore: decorator
@unsafe @builtin
export declare function __gc_mark_members(classId: u32, ref: usize): void;
// @ts-ignore
@lazy
var ROOT = new Set<usize>();
/** Garbage collector interface. */
export namespace gc {
/** Whether the garbage collector interface is implemented. */
// @ts-ignore: decorator
@lazy
export const implemented: bool = isDefined(__ref_collect);
/** Performs a full garbage collection cycle. */
export function collect(): void {
if (isDefined(__ref_collect)) __ref_collect();
else throw new Error(E_NOTIMPLEMENTED);
}
/** Retains a managed object externally, making sure that it doesn't become collected. */
// @ts-ignore: decorator
@unsafe
export function retain(ref: usize): void {
var root = ROOT;
if (!root.has(ref)) {
root.add(ref);
if (implemented) {
if (isDefined(__ref_link)) __ref_link(ref, changetype<usize>(root));
else if (isDefined(__ref_retain)) __ref_retain(ref);
}
}
}
/** Releases a managed object externally, allowing it to become collected. */
// @ts-ignore: decorator
@unsafe
export function release(ref: usize): void {
var root = ROOT;
if (root.has(ref)) {
root.delete(ref);
if (implemented) {
if (isDefined(__ref_unlink)) __ref_unlink(ref, changetype<usize>(root));
else if (isDefined(__ref_release)) __ref_release(ref);
}
}
}
}

View File

@@ -1,8 +1,7 @@
 /// <reference path="./collector/index.d.ts" />
 import { HASH } from "./util/hash";
-import { __runtime_id } from "./runtime";
-import { __gc_mark_members } from "./gc";
+import { __runtime_id, __gc_mark_members } from "./runtime";
 // A deterministic hash map based on CloseTable from https://github.com/jorendorff/dht

View File

@@ -4,6 +4,7 @@
 import { HEADER, HEADER_SIZE, HEADER_MAGIC, adjust } from "./util/runtime";
 import { HEAP_BASE, memory } from "./memory";
 import { ArrayBufferView } from "./arraybuffer";
+import { E_NOTIMPLEMENTED } from "./util/error";
 /** Gets the computed unique id of a class type. */
 // @ts-ignore: decorator
@@ -15,6 +16,16 @@ export declare function __runtime_id<T>(): u32;
 @unsafe @builtin
 export declare function __runtime_instanceof(id: u32, superId: u32): bool;
+/** Marks root objects when a tracing GC is present. */
+// @ts-ignore: decorator
+@unsafe @builtin
+export declare function __gc_mark_roots(): void;
+/** Marks class members when a tracing GC is present. */
+// @ts-ignore: decorator
+@unsafe @builtin
+export declare function __gc_mark_members(classId: u32, ref: usize): void;
 /** Runtime implementation. */
 @unmanaged
 export class runtime {
@@ -49,52 +60,6 @@ export namespace runtime {
 return changetype<usize>(header) + HEADER_SIZE;
 }
-/** Reallocates the memory of a managed object that turned out to be too small or too large. */
-// @ts-ignore: decorator
-@unsafe
-export function reallocate(ref: usize, newPayloadSize: usize): usize {
-// Background: When managed objects are allocated these aren't immediately registered with GC
-// but can be used as scratch objects while unregistered. This is useful in situations where
-// the object must be reallocated multiple times because its final size isn't known beforehand,
-// e.g. in Array#filter, with only the final object making it into GC'ed userland.
-var header = changetype<HEADER>(ref - HEADER_SIZE);
-var payloadSize = header.payloadSize;
-if (payloadSize < newPayloadSize) {
-let newAdjustedSize = adjust(newPayloadSize);
-if (select(adjust(payloadSize), 0, ref > HEAP_BASE) < newAdjustedSize) {
-// move if the allocation isn't large enough or not a heap object
-let newHeader = changetype<HEADER>(memory.allocate(newAdjustedSize));
-newHeader.classId = header.classId;
-if (isDefined(__ref_collect)) {
-newHeader.reserved1 = 0;
-newHeader.reserved2 = 0;
-}
-let newRef = changetype<usize>(newHeader) + HEADER_SIZE;
-memory.copy(newRef, ref, payloadSize);
-memory.fill(newRef + payloadSize, 0, newPayloadSize - payloadSize);
-if (header.classId == HEADER_MAGIC) {
-// free right away if not registered yet
-assert(ref > HEAP_BASE); // static objects aren't scratch objects
-memory.free(changetype<usize>(header));
-} else if (isDefined(__ref_collect)) {
-// if previously registered, register again
-// @ts-ignore: stub
-__ref_register(ref);
-}
-header = newHeader;
-ref = newRef;
-} else {
-// otherwise just clear additional memory within this block
-memory.fill(ref + payloadSize, 0, newPayloadSize - payloadSize);
-}
-} else {
-// if the size is the same or less, just update the header accordingly.
-// unused space is cleared when grown, so no need to do this here.
-}
-header.payloadSize = newPayloadSize;
-return ref;
-}
 /** Discards the memory of a managed object that hasn't been registered yet. */
 // @ts-ignore: decorator
 @unsafe
@@ -129,14 +94,14 @@ export namespace runtime {
 // @ts-ignore: decorator
 @unsafe
 export function newString(length: i32): usize {
-return runtime.register(runtime.allocate(<usize>length << 1), __runtime_id<String>());
+return register(allocate(<usize>length << 1), __runtime_id<String>());
 }
 /** Allocates and registers, but doesn't initialize the data of, a new `ArrayBuffer` of the specified byteLength. */
 // @ts-ignore: decorator
 @unsafe
 export function newArrayBuffer(byteLength: i32): usize {
-return runtime.register(runtime.allocate(<usize>byteLength), __runtime_id<ArrayBuffer>());
+return register(allocate(<usize>byteLength), __runtime_id<ArrayBuffer>());
 }
 /** Allocates and registers, but doesn't initialize the data of, a new `Array` of the specified length and element alignment. */
@@ -148,9 +113,9 @@ export namespace runtime {
 // called and the static buffer provided as `data`. This function can also be used to
 // create typed arrays in that `Array` also implements `ArrayBufferView` but has an
 // additional `.length_` property that remains unused overhead for typed arrays.
-var array = runtime.register(runtime.allocate(offsetof<i32[]>()), id);
+var array = register(allocate(offsetof<i32[]>()), id);
 var bufferSize = <usize>length << alignLog2;
-var buffer = runtime.register(runtime.allocate(bufferSize), __runtime_id<ArrayBuffer>());
+var buffer = register(allocate(bufferSize), __runtime_id<ArrayBuffer>());
 changetype<ArrayBufferView>(array).data = changetype<ArrayBuffer>(buffer); // links
 changetype<ArrayBufferView>(array).dataStart = buffer;
 changetype<ArrayBufferView>(array).dataLength = bufferSize;
@@ -158,4 +123,36 @@ export namespace runtime {
 if (data) memory.copy(buffer, data, bufferSize);
 return array;
 }
+/** Retains a managed object externally, making sure that it doesn't become collected. */
+// @ts-ignore: decorator
+@unsafe
+export function retain(ref: usize): void {
+if (isDefined(__ref_collect)) {
+if (isDefined(__ref_link)) __ref_link(ref, changetype<usize>(ROOT));
+else if (isDefined(__ref_retain)) __ref_retain(ref);
+}
+}
+/** Releases a managed object externally, allowing it to become collected. */
+// @ts-ignore: decorator
+@unsafe
+export function release(ref: usize): void {
+if (isDefined(__ref_collect)) {
+if (isDefined(__ref_unlink)) __ref_unlink(ref, changetype<usize>(ROOT));
+else if (isDefined(__ref_release)) __ref_release(ref);
+}
+}
+/** Performs a full garbage collection cycle. */
+export function collect(): void {
+if (isDefined(__ref_collect)) __ref_collect();
+else throw new Error(E_NOTIMPLEMENTED);
+}
 }
+class Root {}
+// @ts-ignore
+@lazy
+var ROOT = new Root();
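A hedged usage sketch of the relocated retain/release pair, showing how embedder-facing bindings might pin a managed reference across host calls; the exported function names are illustrative only:

// Illustrative only: pin a managed object so a collector wired up via
// __ref_link/__ref_retain cannot reclaim it while the host still holds it.
export function makeMessage(): usize {
  var ref = changetype<usize>("hello");
  runtime.retain(ref);  // anchors to ROOT; a no-op when no collector is linked
  return ref;
}
export function dropMessage(ref: usize): void {
  runtime.release(ref); // unpins; a later runtime.collect() may reclaim it
}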

View File

@@ -1,3 +1 @@
 import "allocator/arena";
-// export { memory };

View File

@@ -1,4 +1,4 @@
 import "allocator/tlsf";
 import "collector/itcm";
-// export { memory, gc };
+export { runtime };

View File

@@ -1,8 +1,7 @@
 /// <reference path="./collector/index.d.ts" />
 import { HASH } from "./util/hash";
-import { __runtime_id } from "./runtime";
-import { __gc_mark_members } from "./gc";
+import { __runtime_id, __gc_mark_members } from "./runtime";
 // A deterministic hash set based on CloseTable from https://github.com/jorendorff/dht

View File

@@ -1,154 +1,154 @@
-export function memcpy(dest: usize, src: usize, n: usize): void { // see: musl/src/string/memcpy.c
-var w: u32, x: u32;
+// export function memcpy(dest: usize, src: usize, n: usize): void { // see: musl/src/string/memcpy.c
+// var w: u32, x: u32;
-// copy 1 byte each until src is aligned to 4 bytes
-while (n && (src & 3)) {
-store<u8>(dest++, load<u8>(src++));
-n--;
-}
+// // copy 1 byte each until src is aligned to 4 bytes
+// while (n && (src & 3)) {
+// store<u8>(dest++, load<u8>(src++));
+// n--;
+// }
-// if dst is aligned to 4 bytes as well, copy 4 bytes each
-if ((dest & 3) == 0) {
-while (n >= 16) {
-store<u32>(dest , load<u32>(src ));
-store<u32>(dest + 4, load<u32>(src + 4));
-store<u32>(dest + 8, load<u32>(src + 8));
-store<u32>(dest + 12, load<u32>(src + 12));
-src += 16; dest += 16; n -= 16;
-}
-if (n & 8) {
-store<u32>(dest , load<u32>(src ));
-store<u32>(dest + 4, load<u32>(src + 4));
-dest += 8; src += 8;
-}
-if (n & 4) {
-store<u32>(dest, load<u32>(src));
-dest += 4; src += 4;
-}
-if (n & 2) { // drop to 2 bytes each
-store<u16>(dest, load<u16>(src));
-dest += 2; src += 2;
-}
-if (n & 1) { // drop to 1 byte
-store<u8>(dest++, load<u8>(src++));
-}
-return;
-}
+// // if dst is aligned to 4 bytes as well, copy 4 bytes each
+// if ((dest & 3) == 0) {
+// while (n >= 16) {
+// store<u32>(dest , load<u32>(src ));
+// store<u32>(dest + 4, load<u32>(src + 4));
+// store<u32>(dest + 8, load<u32>(src + 8));
+// store<u32>(dest + 12, load<u32>(src + 12));
+// src += 16; dest += 16; n -= 16;
+// }
+// if (n & 8) {
+// store<u32>(dest , load<u32>(src ));
+// store<u32>(dest + 4, load<u32>(src + 4));
+// dest += 8; src += 8;
+// }
+// if (n & 4) {
+// store<u32>(dest, load<u32>(src));
+// dest += 4; src += 4;
+// }
+// if (n & 2) { // drop to 2 bytes each
+// store<u16>(dest, load<u16>(src));
+// dest += 2; src += 2;
+// }
+// if (n & 1) { // drop to 1 byte
+// store<u8>(dest++, load<u8>(src++));
+// }
+// return;
+// }
-// if dst is not aligned to 4 bytes, use alternating shifts to copy 4 bytes each
-// doing shifts is faster when copying enough bytes (here: 32 or more)
-if (n >= 32) {
-switch (dest & 3) {
-// known to be != 0
-case 1: {
-w = load<u32>(src);
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-n -= 3;
-while (n >= 17) {
-x = load<u32>(src + 1);
-store<u32>(dest, w >> 24 | x << 8);
-w = load<u32>(src + 5);
-store<u32>(dest + 4, x >> 24 | w << 8);
-x = load<u32>(src + 9);
-store<u32>(dest + 8, w >> 24 | x << 8);
-w = load<u32>(src + 13);
-store<u32>(dest + 12, x >> 24 | w << 8);
-src += 16; dest += 16; n -= 16;
-}
-break;
-}
-case 2: {
-w = load<u32>(src);
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-n -= 2;
-while (n >= 18) {
-x = load<u32>(src + 2);
-store<u32>(dest, w >> 16 | x << 16);
-w = load<u32>(src + 6);
-store<u32>(dest + 4, x >> 16 | w << 16);
-x = load<u32>(src + 10);
-store<u32>(dest + 8, w >> 16 | x << 16);
-w = load<u32>(src + 14);
-store<u32>(dest + 12, x >> 16 | w << 16);
-src += 16; dest += 16; n -= 16;
-}
-break;
-}
-case 3: {
-w = load<u32>(src);
-store<u8>(dest++, load<u8>(src++));
-n -= 1;
-while (n >= 19) {
-x = load<u32>(src + 3);
-store<u32>(dest, w >> 8 | x << 24);
-w = load<u32>(src + 7);
-store<u32>(dest + 4, x >> 8 | w << 24);
-x = load<u32>(src + 11);
-store<u32>(dest + 8, w >> 8 | x << 24);
-w = load<u32>(src + 15);
-store<u32>(dest + 12, x >> 8 | w << 24);
-src += 16; dest += 16; n -= 16;
-}
-break;
-}
-}
-}
+// // if dst is not aligned to 4 bytes, use alternating shifts to copy 4 bytes each
+// // doing shifts is faster when copying enough bytes (here: 32 or more)
+// if (n >= 32) {
+// switch (dest & 3) {
+// // known to be != 0
+// case 1: {
+// w = load<u32>(src);
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// n -= 3;
+// while (n >= 17) {
+// x = load<u32>(src + 1);
+// store<u32>(dest, w >> 24 | x << 8);
+// w = load<u32>(src + 5);
+// store<u32>(dest + 4, x >> 24 | w << 8);
+// x = load<u32>(src + 9);
+// store<u32>(dest + 8, w >> 24 | x << 8);
+// w = load<u32>(src + 13);
+// store<u32>(dest + 12, x >> 24 | w << 8);
+// src += 16; dest += 16; n -= 16;
+// }
+// break;
+// }
+// case 2: {
+// w = load<u32>(src);
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// n -= 2;
+// while (n >= 18) {
+// x = load<u32>(src + 2);
+// store<u32>(dest, w >> 16 | x << 16);
+// w = load<u32>(src + 6);
+// store<u32>(dest + 4, x >> 16 | w << 16);
+// x = load<u32>(src + 10);
+// store<u32>(dest + 8, w >> 16 | x << 16);
+// w = load<u32>(src + 14);
+// store<u32>(dest + 12, x >> 16 | w << 16);
+// src += 16; dest += 16; n -= 16;
+// }
+// break;
+// }
+// case 3: {
+// w = load<u32>(src);
+// store<u8>(dest++, load<u8>(src++));
+// n -= 1;
+// while (n >= 19) {
+// x = load<u32>(src + 3);
+// store<u32>(dest, w >> 8 | x << 24);
+// w = load<u32>(src + 7);
+// store<u32>(dest + 4, x >> 8 | w << 24);
+// x = load<u32>(src + 11);
+// store<u32>(dest + 8, w >> 8 | x << 24);
+// w = load<u32>(src + 15);
+// store<u32>(dest + 12, x >> 8 | w << 24);
+// src += 16; dest += 16; n -= 16;
+// }
+// break;
+// }
+// }
+// }
-// copy remaining bytes one by one
-if (n & 16) {
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-}
-if (n & 8) {
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-}
-if (n & 4) {
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-}
-if (n & 2) {
-store<u8>(dest++, load<u8>(src++));
-store<u8>(dest++, load<u8>(src++));
-}
-if (n & 1) {
-store<u8>(dest++, load<u8>(src++));
-}
-}
+// // copy remaining bytes one by one
+// if (n & 16) {
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// }
+// if (n & 8) {
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// }
+// if (n & 4) {
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// }
+// if (n & 2) {
+// store<u8>(dest++, load<u8>(src++));
+// store<u8>(dest++, load<u8>(src++));
+// }
+// if (n & 1) {
+// store<u8>(dest++, load<u8>(src++));
+// }
+// }
 // @ts-ignore: decorator
 @inline
 export function memmove(dest: usize, src: usize, n: usize): void { // see: musl/src/string/memmove.c
 if (dest === src) return;
-if (src + n <= dest || dest + n <= src) {
-memcpy(dest, src, n);
-return;
-}
+// if (src + n <= dest || dest + n <= src) {
+// memcpy(dest, src, n);
+// return;
+// }
 if (dest < src) {
 if ((src & 7) == (dest & 7)) {
 while (dest & 7) {
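The fast path being commented out above stitches misaligned stores together from aligned 32-bit loads. A minimal little-endian sketch of that shift technique for the dest & 3 == 1 case, assuming the three lead-in byte copies have already run (the helper name copyStitched1 is illustrative):

// Sketch: dest is now 4-byte aligned while src trails 3 bytes behind its own
// alignment, so each output word combines the top byte of the previous
// aligned load with the low three bytes of the next one (little-endian).
function copyStitched1(dest: usize, src: usize, words: i32): void {
  var w = load<u32>(src - 3); // aligned word holding the one pending byte
  for (let i = 0; i < words; ++i) {
    let x = load<u32>(src + 1); // next aligned word
    store<u32>(dest, w >> 24 | x << 8);
    w = x;
    src += 4; dest += 4;
  }
}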

View File

@@ -45,3 +45,49 @@ export function adjust(payloadSize: usize): usize {
 // MAX_LENGTH -> 2^30 = 0x40000000 (MAX_SIZE_32)
 return <usize>1 << <usize>(<u32>32 - clz<u32>(payloadSize + HEADER_SIZE - 1));
 }
+/** Reallocates the memory of a managed object that turned out to be too small or too large. */
+// @ts-ignore: decorator
+@unsafe
+export function reallocate(ref: usize, newPayloadSize: usize): usize {
+// Background: When managed objects are allocated these aren't immediately registered with GC
+// but can be used as scratch objects while unregistered. This is useful in situations where
+// the object must be reallocated multiple times because its final size isn't known beforehand,
+// e.g. in Array#filter, with only the final object making it into GC'ed userland.
+var header = changetype<HEADER>(ref - HEADER_SIZE);
+var payloadSize = header.payloadSize;
+if (payloadSize < newPayloadSize) {
+let newAdjustedSize = adjust(newPayloadSize);
+if (select(adjust(payloadSize), 0, ref > HEAP_BASE) < newAdjustedSize) {
+// move if the allocation isn't large enough or not a heap object
+let newHeader = changetype<HEADER>(memory.allocate(newAdjustedSize));
+newHeader.classId = header.classId;
+if (isDefined(__ref_collect)) {
+newHeader.reserved1 = 0;
+newHeader.reserved2 = 0;
+}
+let newRef = changetype<usize>(newHeader) + HEADER_SIZE;
+memory.copy(newRef, ref, payloadSize);
+memory.fill(newRef + payloadSize, 0, newPayloadSize - payloadSize);
+if (header.classId == HEADER_MAGIC) {
+// free right away if not registered yet
+assert(ref > HEAP_BASE); // static objects aren't scratch objects
+memory.free(changetype<usize>(header));
+} else if (isDefined(__ref_collect)) {
+// if previously registered, register again
+// @ts-ignore: stub
+__ref_register(ref);
+}
+header = newHeader;
+ref = newRef;
+} else {
+// otherwise just clear additional memory within this block
+memory.fill(ref + payloadSize, 0, newPayloadSize - payloadSize);
+}
+} else {
+// if the size is the same or less, just update the header accordingly.
+// unused space is cleared when grown, so no need to do this here.
+}
+header.payloadSize = newPayloadSize;
+return ref;
+}
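To make the sizing concrete: adjust rounds payloadSize + HEADER_SIZE up to the next power of two, so a 100-byte payload with a 16-byte header (assuming HEADER_SIZE is 16 in this configuration) lands in a 128-byte block and can grow to 112 payload bytes without moving. Below is a hedged sketch of the scratch-object pattern this reallocate enables, mirroring what Array#filter-style code does; the function collectEvens and its doubling policy are illustrative, not stdlib code:

// Hedged sketch: grow an unregistered scratch buffer, registering only the
// final object so intermediate resizes never enter GC'ed userland.
function collectEvens(src: usize, count: i32): usize {
  var cap = 8;
  var buf = runtime.allocate(<usize>cap << 2); // scratch, not yet registered
  var len = 0;
  for (let i = 0; i < count; ++i) {
    let v = load<i32>(src + (<usize>i << 2));
    if (v & 1) continue;
    if (len == cap) buf = reallocate(buf, <usize>(cap <<= 1) << 2); // may move, freeing the old block
    store<i32>(buf + (<usize>len << 2), v);
    ++len;
  }
  buf = reallocate(buf, <usize>len << 2);                    // shrink: header update only
  return runtime.register(buf, __runtime_id<ArrayBuffer>()); // now a live ArrayBuffer
}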