const ALIGN_LOG2: usize = 3;
const ALIGN_SIZE: usize = 1 << ALIGN_LOG2;
const ALIGN_MASK: usize = ALIGN_SIZE - 1;

let HEAP_OFFSET: usize = HEAP_START; // HEAP_START is a constant generated by the compiler

@global()
@struct()
class Heap {

  static allocate(size: usize): usize {
    const ptr: usize = HEAP_OFFSET;
    assert(ptr + size <= (current_memory() << 16));
    if (((HEAP_OFFSET += size) & ALIGN_MASK) != 0) // align next offset
      HEAP_OFFSET = (HEAP_OFFSET | ALIGN_MASK) + 1;
    return ptr;
  }

  static dispose(ptr: usize): void {
    // just a big chunk of non-disposable memory for now
  }

  static get used(): usize {
    return HEAP_OFFSET - HEAP_START;
  }

  static get free(): usize {
    return (current_memory() << 16) - HEAP_OFFSET;
  }

  static get size(): usize {
    return (current_memory() << 16) - HEAP_START;
  }

  static copy(dest: usize, src: usize, n: usize): usize {
    assert(dest >= HEAP_START);

    // the following is based on musl's implementation of memcpy
    let dst: usize = dest;
    let w: u32, x: u32;

    // copy 1 byte each until src is aligned to 4 bytes
    while (n != 0 && src % 4 != 0) {
      store<u8>(dst++, load<u8>(src++));
      n--;
    }

    // if dst is aligned to 4 bytes as well, copy 4 bytes each
    if (dst % 4 == 0) {
      while (n >= 16) {
        store<u32>(dst     , load<u32>(src     ));
        store<u32>(dst +  4, load<u32>(src +  4));
        store<u32>(dst +  8, load<u32>(src +  8));
        store<u32>(dst + 12, load<u32>(src + 12));
        src += 16; dst += 16; n -= 16;
      }
      if (n & 8) {
        store<u32>(dst    , load<u32>(src    ));
        store<u32>(dst + 4, load<u32>(src + 4));
        dst += 8; src += 8;
      }
      if (n & 4) {
        store<u32>(dst, load<u32>(src));
        dst += 4; src += 4;
      }
      if (n & 2) { // drop to 2 bytes each
        store<u16>(dst, load<u16>(src));
        dst += 2; src += 2;
      }
      if (n & 1) { // drop to 1 byte
        store<u8>(dst++, load<u8>(src++));
      }
      return dest;
    }

    // if dst is not aligned to 4 bytes, use alternating shifts to copy 4 bytes each
    // doing shifts is faster when copying enough bytes (here: 32 or more)
    if (n >= 32) {
      switch (dst % 4) { // known to be != 0
        case 1:
          w = load<u32>(src);
          store<u8>(dst++, load<u8>(src++));
          store<u8>(dst++, load<u8>(src++));
          store<u8>(dst++, load<u8>(src++));
          n -= 3;
          while (n >= 17) {
            x = load<u32>(src + 1);
            store<u32>(dst, w >> 24 | x << 8);
            w = load<u32>(src + 5);
            store<u32>(dst + 4, x >> 24 | w << 8);
            x = load<u32>(src + 9);
            store<u32>(dst + 8, w >> 24 | x << 8);
            w = load<u32>(src + 13);
            store<u32>(dst + 12, x >> 24 | w << 8);
            src += 16; dst += 16; n -= 16;
          }
          break;
        case 2:
          w = load<u32>(src);
          store<u8>(dst++, load<u8>(src++));
          store<u8>(dst++, load<u8>(src++));
          n -= 2;
          while (n >= 18) {
            x = load<u32>(src + 2);
            store<u32>(dst, w >> 16 | x << 16);
            w = load<u32>(src + 6);
            store<u32>(dst + 4, x >> 16 | w << 16);
            x = load<u32>(src + 10);
            store<u32>(dst + 8, w >> 16 | x << 16);
            w = load<u32>(src + 14);
            store<u32>(dst + 12, x >> 16 | w << 16);
            src += 16; dst += 16; n -= 16;
          }
          break;
        case 3:
          w = load<u32>(src);
          store<u8>(dst++, load<u8>(src++));
          n -= 1;
          while (n >= 19) {
            x = load<u32>(src + 3);
            store<u32>(dst, w >> 8 | x << 24);
            w = load<u32>(src + 7);
            store<u32>(dst + 4, x >> 8 | w << 24);
            x = load<u32>(src + 11);
            store<u32>(dst + 8, w >> 8 | x << 24);
            w = load<u32>(src + 15);
            store<u32>(dst + 12, x >> 8 | w << 24);
            src += 16; dst += 16; n -= 16;
          }
          break;
      }
    }

    // copy remaining bytes one by one
    if (n & 16) {
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
    }
    if (n & 8) {
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
    }
    if (n & 4) {
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
    }
    if (n & 2) {
      store<u8>(dst++, load<u8>(src++)); store<u8>(dst++, load<u8>(src++));
    }
    if (n & 1) {
      store<u8>(dst++, load<u8>(src++));
    }
    return dest;
  }

  private constructor() {}
}
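For illustration, here is a minimal usage sketch of the allocator. The exported example function below is made up for this purpose; it assumes the compiler provides HEAP_START and that the module starts with enough initial memory for the two allocations.

// Hypothetical usage sketch (not part of the library): bump-allocate two blocks
// and copy one into the other with the musl-style copy routine above.
export function example(): u32 {
  const a: usize = Heap.allocate(32); // 32 bytes, next offset rounded up to 8-byte alignment
  const b: usize = Heap.allocate(32);
  store<u32>(a, 0x12345678);          // write a marker into the first block
  Heap.copy(b, a, 32);                // copy(dest, src, n) behaves like memcpy and returns dest
  return load<u32>(b);                // reads back 0x12345678
}

Note the alignment arithmetic in allocate: if HEAP_OFFSET ends up at, say, 13 after an allocation, (13 | ALIGN_MASK) + 1 = 16, i.e. the offset is rounded up to the next multiple of ALIGN_SIZE = 8, so every returned pointer is 8-byte aligned.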