diff --git a/src/ast.ts b/src/ast.ts index 560d1a93..b0fdd9db 100644 --- a/src/ast.ts +++ b/src/ast.ts @@ -1350,6 +1350,26 @@ export class CallExpression extends Expression { typeArguments: CommonTypeNode[] | null; /** Provided arguments. */ arguments: Expression[]; + + /** Gets the type arguments range for reporting. */ + get typeArgumentsRange(): Range { + var typeArguments = this.typeArguments; + var numTypeArguments: i32; + if (typeArguments && (numTypeArguments = typeArguments.length)) { + return Range.join(typeArguments[0].range, typeArguments[numTypeArguments - 1].range); + } + return this.expression.range; + } + + /** Gets the arguments range for reporting. */ + get argumentsRange(): Range { + var args = this.arguments; + var numArguments = args.length; + if (numArguments) { + return Range.join(args[0].range, args[numArguments - 1].range); + } + return this.expression.range; + } } /** Represents a class expression using the 'class' keyword. */ diff --git a/src/builtins.ts b/src/builtins.ts index b9da3019..e18d8ac5 100644 --- a/src/builtins.ts +++ b/src/builtins.ts @@ -227,54 +227,54 @@ export namespace BuiltinSymbols { export const i64_atomic_store16 = "~lib/builtins/i64.atomic.store16"; export const i64_atomic_store32 = "~lib/builtins/i64.atomic.store32"; export const i64_atomic_store = "~lib/builtins/i64.atomic.store"; - export const i32_atomic_rmw8_u_add = "~lib/builtins/i32.atomic.rmw8_u.add"; - export const i32_atomic_rmw16_u_add = "~lib/builtins/i32.atomic.rmw16_u.add"; + export const i32_atomic_rmw8_add_u = "~lib/builtins/i32.atomic.rmw8.add_u"; + export const i32_atomic_rmw16_add_u = "~lib/builtins/i32.atomic.rmw16.add_u"; export const i32_atomic_rmw_add = "~lib/builtins/i32.atomic.rmw.add"; - export const i64_atomic_rmw8_u_add = "~lib/builtins/i64.atomic.rmw8_u.add"; - export const i64_atomic_rmw16_u_add = "~lib/builtins/i64.atomic.rmw16_u.add"; - export const i64_atomic_rmw32_u_add = "~lib/builtins/i64.atomic.rmw32_u.add"; + export const i64_atomic_rmw8_add_u = "~lib/builtins/i64.atomic.rmw8.add_u"; + export const i64_atomic_rmw16_add_u = "~lib/builtins/i64.atomic.rmw16.add_u"; + export const i64_atomic_rmw32_add_u = "~lib/builtins/i64.atomic.rmw32.add_u"; export const i64_atomic_rmw_add = "~lib/builtins/i64.atomic.rmw.add"; - export const i32_atomic_rmw8_u_sub = "~lib/builtins/i32.atomic.rmw8_u.sub"; - export const i32_atomic_rmw16_u_sub = "~lib/builtins/i32.atomic.rmw16_u.sub"; + export const i32_atomic_rmw8_sub_u = "~lib/builtins/i32.atomic.rmw8.sub_u"; + export const i32_atomic_rmw16_sub_u = "~lib/builtins/i32.atomic.rmw16.sub_u"; export const i32_atomic_rmw_sub = "~lib/builtins/i32.atomic.rmw.sub"; - export const i64_atomic_rmw8_u_sub = "~lib/builtins/i64.atomic.rmw8_u.sub"; - export const i64_atomic_rmw16_u_sub = "~lib/builtins/i64.atomic.rmw16_u.sub"; - export const i64_atomic_rmw32_u_sub = "~lib/builtins/i64.atomic.rmw32_u.sub"; + export const i64_atomic_rmw8_sub_u = "~lib/builtins/i64.atomic.rmw8.sub_u"; + export const i64_atomic_rmw16_sub_u = "~lib/builtins/i64.atomic.rmw16.sub_u"; + export const i64_atomic_rmw32_sub_u = "~lib/builtins/i64.atomic.rmw32.sub_u"; export const i64_atomic_rmw_sub = "~lib/builtins/i64.atomic.rmw.sub"; - export const i32_atomic_rmw8_u_and = "~lib/builtins/i32.atomic.rmw8_u.and"; - export const i32_atomic_rmw16_u_and = "~lib/builtins/i32.atomic.rmw16_u.and"; + export const i32_atomic_rmw8_and_u = "~lib/builtins/i32.atomic.rmw8.and_u"; + export const i32_atomic_rmw16_and_u = "~lib/builtins/i32.atomic.rmw16.and_u"; export const 
i32_atomic_rmw_and = "~lib/builtins/i32.atomic.rmw.and"; - export const i64_atomic_rmw8_u_and = "~lib/builtins/i64.atomic.rmw8_u.and"; - export const i64_atomic_rmw16_u_and = "~lib/builtins/i64.atomic.rmw16_u.and"; - export const i64_atomic_rmw32_u_and = "~lib/builtins/i64.atomic.rmw32_u.and"; + export const i64_atomic_rmw8_and_u = "~lib/builtins/i64.atomic.rmw8.and_u"; + export const i64_atomic_rmw16_and_u = "~lib/builtins/i64.atomic.rmw16.and_u"; + export const i64_atomic_rmw32_and_u = "~lib/builtins/i64.atomic.rmw32.and_u"; export const i64_atomic_rmw_and = "~lib/builtins/i64.atomic.rmw.and"; - export const i32_atomic_rmw8_u_or = "~lib/builtins/i32.atomic.rmw8_u.or"; - export const i32_atomic_rmw16_u_or = "~lib/builtins/i32.atomic.rmw16_u.or"; + export const i32_atomic_rmw8_or_u = "~lib/builtins/i32.atomic.rmw8.or_u"; + export const i32_atomic_rmw16_or_u = "~lib/builtins/i32.atomic.rmw16.or_u"; export const i32_atomic_rmw_or = "~lib/builtins/i32.atomic.rmw.or"; - export const i64_atomic_rmw8_u_or = "~lib/builtins/i64.atomic.rmw8_u.or"; - export const i64_atomic_rmw16_u_or = "~lib/builtins/i64.atomic.rmw16_u.or"; - export const i64_atomic_rmw32_u_or = "~lib/builtins/i64.atomic.rmw32_u.or"; + export const i64_atomic_rmw8_or_u = "~lib/builtins/i64.atomic.rmw8.or_u"; + export const i64_atomic_rmw16_or_u = "~lib/builtins/i64.atomic.rmw16.or_u"; + export const i64_atomic_rmw32_or_u = "~lib/builtins/i64.atomic.rmw32.or_u"; export const i64_atomic_rmw_or = "~lib/builtins/i64.atomic.rmw.or"; - export const i32_atomic_rmw8_u_xor = "~lib/builtins/i32.atomic.rmw8_u.xor"; - export const i32_atomic_rmw16_u_xor = "~lib/builtins/i32.atomic.rmw16_u.xor"; + export const i32_atomic_rmw8_xor_u = "~lib/builtins/i32.atomic.rmw8.xor_u"; + export const i32_atomic_rmw16_xor_u = "~lib/builtins/i32.atomic.rmw16.xor_u"; export const i32_atomic_rmw_xor = "~lib/builtins/i32.atomic.rmw.xor"; - export const i64_atomic_rmw8_u_xor = "~lib/builtins/i64.atomic.rmw8_u.xor"; - export const i64_atomic_rmw16_u_xor = "~lib/builtins/i64.atomic.rmw16_u.xor"; - export const i64_atomic_rmw32_u_xor = "~lib/builtins/i64.atomic.rmw32_u.xor"; + export const i64_atomic_rmw8_xor_u = "~lib/builtins/i64.atomic.rmw8.xor_u"; + export const i64_atomic_rmw16_xor_u = "~lib/builtins/i64.atomic.rmw16.xor_u"; + export const i64_atomic_rmw32_xor_u = "~lib/builtins/i64.atomic.rmw32.xor_u"; export const i64_atomic_rmw_xor = "~lib/builtins/i64.atomic.rmw.xor"; - export const i32_atomic_rmw8_u_xchg = "~lib/builtins/i32.atomic.rmw8_u.xchg"; - export const i32_atomic_rmw16_u_xchg = "~lib/builtins/i32.atomic.rmw16_u.xchg"; + export const i32_atomic_rmw8_xchg_u = "~lib/builtins/i32.atomic.rmw8.xchg_u"; + export const i32_atomic_rmw16_xchg_u = "~lib/builtins/i32.atomic.rmw16.xchg_u"; export const i32_atomic_rmw_xchg = "~lib/builtins/i32.atomic.rmw.xchg"; - export const i64_atomic_rmw8_u_xchg = "~lib/builtins/i64.atomic.rmw8_u.xchg"; - export const i64_atomic_rmw16_u_xchg = "~lib/builtins/i64.atomic.rmw16_u.xchg"; - export const i64_atomic_rmw32_u_xchg = "~lib/builtins/i64.atomic.rmw32_u.xchg"; + export const i64_atomic_rmw8_xchg_u = "~lib/builtins/i64.atomic.rmw8.xchg_u"; + export const i64_atomic_rmw16_xchg_u = "~lib/builtins/i64.atomic.rmw16.xchg_u"; + export const i64_atomic_rmw32_xchg_u = "~lib/builtins/i64.atomic.rmw32.xchg_u"; export const i64_atomic_rmw_xchg = "~lib/builtins/i64.atomic.rmw.xchg"; - export const i32_atomic_rmw8_u_cmpxchg = "~lib/builtins/i32.atomic.rmw8_u.cmpxchg"; - export const i32_atomic_rmw16_u_cmpxchg = 
"~lib/builtins/i32.atomic.rmw16_u.cmpxchg"; + export const i32_atomic_rmw8_cmpxchg_u = "~lib/builtins/i32.atomic.rmw8.cmpxchg_u"; + export const i32_atomic_rmw16_cmpxchg_u = "~lib/builtins/i32.atomic.rmw16.cmpxchg_u"; export const i32_atomic_rmw_cmpxchg = "~lib/builtins/i32.atomic.rmw.cmpxchg"; - export const i64_atomic_rmw8_u_cmpxchg = "~lib/builtins/i64.atomic.rmw8_u.cmpxchg"; - export const i64_atomic_rmw16_u_cmpxchg = "~lib/builtins/i64.atomic.rmw16_u.cmpxchg"; - export const i64_atomic_rmw32_u_cmpxchg = "~lib/builtins/i64.atomic.rmw32_u.cmpxchg"; + export const i64_atomic_rmw8_cmpxchg_u = "~lib/builtins/i64.atomic.rmw8.cmpxchg_u"; + export const i64_atomic_rmw16_cmpxchg_u = "~lib/builtins/i64.atomic.rmw16.cmpxchg_u"; + export const i64_atomic_rmw32_cmpxchg_u = "~lib/builtins/i64.atomic.rmw32.cmpxchg_u"; export const i64_atomic_rmw_cmpxchg = "~lib/builtins/i64.atomic.rmw.cmpxchg"; export const i32_wait = "~lib/builtins/i32.wait"; export const i64_wait = "~lib/builtins/i64.wait"; @@ -483,7 +483,8 @@ export function compileCall( typeArguments: Type[] | null, operands: Expression[], contextualType: Type, - reportNode: CallExpression + reportNode: CallExpression, + isAsm: bool = false ): ExpressionRef { var module = compiler.module; @@ -2151,33 +2152,35 @@ export function compileCall( compiler.currentType = Type.void; return module.createStore(typeArguments[0].byteSize, arg0, arg1, type.toNativeType(), offset, align); } - case BuiltinSymbols.atomic_load: { + case BuiltinSymbols.atomic_load: { // load(offset: usize, immOffset?) -> T if (!compiler.options.hasFeature(Feature.THREADS)) break; - if (operands.length < 1 || operands.length > 2) { - if (!(typeArguments && typeArguments.length == 1)) { - compiler.error( - DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" - ); - } - if (operands.length < 1) { - compiler.error( - DiagnosticCode.Expected_at_least_0_arguments_but_got_1, - reportNode.range, "1", operands.length.toString(10) - ); - } else { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "2", operands.length.toString(10) - ); - } - return module.createUnreachable(); - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { - if (typeArguments && typeArguments.length) compiler.currentType = typeArguments[0]; compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? typeArguments.length.toString() : "0" + ); + hasError = true; + } + if (operands.length < 1) { + compiler.error( + DiagnosticCode.Expected_at_least_0_arguments_but_got_1, + reportNode.argumentsRange, "1", operands.length.toString(10) + ); + hasError = true; + } else if (operands.length > 2) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "2", operands.length.toString(10) + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let loadType = typeArguments![0]; + if (!loadType.is(TypeFlags.INTEGER)) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange ); return module.createUnreachable(); } @@ -2188,48 +2191,49 @@ export function compileCall( WrapMode.NONE ); let offset = operands.length == 2 ? 
evaluateImmediateOffset(compiler, operands[1]) : 0; // reports - if (offset < 0) { // reported in evaluateImmediateOffset - return module.createUnreachable(); - } - compiler.currentType = typeArguments[0]; + if (offset < 0) return module.createUnreachable(); + compiler.currentType = loadType; return module.createAtomicLoad( - typeArguments[0].byteSize, + loadType.byteSize, arg0, - typeArguments[0].is(TypeFlags.INTEGER) && + loadType.is(TypeFlags.INTEGER) && contextualType.is(TypeFlags.INTEGER) && - contextualType.size > typeArguments[0].size + contextualType.size > loadType.size ? (compiler.currentType = contextualType).toNativeType() - : (compiler.currentType = typeArguments[0]).toNativeType(), + : (compiler.currentType = loadType).toNativeType(), offset ); } - case BuiltinSymbols.atomic_store: { // store(offset: usize, value: *, immOffset?, immAlign?) -> void + case BuiltinSymbols.atomic_store: { // store(offset: usize, value: *, immOffset?) -> void if (!compiler.options.hasFeature(Feature.THREADS)) break; compiler.currentType = Type.void; - if (operands.length < 2 || operands.length > 3) { - if (!(typeArguments && typeArguments.length == 1)) { - compiler.error( - DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" - ); - } - if (operands.length < 2) { - compiler.error( - DiagnosticCode.Expected_at_least_0_arguments_but_got_1, - reportNode.range, "2", operands.length.toString(10) - ); - } else { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "3", operands.length.toString(10) - ); - } - return module.createUnreachable(); - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? typeArguments.length.toString(10) : "0" + ); + hasError = true; + } + if (operands.length < 2) { + compiler.error( + DiagnosticCode.Expected_at_least_0_arguments_but_got_1, + reportNode.argumentsRange, "2", operands.length.toString(10) + ); + hasError = true; + } else if (operands.length > 3) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "3", operands.length.toString(10) + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let storeType = typeArguments![0]; + if (!storeType.is(TypeFlags.INTEGER) || storeType.size < 8) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange ); return module.createUnreachable(); } @@ -2241,68 +2245,69 @@ export function compileCall( ); arg1 = compiler.compileExpression( operands[1], - typeArguments[0], - typeArguments[0].is(TypeFlags.INTEGER) + storeType, + storeType.is(TypeFlags.INTEGER) ? 
ConversionKind.NONE // no need to convert to small int (but now might result in a float) : ConversionKind.IMPLICIT, WrapMode.NONE ); - let type: Type; + let valueType = storeType; if ( - typeArguments[0].is(TypeFlags.INTEGER) && + storeType.is(TypeFlags.INTEGER) && ( - !compiler.currentType.is(TypeFlags.INTEGER) || // float to int - compiler.currentType.size < typeArguments[0].size // int to larger int (clear garbage bits) + !compiler.currentType.is(TypeFlags.INTEGER) || // float to int + compiler.currentType.size < storeType.size // int to larger int (clear garbage bits) ) ) { arg1 = compiler.convertExpression( arg1, - compiler.currentType, typeArguments[0], + compiler.currentType, storeType, ConversionKind.IMPLICIT, WrapMode.NONE, // still clears garbage bits operands[1] ); - type = typeArguments[0]; } else { - type = compiler.currentType; + valueType = compiler.currentType; } let offset = operands.length == 3 ? evaluateImmediateOffset(compiler, operands[2]) : 0; // reports if (offset < 0) return module.createUnreachable(); compiler.currentType = Type.void; - return module.createAtomicStore(typeArguments[0].byteSize, arg0, arg1, type.toNativeType(), offset); + return module.createAtomicStore(storeType.byteSize, arg0, arg1, valueType.toNativeType(), offset); } case BuiltinSymbols.atomic_add: // add(ptr, value: T, immOffset?: usize): T; case BuiltinSymbols.atomic_sub: // sub(ptr, value: T, immOffset?: usize): T; case BuiltinSymbols.atomic_and: // and(ptr, value: T, immOffset?: usize): T; case BuiltinSymbols.atomic_or: // or(ptr, value: T, immOffset?: usize): T; case BuiltinSymbols.atomic_xor: // xor(ptr, value: T, immOffset?: usize): T; - case BuiltinSymbols.atomic_xchg: // xchg(ptr, value, immOffset?: usize): T; - { + case BuiltinSymbols.atomic_xchg: { // xchg(ptr, value, immOffset?: usize): T; if (!compiler.options.hasFeature(Feature.THREADS)) break; - if (operands.length < 2 || operands.length > 3) { - if (!(typeArguments && typeArguments.length == 1)) { - compiler.error( - DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" - ); - } - if (operands.length < 2) { - compiler.error( - DiagnosticCode.Expected_at_least_0_arguments_but_got_1, - reportNode.range, "2", operands.length.toString(10) - ); - } else { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "3", operands.length.toString(10) - ); - } - return module.createUnreachable(); - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? 
typeArguments.length.toString(10) : "0" + ); + hasError = true; + } + if (operands.length < 2) { + compiler.error( + DiagnosticCode.Expected_at_least_0_arguments_but_got_1, + reportNode.argumentsRange, "2", operands.length.toString(10) + ); + hasError = true; + } else if (operands.length > 3) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "3", operands.length.toString(10) + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let resultType = typeArguments![0]; + if (!resultType.is(TypeFlags.INTEGER) || resultType.size < 8) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange ); return module.createUnreachable(); } @@ -2314,37 +2319,35 @@ export function compileCall( ); arg1 = compiler.compileExpression( operands[1], - typeArguments[0], - typeArguments[0].is(TypeFlags.INTEGER) + resultType, + resultType.is(TypeFlags.INTEGER) ? ConversionKind.NONE // no need to convert to small int (but now might result in a float) : ConversionKind.IMPLICIT, WrapMode.NONE ); - - let type: Type; + let valueType = resultType; if ( - typeArguments[0].is(TypeFlags.INTEGER) && + resultType.is(TypeFlags.INTEGER) && ( - !compiler.currentType.is(TypeFlags.INTEGER) || // float to int - compiler.currentType.size < typeArguments[0].size // int to larger int (clear garbage bits) + !compiler.currentType.is(TypeFlags.INTEGER) || // float to int + compiler.currentType.size < resultType.size // int to larger int (clear garbage bits) ) ) { arg1 = compiler.convertExpression( arg1, - compiler.currentType, typeArguments[0], + compiler.currentType, resultType, ConversionKind.IMPLICIT, WrapMode.NONE, // still clears garbage bits operands[1] ); - type = typeArguments[0]; } else { - type = compiler.currentType; + valueType = compiler.currentType; } - let offset = operands.length == 3 ? evaluateImmediateOffset(compiler, operands[2]) : 0; // reports if (offset < 0) return module.createUnreachable(); - let RMWOp: AtomicRMWOp | null = null; + let RMWOp: AtomicRMWOp; switch (prototype.internalName) { + default: assert(false); case BuiltinSymbols.atomic_add: { RMWOp = AtomicRMWOp.Add; break; } case BuiltinSymbols.atomic_sub: { RMWOp = AtomicRMWOp.Sub; break; } case BuiltinSymbols.atomic_and: { RMWOp = AtomicRMWOp.And; break; } @@ -2352,45 +2355,40 @@ export function compileCall( case BuiltinSymbols.atomic_xor: { RMWOp = AtomicRMWOp.Xor; break; } case BuiltinSymbols.atomic_xchg: { RMWOp = AtomicRMWOp.Xchg; break; } } - compiler.currentType = typeArguments[0]; - if (RMWOp !== null) { - return module.createAtomicRMW( - RMWOp, typeArguments[0].byteSize, offset, arg0, arg1, type.toNativeType() - ); - } else { - compiler.error( - DiagnosticCode.Operation_not_supported, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" - ); - return module.createUnreachable(); - } + compiler.currentType = resultType; + return module.createAtomicRMW( + RMWOp, resultType.byteSize, offset, arg0, arg1, valueType.toNativeType() + ); } case BuiltinSymbols.atomic_cmpxchg: { // cmpxchg(ptr: usize, expected: T, replacement: T, cOff?: usize): T if (!compiler.options.hasFeature(Feature.THREADS)) break; - if (operands.length < 3 || operands.length > 4) { - if (!(typeArguments && typeArguments.length == 1)) { - compiler.error( - DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? 
typeArguments.length.toString(10) : "0" - ); - } - if (operands.length < 3) { - compiler.error( - DiagnosticCode.Expected_at_least_0_arguments_but_got_1, - reportNode.range, "2", operands.length.toString(10) - ); - } else { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "3", operands.length.toString(10) - ); - } - return module.createUnreachable(); - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? typeArguments.length.toString() : "0" + ); + hasError = true; + } + if (operands.length < 3) { + compiler.error( + DiagnosticCode.Expected_at_least_0_arguments_but_got_1, + reportNode.argumentsRange, "3", operands.length.toString() + ); + hasError = true; + } else if (operands.length > 4) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "4", operands.length.toString() + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let resultType = typeArguments![0]; + if (!resultType.is(TypeFlags.INTEGER) || resultType.size < 8) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange ); return module.createUnreachable(); } @@ -2402,77 +2400,76 @@ export function compileCall( ); arg1 = compiler.compileExpression( operands[1], - typeArguments[0], - typeArguments[0].is(TypeFlags.INTEGER) + resultType, + resultType.is(TypeFlags.INTEGER) ? ConversionKind.NONE // no need to convert to small int (but now might result in a float) : ConversionKind.IMPLICIT, WrapMode.NONE ); arg2 = compiler.compileExpression( operands[2], - typeArguments[0], - typeArguments[0].is(TypeFlags.INTEGER) - ? ConversionKind.NONE // no need to convert to small int (but now might result in a float) - : ConversionKind.IMPLICIT, + compiler.currentType, + ConversionKind.IMPLICIT, WrapMode.NONE ); - - let type: Type; + let valueType = resultType; if ( - typeArguments[0].is(TypeFlags.INTEGER) && + resultType.is(TypeFlags.INTEGER) && ( - !compiler.currentType.is(TypeFlags.INTEGER) || // float to int - compiler.currentType.size < typeArguments[0].size // int to larger int (clear garbage bits) + !compiler.currentType.is(TypeFlags.INTEGER) || // float to int + compiler.currentType.size < resultType.size // int to larger int (clear garbage bits) ) ) { arg1 = compiler.convertExpression( arg1, - compiler.currentType, typeArguments[0], + compiler.currentType, resultType, ConversionKind.IMPLICIT, WrapMode.NONE, // still clears garbage bits operands[1] ); arg2 = compiler.convertExpression( arg2, - compiler.currentType, typeArguments[0], + compiler.currentType, resultType, ConversionKind.IMPLICIT, WrapMode.NONE, // still clears garbage bits operands[2] ); - type = typeArguments[0]; } else { - type = compiler.currentType; + valueType = compiler.currentType; } - let offset = operands.length == 4 ? 
evaluateImmediateOffset(compiler, operands[3]) : 0; // reports if (offset < 0) return module.createUnreachable(); - compiler.currentType = typeArguments[0]; + compiler.currentType = resultType; return module.createAtomicCmpxchg( - typeArguments[0].byteSize, offset, arg0, arg1, arg2, type.toNativeType() + resultType.byteSize, offset, arg0, arg1, arg2, valueType.toNativeType() ); } case BuiltinSymbols.atomic_wait: { // wait(ptr: usize, expected:T, timeout: i64): i32; if (!compiler.options.hasFeature(Feature.THREADS)) break; - let hasError = typeArguments == null; - if (operands.length != 3) { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "3", operands.length.toString(10) - ); - hasError = true; - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? typeArguments.length.toString(10) : "0" ); hasError = true; } - - if (!typeArguments || hasError) { + if (operands.length != 3) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "3", operands.length.toString(10) + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let valueType = typeArguments![0]; + if (!valueType.is(TypeFlags.INTEGER) || valueType.size < 32) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange + ); return module.createUnreachable(); } - arg0 = compiler.compileExpression( operands[0], compiler.options.usizeType, @@ -2481,10 +2478,8 @@ export function compileCall( ); arg1 = compiler.compileExpression( operands[1], - typeArguments[0], - typeArguments[0].is(TypeFlags.INTEGER) - ? ConversionKind.NONE // no need to convert to small int (but now might result in a float) - : ConversionKind.IMPLICIT, + valueType, + ConversionKind.IMPLICIT, WrapMode.NONE ); arg2 = compiler.compileExpression( @@ -2493,57 +2488,35 @@ export function compileCall( ConversionKind.IMPLICIT, WrapMode.NONE ); - - let type: Type = typeArguments[0]; - if ( - typeArguments[0].is(TypeFlags.INTEGER) && - ( - !compiler.currentType.is(TypeFlags.INTEGER) || // float to int - compiler.currentType.size < typeArguments[0].size // int to larger int (clear garbage bits) - ) - ) { - arg1 = compiler.convertExpression( - arg1, - compiler.currentType, typeArguments[0], - ConversionKind.IMPLICIT, - WrapMode.NONE, // still clears garbage bits - operands[1] - ); - arg2 = compiler.convertExpression( - arg2, - compiler.currentType, typeArguments[0], - ConversionKind.IMPLICIT, - WrapMode.NONE, // still clears garbage bits - operands[2] - ); - } - - return module.createAtomicWait( - arg0, arg1, arg2, type.toNativeType() - ); + compiler.currentType = Type.i32; + return module.createAtomicWait(arg0, arg1, arg2, valueType.toNativeType()); } case BuiltinSymbols.atomic_notify: { // notify(ptr: usize, count: u32): u32; if (!compiler.options.hasFeature(Feature.THREADS)) break; - let hasError = typeArguments == null; - if (operands.length != 2) { - compiler.error( - DiagnosticCode.Expected_0_arguments_but_got_1, - reportNode.range, "2", operands.length.toString(10) - ); - hasError = true; - } + let hasError = false; if (!(typeArguments && typeArguments.length == 1)) { compiler.error( DiagnosticCode.Expected_0_type_arguments_but_got_1, - reportNode.range, "1", typeArguments ? 
typeArguments.length.toString(10) : "0" + reportNode.typeArgumentsRange, "1", typeArguments ? typeArguments.length.toString(10) : "0" ); hasError = true; } - - if (!typeArguments || hasError) { + if (operands.length != 2) { + compiler.error( + DiagnosticCode.Expected_0_arguments_but_got_1, + reportNode.argumentsRange, "2", operands.length.toString(10) + ); + hasError = true; + } + if (hasError) return module.createUnreachable(); + let valueType = typeArguments![0]; + if (!valueType.is(TypeFlags.INTEGER) || valueType.size < 32) { + compiler.error( + DiagnosticCode.Operation_not_supported, + reportNode.typeArgumentsRange + ); return module.createUnreachable(); } - arg0 = compiler.compileExpression( operands[0], compiler.options.usizeType, @@ -2552,14 +2525,12 @@ export function compileCall( ); arg1 = compiler.compileExpression( operands[1], - Type.i32, + valueType, ConversionKind.IMPLICIT, WrapMode.NONE ); - - return module.createAtomicWake( - arg0, arg1 - ); + compiler.currentType = Type.i32; + return module.createAtomicWake(arg0, arg1); } case BuiltinSymbols.sizeof: { // sizeof() -> usize compiler.currentType = compiler.options.usizeType; @@ -5539,16 +5510,16 @@ function tryDeferASM( case BuiltinSymbols.f32_trunc: return deferASM(BuiltinSymbols.trunc, compiler, Type.f32, operands, Type.f32, reportNode); case BuiltinSymbols.f64_trunc: return deferASM(BuiltinSymbols.trunc, compiler, Type.f64, operands, Type.f64, reportNode); case BuiltinSymbols.i32_load8_s: return deferASM(BuiltinSymbols.load, compiler, Type.i8, operands, Type.i32, reportNode); - case BuiltinSymbols.i32_load8_u: return deferASM(BuiltinSymbols.load, compiler, Type.u8, operands, Type.u32, reportNode); + case BuiltinSymbols.i32_load8_u: return deferASM(BuiltinSymbols.load, compiler, Type.u8, operands, Type.i32, reportNode); case BuiltinSymbols.i32_load16_s: return deferASM(BuiltinSymbols.load, compiler, Type.i16, operands, Type.i32, reportNode); - case BuiltinSymbols.i32_load16_u: return deferASM(BuiltinSymbols.load, compiler, Type.u16, operands, Type.u32, reportNode); + case BuiltinSymbols.i32_load16_u: return deferASM(BuiltinSymbols.load, compiler, Type.u16, operands, Type.i32, reportNode); case BuiltinSymbols.i32_load: return deferASM(BuiltinSymbols.load, compiler, Type.i32, operands, Type.i32, reportNode); case BuiltinSymbols.i64_load8_s: return deferASM(BuiltinSymbols.load, compiler, Type.i8, operands, Type.i64, reportNode); - case BuiltinSymbols.i64_load8_u: return deferASM(BuiltinSymbols.load, compiler, Type.u8, operands, Type.u64, reportNode); + case BuiltinSymbols.i64_load8_u: return deferASM(BuiltinSymbols.load, compiler, Type.u8, operands, Type.i64, reportNode); case BuiltinSymbols.i64_load16_s: return deferASM(BuiltinSymbols.load, compiler, Type.i16, operands, Type.i64, reportNode); - case BuiltinSymbols.i64_load16_u: return deferASM(BuiltinSymbols.load, compiler, Type.u16, operands, Type.u64, reportNode); + case BuiltinSymbols.i64_load16_u: return deferASM(BuiltinSymbols.load, compiler, Type.u16, operands, Type.i64, reportNode); case BuiltinSymbols.i64_load32_s: return deferASM(BuiltinSymbols.load, compiler, Type.i32, operands, Type.i64, reportNode); - case BuiltinSymbols.i64_load32_u: return deferASM(BuiltinSymbols.load, compiler, Type.u32, operands, Type.u64, reportNode); + case BuiltinSymbols.i64_load32_u: return deferASM(BuiltinSymbols.load, compiler, Type.u32, operands, Type.i64, reportNode); case BuiltinSymbols.i64_load: return deferASM(BuiltinSymbols.load, compiler, Type.i64, operands, Type.i64, reportNode); 
case BuiltinSymbols.f32_load: return deferASM(BuiltinSymbols.load, compiler, Type.f32, operands, Type.f32, reportNode); case BuiltinSymbols.f64_load: return deferASM(BuiltinSymbols.load, compiler, Type.f64, operands, Type.f64, reportNode); @@ -5565,82 +5536,82 @@ function tryDeferASM( if (compiler.options.hasFeature(Feature.THREADS)) { switch (prototype.internalName) { - case BuiltinSymbols.i32_atomic_load8_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_load16_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u16, operands, Type.u32, reportNode); + case BuiltinSymbols.i32_atomic_load8_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_load16_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u16, operands, Type.i32, reportNode); case BuiltinSymbols.i32_atomic_load: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.i32, operands, Type.i32, reportNode); - case BuiltinSymbols.i64_atomic_load8_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_load16_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_load32_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u32, operands, Type.u64, reportNode); + case BuiltinSymbols.i64_atomic_load8_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_load16_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_load32_u: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.u32, operands, Type.i64, reportNode); case BuiltinSymbols.i64_atomic_load: return deferASM(BuiltinSymbols.atomic_load, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_store8: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i8, operands, Type.i32, reportNode); - case BuiltinSymbols.i32_atomic_store16: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_store8: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_store16: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.u16, operands, Type.i32, reportNode); case BuiltinSymbols.i32_atomic_store: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i32, operands, Type.i32, reportNode); - case BuiltinSymbols.i64_atomic_store8: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i8, operands, Type.i64, reportNode); - case BuiltinSymbols.i64_atomic_store16: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i16, operands, Type.i64, reportNode); - case BuiltinSymbols.i64_atomic_store32: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_store8: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_store16: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_store32: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.u32, operands, Type.i64, 
reportNode); case BuiltinSymbols.i64_atomic_store: return deferASM(BuiltinSymbols.atomic_store, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u16, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u32, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_add_u: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_add_u: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_add_u: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_add_u: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_add_u: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_add: return deferASM(BuiltinSymbols.atomic_add, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u16, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u32, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_sub_u: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_sub_u: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.i32, operands, Type.i32, reportNode); + case 
BuiltinSymbols.i64_atomic_rmw8_sub_u: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_sub_u: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_sub_u: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_sub: return deferASM(BuiltinSymbols.atomic_sub, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u16, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u32, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_and_u: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_and_u: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_and_u: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_and_u: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_and_u: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_and: return deferASM(BuiltinSymbols.atomic_and, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u16, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u32, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_or_u: return 
deferASM(BuiltinSymbols.atomic_or, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_or_u: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_or_u: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_or_u: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_or_u: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_or: return deferASM(BuiltinSymbols.atomic_or, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_xor_u: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_xor_u: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_xor_u: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_xor_u: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_xor_u: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_xor: return deferASM(BuiltinSymbols.atomic_xor, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, 
Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_xchg_u: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_xchg_u: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_xchg_u: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_xchg_u: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_xchg_u: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_xchg: return deferASM(BuiltinSymbols.atomic_xchg, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_atomic_rmw8_u_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw16_u_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i32_atomic_rmw_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_atomic_rmw8_u_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw16_u_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u16, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw32_u_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u32, operands, Type.u64, reportNode); - case BuiltinSymbols.i64_atomic_rmw_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u64, operands, Type.u64, reportNode); + case BuiltinSymbols.i32_atomic_rmw8_cmpxchg_u: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw16_cmpxchg_u: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u16, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_atomic_rmw_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_atomic_rmw8_cmpxchg_u: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u8, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw16_cmpxchg_u: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u16, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw32_cmpxchg_u: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.u32, operands, Type.i64, reportNode); + case BuiltinSymbols.i64_atomic_rmw_cmpxchg: return deferASM(BuiltinSymbols.atomic_cmpxchg, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_wait: return deferASM(BuiltinSymbols.atomic_wait, compiler, Type.i32, operands, Type.u32, reportNode); - case 
BuiltinSymbols.i64_wait: return deferASM(BuiltinSymbols.atomic_wait, compiler, Type.i64, operands, Type.i64, reportNode); - case BuiltinSymbols.i32_notify: return deferASM(BuiltinSymbols.atomic_notify, compiler, Type.i32, operands, Type.u32, reportNode); - case BuiltinSymbols.i64_notify: return deferASM(BuiltinSymbols.atomic_notify, compiler, Type.i64, operands, Type.i64, reportNode); + case BuiltinSymbols.i32_wait: return deferASM(BuiltinSymbols.atomic_wait, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_wait: return deferASM(BuiltinSymbols.atomic_wait, compiler, Type.i64, operands, Type.i32, reportNode); + case BuiltinSymbols.i32_notify: return deferASM(BuiltinSymbols.atomic_notify, compiler, Type.i32, operands, Type.i32, reportNode); + case BuiltinSymbols.i64_notify: return deferASM(BuiltinSymbols.atomic_notify, compiler, Type.i64, operands, Type.i32, reportNode); } } if (compiler.options.hasFeature(Feature.SIMD)) { @@ -5799,13 +5770,21 @@ function deferASM( compiler: Compiler, typeArgument: Type, operands: Expression[], - valueType: Type, + contextualType: Type, reportNode: CallExpression ): ExpressionRef { assert(compiler.program.elementsByName.has(name)); var prototype = compiler.program.elementsByName.get(name)!; assert(prototype.kind == ElementKind.FUNCTION_PROTOTYPE); - return compileCall(compiler, prototype, [ typeArgument ], operands, valueType, reportNode); + return compileCall( + compiler, + prototype, + [ typeArgument ], + operands, + contextualType, + reportNode, + /* isAsm */ true + ); } /** Evaluates the constant type of a type argument *or* expression. */ diff --git a/src/compiler.ts b/src/compiler.ts index 7f4022f7..59b12553 100644 --- a/src/compiler.ts +++ b/src/compiler.ts @@ -411,7 +411,8 @@ export class Compiler extends DiagnosticEmitter { isSharedMemory ? 
options.sharedMemory : Module.UNLIMITED_MEMORY, this.memorySegments, options.target, - "memory" + "memory", + isSharedMemory ); // import memory if requested (default memory is named '0' by Binaryen) diff --git a/std/assembly/builtins.ts b/std/assembly/builtins.ts index ded0f6e1..8ee6f1f4 100644 --- a/std/assembly/builtins.ts +++ b/std/assembly/builtins.ts @@ -48,18 +48,24 @@ export namespace atomic { @builtin export declare function load<T>(offset: usize, immOffset?: usize): T; - @builtin export declare function store<T>(offset: usize, value: void, immOffset?: usize): void; + @builtin export declare function store<T>(offset: usize, value: T, immOffset?: usize): void; @builtin export declare function add<T>(ptr: usize, value: T, immOffset?: usize): T; @builtin export declare function sub<T>(ptr: usize, value: T, immOffset?: usize): T; @builtin export declare function and<T>(ptr: usize, value: T, immOffset?: usize): T; @builtin export declare function or<T>(ptr: usize, value: T, immOffset?: usize): T; @builtin export declare function xor<T>(ptr: usize, value: T, immOffset?: usize): T; @builtin export declare function xchg<T>(ptr: usize, value: T, immOffset?: usize): T; - @builtin export declare function cmpxchg<T>(ptr: usize, expected:T, replacement: T, immOffset?: usize): T; - @builtin export declare function wait<T>(ptr: usize, expected:T, timeout:i64): i32; + @builtin export declare function cmpxchg<T>(ptr: usize, expected: T, replacement: T, immOffset?: usize): T; + @builtin export declare function wait<T>(ptr: usize, expected: T, timeout: i64): AtomicWaitResult; @builtin export declare function notify(ptr: usize, count: u32): u32; } +@lazy export const enum AtomicWaitResult { + OK = 0, + NOT_EQUAL = 1, + TIMED_OUT = 2 +} + @builtin export declare function i8(value: void): i8; export namespace i8 { @lazy export const MIN_VALUE: i8 = -128; @@ -92,37 +98,33 @@ export namespace i32 { @builtin export declare function store(offset: usize, value: i32, immOffset?: usize, immAlign?: usize): void; export namespace atomic { - @builtin export declare function load8_s(offset: usize, immOffset?: usize): i32; @builtin export declare function load8_u(offset: usize, immOffset?: usize): i32; - @builtin export declare function load16_s(offset: usize, immOffset?: usize): i32; @builtin export declare function load16_u(offset: usize, immOffset?: usize): i32; @builtin export declare function load(offset: usize, immOffset?: usize): i32; @builtin export declare function store8(offset: usize, value: i32, immOffset?: usize): void; @builtin export declare function store16(offset: usize, value: i32, immOffset?: usize): void; @builtin export declare function store(offset: usize, value: i32, immOffset?: usize): void; - @builtin export declare function wait(ptr: usize, expected:i32, timeout:i64): i32; - @builtin export declare function notify(ptr: usize, count:u32): u32; + @builtin export declare function wait(ptr: usize, expected: i32, timeout: i64): AtomicWaitResult; + @builtin export declare function notify(ptr: usize, count: i32): i32; - export namespace rmw8_u { - @builtin export declare function add(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function sub(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function and(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function or(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function xor(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function 
xchg(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function cmpxchg(offset: usize, expected:i32, replacement: i32, immOffset?: usize): i32; + export namespace rmw8 { + @builtin export declare function add_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function sub_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function and_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function or_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function xor_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function xchg_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function cmpxchg_u(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; } - - export namespace rmw16_u { - @builtin export declare function add(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function sub(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function and(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function or(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function xor(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function xchg(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function cmpxchg(offset: usize, expected:i32, replacement: i32, immOffset?: usize): i32; + export namespace rmw16 { + @builtin export declare function add_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function sub_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function and_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function or_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function xor_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function xchg_u(offset: usize, value: i32, immOffset?: usize): i32; + @builtin export declare function cmpxchg_u(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; } - export namespace rmw { @builtin export declare function add(offset: usize, value: i32, immOffset?: usize): i32; @builtin export declare function sub(offset: usize, value: i32, immOffset?: usize): i32; @@ -130,7 +132,7 @@ export namespace i32 { @builtin export declare function or(offset: usize, value: i32, immOffset?: usize): i32; @builtin export declare function xor(offset: usize, value: i32, immOffset?: usize): i32; @builtin export declare function xchg(offset: usize, value: i32, immOffset?: usize): i32; - @builtin export declare function cmpxchg(offset: usize, expected:i32, replacement: i32, immOffset?: usize): i32; + @builtin export declare function cmpxchg(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; } } } @@ -142,11 +144,11 @@ export namespace i64 { @builtin export declare function clz(value: i64): i64; @builtin export declare function ctz(value: i64): i64; @builtin export declare function load8_s(offset: usize, immOffset?: usize, immAlign?: usize): i64; - @builtin export declare function load8_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; + @builtin export declare function load8_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; @builtin export declare function load16_s(offset: usize, immOffset?: usize, 
immAlign?: usize): i64; - @builtin export declare function load16_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; + @builtin export declare function load16_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; @builtin export declare function load32_s(offset: usize, immOffset?: usize, immAlign?: usize): i64; - @builtin export declare function load32_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; + @builtin export declare function load32_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; @builtin export declare function load(offset: usize, immOffset?: usize): i64; @builtin export declare function popcnt(value: i64): i64; @builtin export declare function rotl(value: i64, shift: i64): i64; @@ -157,48 +159,45 @@ export namespace i64 { @builtin export declare function store32(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; @builtin export declare function store(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; - namespace atomic { - @builtin export declare function load8_s(offset: usize, immOffset?: usize): i64; + export namespace atomic { @builtin export declare function load8_u(offset: usize, immOffset?: usize): i64; - @builtin export declare function load16_s(offset: usize, immOffset?: usize): i64; @builtin export declare function load16_u(offset: usize, immOffset?: usize): i64; + @builtin export declare function load32_u(offset: usize, immOffset?: usize): i64; @builtin export declare function load(offset: usize, immOffset?: usize): i64; @builtin export declare function store8(offset: usize, value: i64, immOffset?: usize): void; @builtin export declare function store16(offset: usize, value: i64, immOffset?: usize): void; + @builtin export declare function store32(offset: usize, value: i64, immOffset?: usize): void; @builtin export declare function store(offset: usize, value: i64, immOffset?: usize): void; - @builtin export declare function wait(ptr: usize, expected:i64, timeout:i64): i32; - @builtin export declare function notify(ptr: usize, count:u32): u32; + @builtin export declare function wait(ptr: usize, expected: i64, timeout: i64): AtomicWaitResult; + @builtin export declare function notify(ptr: usize, count: i32): i32; - export namespace rmw8_u { - @builtin export declare function add(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function sub(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function and(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function or(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xor(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xchg(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function cmpxchg(offset: usize, expected:i64, replacement: i64, immOffset?: usize): i64; + export namespace rmw8 { + @builtin export declare function add_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function and_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function or_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function 
cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; } - - export namespace rmw16_u { - @builtin export declare function add(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function sub(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function and(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function or(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xor(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xchg(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function cmpxchg(offset: usize, expected:i64, replacement: i64, immOffset?: usize): i64; + export namespace rmw16 { + @builtin export declare function add_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function and_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function or_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; } - - export namespace rmw32_u { - @builtin export declare function add(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function sub(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function and(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function or(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xor(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function xchg(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function cmpxchg(offset: usize, expected:i64, replacement: i64, immOffset?: usize): i64; + export namespace rmw32 { + @builtin export declare function add_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function and_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function or_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + @builtin export declare function cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; } - export namespace rmw { @builtin export declare function add(offset: usize, value: i64, immOffset?: usize): i64; @builtin export declare function sub(offset: usize, value: i64, immOffset?: usize): i64; @@ -206,9 +205,9 @@ export namespace i64 { @builtin export declare function or(offset: usize, value: i64, immOffset?: usize): i64; @builtin export declare function xor(offset: usize, value: i64, immOffset?: usize): i64; @builtin export declare function xchg(offset: usize, value: i64, immOffset?: usize): i64; - @builtin export declare function cmpxchg(offset: usize, expected:i64, replacement: i64, immOffset?: usize): i64; + @builtin export declare function 
cmpxchg(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; } - } + } } @builtin export declare function isize(value: void): isize; diff --git a/std/assembly/index.d.ts b/std/assembly/index.d.ts index 5ece5659..8c82c19e 100644 --- a/std/assembly/index.d.ts +++ b/std/assembly/index.d.ts @@ -163,6 +163,42 @@ declare function fmod(x: f64, y: f64): f64; /** Returns the 32-bit floating-point remainder of `x/y`. */ declare function fmodf(x: f32, y: f32): f32; +/** Atomic operations. */ +declare namespace atomic { + /** Atomically loads an integer value from memory and returns it. */ + export function load<T>(offset: usize, immOffset?: usize): T; + /** Atomically stores an integer value to memory. */ + export function store<T>(offset: usize, value: T, immOffset?: usize): void; + /** Atomically adds an integer value in memory. */ + export function add<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically subtracts an integer value in memory. */ + export function sub<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically performs a bitwise AND operation on an integer value in memory. */ + export function and<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically performs a bitwise OR operation on an integer value in memory. */ + export function or<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically performs a bitwise XOR operation on an integer value in memory. */ + export function xor<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically exchanges an integer value in memory. */ + export function xchg<T>(ptr: usize, value: T, immOffset?: usize): T; + /** Atomically compares and exchanges an integer value in memory if the condition is met. */ + export function cmpxchg<T>(ptr: usize, expected: T, replacement: T, immOffset?: usize): T; + /** Performs a wait operation on an integer value in memory suspending this agent if the condition is met. */ + export function wait<T>(ptr: usize, expected: T, timeout: i64): AtomicWaitResult; + /** Performs a notify operation on an integer value in memory waking up suspended agents. */ + export function notify(ptr: usize, count: u32): i32; +} + +/** Describes the result of an atomic wait operation. */ +declare enum AtomicWaitResult { + /** Woken by another agent. */ + OK, + /** Loaded value did not match the expected value. */ + NOT_EQUAL, + /** Not woken before the timeout expired. */ + TIMED_OUT +} + /** Converts any other numeric value to an 8-bit signed integer. */ declare function i8(value: i8 | i16 | i32 | i64 | isize | u8 | u16 | u32 | u64 | usize | bool | f32 | f64): i8; declare namespace i8 { @@ -186,22 +222,92 @@ declare namespace i32 { export const MIN_VALUE: i32; /** Largest representable value. */ export const MAX_VALUE: i32; - /** Loads an 8-bit signed integer from memory and returns it as a 32-bit integer. */ + /** Loads an 8-bit signed integer value from memory and returns it as a 32-bit integer. */ export function load8_s(offset: usize, immOffset?: usize, immAlign?: usize): i32; - /** Loads an 8-bit unsigned integer from memory and returns it as a 32-bit integer. */ + /** Loads an 8-bit unsigned integer value from memory and returns it as a 32-bit integer. */ export function load8_u(offset: usize, immOffset?: usize, immAlign?: usize): i32; - /** Loads a 16-bit signed integer from memory and returns it as a 32-bit integer. */ + /** Loads a 16-bit signed integer value from memory and returns it as a 32-bit integer.
*/ export function load16_s(offset: usize, immOffset?: usize, immAlign?: usize): i32; - /** Loads a 16-bit unsigned integer from memory and returns it as a 32-bit integer. */ + /** Loads a 16-bit unsigned integer value from memory and returns it as a 32-bit integer. */ export function load16_u(offset: usize, immOffset?: usize, immAlign?: usize): i32; - /** Loads a 32-bit integer from memory. */ + /** Loads a 32-bit integer value from memory. */ export function load(offset: usize, immOffset?: usize, immAlign?: usize): i32; - /** Stores a 32-bit integer to memory as an 8-bit integer. */ + /** Stores a 32-bit integer value to memory as an 8-bit integer. */ export function store8(offset: usize, value: i32, immOffset?: usize, immAlign?: usize): void; - /** Stores a 32-bit integer to memory as a 16-bit integer. */ + /** Stores a 32-bit integer value to memory as a 16-bit integer. */ export function store16(offset: usize, value: i32, immOffset?: usize, immAlign?: usize): void; - /** Stores a 32-bit integer to memory. */ + /** Stores a 32-bit integer value to memory. */ export function store(offset: usize, value: i32, immOffset?: usize, immAlign?: usize): void; + /** Atomic 32-bit integer operations. */ + export namespace atomic { + /** Atomically loads an 8-bit unsigned integer value from memory and returns it as a 32-bit integer. */ + export function load8_u(offset: usize, immOffset?: usize): i32; + /** Atomically loads a 16-bit unsigned integer value from memory and returns it as a 32-bit integer. */ + export function load16_u(offset: usize, immOffset?: usize): i32; + /** Atomically loads a 32-bit integer value from memory and returns it. */ + export function load(offset: usize, immOffset?: usize): i32; + /** Atomically stores a 32-bit integer value to memory as an 8-bit integer. */ + export function store8(offset: usize, value: i32, immOffset?: usize): void; + /** Atomically stores a 32-bit integer value to memory as a 16-bit integer. */ + export function store16(offset: usize, value: i32, immOffset?: usize): void; + /** Atomically stores a 32-bit integer value to memory. */ + export function store(offset: usize, value: i32, immOffset?: usize): void; + /** Performs a wait operation on a 32-bit integer value in memory suspending this agent if the condition is met. */ + export function wait(ptr: usize, expected: i32, timeout: i64): AtomicWaitResult; + /** Performs a notify operation on a 32-bit integer value in memory waking up suspended agents. */ + export function notify(ptr: usize, count: i32): i32; + /** Atomic 32-bit integer read-modify-write operations on 8-bit values. */ + export namespace rmw8 { + /** Atomically adds an 8-bit unsigned integer value in memory. */ + export function add_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically subtracts an 8-bit unsigned integer value in memory. */ + export function sub_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise AND operation on an 8-bit unsigned integer value in memory. */ + export function and_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise OR operation on an 8-bit unsigned integer value in memory. */ + export function or_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise XOR operation on an 8-bit unsigned integer value in memory. */ + export function xor_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically exchanges an 8-bit unsigned integer value in memory.
*/ + export function xchg_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically compares and exchanges an 8-bit unsigned integer value in memory if the condition is met. */ + export function cmpxchg_u(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; + } + /** Atomic 32-bit integer read-modify-write operations on 16-bit values. */ + export namespace rmw16 { + /** Atomically adds a 16-bit unsigned integer value in memory. */ + export function add_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically subtracts a 16-bit unsigned integer value in memory. */ + export function sub_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise AND operation on a 16-bit unsigned integer value in memory. */ + export function and_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise OR operation on a 16-bit unsigned integer value in memory. */ + export function or_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise XOR operation on a 16-bit unsigned integer value in memory. */ + export function xor_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically exchanges a 16-bit unsigned integer value in memory. */ + export function xchg_u(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically compares and exchanges a 16-bit unsigned integer value in memory if the condition is met. */ + export function cmpxchg_u(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; + } + /** Atomic 32-bit integer read-modify-write operations. */ + export namespace rmw { + /** Atomically adds a 32-bit integer value in memory. */ + export function add(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically subtracts a 32-bit integer value in memory. */ + export function sub(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise AND operation on a 32-bit integer value in memory. */ + export function and(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise OR operation on a 32-bit integer value in memory. */ + export function or(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically performs a bitwise XOR operation on a 32-bit integer value in memory. */ + export function xor(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically exchanges a 32-bit integer value in memory. */ + export function xchg(offset: usize, value: i32, immOffset?: usize): i32; + /** Atomically compares and exchanges a 32-bit integer value in memory if the condition is met. */ + export function cmpxchg(offset: usize, expected: i32, replacement: i32, immOffset?: usize): i32; + } + } } /** Converts any other numeric value to a 64-bit signed integer. */ declare function i64(value: i8 | i16 | i32 | i64 | isize | u8 | u16 | u32 | u64 | usize | bool | f32 | f64): i64; @@ -210,28 +316,119 @@ declare namespace i64 { export const MIN_VALUE: i64; /** Largest representable value. */ export const MAX_VALUE: i64; - /** Loads an 8-bit signed integer from memory and returns it as a 64-bit signed integer. */ + /** Loads an 8-bit signed integer value from memory and returns it as a 64-bit integer. */ export function load8_s(offset: usize, immOffset?: usize, immAlign?: usize): i64; - /** Loads an 8-bit unsigned integer from memory and returns it as a 64-bit unsigned integer.
*/ - export function load8_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; - /** Loads a 16-bit signed integer from memory and returns it as a 64-bit signed integer. */ + /** Loads an 8-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load8_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; + /** Loads a 16-bit signed integer value from memory and returns it as a 64-bit integer. */ export function load16_s(offset: usize, immOffset?: usize, immAlign?: usize): i64; - /** Loads a 16-bit unsigned integer from memory and returns it as a 64-bit unsigned integer. */ - export function load16_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; - /** Loads a 32-bit signed integer from memory and returns it as a 64-bit signed integer. */ + /** Loads a 16-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load16_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; + /** Loads a 32-bit signed integer value from memory and returns it as a 64-bit integer. */ export function load32_s(offset: usize, immOffset?: usize, immAlign?: usize): i64; - /** Loads a 32-bit unsigned integer from memory and returns it as a 64-bit unsigned integer. */ - export function load32_u(offset: usize, immOffset?: usize, immAlign?: usize): u64; - /** Loads a 64-bit unsigned integer from memory. */ + /** Loads a 32-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load32_u(offset: usize, immOffset?: usize, immAlign?: usize): i64; + /** Loads a 64-bit unsigned integer value from memory. */ export function load(offset: usize, immOffset?: usize, immAlign?: usize): i64; - /** Stores a 64-bit integer to memory as an 8-bit integer. */ + /** Stores a 64-bit integer value to memory as an 8-bit integer. */ export function store8(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; - /** Stores a 64-bit integer to memory as a 16-bit integer. */ + /** Stores a 64-bit integer value to memory as a 16-bit integer. */ export function store16(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; - /** Stores a 64-bit integer to memory as a 32-bit integer. */ + /** Stores a 64-bit integer value to memory as a 32-bit integer. */ export function store32(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; - /** Stores a 64-bit integer to memory. */ + /** Stores a 64-bit integer value to memory. */ export function store(offset: usize, value: i64, immOffset?: usize, immAlign?: usize): void; + /** Atomic 64-bit integer operations. */ + export namespace atomic { + /** Atomically loads an 8-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load8_u(offset: usize, immOffset?: usize): i64; + /** Atomically loads a 16-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load16_u(offset: usize, immOffset?: usize): i64; + /** Atomically loads a 32-bit unsigned integer value from memory and returns it as a 64-bit integer. */ + export function load32_u(offset: usize, immOffset?: usize): i64; + /** Atomically loads a 64-bit integer value from memory and returns it. */ + export function load(offset: usize, immOffset?: usize): i64; + /** Atomically stores a 64-bit integer value to memory as an 8-bit integer. 
*/ + export function store8(offset: usize, value: i64, immOffset?: usize): void; + /** Atomically stores a 64-bit integer value to memory as a 16-bit integer. */ + export function store16(offset: usize, value: i64, immOffset?: usize): void; + /** Atomically stores a 64-bit integer value to memory as a 32-bit integer. */ + export function store32(offset: usize, value: i64, immOffset?: usize): void; + /** Atomically stores a 64-bit integer value to memory. */ + export function store(offset: usize, value: i64, immOffset?: usize): void; + /** Performs a wait operation on a 64-bit integer value in memory suspending this agent if the condition is met. */ + export function wait(ptr: usize, expected: i64, timeout: i64): AtomicWaitResult; + /** Performs a notify operation on a 64-bit integer value in memory waking up suspended agents. */ + export function notify(ptr: usize, count: i32): i32; + /** Atomic 64-bit integer read-modify-write operations on 8-bit values. */ + export namespace rmw8 { + /** Atomically adds an 8-bit unsigned integer value in memory. */ + export function add_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically subtracts an 8-bit unsigned integer value in memory. */ + export function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise AND operation on an 8-bit unsigned integer value in memory. */ + export function and_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise OR operation on an 8-bit unsigned integer value in memory. */ + export function or_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise XOR operation on an 8-bit unsigned integer value in memory. */ + export function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically exchanges an 8-bit unsigned integer value in memory. */ + export function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically compares and exchanges an 8-bit unsigned integer value in memory if the condition is met. */ + export function cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; + } + /** Atomic 64-bit integer read-modify-write operations on 16-bit values. */ + export namespace rmw16 { + /** Atomically adds a 16-bit unsigned integer value in memory. */ + export function add_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically subtracts a 16-bit unsigned integer value in memory. */ + export function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise AND operation on a 16-bit unsigned integer value in memory. */ + export function and_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise OR operation on a 16-bit unsigned integer value in memory. */ + export function or_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise XOR operation on a 16-bit unsigned integer value in memory. */ + export function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically exchanges a 16-bit unsigned integer value in memory. */ + export function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically compares and exchanges a 16-bit unsigned integer value in memory if the condition is met. */ + export function cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; + } + /** Atomic 64-bit integer read-modify-write operations on 32-bit values. 
*/ + export namespace rmw32 { + /** Atomically adds a 32-bit unsigned integer value in memory. */ + export function add_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically subtracts a 32-bit unsigned integer value in memory. */ + export function sub_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise AND operation on a 32-bit unsigned integer value in memory. */ + export function and_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise OR operation on a 32-bit unsigned integer value in memory. */ + export function or_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise XOR operation on a 32-bit unsigned integer value in memory. */ + export function xor_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically exchanges a 32-bit unsigned integer value in memory. */ + export function xchg_u(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically compares and exchanges a 32-bit unsigned integer value in memory if the condition is met. */ + export function cmpxchg_u(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; + } + /** Atomic 64-bit integer read-modify-write operations. */ + export namespace rmw { + /** Atomically adds a 64-bit integer value in memory. */ + export function add(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically subtracts a 64-bit integer value in memory. */ + export function sub(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise AND operation on a 64-bit integer value in memory. */ + export function and(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise OR operation on a 64-bit integer value in memory. */ + export function or(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically performs a bitwise XOR operation on a 64-bit integer value in memory. */ + export function xor(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically exchanges a 64-bit integer value in memory. */ + export function xchg(offset: usize, value: i64, immOffset?: usize): i64; + /** Atomically compares and exchanges a 64-bit integer value in memory if the condition is met. */ + export function cmpxchg(offset: usize, expected: i64, replacement: i64, immOffset?: usize): i64; + } + } } /** Converts any other numeric value to a 32-bit (in WASM32) respectively 64-bit (in WASM64) signed integer.
*/ declare var isize: typeof i32 | typeof i64; diff --git a/tests/compiler.js b/tests/compiler.js index 77d6041f..7322a4eb 100644 --- a/tests/compiler.js +++ b/tests/compiler.js @@ -126,6 +126,11 @@ tests.forEach(filename => { return; } } + if (config.asc_flags) { + config.asc_flags.forEach(flag => { + Array.prototype.push.apply(asc_flags, flag.split(" ")); + }); + } var failed = false; diff --git a/tests/compiler/threads.json b/tests/compiler/threads.json new file mode 100644 index 00000000..32c45dde --- /dev/null +++ b/tests/compiler/threads.json @@ -0,0 +1,9 @@ +{ + "features": [ + "threads" + ], + "asc_flags": [ + "--memoryBase 8", + "--sharedMemory 1" + ] +} diff --git a/tests/compiler/threads.optimized.wat b/tests/compiler/threads.optimized.wat new file mode 100644 index 00000000..85fe9b4a --- /dev/null +++ b/tests/compiler/threads.optimized.wat @@ -0,0 +1,545 @@ +(module + (type $FUNCSIG$v (func)) + (memory $0 (shared 1 1)) + (table $0 1 funcref) + (elem (i32.const 0) $null) + (export "memory" (memory $0)) + (export "table" (table $0)) + (start $start) + (func $threads/testAtomic (; 0 ;) (type $FUNCSIG$v) + i32.const 0 + i32.atomic.load8_u + drop + i32.const 0 + i32.atomic.load16_u + drop + i32.const 0 + i32.atomic.load + drop + i32.const 0 + i64.atomic.load8_u + drop + i32.const 0 + i64.atomic.load16_u + drop + i32.const 0 + i64.atomic.load32_u + drop + i32.const 0 + i64.atomic.load + drop + i32.const 8 + i64.atomic.load + drop + i32.const 0 + i32.const 1 + i32.atomic.store8 + i32.const 0 + i32.const 1 + i32.atomic.store16 + i32.const 0 + i32.const 1 + i32.atomic.store + i32.const 0 + i64.const 1 + i64.atomic.store8 + i32.const 0 + i64.const 1 + i64.atomic.store16 + i32.const 0 + i64.const 1 + i64.atomic.store32 + i32.const 0 + i64.const 1 + i64.atomic.store + i32.const 8 + i64.const 1 + i64.atomic.store + i32.const 0 + i32.const 1 + i32.atomic.rmw8.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.or_u + 
drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg offset=8 + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw32.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg offset=8 + drop + ) + (func $threads/testAtomicAsm (; 1 ;) (type $FUNCSIG$v) + i32.const 0 + i32.atomic.load8_u + drop + i32.const 0 + i32.atomic.load16_u + drop + i32.const 0 + i32.atomic.load + drop + i32.const 0 + i64.atomic.load8_u + drop + i32.const 0 + i64.atomic.load16_u + drop + i32.const 0 + i64.atomic.load32_u + drop + i32.const 0 + i64.atomic.load + drop + i32.const 0 + i32.const 1 + i32.atomic.store8 + i32.const 0 + i32.const 1 + i32.atomic.store16 + i32.const 0 + i32.const 1 + i32.atomic.store + i32.const 0 + i64.const 1 + i64.atomic.store8 + i32.const 0 + i64.const 1 + i64.atomic.store16 + i32.const 0 + i64.const 1 + i64.atomic.store32 + i32.const 0 + i64.const 1 + i64.atomic.store + i32.const 0 + i32.const 1 + i32.atomic.rmw8.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.and_u + drop + i32.const 0 + i64.const 1 + 
i64.atomic.rmw16.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw32.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg + drop + ) + (func $start (; 2 ;) (type $FUNCSIG$v) + call $threads/testAtomic + call $threads/testAtomicAsm + ) + (func $null (; 3 ;) (type $FUNCSIG$v) + nop + ) +) diff --git a/tests/compiler/threads.ts b/tests/compiler/threads.ts new file mode 100644 index 00000000..b8dde085 --- /dev/null +++ b/tests/compiler/threads.ts @@ -0,0 +1,172 @@ +function testAtomic(): void { + atomic.load(0); // i32.atomic.load8_u + atomic.load(0); // i32.atomic.load16_u + atomic.load(0); // i32.atomic.load + atomic.load(0); // i64.atomic.load8_u + atomic.load(0); // i64.atomic.load16_u + atomic.load(0); // i64.atomic.load32_u + atomic.load(0); // i64.atomic.load + atomic.load(0, 8); // with offset + + atomic.store(0, 1); // i32.atomic.store8 + atomic.store(0, 1); // i32.atomic.store16 + atomic.store(0, 1); // i32.atomic.store + atomic.store(0, 1); // i64.atomic.store8 + atomic.store(0, 1); // i64.atomic.store16 + atomic.store(0, 1); // i64.atomic.store32 + atomic.store(0, 1); // i64.atomic.store + atomic.store(0, 1, 8); // with offset + + atomic.add(0, 1); // i32.atomic.rmw8.add_u + atomic.add(0, 1); // i32.atomic.rmw16.add_u + atomic.add(0, 1); // i32.atomic.rmw.add + atomic.add(0, 1); // i64.atomic.rmw8.add_u + atomic.add(0, 1); // i64.atomic.rmw16.add_u + atomic.add(0, 1); // i64.atomic.rmw32.add_u + atomic.add(0, 1); // i64.atomic.rmw.add + atomic.add(0, 1, 8); // with offset + + atomic.sub(0, 1); // i32.atomic.rmw8.sub_u + atomic.sub(0, 1); // i32.atomic.rmw16.sub_u + atomic.sub(0, 1); // i32.atomic.rmw.sub + atomic.sub(0, 1); // i64.atomic.rmw8.sub_u + atomic.sub(0, 1); // i64.atomic.rmw16.sub_u + atomic.sub(0, 1); // 
i64.atomic.rmw32.sub_u + atomic.sub(0, 1); // i64.atomic.rmw.sub + atomic.sub(0, 1, 8); // with offset + + atomic.and(0, 1); // i32.atomic.rmw8.and_u + atomic.and(0, 1); // i32.atomic.rmw16.and_u + atomic.and(0, 1); // i32.atomic.rmw.and + atomic.and(0, 1); // i64.atomic.rmw8.and_u + atomic.and(0, 1); // i64.atomic.rmw16.and_u + atomic.and(0, 1); // i64.atomic.rmw32.and_u + atomic.and(0, 1); // i64.atomic.rmw.and + atomic.and(0, 1, 8); // with offset + + atomic.or(0, 1); // i32.atomic.rmw8.or_u + atomic.or(0, 1); // i32.atomic.rmw16.or_u + atomic.or(0, 1); // i32.atomic.rmw.or + atomic.or(0, 1); // i64.atomic.rmw8.or_u + atomic.or(0, 1); // i64.atomic.rmw16.or_u + atomic.or(0, 1); // i64.atomic.rmw32.or_u + atomic.or(0, 1); // i64.atomic.rmw.or + atomic.or(0, 1, 8); // with offset + + atomic.xor(0, 1); // i32.atomic.rmw8.xor_u + atomic.xor(0, 1); // i32.atomic.rmw16.xor_u + atomic.xor(0, 1); // i32.atomic.rmw.xor + atomic.xor(0, 1); // i64.atomic.rmw8.xor_u + atomic.xor(0, 1); // i64.atomic.rmw16.xor_u + atomic.xor(0, 1); // i64.atomic.rmw32.xor_u + atomic.xor(0, 1); // i64.atomic.rmw.xor + atomic.xor(0, 1, 8); // with offset + + atomic.xchg(0, 1); // i32.atomic.rmw8.xchg_u + atomic.xchg(0, 1); // i32.atomic.rmw16.xchg_u + atomic.xchg(0, 1); // i32.atomic.rmw.xchg + atomic.xchg(0, 1); // i64.atomic.rmw8.xchg_u + atomic.xchg(0, 1); // i64.atomic.rmw16.xchg_u + atomic.xchg(0, 1); // i64.atomic.rmw32.xchg_u + atomic.xchg(0, 1); // i64.atomic.rmw.xchg + atomic.xchg(0, 1, 8); // with offset + + atomic.cmpxchg(0, 1, 2); // i32.atomic.rmw8.cmpxchg_u + atomic.cmpxchg(0, 1, 2); // i32.atomic.rmw16.cmpxchg_u + atomic.cmpxchg(0, 1, 2); // i32.atomic.rmw.cmpxchg + atomic.cmpxchg(0, 1, 2); // i64.atomic.rmw8.cmpxchg_u + atomic.cmpxchg(0, 1, 2); // i64.atomic.rmw16.cmpxchg_u + atomic.cmpxchg(0, 1, 2); // i64.atomic.rmw32.cmpxchg_u + atomic.cmpxchg(0, 1, 2); // i64.atomic.rmw.cmpxchg + atomic.cmpxchg(0, 1, 2, 8); // with offset + + // atomic.wait(0, 0, -1); // i32.atomic.wait + // atomic.wait(0, 0, -1); // i64.atomic.wait + // atomic.notify(0, 1); // i32.atomic.notify + // atomic.notify(0, 1); // i64.atomic.notify +} + +function testAtomicAsm(): void { + + i32.atomic.load8_u(0); + i32.atomic.load16_u(0); + i32.atomic.load(0); + i64.atomic.load8_u(0); + i64.atomic.load16_u(0); + i64.atomic.load32_u(0); + i64.atomic.load(0); + + i32.atomic.store8(0, 1); + i32.atomic.store16(0, 1); + i32.atomic.store(0, 1); + i64.atomic.store8(0, 1); + i64.atomic.store16(0, 1); + i64.atomic.store32(0, 1); + i64.atomic.store(0, 1); + + i32.atomic.rmw8.add_u(0, 1); + i32.atomic.rmw16.add_u(0, 1); + i32.atomic.rmw.add(0, 1); + i64.atomic.rmw8.add_u(0, 1); + i64.atomic.rmw16.add_u(0, 1); + i64.atomic.rmw32.add_u(0, 1); + i64.atomic.rmw.add(0, 1); + + i32.atomic.rmw8.sub_u(0, 1); + i32.atomic.rmw16.sub_u(0, 1); + i32.atomic.rmw.sub(0, 1); + i64.atomic.rmw8.sub_u(0, 1); + i64.atomic.rmw16.sub_u(0, 1); + i64.atomic.rmw32.sub_u(0, 1); + i64.atomic.rmw.sub(0, 1); + + i32.atomic.rmw8.and_u(0, 1); + i32.atomic.rmw16.and_u(0, 1); + i32.atomic.rmw.and(0, 1); + i64.atomic.rmw8.and_u(0, 1); + i64.atomic.rmw16.and_u(0, 1); + i64.atomic.rmw32.and_u(0, 1); + i64.atomic.rmw.and(0, 1); + + i32.atomic.rmw8.or_u(0, 1); + i32.atomic.rmw16.or_u(0, 1); + i32.atomic.rmw.or(0, 1); + i64.atomic.rmw8.or_u(0, 1); + i64.atomic.rmw16.or_u(0, 1); + i64.atomic.rmw32.or_u(0, 1); + i64.atomic.rmw.or(0, 1); + + i32.atomic.rmw8.xor_u(0, 1); + i32.atomic.rmw16.xor_u(0, 1); + i32.atomic.rmw.xor(0, 1); + i64.atomic.rmw8.xor_u(0, 1); + i64.atomic.rmw16.xor_u(0, 1); +
i64.atomic.rmw32.xor_u(0, 1); + i64.atomic.rmw.xor(0, 1); + + i32.atomic.rmw8.xchg_u(0, 1); + i32.atomic.rmw16.xchg_u(0, 1); + i32.atomic.rmw.xchg(0, 1); + i64.atomic.rmw8.xchg_u(0, 1); + i64.atomic.rmw16.xchg_u(0, 1); + i64.atomic.rmw32.xchg_u(0, 1); + i64.atomic.rmw.xchg(0, 1); + + i32.atomic.rmw8.cmpxchg_u(0, 1, 2); + i32.atomic.rmw16.cmpxchg_u(0, 1, 2); + i32.atomic.rmw.cmpxchg(0, 1, 2); + i64.atomic.rmw8.cmpxchg_u(0, 1, 2); + i64.atomic.rmw16.cmpxchg_u(0, 1, 2); + i64.atomic.rmw32.cmpxchg_u(0, 1, 2); + i64.atomic.rmw.cmpxchg(0, 1, 2); + + // i32.atomic.wait(0, 0, -1); + // i64.atomic.wait(0, 0, -1); + // i32.atomic.notify(0, 1); + // i64.atomic.notify(0, 1); +} + +if (ASC_FEATURE_THREADS) { + testAtomic(); + testAtomicAsm(); +} diff --git a/tests/compiler/threads.untouched.wat b/tests/compiler/threads.untouched.wat new file mode 100644 index 00000000..74598aa5 --- /dev/null +++ b/tests/compiler/threads.untouched.wat @@ -0,0 +1,549 @@ +(module + (type $FUNCSIG$v (func)) + (memory $0 (shared 1 1)) + (table $0 1 funcref) + (elem (i32.const 0) $null) + (global $~lib/ASC_FEATURE_THREADS i32 (i32.const 0)) + (global $~lib/memory/HEAP_BASE i32 (i32.const 8)) + (export "memory" (memory $0)) + (export "table" (table $0)) + (start $start) + (func $threads/testAtomic (; 0 ;) (type $FUNCSIG$v) + i32.const 0 + i32.atomic.load8_u + drop + i32.const 0 + i32.atomic.load16_u + drop + i32.const 0 + i32.atomic.load + drop + i32.const 0 + i64.atomic.load8_u + drop + i32.const 0 + i64.atomic.load16_u + drop + i32.const 0 + i64.atomic.load32_u + drop + i32.const 0 + i64.atomic.load + drop + i32.const 0 + i64.atomic.load offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.store8 + i32.const 0 + i32.const 1 + i32.atomic.store16 + i32.const 0 + i32.const 1 + i32.atomic.store + i32.const 0 + i64.const 1 + i64.atomic.store8 + i32.const 0 + i64.const 1 + i64.atomic.store16 + i32.const 0 + i64.const 1 + i64.atomic.store32 + i32.const 0 + i64.const 1 + i64.atomic.store + i32.const 0 + i64.const 1 + i64.atomic.store offset=8 + i32.const 0 + i32.const 1 + i32.atomic.rmw8.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.or_u + drop + i32.const 0 + i32.const 1 + 
i32.atomic.rmw16.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor offset=8 + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg offset=8 + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw32.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg offset=8 + drop + ) + (func $threads/testAtomicAsm (; 1 ;) (type $FUNCSIG$v) + i32.const 0 + i32.atomic.load8_u + drop + i32.const 0 + i32.atomic.load16_u + drop + i32.const 0 + i32.atomic.load + drop + i32.const 0 + i64.atomic.load8_u + drop + i32.const 0 + i64.atomic.load16_u + drop + i32.const 0 + i64.atomic.load32_u + drop + i32.const 0 + i64.atomic.load + drop + i32.const 0 + i32.const 1 + i32.atomic.store8 + i32.const 0 + i32.const 1 + i32.atomic.store16 + i32.const 0 + i32.const 1 + i32.atomic.store + i32.const 0 + i64.const 1 + i64.atomic.store8 + i32.const 0 + i64.const 1 + i64.atomic.store16 + i32.const 0 + i64.const 1 + i64.atomic.store32 + i32.const 0 + i64.const 1 + i64.atomic.store + i32.const 0 + i32.const 1 + i32.atomic.rmw8.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.add_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.add + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.add_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.add + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.sub_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.sub + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.sub_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.sub + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.and_u + drop + i32.const 0 + i32.const 1 + 
i32.atomic.rmw16.and_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.and + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.and_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.and + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.or_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.or + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.or_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.or + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xor_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xor + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xor_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xor + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw8.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw16.xchg_u + drop + i32.const 0 + i32.const 1 + i32.atomic.rmw.xchg + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw8.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw16.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw32.xchg_u + drop + i32.const 0 + i64.const 1 + i64.atomic.rmw.xchg + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw.cmpxchg + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw8.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw16.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw32.cmpxchg_u + drop + i32.const 0 + i64.const 1 + i64.const 2 + i64.atomic.rmw.cmpxchg + drop + ) + (func $start:threads (; 2 ;) (type $FUNCSIG$v) + call $threads/testAtomic + call $threads/testAtomicAsm + ) + (func $start (; 3 ;) (type $FUNCSIG$v) + call $start:threads + ) + (func $null (; 4 ;) (type $FUNCSIG$v) + ) +) diff --git a/tests/features.json b/tests/features.json index cc995dbe..3a203c48 100644 --- a/tests/features.json +++ b/tests/features.json @@ -6,5 +6,13 @@ "v8_flags": [ "--experimental-wasm-simd" ] + }, + "threads": { + "asc_flags": [ + "--enable threads" + ], + "v8_flags": [ + "--experimental-wasm-threads" + ] } }
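
For orientation, a minimal usage sketch of the generic builtins declared in std/assembly/index.d.ts above, assuming a module is compiled with --enable threads and --sharedMemory 1 as configured in tests/features.json and tests/compiler/threads.json; the lock address LOCK_PTR and the busy-wait strategy are illustrative assumptions, not part of this change:

// Hypothetical example; LOCK_PTR is an arbitrary address in the shared memory.
const LOCK_PTR: usize = 0;

// Atomically increments a 32-bit counter and returns its previous value
// (lowers to i32.atomic.rmw.add, as exercised in threads.ts).
function increment(ptr: usize): i32 {
  return atomic.add<i32>(ptr, 1);
}

// A trivial spin lock built on the compare-and-exchange builtin
// (lowers to i32.atomic.rmw.cmpxchg).
function lock(): void {
  while (atomic.cmpxchg<i32>(LOCK_PTR, 0, 1) != 0) {
    // busy-wait; atomic.wait/notify are declared but remain commented out
    // in threads.ts, so this sketch does not rely on them
  }
}

// Releases the lock by atomically storing 0 (lowers to i32.atomic.store).
function unlock(): void {
  atomic.store<i32>(LOCK_PTR, 0);
}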