diff --git a/benchmarks/wasm/fp_ops.wat b/benchmarks/wasm/fp_ops.wat index 99698790..17fdaf1c 100644 --- a/benchmarks/wasm/fp_ops.wat +++ b/benchmarks/wasm/fp_ops.wat @@ -115,6 +115,30 @@ local.get 3 f64.le i32.eqz + call 0 + + i32.const -1 + f32.convert_i32_s + f32.const -1.0 + f32.eq + call 0 + + i32.const -1 + f32.convert_i32_u + f32.const 0x1p+32 (;=4294967296;) + f32.eq + call 0 + + i32.const -1 + f64.convert_i32_u + f64.const 4294967295.0 + f64.eq + call 0 + + i64.const -1 + f64.convert_i64_s + f64.const -1.0 + f64.eq call 0) ;; symbolic branch guard for f32 diff --git a/benchmarks/wasm/mem-sym.wat b/benchmarks/wasm/mem-sym.wat index 45b9d1c3..187682ec 100644 --- a/benchmarks/wasm/mem-sym.wat +++ b/benchmarks/wasm/mem-sym.wat @@ -12,24 +12,48 @@ i32.load i32.const 25 i32.eq - if (result i32) ;; if x == 25 - i32.const 0 + if ;; if x == 25 + i32.const 0 call 0 ;; assert false - i32.const 1 ;; to satisfy the type checker, this line will never be reached - else - i32.const 1 - i32.load - i32.const 1 - i32.eq - if (result i32) ;; if x >> 8 == 1 - i32.const 0 - call 0 ;; assert false - i32.const 1 ;; to satisfy the type checker, this line will never be reached - else - i32.const 1 - end - i32.const 1 end + i32.const 1 + i32.load + i32.const 1 + i32.eq + if ;; if x >> 8 == 1 + i32.const 0 + call 0 ;; assert false + end + i32.const 4 + i64.load + i64.eqz + i32.eqz + if + i32.const 0 + call 0 ;; assert false + end + i32.const 0 + i64.load + i64.const 32 + i64.shr_u + i64.eqz + i32.eqz + if + i32.const 0 + call 0 ;; assert false + end + i32.const 8 + i64.const 0x0102030405060708 + i64.store + i32.const 8 + i64.load + i64.const 0x0102030405060708 + i64.ne + if + i32.const 0 + call 0 ;; assert false + end + i32.const 1 ) (func (;2;) (type 1) i32.const 0 @@ -40,4 +64,4 @@ (memory (;0;) 2) (export "main" (func 1)) (global (;0;) (mut i32) (i32.const 42)) -) \ No newline at end of file +) diff --git a/headers/wasm/concrete_num.hpp b/headers/wasm/concrete_num.hpp index 
3812f4a1..80a7fef0 100644 --- a/headers/wasm/concrete_num.hpp +++ b/headers/wasm/concrete_num.hpp @@ -236,10 +236,18 @@ struct Num { } // i64.extend_i32_s: sign-extend low 32 bits to i64 - inline Num i32_extend_to_i64() const { + inline Num i32_extend_to_i64_s() const { int64_t result_s = static_cast<int64_t>(this->toInt()); Num res(result_s); - debug_print("i32.extend_to_i64", *this, *this, res); + debug_print("i32.extend_to_i64_s", *this, *this, res); + return res; + } + + // i64.extend_i32_u: zero-extend low 32 bits to i64 + inline Num i32_extend_to_i64_u() const { + uint64_t result_u = static_cast<uint64_t>(this->toUInt()); + Num res(static_cast<int64_t>(result_u)); + debug_print("i32.extend_to_i64_u", *this, *this, res); return res; } @@ -611,6 +619,26 @@ struct Num { return res; } + inline Num convert_i32_to_f32_s() const { + uint32_t r_bits = f32_to_bits(static_cast<float>(toInt())); + return Num(static_cast<int32_t>(r_bits)); + } + + inline Num convert_i32_to_f32_u() const { + uint32_t r_bits = f32_to_bits(static_cast<float>(toUInt())); + return Num(static_cast<int32_t>(r_bits)); + } + + inline Num convert_i64_to_f32_s() const { + uint32_t r_bits = f32_to_bits(static_cast<float>(toInt64())); + return Num(static_cast<int32_t>(r_bits)); + } + + inline Num convert_i64_to_f32_u() const { + uint32_t r_bits = f32_to_bits(static_cast<float>(toUInt64())); + return Num(static_cast<int32_t>(r_bits)); + } + // f32.min / f32.max: follow wasm-ish semantics: if either is NaN, return NaN // (propagate) inline Num f32_min(const Num &other) const { @@ -829,6 +857,26 @@ struct Num { return res; } + inline Num convert_i32_to_f64_s() const { + uint64_t r_bits = f64_to_bits(static_cast<double>(toInt())); + return Num(static_cast<int64_t>(r_bits)); + } + + inline Num convert_i32_to_f64_u() const { + uint64_t r_bits = f64_to_bits(static_cast<double>(toUInt())); + return Num(static_cast<int64_t>(r_bits)); + } + + inline Num convert_i64_to_f64_s() const { + uint64_t r_bits = f64_to_bits(static_cast<double>(toInt64())); + return Num(static_cast<int64_t>(r_bits)); + } + + inline Num convert_i64_to_f64_u() const { + uint64_t 
r_bits = f64_to_bits(static_cast<double>(toUInt64())); + return Num(static_cast<int64_t>(r_bits)); + } + // f64.min / f64.max: follow wasm-ish semantics: if either is NaN, return // NaN (propagate) inline Num f64_min(const Num &other) const { diff --git a/headers/wasm/concrete_rt.hpp b/headers/wasm/concrete_rt.hpp index 7e3780e0..6584eba2 100644 --- a/headers/wasm/concrete_rt.hpp +++ b/headers/wasm/concrete_rt.hpp @@ -280,6 +280,18 @@ struct Memory_t { return result; } + int64_t loadLong(int32_t base, int32_t offset) { + int32_t addr = base + offset; + if (!(addr + 7 < memory.size()) || addr < 0) { + throw std::runtime_error("Invalid memory access " + std::to_string(addr)); + } + int64_t result = 0; + for (int i = 0; i < 8; ++i) { + result |= static_cast<int64_t>(memory[addr + i]) << (8 * i); + } + return result; + } + std::monostate storeInt(int32_t base, int32_t offset, int32_t value) { int32_t addr = base + offset; // Ensure we don't write out of bounds @@ -296,6 +308,17 @@ struct Memory_t { return std::monostate{}; } + std::monostate storeLong(int32_t base, int32_t offset, int64_t value) { + int32_t addr = base + offset; + if (!(addr + 7 < memory.size()) || addr < 0) { + throw std::runtime_error("Invalid memory access " + std::to_string(addr)); + } + for (int i = 0; i < 8; ++i) { + memory[addr + i] = static_cast<uint8_t>((static_cast<uint64_t>(value) >> (8 * i)) & 0xFF); + } + return std::monostate{}; + } + std::monostate store_byte(int32_t addr, uint8_t value) { #ifdef DEBUG std::cout << "[Debug] storing byte " << std::to_string(value) diff --git a/headers/wasm/sym_rt.hpp b/headers/wasm/sym_rt.hpp index 62d27247..6849f39c 100644 --- a/headers/wasm/sym_rt.hpp +++ b/headers/wasm/sym_rt.hpp @@ -315,6 +315,48 @@ class SymMemory_t { #endif } + SymVal loadSymLong(int32_t base, int32_t offset) { +#ifdef USE_IMM + int32_t addr = base + offset; + auto it = memory.find(addr); + SymVal s0 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 1); + SymVal s1 = it ? 
*it : SVFactory::ZeroByte; + it = memory.find(addr + 2); + SymVal s2 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 3); + SymVal s3 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 4); + SymVal s4 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 5); + SymVal s5 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 6); + SymVal s6 = it ? *it : SVFactory::ZeroByte; + it = memory.find(addr + 7); + SymVal s7 = it ? *it : SVFactory::ZeroByte; +#else + int32_t addr = base + offset; + auto it = memory.find(addr); + SymVal s0 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 1); + SymVal s1 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 2); + SymVal s2 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 3); + SymVal s3 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 4); + SymVal s4 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 5); + SymVal s5 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 6); + SymVal s6 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; + it = memory.find(addr + 7); + SymVal s7 = (it != memory.end()) ? it->second : SVFactory::ZeroByte; +#endif + + return s7.concat(s6).concat(s5).concat(s4).concat(s3).concat(s2).concat(s1).concat(s0); + } + // when loading a symval, we need to concat 4 symbolic values // This sounds terribly bad for SMT... 
// Load a 4-byte symbolic value from memory @@ -333,6 +375,27 @@ class SymMemory_t { return std::monostate{}; } + std::monostate storeSymLong(int32_t base, int32_t offset, SymVal value) { + int32_t addr = base + offset; + SymVal s0 = value.extract(1, 1); + SymVal s1 = value.extract(2, 2); + SymVal s2 = value.extract(3, 3); + SymVal s3 = value.extract(4, 4); + SymVal s4 = value.extract(5, 5); + SymVal s5 = value.extract(6, 6); + SymVal s6 = value.extract(7, 7); + SymVal s7 = value.extract(8, 8); + storeSymByte(addr, s0); + storeSymByte(addr + 1, s1); + storeSymByte(addr + 2, s2); + storeSymByte(addr + 3, s3); + storeSymByte(addr + 4, s4); + storeSymByte(addr + 5, s5); + storeSymByte(addr + 6, s6); + storeSymByte(addr + 7, s7); + return std::monostate{}; + } + std::monostate storeSymByte(int32_t addr, SymVal value) { // assume the input value is 8-bit symbolic value bool exists; diff --git a/headers/wasm/symval_factory.hpp b/headers/wasm/symval_factory.hpp index c4f533b2..f7a1af00 100644 --- a/headers/wasm/symval_factory.hpp +++ b/headers/wasm/symval_factory.hpp @@ -2,8 +2,8 @@ #define WASM_SYMVAL_FACTORY_HPP #include "heap_mem_bookkeeper.hpp" -#include "symval_decl.hpp" #include "symbolic_decl.hpp" +#include "symval_decl.hpp" namespace SVFactory { @@ -116,7 +116,7 @@ struct UnaryOpKeyHash { // Caches. static std::unordered_map SymbolStore; -static std::unordered_map concrete_pool; +static std::unordered_map FPStore; static std::unordered_map SmallBVStore; static std::unordered_map ExtractOperationStore; @@ -126,26 +126,27 @@ static std::unordered_map // Factory implementations. 
inline SymVal make_concrete_bv(Num num, int width) { - auto it = concrete_pool.find(num.toInt()); - if (it != concrete_pool.end()) { + auto key = SmallBVKey(width, num.toInt64()); + auto it = SmallBVStore.find(key); + if (it != SmallBVStore.end()) { return it->second; } auto new_val = SymVal(SymBookKeeper.allocate(num, KindBV, width)); - concrete_pool.insert({num.toInt(), new_val}); + SmallBVStore.insert({key, new_val}); return new_val; } inline SymVal make_concrete_fp(Num num, int width) { - auto it = concrete_pool.find(num.toInt()); - if (it != concrete_pool.end()) { + auto it = FPStore.find(num.toInt64()); + if (it != FPStore.end()) { return it->second; } auto new_val = SymVal(SymBookKeeper.allocate(num, KindFP, width)); - concrete_pool.insert({num.toInt(), new_val}); + FPStore.insert({num.toInt64(), new_val}); return new_val; } @@ -205,7 +206,8 @@ inline SymVal make_extract(const SymVal &value, int high, int low) { if (auto concrete = std::dynamic_pointer_cast(value.symptr)) { if (concrete->kind != KindBV) { - throw std::runtime_error("Extract only supports bitvector concrete values"); + throw std::runtime_error( + "Extract only supports bitvector concrete values"); } // extract from concrete bitvector value int64_t val = concrete->value.value; @@ -520,7 +522,8 @@ inline SymVal make_binary(BinOperation op, const SymVal &lhs, } } - auto result = SymVal(SVFactory::SymBookKeeper.allocate(op, lhs, rhs)); + auto result = + SymVal(SVFactory::SymBookKeeper.allocate(op, lhs, rhs)); BinaryOperationStore.insert({key, result}); return result; } @@ -594,8 +597,8 @@ inline SymVal make_unary(UnaryOperation op, const SymVal &value) { break; } if (negated_op != inner_binary->op) { - auto result = - SVFactory::make_binary(negated_op, inner_binary->lhs, inner_binary->rhs); + auto result = SVFactory::make_binary(negated_op, inner_binary->lhs, + inner_binary->rhs); UnaryOperationStore.insert({key, result}); return result; } @@ -609,7 +612,8 @@ inline SymVal 
make_unary(UnaryOperation op, const SymVal &value) { inline SymVal make_concat(const SymVal &lhs, const SymVal &rhs) { if (auto lhs_concrete = std::dynamic_pointer_cast(lhs.symptr)) { - if (auto rhs_concrete = std::dynamic_pointer_cast(rhs.symptr)) { + if (auto rhs_concrete = + std::dynamic_pointer_cast(rhs.symptr)) { if (lhs_concrete->kind == KindBV && rhs_concrete->kind == KindBV) { int new_width = lhs_concrete->width() + rhs_concrete->width(); int64_t new_value = diff --git a/headers/wasm/symval_impl.hpp b/headers/wasm/symval_impl.hpp index 5255a13c..ae0f5c21 100644 --- a/headers/wasm/symval_impl.hpp +++ b/headers/wasm/symval_impl.hpp @@ -3,6 +3,7 @@ #include "symval_decl.hpp" #include "symval_factory.hpp" +#include "wasm/concrete_num.hpp" inline SymVal SymVal::add(const SymVal &other) const { return SVFactory::make_binary(ADD, *this, other); @@ -99,13 +100,13 @@ inline SymVal SymVal::rem_u(const SymVal &other) const { inline SymVal SymVal::is_zero() const { return SVFactory::make_binary( - EQ_BOOL, *this, SVFactory::make_concrete_bv(I32V(0), symptr->width())); + EQ_BOOL, *this, SVFactory::make_concrete_bv(I64V(0), symptr->width())); } inline SymVal SymVal::bv_negate() const { assert(symptr->width() != 1); return SVFactory::make_binary( - EQ_BOOL, *this, SVFactory::make_concrete_bv(I32V(0), symptr->width())); + EQ_BOOL, *this, SVFactory::make_concrete_bv(I64V(0), symptr->width())); } inline SymVal SymVal::bool_not() const { diff --git a/src/main/scala/wasm/StagedConcolicMiniWasm.scala b/src/main/scala/wasm/StagedConcolicMiniWasm.scala index fd18ceb1..49361367 100644 --- a/src/main/scala/wasm/StagedConcolicMiniWasm.scala +++ b/src/main/scala/wasm/StagedConcolicMiniWasm.scala @@ -515,10 +515,44 @@ trait ConcreteOps extends StagedWasmValueDomains with ValueCreation { } } - def extend(): StagedConcreteNum = { - num.tipe match { - case NumType(I32Type) => StagedConcreteNum(NumType(I64Type), "i32-extend-to-i64".reflectCtrlWith[Num](num.i)) - } + def extendS(): 
StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(I64Type), "i32-extend-to-i64-s".reflectCtrlWith[Num](num.i)) + } + + def extendU(): StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(I64Type), "i32-extend-to-i64-u".reflectCtrlWith[Num](num.i)) + } + + def convertI32ToF32S(): StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(F32Type), "i32-convert-to-f32-s".reflectCtrlWith[Num](num.i)) + } + + def convertI64ToF32S(): StagedConcreteNum = num.tipe match { + case NumType(I64Type) => StagedConcreteNum(NumType(F32Type), "i64-convert-to-f32-s".reflectCtrlWith[Num](num.i)) + } + + def convertI32ToF32U(): StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(F32Type), "i32-convert-to-f32-u".reflectCtrlWith[Num](num.i)) + } + + def convertI64ToF32U(): StagedConcreteNum = num.tipe match { + case NumType(I64Type) => StagedConcreteNum(NumType(F32Type), "i64-convert-to-f32-u".reflectCtrlWith[Num](num.i)) + } + + def convertI32ToF64S(): StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(F64Type), "i32-convert-to-f64-s".reflectCtrlWith[Num](num.i)) + } + + def convertI64ToF64S(): StagedConcreteNum = num.tipe match { + case NumType(I64Type) => StagedConcreteNum(NumType(F64Type), "i64-convert-to-f64-s".reflectCtrlWith[Num](num.i)) + } + + def convertI32ToF64U(): StagedConcreteNum = num.tipe match { + case NumType(I32Type) => StagedConcreteNum(NumType(F64Type), "i32-convert-to-f64-u".reflectCtrlWith[Num](num.i)) + } + + def convertI64ToF64U(): StagedConcreteNum = num.tipe match { + case NumType(I64Type) => StagedConcreteNum(NumType(F64Type), "i64-convert-to-f64-u".reflectCtrlWith[Num](num.i)) } def assert(): Rep[Unit] = { @@ -1039,17 +1073,23 @@ trait StagedMemory extends SAIOps with StagedWasmValueDomains with Continuations object Memory { // TODO: why this is only one function, rather than 
`storeInC` and `storeInS`? // TODO: what should the type of SymVal be? - def storeInt(base: Rep[Int], offset: Int, value: (Rep[Int], StagedSymbolicNum)): Rep[Unit] = { - "memory-store-int".reflectCtrlWith[Unit](base, offset, value._1) - "sym-store-int".reflectCtrlWith[Unit](base, offset, value._2.s) + def store(ty: ValueType, base: Rep[Int], offset: Int, value: (StagedConcreteNum, StagedSymbolicNum)): Rep[Unit] = ty match { + case NumType(I32Type) => + "memory-store-int".reflectCtrlWith[Unit](base, offset, value._1.i) + "sym-store-int".reflectCtrlWith[Unit](base, offset, value._2.s) + case NumType(I64Type) => + "memory-store-long".reflectCtrlWith[Unit](base, offset, value._1.i) + "sym-store-long".reflectCtrlWith[Unit](base, offset, value._2.s) } - def loadIntC(base: Rep[Int], offset: Int): StagedConcreteNum = { - StagedConcreteNum(NumType(I32Type), "I32V".reflectCtrlWith[Num]("memory-load-int".reflectCtrlWith[Int](base, offset))) + def loadC(ty: ValueType, base: Rep[Int], offset: Int): StagedConcreteNum = ty match { + case NumType(I32Type) => StagedConcreteNum(NumType(I32Type), "I32V".reflectCtrlWith[Num]("memory-load-int".reflectCtrlWith[Int](base, offset))) + case NumType(I64Type) => StagedConcreteNum(NumType(I64Type), "I64V".reflectCtrlWith[Num]("memory-load-long".reflectCtrlWith[Long](base, offset))) } - def loadIntS(base: Rep[Int], offset: Int): StagedSymbolicNum = { - StagedSymbolicNum(NumType(I32Type), "sym-load-int".reflectCtrlWith[SymVal](base, offset)) + def loadS(ty: ValueType, base: Rep[Int], offset: Int): StagedSymbolicNum = ty match { + case NumType(I32Type) => StagedSymbolicNum(NumType(I32Type), "sym-load-int".reflectCtrlWith[SymVal](base, offset)) + case NumType(I64Type) => StagedSymbolicNum(NumType(I64Type), "sym-load-long".reflectCtrlWith[SymVal](base, offset)) } // Returns the previous memory size on success, or -1 if the memory cannot be grown. 
@@ -1319,7 +1359,8 @@ trait StagedWasmEvaluator extends SAIOps } } eval(rest, kont, trail)(newCtx) - case Store(StoreOp(align, offset, NumType(I32Type), None)) => + case Nop => eval(rest, kont, trail) + case Store(StoreOp(align, offset, ty, None)) => val newCtx2 = withBlock { val (ty1, newCtx1) = ctx.pop() val value = Stack.popC(ty1) @@ -1327,18 +1368,17 @@ trait StagedWasmEvaluator extends SAIOps val (ty2, newCtx2) = newCtx1.pop() val addr = Stack.popC(ty2) val symAddr = Stack.popS(ty2) - Memory.storeInt(addr.toInt, offset, (value.toInt, symValue)) + Memory.store(ty, addr.toInt, offset, (value, symValue)) newCtx2 } eval(rest, kont, trail)(newCtx2) - case Nop => eval(rest, kont, trail) case Load(LoadOp(align, offset, ty, None, None)) => val newCtx1 = withBlock { val (ty1, newCtx1) = ctx.pop() val addr = Stack.popC(ty1) Stack.popS(ty1) - val num = Memory.loadIntC(addr.toInt, offset) - val sym = Memory.loadIntS(addr.toInt, offset) + val num = Memory.loadC(ty, addr.toInt, offset) + val sym = Memory.loadS(ty, addr.toInt, offset) Stack.pushC(num) Stack.pushS(sym) newCtx1 @@ -1896,15 +1936,25 @@ trait StagedWasmEvaluator extends SAIOps } def evalCvtOpC(op: CvtOp, value: StagedConcreteNum): StagedConcreteNum = op match { - case Extend(NumType(I32Type), NumType(I64Type), ZX) => value.extend + case Extend(NumType(I32Type), NumType(I64Type), SX) => value.extendS() + case Extend(NumType(I32Type), NumType(I64Type), ZX) => value.extendU() + case ConvertTo(NumType(I32Type), NumType(F32Type), SX) => value.convertI32ToF32S() + case ConvertTo(NumType(I32Type), NumType(F32Type), ZX) => value.convertI32ToF32U() + case ConvertTo(NumType(I64Type), NumType(F32Type), SX) => value.convertI64ToF32S() + case ConvertTo(NumType(I64Type), NumType(F32Type), ZX) => value.convertI64ToF32U() + case ConvertTo(NumType(I32Type), NumType(F64Type), SX) => value.convertI32ToF64S() + case ConvertTo(NumType(I32Type), NumType(F64Type), ZX) => value.convertI32ToF64U() + case ConvertTo(NumType(I64Type), 
NumType(F64Type), SX) => value.convertI64ToF64S() + case ConvertTo(NumType(I64Type), NumType(F64Type), ZX) => value.convertI64ToF64U() + case _ => throw new UnsupportedOperationException(s"Unsupported concrete conversion $op") } def evalCvtOpS(op: CvtOp, value: StagedSymbolicNum, c: StagedConcreteNum): StagedSymbolicNum = { - val res = if (allConcrete(value)) { - c.toStagedSymbolicNum.s - } else { + var res = c.toStagedSymbolicNum.s + if (!allConcrete(value)) { op match { case Extend(NumType(I32Type), NumType(I64Type), ZX) => value.extend().s + case _ => "debug-assert".reflectCtrlWith[SymVal](false, "All runtime float point must be concrete values") } } StagedSymbolicNum(c.tipe, res) @@ -2063,6 +2113,7 @@ trait StagedWasmCppGen extends CGenBase with CppSAICodeGenBase { registerHeader("") registerHeader("") registerHeader("") + registerHeader("") override def mayInline(n: Node): Boolean = n match { case Node(_, "stack-pop", _, _) @@ -2198,9 +2249,13 @@ trait StagedWasmCppGen extends CGenBase with CppSAICodeGenBase { case Node(_, "slice-reverse", List(slice), _) => shallow(slice); emit(".reverse") case Node(_, "memory-store-int", List(base, offset, value), _) => - emit("Memory.storeInt("); shallow(base); emit(", "); shallow(offset); emit(", "); shallow(value); emit(")") + emit("Memory.storeInt("); shallow(base); emit(", "); shallow(offset); emit(", "); shallow(value); emit(".toInt())") + case Node(_, "memory-store-long", List(base, offset, value), _) => + emit("Memory.storeLong("); shallow(base); emit(", "); shallow(offset); emit(", "); shallow(value); emit(".toInt64())") case Node(_, "memory-load-int", List(base, offset), _) => emit("Memory.loadInt("); shallow(base); emit(", "); shallow(offset); emit(")") + case Node(_, "memory-load-long", List(base, offset), _) => + emit("Memory.loadLong("); shallow(base); emit(", "); shallow(offset); emit(")") case Node(_, "memory-grow", List(delta), _) => emit("Memory.grow("); shallow(delta); emit(")") case Node(_, "stack-size", 
_, _) => @@ -2208,8 +2263,12 @@ trait StagedWasmCppGen extends CGenBase with CppSAICodeGenBase { // Symbolic Memory case Node(_, "sym-store-int", List(base, offset, s_value), _) => emit("SymMemory.storeSym("); shallow(base); emit(", "); shallow(offset); emit(", "); shallow(s_value); emit(")") + case Node(_, "sym-store-long", List(base, offset, s_value), _) => + emit("SymMemory.storeSymLong("); shallow(base); emit(", "); shallow(offset); emit(", "); shallow(s_value); emit(")") case Node(_, "sym-load-int", List(base, offset), _) => emit("SymMemory.loadSym("); shallow(base); emit(", "); shallow(offset); emit(")") + case Node(_, "sym-load-long", List(base, offset), _) => + emit("SymMemory.loadSymLong("); shallow(base); emit(", "); shallow(offset); emit(")") case Node(_, "sym-memory-grow", List(delta), _) => emit("SymMemory.grow("); shallow(delta); emit(")") // Globals @@ -2321,8 +2380,26 @@ trait StagedWasmCppGen extends CGenBase with CppSAICodeGenBase { shallow(lhs); emit(".i32_or("); shallow(rhs); emit(")") case Node(_, "i64-binary-or", List(lhs, rhs), _) => shallow(lhs); emit(".i64_or("); shallow(rhs); emit(")") - case Node(_, "i32-extend-to-i64", List(num), _) => - emit("("); shallow(num); emit(".i32_extend_to_i64())") + case Node(_, "i32-extend-to-i64-s", List(num), _) => + emit("("); shallow(num); emit(".i32_extend_to_i64_s())") + case Node(_, "i32-extend-to-i64-u", List(num), _) => + emit("("); shallow(num); emit(".i32_extend_to_i64_u())") + case Node(_, "i32-convert-to-f32-s", List(num), _) => + emit("("); shallow(num); emit(".convert_i32_to_f32_s())") + case Node(_, "i32-convert-to-f32-u", List(num), _) => + emit("("); shallow(num); emit(".convert_i32_to_f32_u())") + case Node(_, "i64-convert-to-f32-s", List(num), _) => + emit("("); shallow(num); emit(".convert_i64_to_f32_s())") + case Node(_, "i64-convert-to-f32-u", List(num), _) => + emit("("); shallow(num); emit(".convert_i64_to_f32_u())") + case Node(_, "i32-convert-to-f64-s", List(num), _) => + emit("("); 
shallow(num); emit(".convert_i32_to_f64_s())") + case Node(_, "i32-convert-to-f64-u", List(num), _) => + emit("("); shallow(num); emit(".convert_i32_to_f64_u())") + case Node(_, "i64-convert-to-f64-s", List(num), _) => + emit("("); shallow(num); emit(".convert_i64_to_f64_s())") + case Node(_, "i64-convert-to-f64-u", List(num), _) => + emit("("); shallow(num); emit(".convert_i64_to_f64_u())") case Node(_, "f32-binary-add", List(lhs, rhs), _) => shallow(lhs); emit(".f32_add("); shallow(rhs); emit(")") case Node(_, "f64-binary-add", List(lhs, rhs), _) => @@ -2421,6 +2498,8 @@ trait StagedWasmCppGen extends CGenBase with CppSAICodeGenBase { emit("make_symbolic("); shallow(num); emit(", "); shallow(width); emit(")") case Node(_, "sym-env-read", List(sym), _) => emit("SymEnv.read("); shallow(sym); emit(")") + case (Node(_, "debug-assert", List(cond, msg), _)) => + emit("assert("); shallow(cond); emit(" && "); shallow(msg); emit(")") case Node(_, "assert-true", List(cond), _) => emit("GENSYM_ASSERT("); shallow(cond); emit(")") case Node(_, "sym-assert-true", List(s_cond), _) =>