diff --git a/interpreter/text/lexer.mll b/interpreter/text/lexer.mll index c5d759fb88..161520e0a4 100644 --- a/interpreter/text/lexer.mll +++ b/interpreter/text/lexer.mll @@ -1,17 +1,18 @@ { open Parser open Operators +open Source let convert_pos pos = - { Source.file = pos.Lexing.pos_fname; - Source.line = pos.Lexing.pos_lnum; - Source.column = pos.Lexing.pos_cnum - pos.Lexing.pos_bol + { file = pos.Lexing.pos_fname; + line = pos.Lexing.pos_lnum; + column = pos.Lexing.pos_cnum - pos.Lexing.pos_bol } let region lexbuf = let left = convert_pos (Lexing.lexeme_start_p lexbuf) in let right = convert_pos (Lexing.lexeme_end_p lexbuf) in - {Source.left = left; Source.right = right} + {left = left; right = right} let error lexbuf msg = raise (Script.Syntax (region lexbuf, msg)) let error_nest start lexbuf msg = @@ -47,92 +48,7 @@ let string s = done; Buffer.contents b -let num_type = function - | "i32" -> Types.I32Type - | "i64" -> Types.I64Type - | "f32" -> Types.F32Type - | "f64" -> Types.F64Type - | _ -> assert false - -let vec_type = function - | "v128" -> Types.V128Type - | _ -> assert false - -let intop t i32 i64 = - match t with - | "i32" -> i32 - | "i64" -> i64 - | _ -> assert false - -let floatop t f32 f64 = - match t with - | "f32" -> f32 - | "f64" -> f64 - | _ -> assert false - -let numop t i32 i64 f32 f64 = - match t with - | "i32" -> i32 - | "i64" -> i64 - | "f32" -> f32 - | "f64" -> f64 - | _ -> assert false - -let v128op s i8x16 i16x8 i32x4 i64x2 f32x4 f64x2 = - match s with - | "i8x16" -> i8x16 - | "i16x8" -> i16x8 - | "i32x4" -> i32x4 - | "i64x2" -> i64x2 - | "f32x4" -> f32x4 - | "f64x2" -> f64x2 - | _ -> assert false - -let v128intop s i8x16 i16x8 i32x4 i64x2 = - match s with - | "i8x16" -> i8x16 - | "i16x8" -> i16x8 - | "i32x4" -> i32x4 - | "i64x2" -> i64x2 - | _ -> assert false - -let v128floatop s f32x4 f64x2 = - match s with - | "f32x4" -> f32x4 - | "f64x2" -> f64x2 - | _ -> assert false - -let memsz sz m8 m16 m32 = - match sz with - | "8" -> m8 - | 
"16" -> m16 - | "32" -> m32 - | _ -> assert false - -let ext e s u = - match e with - | 's' -> s - | 'u' -> u - | _ -> assert false - let opt = Lib.Option.get - -let v128_shape = function - | "i8x16" -> V128.I8x16 () - | "i16x8" -> V128.I16x8 () - | "i32x4" -> V128.I32x4 () - | "i64x2" -> V128.I64x2 () - | "f32x4" -> V128.F32x4 () - | "f64x2" -> V128.F64x2 () - | _ -> assert false - -let only shapes s lexbuf = - if not (List.mem s shapes) then - unknown lexbuf - -let except shapes s lexbuf = - if (List.mem s shapes) then - unknown lexbuf } let sign = '+' | '-' @@ -181,8 +97,13 @@ let float = | sign? "nan" | sign? "nan:" "0x" hexnum let string = '"' character* '"' -let reserved = (letter | digit | '_' | symbol)+ -let name = '$' reserved + +let idchar = letter | digit | '_' | symbol +let name = idchar+ +let id = '$' name + +let keyword = ['a'-'z'] (letter | digit | '_' | '.' | ':')+ +let reserved = name | ',' | ';' | '[' | ']' | '{' | '}' let ixx = "i" ("32" | "64") let fxx = "f" ("32" | "64") @@ -211,469 +132,569 @@ rule token = parse | '"'character*'\\'_ { error_nest (Lexing.lexeme_end_p lexbuf) lexbuf "illegal escape" } - | "extern" { EXTERN } - | "externref" { EXTERNREF } - | "funcref" { FUNCREF } - | nxx as t { NUM_TYPE (num_type t) } - | vxxx as t { VEC_TYPE (vec_type t) } - | "mut" { MUT } - - | v128_shape as s { VEC_SHAPE (v128_shape s) } - - | (nxx as t)".const" - { let open Source in - CONST (numop t - (fun s -> let n = I32.of_string s.it in - i32_const (n @@ s.at), Values.I32 n) - (fun s -> let n = I64.of_string s.it in - i64_const (n @@ s.at), Values.I64 n) - (fun s -> let n = F32.of_string s.it in - f32_const (n @@ s.at), Values.F32 n) - (fun s -> let n = F64.of_string s.it in - f64_const (n @@ s.at), Values.F64 n)) - } - | vxxx".const" - { let open Source in - VEC_CONST - (fun shape ss at -> - let v = V128.of_strings shape (List.map (fun s -> s.it) ss) in - (v128_const (v @@ at), Values.V128 v)) + | keyword as s + { match s with + | "i32" -> NUM_TYPE 
Types.I32Type + | "i64" -> NUM_TYPE Types.I64Type + | "f32" -> NUM_TYPE Types.F32Type + | "f64" -> NUM_TYPE Types.F64Type + | "v128" -> VEC_TYPE Types.V128Type + | "i8x16" -> VEC_SHAPE (V128.I8x16 ()) + | "i16x8" -> VEC_SHAPE (V128.I16x8 ()) + | "i32x4" -> VEC_SHAPE (V128.I32x4 ()) + | "i64x2" -> VEC_SHAPE (V128.I64x2 ()) + | "f32x4" -> VEC_SHAPE (V128.F32x4 ()) + | "f64x2" -> VEC_SHAPE (V128.F64x2 ()) + + | "extern" -> EXTERN + | "externref" -> EXTERNREF + | "funcref" -> FUNCREF + | "mut" -> MUT + + | "nop" -> NOP + | "unreachable" -> UNREACHABLE + | "drop" -> DROP + | "block" -> BLOCK + | "loop" -> LOOP + | "end" -> END + | "br" -> BR + | "br_if" -> BR_IF + | "br_table" -> BR_TABLE + | "return" -> RETURN + | "if" -> IF + | "then" -> THEN + | "else" -> ELSE + | "select" -> SELECT + | "call" -> CALL + | "call_indirect" -> CALL_INDIRECT + + | "local.get" -> LOCAL_GET + | "local.set" -> LOCAL_SET + | "local.tee" -> LOCAL_TEE + | "global.get" -> GLOBAL_GET + | "global.set" -> GLOBAL_SET + + | "table.get" -> TABLE_GET + | "table.set" -> TABLE_SET + | "table.size" -> TABLE_SIZE + | "table.grow" -> TABLE_GROW + | "table.fill" -> TABLE_FILL + | "table.copy" -> TABLE_COPY + | "table.init" -> TABLE_INIT + | "elem.drop" -> ELEM_DROP + + | "memory.size" -> MEMORY_SIZE + | "memory.grow" -> MEMORY_GROW + | "memory.fill" -> MEMORY_FILL + | "memory.copy" -> MEMORY_COPY + | "memory.init" -> MEMORY_INIT + | "data.drop" -> DATA_DROP + + | "i32.load" -> LOAD (fun a o -> i32_load (opt a 2) o) + | "i64.load" -> LOAD (fun a o -> i64_load (opt a 3) o) + | "f32.load" -> LOAD (fun a o -> f32_load (opt a 2) o) + | "f64.load" -> LOAD (fun a o -> f64_load (opt a 3) o) + | "i32.store" -> STORE (fun a o -> i32_store (opt a 2) o) + | "i64.store" -> STORE (fun a o -> i64_store (opt a 3) o) + | "f32.store" -> STORE (fun a o -> f32_store (opt a 2) o) + | "f64.store" -> STORE (fun a o -> f64_store (opt a 3) o) + + | "i32.load8_u" -> LOAD (fun a o -> i32_load8_u (opt a 0) o) + | "i32.load8_s" -> LOAD 
(fun a o -> i32_load8_s (opt a 0) o) + | "i32.load16_u" -> LOAD (fun a o -> i32_load16_u (opt a 1) o) + | "i32.load16_s" -> LOAD (fun a o -> i32_load16_s (opt a 1) o) + | "i64.load8_u" -> LOAD (fun a o -> i64_load8_u (opt a 0) o) + | "i64.load8_s" -> LOAD (fun a o -> i64_load8_s (opt a 0) o) + | "i64.load16_u" -> LOAD (fun a o -> i64_load16_u (opt a 1) o) + | "i64.load16_s" -> LOAD (fun a o -> i64_load16_s (opt a 1) o) + | "i64.load32_u" -> LOAD (fun a o -> i64_load32_u (opt a 2) o) + | "i64.load32_s" -> LOAD (fun a o -> i64_load32_s (opt a 2) o) + + | "i32.store8" -> STORE (fun a o -> i32_store8 (opt a 0) o) + | "i32.store16" -> STORE (fun a o -> i32_store16 (opt a 1) o) + | "i64.store8" -> STORE (fun a o -> i64_store8 (opt a 0) o) + | "i64.store16" -> STORE (fun a o -> i64_store16 (opt a 1) o) + | "i64.store32" -> STORE (fun a o -> i64_store32 (opt a 2) o) + + | "v128.load" -> VEC_LOAD (fun a o -> v128_load (opt a 4) o) + | "v128.store" -> VEC_STORE (fun a o -> v128_store (opt a 4) o) + | "v128.load8x8_u" -> VEC_LOAD (fun a o -> v128_load8x8_u (opt a 3) o) + | "v128.load8x8_s" -> VEC_LOAD (fun a o -> v128_load8x8_s (opt a 3) o) + | "v128.load16x4_u" -> VEC_LOAD (fun a o -> v128_load16x4_u (opt a 3) o) + | "v128.load16x4_s" -> VEC_LOAD (fun a o -> v128_load16x4_s (opt a 3) o) + | "v128.load32x2_u" -> VEC_LOAD (fun a o -> v128_load32x2_u (opt a 3) o) + | "v128.load32x2_s" -> VEC_LOAD (fun a o -> v128_load32x2_s (opt a 3) o) + | "v128.load8_splat" -> + VEC_LOAD (fun a o -> v128_load8_splat (opt a 0) o) + | "v128.load16_splat" -> + VEC_LOAD (fun a o -> v128_load16_splat (opt a 1) o) + | "v128.load32_splat" -> + VEC_LOAD (fun a o -> v128_load32_splat (opt a 2) o) + | "v128.load64_splat" -> + VEC_LOAD (fun a o -> v128_load64_splat (opt a 3) o) + | "v128.load32_zero" -> + VEC_LOAD (fun a o -> v128_load32_zero (opt a 2) o) + | "v128.load64_zero" -> + VEC_LOAD (fun a o -> v128_load64_zero (opt a 3) o) + | "v128.load8_lane" -> + VEC_LOAD_LANE (fun a o i -> v128_load8_lane (opt 
a 0) o i) + | "v128.load16_lane" -> + VEC_LOAD_LANE (fun a o i -> v128_load16_lane (opt a 1) o i) + | "v128.load32_lane" -> + VEC_LOAD_LANE (fun a o i -> v128_load32_lane (opt a 2) o i) + | "v128.load64_lane" -> + VEC_LOAD_LANE (fun a o i -> v128_load64_lane (opt a 3) o i) + | "v128.store8_lane" -> + VEC_STORE_LANE (fun a o i -> v128_store8_lane (opt a 0) o i) + | "v128.store16_lane" -> + VEC_STORE_LANE (fun a o i -> v128_store16_lane (opt a 1) o i) + | "v128.store32_lane" -> + VEC_STORE_LANE (fun a o i -> v128_store32_lane (opt a 2) o i) + | "v128.store64_lane" -> + VEC_STORE_LANE (fun a o i -> v128_store64_lane (opt a 3) o i) + + | "i32.const" -> + CONST (fun s -> + let n = I32.of_string s.it in i32_const (n @@ s.at), Values.I32 n) + | "i64.const" -> + CONST (fun s -> + let n = I64.of_string s.it in i64_const (n @@ s.at), Values.I64 n) + | "f32.const" -> + CONST (fun s -> + let n = F32.of_string s.it in f32_const (n @@ s.at), Values.F32 n) + | "f64.const" -> + CONST (fun s -> + let n = F64.of_string s.it in f64_const (n @@ s.at), Values.F64 n) + | "v128.const" -> + VEC_CONST + (fun shape ss at -> + let v = V128.of_strings shape (List.map (fun s -> s.it) ss) in + (v128_const (v @@ at), Values.V128 v)) + + | "ref.null" -> REF_NULL + | "ref.func" -> REF_FUNC + | "ref.extern" -> REF_EXTERN + | "ref.is_null" -> REF_IS_NULL + + | "i32.clz" -> UNARY i32_clz + | "i32.ctz" -> UNARY i32_ctz + | "i32.popcnt" -> UNARY i32_popcnt + | "i32.extend8_s" -> UNARY i32_extend8_s + | "i32.extend16_s" -> UNARY i32_extend16_s + | "i64.clz" -> UNARY i64_clz + | "i64.ctz" -> UNARY i64_ctz + | "i64.popcnt" -> UNARY i64_popcnt + | "i64.extend8_s" -> UNARY i64_extend8_s + | "i64.extend16_s" -> UNARY i64_extend16_s + | "i64.extend32_s" -> UNARY i64_extend32_s + + | "f32.neg" -> UNARY f32_neg + | "f32.abs" -> UNARY f32_abs + | "f32.sqrt" -> UNARY f32_sqrt + | "f32.ceil" -> UNARY f32_ceil + | "f32.floor" -> UNARY f32_floor + | "f32.trunc" -> UNARY f32_trunc + | "f32.nearest" -> UNARY 
f32_nearest + | "f64.neg" -> UNARY f64_neg + | "f64.abs" -> UNARY f64_abs + | "f64.sqrt" -> UNARY f64_sqrt + | "f64.ceil" -> UNARY f64_ceil + | "f64.floor" -> UNARY f64_floor + | "f64.trunc" -> UNARY f64_trunc + | "f64.nearest" -> UNARY f64_nearest + + | "i32.add" -> BINARY i32_add + | "i32.sub" -> BINARY i32_sub + | "i32.mul" -> BINARY i32_mul + | "i32.div_u" -> BINARY i32_div_u + | "i32.div_s" -> BINARY i32_div_s + | "i32.rem_u" -> BINARY i32_rem_u + | "i32.rem_s" -> BINARY i32_rem_s + | "i32.and" -> BINARY i32_and + | "i32.or" -> BINARY i32_or + | "i32.xor" -> BINARY i32_xor + | "i32.shl" -> BINARY i32_shl + | "i32.shr_u" -> BINARY i32_shr_u + | "i32.shr_s" -> BINARY i32_shr_s + | "i32.rotl" -> BINARY i32_rotl + | "i32.rotr" -> BINARY i32_rotr + | "i64.add" -> BINARY i64_add + | "i64.sub" -> BINARY i64_sub + | "i64.mul" -> BINARY i64_mul + | "i64.div_u" -> BINARY i64_div_u + | "i64.div_s" -> BINARY i64_div_s + | "i64.rem_u" -> BINARY i64_rem_u + | "i64.rem_s" -> BINARY i64_rem_s + | "i64.and" -> BINARY i64_and + | "i64.or" -> BINARY i64_or + | "i64.xor" -> BINARY i64_xor + | "i64.shl" -> BINARY i64_shl + | "i64.shr_u" -> BINARY i64_shr_u + | "i64.shr_s" -> BINARY i64_shr_s + | "i64.rotl" -> BINARY i64_rotl + | "i64.rotr" -> BINARY i64_rotr + + | "f32.add" -> BINARY f32_add + | "f32.sub" -> BINARY f32_sub + | "f32.mul" -> BINARY f32_mul + | "f32.div" -> BINARY f32_div + | "f32.min" -> BINARY f32_min + | "f32.max" -> BINARY f32_max + | "f32.copysign" -> BINARY f32_copysign + | "f64.add" -> BINARY f64_add + | "f64.sub" -> BINARY f64_sub + | "f64.mul" -> BINARY f64_mul + | "f64.div" -> BINARY f64_div + | "f64.min" -> BINARY f64_min + | "f64.max" -> BINARY f64_max + | "f64.copysign" -> BINARY f64_copysign + + | "i32.eqz" -> TEST i32_eqz + | "i64.eqz" -> TEST i64_eqz + + | "i32.eq" -> COMPARE i32_eq + | "i32.ne" -> COMPARE i32_ne + | "i32.lt_u" -> COMPARE i32_lt_u + | "i32.lt_s" -> COMPARE i32_lt_s + | "i32.le_u" -> COMPARE i32_le_u + | "i32.le_s" -> COMPARE i32_le_s 
+ | "i32.gt_u" -> COMPARE i32_gt_u + | "i32.gt_s" -> COMPARE i32_gt_s + | "i32.ge_u" -> COMPARE i32_ge_u + | "i32.ge_s" -> COMPARE i32_ge_s + | "i64.eq" -> COMPARE i64_eq + | "i64.ne" -> COMPARE i64_ne + | "i64.lt_u" -> COMPARE i64_lt_u + | "i64.lt_s" -> COMPARE i64_lt_s + | "i64.le_u" -> COMPARE i64_le_u + | "i64.le_s" -> COMPARE i64_le_s + | "i64.gt_u" -> COMPARE i64_gt_u + | "i64.gt_s" -> COMPARE i64_gt_s + | "i64.ge_u" -> COMPARE i64_ge_u + | "i64.ge_s" -> COMPARE i64_ge_s + + | "f32.eq" -> COMPARE f32_eq + | "f32.ne" -> COMPARE f32_ne + | "f32.lt" -> COMPARE f32_lt + | "f32.le" -> COMPARE f32_le + | "f32.gt" -> COMPARE f32_gt + | "f32.ge" -> COMPARE f32_ge + | "f64.eq" -> COMPARE f64_eq + | "f64.ne" -> COMPARE f64_ne + | "f64.lt" -> COMPARE f64_lt + | "f64.le" -> COMPARE f64_le + | "f64.gt" -> COMPARE f64_gt + | "f64.ge" -> COMPARE f64_ge + + | "i32.wrap_i64" -> CONVERT i32_wrap_i64 + | "i64.extend_i32_s" -> CONVERT i64_extend_i32_s + | "i64.extend_i32_u" -> CONVERT i64_extend_i32_u + | "f32.demote_f64" -> CONVERT f32_demote_f64 + | "f64.promote_f32" -> CONVERT f64_promote_f32 + | "i32.trunc_f32_u" -> CONVERT i32_trunc_f32_u + | "i32.trunc_f32_s" -> CONVERT i32_trunc_f32_s + | "i64.trunc_f32_u" -> CONVERT i64_trunc_f32_u + | "i64.trunc_f32_s" -> CONVERT i64_trunc_f32_s + | "i32.trunc_f64_u" -> CONVERT i32_trunc_f64_u + | "i32.trunc_f64_s" -> CONVERT i32_trunc_f64_s + | "i64.trunc_f64_u" -> CONVERT i64_trunc_f64_u + | "i64.trunc_f64_s" -> CONVERT i64_trunc_f64_s + | "i32.trunc_sat_f32_u" -> CONVERT i32_trunc_sat_f32_u + | "i32.trunc_sat_f32_s" -> CONVERT i32_trunc_sat_f32_s + | "i64.trunc_sat_f32_u" -> CONVERT i64_trunc_sat_f32_u + | "i64.trunc_sat_f32_s" -> CONVERT i64_trunc_sat_f32_s + | "i32.trunc_sat_f64_u" -> CONVERT i32_trunc_sat_f64_u + | "i32.trunc_sat_f64_s" -> CONVERT i32_trunc_sat_f64_s + | "i64.trunc_sat_f64_u" -> CONVERT i64_trunc_sat_f64_u + | "i64.trunc_sat_f64_s" -> CONVERT i64_trunc_sat_f64_s + | "f32.convert_i32_u" -> CONVERT f32_convert_i32_u 
+ | "f32.convert_i32_s" -> CONVERT f32_convert_i32_s + | "f64.convert_i32_u" -> CONVERT f64_convert_i32_u + | "f64.convert_i32_s" -> CONVERT f64_convert_i32_s + | "f32.convert_i64_u" -> CONVERT f32_convert_i64_u + | "f32.convert_i64_s" -> CONVERT f32_convert_i64_s + | "f64.convert_i64_u" -> CONVERT f64_convert_i64_u + | "f64.convert_i64_s" -> CONVERT f64_convert_i64_s + | "f32.reinterpret_i32" -> CONVERT f32_reinterpret_i32 + | "f64.reinterpret_i64" -> CONVERT f64_reinterpret_i64 + | "i32.reinterpret_f32" -> CONVERT i32_reinterpret_f32 + | "i64.reinterpret_f64" -> CONVERT i64_reinterpret_f64 + + | "v128.not" -> VEC_UNARY v128_not + | "v128.and" -> VEC_UNARY v128_and + | "v128.andnot" -> VEC_UNARY v128_andnot + | "v128.or" -> VEC_UNARY v128_or + | "v128.xor" -> VEC_UNARY v128_xor + | "v128.bitselect" -> VEC_TERNARY v128_bitselect + | "v128.any_true" -> VEC_TEST v128_any_true + + | "i8x16.neg" -> VEC_UNARY i8x16_neg + | "i16x8.neg" -> VEC_UNARY i16x8_neg + | "i32x4.neg" -> VEC_UNARY i32x4_neg + | "i64x2.neg" -> VEC_UNARY i64x2_neg + | "i8x16.abs" -> VEC_UNARY i8x16_abs + | "i16x8.abs" -> VEC_UNARY i16x8_abs + | "i32x4.abs" -> VEC_UNARY i32x4_abs + | "i64x2.abs" -> VEC_UNARY i64x2_abs + | "i8x16.popcnt" -> VEC_UNARY i8x16_popcnt + | "i8x16.avgr_u" -> VEC_UNARY i8x16_avgr_u + | "i16x8.avgr_u" -> VEC_UNARY i16x8_avgr_u + + | "f32x4.neg" -> VEC_UNARY f32x4_neg + | "f64x2.neg" -> VEC_UNARY f64x2_neg + | "f32x4.abs" -> VEC_UNARY f32x4_abs + | "f64x2.abs" -> VEC_UNARY f64x2_abs + | "f32x4.sqrt" -> VEC_UNARY f32x4_sqrt + | "f64x2.sqrt" -> VEC_UNARY f64x2_sqrt + | "f32x4.ceil" -> VEC_UNARY f32x4_ceil + | "f64x2.ceil" -> VEC_UNARY f64x2_ceil + | "f32x4.floor" -> VEC_UNARY f32x4_floor + | "f64x2.floor" -> VEC_UNARY f64x2_floor + | "f32x4.trunc" -> VEC_UNARY f32x4_trunc + | "f64x2.trunc" -> VEC_UNARY f64x2_trunc + | "f32x4.nearest" -> VEC_UNARY f32x4_nearest + | "f64x2.nearest" -> VEC_UNARY f64x2_nearest + + | "i32x4.trunc_sat_f32x4_u" -> VEC_UNARY i32x4_trunc_sat_f32x4_u + | 
"i32x4.trunc_sat_f32x4_s" -> VEC_UNARY i32x4_trunc_sat_f32x4_s + | "i32x4.trunc_sat_f64x2_u_zero" -> + VEC_UNARY i32x4_trunc_sat_f64x2_u_zero + | "i32x4.trunc_sat_f64x2_s_zero" -> + VEC_UNARY i32x4_trunc_sat_f64x2_s_zero + | "f64x2.promote_low_f32x4" -> VEC_UNARY f64x2_promote_low_f32x4 + | "f32x4.demote_f64x2_zero" -> VEC_UNARY f32x4_demote_f64x2_zero + | "f32x4.convert_i32x4_u" -> VEC_UNARY f32x4_convert_i32x4_u + | "f32x4.convert_i32x4_s" -> VEC_UNARY f32x4_convert_i32x4_s + | "f64x2.convert_low_i32x4_u" -> VEC_UNARY f64x2_convert_low_i32x4_u + | "f64x2.convert_low_i32x4_s" -> VEC_UNARY f64x2_convert_low_i32x4_s + | "i16x8.extadd_pairwise_i8x16_u" -> + VEC_UNARY i16x8_extadd_pairwise_i8x16_u + | "i16x8.extadd_pairwise_i8x16_s" -> + VEC_UNARY i16x8_extadd_pairwise_i8x16_s + | "i32x4.extadd_pairwise_i16x8_u" -> + VEC_UNARY i32x4_extadd_pairwise_i16x8_u + | "i32x4.extadd_pairwise_i16x8_s" -> + VEC_UNARY i32x4_extadd_pairwise_i16x8_s + + | "i8x16.eq" -> VEC_BINARY i8x16_eq + | "i16x8.eq" -> VEC_BINARY i16x8_eq + | "i32x4.eq" -> VEC_BINARY i32x4_eq + | "i64x2.eq" -> VEC_BINARY i64x2_eq + | "i8x16.ne" -> VEC_BINARY i8x16_ne + | "i16x8.ne" -> VEC_BINARY i16x8_ne + | "i32x4.ne" -> VEC_BINARY i32x4_ne + | "i64x2.ne" -> VEC_BINARY i64x2_ne + | "i8x16.lt_u" -> VEC_BINARY i8x16_lt_u + | "i8x16.lt_s" -> VEC_BINARY i8x16_lt_s + | "i16x8.lt_u" -> VEC_BINARY i16x8_lt_u + | "i16x8.lt_s" -> VEC_BINARY i16x8_lt_s + | "i32x4.lt_u" -> VEC_BINARY i32x4_lt_u + | "i32x4.lt_s" -> VEC_BINARY i32x4_lt_s + | "i64x2.lt_s" -> VEC_BINARY i64x2_lt_s + | "i8x16.le_u" -> VEC_BINARY i8x16_le_u + | "i8x16.le_s" -> VEC_BINARY i8x16_le_s + | "i16x8.le_u" -> VEC_BINARY i16x8_le_u + | "i16x8.le_s" -> VEC_BINARY i16x8_le_s + | "i32x4.le_u" -> VEC_BINARY i32x4_le_u + | "i32x4.le_s" -> VEC_BINARY i32x4_le_s + | "i64x2.le_s" -> VEC_BINARY i64x2_le_s + | "i8x16.gt_u" -> VEC_BINARY i8x16_gt_u + | "i8x16.gt_s" -> VEC_BINARY i8x16_gt_s + | "i16x8.gt_u" -> VEC_BINARY i16x8_gt_u + | "i16x8.gt_s" -> VEC_BINARY 
i16x8_gt_s + | "i32x4.gt_u" -> VEC_BINARY i32x4_gt_u + | "i32x4.gt_s" -> VEC_BINARY i32x4_gt_s + | "i64x2.gt_s" -> VEC_BINARY i64x2_gt_s + | "i8x16.ge_u" -> VEC_BINARY i8x16_ge_u + | "i8x16.ge_s" -> VEC_BINARY i8x16_ge_s + | "i16x8.ge_u" -> VEC_BINARY i16x8_ge_u + | "i16x8.ge_s" -> VEC_BINARY i16x8_ge_s + | "i32x4.ge_u" -> VEC_BINARY i32x4_ge_u + | "i32x4.ge_s" -> VEC_BINARY i32x4_ge_s + | "i64x2.ge_s" -> VEC_BINARY i64x2_ge_s + + | "f32x4.eq" -> VEC_BINARY f32x4_eq + | "f64x2.eq" -> VEC_BINARY f64x2_eq + | "f32x4.ne" -> VEC_BINARY f32x4_ne + | "f64x2.ne" -> VEC_BINARY f64x2_ne + | "f32x4.lt" -> VEC_BINARY f32x4_lt + | "f64x2.lt" -> VEC_BINARY f64x2_lt + | "f32x4.le" -> VEC_BINARY f32x4_le + | "f64x2.le" -> VEC_BINARY f64x2_le + | "f32x4.gt" -> VEC_BINARY f32x4_gt + | "f64x2.gt" -> VEC_BINARY f64x2_gt + | "f32x4.ge" -> VEC_BINARY f32x4_ge + | "f64x2.ge" -> VEC_BINARY f64x2_ge + | "i8x16.swizzle" -> VEC_BINARY i8x16_swizzle + + | "i8x16.add" -> VEC_BINARY i8x16_add + | "i16x8.add" -> VEC_BINARY i16x8_add + | "i32x4.add" -> VEC_BINARY i32x4_add + | "i64x2.add" -> VEC_BINARY i64x2_add + | "i8x16.sub" -> VEC_BINARY i8x16_sub + | "i16x8.sub" -> VEC_BINARY i16x8_sub + | "i32x4.sub" -> VEC_BINARY i32x4_sub + | "i64x2.sub" -> VEC_BINARY i64x2_sub + | "i16x8.mul" -> VEC_BINARY i16x8_mul + | "i32x4.mul" -> VEC_BINARY i32x4_mul + | "i64x2.mul" -> VEC_BINARY i64x2_mul + | "i8x16.add_sat_u" -> VEC_BINARY i8x16_add_sat_u + | "i8x16.add_sat_s" -> VEC_BINARY i8x16_add_sat_s + | "i16x8.add_sat_u" -> VEC_BINARY i16x8_add_sat_u + | "i16x8.add_sat_s" -> VEC_BINARY i16x8_add_sat_s + | "i8x16.sub_sat_u" -> VEC_BINARY i8x16_sub_sat_u + | "i8x16.sub_sat_s" -> VEC_BINARY i8x16_sub_sat_s + | "i16x8.sub_sat_u" -> VEC_BINARY i16x8_sub_sat_u + | "i16x8.sub_sat_s" -> VEC_BINARY i16x8_sub_sat_s + | "i32x4.dot_i16x8_s" -> VEC_BINARY i32x4_dot_i16x8_s + + | "i8x16.min_u" -> VEC_BINARY i8x16_min_u + | "i16x8.min_u" -> VEC_BINARY i16x8_min_u + | "i32x4.min_u" -> VEC_BINARY i32x4_min_u + | 
"i8x16.min_s" -> VEC_BINARY i8x16_min_s + | "i16x8.min_s" -> VEC_BINARY i16x8_min_s + | "i32x4.min_s" -> VEC_BINARY i32x4_min_s + | "i8x16.max_u" -> VEC_BINARY i8x16_max_u + | "i16x8.max_u" -> VEC_BINARY i16x8_max_u + | "i32x4.max_u" -> VEC_BINARY i32x4_max_u + | "i8x16.max_s" -> VEC_BINARY i8x16_max_s + | "i16x8.max_s" -> VEC_BINARY i16x8_max_s + | "i32x4.max_s" -> VEC_BINARY i32x4_max_s + + | "f32x4.add" -> VEC_BINARY f32x4_add + | "f64x2.add" -> VEC_BINARY f64x2_add + | "f32x4.sub" -> VEC_BINARY f32x4_sub + | "f64x2.sub" -> VEC_BINARY f64x2_sub + | "f32x4.mul" -> VEC_BINARY f32x4_mul + | "f64x2.mul" -> VEC_BINARY f64x2_mul + | "f32x4.div" -> VEC_BINARY f32x4_div + | "f64x2.div" -> VEC_BINARY f64x2_div + + | "f32x4.min" -> VEC_BINARY f32x4_min + | "f64x2.min" -> VEC_BINARY f64x2_min + | "f32x4.max" -> VEC_BINARY f32x4_max + | "f64x2.max" -> VEC_BINARY f64x2_max + | "f32x4.pmin" -> VEC_BINARY f32x4_pmin + | "f64x2.pmin" -> VEC_BINARY f64x2_pmin + | "f32x4.pmax" -> VEC_BINARY f32x4_pmax + | "f64x2.pmax" -> VEC_BINARY f64x2_pmax + + | "i16x8.q15mulr_sat_s" -> VEC_BINARY i16x8_q15mulr_sat_s + | "i8x16.narrow_i16x8_u" -> VEC_BINARY i8x16_narrow_i16x8_u + | "i8x16.narrow_i16x8_s" -> VEC_BINARY i8x16_narrow_i16x8_s + | "i16x8.narrow_i32x4_u" -> VEC_BINARY i16x8_narrow_i32x4_u + | "i16x8.narrow_i32x4_s" -> VEC_BINARY i16x8_narrow_i32x4_s + | "i16x8.extend_low_i8x16_u" -> VEC_UNARY i16x8_extend_low_i8x16_u + | "i16x8.extend_low_i8x16_s" -> VEC_UNARY i16x8_extend_low_i8x16_s + | "i16x8.extend_high_i8x16_u" -> VEC_UNARY i16x8_extend_high_i8x16_u + | "i16x8.extend_high_i8x16_s" -> VEC_UNARY i16x8_extend_high_i8x16_s + | "i32x4.extend_low_i16x8_u" -> VEC_UNARY i32x4_extend_low_i16x8_u + | "i32x4.extend_low_i16x8_s" -> VEC_UNARY i32x4_extend_low_i16x8_s + | "i32x4.extend_high_i16x8_u" -> VEC_UNARY i32x4_extend_high_i16x8_u + | "i32x4.extend_high_i16x8_s" -> VEC_UNARY i32x4_extend_high_i16x8_s + | "i64x2.extend_low_i32x4_u" -> VEC_UNARY i64x2_extend_low_i32x4_u + | 
"i64x2.extend_low_i32x4_s" -> VEC_UNARY i64x2_extend_low_i32x4_s + | "i64x2.extend_high_i32x4_u" -> VEC_UNARY i64x2_extend_high_i32x4_u + | "i64x2.extend_high_i32x4_s" -> VEC_UNARY i64x2_extend_high_i32x4_s + | "i16x8.extmul_low_i8x16_u" -> VEC_UNARY i16x8_extmul_low_i8x16_u + | "i16x8.extmul_low_i8x16_s" -> VEC_UNARY i16x8_extmul_low_i8x16_s + | "i16x8.extmul_high_i8x16_u" -> VEC_UNARY i16x8_extmul_high_i8x16_u + | "i16x8.extmul_high_i8x16_s" -> VEC_UNARY i16x8_extmul_high_i8x16_s + | "i32x4.extmul_low_i16x8_u" -> VEC_UNARY i32x4_extmul_low_i16x8_u + | "i32x4.extmul_low_i16x8_s" -> VEC_UNARY i32x4_extmul_low_i16x8_s + | "i32x4.extmul_high_i16x8_u" -> VEC_UNARY i32x4_extmul_high_i16x8_u + | "i32x4.extmul_high_i16x8_s" -> VEC_UNARY i32x4_extmul_high_i16x8_s + | "i64x2.extmul_low_i32x4_u" -> VEC_UNARY i64x2_extmul_low_i32x4_u + | "i64x2.extmul_low_i32x4_s" -> VEC_UNARY i64x2_extmul_low_i32x4_s + | "i64x2.extmul_high_i32x4_u" -> VEC_UNARY i64x2_extmul_high_i32x4_u + | "i64x2.extmul_high_i32x4_s" -> VEC_UNARY i64x2_extmul_high_i32x4_s + + | "i8x16.all_true" -> VEC_TEST i8x16_all_true + | "i16x8.all_true" -> VEC_TEST i16x8_all_true + | "i32x4.all_true" -> VEC_TEST i32x4_all_true + | "i64x2.all_true" -> VEC_TEST i64x2_all_true + | "i8x16.bitmask" -> VEC_BITMASK i8x16_bitmask + | "i16x8.bitmask" -> VEC_BITMASK i16x8_bitmask + | "i32x4.bitmask" -> VEC_BITMASK i32x4_bitmask + | "i64x2.bitmask" -> VEC_BITMASK i64x2_bitmask + | "i8x16.shl" -> VEC_SHIFT i8x16_shl + | "i16x8.shl" -> VEC_SHIFT i16x8_shl + | "i32x4.shl" -> VEC_SHIFT i32x4_shl + | "i64x2.shl" -> VEC_SHIFT i64x2_shl + | "i8x16.shr_u" -> VEC_SHIFT i8x16_shr_u + | "i8x16.shr_s" -> VEC_SHIFT i8x16_shr_s + | "i16x8.shr_u" -> VEC_SHIFT i16x8_shr_u + | "i16x8.shr_s" -> VEC_SHIFT i16x8_shr_s + | "i32x4.shr_u" -> VEC_SHIFT i32x4_shr_u + | "i32x4.shr_s" -> VEC_SHIFT i32x4_shr_s + | "i64x2.shr_u" -> VEC_SHIFT i64x2_shr_u + | "i64x2.shr_s" -> VEC_SHIFT i64x2_shr_s + | "i8x16.shuffle" -> VEC_SHUFFLE + + | "i8x16.splat" -> 
VEC_SPLAT i8x16_splat + | "i16x8.splat" -> VEC_SPLAT i16x8_splat + | "i32x4.splat" -> VEC_SPLAT i32x4_splat + | "i64x2.splat" -> VEC_SPLAT i64x2_splat + | "f32x4.splat" -> VEC_SPLAT f32x4_splat + | "f64x2.splat" -> VEC_SPLAT f64x2_splat + | "i8x16.extract_lane_u" -> VEC_EXTRACT i8x16_extract_lane_u + | "i8x16.extract_lane_s" -> VEC_EXTRACT i8x16_extract_lane_s + | "i16x8.extract_lane_u" -> VEC_EXTRACT i16x8_extract_lane_u + | "i16x8.extract_lane_s" -> VEC_EXTRACT i16x8_extract_lane_s + | "i32x4.extract_lane" -> VEC_EXTRACT i32x4_extract_lane + | "i64x2.extract_lane" -> VEC_EXTRACT i64x2_extract_lane + | "f32x4.extract_lane" -> VEC_EXTRACT f32x4_extract_lane + | "f64x2.extract_lane" -> VEC_EXTRACT f64x2_extract_lane + | "i8x16.replace_lane" -> VEC_REPLACE i8x16_replace_lane + | "i16x8.replace_lane" -> VEC_REPLACE i16x8_replace_lane + | "i32x4.replace_lane" -> VEC_REPLACE i32x4_replace_lane + | "i64x2.replace_lane" -> VEC_REPLACE i64x2_replace_lane + | "f32x4.replace_lane" -> VEC_REPLACE f32x4_replace_lane + | "f64x2.replace_lane" -> VEC_REPLACE f64x2_replace_lane + + | "type" -> TYPE + | "func" -> FUNC + | "param" -> PARAM + | "result" -> RESULT + | "start" -> START + | "local" -> LOCAL + | "global" -> GLOBAL + | "table" -> TABLE + | "memory" -> MEMORY + | "elem" -> ELEM + | "data" -> DATA + | "declare" -> DECLARE + | "offset" -> OFFSET + | "item" -> ITEM + | "import" -> IMPORT + | "export" -> EXPORT + + | "module" -> MODULE + | "binary" -> BIN + | "quote" -> QUOTE + + | "script" -> SCRIPT + | "register" -> REGISTER + | "invoke" -> INVOKE + | "get" -> GET + | "assert_malformed" -> ASSERT_MALFORMED + | "assert_invalid" -> ASSERT_INVALID + | "assert_unlinkable" -> ASSERT_UNLINKABLE + | "assert_return" -> ASSERT_RETURN + | "assert_trap" -> ASSERT_TRAP + | "assert_exhaustion" -> ASSERT_EXHAUSTION + | "nan:canonical" -> NAN Script.CanonicalNan + | "nan:arithmetic" -> NAN Script.ArithmeticNan + | "input" -> INPUT + | "output" -> OUTPUT + + | _ -> unknown lexbuf } - | 
"ref.null" { REF_NULL } - | "ref.func" { REF_FUNC } - | "ref.extern" { REF_EXTERN } - | "ref.is_null" { REF_IS_NULL } - - | "nop" { NOP } - | "unreachable" { UNREACHABLE } - | "drop" { DROP } - | "block" { BLOCK } - | "loop" { LOOP } - | "end" { END } - | "br" { BR } - | "br_if" { BR_IF } - | "br_table" { BR_TABLE } - | "return" { RETURN } - | "if" { IF } - | "then" { THEN } - | "else" { ELSE } - | "select" { SELECT } - | "call" { CALL } - | "call_indirect" { CALL_INDIRECT } - - | "local.get" { LOCAL_GET } - | "local.set" { LOCAL_SET } - | "local.tee" { LOCAL_TEE } - | "global.get" { GLOBAL_GET } - | "global.set" { GLOBAL_SET } - - | "table.get" { TABLE_GET } - | "table.set" { TABLE_SET } - | "table.size" { TABLE_SIZE } - | "table.grow" { TABLE_GROW } - | "table.fill" { TABLE_FILL } - | "table.copy" { TABLE_COPY } - | "table.init" { TABLE_INIT } - | "elem.drop" { ELEM_DROP } - - | "memory.size" { MEMORY_SIZE } - | "memory.grow" { MEMORY_GROW } - | "memory.fill" { MEMORY_FILL } - | "memory.copy" { MEMORY_COPY } - | "memory.init" { MEMORY_INIT } - | "data.drop" { DATA_DROP } - - | (nxx as t)".load" - { LOAD (fun a o -> - numop t (i32_load (opt a 2)) (i64_load (opt a 3)) - (f32_load (opt a 2)) (f64_load (opt a 3)) o) } - | (nxx as t)".store" - { STORE (fun a o -> - numop t (i32_store (opt a 2)) (i64_store (opt a 3)) - (f32_store (opt a 2)) (f64_store (opt a 3)) o) } - | (ixx as t)".load"(mem_size as sz)"_"(sign as s) - { if t = "i32" && sz = "32" then unknown lexbuf; - LOAD (fun a o -> - intop t - (memsz sz - (ext s i32_load8_s i32_load8_u (opt a 0)) - (ext s i32_load16_s i32_load16_u (opt a 1)) - (fun _ -> unreachable) o) - (memsz sz - (ext s i64_load8_s i64_load8_u (opt a 0)) - (ext s i64_load16_s i64_load16_u (opt a 1)) - (ext s i64_load32_s i64_load32_u (opt a 2)) o)) } - | (ixx as t)".store"(mem_size as sz) - { if t = "i32" && sz = "32" then unknown lexbuf; - STORE (fun a o -> - intop t - (memsz sz - (i32_store8 (opt a 0)) - (i32_store16 (opt a 1)) - (fun _ -> 
unreachable) o) - (memsz sz - (i64_store8 (opt a 0)) - (i64_store16 (opt a 1)) - (i64_store32 (opt a 2)) o)) } - | "v128.load" - { VEC_LOAD (fun a o -> (v128_load (opt a 4)) o) } - | "v128.store" - { VEC_STORE (fun a o -> (v128_store (opt a 4)) o) } - | "v128.load8x8_"(sign as s) - { VEC_LOAD (fun a o -> (ext s v128_load8x8_s v128_load8x8_u (opt a 3)) o) } - | "v128.load16x4_"(sign as s) - { VEC_LOAD (fun a o -> (ext s v128_load16x4_s v128_load16x4_u (opt a 3)) o) } - | "v128.load32x2_"(sign as s) - { VEC_LOAD (fun a o -> (ext s v128_load32x2_s v128_load32x2_u (opt a 3)) o) } - | "v128.load8_splat" - { VEC_LOAD (fun a o -> (v128_load8_splat (opt a 0)) o) } - | "v128.load16_splat" - { VEC_LOAD (fun a o -> (v128_load16_splat (opt a 1)) o) } - | "v128.load32_splat" - { VEC_LOAD (fun a o -> (v128_load32_splat (opt a 2)) o) } - | "v128.load64_splat" - { VEC_LOAD (fun a o -> (v128_load64_splat (opt a 3)) o) } - | "v128.load32_zero" - { VEC_LOAD (fun a o -> (v128_load32_zero (opt a 2)) o) } - | "v128.load64_zero" - { VEC_LOAD (fun a o -> (v128_load64_zero (opt a 3)) o) } - | "v128.load8_lane" - { VEC_LOAD_LANE (fun a o i -> (v128_load8_lane (opt a 0)) o i) } - | "v128.load16_lane" - { VEC_LOAD_LANE (fun a o i -> (v128_load16_lane (opt a 1)) o i) } - | "v128.load32_lane" - { VEC_LOAD_LANE (fun a o i -> (v128_load32_lane (opt a 2)) o i) } - | "v128.load64_lane" - { VEC_LOAD_LANE (fun a o i -> (v128_load64_lane (opt a 3)) o i) } - | "v128.store8_lane" - { VEC_STORE_LANE (fun a o i -> (v128_store8_lane (opt a 0)) o i) } - | "v128.store16_lane" - { VEC_STORE_LANE (fun a o i -> (v128_store16_lane (opt a 1)) o i) } - | "v128.store32_lane" - { VEC_STORE_LANE (fun a o i -> (v128_store32_lane (opt a 2)) o i) } - | "v128.store64_lane" - { VEC_STORE_LANE (fun a o i -> (v128_store64_lane (opt a 3)) o i) } | "offset="(nat as s) { OFFSET_EQ_NAT s } | "align="(nat as s) { ALIGN_EQ_NAT s } - | (ixx as t)".clz" { UNARY (intop t i32_clz i64_clz) } - | (ixx as t)".ctz" { UNARY (intop t 
i32_ctz i64_ctz) } - | (ixx as t)".popcnt" { UNARY (intop t i32_popcnt i64_popcnt) } - | (ixx as t)".extend8_s" { UNARY (intop t i32_extend8_s i64_extend8_s) } - | (ixx as t)".extend16_s" { UNARY (intop t i32_extend16_s i64_extend16_s) } - | "i64.extend32_s" { UNARY i64_extend32_s } - | (fxx as t)".neg" { UNARY (floatop t f32_neg f64_neg) } - | (fxx as t)".abs" { UNARY (floatop t f32_abs f64_abs) } - | (fxx as t)".sqrt" { UNARY (floatop t f32_sqrt f64_sqrt) } - | (fxx as t)".ceil" { UNARY (floatop t f32_ceil f64_ceil) } - | (fxx as t)".floor" { UNARY (floatop t f32_floor f64_floor) } - | (fxx as t)".trunc" { UNARY (floatop t f32_trunc f64_trunc) } - | (fxx as t)".nearest" { UNARY (floatop t f32_nearest f64_nearest) } - - | (ixx as t)".add" { BINARY (intop t i32_add i64_add) } - | (ixx as t)".sub" { BINARY (intop t i32_sub i64_sub) } - | (ixx as t)".mul" { BINARY (intop t i32_mul i64_mul) } - | (ixx as t)".div_s" { BINARY (intop t i32_div_s i64_div_s) } - | (ixx as t)".div_u" { BINARY (intop t i32_div_u i64_div_u) } - | (ixx as t)".rem_s" { BINARY (intop t i32_rem_s i64_rem_s) } - | (ixx as t)".rem_u" { BINARY (intop t i32_rem_u i64_rem_u) } - | (ixx as t)".and" { BINARY (intop t i32_and i64_and) } - | (ixx as t)".or" { BINARY (intop t i32_or i64_or) } - | (ixx as t)".xor" { BINARY (intop t i32_xor i64_xor) } - | (ixx as t)".shl" { BINARY (intop t i32_shl i64_shl) } - | (ixx as t)".shr_s" { BINARY (intop t i32_shr_s i64_shr_s) } - | (ixx as t)".shr_u" { BINARY (intop t i32_shr_u i64_shr_u) } - | (ixx as t)".rotl" { BINARY (intop t i32_rotl i64_rotl) } - | (ixx as t)".rotr" { BINARY (intop t i32_rotr i64_rotr) } - | (fxx as t)".add" { BINARY (floatop t f32_add f64_add) } - | (fxx as t)".sub" { BINARY (floatop t f32_sub f64_sub) } - | (fxx as t)".mul" { BINARY (floatop t f32_mul f64_mul) } - | (fxx as t)".div" { BINARY (floatop t f32_div f64_div) } - | (fxx as t)".min" { BINARY (floatop t f32_min f64_min) } - | (fxx as t)".max" { BINARY (floatop t f32_max f64_max) } - 
| (fxx as t)".copysign" { BINARY (floatop t f32_copysign f64_copysign) } - - | (ixx as t)".eqz" { TEST (intop t i32_eqz i64_eqz) } - - | (ixx as t)".eq" { COMPARE (intop t i32_eq i64_eq) } - | (ixx as t)".ne" { COMPARE (intop t i32_ne i64_ne) } - | (ixx as t)".lt_s" { COMPARE (intop t i32_lt_s i64_lt_s) } - | (ixx as t)".lt_u" { COMPARE (intop t i32_lt_u i64_lt_u) } - | (ixx as t)".le_s" { COMPARE (intop t i32_le_s i64_le_s) } - | (ixx as t)".le_u" { COMPARE (intop t i32_le_u i64_le_u) } - | (ixx as t)".gt_s" { COMPARE (intop t i32_gt_s i64_gt_s) } - | (ixx as t)".gt_u" { COMPARE (intop t i32_gt_u i64_gt_u) } - | (ixx as t)".ge_s" { COMPARE (intop t i32_ge_s i64_ge_s) } - | (ixx as t)".ge_u" { COMPARE (intop t i32_ge_u i64_ge_u) } - | (fxx as t)".eq" { COMPARE (floatop t f32_eq f64_eq) } - | (fxx as t)".ne" { COMPARE (floatop t f32_ne f64_ne) } - | (fxx as t)".lt" { COMPARE (floatop t f32_lt f64_lt) } - | (fxx as t)".le" { COMPARE (floatop t f32_le f64_le) } - | (fxx as t)".gt" { COMPARE (floatop t f32_gt f64_gt) } - | (fxx as t)".ge" { COMPARE (floatop t f32_ge f64_ge) } - - | "i32.wrap_i64" { CONVERT i32_wrap_i64 } - | "i64.extend_i32_s" { CONVERT i64_extend_i32_s } - | "i64.extend_i32_u" { CONVERT i64_extend_i32_u } - | "f32.demote_f64" { CONVERT f32_demote_f64 } - | "f64.promote_f32" { CONVERT f64_promote_f32 } - | (ixx as t)".trunc_f32_s" - { CONVERT (intop t i32_trunc_f32_s i64_trunc_f32_s) } - | (ixx as t)".trunc_f32_u" - { CONVERT (intop t i32_trunc_f32_u i64_trunc_f32_u) } - | (ixx as t)".trunc_f64_s" - { CONVERT (intop t i32_trunc_f64_s i64_trunc_f64_s) } - | (ixx as t)".trunc_f64_u" - { CONVERT (intop t i32_trunc_f64_u i64_trunc_f64_u) } - | (ixx as t)".trunc_sat_f32_s" - { CONVERT (intop t i32_trunc_sat_f32_s i64_trunc_sat_f32_s) } - | (ixx as t)".trunc_sat_f32_u" - { CONVERT (intop t i32_trunc_sat_f32_u i64_trunc_sat_f32_u) } - | (ixx as t)".trunc_sat_f64_s" - { CONVERT (intop t i32_trunc_sat_f64_s i64_trunc_sat_f64_s) } - | (ixx as 
t)".trunc_sat_f64_u" - { CONVERT (intop t i32_trunc_sat_f64_u i64_trunc_sat_f64_u) } - | (fxx as t)".convert_i32_s" - { CONVERT (floatop t f32_convert_i32_s f64_convert_i32_s) } - | (fxx as t)".convert_i32_u" - { CONVERT (floatop t f32_convert_i32_u f64_convert_i32_u) } - | (fxx as t)".convert_i64_s" - { CONVERT (floatop t f32_convert_i64_s f64_convert_i64_s) } - | (fxx as t)".convert_i64_u" - { CONVERT (floatop t f32_convert_i64_u f64_convert_i64_u) } - | "f32.reinterpret_i32" { CONVERT f32_reinterpret_i32 } - | "f64.reinterpret_i64" { CONVERT f64_reinterpret_i64 } - | "i32.reinterpret_f32" { CONVERT i32_reinterpret_f32 } - | "i64.reinterpret_f64" { CONVERT i64_reinterpret_f64 } - - | "type" { TYPE } - | "func" { FUNC } - | "start" { START } - | "param" { PARAM } - | "result" { RESULT } - | "local" { LOCAL } - | "global" { GLOBAL } - | "table" { TABLE } - | "memory" { MEMORY } - | "elem" { ELEM } - | "data" { DATA } - | "declare" { DECLARE } - | "offset" { OFFSET } - | "item" { ITEM } - | "import" { IMPORT } - | "export" { EXPORT } - - | "module" { MODULE } - | "binary" { BIN } - | "quote" { QUOTE } - - | "script" { SCRIPT } - | "register" { REGISTER } - | "invoke" { INVOKE } - | "get" { GET } - | "assert_malformed" { ASSERT_MALFORMED } - | "assert_invalid" { ASSERT_INVALID } - | "assert_unlinkable" { ASSERT_UNLINKABLE } - | "assert_return" { ASSERT_RETURN } - | "assert_trap" { ASSERT_TRAP } - | "assert_exhaustion" { ASSERT_EXHAUSTION } - | "nan:canonical" { NAN Script.CanonicalNan } - | "nan:arithmetic" { NAN Script.ArithmeticNan } - | "input" { INPUT } - | "output" { OUTPUT } - - | vxxx".not" { VEC_UNARY v128_not } - | vxxx".and" { VEC_UNARY v128_and } - | vxxx".andnot" { VEC_UNARY v128_andnot } - | vxxx".or" { VEC_UNARY v128_or } - | vxxx".xor" { VEC_UNARY v128_xor } - | vxxx".bitselect" { VEC_TERNARY v128_bitselect } - | vxxx".any_true" { VEC_TEST (v128_any_true) } - - | (v128_shape as s)".neg" - { VEC_UNARY - (v128op s i8x16_neg i16x8_neg i32x4_neg i64x2_neg 
f32x4_neg f64x2_neg) } - | (v128_float_shape as s)".sqrt" - { VEC_UNARY (v128floatop s f32x4_sqrt f64x2_sqrt) } - | (v128_float_shape as s)".ceil" - { VEC_UNARY (v128floatop s f32x4_ceil f64x2_ceil) } - | (v128_float_shape as s)".floor" - { VEC_UNARY (v128floatop s f32x4_floor f64x2_floor) } - | (v128_float_shape as s)".trunc" - { VEC_UNARY (v128floatop s f32x4_trunc f64x2_trunc) } - | (v128_float_shape as s)".nearest" - { VEC_UNARY (v128floatop s f32x4_nearest f64x2_nearest) } - | (v128_shape as s)".abs" - { VEC_UNARY - (v128op s i8x16_abs i16x8_abs i32x4_abs i64x2_abs f32x4_abs f64x2_abs) } - | "i8x16.popcnt" { VEC_UNARY i8x16_popcnt } - | (v128_int_shape as s)".avgr_u" - { only ["i8x16"; "i16x8"] s lexbuf; - VEC_UNARY (v128intop s i8x16_avgr_u i16x8_avgr_u unreachable unreachable) } - | "i32x4.trunc_sat_f32x4_"(sign as s) - { VEC_UNARY (ext s i32x4_trunc_sat_f32x4_s i32x4_trunc_sat_f32x4_u) } - | "i32x4.trunc_sat_f64x2_"(sign as s)"_zero" - { VEC_UNARY (ext s i32x4_trunc_sat_f64x2_s_zero i32x4_trunc_sat_f64x2_u_zero) } - | "f64x2.promote_low_f32x4" - { VEC_UNARY f64x2_promote_low_f32x4 } - | "f32x4.demote_f64x2_zero" - { VEC_UNARY f32x4_demote_f64x2_zero } - | "f32x4.convert_i32x4_"(sign as s) - { VEC_UNARY (ext s f32x4_convert_i32x4_s f32x4_convert_i32x4_u) } - | "f64x2.convert_low_i32x4_"(sign as s) - { VEC_UNARY (ext s f64x2_convert_low_i32x4_s f64x2_convert_low_i32x4_u) } - | "i16x8.extadd_pairwise_i8x16_"(sign as s) - { VEC_UNARY (ext s i16x8_extadd_pairwise_i8x16_s i16x8_extadd_pairwise_i8x16_u) } - | "i32x4.extadd_pairwise_i16x8_"(sign as s) - { VEC_UNARY (ext s i32x4_extadd_pairwise_i16x8_s i32x4_extadd_pairwise_i16x8_u) } - - | (v128_shape as s)".eq" - { VEC_BINARY (v128op s i8x16_eq i16x8_eq i32x4_eq i64x2_eq f32x4_eq f64x2_eq) } - | (v128_shape as s)".ne" - { VEC_BINARY (v128op s i8x16_ne i16x8_ne i32x4_ne i64x2_ne f32x4_ne f64x2_ne) } - | (v128_int_shape as s)".lt_s" - { VEC_BINARY (v128intop s i8x16_lt_s i16x8_lt_s i32x4_lt_s i64x2_lt_s) } - | 
(v128_int_shape as s)".lt_u" - { except ["i64x2"] s lexbuf; - VEC_BINARY (v128intop s i8x16_lt_u i16x8_lt_u i32x4_lt_u unreachable) } - | (v128_int_shape as s)".le_s" - { VEC_BINARY (v128intop s i8x16_le_s i16x8_le_s i32x4_le_s i64x2_le_s) } - | (v128_int_shape as s)".le_u" - { except ["i64x2"] s lexbuf; - VEC_BINARY (v128intop s i8x16_le_u i16x8_le_u i32x4_le_u unreachable) } - | (v128_int_shape as s)".gt_s" - { VEC_BINARY (v128intop s i8x16_gt_s i16x8_gt_s i32x4_gt_s i64x2_gt_s) } - | (v128_int_shape as s)".gt_u" - { except ["i64x2"] s lexbuf; - VEC_BINARY (v128intop s i8x16_gt_u i16x8_gt_u i32x4_gt_u unreachable) } - | (v128_int_shape as s)".ge_s" - { VEC_BINARY (v128intop s i8x16_ge_s i16x8_ge_s i32x4_ge_s i64x2_ge_s) } - | (v128_int_shape as s)".ge_u" - { except ["i64x2"] s lexbuf; - VEC_BINARY (v128intop s i8x16_ge_u i16x8_ge_u i32x4_ge_u unreachable) } - | (v128_float_shape as s)".lt" { VEC_BINARY (v128floatop s f32x4_lt f64x2_lt) } - | (v128_float_shape as s)".le" { VEC_BINARY (v128floatop s f32x4_le f64x2_le) } - | (v128_float_shape as s)".gt" { VEC_BINARY (v128floatop s f32x4_gt f64x2_gt) } - | (v128_float_shape as s)".ge" { VEC_BINARY (v128floatop s f32x4_ge f64x2_ge) } - | "i8x16.swizzle" { VEC_BINARY i8x16_swizzle } - - | (v128_shape as s)".add" - { VEC_BINARY - (v128op s i8x16_add i16x8_add i32x4_add i64x2_add f32x4_add f64x2_add) } - | (v128_shape as s)".sub" - { VEC_BINARY - (v128op s i8x16_sub i16x8_sub i32x4_sub i64x2_sub f32x4_sub f64x2_sub) } - | (v128_shape as s)".min_s" - { only ["i8x16"; "i16x8"; "i32x4"] s lexbuf; - VEC_BINARY - (v128op s i8x16_min_s i16x8_min_s i32x4_min_s unreachable - unreachable unreachable) } - | (v128_shape as s)".min_u" - { only ["i8x16"; "i16x8"; "i32x4"] s lexbuf; - VEC_BINARY - (v128op s i8x16_min_u i16x8_min_u i32x4_min_u unreachable - unreachable unreachable) } - | (v128_shape as s)".max_s" - { only ["i8x16"; "i16x8"; "i32x4"] s lexbuf; - VEC_BINARY - (v128op s i8x16_max_s i16x8_max_s i32x4_max_s unreachable - 
unreachable unreachable) } - | (v128_shape as s)".max_u" - { only ["i8x16"; "i16x8"; "i32x4"] s lexbuf; - VEC_BINARY - (v128op s i8x16_max_u i16x8_max_u i32x4_max_u unreachable - unreachable unreachable) } - | (v128_shape as s)".mul" - { only ["i16x8"; "i32x4"; "i64x2"; "f32x4"; "f64x2"] s lexbuf; - VEC_BINARY - (v128op s unreachable i16x8_mul i32x4_mul i64x2_mul f32x4_mul f64x2_mul) } - | (v128_float_shape as s)".div" - { VEC_BINARY (v128floatop s f32x4_div f64x2_div) } - | (v128_float_shape as s)".min" - { VEC_BINARY (v128floatop s f32x4_min f64x2_min) } - | (v128_float_shape as s)".max" - { VEC_BINARY (v128floatop s f32x4_max f64x2_max) } - | (v128_float_shape as s)".pmin" - { VEC_BINARY (v128floatop s f32x4_pmin f64x2_pmin) } - | (v128_float_shape as s)".pmax" - { VEC_BINARY (v128floatop s f32x4_pmax f64x2_pmax) } - | "i8x16.add_sat_"(sign as s) - { VEC_BINARY (ext s i8x16_add_sat_s i8x16_add_sat_u) } - | "i8x16.sub_sat_"(sign as s) - { VEC_BINARY (ext s i8x16_sub_sat_s i8x16_sub_sat_u) } - | "i16x8.add_sat_"(sign as s) - { VEC_BINARY (ext s i16x8_add_sat_s i16x8_add_sat_u) } - | "i16x8.sub_sat_"(sign as s) - { VEC_BINARY (ext s i16x8_sub_sat_s i16x8_sub_sat_u) } - | "i32x4.dot_i16x8_s" - { VEC_BINARY i32x4_dot_i16x8_s } - | "i8x16.narrow_i16x8_"(sign as s) - { VEC_BINARY (ext s i8x16_narrow_i16x8_s i8x16_narrow_i16x8_u) } - | "i16x8.narrow_i32x4_"(sign as s) - { VEC_BINARY (ext s i16x8_narrow_i32x4_s i16x8_narrow_i32x4_u) } - | "i16x8.extend_low_i8x16_"(sign as s) - { VEC_UNARY (ext s i16x8_extend_low_i8x16_s i16x8_extend_low_i8x16_u) } - | "i16x8.extend_high_i8x16_"(sign as s) - { VEC_UNARY (ext s i16x8_extend_high_i8x16_s i16x8_extend_high_i8x16_u) } - | "i32x4.extend_low_i16x8_"(sign as s) - { VEC_UNARY (ext s i32x4_extend_low_i16x8_s i32x4_extend_low_i16x8_u) } - | "i32x4.extend_high_i16x8_"(sign as s) - { VEC_UNARY (ext s i32x4_extend_high_i16x8_s i32x4_extend_high_i16x8_u) } - | "i64x2.extend_low_i32x4_"(sign as s) - { VEC_UNARY (ext s 
i64x2_extend_low_i32x4_s i64x2_extend_low_i32x4_u) } - | "i64x2.extend_high_i32x4_"(sign as s) - { VEC_UNARY (ext s i64x2_extend_high_i32x4_s i64x2_extend_high_i32x4_u) } - | "i16x8.extmul_low_i8x16_"(sign as s) - { VEC_BINARY (ext s i16x8_extmul_low_i8x16_s i16x8_extmul_low_i8x16_u) } - | "i16x8.extmul_high_i8x16_"(sign as s) - { VEC_BINARY (ext s i16x8_extmul_high_i8x16_s i16x8_extmul_high_i8x16_u) } - | "i32x4.extmul_low_i16x8_"(sign as s) - { VEC_BINARY (ext s i32x4_extmul_low_i16x8_s i32x4_extmul_low_i16x8_u) } - | "i32x4.extmul_high_i16x8_"(sign as s) - { VEC_BINARY (ext s i32x4_extmul_high_i16x8_s i32x4_extmul_high_i16x8_u) } - | "i64x2.extmul_low_i32x4_"(sign as s) - { VEC_BINARY (ext s i64x2_extmul_low_i32x4_s i64x2_extmul_low_i32x4_u) } - | "i64x2.extmul_high_i32x4_"(sign as s) - { VEC_BINARY (ext s i64x2_extmul_high_i32x4_s i64x2_extmul_high_i32x4_u) } - | "i16x8.q15mulr_sat_s" - { VEC_BINARY i16x8_q15mulr_sat_s } - - | (v128_int_shape as s)".all_true" - { VEC_TEST - (v128intop s i8x16_all_true i16x8_all_true i32x4_all_true i64x2_all_true) } - | (v128_int_shape as s)".bitmask" - { VEC_BITMASK - (v128intop s i8x16_bitmask i16x8_bitmask i32x4_bitmask i64x2_bitmask) } - | (v128_int_shape as s)".shl" - { VEC_SHIFT (v128intop s i8x16_shl i16x8_shl i32x4_shl i64x2_shl) } - | (v128_int_shape as s)".shr_s" - { VEC_SHIFT (v128intop s i8x16_shr_s i16x8_shr_s i32x4_shr_s i64x2_shr_s) } - | (v128_int_shape as s)".shr_u" - { VEC_SHIFT (v128intop s i8x16_shr_u i16x8_shr_u i32x4_shr_u i64x2_shr_u) } - | "i8x16.shuffle" { VEC_SHUFFLE } - - | (v128_shape as s)".splat" - { VEC_SPLAT (v128op s i8x16_splat i16x8_splat i32x4_splat - i64x2_splat f32x4_splat f64x2_splat) } - | (v128_shape as s)".extract_lane" - { except ["i8x16"; "i16x8"] s lexbuf; - VEC_EXTRACT (fun i -> - v128op s - (fun _ -> unreachable) (fun _ -> unreachable) - i32x4_extract_lane i64x2_extract_lane - f32x4_extract_lane f64x2_extract_lane i) } - | (("i8x16"|"i16x8") as t)".extract_lane_"(sign as s) - { 
VEC_EXTRACT (fun i -> - if t = "i8x16" - then ext s i8x16_extract_lane_s i8x16_extract_lane_u i - else ext s i16x8_extract_lane_s i16x8_extract_lane_u i )} - | (v128_shape as s)".replace_lane" - { VEC_REPLACE - (v128op s i8x16_replace_lane i16x8_replace_lane i32x4_replace_lane - i64x2_replace_lane f32x4_replace_lane f64x2_replace_lane) } - - | name as s { VAR s } + | id as s { VAR s } | ";;"utf8_no_nl*eof { EOF } | ";;"utf8_no_nl*'\n' { Lexing.new_line lexbuf; token lexbuf }