From e39c6b76f9f01b5a5383a62a2a4a12997416cbef Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 7 Nov 2023 10:47:16 -0800 Subject: [PATCH] Cranelift: Fix union node bitpacking (#7465) * Cranelift: Fix union node bitpacking It turns out we have just been taking the newest rewrite's value for an eclass union and never actually comparing costs and taking the value with the minimum cost. Whoops! Fixing this made some test expectations fail, which we resolved by tweaking the cost function to give materializing constants nonzero cost. This way we prefer `-x` to `0 - x`. We also made the elaboration function break ties between values with the same cost with the value index. It prefers larger value indices, since the original value's index will be lower than all of its rewritten values' indices. This heuristically prefers rewritten values because we hope our rewrites are all improvements even when the cost function can't show that. Co-Authored-By: Chris Fallin Co-Authored-By: Trevor Elliott * Add more information to assertion message * Fix off-by-one bug in assertion * Limit number of matches consumed from ISLE We generally want to clamp down and avoid runaway behavior here. But there also seems to be some sort of rustc/llvm bug on Rust 1.71 that is causing iteration to run wild here. This commit avoids that bug. 
* Update test expectation * prtest:full --------- Co-authored-by: Chris Fallin Co-authored-by: Trevor Elliott --- cranelift/codegen/src/egraph.rs | 16 ++++ cranelift/codegen/src/egraph/cost.rs | 11 ++- cranelift/codegen/src/egraph/elaborate.rs | 34 +++++-- cranelift/codegen/src/ir/dfg.rs | 11 ++- .../filetests/egraph/arithmetic.clif | 8 +- .../filetests/egraph/cprop-splat.clif | 11 --- .../filetests/egraph/icmp-parameterized.clif | 92 ------------------- .../filetests/filetests/egraph/icmp.clif | 3 - .../filetests/filetests/egraph/shifts.clif | 6 +- .../filetests/filetests/wasm/byteswap.wat | 2 - cranelift/isle/isle/src/codegen.rs | 4 + 11 files changed, 70 insertions(+), 128 deletions(-) diff --git a/cranelift/codegen/src/egraph.rs b/cranelift/codegen/src/egraph.rs index b54c1e7b63d3..dab240c3a925 100644 --- a/cranelift/codegen/src/egraph.rs +++ b/cranelift/codegen/src/egraph.rs @@ -230,6 +230,10 @@ where return orig_value; } isle_ctx.ctx.rewrite_depth += 1; + trace!( + "Incrementing rewrite depth; now {}", + isle_ctx.ctx.rewrite_depth + ); // Invoke the ISLE toplevel constructor, getting all new // values produced as equivalents to this value. @@ -237,12 +241,24 @@ where isle_ctx.ctx.stats.rewrite_rule_invoked += 1; let mut optimized_values = crate::opts::generated_code::constructor_simplify(&mut isle_ctx, orig_value); + trace!( + " -> returned from ISLE, optimized values's size hint = {:?}", + optimized_values.size_hint() + ); // Create a union of all new values with the original (or // maybe just one new value marked as "subsuming" the // original, if present.) 
+ let mut i = 0; let mut union_value = orig_value; while let Some(optimized_value) = optimized_values.next(&mut isle_ctx) { + i += 1; + const MATCHES_LIMIT: u32 = 5; + if i > MATCHES_LIMIT { + trace!("Reached maximum matches limit; too many optimized values, ignoring rest."); + break; + } + trace!( "Returned from ISLE for {}, got {:?}", orig_value, diff --git a/cranelift/codegen/src/egraph/cost.rs b/cranelift/codegen/src/egraph/cost.rs index bc807df02dd1..34ac26e1cd5a 100644 --- a/cranelift/codegen/src/egraph/cost.rs +++ b/cranelift/codegen/src/egraph/cost.rs @@ -70,11 +70,13 @@ impl std::ops::Add for Cost { pub(crate) fn pure_op_cost(op: Opcode) -> Cost { match op { // Constants. - Opcode::Iconst | Opcode::F32const | Opcode::F64const => Cost(0), + Opcode::Iconst | Opcode::F32const | Opcode::F64const => Cost(1), + // Extends/reduces. Opcode::Uextend | Opcode::Sextend | Opcode::Ireduce | Opcode::Iconcat | Opcode::Isplit => { - Cost(1) + Cost(2) } + // "Simple" arithmetic. Opcode::Iadd | Opcode::Isub @@ -84,8 +86,9 @@ pub(crate) fn pure_op_cost(op: Opcode) -> Cost { | Opcode::Bnot | Opcode::Ishl | Opcode::Ushr - | Opcode::Sshr => Cost(2), + | Opcode::Sshr => Cost(3), + // Everything else (pure.) - _ => Cost(3), + _ => Cost(4), } } diff --git a/cranelift/codegen/src/egraph/elaborate.rs b/cranelift/codegen/src/egraph/elaborate.rs index 26c7378b2c7f..18e3629a5f03 100644 --- a/cranelift/codegen/src/egraph/elaborate.rs +++ b/cranelift/codegen/src/egraph/elaborate.rs @@ -42,7 +42,7 @@ pub(crate) struct Elaborator<'a> { value_to_elaborated_value: ScopedHashMap, /// Map from Value to the best (lowest-cost) Value in its eclass /// (tree of union value-nodes). - value_to_best_value: SecondaryMap, + value_to_best_value: SecondaryMap, /// Stack of blocks and loops in current elaboration path. loop_stack: SmallVec<[LoopStackEntry; 8]>, /// The current block into which we are elaborating. 
@@ -64,6 +64,28 @@ pub(crate) struct Elaborator<'a> { stats: &'a mut Stats, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +struct BestEntry(Cost, Value); + +impl PartialOrd for BestEntry { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BestEntry { + #[inline] + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0).then_with(|| { + // Note that this comparison is reversed. When costs are equal, + // prefer the value with the bigger index. This is a heuristic that + // prefers results of rewrites to the original value, since we + // expect that our rewrites are generally improvements. + self.1.cmp(&other.1).reverse() + }) + } +} + #[derive(Clone, Copy, Debug)] struct ElaboratedValue { in_block: Block, @@ -120,7 +142,7 @@ impl<'a> Elaborator<'a> { ) -> Self { let num_values = func.dfg.num_values(); let mut value_to_best_value = - SecondaryMap::with_default((Cost::infinity(), Value::reserved_value())); + SecondaryMap::with_default(BestEntry(Cost::infinity(), Value::reserved_value())); value_to_best_value.resize(num_values); Self { func, @@ -208,7 +230,7 @@ impl<'a> Elaborator<'a> { trace!(" -> {:?}", best[value]); } ValueDef::Param(_, _) => { - best[value] = (Cost::zero(), value); + best[value] = BestEntry(Cost::zero(), value); } // If the Inst is inserted into the layout (which is, // at this point, only the side-effecting skeleton), @@ -216,7 +238,7 @@ impl<'a> Elaborator<'a> { // cost. 
ValueDef::Result(inst, _) => { if let Some(_) = self.func.layout.inst_block(inst) { - best[value] = (Cost::zero(), value); + best[value] = BestEntry(Cost::zero(), value); } else { trace!(" -> value {}: result, computing cost", value); let inst_data = &self.func.dfg.insts[inst]; @@ -230,7 +252,7 @@ impl<'a> Elaborator<'a> { .fold(pure_op_cost(inst_data.opcode()), |cost, value| { cost + best[value].0 }); - best[value] = (cost, value); + best[value] = BestEntry(cost, value); } } }; @@ -319,7 +341,7 @@ impl<'a> Elaborator<'a> { // value) here so we have a full view of the // eclass. trace!("looking up best value for {}", value); - let (_, best_value) = self.value_to_best_value[value]; + let BestEntry(_, best_value) = self.value_to_best_value[value]; trace!("elaborate: value {} -> best {}", value, best_value); debug_assert_ne!(best_value, Value::reserved_value()); diff --git a/cranelift/codegen/src/ir/dfg.rs b/cranelift/codegen/src/ir/dfg.rs index 501a7f0a4ef4..feb7466c3f37 100644 --- a/cranelift/codegen/src/ir/dfg.rs +++ b/cranelift/codegen/src/ir/dfg.rs @@ -551,10 +551,15 @@ struct ValueDataPacked(u64); /// (and is implied by `mask`), by translating 2^32-1 (0xffffffff) /// into 2^n-1 and panic'ing on 2^n..2^32-1. 
fn encode_narrow_field(x: u32, bits: u8) -> u32 { + let max = (1 << bits) - 1; if x == 0xffff_ffff { - (1 << bits) - 1 + max } else { - debug_assert!(x < (1 << bits)); + debug_assert!( + x < max, + "{x} does not fit into {bits} bits (must be less than {max} to \ + allow for a 0xffffffff sentinal)" + ); x } } @@ -630,7 +635,7 @@ impl From for ValueDataPacked { Self::make(Self::TAG_ALIAS, ty, 0, original.as_bits()) } ValueData::Union { ty, x, y } => { - Self::make(Self::TAG_ALIAS, ty, x.as_bits(), y.as_bits()) + Self::make(Self::TAG_UNION, ty, x.as_bits(), y.as_bits()) } } } diff --git a/cranelift/filetests/filetests/egraph/arithmetic.clif b/cranelift/filetests/filetests/egraph/arithmetic.clif index 630aebeca450..24d0d9c4201e 100644 --- a/cranelift/filetests/filetests/egraph/arithmetic.clif +++ b/cranelift/filetests/filetests/egraph/arithmetic.clif @@ -4,10 +4,11 @@ target x86_64 function %f0(i32) -> i32 { block0(v0: i32): - v1 = iconst.i32 2 + v1 = iconst.i32 4 v2 = imul v0, v1 - ; check: v5 = ishl v0, v4 ; v4 = 1 - ; check: return v5 + ; check: v3 = iconst.i32 2 + ; check: v4 = ishl v0, v3 ; v3 = 2 + ; check: return v4 return v2 } @@ -60,7 +61,6 @@ block0(v0: i32): v2 = imul v0, v1 return v2 ; check: v3 = ineg v0 - ; check: v4 -> v3 ; check: return v3 } diff --git a/cranelift/filetests/filetests/egraph/cprop-splat.clif b/cranelift/filetests/filetests/egraph/cprop-splat.clif index 798671fe2a48..90ee138200ce 100644 --- a/cranelift/filetests/filetests/egraph/cprop-splat.clif +++ b/cranelift/filetests/filetests/egraph/cprop-splat.clif @@ -14,7 +14,6 @@ block0: ; ; block0: ; v3 = vconst.i8x16 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -30,7 +29,6 @@ block0: ; ; block0: ; v3 = vconst.i8x16 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -46,7 +44,6 @@ block0: ; ; block0: ; v3 = vconst.i16x8 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -62,7 +59,6 @@ block0: ; ; block0: ; v3 = vconst.i16x8 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -78,7 +74,6 
@@ block0: ; ; block0: ; v3 = vconst.i32x4 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -94,7 +89,6 @@ block0: ; ; block0: ; v3 = vconst.i32x4 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -110,7 +104,6 @@ block0: ; ; block0: ; v3 = vconst.i64x2 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -126,7 +119,6 @@ block0: ; ; block0: ; v3 = vconst.i64x2 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -142,7 +134,6 @@ block0: ; ; block0: ; v3 = vconst.i8x16 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -158,7 +149,6 @@ block0: ; ; block0: ; v3 = vconst.f32x4 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } @@ -174,7 +164,6 @@ block0: ; ; block0: ; v3 = vconst.f64x2 const0 -; v4 -> v3 ; return v3 ; v3 = const0 ; } diff --git a/cranelift/filetests/filetests/egraph/icmp-parameterized.clif b/cranelift/filetests/filetests/egraph/icmp-parameterized.clif index 7c7c93083baa..095361408944 100644 --- a/cranelift/filetests/filetests/egraph/icmp-parameterized.clif +++ b/cranelift/filetests/filetests/egraph/icmp-parameterized.clif @@ -16,7 +16,6 @@ block0(v0: i32): ; function %icmp_eq_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 1 -; v3 -> v2 ; return v2 ; v2 = 1 ; } @@ -29,7 +28,6 @@ block0(v0: i32): ; function %icmp_ne_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 0 -; v3 -> v2 ; return v2 ; v2 = 0 ; } @@ -42,7 +40,6 @@ block0(v0: i32): ; function %icmp_ult_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 0 -; v3 -> v2 ; return v2 ; v2 = 0 ; } @@ -55,7 +52,6 @@ block0(v0: i32): ; function %icmp_ule_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 1 -; v3 -> v2 ; return v2 ; v2 = 1 ; } @@ -68,7 +64,6 @@ block0(v0: i32): ; function %icmp_ugt_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 0 -; v3 -> v2 ; return v2 ; v2 = 0 ; } @@ -81,7 +76,6 @@ block0(v0: i32): ; function %icmp_uge_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 1 -; v3 -> v2 ; return v2 ; v2 = 1 ; } @@ -94,7 +88,6 @@ 
block0(v0: i32): ; function %icmp_slt_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 0 -; v3 -> v2 ; return v2 ; v2 = 0 ; } @@ -107,7 +100,6 @@ block0(v0: i32): ; function %icmp_sle_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 1 -; v3 -> v2 ; return v2 ; v2 = 1 ; } @@ -120,7 +112,6 @@ block0(v0: i32): ; function %icmp_sgt_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 0 -; v3 -> v2 ; return v2 ; v2 = 0 ; } @@ -133,7 +124,6 @@ block0(v0: i32): ; function %icmp_sge_self(i32) -> i8 fast { ; block0(v0: i32): ; v2 = iconst.i8 1 -; v3 -> v2 ; return v2 ; v2 = 1 ; } @@ -148,7 +138,6 @@ block0(v1: i64, v2: i64): ; function %mask_icmp_result(i64, i64) -> i8 fast { ; block0(v1: i64, v2: i64): ; v3 = icmp ult v1, v2 -; v6 -> v3 ; return v3 ; } @@ -165,7 +154,6 @@ block0(v1: i64, v2: i64): ; block0(v1: i64, v2: i64): ; v3 = icmp ult v1, v2 ; v4 = uextend.i64 v3 -; v7 -> v4 ; return v4 ; } @@ -305,7 +293,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0xffff_ffff ; v3 = icmp ne v0, v1 ; v1 = 0xffff_ffff -; v4 -> v3 ; return v3 ; } @@ -348,7 +335,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0 ; v3 = icmp eq v0, v1 ; v1 = 0 -; v4 -> v3 ; return v3 ; } @@ -404,7 +390,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0 ; v3 = icmp ne v0, v1 ; v1 = 0 -; v4 -> v3 ; return v3 ; } @@ -473,7 +458,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0xffff_ffff ; v3 = icmp eq v0, v1 ; v1 = 0xffff_ffff -; v4 -> v3 ; return v3 ; } @@ -557,7 +541,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0x7fff_ffff ; v3 = icmp ne v0, v1 ; v1 = 0x7fff_ffff -; v4 -> v3 ; return v3 ; } @@ -600,7 +583,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0x8000_0000 ; v3 = icmp eq v0, v1 ; v1 = 0x8000_0000 -; v4 -> v3 ; return v3 ; } @@ -656,7 +638,6 @@ block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0x8000_0000 ; v3 = icmp ne v0, v1 ; v1 = 0x8000_0000 -; v4 -> v3 ; return v3 ; } @@ -725,7 +706,6 @@ 
block0(v0: i32): ; block0(v0: i32): ; v1 = iconst.i32 0x7fff_ffff ; v3 = icmp eq v0, v1 ; v1 = 0x7fff_ffff -; v4 -> v3 ; return v3 ; } @@ -782,7 +762,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_eq_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -811,7 +790,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_eq_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -840,7 +818,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_eq_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -869,7 +846,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_eq_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -912,7 +888,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -927,7 +902,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -942,7 +916,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -957,7 +930,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -972,7 +944,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -987,7 +958,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1002,7 +972,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_sgt(i32, i32) -> i8 fast { ; 
block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1017,7 +986,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ne_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1046,7 +1014,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ult_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1075,7 +1042,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ult_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1182,7 +1148,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ule_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1197,7 +1162,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ule_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1212,7 +1176,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ule_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ult v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1255,7 +1218,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ule_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1348,7 +1310,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ugt_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1405,7 +1366,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_ugt_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1484,7 +1444,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_uge_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1499,7 +1458,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_uge_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt 
v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1528,7 +1486,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_uge_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1543,7 +1500,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_uge_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ugt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1650,7 +1606,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_slt_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1743,7 +1698,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_slt_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1786,7 +1740,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sle_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1801,7 +1754,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sle_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1880,7 +1832,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sle_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp slt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1923,7 +1874,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sle_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -1952,7 +1902,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sgt_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2073,7 +2022,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sgt_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2088,7 +2036,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sge_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ 
-2103,7 +2050,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sge_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2196,7 +2142,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sge_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp eq v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2211,7 +2156,6 @@ block0(v0: i32, v1: i32): ; function %icmp_and_sge_sgt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sgt v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2268,7 +2212,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2283,7 +2226,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2298,7 +2240,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2313,7 +2254,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2328,7 +2268,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2343,7 +2282,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2358,7 +2296,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_sgt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2373,7 +2310,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_eq_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2416,7 +2352,6 @@ block0(v0: i32, v1: i32): ; 
function %icmp_or_ne_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2445,7 +2380,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ne_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2474,7 +2408,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ne_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2503,7 +2436,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ne_sgt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2532,7 +2464,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ult_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2547,7 +2478,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ult_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2576,7 +2506,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ult_ule(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2591,7 +2520,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ult_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2684,7 +2612,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ule_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2713,7 +2640,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ule_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ule v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2834,7 +2760,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ugt_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2849,7 +2774,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ugt_ne(i32, i32) -> i8 fast { ; 
block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2864,7 +2788,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ugt_ult(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2907,7 +2830,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_ugt_uge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -2986,7 +2908,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_uge_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3043,7 +2964,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_uge_ugt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp uge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3136,7 +3056,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_slt_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3151,7 +3070,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_slt_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3244,7 +3162,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_slt_sle(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3259,7 +3176,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_slt_sgt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3288,7 +3204,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sle_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3381,7 +3296,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sle_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sle v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3438,7 +3352,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sgt_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 
-; v6 -> v5 ; return v5 ; } @@ -3453,7 +3366,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sgt_ne(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3532,7 +3444,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sgt_slt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp ne v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3575,7 +3486,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sgt_sge(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3590,7 +3500,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sge_eq(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 -; v6 -> v5 ; return v5 ; } @@ -3711,7 +3620,6 @@ block0(v0: i32, v1: i32): ; function %icmp_or_sge_sgt(i32, i32) -> i8 fast { ; block0(v0: i32, v1: i32): ; v5 = icmp sge v0, v1 -; v6 -> v5 ; return v5 ; } diff --git a/cranelift/filetests/filetests/egraph/icmp.clif b/cranelift/filetests/filetests/egraph/icmp.clif index 46eaac465a2c..1a8874845f5f 100644 --- a/cranelift/filetests/filetests/egraph/icmp.clif +++ b/cranelift/filetests/filetests/egraph/icmp.clif @@ -15,7 +15,6 @@ block0(v1: i64, v2: i64): ; function %mask_icmp_result(i64, i64) -> i8 fast { ; block0(v1: i64, v2: i64): ; v3 = icmp ult v1, v2 -; v6 -> v3 ; return v3 ; } @@ -32,7 +31,6 @@ block0(v1: i64, v2: i64): ; block0(v1: i64, v2: i64): ; v3 = icmp ult v1, v2 ; v4 = uextend.i64 v3 -; v7 -> v4 ; return v4 ; } @@ -53,7 +51,6 @@ block0: ; v2 = bmask.i8 v1 ; v3 = iconst.i8 0 ; v5 = icmp ne v2, v3 ; v3 = 0 -; v6 -> v5 ; return v5 ; } diff --git a/cranelift/filetests/filetests/egraph/shifts.clif b/cranelift/filetests/filetests/egraph/shifts.clif index fbe308ddb5dc..f2b992771bde 100644 --- a/cranelift/filetests/filetests/egraph/shifts.clif +++ b/cranelift/filetests/filetests/egraph/shifts.clif @@ -74,9 +74,9 @@ block0(v0: i8): v2 = sshr v0, v1 v3 = ishl v2, v1 return v3 - ; check: v8 = iconst.i8 224 - ; check: v9 = 
band v0, v8 - ; check: return v9 + ; check: v7 = iconst.i8 224 + ; check: v8 = band v0, v7 + ; check: return v8 } function %sextend_shift_32_64_unsigned(i32) -> i64 { diff --git a/cranelift/filetests/filetests/wasm/byteswap.wat b/cranelift/filetests/filetests/wasm/byteswap.wat index de5b51985d8f..81ccba004455 100644 --- a/cranelift/filetests/filetests/wasm/byteswap.wat +++ b/cranelift/filetests/filetests/wasm/byteswap.wat @@ -78,7 +78,6 @@ ;; ;; block1: ;; v18 = bswap.i32 v0 -;; v19 -> v18 ;; @0057 return v18 ;; } ;; @@ -88,6 +87,5 @@ ;; ;; block1: ;; v38 = bswap.i64 v0 -;; v39 -> v38 ;; @00ad return v38 ;; } diff --git a/cranelift/isle/isle/src/codegen.rs b/cranelift/isle/isle/src/codegen.rs index d55ca3eaa4f6..d540afefb029 100644 --- a/cranelift/isle/isle/src/codegen.rs +++ b/cranelift/isle/isle/src/codegen.rs @@ -221,6 +221,7 @@ impl<'a> Codegen<'a> { type Context; type Output; fn next(&mut self, ctx: &mut Self::Context) -> Option; + fn size_hint(&self) -> (usize, Option) {{ (0, None) }} }} pub struct ContextIterWrapper, C: Context> {{ @@ -238,6 +239,9 @@ impl<'a> Codegen<'a> { fn next(&mut self, _ctx: &mut Self::Context) -> Option {{ self.iter.next() }} + fn size_hint(&self) -> (usize, Option) {{ + self.iter.size_hint() + }} }} "#, )