diff --git a/cranelift/codegen/src/ir/pcc.rs b/cranelift/codegen/src/ir/pcc.rs index 739f51f96b8c..7502610a5d2e 100644 --- a/cranelift/codegen/src/ir/pcc.rs +++ b/cranelift/codegen/src/ir/pcc.rs @@ -128,19 +128,25 @@ pub enum PccError { #[derive(Clone, Debug, Hash, PartialEq, Eq)] #[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] pub enum Fact { - /// A bitslice of a value (up to a bitwidth) is less than or equal - /// to a given maximum value. + /// A bitslice of a value (up to a bitwidth) is within the given + /// integer range. /// /// The slicing behavior is needed because this fact can describe /// both an SSA `Value`, whose entire value is well-defined, and a /// `VReg` in VCode, whose bits beyond the type stored in that /// register are don't-care (undefined). - ValueMax { + Range { /// The bitwidth of bits we care about, from the LSB upward. bit_width: u16, + /// The minimum value that the bitslice can take + /// (inclusive). The range is unsigned: the specified bits of + /// the actual value will be greater than or equal to this + /// value, as evaluated by an unsigned integer comparison. + min: u64, /// The maximum value that the bitslice can take - /// (inclusive). The range is unsigned: the bits of the value - /// will be within the range `0..=max`. + /// (inclusive). The range is unsigned: the specified bits of + /// the actual value will be less than or equal to this value, + /// as evaluated by an unsigned integer comparison. max: u64, }, @@ -148,16 +154,26 @@ pub enum Fact { Mem { /// The memory type. ty: ir::MemoryType, - /// The offset into the memory type. - offset: i64, + /// The minimum offset into the memory type, inclusive. + min_offset: u64, + /// The maximum offset into the memory type, inclusive. 
+ max_offset: u64, }, } impl fmt::Display for Fact { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Fact::ValueMax { bit_width, max } => write!(f, "max({}, {:#x})", bit_width, max), - Fact::Mem { ty, offset } => write!(f, "mem({}, {:#x})", ty, offset), + Fact::Range { + bit_width, + min, + max, + } => write!(f, "range({}, {:#x}, {:#x})", bit_width, min, max), + Fact::Mem { + ty, + min_offset, + max_offset, + } => write!(f, "mem({}, {:#x}, {:#x})", ty, min_offset, max_offset), } } } @@ -166,20 +182,24 @@ impl Fact { /// Try to infer a minimal fact for a value of the given IR type. pub fn infer_from_type(ty: ir::Type) -> Option<&'static Self> { static FACTS: [Fact; 4] = [ - Fact::ValueMax { + Fact::Range { bit_width: 8, + min: 0, max: u8::MAX as u64, }, - Fact::ValueMax { + Fact::Range { bit_width: 16, + min: 0, max: u16::MAX as u64, }, - Fact::ValueMax { + Fact::Range { bit_width: 32, + min: 0, max: u32::MAX as u64, }, - Fact::ValueMax { + Fact::Range { bit_width: 64, + min: 0, max: u64::MAX, }, ]; @@ -231,24 +251,25 @@ impl<'a> FactContext<'a> { (l, r) if l == r => true, ( - Fact::ValueMax { + Fact::Range { bit_width: bw_lhs, + min: min_lhs, max: max_lhs, }, - Fact::ValueMax { + Fact::Range { bit_width: bw_rhs, + min: min_rhs, max: max_rhs, }, ) => { // If the bitwidths we're claiming facts about are the - // same, and if the value is less than or equal to - // `max_lhs`, and if `max_rhs` is less than `max_lhs`, - // then it is certainly less than or equal to - // `max_rhs`. + // same, and if the right-hand-side range is larger + // than the left-hand-side range, then the LHS + // subsumes the RHS. // // In other words, we can always expand the claimed // possible value range. 
- bw_lhs == bw_rhs && max_lhs <= max_rhs + bw_lhs == bw_rhs && max_lhs <= max_rhs && min_lhs >= min_rhs } _ => false, @@ -275,39 +296,58 @@ impl<'a> FactContext<'a> { pub fn add(&self, lhs: &Fact, rhs: &Fact, add_width: u16) -> Option { match (lhs, rhs) { ( - Fact::ValueMax { + Fact::Range { bit_width: bw_lhs, - max: lhs, + min: min_lhs, + max: max_lhs, }, - Fact::ValueMax { + Fact::Range { bit_width: bw_rhs, - max: rhs, + min: min_rhs, + max: max_rhs, }, ) if bw_lhs == bw_rhs && add_width >= *bw_lhs => { - let computed_max = lhs.checked_add(*rhs)?; + let computed_min = min_lhs.checked_add(*min_rhs)?; + let computed_max = max_lhs.checked_add(*max_rhs)?; let computed_max = std::cmp::min(max_value_for_width(add_width), computed_max); - Some(Fact::ValueMax { + Some(Fact::Range { bit_width: *bw_lhs, + min: computed_min, max: computed_max, }) } ( - Fact::ValueMax { + Fact::Range { bit_width: bw_max, + min, max, }, - Fact::Mem { ty, offset }, + Fact::Mem { + ty, + min_offset, + max_offset, + }, ) | ( - Fact::Mem { ty, offset }, - Fact::ValueMax { + Fact::Mem { + ty, + min_offset, + max_offset, + }, + Fact::Range { bit_width: bw_max, + min, max, }, ) if *bw_max >= self.pointer_width && add_width >= *bw_max => { - let offset = offset.checked_add(i64::try_from(*max).ok()?)?; - Some(Fact::Mem { ty: *ty, offset }) + let min_offset = min_offset.checked_add(*min)?; + let max_offset = max_offset.checked_add(*max)?; + Some(Fact::Mem { + ty: *ty, + min_offset, + max_offset, + }) } _ => None, @@ -322,14 +362,20 @@ impl<'a> FactContext<'a> { // bit_width and from_bits are exactly contiguous, then we // have defined values in 0..to_bits (and because this is // a zero-extend, the max value is the same). 
- Fact::ValueMax { bit_width, max } if *bit_width == from_width => Some(Fact::ValueMax { + Fact::Range { + bit_width, + min, + max, + } if *bit_width == from_width => Some(Fact::Range { bit_width: to_width, + min: *min, max: *max, }), // Otherwise, we can at least claim that the value is // within the range of `to_width`. - Fact::ValueMax { .. } => Some(Fact::ValueMax { + Fact::Range { .. } => Some(Fact::Range { bit_width: to_width, + min: 0, max: max_value_for_width(to_width), }), _ => None, @@ -342,9 +388,13 @@ impl<'a> FactContext<'a> { // If we have a defined value in bits 0..bit_width, and // the MSB w.r.t. `from_width` is *not* set, then we can // do the same as `uextend`. - Fact::ValueMax { bit_width, max } - if *bit_width == from_width && (*max & (1 << (*bit_width - 1)) == 0) => - { + Fact::Range { + bit_width, + // We can ignore `min`: it is always <= max in + // unsigned terms, and we check max's MSB below. + min: _, + max, + } if *bit_width == from_width && (*max & (1 << (*bit_width - 1)) == 0) => { self.uextend(fact, from_width, to_width) } _ => None, @@ -353,23 +403,20 @@ impl<'a> FactContext<'a> { /// Scales a value with a fact by a known constant. pub fn scale(&self, fact: &Fact, width: u16, factor: u32) -> Option<Fact> { - // The minimal (loosest) fact we can claim: the value will be - // within the range implied by its bitwidth. 
- let minimal_fact = Fact::ValueMax { - bit_width: width, - max: max_value_for_width(width), - }; match fact { - Fact::ValueMax { bit_width, max } if *bit_width == width => { - let max = match max.checked_mul(u64::from(factor)) { - Some(max) => max, - None => return Some(minimal_fact), - }; + Fact::Range { + bit_width, + min, + max, + } if *bit_width == width => { + let min = min.checked_mul(u64::from(factor))?; + let max = max.checked_mul(u64::from(factor))?; if *bit_width < 64 && max > max_value_for_width(width) { - return Some(minimal_fact); + return None; } - Some(Fact::ValueMax { + Some(Fact::Range { bit_width: *bit_width, + min, max, }) } @@ -388,36 +435,38 @@ impl<'a> FactContext<'a> { /// Offsets a value with a fact by a known amount. pub fn offset(&self, fact: &Fact, width: u16, offset: i64) -> Option { - match fact { - Fact::ValueMax { bit_width, max } if *bit_width == width => { - // If we eventually support two-sided ranges, we can - // represent (0..n) + m -> ((0+m)..(n+m)). However, - // right now, all ranges start with zero, so any - // negative offset could underflow, and removes all - // claims of constrained range. - let offset = u64::try_from(offset).ok()?; - - let max = match max.checked_add(offset) { - Some(max) => max, - None => { - return Some(Fact::ValueMax { - bit_width: width, - max: max_value_for_width(width), - }) - } - }; + // Any negative offset could underflow, and removes + // all claims of constrained range, so for now we only + // support positive offsets. 
+ let offset = u64::try_from(offset).ok()?; - Some(Fact::ValueMax { + match fact { + Fact::Range { + bit_width, + min, + max, + } if *bit_width == width => { + let min = min.checked_add(offset)?; + let max = max.checked_add(offset)?; + + Some(Fact::Range { bit_width: *bit_width, + min, max, }) } Fact::Mem { ty, - offset: mem_offset, + min_offset: mem_min_offset, + max_offset: mem_max_offset, } => { - let offset = mem_offset.checked_sub(offset)?; - Some(Fact::Mem { ty: *ty, offset }) + let min_offset = mem_min_offset.checked_sub(offset)?; + let max_offset = mem_max_offset.checked_sub(offset)?; + Some(Fact::Mem { + ty: *ty, + min_offset, + max_offset, + }) } _ => None, } @@ -427,15 +476,18 @@ impl<'a> FactContext<'a> { /// a memory access of the given size, is valid. /// /// If valid, returns the memory type and offset into that type - /// that this address accesses. - fn check_address(&self, fact: &Fact, size: u32) -> PccResult<(ir::MemoryType, i64)> { + /// that this address accesses, if known, or `None` if the range + /// doesn't constrain the access to exactly one location. + fn check_address(&self, fact: &Fact, size: u32) -> PccResult> { match fact { - Fact::Mem { ty, offset } => { - let end_offset: i64 = offset - .checked_add(i64::from(size)) + Fact::Mem { + ty, + min_offset, + max_offset, + } => { + let end_offset: u64 = max_offset + .checked_add(u64::from(size)) .ok_or(PccError::Overflow)?; - let end_offset: u64 = - u64::try_from(end_offset).map_err(|_| PccError::OutOfBounds)?; match &self.function.memory_types[*ty] { ir::MemoryTypeData::Struct { size, .. 
} | ir::MemoryTypeData::Memory { size } => { @@ -443,7 +495,12 @@ impl<'a> FactContext<'a> { } ir::MemoryTypeData::Empty => bail!(OutOfBounds), } - Ok((*ty, *offset)) + let specific_ty_and_offset = if min_offset == max_offset { + Some((*ty, *min_offset)) + } else { + None + }; + Ok(specific_ty_and_offset) } _ => bail!(OutOfBounds), } @@ -456,9 +513,10 @@ impl<'a> FactContext<'a> { fact: &Fact, access_ty: ir::Type, ) -> PccResult> { - let (ty, offset) = self.check_address(fact, access_ty.bytes())?; - let offset = - u64::try_from(offset).expect("valid access address cannot have a negative offset"); + let (ty, offset) = match self.check_address(fact, access_ty.bytes())? { + Some((ty, offset)) => (ty, offset), + None => return Ok(None), + }; if let ir::MemoryTypeData::Struct { fields, .. } = &self.function.memory_types[ty] { let field = fields diff --git a/cranelift/codegen/src/isa/aarch64/pcc.rs b/cranelift/codegen/src/isa/aarch64/pcc.rs index dbc24e19541f..83a039ec63fe 100644 --- a/cranelift/codegen/src/isa/aarch64/pcc.rs +++ b/cranelift/codegen/src/isa/aarch64/pcc.rs @@ -147,8 +147,9 @@ pub(crate) fn check(ctx: &FactContext, vcode: &VCode, inst: &Inst) -> PccR imm12, } => check_output(&ctx, vcode, rd.to_reg(), || { let rn = get_fact_or_default(vcode, *rn)?; - let imm_fact = Fact::ValueMax { + let imm_fact = Fact::Range { bit_width: size.bits().into(), + min: imm12.value(), max: imm12.value(), }; fail_if_missing(ctx.add(&rn, &imm_fact, size.bits().into())) @@ -218,8 +219,9 @@ pub(crate) fn check(ctx: &FactContext, vcode: &VCode, inst: &Inst) -> PccR // Any ALU op can validate a max-value fact where the // value is the maximum for its bit-width. 
check_output(&ctx, vcode, rd.to_reg(), || { - Ok(Fact::ValueMax { + Ok(Fact::Range { bit_width: size.bits().into(), + min: 0, max: size.max_value(), }) }) diff --git a/cranelift/filetests/filetests/pcc/fail/add.clif b/cranelift/filetests/filetests/pcc/fail/add.clif index e32ed55d66ed..a40879f3df72 100644 --- a/cranelift/filetests/filetests/pcc/fail/add.clif +++ b/cranelift/filetests/filetests/pcc/fail/add.clif @@ -3,48 +3,48 @@ set enable_pcc=true target aarch64 function %f0(i32, i32) -> i32 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(32, 0x80): i32): - v2 ! max(32, 0x17f) = iadd.i32 v0, v1 +block0(v0 ! range(32, 0, 0x100): i32, v1 ! range(32, 0, 0x80): i32): + v2 ! range(32, 0, 0x17f) = iadd.i32 v0, v1 return v2 } function %f1(i32) -> i32 { -block0(v0 ! max(32, 0x100): i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0x100) = iadd.i32 v0, v1 +block0(v0 ! range(32, 0, 0x100): i32): + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0x100) = iadd.i32 v0, v1 return v2 } function %f3(i32) -> i64 { block0(v0: i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0xffff_fffe) = iadd.i32 v0, v1 - v3 ! max(64, 0xffff_fffe) = uextend.i64 v2 + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0xffff_fffe) = iadd.i32 v0, v1 + v3 ! range(64, 0, 0xffff_fffe) = uextend.i64 v2 return v3 } function %f3(i32) -> i64 { block0(v0: i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0xffff_ffff) = iadd.i32 v0, v1 - v3 ! max(64, 0xffff_ffff) = uextend.i64 v2 - v4 ! max(64, 0x1) = iconst.i64 1 - v5 ! max(64, 0xffff_ffff) = iadd.i64 v3, v4 + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0xffff_ffff) = iadd.i32 v0, v1 + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v2 + v4 ! range(64, 0, 0x1) = iconst.i64 1 + v5 ! range(64, 0, 0xffff_ffff) = iadd.i64 v3, v4 return v5 } ;; check merged ops: function %f4(i32, i32) -> i32 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(32, 0x200): i32): +block0(v0 ! range(32, 0, 0x100): i32, v1 ! 
range(32, 0, 0x200): i32): v2 = iconst.i32 2 - v3 ! max(32, 0x400) = ishl.i32 v0, v2 - v4 ! max(32, 0x5ff) = iadd.i32 v1, v3 + v3 ! range(32, 0, 0x400) = ishl.i32 v0, v2 + v4 ! range(32, 0, 0x5ff) = iadd.i32 v1, v3 return v4 } function %f5(i32, i64) -> i64 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(64, 0x200): i64): - v2 ! max(64, 0x100) = uextend.i64 v0 - v3 ! max(64, 0x2ff) = iadd.i64 v1, v2 +block0(v0 ! range(32, 0, 0x100): i32, v1 ! range(64, 0, 0x200): i64): + v2 ! range(64, 0, 0x100) = uextend.i64 v0 + v3 ! range(64, 0, 0x2ff) = iadd.i64 v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/pcc/fail/blockparams.clif b/cranelift/filetests/filetests/pcc/fail/blockparams.clif index 29863e06b89f..0e8386f838c8 100644 --- a/cranelift/filetests/filetests/pcc/fail/blockparams.clif +++ b/cranelift/filetests/filetests/pcc/fail/blockparams.clif @@ -3,17 +3,17 @@ set enable_pcc=true target aarch64 function %f0(i64, i32) -> i64 { -block0(v0 ! max(64, 0x100): i64, v1: i32): - v2 ! max(64, 0x100) = iconst.i64 0x100 - v3 ! max(64, 0x200) = iadd v0, v2 +block0(v0 ! range(64, 0, 0x100): i64, v1: i32): + v2 ! range(64, 0, 0x100) = iconst.i64 0x100 + v3 ! range(64, 0, 0x200) = iadd v0, v2 brif v1, block1(v0), block2(v3) -block1(v4 ! max(64, 0xff): i64): ;; shrink the range -- should be caught +block1(v4 ! range(64, 0, 0xff): i64): ;; shrink the range -- should be caught jump block3(v4) -block2(v5 ! max(64, 0x1ff): i64): +block2(v5 ! range(64, 0, 0x1ff): i64): jump block3(v5) -block3(v6 ! max(64, 1): i64): +block3(v6 ! range(64, 0, 1): i64): return v6 } diff --git a/cranelift/filetests/filetests/pcc/fail/extend.clif b/cranelift/filetests/filetests/pcc/fail/extend.clif index dab088874d17..a1c31ee5fb5a 100644 --- a/cranelift/filetests/filetests/pcc/fail/extend.clif +++ b/cranelift/filetests/filetests/pcc/fail/extend.clif @@ -3,8 +3,8 @@ set enable_pcc=true target aarch64 function %f0(i32) -> i64 { -block0(v0 ! max(32, 0xffff_ffff): i32): - v1 ! 
max(64, 0xffff_0000) = uextend.i64 v0 +block0(v0 ! range(32, 0, 0xffff_ffff): i32): + v1 ! range(64, 0, 0xffff_0000) = uextend.i64 v0 return v1 } @@ -14,7 +14,7 @@ block0(v0 ! max(32, 0xffff_ffff): i32): ;; possible. If the `i32` were taken through another 32-bit operation ;; and we asserted its 32-bit range at that point, it would work. function %f1(i32) -> i64 { -block0(v0 ! max(16, 0xffff): i32): - v1 ! max(64, 0xffff_ffff) = uextend.i64 v0 +block0(v0 ! range(16, 0, 0xffff): i32): + v1 ! range(64, 0, 0xffff_ffff) = uextend.i64 v0 return v1 } diff --git a/cranelift/filetests/filetests/pcc/fail/load.clif b/cranelift/filetests/filetests/pcc/fail/load.clif index a7b916addbf0..5fa42311d2ce 100644 --- a/cranelift/filetests/filetests/pcc/fail/load.clif +++ b/cranelift/filetests/filetests/pcc/fail/load.clif @@ -4,9 +4,9 @@ target aarch64 function %f0(i64, i32) -> i64 { mt0 = memory 0x1000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0x1000): i32): - v2 ! max(64, 0x100) = uextend.i64 v1 - v3 ! mem(mt0, 0x100) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0x1000): i32): + v2 ! range(64, 0, 0x100) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0x100) = iadd.i64 v0, v2 v4 = load.i64 checked v3 return v4 } @@ -14,9 +14,9 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0x1000): i32): ;; Insufficient guard region: the 8-byte load could go off the end. function %f1(i64, i32) -> i64 { mt0 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): - v2 ! max(64, 0xffff_ffff) = uextend.i64 v1 - v3 ! mem(mt0, 0xffff_ffff) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xffff_ffff): i32): + v2 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0xffff_ffff) = iadd.i64 v0, v2 v4 = load.i64 checked v3 return v4 } @@ -24,9 +24,9 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): ;; RegRegExtend mode on aarch64. function %f2(i64, i32) -> i8 { mt0 = memory 0x1000 -block0(v0 ! 
mem(mt0, 0x1000): i64, v1 ! max(32, 0x1000): i32): - v2 ! max(64, 0x100) = uextend.i64 v1 - v3 ! mem(mt0, 0x100) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0x1000): i64, v1 ! range(32, 0, 0x1000): i32): + v2 ! range(64, 0, 0x100) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0x100) = iadd.i64 v0, v2 v4 = load.i8 checked v3 return v4 } @@ -34,8 +34,8 @@ block0(v0 ! mem(mt0, 0x1000): i64, v1 ! max(32, 0x1000): i32): ;; RegReg mode on aarch64. function %f3(i64, i64) -> i8 { mt0 = memory 0x100 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): - v2 ! mem(mt0, 0xfff) = iadd.i64 v0, v1 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(64, 0, 0xfff): i64): + v2 ! mem(mt0, 0, 0xfff) = iadd.i64 v0, v1 v3 = load.i8 checked v2 return v3 } @@ -43,11 +43,11 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): ;; RegScaledExtended mode on aarch64. function %f4(i64, i32) -> i64 { mt0 = memory 0x7000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): - v2 ! max(64, 0xfff) = uextend.i64 v1 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xfff): i32): + v2 ! range(64, 0, 0xfff) = uextend.i64 v1 v3 = iconst.i32 3 - v4 ! max(64, 0x7ff8) = ishl.i64 v2, v3 - v5 ! mem(mt0, 0x7ff8) = iadd.i64 v0, v4 + v4 ! range(64, 0, 0x7ff8) = ishl.i64 v2, v3 + v5 ! mem(mt0, 0, 0x7ff8) = iadd.i64 v0, v4 v6 = load.i64 checked v5 return v6 } @@ -55,10 +55,10 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): ;; RegScaled mode on aarch64. function %f5(i64, i64) -> i64 { mt0 = memory 0x7000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(64, 0, 0xfff): i64): v2 = iconst.i32 3 - v3 ! max(64, 0x7ff8) = ishl.i64 v1, v2 - v4 ! mem(mt0, 0x7ff8) = iadd.i64 v0, v3 + v3 ! range(64, 0, 0x7ff8) = ishl.i64 v1, v2 + v4 ! 
mem(mt0, 0, 0x7ff8) = iadd.i64 v0, v3 v5 = load.i64 checked v4 return v5 } diff --git a/cranelift/filetests/filetests/pcc/fail/memtypes.clif b/cranelift/filetests/filetests/pcc/fail/memtypes.clif index 2a9c87f1ba27..bf407779299a 100644 --- a/cranelift/filetests/filetests/pcc/fail/memtypes.clif +++ b/cranelift/filetests/filetests/pcc/fail/memtypes.clif @@ -5,25 +5,25 @@ target aarch64 function %f0(i64) -> i32 { mt0 = struct 8 { 4: i32, 0: i32 } ; error: out-of-order -block0(v0 ! mem(mt0, 0): i64): +block0(v0 ! mem(mt0, 0, 0): i64): v1 = load.i32 v0+0 return v1 } -function %f0(i64) -> i32 { +function %f1(i64) -> i32 { ;; out-of-bounds field: mt0 = struct 8 { 0: i32, 6: i32 } ; error: field at offset 6 of size 4 that overflows -block0(v0 ! mem(mt0, 0): i64): +block0(v0 ! mem(mt0, 0, 0): i64): v1 = load.i32 v0+0 return v1 } -function %f0(i64) -> i32 { +function %f2(i64) -> i32 { ;; overflowing offset + field size: mt0 = struct 8 { 0: i32, 0xffff_ffff_ffff_ffff: i32 } ; error: field at offset 18446744073709551615 of size 4; offset plus size overflows a u64 -block0(v0 ! mem(mt0, 0): i64): +block0(v0 ! mem(mt0, 0, 0): i64): v1 = load.i32 v0+0 return v1 } diff --git a/cranelift/filetests/filetests/pcc/fail/shift.clif b/cranelift/filetests/filetests/pcc/fail/shift.clif index fba82900bc99..4059c3eba35d 100644 --- a/cranelift/filetests/filetests/pcc/fail/shift.clif +++ b/cranelift/filetests/filetests/pcc/fail/shift.clif @@ -3,8 +3,8 @@ set enable_pcc=true target aarch64 function %f0(i32) -> i32 { -block0(v0 ! max(32, 0x100): i32): +block0(v0 ! range(32, 1, 0x100): i32): v1 = iconst.i32 2 - v2 ! max(32, 0x3ff) = ishl.i32 v0, v1 + v2 ! 
range(32, 4, 0x3ff) = ishl.i32 v0, v1 return v2 } diff --git a/cranelift/filetests/filetests/pcc/fail/simple.clif b/cranelift/filetests/filetests/pcc/fail/simple.clif index 6196e4e31050..777451212d41 100644 --- a/cranelift/filetests/filetests/pcc/fail/simple.clif +++ b/cranelift/filetests/filetests/pcc/fail/simple.clif @@ -7,9 +7,9 @@ target aarch64 function %simple1(i64 vmctx, i32) -> i8 { mt0 = memory 0x8000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): - v2 ! max(64, 0xffff_ffff) = uextend.i64 v1 - v3 ! mem(mt0, 0xffff_ffff) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xffff_ffff): i32): + v2 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0xffff_ffff) = iadd.i64 v0, v2 v4 = load.i8 checked v3 return v4 } @@ -18,9 +18,9 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): function %simple2(i64 vmctx, i32) -> i8 { mt0 = memory 0x8000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): - v2 ! max(64, 0xffff_ffff) = uextend.i64 v1 - v3 ! mem(mt0, 0) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xffff_ffff): i32): + v2 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0) = iadd.i64 v0, v2 v4 = load.i8 checked v3 return v4 } diff --git a/cranelift/filetests/filetests/pcc/fail/struct.clif b/cranelift/filetests/filetests/pcc/fail/struct.clif index 22e455bea47a..c8d79dcdb558 100644 --- a/cranelift/filetests/filetests/pcc/fail/struct.clif +++ b/cranelift/filetests/filetests/pcc/fail/struct.clif @@ -3,17 +3,26 @@ set enable_pcc=true target aarch64 function %f0(i64) -> i64 { - mt0 = struct 8 { 0: i64 ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 ! mem(mt1, 0, 0) } mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64): - v1 ! mem(mt1, 8) = load.i64 checked v0 +block0(v0 ! mem(mt0, 0, 0): i64): + v1 ! mem(mt1, 8, 8) = load.i64 checked v0 return v1 } function %f1(i64, i64) { - mt0 = struct 8 { 0: i64 ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 ! 
mem(mt1, 0, 0) } mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! mem(mt1, 8): i64): +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! mem(mt1, 8, 8): i64): store.i64 checked v1, v0 return } + +function %f2(i64) -> i32 { + mt0 = struct 8 { 0: i32 ! range(32, 0, 1), 4: i32 } + + ;; insufficiently-constrained range: +block0(v0 ! mem(mt0, 0, 8): i64): + v1 ! range(32, 0, 1) = load.i32 checked v0+0 + return v1 +} diff --git a/cranelift/filetests/filetests/pcc/fail/vmctx.clif b/cranelift/filetests/filetests/pcc/fail/vmctx.clif index f29e173449b3..9f53602f7867 100644 --- a/cranelift/filetests/filetests/pcc/fail/vmctx.clif +++ b/cranelift/filetests/filetests/pcc/fail/vmctx.clif @@ -5,16 +5,16 @@ target aarch64 ;; Equivalent to a Wasm `i64.load` from a static memory. function %f0(i64, i32) -> i64 { ;; mock vmctx struct: - mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0, 0) } ;; mock static memory: 4GiB range, *but insufficient guard* mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1: i32): +block0(v0 ! mem(mt0, 0, 0): i64, v1: i32): ;; Compute the address: base + offset. Guard region (2GiB) is ;; sufficient for an 8-byte I64 load. - v2 ! mem(mt1, 0) = load.i64 checked v0+0 ;; base pointer - v3 ! max(64, 0xffff_ffff) = uextend.i64 v1 ;; offset - v4 ! mem(mt1, 0xffff_ffff) = iadd.i64 v2, v3 + v2 ! mem(mt1, 0, 0) = load.i64 checked v0+0 ;; base pointer + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 ;; offset + v4 ! mem(mt1, 0, 0xffff_ffff) = iadd.i64 v2, v3 v5 = load.i64 checked v4 return v5 } @@ -22,16 +22,16 @@ block0(v0 ! mem(mt0, 0): i64, v1: i32): ;; Equivalent to a Wasm `i64.load` from a static memory. function %f1(i64, i32) -> i64 { ;; mock vmctx struct: - mt0 = struct 16 { 0: i64 readonly ! mem(mt1, 0), 8: i64 readonly } + mt0 = struct 16 { 0: i64 readonly ! mem(mt1, 0, 0), 8: i64 readonly } ;; mock static memory: 4GiB range, *but insufficient guard* mt1 = memory 0x1_8000_0000 -block0(v0 ! 
mem(mt0, 0): i64, v1: i32): +block0(v0 ! mem(mt0, 0, 0): i64, v1: i32): ;; Compute the address: base + offset. Guard region (2GiB) is ;; sufficient for an 8-byte I64 load. - v2 ! mem(mt1, 0) = load.i64 checked v0+8 ;; base pointer, but the wrong one - v3 ! max(64, 0xffff_ffff) = uextend.i64 v1 ;; offset - v4 ! mem(mt1, 0xffff_ffff) = iadd.i64 v2, v3 + v2 ! mem(mt1, 0, 0) = load.i64 checked v0+8 ;; base pointer, but the wrong one + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 ;; offset + v4 ! mem(mt1, 0, 0xffff_ffff) = iadd.i64 v2, v3 v5 = load.i64 checked v4 return v5 } diff --git a/cranelift/filetests/filetests/pcc/succeed/add.clif b/cranelift/filetests/filetests/pcc/succeed/add.clif index 26e72c7c18ae..6cfcb486f618 100644 --- a/cranelift/filetests/filetests/pcc/succeed/add.clif +++ b/cranelift/filetests/filetests/pcc/succeed/add.clif @@ -3,58 +3,58 @@ set enable_pcc=true target aarch64 function %f0(i32, i32) -> i32 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(32, 0x80): i32): - v2 ! max(32, 0x180) = iadd.i32 v0, v1 +block0(v0 ! range(32, 0, 0x100): i32, v1 ! range(32, 0, 0x80): i32): + v2 ! range(32, 0, 0x180) = iadd.i32 v0, v1 return v2 } function %f1(i32) -> i32 { -block0(v0 ! max(32, 0x100): i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0x101) = iadd.i32 v0, v1 +block0(v0 ! range(32, 0, 0x100): i32): + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0x101) = iadd.i32 v0, v1 return v2 } ;; a looser but still accurate bound should check too: function %f2(i32) -> i32 { -block0(v0 ! max(32, 0x100): i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0x102) = iadd.i32 v0, v1 +block0(v0 ! range(32, 0, 0x100): i32): + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0x102) = iadd.i32 v0, v1 return v2 } ;; we should be able to verify a range based on the type alone: function %f3(i32) -> i64 { block0(v0: i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0xffff_ffff) = iadd.i32 v0, v1 - v3 ! max(64, 0xffff_ffff) = uextend.i64 v2 + v1 ! 
range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0xffff_ffff) = iadd.i32 v0, v1 + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v2 return v3 } ;; we should be able to verify a range based on the type alone: function %f3(i32) -> i64 { block0(v0: i32): - v1 ! max(32, 1) = iconst.i32 1 - v2 ! max(32, 0xffff_ffff) = iadd.i32 v0, v1 - v3 ! max(64, 0xffff_ffff) = uextend.i64 v2 - v4 ! max(64, 0x1) = iconst.i64 1 - v5 ! max(64, 0x1_0000_0000) = iadd.i64 v3, v4 + v1 ! range(32, 0, 1) = iconst.i32 1 + v2 ! range(32, 0, 0xffff_ffff) = iadd.i32 v0, v1 + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v2 + v4 ! range(64, 0, 0x1) = iconst.i64 1 + v5 ! range(64, 0, 0x1_0000_0000) = iadd.i64 v3, v4 return v5 } ;; check merged ops: function %f4(i32, i32) -> i32 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(32, 0x200): i32): +block0(v0 ! range(32, 0, 0x100): i32, v1 ! range(32, 0, 0x200): i32): v2 = iconst.i32 2 - v3 ! max(32, 0x400) = ishl.i32 v0, v2 - v4 ! max(32, 0x600) = iadd.i32 v1, v3 + v3 ! range(32, 0, 0x400) = ishl.i32 v0, v2 + v4 ! range(32, 0, 0x600) = iadd.i32 v1, v3 return v4 } function %f5(i32, i64) -> i64 { -block0(v0 ! max(32, 0x100): i32, v1 ! max(64, 0x200): i64): - v2 ! max(64, 0x100) = uextend.i64 v0 - v3 ! max(64, 0x300) = iadd.i64 v1, v2 +block0(v0 ! range(32, 0, 0x100): i32, v1 ! range(64, 0, 0x200): i64): + v2 ! range(64, 0, 0x100) = uextend.i64 v0 + v3 ! range(64, 0, 0x300) = iadd.i64 v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/pcc/succeed/blockparams.clif b/cranelift/filetests/filetests/pcc/succeed/blockparams.clif index 092c1838c584..914edf12a17f 100644 --- a/cranelift/filetests/filetests/pcc/succeed/blockparams.clif +++ b/cranelift/filetests/filetests/pcc/succeed/blockparams.clif @@ -3,17 +3,17 @@ set enable_pcc=true target aarch64 function %f0(i64, i32) -> i64 { -block0(v0 ! max(64, 0x100): i64, v1: i32): - v2 ! max(64, 0x100) = iconst.i64 0x100 - v3 ! max(64, 0x200) = iadd v0, v2 +block0(v0 ! range(64, 0, 0x100): i64, v1: i32): + v2 ! 
range(64, 0, 0x100) = iconst.i64 0x100 + v3 ! range(64, 0, 0x200) = iadd v0, v2 brif v1, block1(v0), block2(v3) -block1(v4 ! max(64, 0x1000): i64): ;; broaden the range -- always allowed +block1(v4 ! range(64, 0, 0x1000): i64): ;; broaden the range -- always allowed jump block3(v4) -block2(v5 ! max(64, 0x2000): i64): +block2(v5 ! range(64, 0, 0x2000): i64): jump block3(v5) -block3(v6 ! max(64, 0x2001): i64): +block3(v6 ! range(64, 0, 0x2001): i64): return v6 } diff --git a/cranelift/filetests/filetests/pcc/succeed/extend.clif b/cranelift/filetests/filetests/pcc/succeed/extend.clif index 6f89f930928c..690e3d0acd90 100644 --- a/cranelift/filetests/filetests/pcc/succeed/extend.clif +++ b/cranelift/filetests/filetests/pcc/succeed/extend.clif @@ -3,7 +3,8 @@ set enable_pcc=true target aarch64 function %f0(i32) -> i64 { -block0(v0 ! max(32, 0xffff_ffff): i32): - v1 ! max(64, 0xffff_ffff) = uextend.i64 v0 +block0(v0 ! range(32, 42, 0xffff_fffe): i32): + ;; we're allowed to broaden the range on either end: + v1 ! range(64, 1, 0xffff_ffff) = uextend.i64 v0 return v1 } diff --git a/cranelift/filetests/filetests/pcc/succeed/load.clif b/cranelift/filetests/filetests/pcc/succeed/load.clif index 6766c7a28b8c..276eab361636 100644 --- a/cranelift/filetests/filetests/pcc/succeed/load.clif +++ b/cranelift/filetests/filetests/pcc/succeed/load.clif @@ -4,9 +4,9 @@ target aarch64 function %f0(i64, i32) -> i64 { mt0 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0x100): i32): - v2 ! max(64, 0x100) = uextend.i64 v1 - v3 ! mem(mt0, 8) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0x100): i32): + v2 ! range(64, 0, 0x100) = uextend.i64 v1 + v3 ! mem(mt0, 0, 8) = iadd.i64 v0, v2 v4 = load.i64 checked v3 return v4 } @@ -14,9 +14,9 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0x100): i32): function %f1(i64, i32) -> i64 { ;; Note the guard region of 8 bytes -- just enough for the below! mt0 = memory 0x1_0000_0008 -block0(v0 ! mem(mt0, 0): i64, v1 ! 
max(32, 0xffff_ffff): i32): - v2 ! max(64, 0xffff_ffff) = uextend.i64 v1 - v3 ! mem(mt0, 0xffff_ffff) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xffff_ffff): i32): + v2 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0xffff_ffff) = iadd.i64 v0, v2 v4 = load.i64 checked v3 return v4 } @@ -24,9 +24,9 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): ;; RegRegExtend mode on aarch64. function %f2(i64, i32) -> i8 { mt0 = memory 0x1000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): - v2 ! max(64, 0x100) = uextend.i64 v1 - v3 ! mem(mt0, 0x100) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xfff): i32): + v2 ! range(64, 0, 0x100) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0x100) = iadd.i64 v0, v2 v4 = load.i8 checked v3 return v4 } @@ -34,8 +34,8 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): ;; RegReg mode on aarch64. function %f3(i64, i64) -> i8 { mt0 = memory 0x1000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): - v2 ! mem(mt0, 0xfff) = iadd.i64 v0, v1 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(64, 0, 0xfff): i64): + v2 ! mem(mt0, 0, 0xfff) = iadd.i64 v0, v1 v3 = load.i8 checked v2 return v3 } @@ -43,11 +43,11 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): ;; RegScaledExtended mode on aarch64. function %f4(i64, i32) -> i64 { mt0 = memory 0x8000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): - v2 ! max(64, 0xfff) = uextend.i64 v1 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xfff): i32): + v2 ! range(64, 0, 0xfff) = uextend.i64 v1 v3 = iconst.i32 3 - v4 ! max(64, 0x7ff8) = ishl.i64 v2, v3 - v5 ! mem(mt0, 0x7ff8) = iadd.i64 v0, v4 + v4 ! range(64, 0, 0x7ff8) = ishl.i64 v2, v3 + v5 ! mem(mt0, 0, 0x7ff8) = iadd.i64 v0, v4 v6 = load.i64 checked v5 return v6 } @@ -55,10 +55,10 @@ block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xfff): i32): ;; RegScaled mode on aarch64. function %f5(i64, i64) -> i64 { mt0 = memory 0x8000 -block0(v0 ! 
mem(mt0, 0): i64, v1 ! max(64, 0xfff): i64): +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(64, 0, 0xfff): i64): v2 = iconst.i32 3 - v3 ! max(64, 0x7ff8) = ishl.i64 v1, v2 - v4 ! mem(mt0, 0x7ff8) = iadd.i64 v0, v3 + v3 ! range(64, 0, 0x7ff8) = ishl.i64 v1, v2 + v4 ! mem(mt0, 0, 0x7ff8) = iadd.i64 v0, v3 v5 = load.i64 checked v4 return v5 } diff --git a/cranelift/filetests/filetests/pcc/succeed/memtypes.clif b/cranelift/filetests/filetests/pcc/succeed/memtypes.clif index 9849ea00eb7c..ea35dcdbf462 100644 --- a/cranelift/filetests/filetests/pcc/succeed/memtypes.clif +++ b/cranelift/filetests/filetests/pcc/succeed/memtypes.clif @@ -5,7 +5,7 @@ target aarch64 function %f0(i64) -> i32 { mt0 = struct 8 { 0: i32, 4: i32 readonly } -block0(v0 ! mem(mt0, 0): i64): ;; v0 points to an instance of mt0, at offset 0 +block0(v0 ! mem(mt0, 0, 0): i64): ;; v0 points to an instance of mt0, at offset 0 v1 = load.i32 v0+0 v2 = load.i32 v0+4 v3 = iadd.i32 v1, v2 @@ -13,11 +13,11 @@ block0(v0 ! mem(mt0, 0): i64): ;; v0 points to an instance of mt0, at offset 0 } function %f1(i64) -> i32 { - mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0, 0) } mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64): - v1 ! mem(mt1, 0) = load.i64 v0 +block0(v0 ! mem(mt0, 0, 0): i64): + v1 ! mem(mt1, 0, 0) = load.i64 v0 v2 = load.i32 v1+0x1000 return v2 } diff --git a/cranelift/filetests/filetests/pcc/succeed/shift.clif b/cranelift/filetests/filetests/pcc/succeed/shift.clif index d4ebd25fcc92..685d5749774a 100644 --- a/cranelift/filetests/filetests/pcc/succeed/shift.clif +++ b/cranelift/filetests/filetests/pcc/succeed/shift.clif @@ -3,15 +3,15 @@ set enable_pcc=true target aarch64 function %f0(i32) -> i32 { -block0(v0 ! max(32, 0x100): i32): +block0(v0 ! range(32, 1, 0x100): i32): v1 = iconst.i32 2 - v2 ! max(32, 0x400) = ishl.i32 v0, v1 + v2 ! 
range(32, 4, 0x400) = ishl.i32 v0, v1 return v2 } function %f0(i32) -> i32 { block0(v0: i32): v1 = iconst.i32 2 - v2 ! max(32, 0xffff_ffff) = ishl.i32 v0, v1 + v2 ! range(32, 0, 0xffff_ffff) = ishl.i32 v0, v1 return v2 } diff --git a/cranelift/filetests/filetests/pcc/succeed/simple.clif b/cranelift/filetests/filetests/pcc/succeed/simple.clif index 241124e678a5..2baa3f9915e0 100644 --- a/cranelift/filetests/filetests/pcc/succeed/simple.clif +++ b/cranelift/filetests/filetests/pcc/succeed/simple.clif @@ -4,9 +4,9 @@ target aarch64 function %simple1(i64 vmctx, i32) -> i8 { mt0 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! max(32, 0xffff_ffff): i32): - v2 ! max(64, 0xffff_ffff) = uextend.i64 v1 - v3 ! mem(mt0, 0xffff_ffff) = iadd.i64 v0, v2 +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! range(32, 0, 0xffff_ffff): i32): + v2 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 + v3 ! mem(mt0, 0, 0xffff_ffff) = iadd.i64 v0, v2 v4 = load.i8 checked v3 return v4 } diff --git a/cranelift/filetests/filetests/pcc/succeed/struct.clif b/cranelift/filetests/filetests/pcc/succeed/struct.clif index 6555b22ad3c1..1d7c5688249a 100644 --- a/cranelift/filetests/filetests/pcc/succeed/struct.clif +++ b/cranelift/filetests/filetests/pcc/succeed/struct.clif @@ -3,17 +3,17 @@ set enable_pcc=true target aarch64 function %f0(i64) -> i64 { - mt0 = struct 8 { 0: i64 ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 ! mem(mt1, 0, 0) } mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64): - v1 ! mem(mt1, 0) = load.i64 checked v0 +block0(v0 ! mem(mt0, 0, 0): i64): + v1 ! mem(mt1, 0, 0) = load.i64 checked v0 return v1 } function %f1(i64, i64) { - mt0 = struct 8 { 0: i64 ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 ! mem(mt1, 0, 0) } mt1 = memory 0x1_0000_0000 -block0(v0 ! mem(mt0, 0): i64, v1 ! mem(mt1, 0): i64): +block0(v0 ! mem(mt0, 0, 0): i64, v1 ! 
mem(mt1, 0, 0): i64): store.i64 checked v1, v0 return } diff --git a/cranelift/filetests/filetests/pcc/succeed/vmctx.clif b/cranelift/filetests/filetests/pcc/succeed/vmctx.clif index 9747182931ac..643984fa79fc 100644 --- a/cranelift/filetests/filetests/pcc/succeed/vmctx.clif +++ b/cranelift/filetests/filetests/pcc/succeed/vmctx.clif @@ -5,16 +5,16 @@ target aarch64 ;; Equivalent to a Wasm `i64.load` from a static memory. function %f0(i64, i32) -> i64 { ;; mock vmctx struct: - mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0) } + mt0 = struct 8 { 0: i64 readonly ! mem(mt1, 0, 0) } ;; mock static memory: 4GiB range, plus 2GiB guard mt1 = memory 0x1_8000_0000 -block0(v0 ! mem(mt0, 0): i64, v1: i32): +block0(v0 ! mem(mt0, 0, 0): i64, v1: i32): ;; Compute the address: base + offset. Guard region (2GiB) is ;; sufficient for an 8-byte I64 load. - v2 ! mem(mt1, 0) = load.i64 checked v0+0 ;; base pointer - v3 ! max(64, 0xffff_ffff) = uextend.i64 v1 ;; offset - v4 ! mem(mt1, 0xffff_ffff) = iadd.i64 v2, v3 + v2 ! mem(mt1, 0, 0) = load.i64 checked v0+0 ;; base pointer + v3 ! range(64, 0, 0xffff_ffff) = uextend.i64 v1 ;; offset + v4 ! mem(mt1, 0, 0xffff_ffff) = iadd.i64 v2, v3 v5 = load.i64 checked v4 return v5 } diff --git a/cranelift/reader/src/parser.rs b/cranelift/reader/src/parser.rs index eabb7dff4027..16baa3588bba 100644 --- a/cranelift/reader/src/parser.rs +++ b/cranelift/reader/src/parser.rs @@ -2147,39 +2147,50 @@ impl<'a> Parser<'a> { // Parse a "fact" for proof-carrying code, attached to a value. 
// - // fact ::= "max" "(" bit-width "," max-value ")" - // | "mem" "(" memory-type "," mt-offset ")" + // fact ::= "range" "(" bit-width "," min-value "," max-value ")" + // | "mem" "(" memory-type "," mt-offset "," mt-offset ")" // bit-width ::= uimm64 + // min-value ::= uimm64 // max-value ::= uimm64 // valid-range ::= uimm64 - // mt-offset ::= imm64 + // mt-offset ::= uimm64 fn parse_fact(&mut self) -> ParseResult { match self.token() { - Some(Token::Identifier("max")) => { + Some(Token::Identifier("range")) => { self.consume(); - self.match_token(Token::LPar, "`max` fact needs an opening `(`")?; + self.match_token(Token::LPar, "`range` fact needs an opening `(`")?; let bit_width: u64 = self - .match_uimm64("expected a bit-width value for `max` fact")? + .match_uimm64("expected a bit-width value for `range` fact")? + .into(); + self.match_token(Token::Comma, "expected a comma")?; + let min: u64 = self + .match_uimm64("expected a min value for `range` fact")? .into(); self.match_token(Token::Comma, "expected a comma")?; let max: u64 = self - .match_uimm64("expected a max value for `max` fact")? + .match_uimm64("expected a max value for `range` fact")? 
.into(); - self.match_token(Token::RPar, "`max` fact needs a closing `)`")?; + self.match_token(Token::RPar, "`range` fact needs a closing `)`")?; let bit_width_max = match bit_width { x if x > 64 => { - return Err(self.error("bitwidth must be <= 64 bits on a `max` fact")); + return Err(self.error("bitwidth must be <= 64 bits on a `range` fact")); } 64 => u64::MAX, x => (1u64 << x) - 1, }; + if min > max { + return Err(self.error( + "min value must be less than or equal to max value on a `range` fact", + )); + } if max > bit_width_max { return Err( - self.error("max value is out of range for bitwidth on a `max` fact") + self.error("max value is out of range for bitwidth on a `range` fact") ); } - Ok(Fact::ValueMax { + Ok(Fact::Range { bit_width: u16::try_from(bit_width).unwrap(), + min: min.into(), max: max.into(), }) } @@ -2191,13 +2202,24 @@ impl<'a> Parser<'a> { Token::Comma, "expected a comma after memory type in `mem` fact", )?; - let offset: i64 = self - .match_imm64("expected an imm64 pointer offset for `mem` fact")? + let min_offset: u64 = self + .match_uimm64("expected a uimm64 minimum pointer offset for `mem` fact")? + .into(); + self.match_token( + Token::Comma, + "expected a comma after minimum offset in `mem` fact", + )?; + let max_offset: u64 = self + .match_uimm64("expected a uimm64 maximum pointer offset for `mem` fact")? + .into(); self.match_token(Token::RPar, "expected a `)`")?; - Ok(Fact::Mem { ty, offset }) + Ok(Fact::Mem { + ty, + min_offset, + max_offset, + }) } - _ => Err(self.error("expected a `max` or `mem` fact")), + _ => Err(self.error("expected a `range` or `mem` fact")), } }