diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 8386a15933c98..a663f7a472213 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -34,7 +34,7 @@ where
     }
 }
 
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
@@ -70,73 +70,79 @@ where
         ret.cast_to(Uniform::new(Reg::i64(), size));
     } else {
         ret.make_indirect();
+        *offset += cx.data_layout().pointer_size();
     }
 }
 
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    if !arg.layout.is_aggregate() {
-        extend_integer_width_mips(arg, 64);
-        return;
-    }
-
     let dl = cx.data_layout();
     let size = arg.layout.size;
     let mut prefix = [None; 8];
     let mut prefix_index = 0;
 
-    match arg.layout.fields {
-        FieldsShape::Primitive => unreachable!(),
-        FieldsShape::Array { .. } => {
-            // Arrays are passed indirectly
-            arg.make_indirect();
-            return;
-        }
-        FieldsShape::Union(_) => {
-            // Unions and are always treated as a series of 64-bit integer chunks
-        }
-        FieldsShape::Arbitrary { .. } => {
-            // Structures are split up into a series of 64-bit integer chunks, but any aligned
-            // doubles not part of another aggregate are passed as floats.
-            let mut last_offset = Size::ZERO;
-
-            for i in 0..arg.layout.fields.count() {
-                let field = arg.layout.field(cx, i);
-                let offset = arg.layout.fields.offset(i);
-
-                // We only care about aligned doubles
-                if let BackendRepr::Scalar(scalar) = field.backend_repr {
-                    if scalar.primitive() == Primitive::Float(Float::F64) {
-                        if offset.is_aligned(dl.f64_align) {
-                            // Insert enough integers to cover [last_offset, offset)
-                            assert!(last_offset.is_aligned(dl.f64_align));
-                            for _ in 0..((offset - last_offset).bits() / 64)
-                                .min((prefix.len() - prefix_index) as u64)
-                            {
-                                prefix[prefix_index] = Some(Reg::i64());
-                                prefix_index += 1;
-                            }
+    // Detect need for padding
+    let align = arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align);
+    let pad_i32 = !offset.is_aligned(align);
 
-                            if prefix_index == prefix.len() {
-                                break;
+    if !arg.layout.is_aggregate() {
+        extend_integer_width_mips(arg, 64);
+    } else {
+        match arg.layout.fields {
+            FieldsShape::Primitive => unreachable!(),
+            FieldsShape::Array { .. } => {
+                // Arrays are passed indirectly
+                arg.make_indirect();
+            }
+            FieldsShape::Union(_) => {
+                // Unions and are always treated as a series of 64-bit integer chunks
+            }
+            FieldsShape::Arbitrary { .. } => {
+                // Structures are split up into a series of 64-bit integer chunks, but any aligned
+                // doubles not part of another aggregate are passed as floats.
+                let mut last_offset = Size::ZERO;
+
+                for i in 0..arg.layout.fields.count() {
+                    let field = arg.layout.field(cx, i);
+                    let offset = arg.layout.fields.offset(i);
+
+                    // We only care about aligned doubles
+                    if let BackendRepr::Scalar(scalar) = field.backend_repr {
+                        if scalar.primitive() == Primitive::Float(Float::F64) {
+                            if offset.is_aligned(dl.f64_align) {
+                                // Insert enough integers to cover [last_offset, offset)
+                                assert!(last_offset.is_aligned(dl.f64_align));
+                                for _ in 0..((offset - last_offset).bits() / 64)
+                                    .min((prefix.len() - prefix_index) as u64)
+                                {
+                                    prefix[prefix_index] = Some(Reg::i64());
+                                    prefix_index += 1;
+                                }
+
+                                if prefix_index == prefix.len() {
+                                    break;
+                                }
+
+                                prefix[prefix_index] = Some(Reg::f64());
+                                prefix_index += 1;
+                                last_offset = offset + Reg::f64().size;
                             }
-
-                            prefix[prefix_index] = Some(Reg::f64());
-                            prefix_index += 1;
-                            last_offset = offset + Reg::f64().size;
                         }
                     }
                 }
             }
-        }
-    };
-
-    // Extract first 8 chunks as the prefix
-    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
-    arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)));
+        };
+        // Extract first 8 chunks as the prefix
+        let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+        arg.cast_to_and_pad_i32(
+            CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)),
+            pad_i32,
+        );
+    }
+    *offset = offset.align_to(align) + size.align_to(align);
 }
 
 pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
@@ -144,14 +150,18 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    // mips64 argument passing is also affected by the alignment of aggregates.
+    // see mips.rs for how the offset is used
+    let mut offset = Size::ZERO;
+
     if !fn_abi.ret.is_ignore() {
-        classify_ret(cx, &mut fn_abi.ret);
+        classify_ret(cx, &mut fn_abi.ret, &mut offset);
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(cx, arg);
+        classify_arg(cx, arg, &mut offset);
    }
 }
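For context (not part of the patch): with the n64 calling convention the classification of an argument depends on the combined size and alignment of the arguments before it, so `classify_arg` now tracks a running `offset` and requests padding (`pad_i32`) when an over-aligned aggregate would otherwise start at a misaligned slot. A minimal, hypothetical sketch of the kind of signature this affects; the struct and function names are invented for illustration:

```rust
// Hypothetical example, not taken from the compiler sources or its tests.
#[repr(C, align(16))]
struct Aligned16 {
    x: u64,
    y: u64,
}

extern "C" {
    // `a` occupies the first argument slot. Because `Aligned16` has 16-byte
    // alignment, the offset after `a` is not aligned for it, so with the
    // change above classify_arg sets pad_i32 and `b` starts at an aligned slot.
    fn takes_aligned(a: i32, b: Aligned16);
}
```

The exact register or stack-slot assignment is target-specific; the sketch only illustrates why `classify_arg` and `classify_ret` now need to know the offset produced by the preceding arguments, mirroring how mips.rs handles the 32-bit O32 ABI.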