Auto merge of rust-lang#126513 - matthiaskrgr:rollup-zy7sylj, r=matthiaskrgr

Rollup of 10 pull requests

Successful merges:

 - rust-lang#125829 (rustc_span: Add conveniences for working with span formats)
 - rust-lang#126279 (Migrate `inaccessible-temp-dir`, `output-with-hyphens` and `issue-10971-temps-dir` `run-make` tests to `rmake`)
 - rust-lang#126361 (Unify intrinsics body handling in StableMIR)
 - rust-lang#126417 (Add `f16` and `f128` inline ASM support for `x86` and `x86-64`)
 - rust-lang#126424 (Also sort `crt-static` in `--print target-features` output)
 - rust-lang#126428 (Polish `std::path::absolute` documentation; a usage sketch follows this list.)
 - rust-lang#126429 (Add `f16` and `f128` const eval for binary and unary operations)
 - rust-lang#126448 (End support for Python 3.8 in tidy)
 - rust-lang#126488 (Use `std::path::absolute` in bootstrap)
 - rust-lang#126511 (.mailmap: Associate both my work and my private email with me)

r? `@ghost`
`@rustbot` modify labels: rollup
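
Two of the merged PRs (rust-lang#126428 and rust-lang#126488) concern `std::path::absolute`. As a quick orientation, here is a minimal usage sketch of that API; the path literal is only an example, and unlike `fs::canonicalize` the function neither requires the path to exist nor resolves symlinks.

```rust
use std::path::absolute;

fn main() -> std::io::Result<()> {
    // A relative path is resolved against the current working directory;
    // symlinks are not followed and the file does not have to exist.
    let p = absolute("relative/dir/file.txt")?;
    assert!(p.is_absolute());
    println!("{}", p.display());
    Ok(())
}
```
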
bors committed Jun 15, 2024
2 parents 1d1356d + 1d270e6 commit 5758e23
Showing 38 changed files with 819 additions and 565 deletions.
1 change: 1 addition & 0 deletions .mailmap
@@ -379,6 +379,7 @@ Markus Westerlind <marwes91@gmail.com> Markus <marwes91@gmail.com>
Martin Carton <cartonmartin+git@gmail.com>
Martin Habovštiak <martin.habovstiak@gmail.com>
Martin Hafskjold Thoresen <martinhath@gmail.com>
Martin Nordholts <martin.nordholts@codetale.se> <enselic@gmail.com>
Matej Lach <matej.lach@gmail.com> Matej Ľach <matej.lach@gmail.com>
Mateusz Mikuła <mati865@gmail.com>
Mateusz Mikuła <mati865@gmail.com> <mati865@users.noreply.github.com>
100 changes: 100 additions & 0 deletions compiler/rustc_codegen_llvm/src/asm.rs
@@ -959,6 +959,43 @@ fn llvm_fixup_input<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.insert_element(
bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
value,
bx.const_usize(0),
);
bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
@@ -1036,6 +1073,39 @@ fn llvm_fixup_output<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
bx.bitcast(value, bx.type_f128())
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
bx.extract_element(value, bx.const_usize(0))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
@@ -1109,6 +1179,36 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
cx.type_vector(cx.type_i32(), 4)
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
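
The three hunks above teach `llvm_fixup_input`, `llvm_fixup_output`, and `llvm_fixup_output_type` to shuttle `f16` and `f128` scalars (and `f16` vectors) through x86 SSE/AVX register classes by bit-casting them into integer vectors that LLVM accepts. From the user's side, this is what allows inline `asm!` operands of those types. The snippet below is a hypothetical nightly-only sketch, assuming `#![feature(f16)]` and an x86-64 target; it is not code from the PR.

```rust
#![feature(f16)]
use std::arch::asm;

fn passthrough(x: f16) -> f16 {
    let y: f16;
    unsafe {
        // The new fixups place the f16 in the low lane of an SSE register,
        // so it can be used directly as an `xmm_reg` operand.
        asm!("movaps {0}, {1}", out(xmm_reg) y, in(xmm_reg) x);
    }
    y
}

fn main() {
    assert!(passthrough(1.5) == 1.5);
}
```
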
5 changes: 5 additions & 0 deletions compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -394,10 +394,15 @@ fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &ll
(*feature, desc)
})
.collect::<Vec<_>>();

// Since we add this at the end ...
rustc_target_features.extend_from_slice(&[(
"crt-static",
"Enables C Run-time Libraries to be statically linked",
)]);
// ... we need to sort the list again.
rustc_target_features.sort();

llvm_target_features.retain(|(f, _d)| !known_llvm_target_features.contains(f));

let max_feature_len = llvm_target_features
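
The comments in the hunk explain the fix: `crt-static` is appended after the LLVM-derived features have been collected, so the list has to be sorted again for `--print target-features` to stay alphabetized. A minimal standalone sketch of that append-then-re-sort pattern, with made-up feature data:

```rust
fn main() {
    let mut features: Vec<(&str, &str)> = vec![
        ("sse2", "Streaming SIMD Extensions 2"),
        ("avx512f", "AVX-512 Foundation"),
    ];
    // The rustc-only `crt-static` entry is appended after collection ...
    features.push(("crt-static", "Enables C Run-time Libraries to be statically linked"));
    // ... so re-sort to keep the printed list alphabetical.
    features.sort();
    for (name, desc) in &features {
        println!("{name}: {desc}");
    }
}
```
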
21 changes: 15 additions & 6 deletions compiler/rustc_const_eval/src/interpret/operator.rs
@@ -362,14 +362,18 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F16 => {
self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
}
FloatTy::F32 => {
self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
FloatTy::F128 => unimplemented!("f16_f128"),
FloatTy::F128 => {
self.binary_float_op(bin_op, layout, left.to_f128()?, right.to_f128()?)
}
})
}
_ if left.layout.ty.is_integral() => {
@@ -429,11 +433,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
ty::Float(fty) => {
let val = val.to_scalar();
if un_op != Neg {
span_bug!(self.cur_span(), "Invalid float op {:?}", un_op);
}

// No NaN adjustment here, `-` is a bitwise operation!
let res = match (un_op, fty) {
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
let res = match fty {
FloatTy::F16 => Scalar::from_f16(-val.to_f16()?),
FloatTy::F32 => Scalar::from_f32(-val.to_f32()?),
FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
};
Ok(ImmTy::from_scalar(res, layout))
}
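
The unary case can now handle all four float widths uniformly because, as the in-code comment notes, negation is a pure sign-bit flip and needs no NaN adjustment. A small standalone illustration of that property, using `f32` since it is the easiest width to inspect:

```rust
fn main() {
    let x: f32 = 1.5;
    // Unary `-` only flips the sign bit; every other bit is untouched,
    // which is why const eval needs no NaN normalization here.
    assert_eq!((-x).to_bits(), x.to_bits() ^ (1 << 31));
    println!("negation flips only the sign bit");
}
```
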
2 changes: 1 addition & 1 deletion compiler/rustc_expand/src/mbe/transcribe.rs
@@ -31,7 +31,7 @@ impl MutVisitor for Marker {
// it's some advanced case with macro-generated macros. So if we cache the marked version
// of that context once, we'll typically have a 100% cache hit rate after that.
let Marker(expn_id, transparency, ref mut cache) = *self;
span.update_ctxt(|ctxt| {
*span = span.map_ctxt(|ctxt| {
*cache
.entry(ctxt)
.or_insert_with(|| ctxt.apply_mark(expn_id.to_expn_id(), transparency))
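
This hunk swaps the removed `Span::update_ctxt` for the new `Span::map_ctxt` (introduced in the `rustc_span` change further down) while keeping the per-`Marker` cache intact. The caching idea itself is the standard `entry`/`or_insert_with` memoization pattern; here is a self-contained sketch with stand-in types:

```rust
use std::collections::HashMap;

// Stand-in for `ctxt.apply_mark(expn_id.to_expn_id(), transparency)`.
fn apply_mark(ctxt: u32) -> u32 {
    ctxt.wrapping_mul(2_654_435_761)
}

// Compute the marked context once per input and reuse it on later hits.
fn marked_ctxt(cache: &mut HashMap<u32, u32>, ctxt: u32) -> u32 {
    *cache.entry(ctxt).or_insert_with(|| apply_mark(ctxt))
}

fn main() {
    let mut cache = HashMap::new();
    let first = marked_ctxt(&mut cache, 3);
    let second = marked_ctxt(&mut cache, 3); // cache hit, no recomputation
    assert_eq!(first, second);
}
```
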
4 changes: 4 additions & 0 deletions compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -62,8 +62,10 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
ty::Float(FloatTy::F16) => Some(InlineAsmType::F16),
ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
ty::Float(FloatTy::F128) => Some(InlineAsmType::F128),
ty::FnPtr(_) => Some(asm_ty_isize),
ty::RawPtr(ty, _) if self.is_thin_ptr_ty(ty) => Some(asm_ty_isize),
ty::Adt(adt, args) if adt.repr().simd() => {
@@ -105,8 +107,10 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
width => bug!("unsupported pointer width: {width}"),
})
}
ty::Float(FloatTy::F16) => Some(InlineAsmType::VecF16(size)),
ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(size)),
ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(size)),
ty::Float(FloatTy::F128) => Some(InlineAsmType::VecF128(size)),
_ => None,
}
}
14 changes: 14 additions & 0 deletions compiler/rustc_middle/src/mir/interpret/value.rs
@@ -69,6 +69,13 @@ impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
}
}

impl<Prov> From<Half> for Scalar<Prov> {
#[inline(always)]
fn from(f: Half) -> Self {
Scalar::from_f16(f)
}
}

impl<Prov> From<Single> for Scalar<Prov> {
#[inline(always)]
fn from(f: Single) -> Self {
@@ -83,6 +90,13 @@ impl<Prov> From<Double> for Scalar<Prov> {
}
}

impl<Prov> From<Quad> for Scalar<Prov> {
#[inline(always)]
fn from(f: Quad) -> Self {
Scalar::from_f128(f)
}
}

impl<Prov> From<ScalarInt> for Scalar<Prov> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
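
These hunks give `Scalar` `From` impls for `rustc_apfloat`'s `Half` (f16) and `Quad` (f128), mirroring the existing `Single`/`Double` impls so generic code can convert any supported float width the same way. A generic sketch of that pattern with stand-in types, not the real `Scalar` or apfloat types:

```rust
#[derive(Debug, PartialEq)]
struct Scalar(u128);

struct Half(u16); // stand-in for a 16-bit software float
struct Quad(u128); // stand-in for a 128-bit software float

impl From<Half> for Scalar {
    fn from(f: Half) -> Self {
        Scalar(f.0 as u128)
    }
}

impl From<Quad> for Scalar {
    fn from(f: Quad) -> Self {
        Scalar(f.0)
    }
}

fn main() {
    // With the impls in place, `.into()` works uniformly for both widths.
    let a: Scalar = Half(0x3c00).into();
    let b: Scalar = Quad(1).into();
    assert_eq!(a, Scalar(0x3c00));
    assert_eq!(b, Scalar(1));
}
```
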
16 changes: 5 additions & 11 deletions compiler/rustc_smir/src/rustc_smir/context.rs
@@ -64,9 +64,10 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
}

fn has_body(&self, def: DefId) -> bool {
let tables = self.0.borrow();
let def_id = tables[def];
tables.tcx.is_mir_available(def_id)
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
let def_id = def.internal(&mut *tables, tcx);
tables.item_has_body(def_id)
}

fn foreign_modules(&self, crate_num: CrateNum) -> Vec<stable_mir::ty::ForeignModuleDef> {
@@ -323,13 +324,6 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
tcx.intrinsic(def_id).unwrap().name.to_string()
}

fn intrinsic_must_be_overridden(&self, def: IntrinsicDef) -> bool {
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
let def_id = def.0.internal(&mut *tables, tcx);
tcx.intrinsic_raw(def_id).unwrap().must_be_overridden
}

fn closure_sig(&self, args: &GenericArgs) -> PolyFnSig {
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
@@ -516,7 +510,7 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
let mut tables = self.0.borrow_mut();
let instance = tables.instances[def];
tables
.has_body(instance)
.instance_has_body(instance)
.then(|| BodyBuilder::new(tables.tcx, instance).build(&mut *tables))
}

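
In the new `has_body`, the stable `DefId` is converted to the internal id via `def.internal(&mut *tables, tcx)`, which requires mutable access to the tables; that is why the method switches from `borrow()` to `borrow_mut()`. A standalone sketch of that `RefCell` borrow pattern, with stand-in types rather than the real `Tables`:

```rust
use std::cell::RefCell;

struct Tables {
    interned: Vec<u64>,
}

impl Tables {
    // Converting/interning an id requires `&mut self`.
    fn internal(&mut self, raw: u64) -> usize {
        self.interned.push(raw);
        self.interned.len() - 1
    }
}

fn main() {
    let wrapper = RefCell::new(Tables { interned: Vec::new() });
    // A shared `borrow()` would not allow calling `internal`,
    // so take a mutable borrow for the duration of the call.
    let idx = wrapper.borrow_mut().internal(7);
    assert_eq!(idx, 0);
}
```
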
21 changes: 19 additions & 2 deletions compiler/rustc_smir/src/rustc_smir/mod.rs
@@ -51,16 +51,33 @@ impl<'tcx> Tables<'tcx> {
self.mir_consts.create_or_fetch(constant)
}

pub(crate) fn has_body(&self, instance: Instance<'tcx>) -> bool {
/// Return whether the instance has a body available.
///
/// Items and intrinsics may have a body available from their definition.
/// Shim bodies may be generated depending on their type.
pub(crate) fn instance_has_body(&self, instance: Instance<'tcx>) -> bool {
let def_id = instance.def_id();
self.tcx.is_mir_available(def_id)
self.item_has_body(def_id)
|| !matches!(
instance.def,
ty::InstanceDef::Virtual(..)
| ty::InstanceDef::Intrinsic(..)
| ty::InstanceDef::Item(..)
)
}

/// Return whether the item has a body defined by the user.
///
/// Note that intrinsics may have a placeholder body that shouldn't be used in practice.
/// In StableMIR, we handle this case as if the body is not available.
pub(crate) fn item_has_body(&self, def_id: DefId) -> bool {
let must_override = if let Some(intrinsic) = self.tcx.intrinsic(def_id) {
intrinsic.must_be_overridden
} else {
false
};
!must_override && self.tcx.is_mir_available(def_id)
}
}

/// Build a stable mir crate from a given crate number.
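
The two doc comments spell out the rule: an item only counts as having a body if it is not a must-override intrinsic (whose MIR is just a placeholder), and an instance additionally counts if it is a shim-like kind whose body the compiler generates. A simplified restatement of those predicates with stand-in types; the real code queries `tcx.intrinsic`, `is_mir_available`, and `InstanceDef`:

```rust
enum Kind {
    Item,
    Intrinsic,
    Virtual,
    Shim, // stands in for drop glue, closure shims, vtable shims, ...
}

fn item_has_body(mir_available: bool, must_be_overridden: bool) -> bool {
    // A must-override intrinsic only has a placeholder body, so StableMIR
    // reports it as having no body at all.
    !must_be_overridden && mir_available
}

fn instance_has_body(kind: &Kind, item_body: bool) -> bool {
    // Shim-like instances get a generated body even without user-written MIR.
    item_body || !matches!(kind, Kind::Item | Kind::Intrinsic | Kind::Virtual)
}

fn main() {
    assert!(!item_has_body(true, true)); // intrinsic with placeholder MIR
    assert!(instance_has_body(&Kind::Shim, false)); // generated shim body
}
```
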
21 changes: 9 additions & 12 deletions compiler/rustc_span/src/lib.rs
@@ -520,6 +520,7 @@ impl SpanData {
pub fn with_hi(&self, hi: BytePos) -> Span {
Span::new(self.lo, hi, self.ctxt, self.parent)
}
/// Avoid if possible, `Span::map_ctxt` should be preferred.
#[inline]
fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
Span::new(self.lo, self.hi, ctxt, self.parent)
@@ -576,9 +577,8 @@ impl Span {
self.data().with_hi(hi)
}
#[inline]
pub fn with_ctxt(mut self, ctxt: SyntaxContext) -> Span {
self.update_ctxt(|_| ctxt);
self
pub fn with_ctxt(self, ctxt: SyntaxContext) -> Span {
self.map_ctxt(|_| ctxt)
}
#[inline]
pub fn parent(self) -> Option<LocalDefId> {
@@ -1059,9 +1059,8 @@ }
}

#[inline]
pub fn apply_mark(mut self, expn_id: ExpnId, transparency: Transparency) -> Span {
self.update_ctxt(|ctxt| ctxt.apply_mark(expn_id, transparency));
self
pub fn apply_mark(self, expn_id: ExpnId, transparency: Transparency) -> Span {
self.map_ctxt(|ctxt| ctxt.apply_mark(expn_id, transparency))
}

#[inline]
@@ -1109,15 +1108,13 @@ }
}

#[inline]
pub fn normalize_to_macros_2_0(mut self) -> Span {
self.update_ctxt(|ctxt| ctxt.normalize_to_macros_2_0());
self
pub fn normalize_to_macros_2_0(self) -> Span {
self.map_ctxt(|ctxt| ctxt.normalize_to_macros_2_0())
}

#[inline]
pub fn normalize_to_macro_rules(mut self) -> Span {
self.update_ctxt(|ctxt| ctxt.normalize_to_macro_rules());
self
pub fn normalize_to_macro_rules(self) -> Span {
self.map_ctxt(|ctxt| ctxt.normalize_to_macro_rules())
}
}

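
All of these spots follow the same refactor: the in-place `update_ctxt` helper is replaced by a by-value `map_ctxt`, so each method simply returns the mapped span. A generic sketch of that shape with stand-in types, not rustc's `Span`/`SyntaxContext`:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ctxt(u32);

#[derive(Clone, Copy, Debug, PartialEq)]
struct MySpan {
    lo: u32,
    hi: u32,
    ctxt: Ctxt,
}

impl MySpan {
    // By-value combinator: build a new span with the mapped context.
    fn map_ctxt(self, f: impl FnOnce(Ctxt) -> Ctxt) -> MySpan {
        MySpan { ctxt: f(self.ctxt), ..self }
    }

    // `with_ctxt` and friends become one-liners on top of `map_ctxt`.
    fn with_ctxt(self, ctxt: Ctxt) -> MySpan {
        self.map_ctxt(|_| ctxt)
    }
}

fn main() {
    let s = MySpan { lo: 0, hi: 5, ctxt: Ctxt(1) };
    assert_eq!(s.with_ctxt(Ctxt(7)).ctxt, Ctxt(7));
    assert_eq!(s.map_ctxt(|Ctxt(n)| Ctxt(n + 1)).ctxt, Ctxt(2));
}
```
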