miri: implement most floating point math operations using soft floats #3969

Closed · wants to merge 1 commit
36 changes: 31 additions & 5 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -27,6 +27,7 @@ ctrlc = "3.2.5"
 chrono = { version = "0.4.38", default-features = false }
 chrono-tz = "0.10"
 directories = "5"
+fpmath = { version = "0.1.1", features = ["soft-float"] }

 # Copied from `compiler/rustc/Cargo.toml`.
 # But only for some targets, it fails for others. Rustc configures this in its CI, but we can't
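`fpmath` is a pure-Rust floating-point math library, so with the `soft-float` feature these operations no longer go through the host's libm and produce the same bit-exact answer on every platform. A minimal sketch of calling it directly (the free-function names such as `fpmath::sin` are assumed from the crate's docs; Miri itself only reaches it through the new `math` module):

```rust
// Sketch only: `fpmath::sin` is assumed from the crate's public API;
// Miri routes all math calls through its new `math` module instead.
fn main() {
    let x: f64 = 0.5;
    // Computed in pure Rust, independent of the host's libm, so the
    // bit pattern of the result is identical across platforms.
    let y = fpmath::sin(x);
    println!("sin({x}) = {y}");
}
```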
62 changes: 24 additions & 38 deletions src/intrinsics/mod.rs
@@ -202,21 +202,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             => {
                 let [f] = check_arg_count(args)?;
                 let f = this.read_scalar(f)?.to_f32()?;
-                // Using host floats (but it's fine, these operations do not have guaranteed precision).
-                let f_host = f.to_host();
-                let res = match intrinsic_name {
-                    "sinf32" => f_host.sin(),
-                    "cosf32" => f_host.cos(),
-                    "sqrtf32" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
-                    "expf32" => f_host.exp(),
-                    "exp2f32" => f_host.exp2(),
-                    "logf32" => f_host.ln(),
-                    "log10f32" => f_host.log10(),
-                    "log2f32" => f_host.log2(),
+                let op = match intrinsic_name {
+                    "sinf32" => math::UnaryOp::Sin,
+                    "cosf32" => math::UnaryOp::Cos,
+                    "sqrtf32" => math::UnaryOp::Sqrt,
+                    "expf32" => math::UnaryOp::Exp,
+                    "exp2f32" => math::UnaryOp::Exp2,
+                    "logf32" => math::UnaryOp::Ln,
+                    "log10f32" => math::UnaryOp::Log10,
+                    "log2f32" => math::UnaryOp::Log2,
                     _ => bug!(),
                 };
-                let res = res.to_soft();
-                let res = this.adjust_nan(res, &[f]);
+                let res = math::unary_op(this, op, f);
                 this.write_scalar(res, dest)?;
             }
             #[rustfmt::skip]
@@ -231,21 +228,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             => {
                 let [f] = check_arg_count(args)?;
                 let f = this.read_scalar(f)?.to_f64()?;
-                // Using host floats (but it's fine, these operations do not have guaranteed precision).
-                let f_host = f.to_host();
-                let res = match intrinsic_name {
-                    "sinf64" => f_host.sin(),
-                    "cosf64" => f_host.cos(),
-                    "sqrtf64" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
-                    "expf64" => f_host.exp(),
-                    "exp2f64" => f_host.exp2(),
-                    "logf64" => f_host.ln(),
-                    "log10f64" => f_host.log10(),
-                    "log2f64" => f_host.log2(),
+                let op = match intrinsic_name {
+                    "sinf64" => math::UnaryOp::Sin,
+                    "cosf64" => math::UnaryOp::Cos,
+                    "sqrtf64" => math::UnaryOp::Sqrt,
+                    "expf64" => math::UnaryOp::Exp,
+                    "exp2f64" => math::UnaryOp::Exp2,
+                    "logf64" => math::UnaryOp::Ln,
+                    "log10f64" => math::UnaryOp::Log10,
+                    "log2f64" => math::UnaryOp::Log2,
                     _ => bug!(),
                 };
-                let res = res.to_soft();
-                let res = this.adjust_nan(res, &[f]);
+                let res = math::unary_op(this, op, f);
                 this.write_scalar(res, dest)?;
             }

@@ -299,37 +293,29 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 let [f1, f2] = check_arg_count(args)?;
                 let f1 = this.read_scalar(f1)?.to_f32()?;
                 let f2 = this.read_scalar(f2)?.to_f32()?;
-                // Using host floats (but it's fine, this operation does not have guaranteed precision).
-                let res = f1.to_host().powf(f2.to_host()).to_soft();
-                let res = this.adjust_nan(res, &[f1, f2]);
+                let res = math::binary_op(this, math::BinaryOp::Powf, f1, f2);
                 this.write_scalar(res, dest)?;
             }
             "powf64" => {
                 let [f1, f2] = check_arg_count(args)?;
                 let f1 = this.read_scalar(f1)?.to_f64()?;
                 let f2 = this.read_scalar(f2)?.to_f64()?;
-                // Using host floats (but it's fine, this operation does not have guaranteed precision).
-                let res = f1.to_host().powf(f2.to_host()).to_soft();
-                let res = this.adjust_nan(res, &[f1, f2]);
+                let res = math::binary_op(this, math::BinaryOp::Powf, f1, f2);
                 this.write_scalar(res, dest)?;
             }

             "powif32" => {
                 let [f, i] = check_arg_count(args)?;
                 let f = this.read_scalar(f)?.to_f32()?;
                 let i = this.read_scalar(i)?.to_i32()?;
-                // Using host floats (but it's fine, this operation does not have guaranteed precision).
-                let res = f.to_host().powi(i).to_soft();
-                let res = this.adjust_nan(res, &[f]);
+                let res = math::powi(this, f, i);
                 this.write_scalar(res, dest)?;
             }
             "powif64" => {
                 let [f, i] = check_arg_count(args)?;
                 let f = this.read_scalar(f)?.to_f64()?;
                 let i = this.read_scalar(i)?.to_i32()?;
-                // Using host floats (but it's fine, this operation does not have guaranteed precision).
-                let res = f.to_host().powi(i).to_soft();
-                let res = this.adjust_nan(res, &[f]);
+                let res = math::powi(this, f, i);
                 this.write_scalar(res, dest)?;
             }
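All four scalar call sites now funnel into a shared `math` module instead of round-tripping through host floats (`to_host`/`to_soft`) and adjusting NaNs inline. The module itself is not rendered in this diff, but its shape follows from the call sites. Here is a self-contained sketch, simplified to plain `f64` (the real helper operates on `rustc_apfloat` soft floats and still applies Miri's non-deterministic NaN adjustment, as the deleted inline `adjust_nan` calls did); the `fpmath` function names are assumptions based on the crate's docs:

```rust
// Hypothetical shape of the new dispatch, inferred from the call sites
// above; not the PR's actual src/math.rs.

#[derive(Copy, Clone)]
pub enum UnaryOp {
    Sin,
    Cos,
    Sqrt,
    Exp,
    Exp2,
    Ln,
    Log10,
    Log2,
}

pub fn unary_op(op: UnaryOp, f: f64) -> f64 {
    // Every arm is computed in software by fpmath, never by the host libm.
    match op {
        UnaryOp::Sin => fpmath::sin(f),
        UnaryOp::Cos => fpmath::cos(f),
        UnaryOp::Sqrt => fpmath::sqrt(f),
        UnaryOp::Exp => fpmath::exp(f),
        UnaryOp::Exp2 => fpmath::exp2(f),
        UnaryOp::Ln => fpmath::log(f),
        UnaryOp::Log10 => fpmath::log10(f),
        UnaryOp::Log2 => fpmath::log2(f),
    }
}
```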
47 changes: 14 additions & 33 deletions src/intrinsics/simd.rs
@@ -56,12 +56,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             assert_eq!(dest_len, op_len);

             #[derive(Copy, Clone)]
-            enum Op<'a> {
+            enum Op {
                 MirOp(mir::UnOp),
                 Abs,
                 Round(rustc_apfloat::Round),
                 Numeric(Symbol),
-                HostOp(&'a str),
+                Math(math::UnaryOp),
             }
             let which = match intrinsic_name {
                 "neg" => Op::MirOp(mir::UnOp::Neg),
@@ -75,7 +75,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 "cttz" => Op::Numeric(sym::cttz),
                 "bswap" => Op::Numeric(sym::bswap),
                 "bitreverse" => Op::Numeric(sym::bitreverse),
-                _ => Op::HostOp(intrinsic_name),
+                "fsqrt" => Op::Math(math::UnaryOp::Sqrt),
+                "fsin" => Op::Math(math::UnaryOp::Sin),
+                "fcos" => Op::Math(math::UnaryOp::Cos),
+                "fexp" => Op::Math(math::UnaryOp::Exp),
+                "fexp2" => Op::Math(math::UnaryOp::Exp2),
+                "flog" => Op::Math(math::UnaryOp::Ln),
+                "flog2" => Op::Math(math::UnaryOp::Log2),
+                "flog10" => Op::Math(math::UnaryOp::Log10),
+                _ => bug!(),
             };

             for i in 0..dest_len {
@@ -100,47 +108,20 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                         FloatTy::F128 => unimplemented!("f16_f128"),
                     }
                 }
-                Op::HostOp(host_op) => {
+                Op::Math(math_op) => {
                     let ty::Float(float_ty) = op.layout.ty.kind() else {
                         span_bug!(this.cur_span(), "{} operand is not a float", intrinsic_name)
                     };
-                    // Using host floats (but it's fine, these operations do not have guaranteed precision).
                     match float_ty {
                         FloatTy::F16 => unimplemented!("f16_f128"),
                         FloatTy::F32 => {
                             let f = op.to_scalar().to_f32()?;
-                            let f_host = f.to_host();
-                            let res = match host_op {
-                                "fsqrt" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
-                                "fsin" => f_host.sin(),
-                                "fcos" => f_host.cos(),
-                                "fexp" => f_host.exp(),
-                                "fexp2" => f_host.exp2(),
-                                "flog" => f_host.ln(),
-                                "flog2" => f_host.log2(),
-                                "flog10" => f_host.log10(),
-                                _ => bug!(),
-                            };
-                            let res = res.to_soft();
-                            let res = this.adjust_nan(res, &[f]);
+                            let res = math::unary_op(this, math_op, f);
                             Scalar::from(res)
                         }
                         FloatTy::F64 => {
                             let f = op.to_scalar().to_f64()?;
-                            let f_host = f.to_host();
-                            let res = match host_op {
-                                "fsqrt" => f_host.sqrt(),
-                                "fsin" => f_host.sin(),
-                                "fcos" => f_host.cos(),
-                                "fexp" => f_host.exp(),
-                                "fexp2" => f_host.exp2(),
-                                "flog" => f_host.ln(),
-                                "flog2" => f_host.log2(),
-                                "flog10" => f_host.log10(),
-                                _ => bug!(),
-                            };
-                            let res = res.to_soft();
-                            let res = this.adjust_nan(res, &[f]);
+                            let res = math::unary_op(this, math_op, f);
                             Scalar::from(res)
                         }
                         FloatTy::F128 => unimplemented!("f16_f128"),
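Because the SIMD path now dispatches lane by lane through the same `math::unary_op` helper as the scalar intrinsics, a vectorized `fsin` can no longer disagree with a scalar `sinf32` on the same input, and an unrecognized vector intrinsic now hits `bug!()` instead of being silently treated as a host math op.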
1 change: 1 addition & 0 deletions src/lib.rs
@@ -85,6 +85,7 @@ mod eval;
 mod helpers;
 mod intrinsics;
 mod machine;
+mod math;
 mod mono_hash_map;
 mod operator;
 mod provenance_gc;
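The binary entry points of the new module can be reconstructed from their call sites the same way. A hypothetical sketch matching the `powf`/`powi` intrinsics above, again simplified to plain `f64` and with the `fpmath` function names (`pow`, `powi`) assumed rather than confirmed:

```rust
// Hypothetical companions to `unary_op`, inferred from the call sites in
// src/intrinsics/mod.rs; names and signatures are assumptions.

#[derive(Copy, Clone)]
pub enum BinaryOp {
    Powf,
}

pub fn binary_op(op: BinaryOp, f1: f64, f2: f64) -> f64 {
    match op {
        // Soft-float power function: platform-independent result.
        BinaryOp::Powf => fpmath::pow(f1, f2),
    }
}

pub fn powi(f: f64, i: i32) -> f64 {
    // Integer power, also computed without touching the host's libm.
    fpmath::powi(f, i)
}
```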