Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions compiler/rustc_span/src/symbol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1089,6 +1089,7 @@ symbols! {
file_options,
flags,
float,
float_mul_add_relaxed,
float_to_int_unchecked,
floorf16,
floorf32,
Expand Down
53 changes: 53 additions & 0 deletions library/core/src/num/f128.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1449,6 +1449,59 @@ impl f128 {
pub const fn algebraic_rem(self, rhs: f128) -> f128 {
intrinsics::frem_algebraic(self, rhs)
}

/// Fused multiply-add with relaxed precision semantics.
///
/// Computes `(self * a) + b`, non-deterministically executing either a fused
/// multiply-add (one rounding) or two separate operations with intermediate
/// rounding.
///
/// The operation may be fused if the code generator determines that the target
/// instruction set has support for a fused operation and that it is more efficient
/// than separate multiply and add instructions. Whether fusion occurs is unspecified
/// and may depend on optimization level and context.
///
/// # Precision
///
/// Unlike [`mul_add`](Self::mul_add), this operation does not guarantee which
/// rounding behavior will occur. It may perform either:
/// - A fused multiply-add with one rounding (more accurate)
/// - Separate multiply and add operations with two roundings (less accurate)
///
/// Use this method when you need performance optimization but can tolerate
/// non-deterministic precision. If you require guaranteed precision, use
/// [`mul_add`](Self::mul_add) (guaranteed one rounding) or the separate
/// multiply and add operations (guaranteed two roundings).
///
/// If you want even more optimization opportunities and aren't concerned about
/// error bounds, consider using the algebraic operations such as
/// [`algebraic_mul`](Self::algebraic_mul) and [`algebraic_add`](Self::algebraic_add).
///
/// # Examples
///
/// ```ignore (f128 support is platform-specific)
/// #![feature(f128)]
/// #![feature(float_mul_add_relaxed)]
/// # #[cfg(target_has_reliable_f128)] {
///
/// let m = 10.0_f128;
/// let x = 4.0_f128;
/// let b = 60.0_f128;
///
/// // The result may be computed with either one rounding (fused) or two.
/// let result = m.mul_add_relaxed(x, b);
///
/// // These values are exactly representable, so both strategies agree.
/// assert_eq!(result, 100.0);
/// # }
/// ```
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[rustc_const_unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[inline]
pub const fn mul_add_relaxed(self, a: f128, b: f128) -> f128 {
    // Lowers to the `fmuladd` intrinsic, which permits (but does not
    // require) fusing the multiply and add into a single FMA instruction.
    intrinsics::fmuladdf128(self, a, b)
}
}

// Functions in this module fall into `core_float_math`
Expand Down
53 changes: 53 additions & 0 deletions library/core/src/num/f16.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1434,6 +1434,59 @@ impl f16 {
pub const fn algebraic_rem(self, rhs: f16) -> f16 {
intrinsics::frem_algebraic(self, rhs)
}

/// Fused multiply-add with relaxed precision semantics.
///
/// Computes `(self * a) + b`, non-deterministically executing either a fused
/// multiply-add (one rounding) or two separate operations with intermediate
/// rounding.
///
/// The operation may be fused if the code generator determines that the target
/// instruction set has support for a fused operation and that it is more efficient
/// than separate multiply and add instructions. Whether fusion occurs is unspecified
/// and may depend on optimization level and context.
///
/// # Precision
///
/// Unlike [`mul_add`](Self::mul_add), this operation does not guarantee which
/// rounding behavior will occur. It may perform either:
/// - A fused multiply-add with one rounding (more accurate)
/// - Separate multiply and add operations with two roundings (less accurate)
///
/// Use this method when you need performance optimization but can tolerate
/// non-deterministic precision. If you require guaranteed precision, use
/// [`mul_add`](Self::mul_add) (guaranteed one rounding) or the separate
/// multiply and add operations (guaranteed two roundings).
///
/// If you want even more optimization opportunities and aren't concerned about
/// error bounds, consider using the algebraic operations such as
/// [`algebraic_mul`](Self::algebraic_mul) and [`algebraic_add`](Self::algebraic_add).
///
/// # Examples
///
/// ```ignore (f16 support is platform-specific)
/// #![feature(f16)]
/// #![feature(float_mul_add_relaxed)]
/// # #[cfg(target_has_reliable_f16)] {
///
/// let m = 10.0_f16;
/// let x = 4.0_f16;
/// let b = 60.0_f16;
///
/// // The result may be computed with either one rounding (fused) or two.
/// let result = m.mul_add_relaxed(x, b);
///
/// // These values are exactly representable, so both strategies agree.
/// assert_eq!(result, 100.0);
/// # }
/// ```
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[rustc_const_unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[inline]
pub const fn mul_add_relaxed(self, a: f16, b: f16) -> f16 {
    // Lowers to the `fmuladd` intrinsic, which permits (but does not
    // require) fusing the multiply and add into a single FMA instruction.
    intrinsics::fmuladdf16(self, a, b)
}
}

// Functions in this module fall into `core_float_math`
Expand Down
50 changes: 50 additions & 0 deletions library/core/src/num/f32.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1612,6 +1612,56 @@ impl f32 {
pub const fn algebraic_rem(self, rhs: f32) -> f32 {
intrinsics::frem_algebraic(self, rhs)
}

/// Fused multiply-add with relaxed precision semantics.
///
/// Computes `(self * a) + b`, where the implementation is free to choose,
/// non-deterministically, between a single fused multiply-add (rounded once)
/// and an ordinary multiply followed by an add (rounded twice).
///
/// Fusion happens only when the code generator decides that the target
/// instruction set supports a fused operation and that it beats separate
/// multiply and add instructions; whether that happens is unspecified and can
/// vary with optimization level and surrounding context.
///
/// # Precision
///
/// In contrast to [`mul_add`](Self::mul_add), no particular rounding behavior
/// is guaranteed. The result is produced by one of:
/// - a fused multiply-add, rounding once (more accurate), or
/// - a separate multiply then add, rounding twice (less accurate).
///
/// Reach for this method when performance matters and either rounding outcome
/// is acceptable. For a guaranteed single rounding use
/// [`mul_add`](Self::mul_add); for guaranteed double rounding, write the
/// multiply and add out as separate operations.
///
/// When even looser error bounds are fine and you want to give the optimizer
/// maximum freedom, consider the algebraic operations such as
/// [`algebraic_mul`](Self::algebraic_mul) and [`algebraic_add`](Self::algebraic_add).
///
/// # Examples
///
/// ```
/// #![feature(float_mul_add_relaxed)]
///
/// let slope = 10.0_f32;
/// let x = 4.0_f32;
/// let intercept = 60.0_f32;
///
/// // Evaluated as `slope * x + intercept`, fused or not.
/// let y = slope.mul_add_relaxed(x, intercept);
///
/// // These values are exactly representable, so both strategies agree.
/// assert_eq!(y, 100.0);
/// ```
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[rustc_const_unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[inline]
pub const fn mul_add_relaxed(self, a: f32, b: f32) -> f32 {
    // The `fmuladd` intrinsic allows, but does not require, fusion.
    intrinsics::fmuladdf32(self, a, b)
}
}

/// Experimental implementations of floating point functions in `core`.
Expand Down
50 changes: 50 additions & 0 deletions library/core/src/num/f64.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1610,6 +1610,56 @@ impl f64 {
pub const fn algebraic_rem(self, rhs: f64) -> f64 {
intrinsics::frem_algebraic(self, rhs)
}

/// Fused multiply-add with relaxed precision semantics.
///
/// Computes `(self * a) + b`, where the implementation is free to choose,
/// non-deterministically, between a single fused multiply-add (rounded once)
/// and an ordinary multiply followed by an add (rounded twice).
///
/// Fusion happens only when the code generator decides that the target
/// instruction set supports a fused operation and that it beats separate
/// multiply and add instructions; whether that happens is unspecified and can
/// vary with optimization level and surrounding context.
///
/// # Precision
///
/// In contrast to [`mul_add`](Self::mul_add), no particular rounding behavior
/// is guaranteed. The result is produced by one of:
/// - a fused multiply-add, rounding once (more accurate), or
/// - a separate multiply then add, rounding twice (less accurate).
///
/// Reach for this method when performance matters and either rounding outcome
/// is acceptable. For a guaranteed single rounding use
/// [`mul_add`](Self::mul_add); for guaranteed double rounding, write the
/// multiply and add out as separate operations.
///
/// When even looser error bounds are fine and you want to give the optimizer
/// maximum freedom, consider the algebraic operations such as
/// [`algebraic_mul`](Self::algebraic_mul) and [`algebraic_add`](Self::algebraic_add).
///
/// # Examples
///
/// ```
/// #![feature(float_mul_add_relaxed)]
///
/// let slope = 10.0_f64;
/// let x = 4.0_f64;
/// let intercept = 60.0_f64;
///
/// // Evaluated as `slope * x + intercept`, fused or not.
/// let y = slope.mul_add_relaxed(x, intercept);
///
/// // These values are exactly representable, so both strategies agree.
/// assert_eq!(y, 100.0);
/// ```
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[rustc_const_unstable(feature = "float_mul_add_relaxed", issue = "151770")]
#[inline]
pub const fn mul_add_relaxed(self, a: f64, b: f64) -> f64 {
    // The `fmuladd` intrinsic allows, but does not require, fusion.
    intrinsics::fmuladdf64(self, a, b)
}
}

#[unstable(feature = "core_float_math", issue = "137578")]
Expand Down
49 changes: 49 additions & 0 deletions tests/ui/intrinsics/float-mul-add-relaxed.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
//@ run-pass
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

f16 and f128 should be covered here too. They just need to be gated behind #[cfg(target_has_reliable_f16_math)] / #[cfg(target_has_reliable_f128_math)]

//@ compile-flags: -O

#![feature(float_mul_add_relaxed)]

// Exercise `mul_add_relaxed` for the float widths available on every target.
// NOTE(review): no f16/f128 coverage here; per the review discussion those
// would need `#[cfg(target_has_reliable_f16_math)]` /
// `#[cfg(target_has_reliable_f128_math)]` gating — TODO confirm.
fn main() {
    test_f32();
    test_f64();
}

// `mul_add_relaxed` may round once (fused) or twice (unfused), so every input
// below is chosen to give the same result under either strategy.
fn test_f32() {
    let a = 2.0_f32;
    let b = 3.0_f32;
    let c = 4.0_f32;

    // 2 * 3 + 4 = 10 is exact in f32, so any rounding strategy agrees.
    let result = a.mul_add_relaxed(b, c);
    assert_eq!(result, 10.0);

    // 1 * 1 + 1 = 2 is likewise exact.
    let x = 1.0_f32;
    let y = 1.0_f32;
    let z = 1.0_f32;
    assert_eq!(x.mul_add_relaxed(y, z), 2.0);

    // Edge cases: NaN propagates and infinity dominates.
    assert!(f32::NAN.mul_add_relaxed(1.0, 1.0).is_nan());
    assert_eq!(f32::INFINITY.mul_add_relaxed(2.0, 1.0), f32::INFINITY);
    // 0 * inf is an IEEE 754 invalid operation yielding NaN under BOTH
    // strategies (fused: fma(0, inf, 1) is invalid; unfused: 0 * inf = NaN,
    // and NaN + 1.0 is still NaN), so the result must be NaN, not 1.0.
    assert!(0.0_f32.mul_add_relaxed(f32::INFINITY, 1.0).is_nan());
}

// `mul_add_relaxed` may round once (fused) or twice (unfused), so every input
// below is chosen to give the same result under either strategy.
fn test_f64() {
    let a = 2.0_f64;
    let b = 3.0_f64;
    let c = 4.0_f64;

    // 2 * 3 + 4 = 10 is exact in f64, so any rounding strategy agrees.
    let result = a.mul_add_relaxed(b, c);
    assert_eq!(result, 10.0);

    // 1 * 1 + 1 = 2 is likewise exact.
    let x = 1.0_f64;
    let y = 1.0_f64;
    let z = 1.0_f64;
    assert_eq!(x.mul_add_relaxed(y, z), 2.0);

    // Edge cases: NaN propagates and infinity dominates.
    assert!(f64::NAN.mul_add_relaxed(1.0, 1.0).is_nan());
    assert_eq!(f64::INFINITY.mul_add_relaxed(2.0, 1.0), f64::INFINITY);
    // 0 * inf is an IEEE 754 invalid operation yielding NaN under BOTH
    // strategies (fused: fma(0, inf, 1) is invalid; unfused: 0 * inf = NaN,
    // and NaN + 1.0 is still NaN), so the result must be NaN, not 1.0.
    assert!(0.0_f64.mul_add_relaxed(f64::INFINITY, 1.0).is_nan());
}
Loading