@@ -228,134 +228,6 @@ macro_rules! midpoint_impl {
     };
 }
 
-macro_rules! widening_impl {
-    ($SelfT:ty, $WideT:ty, $BITS:literal, unsigned) => {
-        /// Calculates the complete product `self * rhs` without the possibility to overflow.
-        ///
-        /// This returns the low-order (wrapping) bits and the high-order (overflow) bits
-        /// of the result as two separate values, in that order.
-        ///
-        /// If you also need to add a carry to the wide result, then you want
-        /// [`Self::carrying_mul`] instead.
-        ///
-        /// # Examples
-        ///
-        /// Basic usage:
-        ///
-        /// Please note that this example is shared between integer types.
-        /// Which explains why `u32` is used here.
-        ///
-        /// ```
-        /// #![feature(bigint_helper_methods)]
-        /// assert_eq!(5u32.widening_mul(2), (10, 0));
-        /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2));
-        /// ```
-        #[unstable(feature = "bigint_helper_methods", issue = "85532")]
-        #[must_use = "this returns the result of the operation, \
-                      without modifying the original"]
-        #[inline]
-        pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
-            // note: longer-term this should be done via an intrinsic,
-            // but for now we can deal without an impl for u128/i128
-            // SAFETY: overflow will be contained within the wider types
-            let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT) };
-            (wide as $SelfT, (wide >> $BITS) as $SelfT)
-        }
-
-        /// Calculates the "full multiplication" `self * rhs + carry`
-        /// without the possibility to overflow.
-        ///
-        /// This returns the low-order (wrapping) bits and the high-order (overflow) bits
-        /// of the result as two separate values, in that order.
-        ///
-        /// Performs "long multiplication" which takes in an extra amount to add, and may return an
-        /// additional amount of overflow. This allows for chaining together multiple
-        /// multiplications to create "big integers" which represent larger values.
-        ///
-        /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead.
-        ///
-        /// # Examples
-        ///
-        /// Basic usage:
-        ///
-        /// Please note that this example is shared between integer types.
-        /// Which explains why `u32` is used here.
-        ///
-        /// ```
-        /// #![feature(bigint_helper_methods)]
-        /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0));
-        /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0));
-        /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2));
-        /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2));
-        #[doc = concat!("assert_eq!(",
-            stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ",
-            "(0, ", stringify!($SelfT), "::MAX));"
-        )]
-        /// ```
-        ///
-        /// This is the core operation needed for scalar multiplication when
-        /// implementing it for wider-than-native types.
-        ///
-        /// ```
-        /// #![feature(bigint_helper_methods)]
-        /// fn scalar_mul_eq(little_endian_digits: &mut Vec<u16>, multiplicand: u16) {
-        ///     let mut carry = 0;
-        ///     for d in little_endian_digits.iter_mut() {
-        ///         (*d, carry) = d.carrying_mul(multiplicand, carry);
-        ///     }
-        ///     if carry != 0 {
-        ///         little_endian_digits.push(carry);
-        ///     }
-        /// }
-        ///
-        /// let mut v = vec![10, 20];
-        /// scalar_mul_eq(&mut v, 3);
-        /// assert_eq!(v, [30, 60]);
-        ///
-        /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D);
-        /// let mut v = vec![0x4321, 0x8765];
-        /// scalar_mul_eq(&mut v, 0xFEED);
-        /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]);
-        /// ```
-        ///
-        /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul),
-        /// except that it gives the value of the overflow instead of just whether one happened:
-        ///
-        /// ```
-        /// #![feature(bigint_helper_methods)]
-        /// let r = u8::carrying_mul(7, 13, 0);
-        /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13));
-        /// let r = u8::carrying_mul(13, 42, 0);
-        /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42));
-        /// ```
-        ///
-        /// The value of the first field in the returned tuple matches what you'd get
-        /// by combining the [`wrapping_mul`](Self::wrapping_mul) and
-        /// [`wrapping_add`](Self::wrapping_add) methods:
-        ///
-        /// ```
-        /// #![feature(bigint_helper_methods)]
-        /// assert_eq!(
-        ///     789_u16.carrying_mul(456, 123).0,
-        ///     789_u16.wrapping_mul(456).wrapping_add(123),
-        /// );
-        /// ```
-        #[unstable(feature = "bigint_helper_methods", issue = "85532")]
-        #[must_use = "this returns the result of the operation, \
-                      without modifying the original"]
-        #[inline]
-        pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
-            // note: longer-term this should be done via an intrinsic,
-            // but for now we can deal without an impl for u128/i128
-            // SAFETY: overflow will be contained within the wider types
-            let wide = unsafe {
-                (self as $WideT).unchecked_mul(rhs as $WideT).unchecked_add(carry as $WideT)
-            };
-            (wide as $SelfT, (wide >> $BITS) as $SelfT)
-        }
-    };
-}
-
 impl i8 {
     int_impl! {
         Self = i8,
@@ -576,7 +448,6 @@ impl u8 {
         from_xe_bytes_doc = u8_xe_bytes_doc!(),
         bound_condition = "",
     }
-    widening_impl! { u8, u16, 8, unsigned }
     midpoint_impl! { u8, u16, unsigned }
 
     /// Checks if the value is within the ASCII range.
@@ -1192,7 +1063,6 @@ impl u16 {
         from_xe_bytes_doc = "",
         bound_condition = "",
     }
-    widening_impl! { u16, u32, 16, unsigned }
     midpoint_impl! { u16, u32, unsigned }
 
     /// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`].
@@ -1240,7 +1110,6 @@ impl u32 {
         from_xe_bytes_doc = "",
         bound_condition = "",
     }
-    widening_impl! { u32, u64, 32, unsigned }
     midpoint_impl! { u32, u64, unsigned }
 }
 
@@ -1264,7 +1133,6 @@ impl u64 {
         from_xe_bytes_doc = "",
         bound_condition = "",
     }
-    widening_impl! { u64, u128, 64, unsigned }
     midpoint_impl! { u64, u128, unsigned }
 }
 
@@ -1314,7 +1182,6 @@ impl usize {
         from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
         bound_condition = " on 16-bit targets",
     }
-    widening_impl! { usize, u32, 16, unsigned }
     midpoint_impl! { usize, u32, unsigned }
 }
 
@@ -1339,7 +1206,6 @@ impl usize {
         from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
         bound_condition = " on 32-bit targets",
     }
-    widening_impl! { usize, u64, 32, unsigned }
     midpoint_impl! { usize, u64, unsigned }
 }
 
@@ -1364,7 +1230,6 @@ impl usize {
         from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
         bound_condition = " on 64-bit targets",
    }
-    widening_impl! { usize, u128, 64, unsigned }
     midpoint_impl! { usize, u128, unsigned }
 }
 
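For context on what the removed helpers computed: both methods cast to a wider unsigned type, multiply there (adding the carry for `carrying_mul`), then split the result into a low (wrapping) half and a high (overflow) half. Below is a minimal standalone sketch of that technique for `u32`, using a plain `u64` cast instead of `unchecked_mul`/`unchecked_add`; the `widening_mul_u32` and `carrying_mul_u32` names are illustrative, not `std` API.

```rust
/// Full 64-bit product of two u32 values, returned as (low, high) halves.
fn widening_mul_u32(a: u32, b: u32) -> (u32, u32) {
    // u32::MAX * u32::MAX fits in u64, so the wide product cannot overflow.
    let wide = (a as u64) * (b as u64);
    (wide as u32, (wide >> 32) as u32)
}

/// Full product plus a carry digit, the building block for long multiplication.
fn carrying_mul_u32(a: u32, b: u32, carry: u32) -> (u32, u32) {
    // (2^32 - 1)^2 + (2^32 - 1) = 2^64 - 2^32 < 2^64, so this cannot overflow either.
    let wide = (a as u64) * (b as u64) + (carry as u64);
    (wide as u32, (wide >> 32) as u32)
}

fn main() {
    // Same values as the doc-tests in the removed hunk above.
    assert_eq!(widening_mul_u32(1_000_000_000, 10), (1410065408, 2));
    assert_eq!(carrying_mul_u32(1_000_000_000, 10, 10), (1410065418, 2));
}
```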