@@ -2212,6 +2212,34 @@ impl<T> SpecFrom<T, IntoIter<T>> for Vec<T> {
2212
2212
}
2213
2213
}
2214
2214
2215
/// Returns a `try_fold`-compatible closure used by in-place collection: it
/// writes each yielded `item` into the destination buffer at `dst` and hands
/// back the advanced write pointer as the fold accumulator.
///
/// The error type is `!`, so the fold can never short-circuit — `try_fold` is
/// used purely to thread the write pointer through the iterator adapters.
///
/// SAFETY (hedged): soundness of overwriting the source allocation rests on
/// the `InPlaceIterable` contract — presumably the iterator has already read
/// past every slot it is asked to overwrite; that cannot be verified here
/// (see comment below), only loosely bounds-checked in debug builds.
fn write_in_place<T>(src_end: *const T) -> impl FnMut(*mut T, T) -> Result<*mut T, !> {
    move |mut dst, item| {
        unsafe {
            // the InPlaceIterable contract cannot be verified precisely here since
            // try_fold has an exclusive reference to the source pointer
            // all we can do is check if it's still in range
            debug_assert!(dst as *const _ <= src_end, "InPlaceIterable contract violation");
            ptr::write(dst, item);
            dst = dst.add(1);
        }
        Ok(dst)
    }
}
2228
+
2229
/// Like `write_in_place`, but for `T: Drop`: the fold accumulator is an
/// `InPlaceDrop<T>` sink instead of a bare pointer, so that if the iterator
/// panics mid-collection the elements written so far are cleaned up by the
/// sink's `Drop` impl (on the success path the caller suppresses that drop
/// with `mem::ManuallyDrop` — NOTE(review): exact `InPlaceDrop` drop
/// semantics are defined elsewhere in this file; confirm there).
///
/// The error type `!` means the fold never short-circuits; `try_fold` is used
/// only to thread the sink through the iterator adapters.
fn write_in_place_with_drop<T>(
    src_end: *const T,
) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
    move |mut sink, item| {
        unsafe {
            // same caveat as above
            debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
            ptr::write(sink.dst, item);
            sink.dst = sink.dst.add(1);
        }
        Ok(sink)
    }
}
2242
+
2215
2243
// Further specialization potential once
2216
2244
// https://github.com/rust-lang/rust/issues/62645 has been solved:
2217
2245
// T can be split into IN and OUT which only need to have the same size and alignment
@@ -2230,46 +2258,23 @@ where
2230
2258
let inner = unsafe { iterator.as_inner().as_into_iter() };
2231
2259
(inner.buf.as_ptr(), inner.end, inner.cap)
2232
2260
};
2233
- let dst = src_buf;
2234
2261
2262
+ // use try-fold
2263
+ // - it vectorizes better for some iterator adapters
2264
+ // - unlike most internal iteration methods it only takes a &mut self
2265
+ // - lets us thread the write pointer through its innards and get it back in the end
2235
2266
let dst = if mem::needs_drop::<T>() {
2236
- // special-case drop handling since it prevents vectorization
2237
- let mut sink = InPlaceDrop { inner: src_buf, dst };
2238
- let _ = iterator.try_for_each::<_, Result<_, !>>(|item| {
2239
- unsafe {
2240
- debug_assert!(
2241
- sink.dst as *const _ <= src_end,
2242
- "InPlaceIterable contract violation"
2243
- );
2244
- ptr::write(sink.dst, item);
2245
- sink.dst = sink.dst.add(1);
2246
- }
2247
- Ok(())
2248
- });
2267
+ // special-case drop handling since it forces us to lug that extra field around which
2268
+ // can inhibit optimizations
2269
+ let sink = InPlaceDrop { inner: src_buf, dst: src_buf };
2270
+ let sink = iterator
2271
+ .try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(src_end))
2272
+ .unwrap();
2249
2273
// iteration succeeded, don't drop head
2250
2274
let sink = mem::ManuallyDrop::new(sink);
2251
2275
sink.dst
2252
2276
} else {
2253
- // use try-fold
2254
- // - it vectorizes better
2255
- // - unlike most internal iteration methods methods it only takes a &mut self
2256
- // - lets us thread the write pointer through its innards and get it back in the end
2257
- iterator
2258
- .try_fold::<_, _, Result<_, !>>(dst, move |mut dst, item| {
2259
- unsafe {
2260
- // the InPlaceIterable contract cannot be verified precisely here since
2261
- // try_fold has an exclusive reference to the source pointer
2262
- // all we can do is check if it's still in range
2263
- debug_assert!(
2264
- dst as *const _ <= src_end,
2265
- "InPlaceIterable contract violation"
2266
- );
2267
- ptr::write(dst, item);
2268
- dst = dst.add(1);
2269
- }
2270
- Ok(dst)
2271
- })
2272
- .unwrap()
2277
+ iterator.try_fold::<_, _, Result<_, !>>(src_buf, write_in_place(src_end)).unwrap()
2273
2278
};
2274
2279
2275
2280
let src = unsafe { iterator.as_inner().as_into_iter() };
0 commit comments