@@ -251,7 +251,7 @@ impl<A, D> Array<A, D>
    /// [1., 1., 1., 1.],
    /// [1., 1., 1., 1.]]);
    /// ```
-    pub fn try_append_array(&mut self, axis: Axis, array: ArrayView<A, D>)
+    pub fn try_append_array(&mut self, axis: Axis, mut array: ArrayView<A, D>)
        -> Result<(), ShapeError>
    where
        A: Clone,
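
> Note: a minimal usage sketch of the signature this hunk settles on. The method name `try_append_array` and the `ShapeError` result are taken from the diff itself and may not match the final public API; the example is illustrative only.

```rust
// Hypothetical usage of the signature shown in the hunk above.
use ndarray::{array, Array2, Axis, ShapeError};

fn append_row_demo() -> Result<(), ShapeError> {
    let mut a: Array2<f64> = array![[1., 2.], [3., 4.]];
    let extra = array![[5., 6.]];
    // Append along Axis(0); all other axis lengths must match,
    // otherwise a ShapeError is returned.
    a.try_append_array(Axis(0), extra.view())?;
    assert_eq!(a.shape(), &[3, 2]);
    Ok(())
}
```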
@@ -310,7 +310,7 @@ impl<A, D> Array<A, D>
            // make a raw view with the new row
            // safe because the data was "full"
            let tail_ptr = self.data.as_end_nonnull();
-            let tail_view = RawArrayViewMut::new(tail_ptr, array_shape, strides.clone());
+            let mut tail_view = RawArrayViewMut::new(tail_ptr, array_shape, strides.clone());

            struct SetLenOnDrop<'a, A: 'a> {
                len: usize,
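
> Note: the `SetLenOnDrop` guard declared here is what keeps the length bookkeeping panic-safe: the backing storage's length is only committed to however many elements were actually written. Below is a standalone sketch of the same pattern over a plain `Vec`; field names and the helper `extend_with_clones` are illustrative, not the crate's actual guard.

```rust
// A minimal sketch of the "set length on drop" pattern.
struct SetLenOnDrop<'a, T> {
    len: usize,
    vec: &'a mut Vec<T>,
}

impl<'a, T> Drop for SetLenOnDrop<'a, T> {
    fn drop(&mut self) {
        // Commit only the elements that were fully written, so a panic in the
        // middle of cloning leaves the vector containing exactly the
        // initialized elements and nothing else gets dropped.
        unsafe {
            self.vec.set_len(self.len);
        }
    }
}

fn extend_with_clones<T: Clone>(vec: &mut Vec<T>, src: &[T]) {
    vec.reserve(src.len());
    let mut guard = SetLenOnDrop { len: vec.len(), vec };
    for item in src {
        let len = guard.len;
        unsafe {
            // Write into reserved-but-uninitialized capacity.
            guard.vec.as_mut_ptr().add(len).write(item.clone());
        }
        // Count the element only after the (possibly panicking) clone succeeded.
        guard.len = len + 1;
    }
    // The guard drops here (or during unwinding) and sets the final length.
}
```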
@@ -330,37 +330,86 @@ impl<A, D> Array<A, D>
                }
            }

-            // we have a problem here XXX
-            //
            // To be robust for panics and drop the right elements, we want
            // to fill the tail in-order, so that we can drop the right elements on
-            // panic. Don't know how to achieve that.
+            // panic.
            //
-            // It might be easier to retrace our steps in a scope guard to drop the right
-            // elements.. (PartialArray style).
+            // We have: Zip::from(tail_view).and(array)
+            // Transform tail_view into standard order by inverting and moving its axes.
+            // Keep the Zip traversal unchanged by applying the same axis transformations to
+            // `array`. This ensures the Zip traverses the underlying memory in order.
            //
-            // assign the new elements
+            // XXX It would be possible to skip this transformation if the element
+            // doesn't have drop. However, in the interest of code coverage, all elements
+            // use this code initially.
+
+            if tail_view.ndim() > 1 {
+                for i in 0..tail_view.ndim() {
+                    if tail_view.stride_of(Axis(i)) < 0 {
+                        tail_view.invert_axis(Axis(i));
+                        array.invert_axis(Axis(i));
+                    }
+                }
+                sort_axes_to_standard_order(&mut tail_view, &mut array);
+            }
            Zip::from(tail_view).and(array)
+                .debug_assert_c_order()
                .for_each(|to, from| {
                    to.write(from.clone());
                    length_guard.len += 1;
                });

-            //length_guard.len += len_to_append;
-            dbg!(len_to_append);
            drop(length_guard);

            // update array dimension
            self.strides = strides;
            self.dim = res_dim;
-            dbg!(&self.dim);
-
        }
        // multiple assertions after pointer & dimension update
        debug_assert_eq!(self.data.len(), self.len());
        debug_assert_eq!(self.len(), new_len);
-        debug_assert!(self.is_standard_layout());

        Ok(())
    }
}
+
+fn sort_axes_to_standard_order<S, S2, D>(a: &mut ArrayBase<S, D>, b: &mut ArrayBase<S2, D>)
+where
+    S: RawData,
+    S2: RawData,
+    D: Dimension,
+{
+    if a.ndim() <= 1 {
+        return;
+    }
+    sort_axes_impl(&mut a.dim, &mut a.strides, &mut b.dim, &mut b.strides);
+    debug_assert!(a.is_standard_layout());
+}
+
+fn sort_axes_impl<D>(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D)
+where
+    D: Dimension,
+{
+    debug_assert!(adim.ndim() > 1);
+    debug_assert_eq!(adim.ndim(), bdim.ndim());
+    // bubble sort axes
+    let mut changed = true;
+    while changed {
+        changed = false;
+        for i in 0..adim.ndim() - 1 {
+            let axis_i = i;
+            let next_axis = i + 1;
+
+            // make sure higher stride axes sort before.
+            debug_assert!(astrides.slice()[axis_i] as isize >= 0);
+            if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize {
+                changed = true;
+                adim.slice_mut().swap(axis_i, next_axis);
+                astrides.slice_mut().swap(axis_i, next_axis);
+                bdim.slice_mut().swap(axis_i, next_axis);
+                bstrides.slice_mut().swap(axis_i, next_axis);
+            }
+        }
+    }
+}
+
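
> Note: a standalone sanity check of the axis-sorting idea used above, written with plain slices rather than the crate's `Dimension` machinery; the function `sort_axes_desc_by_stride` is hypothetical. After negative strides have been inverted, bubble-sorting axes by descending stride turns an f-order view into standard (C) order over the same memory, so an in-order traversal writes the tail front to back.

```rust
// Illustrative only: sort axes so that larger strides come first.
fn sort_axes_desc_by_stride(dim: &mut [usize], strides: &mut [isize]) {
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..dim.len().saturating_sub(1) {
            if strides[i] < strides[i + 1] {
                dim.swap(i, i + 1);
                strides.swap(i, i + 1);
                changed = true;
            }
        }
    }
}

fn main() {
    // An f-order (column-major) 2 x 3 block: axis 0 has stride 1, axis 1 has stride 2.
    let mut dim = [2usize, 3];
    let mut strides = [1isize, 2];
    sort_axes_desc_by_stride(&mut dim, &mut strides);
    // The larger stride now comes first: shape [3, 2] with strides [2, 1],
    // which is standard (row-major) order for the same underlying memory.
    assert_eq!(dim, [3, 2]);
    assert_eq!(strides, [2, 1]);
}
```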