@@ -2,6 +2,8 @@
 use alloc::vec::Vec;
 use std::mem::MaybeUninit;
 
+use rawpointer::PointerExt;
+
 use crate::imp_prelude::*;
 
 use crate::dimension;
@@ -332,15 +334,18 @@ impl<A, D> Array<A, D>
     /// - If the array is empty (the axis or any other has length 0) or if `axis`
     ///   has length 1, then the array can always be appended.
     ///
-    /// ***Errors*** with a layout error if the array is not in standard order or
-    /// if it has holes, even exterior holes (from slicing). <br>
-    /// ***Errors*** with shape error if the length of the input row does not match
-    /// the length of the rows in the array. <br>
+    /// ***Errors*** with a shape error if the shape of self does not match the array-to-append;
+    /// all axes *except* the axis along which it is being appended matter for this check.
+    ///
+    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
+    /// Appending automatically changes the memory layout of the array so that it is appended
+    /// to along the "growing axis".
+    ///
+    /// Ensure appending is efficient by, for example, starting from an empty array and/or
+    /// always appending to an array along the same axis.
     ///
-    /// The memory layout of the `self` array matters, since it determines in which direction the
-    /// array can easily grow. Notice that an empty array is compatible both ways. The amortized
-    /// average complexity of the append is O(m) where *m* is the number of elements in the
-    /// array-to-append (equivalent to how `Vec::extend` works).
+    /// The amortized average complexity of the append, when appending along its growing axis, is
+    /// O(*m*) where *m* is the length of the row.
     ///
     /// The memory layout of the argument `array` does not matter.
     ///
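
To make the efficiency guidance above concrete, here is a small usage sketch. It assumes the method documented in this hunk is exposed roughly as `Array::append(Axis, ArrayView) -> Result<(), ShapeError>`; the exact public signature is not shown in the diff, so treat the call as illustrative.

```rust
use ndarray::{array, Array2, Axis};

fn main() -> Result<(), ndarray::ShapeError> {
    // Start from an empty array and always append along the same (growing) axis,
    // so each append stays amortized O(m) in the number of appended elements.
    let mut a: Array2<i32> = Array2::zeros((0, 3));
    for i in 0..4 {
        let row = array![[i, i + 1, i + 2]];
        a.append(Axis(0), row.view())?;
    }
    assert_eq!(a.shape(), &[4, 3]);
    Ok(())
}
```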
@@ -404,8 +409,11 @@ impl<A, D> Array<A, D>
         // array must be empty or have `axis` as the outermost (longest stride) axis
         if !self_is_empty && current_axis_len > 1 {
             // `axis` must be max stride axis or equal to its stride
-            let max_stride_axis = self.axes().max_by_key(|ax| ax.stride).unwrap();
-            if max_stride_axis.axis != axis && max_stride_axis.stride > self.stride_of(axis) {
+            let max_axis = self.axes().max_by_key(|ax| ax.stride.abs()).unwrap();
+            if max_axis.axis != axis && max_axis.stride.abs() > self.stride_of(axis) {
+                incompatible_layout = true;
+            }
+            if self.stride_of(axis) < 0 {
                 incompatible_layout = true;
             }
         }
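
Background for the change above: strides can be negative (for example after `invert_axis`), so picking the outermost axis by raw stride value could select the wrong axis, and a negative stride on the growing axis cannot be grown into. A minimal illustration, assuming the usual `ndarray` methods `strides()` and `invert_axis()`:

```rust
use ndarray::{Array2, Axis};

fn main() {
    // A C-order 3×4 array has strides [4, 1].
    let mut a = Array2::<f64>::zeros((3, 4));
    assert_eq!(a.strides(), &[4, 1]);

    // Inverting an axis keeps the data but flips that axis's stride sign,
    // which is why the layout check compares absolute stride values and
    // additionally rejects a negative stride on the append axis.
    a.invert_axis(Axis(1));
    assert_eq!(a.strides(), &[4, -1]);
}
```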
@@ -442,7 +450,8 @@ impl<A, D> Array<A, D>
             // This is the outermost/longest stride axis; so we find the max across the other axes
             let new_stride = self.axes().fold(1, |acc, ax| {
                 if ax.axis == axis { acc } else {
-                    Ord::max(acc, ax.len as isize * ax.stride)
+                    let this_ax = ax.len as isize * ax.stride;
+                    if this_ax.abs() > acc { this_ax } else { acc }
                 }
             });
             let mut strides = self.strides.clone();
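
A standalone sketch of the stride computation the fold above performs: the growing axis gets the largest absolute `len * stride` over the remaining axes. The helper name below is illustrative, not ndarray-internal code.

```rust
// Hypothetical helper mirroring the fold: pick the largest |len * stride|
// among all axes except the growing one, starting from 1.
fn growing_axis_stride(dims: &[usize], strides: &[isize], grow_axis: usize) -> isize {
    dims.iter()
        .zip(strides)
        .enumerate()
        .filter(|&(i, _)| i != grow_axis)
        .fold(1isize, |acc, (_, (&len, &stride))| {
            let this_ax = len as isize * stride;
            if this_ax.abs() > acc { this_ax } else { acc }
        })
}

fn main() {
    // C-order shape [3, 4] has strides [4, 1]; growing along Axis(0),
    // the remaining axis contributes 4 * 1 = 4, so the new stride is 4.
    assert_eq!(growing_axis_stride(&[3, 4], &[4, 1], 0), 4);
    // F-order shape [3, 4] has strides [1, 3]; growing along Axis(1),
    // the remaining axis contributes 3 * 1 = 3.
    assert_eq!(growing_axis_stride(&[3, 4], &[1, 3], 1), 3);
}
```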
@@ -454,26 +463,58 @@ impl<A, D> Array<A, D>
 
         unsafe {
             // grow backing storage and update head ptr
-            debug_assert_eq!(self.data.as_ptr(), self.as_ptr());
-            self.ptr = self.data.reserve(len_to_append); // because we are standard order
+            let data_to_array_offset = if std::mem::size_of::<A>() != 0 {
+                self.as_ptr().offset_from(self.data.as_ptr())
+            } else {
+                0
+            };
+            debug_assert!(data_to_array_offset >= 0);
+            self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset);
 
-            // copy elements from view to the array now
+            // clone elements from view to the array now
+            //
+            // To be robust for panics and drop the right elements, we want
+            // to fill the tail in memory order, so that we can drop the right elements on panic.
+            //
+            // We have: Zip::from(tail_view).and(array)
+            // Transform tail_view into standard order by inverting and moving its axes.
+            // Keep the Zip traversal unchanged by applying the same axis transformations to
+            // `array`. This ensures the Zip traverses the underlying memory in order.
             //
-            // make a raw view with the new row
-            // safe because the data was "full"
+            // XXX It would be possible to skip this transformation if the element
+            // doesn't have drop. However, in the interest of code coverage, all elements
+            // use this code initially.
+
+            // Invert axes in tail_view by inverting strides
+            let mut tail_strides = strides.clone();
+            if tail_strides.ndim() > 1 {
+                for i in 0..tail_strides.ndim() {
+                    let s = tail_strides[i] as isize;
+                    if s < 0 {
+                        tail_strides.set_axis(Axis(i), -s as usize);
+                        array.invert_axis(Axis(i));
+                    }
+                }
+            }
+
+            // With > 0 strides, the current end of data is the correct base pointer for tail_view
             let tail_ptr = self.data.as_end_nonnull();
-            let mut tail_view = RawArrayViewMut::new(tail_ptr, array_shape, strides.clone());
+            let mut tail_view = RawArrayViewMut::new(tail_ptr, array_shape, tail_strides);
+
+            if tail_view.ndim() > 1 {
+                sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
+                debug_assert!(tail_view.is_standard_layout(),
+                              "not std layout dim: {:?}, strides: {:?}",
+                              tail_view.shape(), tail_view.strides());
+            }
 
+            // Keep track of the currently filled length of `self.data` and update it
+            // on scope exit (panic or loop finish).
             struct SetLenOnDrop<'a, A: 'a> {
                 len: usize,
                 data: &'a mut OwnedRepr<A>,
             }
 
-            let mut length_guard = SetLenOnDrop {
-                len: self.data.len(),
-                data: &mut self.data,
-            };
-
             impl<A> Drop for SetLenOnDrop<'_, A> {
                 fn drop(&mut self) {
                     unsafe {
@@ -482,36 +523,18 @@ impl<A, D> Array<A, D>
                 }
             }
 
-            // To be robust for panics and drop the right elements, we want
-            // to fill the tail in-order, so that we can drop the right elements on
-            // panic.
-            //
-            // We have: Zip::from(tail_view).and(array)
-            // Transform tail_view into standard order by inverting and moving its axes.
-            // Keep the Zip traversal unchanged by applying the same axis transformations to
-            // `array`. This ensures the Zip traverses the underlying memory in order.
-            //
-            // XXX It would be possible to skip this transformation if the element
-            // doesn't have drop. However, in the interest of code coverage, all elements
-            // use this code initially.
+            let mut data_length_guard = SetLenOnDrop {
+                len: self.data.len(),
+                data: &mut self.data,
+            };
 
-            if tail_view.ndim() > 1 {
-                for i in 0..tail_view.ndim() {
-                    if tail_view.stride_of(Axis(i)) < 0 {
-                        tail_view.invert_axis(Axis(i));
-                        array.invert_axis(Axis(i));
-                    }
-                }
-                sort_axes_to_standard_order_tandem(&mut tail_view, &mut array);
-            }
             Zip::from(tail_view).and(array)
                 .debug_assert_c_order()
                 .for_each(|to, from| {
                     to.write(from.clone());
-                    length_guard.len += 1;
+                    data_length_guard.len += 1;
                 });
-
-            drop(length_guard);
+            drop(data_length_guard);
 
             // update array dimension
             self.strides = strides;
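
The `SetLenOnDrop` guard in the hunks above follows the same pattern `Vec` uses internally: advance the committed length only after each element is written, and publish it from `Drop` even if a `clone()` panics. A stand-alone illustration of the idea on a plain `Vec` (the helper `extend_with_clones` is hypothetical, not ndarray code):

```rust
// Guard that commits the initialized length when it goes out of scope,
// including on unwind, so partially filled spare capacity is never exposed.
struct SetLenOnDrop<'a, T> {
    len: usize,
    vec: &'a mut Vec<T>,
}

impl<T> Drop for SetLenOnDrop<'_, T> {
    fn drop(&mut self) {
        // Safe: `len` only counts slots that have already been written.
        unsafe { self.vec.set_len(self.len) }
    }
}

fn extend_with_clones<T: Clone>(vec: &mut Vec<T>, src: &[T]) {
    vec.reserve(src.len());
    let start = vec.len();
    let mut guard = SetLenOnDrop { len: start, vec };
    for (i, item) in src.iter().enumerate() {
        // spare_capacity_mut() is the uninitialized tail after the current len;
        // the len itself is only updated by the guard's Drop.
        guard.vec.spare_capacity_mut()[i].write(item.clone());
        guard.len += 1;
    }
    drop(guard);
}

fn main() {
    let mut v = vec![String::from("a")];
    extend_with_clones(&mut v, &[String::from("b"), String::from("c")]);
    assert_eq!(v, ["a", "b", "c"]);
}
```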
@@ -520,6 +543,7 @@ impl<A, D> Array<A, D>
         // multiple assertions after pointer & dimension update
         debug_assert_eq!(self.data.len(), self.len());
         debug_assert_eq!(self.len(), new_len);
+        debug_assert!(self.pointer_is_inbounds());
 
         Ok(())
     }
@@ -539,7 +563,10 @@ where
     sort_axes1_impl(&mut a.dim, &mut a.strides);
 }
 
-fn sort_axes_to_standard_order_tandem<S, S2, D>(a: &mut ArrayBase<S, D>, b: &mut ArrayBase<S2, D>)
+/// Sort axes to standard order, i.e. Axis(0) has the biggest stride and Axis(n - 1) the least stride.
+///
+/// The axes should have stride >= 0 before calling this method.
+fn sort_axes_in_default_order_tandem<S, S2, D>(a: &mut ArrayBase<S, D>, b: &mut ArrayBase<S2, D>)
 where
     S: RawData,
     S2: RawData,
@@ -549,8 +576,6 @@ where
         return;
    }
     sort_axes_impl(&mut a.dim, &mut a.strides, &mut b.dim, &mut b.strides);
-    debug_assert!(a.is_standard_layout(), "not std layout dim: {:?}, strides: {:?}",
-                  a.shape(), a.strides());
 }
 
 fn sort_axes1_impl<D>(adim: &mut D, astrides: &mut D)
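
For readers unfamiliar with the tandem sort used above, here is a self-contained sketch of the idea (illustrative only; `sort_in_tandem` and `apply_order` are hypothetical names, not the ndarray-internal `sort_axes_impl`): both dimension/stride pairs are permuted by the same axis order, chosen so the first array ends up with descending strides, i.e. standard (C) order. The non-negative-stride precondition from the doc comment holds here because inverted axes were already handled earlier in the append path.

```rust
// Hypothetical tandem sort: reorder both dim/stride pairs by one shared
// permutation, chosen so `a_strides` becomes descending.
fn sort_in_tandem(
    a_dim: &mut [usize], a_strides: &mut [isize],
    b_dim: &mut [usize], b_strides: &mut [isize],
) {
    let n = a_dim.len();
    let mut order: Vec<usize> = (0..n).collect();
    // Strides are assumed non-negative, matching the doc comment above.
    order.sort_by_key(|&i| std::cmp::Reverse(a_strides[i]));
    apply_order(a_dim, &order);
    apply_order(a_strides, &order);
    apply_order(b_dim, &order);
    apply_order(b_strides, &order);
}

// Permute a slice in place; order[k] gives the source index for position k.
fn apply_order<T: Copy>(xs: &mut [T], order: &[usize]) {
    let reordered: Vec<T> = order.iter().map(|&i| xs[i]).collect();
    xs.copy_from_slice(&reordered);
}

fn main() {
    // An F-order 2×3 array has strides [1, 2]; after the tandem sort both
    // arrays' axes are reordered so the larger stride of `a` comes first.
    let (mut ad, mut ast) = ([2usize, 3], [1isize, 2]);
    let (mut bd, mut bst) = ([5usize, 7], [1isize, 5]);
    sort_in_tandem(&mut ad, &mut ast, &mut bd, &mut bst);
    assert_eq!((ad, ast), ([3, 2], [2, 1]));
    assert_eq!((bd, bst), ([7, 5], [5, 1]));
}
```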