@@ -656,113 +656,117 @@ function Base.setindex!(a::Array, d::DArray,
    return a
end

- # Similar to Base.indexin, but just create a logical mask
- indexin_mask(a, b::Number) = a .== b
- indexin_mask(a, r::Range{Int}) = [i in r for i in a]
- indexin_mask(a, b::AbstractArray{Int}) = indexin_mask(a, IntSet(b))
- indexin_mask(a, b::AbstractArray) = indexin_mask(a, Set(b))
- indexin_mask(a, b) = [i in b for i in a]
-
- import Base: tail
- # Given a tuple of indices and a tuple of masks, restrict the indices to the
- # valid regions. This is, effectively, reversing Base.setindex_shape_check.
- # We can't just use indexing into MergedIndices here because getindex is much
- # pickier about singleton dimensions than setindex! is.
- restrict_indices(::Tuple{}, ::Tuple{}) = ()
- function restrict_indices(a::Tuple{Any, Vararg{Any}}, b::Tuple{Any, Vararg{Any}})
-     if (length(a[1]) == length(b[1]) == 1) || (length(a[1]) > 1 && length(b[1]) > 1)
-         (vec(a[1])[vec(b[1])], restrict_indices(tail(a), tail(b))...)
-     elseif length(a[1]) == 1
-         (a[1], restrict_indices(tail(a), b))
-     elseif length(b[1]) == 1 && b[1][1]
-         restrict_indices(a, tail(b))
-     else
-         throw(DimensionMismatch("this should be caught by setindex_shape_check; please submit an issue"))
+ # We also want to optimize setindex! with a SubDArray source, but this is hard
+ # and only works on 0.5.
+ if VERSION > v"0.5.0-dev+5230"
+     # Similar to Base.indexin, but just create a logical mask
+     indexin_mask(a, b::Number) = a .== b
+     indexin_mask(a, r::Range{Int}) = [i in r for i in a]
+     indexin_mask(a, b::AbstractArray{Int}) = indexin_mask(a, IntSet(b))
+     indexin_mask(a, b::AbstractArray) = indexin_mask(a, Set(b))
+     indexin_mask(a, b) = [i in b for i in a]
+
+     import Base: tail
+     # Given a tuple of indices and a tuple of masks, restrict the indices to the
+     # valid regions. This is, effectively, reversing Base.setindex_shape_check.
+     # We can't just use indexing into MergedIndices here because getindex is much
+     # pickier about singleton dimensions than setindex! is.
+     restrict_indices(::Tuple{}, ::Tuple{}) = ()
+     function restrict_indices(a::Tuple{Any, Vararg{Any}}, b::Tuple{Any, Vararg{Any}})
+         if (length(a[1]) == length(b[1]) == 1) || (length(a[1]) > 1 && length(b[1]) > 1)
+             (vec(a[1])[vec(b[1])], restrict_indices(tail(a), tail(b))...)
+         elseif length(a[1]) == 1
+             (a[1], restrict_indices(tail(a), b))
+         elseif length(b[1]) == 1 && b[1][1]
+             restrict_indices(a, tail(b))
+         else
+             throw(DimensionMismatch("this should be caught by setindex_shape_check; please submit an issue"))
+         end
    end
- end
- # The final indices are funky - they're allowed to accumulate together.
- # Too many masks is an easy fix -- just use the outer product to merge them:
- function restrict_indices(a::Tuple{Any}, b::Tuple{Any, Any, Vararg{Any}})
-     restrict_indices(a, (map(Bool, vec(vec(b[1])*vec(b[2])')), tail(tail(b))...))
- end
- # But too many indices is much harder; this will require merging the indices
- # in `a` before applying the final mask in `b`.
- function restrict_indices(a::Tuple{Any, Any, Vararg{Any}}, b::Tuple{Any})
-     if length(a[1]) == 1
-         (a[1], restrict_indices(tail(a), b))
-     else
-         # When one mask spans multiple indices, we need to merge the indices
-         # together. At this point, we can just use indexing to merge them since
-         # there's no longer special handling of singleton dimensions
-         (view(MergedIndices(a, map(length, a)), b[1]),)
-     end
- end
-
- immutable MergedIndices{T,N} <: AbstractArray{CartesianIndex{N}, N}
-     indices::T
-     sz::NTuple{N,Int}
- end
- Base.size(M::MergedIndices) = M.sz
- Base.getindex{_,N}(M::MergedIndices{_,N}, I::Vararg{Int, N}) = CartesianIndex(map(getindex, M.indices, I))
- # Boundschecking for using MergedIndices as an array index. This is overly
- # strict -- even for SubArrays of ReshapedIndices, we require that the entire
- # parent array's indices are valid. In this usage, it is just fine... and is a
- # huge optimization over exact bounds checking.
- typealias ReshapedMergedIndices{T,N,M<:MergedIndices} Base.ReshapedArray{T,N,M}
- typealias SubMergedIndices{T,N,M<:Union{MergedIndices, ReshapedMergedIndices}} SubArray{T,N,M}
- typealias MergedIndicesOrSub Union{MergedIndices, SubMergedIndices}
- import Base: _chkbnds
- # Ambiguity with linear indexing:
- @inline _chkbnds(A::AbstractVector, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
- @inline _chkbnds(A::AbstractArray, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
- # Generic bounds checking
- @inline _chkbnds{T,N}(A::AbstractArray{T,N}, checked::NTuple{N,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
- @inline _chkbnds{T,N,M}(A::AbstractArray{T,N}, checked::NTuple{M,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
- import Base: checkbounds_indices
- @inline checkbounds_indices(::Tuple{}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices((), (parent(parent(I[1])).indices..., tail(I)...))
- @inline checkbounds_indices(inds::Tuple{Any}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
- @inline checkbounds_indices(inds::Tuple, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
-
- # The tricky thing here is that we want to optimize the accesses into the
- # distributed array, but in doing so, we lose track of which indices in I we
- # should be using.
- #
- # I’ve come to the conclusion that the function is utterly insane.
- # There are *6* flavors of indices with four different reference points:
- # 1. Find the indices of each portion of the DArray.
- # 2. Find the valid subset of indices for the SubArray into that portion.
- # 3. Find the portion of the `I` indices that should be used when you access the
- #    `K` indices in the subarray. This guy is nasty. It’s totally backwards
- #    from all other arrays, wherein we simply iterate over the source array’s
- #    elements. You need to *both* know which elements in `J` were skipped
- #    (`indexin_mask`) and which dimensions should match up (`restrict_indices`)
- # 4. If `K` doesn’t correspond to an entire chunk, reinterpret `K` in terms of
- #    the local portion of the source array
- function Base.setindex!(a::Array, s::SubDArray,
-         I::Union{UnitRange{Int},Colon,Vector{Int},StepRange{Int,Int}}...)
-     Base.setindex_shape_check(s, Base.index_lengths(a, I...)...)
-     n = length(I)
-     d = s.parent
-     J = Base.decolon(d, s.indexes...)
-     @sync for i = 1:length(d.pids)
-         K_c = d.indexes[i]
-         K = map(intersect, J, K_c)
-         if !any(isempty, K)
-             K_mask = map(indexin_mask, J, K_c)
-             idxs = restrict_indices(Base.decolon(a, I...), K_mask)
-             if isequal(K, K_c)
-                 # whole chunk
-                 @async a[idxs...] = chunk(d, i)
-             else
-                 # partial chunk
-                 @async a[idxs...] =
-                     remotecall_fetch(d.pids[i]) do
-                         view(localpart(d), [K[j]-first(K_c[j])+1 for j = 1:length(J)]...)
-                     end
+     # The final indices are funky - they're allowed to accumulate together.
+     # Too many masks is an easy fix -- just use the outer product to merge them:
+     function restrict_indices(a::Tuple{Any}, b::Tuple{Any, Any, Vararg{Any}})
+         restrict_indices(a, (map(Bool, vec(vec(b[1])*vec(b[2])')), tail(tail(b))...))
+     end
+     # But too many indices is much harder; this will require merging the indices
+     # in `a` before applying the final mask in `b`.
+     function restrict_indices(a::Tuple{Any, Any, Vararg{Any}}, b::Tuple{Any})
+         if length(a[1]) == 1
+             (a[1], restrict_indices(tail(a), b))
+         else
+             # When one mask spans multiple indices, we need to merge the indices
+             # together. At this point, we can just use indexing to merge them since
+             # there's no longer special handling of singleton dimensions
+             (view(MergedIndices(a, map(length, a)), b[1]),)
+         end
+     end
+
+     immutable MergedIndices{T,N} <: AbstractArray{CartesianIndex{N}, N}
+         indices::T
+         sz::NTuple{N,Int}
+     end
+     Base.size(M::MergedIndices) = M.sz
+     Base.getindex{_,N}(M::MergedIndices{_,N}, I::Vararg{Int, N}) = CartesianIndex(map(getindex, M.indices, I))
+     # Boundschecking for using MergedIndices as an array index. This is overly
+     # strict -- even for SubArrays of ReshapedIndices, we require that the entire
+     # parent array's indices are valid. In this usage, it is just fine... and is a
+     # huge optimization over exact bounds checking.
+     typealias ReshapedMergedIndices{T,N,M<:MergedIndices} Base.ReshapedArray{T,N,M}
+     typealias SubMergedIndices{T,N,M<:Union{MergedIndices, ReshapedMergedIndices}} SubArray{T,N,M}
+     typealias MergedIndicesOrSub Union{MergedIndices, SubMergedIndices}
+     import Base: _chkbnds
+     # Ambiguity with linear indexing:
+     @inline _chkbnds(A::AbstractVector, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
+     @inline _chkbnds(A::AbstractArray, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
+     # Generic bounds checking
+     @inline _chkbnds{T,N}(A::AbstractArray{T,N}, checked::NTuple{N,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
+     @inline _chkbnds{T,N,M}(A::AbstractArray{T,N}, checked::NTuple{M,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
+     import Base: checkbounds_indices
+     @inline checkbounds_indices(::Tuple{}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices((), (parent(parent(I[1])).indices..., tail(I)...))
+     @inline checkbounds_indices(inds::Tuple{Any}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
+     @inline checkbounds_indices(inds::Tuple, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
+
+     # The tricky thing here is that we want to optimize the accesses into the
+     # distributed array, but in doing so, we lose track of which indices in I we
+     # should be using.
+     #
+     # I’ve come to the conclusion that the function is utterly insane.
+     # There are *6* flavors of indices with four different reference points:
+     # 1. Find the indices of each portion of the DArray.
+     # 2. Find the valid subset of indices for the SubArray into that portion.
+     # 3. Find the portion of the `I` indices that should be used when you access the
+     #    `K` indices in the subarray. This guy is nasty. It’s totally backwards
+     #    from all other arrays, wherein we simply iterate over the source array’s
+     #    elements. You need to *both* know which elements in `J` were skipped
+     #    (`indexin_mask`) and which dimensions should match up (`restrict_indices`)
+     # 4. If `K` doesn’t correspond to an entire chunk, reinterpret `K` in terms of
+     #    the local portion of the source array
+     function Base.setindex!(a::Array, s::SubDArray,
+             I::Union{UnitRange{Int},Colon,Vector{Int},StepRange{Int,Int}}...)
+         Base.setindex_shape_check(s, Base.index_lengths(a, I...)...)
+         n = length(I)
+         d = s.parent
+         J = Base.decolon(d, s.indexes...)
+         @sync for i = 1:length(d.pids)
+             K_c = d.indexes[i]
+             K = map(intersect, J, K_c)
+             if !any(isempty, K)
+                 K_mask = map(indexin_mask, J, K_c)
+                 idxs = restrict_indices(Base.decolon(a, I...), K_mask)
+                 if isequal(K, K_c)
+                     # whole chunk
+                     @async a[idxs...] = chunk(d, i)
+                 else
+                     # partial chunk
+                     @async a[idxs...] =
+                         remotecall_fetch(d.pids[i]) do
+                             view(localpart(d), [K[j]-first(K_c[j])+1 for j = 1:length(J)]...)
+                         end
+                 end
            end
        end
+         return a
    end
-     return a
end


Base.fill!(A::DArray, x) = begin
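
A small usage sketch (not part of this commit) of what the new Base.setindex!(a::Array, s::SubDArray, I...) method is for. It assumes a Julia 0.5-era session with DistributedArrays loaded on every worker and uses the package's exported drand; the sizes and ranges are made up.

addprocs(2)
@everywhere using DistributedArrays

d = drand(8, 8)          # an 8×8 DArray split across the workers
s = view(d, 2:5, 3:6)    # a SubDArray: a 4×4 window into d
a = zeros(8, 8)          # ordinary local destination array

# The assignment below should dispatch to the setindex! method added above: each chunk
# of d that overlaps the view is fetched (whole or partial) and written into the
# matching block of `a`, instead of materializing the whole SubDArray first.
a[1:4, 2:5] = s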
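
And a minimal, self-contained sketch of the index bookkeeping inside that method: indexin_mask marks which of the requested source indices fall inside one chunk, and the resulting mask picks out the destination positions that chunk writes. The first line repeats the generic fallback definition from the patch; all numbers are illustrative.

indexin_mask(a, b) = [i in b for i in a]   # generic fallback, as defined above

J_dim = 2:6                          # requested source indices along one dimension
owned = 3:5                          # the slice of that dimension owned by one worker
mask  = indexin_mask(J_dim, owned)   # Bool[false, true, true, true, false]
dest  = collect(10:14)               # decolonized destination indices for that dimension
dest[mask]                           # => [11, 12, 13]: the entries of `a` this chunk fills

# MergedIndices in one line: the merged index at position (i, j) is just the i-th and
# j-th entries of the per-dimension index vectors, bundled into a CartesianIndex.
inds = ([2, 4, 6], [1, 3])
map(getindex, inds, (2, 1))          # => (4, 1)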