@@ -457,93 +457,21 @@ def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                 # input data is not going to match the size on the screen so we
                 # have to resample to the correct number of pixels
 
-                # TODO slice input array first
-                a_min = A.min()
-                a_max = A.max()
-                if a_min is np.ma.masked:  # All masked; values don't matter.
-                    a_min, a_max = np.int32(0), np.int32(1)
                 if A.dtype.kind == 'f':  # Float dtype: scale to same dtype.
-                    scaled_dtype = np.dtype(
-                        np.float64 if A.dtype.itemsize > 4 else np.float32)
+                    scaled_dtype = np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
                     if scaled_dtype.itemsize < A.dtype.itemsize:
                         _api.warn_external(f"Casting input data from {A.dtype}"
                                            f" to {scaled_dtype} for imshow.")
                 else:  # Int dtype, likely.
+                    # TODO slice input array first
                     # Scale to appropriately sized float: use float32 if the
                     # dynamic range is small, to limit the memory footprint.
-                    da = a_max.astype(np.float64) - a_min.astype(np.float64)
-                    scaled_dtype = np.float64 if da > 1e8 else np.float32
-
-                # Scale the input data to [.1, .9]. The Agg interpolators clip
-                # to [0, 1] internally, and we use a smaller input scale to
-                # identify the interpolated points that need to be flagged as
-                # over/under. This may introduce numeric instabilities in very
-                # broadly scaled data.
-
-                # Always copy, and don't allow array subtypes.
-                A_scaled = np.array(A, dtype=scaled_dtype)
-                # Clip scaled data around norm if necessary. This is necessary
-                # for big numbers at the edge of float64's ability to represent
-                # changes. Applying a norm first would be good, but ruins the
-                # interpolation of over numbers.
-                self.norm.autoscale_None(A)
-                dv = np.float64(self.norm.vmax) - np.float64(self.norm.vmin)
-                vmid = np.float64(self.norm.vmin) + dv / 2
-                fact = 1e7 if scaled_dtype == np.float64 else 1e4
-                newmin = vmid - dv * fact
-                if newmin < a_min:
-                    newmin = None
-                else:
-                    a_min = np.float64(newmin)
-                newmax = vmid + dv * fact
-                if newmax > a_max:
-                    newmax = None
-                else:
-                    a_max = np.float64(newmax)
-                if newmax is not None or newmin is not None:
-                    np.clip(A_scaled, newmin, newmax, out=A_scaled)
-
-                # Rescale the raw data to [offset, 1-offset] so that the
-                # resampling code will run cleanly. Using dyadic numbers here
-                # could reduce the error, but would not fully eliminate it and
-                # breaks a number of tests (due to the slightly different
-                # error bouncing some pixels across a boundary in the (very
-                # quantized) colormapping step).
-                offset = .1
-                frac = .8
-                # Run vmin/vmax through the same rescaling as the raw data;
-                # otherwise, data values close or equal to the boundaries can
-                # end up on the wrong side due to floating point error.
-                vmin, vmax = self.norm.vmin, self.norm.vmax
-                if vmin is np.ma.masked:
-                    vmin, vmax = a_min, a_max
-                vrange = np.array([vmin, vmax], dtype=scaled_dtype)
-
-                A_scaled -= a_min
-                vrange -= a_min
-                # .item() handles a_min/a_max being ndarray subclasses.
-                a_min = a_min.astype(scaled_dtype).item()
-                a_max = a_max.astype(scaled_dtype).item()
-
-                if a_min != a_max:
-                    A_scaled /= ((a_max - a_min) / frac)
-                    vrange /= ((a_max - a_min) / frac)
-                A_scaled += offset
-                vrange += offset
+                    da = A.max().astype("f8") - A.min().astype("f8")
+                    scaled_dtype = "f8" if da > 1e8 else "f4"
+
                 # resample the input data to the correct resolution and shape
-                A_resampled = _resample(self, A_scaled, out_shape, t)
-                del A_scaled  # Make sure we don't use A_scaled anymore!
-                # Un-scale the resampled data to approximately the original
-                # range. Things that interpolated to outside the original range
-                # will still be outside, but possibly clipped in the case of
-                # higher order interpolation + drastically changing data.
-                A_resampled -= offset
-                vrange -= offset
-                if a_min != a_max:
-                    A_resampled *= ((a_max - a_min) / frac)
-                    vrange *= ((a_max - a_min) / frac)
-                A_resampled += a_min
-                vrange += a_min
+                A_resampled = _resample(self, A.astype(scaled_dtype), out_shape, t)
+
                 # if using NoNorm, cast back to the original datatype
                 if isinstance(self.norm, mcolors.NoNorm):
                     A_resampled = A_resampled.astype(A.dtype)
@@ -564,21 +492,10 @@ def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                 # Apply the pixel-by-pixel alpha values if present
                 alpha = self.get_alpha()
                 if alpha is not None and np.ndim(alpha) > 0:
-                    out_alpha *= _resample(self, alpha, out_shape,
-                                           t, resample=True)
+                    out_alpha *= _resample(self, alpha, out_shape, t, resample=True)
                 # mask and run through the norm
                 resampled_masked = np.ma.masked_array(A_resampled, out_mask)
-                # we have re-set the vmin/vmax to account for small errors
-                # that may have moved input values in/out of range
-                s_vmin, s_vmax = vrange
-                if isinstance(self.norm, mcolors.LogNorm) and s_vmin <= 0:
-                    # Don't give 0 or negative values to LogNorm
-                    s_vmin = np.finfo(scaled_dtype).eps
-                # Block the norm from sending an update signal during the
-                # temporary vmin/vmax change
-                with self.norm.callbacks.blocked(), \
-                     cbook._setattr_cm(self.norm, vmin=s_vmin, vmax=s_vmax):
-                    output = self.norm(resampled_masked)
+                output = self.norm(resampled_masked)
             else:
                 if A.ndim == 2:  # interpolation_stage = 'rgba'
                     self.norm.autoscale_None(A)
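
Note on the first hunk: the removed lines mapped the data into the narrow band [.1, .9] before handing it to the Agg resampler (which clips to [0, 1] internally) and undid that mapping afterwards, so that interpolated values falling outside the original range could still be flagged as over/under; the new code simply casts to a float dtype and resamples directly. Below is a standalone sketch, not matplotlib code, of that forward/inverse mapping; the helper name and the identity "resampler" are illustrative only.

import numpy as np

def rescale_roundtrip(A, resample):
    # Forward mapping used by the removed lines: shift/scale the data into
    # [offset, offset + frac] = [.1, .9].
    offset, frac = .1, .8
    a_min, a_max = float(A.min()), float(A.max())
    A_scaled = np.array(A, dtype=np.float64)  # always copy
    A_scaled -= a_min
    if a_min != a_max:
        A_scaled /= (a_max - a_min) / frac
    A_scaled += offset

    A_resampled = resample(A_scaled)  # stand-in for _resample(self, ...)

    # Inverse mapping: bring the resampled values back to (approximately)
    # the original range.
    A_resampled -= offset
    if a_min != a_max:
        A_resampled *= (a_max - a_min) / frac
    A_resampled += a_min
    return A_resampled

# With an identity "resampler" the round trip reproduces the input up to
# floating-point error.
A = np.arange(12.0).reshape(3, 4)
print(np.allclose(rescale_roundtrip(A, lambda x: x), A))  # True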
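Note on the second hunk: because the removed rescaling also perturbed the effective vmin/vmax, the old code temporarily overrode the norm's limits (guarding LogNorm against non-positive values and blocking its change notifications) before normalizing; with the rescaling gone, the norm is applied directly. A minimal standalone sketch of the removed pattern, assuming matplotlib >= 3.6 (for Normalize.callbacks.blocked) and reusing the private cbook._setattr_cm helper that the removed lines relied on; the s_vmin/s_vmax values here are made up for illustration.

import numpy as np
import matplotlib.colors as mcolors
from matplotlib import cbook

norm = mcolors.LogNorm(vmin=1.0, vmax=1e3)
data = np.ma.masked_invalid([[1.0, 10.0], [100.0, np.nan]])

# Hypothetical rescaled limits standing in for the removed `vrange` values.
s_vmin, s_vmax = 0.0, 1e3
if isinstance(norm, mcolors.LogNorm) and s_vmin <= 0:
    s_vmin = np.finfo(np.float64).eps  # don't give 0 or negative values to LogNorm

# Temporarily swap vmin/vmax without emitting a "changed" notification.
with norm.callbacks.blocked(), \
     cbook._setattr_cm(norm, vmin=s_vmin, vmax=s_vmax):
    output = norm(data)
print(output)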