 from monai.transforms import ClipIntensityPercentiles
 from monai.transforms.utils import soft_clip
 from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
+from monai.utils.type_conversion import convert_to_tensor
 from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose


+def test_hard_clip_func(im, lower, upper):
+    """Reference implementation: hard clip to the given lower/upper percentiles."""
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return clip(im_t, lower, upper)
+
+
+def test_soft_clip_func(im, lower, upper):
+    """Reference implementation: soft clip to the given lower/upper percentiles."""
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return soft_clip(im_t, minv=lower, maxv=upper, sharpness_factor=1.0, dtype=torch.float32)
+
+
 class TestClipIntensityPercentiles2D(NumpyImageTestCase2D):

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_hard_clipping_two_sided(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_hard_clipping_one_sided_high(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_hard_clipping_one_sided_low(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_soft_clipping_two_sided(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_soft_clipping_one_sided_high(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_soft_clipping_one_sided_low(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_channel_wise(self, p):
         clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
         im = p(self.imt)
         result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
             lower, upper = percentile(c, (5, 95))
             expected = clip(c, lower, upper)
             assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
@@ -118,35 +136,31 @@ def test_hard_clipping_two_sided(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_hard_clipping_one_sided_high(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_hard_clipping_one_sided_low(self, p):
         hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
         im = p(self.imt)
         result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_soft_clipping_two_sided(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, 95)
         # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -155,27 +169,26 @@ def test_soft_clipping_one_sided_high(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_soft_clipping_one_sided_low(self, p):
         soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
         im = p(self.imt)
         result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
         assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

     @parameterized.expand([[p] for p in TEST_NDARRAYS])
     def test_channel_wise(self, p):
         clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
         im = p(self.imt)
         result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
             lower, upper = percentile(c, (5, 95))
             expected = clip(c, lower, upper)
             assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
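
For reference, a minimal usage sketch of the transform these tests exercise; it assumes only the constructor arguments that appear in the diff above, and the random input image is illustrative:

    import numpy as np
    from monai.transforms import ClipIntensityPercentiles

    img = np.random.rand(1, 64, 64).astype(np.float32)  # channel-first 2D image
    hard_clipper = ClipIntensityPercentiles(lower=5, upper=95)  # hard clip to the 5th/95th percentiles
    soft_clipper = ClipIntensityPercentiles(lower=5, upper=95, sharpness_factor=1.0)  # smooth, softplus-based clip
    hard_result = hard_clipper(img)
    soft_result = soft_clipper(img)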