@@ -232,7 +232,7 @@ def func(z):
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
     def test_autocast(self, x_dtype, rois_dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype)
 
     def _helper_boxes_shape(self, func):
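For context on the change every hunk here makes: the per-device context managers `torch.cuda.amp.autocast()` and `torch.cpu.amp.autocast()` are deprecated in recent PyTorch releases in favor of the device-generic `torch.amp.autocast(device_type)`. A minimal standalone sketch of the replacement API (the tensors and shapes below are illustrative, not taken from the test suite):

```python
import torch

# Device-generic autocast; the old per-device context managers
# (torch.cuda.amp.autocast / torch.cpu.amp.autocast) emit
# FutureWarnings in recent PyTorch releases.
device = "cuda" if torch.cuda.is_available() else "cpu"

x = torch.randn(8, 8, device=device)
w = torch.randn(8, 8, device=device)

with torch.amp.autocast(device_type=device):
    # Autocast-eligible ops such as matmul run in the reduced-precision
    # dtype (float16 on CUDA, bfloat16 on CPU by default), while the
    # inputs themselves stay float32.
    y = x @ w

print(y.dtype)  # torch.float16 on CUDA, torch.bfloat16 on CPU
```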
@@ -497,7 +497,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(
                 torch.device("cuda"),
                 contiguous=False,
@@ -513,7 +513,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.bfloat16))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.bfloat16))
     def test_autocast_cpu(self, aligned, deterministic, x_dtype, rois_dtype):
-        with torch.cpu.amp.autocast():
+        with torch.amp.autocast("cpu"):
             self.test_forward(
                 torch.device("cpu"),
                 contiguous=False,
@@ -856,14 +856,14 @@ def test_nms_gpu(self, iou, device, dtype=torch.float64):
     @pytest.mark.parametrize("dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, iou, dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_nms_gpu(iou=iou, dtype=dtype, device="cuda")
 
     @pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
     @pytest.mark.parametrize("dtype", (torch.float, torch.bfloat16))
     def test_autocast_cpu(self, iou, dtype):
         boxes, scores = self._create_tensors_with_iou(1000, iou)
-        with torch.cpu.amp.autocast():
+        with torch.amp.autocast("cpu"):
             keep_ref_float = ops.nms(boxes.to(dtype).float(), scores.to(dtype).float(), iou)
             keep_dtype = ops.nms(boxes.to(dtype), scores.to(dtype), iou)
         torch.testing.assert_close(keep_ref_float, keep_dtype)
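The CPU hunk above also illustrates what the NMS test verifies: under `torch.amp.autocast("cpu")`, bfloat16 inputs should select the same boxes as a float32 reference. A self-contained sketch of that check, with hypothetical boxes and scores standing in for the test's `_create_tensors_with_iou` helper:

```python
import torch
from torchvision import ops

# Hypothetical boxes in (x1, y1, x2, y2) format, plus scores.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [20.0, 20.0, 30.0, 30.0]])
scores = torch.tensor([0.9, 0.8, 0.7])

with torch.amp.autocast("cpu"):
    # Same assertion pattern as the test: NMS on bfloat16 inputs
    # should keep the same indices as the float32 reference.
    keep_ref = ops.nms(boxes.float(), scores.float(), iou_threshold=0.5)
    keep_bf16 = ops.nms(boxes.to(torch.bfloat16), scores.to(torch.bfloat16), iou_threshold=0.5)

torch.testing.assert_close(keep_ref, keep_bf16)
```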
@@ -1193,7 +1193,7 @@ def test_compare_cpu_cuda_grads(self, contiguous):
     @pytest.mark.parametrize("dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, batch_sz, dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(torch.device("cuda"), contiguous=False, batch_sz=batch_sz, dtype=dtype)
 
     def test_forward_scriptability(self):