diff --git a/README.md b/README.md
index 95e48c27..2b588dde 100644
--- a/README.md
+++ b/README.md
@@ -281,7 +281,7 @@ two smoothing methods are supported:
 
 Usage: `python cam.py --image-path <path_to_image> --method <method> --output-dir <output_dir_path>`
 
-To use with a specific device, like cpu, cuda, cuda:0 or mps:
+To use with a specific device, like cpu, cuda, cuda:0, mps or hpu:
 `python cam.py --image-path <path_to_image> --device cuda --output-dir <output_dir_path>`
 
 ----------
diff --git a/cam.py b/cam.py
index 9f3e377c..935afeb2 100644
--- a/cam.py
+++ b/cam.py
@@ -77,6 +77,11 @@ def get_args():
         'kpcacam': KPCA_CAM
     }
 
+    # Importing habana_frameworks is needed only for its side effect of
+    # registering the 'hpu' device with torch; the bound name is unused.
+    if args.device == 'hpu':
+        import habana_frameworks.torch.core as htcore  # noqa: F401
+
     model = models.resnet50(pretrained=True).to(torch.device(args.device)).eval()
 
     # Choose the target layer you want to compute the visualization for.
diff --git a/pytorch_grad_cam/base_cam.py b/pytorch_grad_cam/base_cam.py
index 4b2850a9..44ae5b90 100644
--- a/pytorch_grad_cam/base_cam.py
+++ b/pytorch_grad_cam/base_cam.py
@@ -26,5 +26,13 @@ def __init__(
         # Use the same device as the model.
         self.device = next(self.model.parameters()).device
+        if 'hpu' in str(self.device):
+            # Lazy import: habana_frameworks is only required on HPU devices.
+            try:
+                import habana_frameworks.torch.core as htcore
+            except ImportError as error:
+                raise ImportError(
+                    f"Could not import habana_frameworks.torch.core: {error}"
+                ) from error
+            self.__htcore = htcore
         self.reshape_transform = reshape_transform
         self.compute_input_gradient = compute_input_gradient
         self.uses_gradients = uses_gradients
@@ -97,6 +105,8 @@ def forward(
             self.model.zero_grad()
             loss = sum([target(output) for target, output in zip(targets, outputs)])
             loss.backward(retain_graph=True)
+            if 'hpu' in str(self.device):
+                # HPU executes lazily; mark_step() flushes the accumulated graph
+                # so the gradients computed by backward() become available.
+                self.__htcore.mark_step()
 
         # In most of the saliency attribution papers, the saliency is
         # computed with a single target layer.