CaptumExplainer Fails with edge_mask_type='object' and visualize_graph #9495
Moemenhussein11 asked this question in Q&A (unanswered)
Problem Description
I am trying to explain the predictions of my GNN model, which performs multiclass node classification on a homogeneous graph.
The GNNExplainer works well, but I am encountering two issues with CaptumExplainer:
1. Setting edge_mask_type='object' raises a RuntimeError (see Error (1)).
2. Calling explanation.visualize_graph() raises a ValueError, because the returned explanation contains no edge_mask (see Error (2)).
Error (1)
{ "name": "RuntimeError", "message": "One of the differentiated Tensors appears to not have been used in the graph. Set allow_unused=True if this is the desired behavior.", "stack": "--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) Cell In[150], line 15 1 explainer = Explainer( 2 model=model, 3 algorithm=CaptumExplainer('IntegratedGradients'), (...) 11 ), 12 ) 14 node_index = 20 ---> 15 explanation = explainer(data.x, data.edge_index, index=node_index , target=data.y) 17 explanation.visualize_feature_importance( top_k=10) File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch_geometric\\explain\\explainer.py:205, in Explainer.__call__(self, x, edge_index, target, index, **kwargs) 202 training = self.model.training 203 self.model.eval() --> 205 explanation = self.algorithm( 206 self.model, 207 x, 208 edge_index, 209 target=target, 210 index=index, 211 **kwargs, 212 ) 214 self.model.train(training) 216 # Add explainer objectives to the `Explanation` object: File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch\ n\\modules\\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs) 1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc] 1531 else: -> 1532 return self._call_impl(*args, **kwargs) File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch\ n\\modules\\module.py:1541, in Module._call_impl(self, *args, **kwargs) 1536 # If we don't have any hooks, we want to skip the rest of the logic in 1537 # this function, and just call forward. 1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks 1539 or _global_backward_pre_hooks or _global_backward_hooks 1540 or _global_forward_hooks or _global_forward_pre_hooks): -> 1541 return forward_call(*args, **kwargs) 1543 try: 1544 result = None File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch_geometric\\explain\\algorithm\\captum_explainer.py:170, in CaptumExplainer.forward(self, model, x, edge_index, target, index, **kwargs) 167 elif index is not None: 168 target = target[index] --> 170 attributions = self.attribution_method_instance.attribute( 171 inputs=inputs, 172 target=target, 173 additional_forward_args=add_forward_args, 174 **self.kwargs, 175 ) 177 node_mask, edge_mask = convert_captum_output( 178 attributions, 179 mask_type, 180 metadata, 181 ) 183 if not isinstance(x, dict): File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\captum\\log\\__init__.py:42, in log_usage.<locals>._log_usage.<locals>.wrapper(*args, **kwargs) 40 @wraps(func) 41 def wrapper(*args, **kwargs): ---> 42 return func(*args, **kwargs) File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\captum\\attr\\_core\\integrated_gradients.py:274, in IntegratedGradients.attribute(self, inputs, baselines, target, additional_forward_args, n_steps, method, internal_batch_size, return_convergence_delta) 272 if internal_batch_size is not None: 273 num_examples = inputs[0].shape[0] --> 274 attributions = _batch_attribution( 275 self, 276 num_examples, 277 internal_batch_size, 278 n_steps, 279 inputs=inputs, 280 baselines=baselines, 281 target=target, 282 additional_forward_args=additional_forward_args, 283 method=method, 284 ) 285 else: 286 attributions = self._attribute( 287 
inputs=inputs, 288 baselines=baselines, (...) 292 method=method, 293 ) File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\captum\\attr\\_utils\\batching.py:78, in _batch_attribution(attr_method, num_examples, internal_batch_size, n_steps, include_endpoint, **kwargs) 76 step_sizes = full_step_sizes[start_step:end_step] 77 alphas = full_alphas[start_step:end_step] ---> 78 current_attr = attr_method._attribute( 79 **kwargs, n_steps=batch_steps, step_sizes_and_alphas=(step_sizes, alphas) 80 ) 82 if total_attr is None: 83 total_attr = current_attr File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\captum\\attr\\_core\\integrated_gradients.py:351, in IntegratedGradients._attribute(self, inputs, baselines, target, additional_forward_args, n_steps, method, step_sizes_and_alphas) 348 expanded_target = _expand_target(target, n_steps) 350 # grads: dim -> (bsz * #steps x inputs[0].shape[1:], ...) --> 351 grads = self.gradient_func( 352 forward_fn=self.forward_func, 353 inputs=scaled_features_tpl, 354 target_ind=expanded_target, 355 additional_forward_args=input_additional_args, 356 ) 358 # flattening grads so that we can multilpy it with step-size 359 # calling contiguous to avoid `memory whole` problems 360 scaled_grads = [ 361 grad.contiguous().view(n_steps, -1) 362 * torch.tensor(step_sizes).view(n_steps, 1).to(grad.device) 363 for grad in grads 364 ] File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\captum\\_utils\\gradient.py:119, in compute_gradients(forward_fn, inputs, target_ind, additional_forward_args) 113 assert outputs[0].numel() == 1, ( 114 \"Target not provided when necessary, cannot\" 115 \" take gradient with respect to multiple outputs.\" 116 ) 117 # torch.unbind(forward_out) is a list of scalar tensor tuples and 118 # contains batch_size * #steps elements --> 119 grads = torch.autograd.grad(torch.unbind(outputs), inputs) 120 return grads File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch\\autograd\\__init__.py:412, in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused, is_grads_batched, materialize_grads) 408 result = _vmap_internals._vmap(vjp, 0, 0, allow_none_pass_through=True)( 409 grad_outputs_ 410 ) 411 else: --> 412 result = _engine_run_backward( 413 t_outputs, 414 grad_outputs_, 415 retain_graph, 416 create_graph, 417 inputs, 418 allow_unused, 419 accumulate_grad=False, 420 ) 421 if materialize_grads: 422 if any( 423 result[i] is None and not is_tensor_like(inputs[i]) 424 for i in range(len(inputs)) 425 ): continue 426 result[i] = torch.zeros_like(inputs[i]) 427 return result }Error(2)
{ "name": "ValueError", "message": "The attribute 'edge_mask' is not available in 'Explanation' (got ['node_mask'])", "stack": "--------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[152], line 18 15 explanation = explainer(data.x, data.edge_index, index=node_index , target=data.y) 17 explanation.visualize_feature_importance( top_k=10) ---> 18 explanation.visualize_graph() File c:\\Users\\100062576\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\torch_geometric\\explain\\explanation.py:258, in Explanation.visualize_graph(self, path, backend, node_labels) 256 edge_mask = self.get('edge_mask') 257 if edge_mask is None: --> 258 raise ValueError(f\"The attribute 'edge_mask' is not available \" 259 f\"in '{self.__class__.__name__}' \" 260 f\"(got {self.available_explanations})\") 261 visualize_graph(self.edge_index, edge_mask, path, backend, node_labels) ValueError: The attribute 'edge_mask' is not available in 'Explanation' (got ['node_mask'])" }Explainer Instantiation
My Model
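The model definition also failed to load. Purely for reference, here is a representative sketch of a homogeneous-graph multiclass node classifier; the actual architecture, layer types, and sizes are unknown, so everything below is an assumption.

```python
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

class GNN(torch.nn.Module):
    def __init__(self, in_channels: int, hidden_channels: int, num_classes: int):
        super().__init__()
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index).relu()
        x = F.dropout(x, p=0.5, training=self.training)
        return self.conv2(x, edge_index)  # raw class logits, one row per node
```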
Request for Help
Any insights or solutions to ensure the explainer works when specifying the node index would be greatly appreciated. Thank you!