You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Using a non-full backward hook when the forward contains multiple autograd Nodes is deprecated and will be removed in future versions. This hook will be missing some grad_input. Please use register_full_backward_hook to get the documented behavior.
/kaggle/working/flow-forecast/flood_forecast/trainer.py in train_function(model_type, params)
150 # TODO Move to other func
151 if params["dataset_params"]["class"] != "GeneralClassificationLoader":
--> 152 handle_model_evaluation1(trained_model, params, model_type)
153
154 else:
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/__init__.py in shap_values(self, X, ranked_outputs, output_rank_order, check_additivity)
122 were chosen as "top".
123 """
--> 124 return self.explainer.shap_values(X, ranked_outputs, output_rank_order, check_additivity=check_additivity)
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/deep_pytorch.py in shap_values(self, X, ranked_outputs, output_rank_order, check_additivity)
183 # run attribution computation graph
184 feature_ind = model_output_ranks[j, i]
--> 185 sample_phis = self.gradient(feature_ind, joint_x)
186 # assign the attributions to the right part of the output arrays
187 if self.interim:
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/deep_pytorch.py in gradient(self, idx, inputs)
121 grad = torch.autograd.grad(selected, x,
122 retain_graph=True if idx + 1 < len(X) else None,
--> 123 allow_unused=True)[0]
124 if grad is not None:
125 grad = grad.cpu().numpy()
/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused)
219
220 grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(outputs))
--> 221 grad_outputs_ = _make_grads(outputs, grad_outputs_)
222
223 if retain_graph is None:
/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in _make_grads(outputs, grads)
48 if out.requires_grad:
49 if out.numel() != 1:
---> 50 raise RuntimeError("grad can be implicitly created only for scalar outputs")
51 new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))
52 else:
RuntimeError: grad can be implicitly created only for scalar outputs
The text was updated successfully, but these errors were encountered:
Using a non-full backward hook when the forward contains multiple autograd Nodes is deprecated and will be removed in future versions. This hook will be missing some grad_input. Please use register_full_backward_hook to get the documented behavior.
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_35/811484386.py in <module>
5 os.environ["WANDB_API_KEY"] = user_secrets.get_secret("WANDB_KEY")
6 # sweep_full = wandb.sweep(wandb_sweep_config_full, project="bitcoin_forecasts")
----> 7 train_function("PyTorch", make_config_file("asset_0.csv"))
8
/kaggle/working/flow-forecast/flood_forecast/trainer.py in train_function(model_type, params)
150 # TODO Move to other func
151 if params["dataset_params"]["class"] != "GeneralClassificationLoader":
--> 152 handle_model_evaluation1(trained_model, params, model_type)
153
154 else:
/kaggle/working/flow-forecast/flood_forecast/trainer.py in handle_model_evaluation1(trained_model, params, model_type)
30 params["metrics"],
31 params["inference_params"],
---> 32 {})
33 wandb.run.summary["test_accuracy"] = test_acc[0]
34 df_train_and_test = test_acc[1]
/kaggle/working/flow-forecast/flood_forecast/evaluator.py in evaluate_model(model, model_type, target_col, evaluation_metrics, inference_params, eval_log)
185 else:
186 deep_explain_model_summary_plot(
--> 187 model, test_data, inference_params["datetime_start"]
188 )
189 deep_explain_model_heatmap(model, test_data, inference_params["datetime_start"])
/kaggle/working/flow-forecast/flood_forecast/explain_model_output.py in deep_explain_model_summary_plot(model, csv_test_loader, datetime_start)
107 model.model = model.model.to("cpu")
108 deep_explainer = shap.DeepExplainer(model.model, history)
--> 109 shap_values = deep_explainer.shap_values(history)
110 s_values_list.append(shap_values)
111 else:
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/__init__.py in shap_values(self, X, ranked_outputs, output_rank_order, check_additivity)
122 were chosen as "top".
123 """
--> 124 return self.explainer.shap_values(X, ranked_outputs, output_rank_order, check_additivity=check_additivity)
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/deep_pytorch.py in shap_values(self, X, ranked_outputs, output_rank_order, check_additivity)
183 # run attribution computation graph
184 feature_ind = model_output_ranks[j, i]
--> 185 sample_phis = self.gradient(feature_ind, joint_x)
186 # assign the attributions to the right part of the output arrays
187 if self.interim:
/opt/conda/lib/python3.7/site-packages/shap/explainers/_deep/deep_pytorch.py in gradient(self, idx, inputs)
121 grad = torch.autograd.grad(selected, x,
122 retain_graph=True if idx + 1 < len(X) else None,
--> 123 allow_unused=True)[0]
124 if grad is not None:
125 grad = grad.cpu().numpy()
/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused)
219
220 grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(outputs))
--> 221 grad_outputs_ = _make_grads(outputs, grad_outputs_)
222
223 if retain_graph is None:
/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in _make_grads(outputs, grads)
48 if out.requires_grad:
49 if out.numel() != 1:
---> 50 raise RuntimeError("grad can be implicitly created only for scalar outputs")
51 new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))
52 else:
RuntimeError: grad can be implicitly created only for scalar outputs
The text was updated successfully, but these errors were encountered: