1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

 ### Fixed

+- Fixed PyTorch warnings in several examples ([#10571](https://github.com/pyg-team/pytorch_geometric/pull/10571))
 - Fixed `ogbn_train_cugraph` example for distributed cuGraph ([#10439](https://github.com/pyg-team/pytorch_geometric/pull/10439))
 - Added `safe_onnx_export` function with workarounds for `onnx_ir.serde.SerdeError` issues in ONNX export ([#10422](https://github.com/pyg-team/pytorch_geometric/pull/10422))
 - Fixed importing PyTorch Lightning in `torch_geometric.graphgym` and `torch_geometric.data.lightning` when using `lightning` instead of `pytorch-lightning` ([#10404](https://github.com/pyg-team/pytorch_geometric/pull/10404), [#10417](https://github.com/pyg-team/pytorch_geometric/pull/10417))
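The same one-line fix repeats through every example file below: a loss tensor still attached to the autograd graph is detached before being converted to a Python float. A minimal sketch of the pattern, outside any particular example (names here are illustrative only):

```python
import torch

w = torch.randn(4, requires_grad=True)
loss = (w ** 2).sum()  # 0-dim tensor, still attached to the autograd graph
loss.backward()

# Recent PyTorch releases warn when a tensor that still requires grad is
# converted directly to a Python scalar. Detaching first makes the intent
# explicit (a plain number for logging) and avoids the warning:
running_loss = float(loss.detach())
```

Since `detach()` returns a view that shares storage with `loss` and merely drops the graph reference, the extra call is essentially free.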
2 changes: 1 addition & 1 deletion examples/attentive_fp.py
@@ -125,7 +125,7 @@ def train():
         loss = F.mse_loss(out, data.y)
         loss.backward()
         optimizer.step()
-        total_loss += float(loss) * data.num_graphs
+        total_loss += float(loss.detach()) * data.num_graphs
         total_examples += data.num_graphs
     return sqrt(total_loss / total_examples)

2 changes: 1 addition & 1 deletion examples/autoencoder.py
@@ -100,7 +100,7 @@ def train():
         loss = loss + (1 / train_data.num_nodes) * model.kl_loss()
     loss.backward()
     optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
2 changes: 1 addition & 1 deletion examples/dir_gnn.py
@@ -71,7 +71,7 @@ def train():
     loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
     loss.backward()
     optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
2 changes: 1 addition & 1 deletion examples/gcn2_cora.py
@@ -65,7 +65,7 @@ def train():
     loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
     loss.backward()
     optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
4 changes: 2 additions & 2 deletions examples/glnn.py
@@ -44,7 +44,7 @@ def train_teacher():
     loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
     loss.backward()
     gnn_optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
@@ -84,7 +84,7 @@ def train_student():
     loss = args.lamb * loss1 + (1 - args.lamb) * loss2
     loss.backward()
     mlp_optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
2 changes: 1 addition & 1 deletion examples/mixhop.py
@@ -64,7 +64,7 @@ def train():
     loss.backward()
     optimizer.step()
     scheduler.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
2 changes: 1 addition & 1 deletion examples/ogbn_proteins_deepgcn.py
@@ -86,7 +86,7 @@ def train(epoch):
         loss.backward()
         optimizer.step()

-        total_loss += float(loss) * int(data.train_mask.sum())
+        total_loss += float(loss.detach()) * int(data.train_mask.sum())
         total_examples += int(data.train_mask.sum())

         pbar.update(1)
2 changes: 1 addition & 1 deletion examples/ogbn_train.py
@@ -147,7 +147,7 @@ def train(epoch: int) -> tuple[Tensor, float]:
         loss.backward()
         optimizer.step()

-        total_loss += float(loss)
+        total_loss += float(loss.detach())
         total_correct += int(out.argmax(dim=-1).eq(y).sum())
         pbar.update(batch.batch_size)

2 changes: 1 addition & 1 deletion examples/pmlp.py
@@ -32,7 +32,7 @@ def train():
     loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
     loss.backward()
     optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
4 changes: 2 additions & 2 deletions examples/proteins_mincut_pool.py
@@ -77,7 +77,7 @@ def train(epoch):
         out, mc_loss, o_loss = model(data.x, data.edge_index, data.batch)
         loss = F.nll_loss(out, data.y.view(-1)) + mc_loss + o_loss
         loss.backward()
-        loss_all += data.y.size(0) * float(loss)
+        loss_all += data.y.size(0) * float(loss.detach())
         optimizer.step()
     return loss_all / len(train_dataset)

@@ -92,7 +92,7 @@ def test(loader):
         data = data.to(device)
         pred, mc_loss, o_loss = model(data.x, data.edge_index, data.batch)
         loss = F.nll_loss(pred, data.y.view(-1)) + mc_loss + o_loss
-        loss_all += data.y.size(0) * float(loss)
+        loss_all += data.y.size(0) * float(loss.detach())
         correct += int(pred.max(dim=1)[1].eq(data.y.view(-1)).sum())

     return loss_all / len(loader.dataset), correct / len(loader.dataset)
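Note that `proteins_mincut_pool.py` receives the fix in `test()` as well as `train()`. This is harmless even where gradient tracking is already disabled, because `detach()` on a graph-free tensor is still valid and cheap, as this small sketch (not taken from the example) illustrates:

```python
import torch

with torch.no_grad():
    t = torch.ones(())  # 0-dim tensor created with grad tracking disabled
    assert not t.requires_grad
    # detach() here is effectively a no-op: it returns a graph-free view
    # of the same storage, so the conversion below never warns.
    assert float(t.detach()) == 1.0
```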
2 changes: 1 addition & 1 deletion examples/rgcn.py
@@ -72,7 +72,7 @@ def train():
     loss = F.nll_loss(out[data.train_idx], data.train_y)
     loss.backward()
     optimizer.step()
-    return float(loss)
+    return float(loss.detach())


 @torch.no_grad()
2 changes: 1 addition & 1 deletion examples/seal_link_pred.py
@@ -198,7 +198,7 @@ def train():
         loss = criterion(out.view(-1), data.y.to(torch.float))
         loss.backward()
         optimizer.step()
-        total_loss += float(loss) * data.num_graphs
+        total_loss += float(loss.detach()) * data.num_graphs

     return total_loss / len(train_dataset)

2 changes: 1 addition & 1 deletion examples/tgn.py
@@ -147,7 +147,7 @@ def train():
         loss.backward()
         optimizer.step()
         memory.detach()
-        total_loss += float(loss) * batch.num_events
+        total_loss += float(loss.detach()) * batch.num_events

     return total_loss / train_data.num_events

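In `tgn.py`, the new `loss.detach()` is independent of the pre-existing `memory.detach()` one line above; the two calls serve different purposes. An annotated (non-runnable) excerpt of those two lines:

```python
# Two different detach() calls in the TGN training loop:
memory.detach()  # cut the temporal memory off from earlier batches so that
                 # gradients do not flow back through them (truncated BPTT)
total_loss += float(loss.detach()) * batch.num_events  # drop the graph
                 # reference before converting the 0-dim loss to a float
```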