-
Do you mean a mini-batch VGAE version for multiple graphs or for a single large graph? I think the mini-batch version of VGAE for multiple graphs is pretty straightforward to add, and only involves utilizing a batched inner-product decoder:

def decode(self, z, batch):
    # to_dense_batch returns the padded dense node matrix and a mask.
    z, mask = to_dense_batch(z, batch)
    return z @ z.transpose(1, 2)
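For completeness, here is a minimal sketch of how such a decoder could sit next to a variational encoder when iterating over mini-batches of graphs from a PyG DataLoader. The VariationalGCNEncoder class, the channel sizes, and the loss_for_batch helper below are illustrative assumptions of mine, not code from this thread; padded entries are not masked, for brevity:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.utils import to_dense_batch, to_dense_adj

class VariationalGCNEncoder(torch.nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = GCNConv(in_channels, 2 * out_channels)
        self.conv_mu = GCNConv(2 * out_channels, out_channels)
        self.conv_logstd = GCNConv(2 * out_channels, out_channels)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index).relu()
        return self.conv_mu(x, edge_index), self.conv_logstd(x, edge_index)

def decode(z, batch):
    # [num_graphs, max_nodes, out_channels] -> dense adjacency logits per graph.
    z, mask = to_dense_batch(z, batch)
    return z @ z.transpose(1, 2)

def loss_for_batch(encoder, data):
    mu, logstd = encoder(data.x, data.edge_index)
    z = mu + torch.randn_like(logstd) * logstd.exp()    # reparameterization trick
    adj_logits = decode(z, data.batch)
    target = to_dense_adj(data.edge_index, data.batch)  # ground-truth dense adjacency
    recon = F.binary_cross_entropy_with_logits(adj_logits, target)
    kl = -0.5 * torch.mean(1 + 2 * logstd - mu ** 2 - logstd.exp() ** 2)
    return recon + kl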
-
import argparse
import os.path as osp

import torch
import torch.nn.functional as F

from torch_geometric.datasets import UPFD
from torch_geometric.loader import DataLoader
from torch_geometric.transforms import ToUndirected
from torch_geometric.nn import GCNConv
from torch_geometric.utils import to_dense_batch, to_dense_adj

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='politifact',
                    choices=['politifact', 'gossipcop'])
parser.add_argument('--feature', type=str, default='profile',
                    choices=['profile', 'spacy', 'bert', 'content'])
args = parser.parse_args()

path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'datasets', 'UPFD')
train_dataset = UPFD(path, args.dataset, args.feature, 'train', ToUndirected())
val_dataset = UPFD(path, args.dataset, args.feature, 'val', ToUndirected())
test_dataset = UPFD(path, args.dataset, args.feature, 'test', ToUndirected())

train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)


class GraphBTM(torch.nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = GCNConv(in_channels, 128)
        self.conv2 = GCNConv(128, out_channels)

    def encode(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = x.relu()
        return self.conv2(x, edge_index)

    def decode(self, z, batch):
        # Convert node embeddings into a dense [num_graphs, max_nodes, channels]
        # tensor and take the per-graph inner product to get adjacency logits.
        z, mask = to_dense_batch(z, batch)
        z = z @ z.transpose(1, 2)
        return z.view(z.size(0), -1)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GraphBTM(train_dataset.num_features, 64).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.01)


def train():
    model.train()
    total_loss = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        z = model.encode(data.x, data.edge_index)
        link_logits = model.decode(z, data.batch)  # raw logits, no sigmoid here
        # The reconstruction target is the dense adjacency of the batch; padded
        # entries outside a graph's node range stay zero (not masked here).
        link_labels = to_dense_adj(data.edge_index, data.batch)
        link_labels = link_labels.view(link_labels.size(0), -1)
        loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * data.num_graphs
    return total_loss / len(train_loader.dataset)


for epoch in range(1, 101):
    loss = train()
    print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')
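The script above never actually uses val_loader or test_loader; one way to put them to work is a reconstruction-AUC evaluation loop along these lines (my own addition, assuming scikit-learn is available; padded entries are again left unmasked):

from sklearn.metrics import roc_auc_score

@torch.no_grad()
def test(loader):
    model.eval()
    labels, preds = [], []
    for data in loader:
        data = data.to(device)
        z = model.encode(data.x, data.edge_index)
        link_logits = model.decode(z, data.batch)
        link_labels = to_dense_adj(data.edge_index, data.batch)
        # Flatten per batch so batches with different max_nodes can be concatenated.
        labels.append(link_labels.view(-1).cpu())
        preds.append(link_logits.sigmoid().view(-1).cpu())
    y = torch.cat(labels)
    pred = torch.cat(preds)
    return roc_auc_score(y.numpy(), pred.numpy())

# e.g. after each epoch:
# val_auc = test(val_loader)
# test_auc = test(test_loader)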
-
Hi, thanks for your wonderful work. Could you please give a demo that implements the mini-batch version of VGAE with sparse tensors?
Or help to re-implement GraphBTM, which uses a VGAE to build a topic model; the original code is at https://github.com/valdersoul/GraphBTM.
Edit: It has to be mentioned that the model takes the adjacency matrix $A$ as the node feature matrix $X$, so both $A$ and $X$ need to be sparse tensors.
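As a rough illustration of what "A as the node features X, both sparse" could look like in PyG (my own sketch, not GraphBTM's actual pipeline; the sizes and variable names are made up), one option is to keep A as a torch_sparse SparseTensor and densify only the rows needed for the current mini-batch:

import torch
from torch_sparse import SparseTensor

num_nodes = 1000                                      # assumed graph size
edge_index = torch.randint(0, num_nodes, (2, 5000))   # assumed edges

# Sparse adjacency A, kept sparse in memory.
adj = SparseTensor(row=edge_index[0], col=edge_index[1],
                   sparse_sizes=(num_nodes, num_nodes))

batch_nodes = torch.arange(0, 128)                    # nodes in the current mini-batch
# Rows of A for the batch, densified on demand and reused as features X.
x_batch = adj.index_select(0, batch_nodes).to_dense()  # shape [128, num_nodes]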