# dataset_loader.py
import os
import os.path as osp
import pickle

import torch
import torch_geometric.transforms as T
from torch_geometric.data import Data, InMemoryDataset, download_url
from torch_geometric.datasets import Planetoid, WikipediaNetwork
from torch_geometric.utils.undirected import to_undirected
from torch_sparse import coalesce
class Chame_Squir_Actor(InMemoryDataset):
    """Chameleon, Squirrel, and Actor ('film') datasets, loaded from
    pre-pickled Data objects placed under ``<root>/<name>/raw/<name>``."""

    def __init__(self, root='data/', name=None, p2raw=None,
                 transform=None, pre_transform=None):
        if name == 'actor':
            # The Actor dataset is stored on disk under the name 'film'.
            name = 'film'

        existing_dataset = ['chameleon', 'film', 'squirrel']
        if name not in existing_dataset:
            raise ValueError(
                f'name of dataset must be one of: {existing_dataset}')
        self.name = name

        if (p2raw is not None) and osp.isdir(p2raw):
            self.p2raw = p2raw
        elif p2raw is None:
            self.p2raw = None
        else:
            raise ValueError(
                f'path to raw dataset "{p2raw}" does not exist!')

        if not osp.isdir(root):
            os.makedirs(root)
        self.root = root

        super(Chame_Squir_Actor, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        return osp.join(self.root, self.name, 'raw')

    @property
    def processed_dir(self):
        return osp.join(self.root, self.name, 'processed')

    @property
    def raw_file_names(self):
        return [self.name]

    @property
    def processed_file_names(self):
        return ['data.pt']

    def download(self):
        # The raw pickle files are expected to already be present in raw_dir.
        pass

    def process(self):
        # Each raw file is a pickled torch_geometric Data object.
        p2f = osp.join(self.raw_dir, self.name)
        with open(p2f, 'rb') as f:
            data = pickle.load(f)
        data = data if self.pre_transform is None else self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.name)
class WebKB(InMemoryDataset):
    """WebKB web-page datasets (Cornell, Texas, Washington, Wisconsin),
    downloaded from the Geom-GCN repository."""

    url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/geom-gcn/master/new_data')

    def __init__(self, root, name, transform=None, pre_transform=None):
        self.name = name.lower()
        assert self.name in ['cornell', 'texas', 'washington', 'wisconsin']
        super(WebKB, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        return osp.join(self.root, self.name, 'raw')

    @property
    def processed_dir(self):
        return osp.join(self.root, self.name, 'processed')

    @property
    def raw_file_names(self):
        return ['out1_node_feature_label.txt', 'out1_graph_edges.txt']

    @property
    def processed_file_names(self):
        return 'data.pt'

    def download(self):
        for name in self.raw_file_names:
            download_url(f'{self.url}/{self.name}/{name}', self.raw_dir)

    def process(self):
        # Node file: one node per line after the header, tab-separated as
        # <id>\t<comma-separated features>\t<label>.
        with open(self.raw_paths[0], 'r') as f:
            data = f.read().split('\n')[1:-1]
            x = [[float(v) for v in r.split('\t')[1].split(',')] for r in data]
            x = torch.tensor(x, dtype=torch.float)

            y = [int(r.split('\t')[2]) for r in data]
            y = torch.tensor(y, dtype=torch.long)

        # Edge file: one directed edge per line after the header; edges are
        # symmetrized and deduplicated.
        with open(self.raw_paths[1], 'r') as f:
            data = f.read().split('\n')[1:-1]
            data = [[int(v) for v in r.split('\t')] for r in data]
            edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
            edge_index = to_undirected(edge_index)
            edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))

        data = Data(x=x, edge_index=edge_index, y=y)
        data = data if self.pre_transform is None else self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.name)
def DataLoader(name):
    """Return the requested dataset with row-normalized node features."""
    name = name.lower()
    if name in ['cora', 'citeseer', 'pubmed']:
        dataset = Planetoid(osp.join('./data', name), name,
                            transform=T.NormalizeFeatures())
    elif name in ['chameleon', 'actor', 'squirrel']:
        dataset = Chame_Squir_Actor(root='./data/', name=name,
                                    transform=T.NormalizeFeatures())
    elif name in ['texas', 'cornell', 'wisconsin']:
        dataset = WebKB(root='./data/', name=name,
                        transform=T.NormalizeFeatures())
    else:
        raise ValueError(f'dataset {name} not supported in dataloader')
    return dataset
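

# --- Usage sketch (illustrative, not part of the original module). It assumes
# the Planetoid/WebKB raw files can be downloaded on first use and that the
# chameleon/squirrel/actor pickles already sit in ./data/<name>/raw/. ---
if __name__ == '__main__':
    dataset = DataLoader('cora')
    data = dataset[0]  # the single graph held by the InMemoryDataset
    print(dataset, data.num_nodes, data.num_edges, dataset.num_classes)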