datasets.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 10:55:25 2023
@author: chris
"""
from torch_geometric.data import Batch
from torch_geometric.datasets import Flickr
from torch_geometric.datasets import PPI
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler

def get_Planetoid(name='Cora'):
    print(f"Loading Planetoid({name}) Dataset...")
    dataset = Planetoid(root=f'data/Planetoid/{name}', name=name,
                        transform=NormalizeFeatures())
    data = dataset[0]  # Planetoid datasets contain a single graph
    # Read Masks
    val_mask = data.val_mask
    test_mask = data.test_mask
    # Train on every node outside the val/test splits
    # (a larger split than the standard 20-per-class `data.train_mask`)
    train_mask = ~(val_mask | test_mask)
    # Read Labels
    data_label = data.y.numpy()
    # Read Features
    data_feat = data.x.numpy()
    # Read Edges
    edge_index = data.edge_index.numpy().T
    print_statistics(data_feat, data_label, edge_index, train_mask, val_mask,
                     test_mask)
    return data_feat, data_label, edge_index, train_mask, val_mask, test_mask

def get_PPI(split='train'):
    print(f"Loading PPI ({split}) Dataset...")
    assert split in ('train', 'val', 'test')
    dataset = PPI(root='data/PPI', split=split, transform=NormalizeFeatures())
    # PPI contains multiple graphs per split; merge them into one
    # (disconnected) graph so nodes and edges can be handled uniformly
    data = Batch.from_data_list(list(dataset))
    # Read Features
    data_feat = data.x.numpy()
    # Read Labels (multi-label: one row of binary indicators per node)
    data_label = data.y.numpy()
    # Generate Masks: every node belongs to the requested split
    num_nodes = data_feat.shape[0]
    train_mask = torch.full((num_nodes,), split == 'train')
    val_mask = torch.full((num_nodes,), split == 'val')
    test_mask = torch.full((num_nodes,), split == 'test')
    # Read Edges
    edge_index = data.edge_index.numpy().T
    print_statistics(data_feat, data_label, edge_index, train_mask, val_mask,
                     test_mask)
    return data_feat, data_label, edge_index, train_mask, val_mask, test_mask

def get_Organ(view='C'):
    print(f"Loading Organ-{view} Dataset...")
    if view == 'C':
        dataset_name = 'organc'
    elif view == 'S':
        dataset_name = 'organs'
    else:
        raise ValueError(f"Unknown view '{view}'; expected 'C' or 'S'")
    # Read Masks
    train_mask = torch.tensor(np.load(f'data/{dataset_name}/train_mask.npy'))
    val_mask = torch.tensor(np.load(f'data/{dataset_name}/val_mask.npy'))
    test_mask = torch.tensor(np.load(f'data/{dataset_name}/test_mask.npy'))
    # Read Labels
    data_label = np.load(f'data/{dataset_name}/data_label.npy')
    # Read Features and standardize them to zero mean and unit variance
    data_feat = np.load(f'data/{dataset_name}/data_feat.npy')
    data_feat = StandardScaler().fit_transform(data_feat)
    # Read Edges
    edge_index = np.load(f'data/{dataset_name}/edge_index.npy')
    print_statistics(data_feat, data_label, edge_index, train_mask, val_mask,
                     test_mask)
    return data_feat, data_label, edge_index, train_mask, val_mask, test_mask

def get_Flickr():
    print("Loading Flickr Dataset...")
    dataset = Flickr(root='data/Flickr', transform=NormalizeFeatures())
    data = dataset[0]  # Flickr contains a single graph
    # Read Masks
    train_mask = data.train_mask
    val_mask = data.val_mask
    test_mask = data.test_mask
    # Read Labels
    data_label = data.y.numpy()
    # Read Features
    data_feat = data.x.numpy()
    # Read Edges
    edge_index = data.edge_index.numpy().T
    print_statistics(data_feat, data_label, edge_index, train_mask, val_mask,
                     test_mask)
    return data_feat, data_label, edge_index, train_mask, val_mask, test_mask

def print_statistics(data_feat, data_label, edge_index, train_mask,
                     val_mask, test_mask):
    print("=============== Dataset Properties ===============")
    print(f"Total Nodes: {data_feat.shape[0]}")
    print(f"Total Edges: {edge_index.shape[0]}")
    print(f"Number of Features: {data_feat.shape[1]}")
    if data_label.ndim == 1:
        print(f"Number of Labels: {data_label.max() + 1}")
        print("Task Type: Multi-class Classification")
    else:
        print(f"Number of Labels: {data_label.shape[1]}")
        print("Task Type: Multi-label Classification")
    print(f"Training Nodes: {int(train_mask.sum())}")
    print(f"Validation Nodes: {int(val_mask.sum())}")
    print(f"Testing Nodes: {int(test_mask.sum())}")
    print()
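

# Minimal usage sketch: every loader returns the same six values in the same
# order, so downstream code can treat the datasets interchangeably.
# (`get_Planetoid` with 'Cora' below is just one possible choice.)
if __name__ == '__main__':
    data_feat, data_label, edge_index, train_mask, val_mask, test_mask = \
        get_Planetoid(name='Cora')
    # Boolean masks select rows of the node-level arrays;
    # edge_index has shape [num_edges, 2] after the transpose above.
    train_feat = data_feat[train_mask.numpy()]
    train_label = data_label[train_mask.numpy()]
    print(f"Train split: {train_feat.shape[0]} nodes, "
          f"{train_feat.shape[1]} features")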