
Commit 70e6466

submit authored and committed
clean some codes
1 parent bb63b2d commit 70e6466

4 files changed: +62 -240 lines changed

create_Graph.py

+61
@@ -0,0 +1,61 @@
+import numpy as np
+import pickle as pkl
+import scipy.sparse as sp
+import sys
+import os
+import networkx as nx
+from utils import *
+import json
+from networkx.readwrite import json_graph
+
+# 'cora', 'citeseer', 'pubmed'
+
+if __name__=="__main__":
+    data_name = 'cora'
+    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(data_name)
+
+    G = nx.from_scipy_sparse_matrix(adj)
+
+    val_index = np.where(val_mask)[0]
+    test_index = np.where(test_mask)[0]
+    y = y_train + y_val + y_test
+    y = np.argmax(y, axis=1)
+
+    for i in range(len(y)):
+        if i in val_index:
+            G.node[i]['val'] = True
+            G.node[i]['test'] = False
+        elif i in test_index:
+            G.node[i]['test'] = True
+            G.node[i]['val'] = False
+        else:
+            G.node[i]['test'] = False
+            G.node[i]['val'] = False
+
+    data = json_graph.node_link_data(G)
+    with open("cora/cora-G.json", "wb") as f:
+        json.dump(data, f)
+    classMap = {}
+    idMap = {}
+    for i in range(len(y)):
+        classMap[i] = y[i]
+        idMap[i] = i
+    with open("cora/cora-id_map.json", "wb") as f:
+        json.dump(idMap, f)
+    with open("cora/cora-class_map.json", "wb") as f:
+        json.dump(classMap, f)
+    np.save(open("cora/cora-feats.npy", "wb"), features.todense())
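Note: the new script exports the Cora split into the GraphSAGE-style layout (node-link graph JSON, id map, class map, and a dense feature matrix), but as committed it relies on the networkx 1.x attribute API (G.node) and opens the JSON files in binary mode, which json.dump rejects under Python 3. A minimal sketch of the same export against current networkx (3.x) and Python 3, still assuming the repo's load_data helper from utils, could look like this:

import json
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
from utils import load_data  # repo helper, same as used by create_Graph.py

if __name__ == "__main__":
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data('cora')

    # networkx 3.x renamed from_scipy_sparse_matrix to from_scipy_sparse_array
    G = nx.from_scipy_sparse_array(adj)

    val_index = set(np.where(val_mask)[0])
    test_index = set(np.where(test_mask)[0])
    y = np.argmax(y_train + y_val + y_test, axis=1)

    # G.node was removed in networkx 2.4; G.nodes is the supported accessor
    for i in range(len(y)):
        G.nodes[i]['val'] = i in val_index
        G.nodes[i]['test'] = i in test_index

    # json.dump writes text, so the files must be opened in "w" mode under Python 3
    with open("cora/cora-G.json", "w") as f:
        json.dump(json_graph.node_link_data(G), f)
    with open("cora/cora-id_map.json", "w") as f:
        json.dump({i: i for i in range(len(y))}, f)
    with open("cora/cora-class_map.json", "w") as f:
        json.dump({i: int(y[i]) for i in range(len(y))}, f)  # int(): NumPy ints are not JSON-serializable
    np.save("cora/cora-feats.npy", np.asarray(features.todense()))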

train_batch_multiRank_inductive_reddit_Mixlayers_sampleA.py

-103
@@ -19,7 +19,6 @@
 # Settings
 flags = tf.app.flags
 FLAGS = flags.FLAGS
-flags.DEFINE_string('dataset', 'pubmed', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
 flags.DEFINE_string('model', 'gcn_mix', 'Model string.')  # 'gcn', 'gcn_appr'
 flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
 flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
@@ -114,11 +113,6 @@ def construct_feeddict_forMixlayers(AXfeatures, support, labels, placeholders):
 def main(rank1):


-
-    # config = tf.ConfigProto(device_count={"CPU": 4},  # limit to num_cpu_core CPU usage
-    #                          inter_op_parallelism_threads = 1,
-    #                          intra_op_parallelism_threads = 4,
-    #                          log_device_placement=False)
     adj, features, y_train, y_val, y_test, train_index, val_index, test_index = loadRedditFromNPZ("data/")
     adj = adj + adj.T

@@ -274,103 +268,6 @@ def transferG2ADJ():
     sp.save_npz("reddit_adj.npz", adj)


-def test(rank1):
-    # config = tf.ConfigProto(device_count={"CPU": 4},  # limit to num_cpu_core CPU usage
-    #                          inter_op_parallelism_threads = 1,
-    #                          intra_op_parallelism_threads = 4,
-    #                          log_device_placement=False)
-    adj, features, y_train, y_val, y_test, train_index, val_index, test_index = loadRedditFromNPZ("data/")
-    adj = adj + adj.T
-
-    y_train = transferLabel2Onehot(y_train, 41)
-    y_test = transferLabel2Onehot(y_test, 41)
-
-    features = sp.lil_matrix(features)
-
-    numNode_train = y_train.shape[0]
-    # print("numNode", numNode)
-
-    if FLAGS.model == 'gcn_mix':
-        normADJ = nontuple_preprocess_adj(adj)
-        normADJ_test = normADJ[test_index, :]
-        # normADJ_val = nontuple_preprocess_adj(adj_val)
-        # normADJ_test = nontuple_preprocess_adj(adj_test)
-
-        num_supports = 2
-        model_func = GCN_APPRO_Mix
-    else:
-        raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
-
-    # Some preprocessing
-    features = nontuple_preprocess_features(features).todense()
-    features = normADJ.dot(features)
-
-    # Define placeholders
-    placeholders = {
-        'support': tf.sparse_placeholder(tf.float32),
-        'AXfeatures': tf.placeholder(tf.float32, shape=(None, features.shape[1])),
-        'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
-        'dropout': tf.placeholder_with_default(0., shape=()),
-        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
-    }
-
-    # Create model
-    model = model_func(placeholders, input_dim=features.shape[-1], logging=True)
-
-    # Initialize session
-    sess = tf.Session()
-
-    # Define model evaluation function
-    def evaluate(features, support, labels, placeholders):
-        t_test = time.time()
-        feed_dict_val = construct_feeddict_forMixlayers(features, support, labels, placeholders)
-        outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
-        return outs_val[0], outs_val[1], (time.time() - t_test)
-
-    # Init variables
-    sess.run(tf.global_variables_initializer())
-    saver = tf.train.Saver()
-
-    saver.restore(sess, "tmp/tmp_MixModel_uniform.ckpt")
-
-    cost_val = []
-
-    p0 = column_prop(normADJ_test)
-
-    t = time.time()
-
-    if rank1 is None:
-        support1 = sparse_to_tuple(normADJ_test)
-        features_inputs = features
-    else:
-        distr = np.nonzero(np.sum(normADJ_test, axis=0))[1]
-        if rank1 > len(distr):
-            q1 = distr
-        else:
-            q1 = np.random.choice(distr, rank1, replace=False, p=p0[distr] / sum(p0[distr]))  # top layer
-        # q1 = np.random.choice(np.arange(numNode_train), rank1, p=p0)  # top layer
-        support1 = sparse_to_tuple(normADJ_test[:, q1].dot(sp.diags(1.0 / (p0[q1] * rank1))))
-        features_inputs = features[q1, :]  # selected nodes for approximation
-
-    test_cost, test_acc, test_duration = evaluate(features_inputs, support1, y_test,
-                                                  placeholders)
-
-    test_duration = time.time() - t
-    print("rank1 = {}".format(rank1), "cost=", "{:.5f}".format(test_cost),
-          "accuracy=", "{:.5f}".format(test_acc),
-          "test time=", "{:.5f}".format(test_duration))

 if __name__=="__main__":
     # main(100)
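Note: the deleted test() block above restores a trained checkpoint and evaluates with an importance-sampled support: it draws rank1 columns of the test rows of the normalized adjacency with probability proportional to column_prop(normADJ_test), then rescales each kept column by 1 / (p0[q1] * rank1) so that the sampled product approximates the full propagation in expectation. For reference, a standalone sketch of just that sampling step in plain NumPy/SciPy; normalized column sums stand in here for the repo's column_prop helper, which may weight columns differently:

import numpy as np
import scipy.sparse as sp

def sample_support(norm_adj_rows, rank1, seed=0):
    """Importance-sample rank1 columns of a row-slice of the normalized adjacency.

    Column probabilities are taken proportional to column sums (a stand-in for
    column_prop), and each kept column is rescaled by 1 / (p * rank1) so the
    sampled support approximates the full support in expectation.
    Returns the rescaled sparse support and the sampled column indices.
    """
    rng = np.random.default_rng(seed)

    p0 = np.asarray(norm_adj_rows.sum(axis=0)).ravel()
    p0 = p0 / p0.sum()

    distr = np.nonzero(p0)[0]                 # columns that actually reach these rows
    if rank1 >= len(distr):
        q1 = distr                            # fewer candidates than samples: keep them all
        scale = 1.0 / (p0[q1] * len(q1))
    else:
        q1 = rng.choice(distr, rank1, replace=False,
                        p=p0[distr] / p0[distr].sum())
        scale = 1.0 / (p0[q1] * rank1)

    return norm_adj_rows[:, q1].dot(sp.diags(scale)), q1

With a restored session, the returned support (converted via sparse_to_tuple) and features[q1, :] would then be fed through construct_feeddict_forMixlayers exactly as in the deleted block.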

train_batch_multiRank_inductive_reddit_Mixlayers_sampleBatch.py

+1 -1
@@ -19,7 +19,7 @@
 # Settings
 flags = tf.app.flags
 FLAGS = flags.FLAGS
-flags.DEFINE_string('dataset', 'pubmed', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
+
 flags.DEFINE_string('model', 'gcn_mix', 'Model string.')  # 'gcn', 'gcn_appr'
 flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
 flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')

train_inductive.py

-136
This file was deleted.

0 commit comments
