|
19 | 19 | # Settings
|
20 | 20 | flags = tf.app.flags
|
21 | 21 | FLAGS = flags.FLAGS
|
22 |
| -flags.DEFINE_string('dataset', 'pubmed', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' |
# Hyperparameter flags (TF1 `tf.app.flags` API); read globally through FLAGS.
flags.DEFINE_string('model', 'gcn_mix', 'Model string.')  # 'gcn', 'gcn_appr'
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
|
@@ -114,11 +113,6 @@ def construct_feeddict_forMixlayers(AXfeatures, support, labels, placeholders):
|
114 | 113 | def main(rank1):
|
115 | 114 |
|
116 | 115 |
|
117 |
| - |
118 |
| - # config = tf.ConfigProto(device_count={"CPU": 4}, # limit to num_cpu_core CPU usage |
119 |
| - # inter_op_parallelism_threads = 1, |
120 |
| - # intra_op_parallelism_threads = 4, |
121 |
| - # log_device_placement=False) |
122 | 116 | adj, features, y_train, y_val, y_test,train_index, val_index, test_index = loadRedditFromNPZ("data/")
|
123 | 117 | adj = adj+adj.T
|
124 | 118 |
|
@@ -274,103 +268,6 @@ def transferG2ADJ():
|
274 | 268 | sp.save_npz("reddit_adj.npz", adj)
|
275 | 269 |
|
276 | 270 |
|
277 |
def test(rank1):
    """Evaluate a trained GCN_APPRO_Mix model on the Reddit test split.

    Restores the checkpoint written by training ("tmp/tmp_MixModel_uniform.ckpt")
    and prints test loss, accuracy and wall-clock evaluation time.

    Args:
        rank1: number of neighbor columns to importance-sample for the top
            layer, or None to evaluate on the full (unsampled) test adjacency.

    Raises:
        ValueError: if FLAGS.model is not 'gcn_mix'.
    """
    adj, features, y_train, y_val, y_test, train_index, val_index, test_index = loadRedditFromNPZ("data/")
    # Symmetrize the stored adjacency (it presumably holds one direction only
    # -- same treatment as in main(); TODO confirm against loadRedditFromNPZ).
    adj = adj + adj.T

    # 41 output classes -- presumably the Reddit community labels; verify
    # against the dataset if this number ever changes.
    y_train = transferLabel2Onehot(y_train, 41)
    y_test = transferLabel2Onehot(y_test, 41)

    features = sp.lil_matrix(features)

    if FLAGS.model == 'gcn_mix':
        normADJ = nontuple_preprocess_adj(adj)
        # Rows of the normalized adjacency restricted to test nodes.
        normADJ_test = normADJ[test_index, :]
        model_func = GCN_APPRO_Mix
    else:
        raise ValueError('Invalid argument for model: ' + str(FLAGS.model))

    # Pre-aggregate features once (A @ X) so the bottom layer needs no sparse
    # multiplication at evaluation time.
    features = nontuple_preprocess_features(features).todense()
    features = normADJ.dot(features)

    # Define placeholders (TF1 graph-mode feed points).
    placeholders = {
        'support': tf.sparse_placeholder(tf.float32),
        'AXfeatures': tf.placeholder(tf.float32, shape=(None, features.shape[1])),
        'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }

    # Create model.
    model = model_func(placeholders, input_dim=features.shape[-1], logging=True)

    # Context manager guarantees the session is released (the original code
    # leaked it).
    with tf.Session() as sess:

        def evaluate(features, support, labels, placeholders):
            """Run one forward pass; return (loss, accuracy, elapsed seconds)."""
            t_test = time.time()
            feed_dict_val = construct_feeddict_forMixlayers(features, support, labels, placeholders)
            outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
            return outs_val[0], outs_val[1], (time.time() - t_test)

        # Init variables, then overwrite them from the trained checkpoint.
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, "tmp/tmp_MixModel_uniform.ckpt")

        # Column-wise sampling distribution over the test adjacency.
        p0 = column_prop(normADJ_test)

        t = time.time()
        if rank1 is None:
            # Exact evaluation: full normalized test adjacency, all features.
            support1 = sparse_to_tuple(normADJ_test)
            features_inputs = features
        else:
            # Importance-sample rank1 columns among those with nonzero mass.
            distr = np.nonzero(np.sum(normADJ_test, axis=0))[1]
            if rank1 > len(distr):
                q1 = distr
            else:
                q1 = np.random.choice(distr, rank1, replace=False, p=p0[distr] / sum(p0[distr]))  # top layer
            # Rescale sampled columns by 1/(p * rank1) -- presumably to keep
            # the sampled estimator unbiased; matches the training-time scheme.
            support1 = sparse_to_tuple(normADJ_test[:, q1].dot(sp.diags(1.0 / (p0[q1] * rank1))))
            features_inputs = features[q1, :]  # selected nodes for approximation

        # NOTE(review): the per-call duration returned by evaluate() was
        # immediately overwritten in the original; report the total wall-clock
        # time (sampling + forward pass) as before.
        test_cost, test_acc, _ = evaluate(features_inputs, support1, y_test,
                                          placeholders)
        test_duration = time.time() - t
        print("rank1 = {}".format(rank1), "cost=", "{:.5f}".format(test_cost),
              "accuracy=", "{:.5f}".format(test_acc),
              "test time=", "{:.5f}".format(test_duration))
374 | 271 |
|
375 | 272 | if __name__=="__main__":
|
376 | 273 | # main(100)
|
|
0 commit comments