NTUAction3DTF.py (forked from seymayucer/ActionDatasetLoader)
import tensorflow as tf
from os import listdir
from os.path import join
import numpy as np

import helpers

# NTU RGB+D skeleton settings: sequences are zero-padded to 300 frames, each
# frame is a 75-dim vector (25 joints x 3 coordinates), and there are 60
# action classes.
max_frame_num = 300
frame_size = 75
num_classes = 60

def _parse_function(filename):
    """Load one skeleton sequence from disk and zero-pad it to max_frame_num frames."""
    # `filename` arrives as a bytes object when called through tf.py_func.
    img = np.loadtxt(filename, dtype=float)
    img_len = len(img)
    if img_len < max_frame_num:
        mis_dim = max_frame_num - img_len
        zero_vec = np.zeros((mis_dim, img.shape[1]))
        img = np.vstack((img, zero_vec))
    # Cast to np.int64 so the output matches the dtypes declared in tf.py_func.
    return img, np.int64(img_len)
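# For example, _parse_function turns a 120-frame clip, loaded as a (120, 75)
# array, into a (300, 75) array with 180 zero rows appended, while img_len
# still reports the original length of 120.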

def read(data_dir, subject_id, split=False):
    """Build train/test tf.data pipelines with a cross-subject split on subject_id."""
    print('Loading NTU 3D Data, data directory %s' % data_dir)
    filenames = sorted(listdir(data_dir))
    np.random.shuffle(filenames)

    test_files, test_labels = [], []
    train_files, train_labels = [], []
    # Files from the held-out performer (e.g. 'P001') go to the test split; the
    # action label is parsed from the 'A...' token in the filename.
    substr = 'P{0:03}'.format(subject_id)
    for fname in filenames:
        label = helpers.full_fname2_str(fname, 'A')
        if substr in fname:
            test_files.append(join(data_dir, fname))
            test_labels.append(label)
        else:
            train_files.append(join(data_dir, fname))
            train_labels.append(label)
    train_imgs = tf.constant(train_files)
    train_labels = tf.constant(train_labels)
    test_imgs = tf.constant(test_files)
    test_labels = tf.constant(test_labels)

    # Create TensorFlow Dataset objects: tf.py_func runs the NumPy loader per
    # element, and the one-hot encoded label is appended to its outputs.
    train_data = tf.data.Dataset.from_tensor_slices((train_imgs, train_labels))
    train_data = train_data.map(
        lambda filename, label: tuple(tf.py_func(_parse_function, [filename], [tf.double, tf.int64])) + (
            tf.one_hot(label, num_classes, dtype=tf.int32),))
    train_data = train_data.batch(32)  # batched dataset creation

    test_data = tf.data.Dataset.from_tensor_slices((test_imgs, test_labels))
    test_data = test_data.map(
        lambda filename, label: tuple(tf.py_func(_parse_function, [filename], [tf.double, tf.int64])) + (
            tf.one_hot(label, num_classes, dtype=tf.int32),))
    test_data = test_data.batch(32)  # batch the test set too (assumed; the original left it unbatched)

    print('data is ready', type(train_data), train_data)
    if split:
        return train_data, test_data
    # The original fell through and implicitly returned None here; returning the
    # combined dataset is an assumed fix.
    return train_data.concatenate(test_data)
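
# A minimal usage sketch (assumed, not part of the original module): it expects
# a TF 1.x runtime and a directory of NTU skeleton .txt files with the usual
# 'SsssCcccPpppRrrrAaaa' naming; the path './ntu_skeletons' is hypothetical.
if __name__ == '__main__':
    train_data, test_data = read('./ntu_skeletons', subject_id=1, split=True)
    # One-shot iterators are the TF 1.x way to consume a tf.data.Dataset.
    iterator = train_data.make_one_shot_iterator()
    next_batch = iterator.get_next()
    with tf.Session() as sess:
        imgs, lengths, labels = sess.run(next_batch)
        print(imgs.shape, lengths.shape, labels.shape)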