Commit 5a6980c

Refactoring
1 parent 3499a3a commit 5a6980c

5 files changed: +44 -50 lines

src/main.py (+14 -14)
@@ -82,12 +82,12 @@ def init_config_frame(self):
         self.learning_rate_entry = Entry(self.config_frame, textvar=self.learning_rate_var)
         self.learning_rate_entry.grid(row=8, column=1, sticky=W)

-        self.learning_decay_label = Label(self.config_frame, text='Layer Decay:')
-        self.learning_decay_label.grid(row=9, column=0, sticky=W)
-        self.learning_decay_var = StringVar()
-        self.learning_decay_var.set('1.0')
-        self.learning_decay_entry = Entry(self.config_frame, textvar=self.learning_decay_var)
-        self.learning_decay_entry.grid(row=9, column=1, sticky=W)
+        self.layer_decay_label = Label(self.config_frame, text='Layer Decay:')
+        self.layer_decay_label.grid(row=9, column=0, sticky=W)
+        self.layer_decay_var = StringVar()
+        self.layer_decay_var.set('1.0')
+        self.layer_decay_entry = Entry(self.config_frame, textvar=self.layer_decay_var)
+        self.layer_decay_entry.grid(row=9, column=1, sticky=W)

         self.momentum_label = Label(self.config_frame, text='Momentum:')
         self.momentum_label.grid(row=10, column=0, sticky=W)
@@ -139,7 +139,7 @@ def init_validation_frame(self, row):
         self.validation_test_loss_var.grid(row=3, column=1, sticky=W)

     def init_test_frame(self):
-        self.canvas_data = np.array(self.test_images[0], dtype='float64')
+        self.canvas_data = np.array(self.test_images[0], dtype='float32')

         self.test_frame = Frame(self.master)
         self.test_frame.grid(row=0, column=1)
@@ -185,7 +185,7 @@ def init_results_frame(self, row):

     def create_network(self):
         # Set the neural network parameters.
-        self.layer_sizes = tuple([784] + list(literal_eval(self.layer_sizes_var.get())) + [10])
+        self.layer_sizes = (784,) + literal_eval(self.layer_sizes_var.get()) + (10,)
         self.sigmoid = self.sigmoid_var.get()
         if not (self.sigmoid == 'logistic' or self.sigmoid == 'tanh'):
             raise ValueError('Invalid sigmoid function.')
@@ -209,12 +209,12 @@ def train(self):
         # Set the training parameters.
         self.num_iterations = int(self.num_iterations_var.get())
         self.learning_rate = float(self.learning_rate_var.get())
-        self.learning_decay = float(self.learning_decay_var.get())
+        self.layer_decay = float(self.layer_decay_var.get())
         self.momentum = float(self.momentum_var.get())
         self.batch_size = int(self.batch_size_var.get())

         stochastic_gradient_descent(self.nn, self.train_input, self.train_output, num_iterations=self.num_iterations,
-                                    learning_rate=self.learning_rate, learning_decay=self.learning_decay,
+                                    learning_rate=self.learning_rate, layer_decay=self.layer_decay,
                                     momentum=self.momentum, batch_size=self.batch_size)

         self.test()
@@ -248,13 +248,13 @@ def validate(self):
         else:
             raise ValueError('Invalid sigmoid function.')

-        training_prediction_rate = 100 * get_prediction_rate(self.nn, self.train_input, self.train_output)
-        test_prediction_rate = 100 * get_prediction_rate(self.nn, test_input, test_output)
+        training_prediction_accuracy = 100 * get_prediction_accuracy(self.nn, self.train_input, self.train_output)
+        test_prediction_accuracy = 100 * get_prediction_accuracy(self.nn, test_input, test_output)
         training_loss = self.nn.get_loss(self.train_input, self.train_output)
         test_loss = self.nn.get_loss(test_input, test_output)

-        self.validation_training_accuracy_var.config(text=('%.2f %%' % (training_prediction_rate)))
-        self.validation_test_accuracy_var.config(text=('%.2f %%' % (test_prediction_rate)))
+        self.validation_training_accuracy_var.config(text=('%.2f %%' % (training_prediction_accuracy)))
+        self.validation_test_accuracy_var.config(text=('%.2f %%' % (test_prediction_accuracy)))
         self.validation_training_loss_var.config(text=('%.4f' % (training_loss)))
         self.validation_test_loss_var.config(text=('%.4f' % (test_loss)))

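A note on the create_network change above: the new form concatenates tuples directly, so it assumes the Layer Sizes entry holds a Python tuple literal such as '(100,)'. A bare '100' would make literal_eval return an int, and the tuple concatenation would then raise a TypeError. A minimal sketch of the parsing, with hypothetical entry strings:

from ast import literal_eval

# Sketch of the refactored layer_sizes construction; the entry strings
# '(100,)' and '(24, 32)' are illustrative, not taken from the commit.
for text in ['(100,)', '(24, 32)']:
    hidden = literal_eval(text)               # e.g. (24, 32)
    layer_sizes = (784,) + hidden + (10,)     # MNIST: 784 inputs, 10 classes
    print(layer_sizes)                        # (784, 100, 10), (784, 24, 32, 10)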
File renamed without changes.

src/mnist_hello_world.py → src/mnist_fully_connected.py (+20 -26)
@@ -5,30 +5,13 @@
 import mnist

 from neural_network import NeuralNetwork
-from preprocessing import *
 from training import stochastic_gradient_descent
-
-NUM_EXAMPLES = 59999
+from preprocessing import *


-def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1, hidden_layers=(24, 32), sigmoid='tanh',
-                       learning_rate=0.01, learning_decay=1.0, momentum=0.0, batch_size=100, num_epochs=100,
+def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1, hidden_layers=(100,), sigmoid='tanh',
+                       learning_rate=0.01, layer_decay=1.0, momentum=0.0, batch_size=100, num_epochs=100,
                        csv_filename=None, return_test_accuracies=False):
-    layer_sizes = (784,) + hidden_layers + (10,)
-    weight_decay = 0.0
-
-    print('Network Parameters')
-    print('layer_sizes: {}, sigmoid: {}, weight_decay: {}'.format(layer_sizes, sigmoid, weight_decay))
-
-    # Set the training parameters.
-    num_iterations = (NUM_EXAMPLES // batch_size) * num_epochs
-
-    print('Training Parameters')
-    print('num_iterations: {}, learning_rate: {}, learning_decay: {}, momentum: {}, batch_size: {}'.format(
-        num_iterations, learning_rate, learning_decay, momentum, batch_size))
-
-    print('')
-
     # Collect and preprocess the data.
     if sigmoid == 'logistic':
         train_input = convert_mnist_images_logistic(mnist.train_images()[:num_train_examples])
@@ -46,9 +29,12 @@ def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1, hidden_layer
         raise ValueError('Invalid sigmoid function.')

     # Create and train the neural network.
+    layer_sizes = (784,) + hidden_layers + (10,)
+    weight_decay = 0.0
     nn = NeuralNetwork(layer_sizes, sigmoid=sigmoid, weight_decay=weight_decay)

     num_examples = train_input.shape[0]
+    num_iterations = (num_examples // batch_size) * num_epochs

     rows = None
     if csv_filename is not None:
@@ -61,23 +47,31 @@ def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1, hidden_layer
     def callback(iteration):
         if iteration % (num_examples // batch_size) == 0:
             epoch = iteration // (num_examples // batch_size)
-            training_prediction_rate = get_prediction_rate(nn, train_input, train_output)
-            test_prediction_rate = get_prediction_rate(nn, test_input, test_output)
+            training_prediction_accuracy = get_prediction_accuracy(nn, train_input, train_output)
+            test_prediction_accuracy = get_prediction_accuracy(nn, test_input, test_output)
             training_loss = nn.get_loss(train_input, train_output)
             test_loss = nn.get_loss(test_input, test_output)
-            print('{},{:.6f},{:.6f},{:.6f},{:.6f}'.format(epoch, training_prediction_rate, test_prediction_rate,
+            print('{},{:.6f},{:.6f},{:.6f},{:.6f}'.format(epoch, training_prediction_accuracy, test_prediction_accuracy,
                                                           training_loss, test_loss))
             if csv_filename is not None:
-                rows.append((epoch, training_prediction_rate, test_prediction_rate, training_loss, test_loss))
+                rows.append((epoch, training_prediction_accuracy, test_prediction_accuracy, training_loss, test_loss))
             if return_test_accuracies:
-                test_accuracies.append(test_prediction_rate)
+                test_accuracies.append(test_prediction_accuracy)
+
+    print('Network Parameters')
+    print('layer_sizes: {}, sigmoid: {}, weight_decay: {}'.format(layer_sizes, sigmoid, weight_decay))
+    print('Training Parameters')
+    print('num_iterations: {}, learning_rate: {}, layer_decay: {}, momentum: {}, batch_size: {}'.format(
+        num_iterations, learning_rate, layer_decay, momentum, batch_size))
+    print('')

     header = 'epoch,training_accuracy,test_accuracy,training_loss,test_loss'
     print(header)
     stochastic_gradient_descent(nn, train_input, train_output, num_iterations=num_iterations,
-                                learning_rate=learning_rate, learning_decay=learning_decay,
+                                learning_rate=learning_rate, layer_decay=layer_decay,
                                 momentum=momentum, batch_size=batch_size,
                                 callback=callback)
+
     if csv_filename is not None:
         save_rows_to_csv(csv_filename, rows, header.split(','))

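With the hard-coded NUM_EXAMPLES = 59999 removed, num_iterations above is derived from the training data actually loaded, so truncating the set via num_train_examples no longer skews the epoch count. A worked example of the arithmetic, using illustrative values (60000 is the full MNIST training-set size):

num_examples = 60000                     # train_input.shape[0]
batch_size, num_epochs = 100, 100
num_iterations = (num_examples // batch_size) * num_epochs
print(num_iterations)                    # 600 batches per epoch -> 60000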
src/preprocessing.py (+4 -4)
@@ -37,15 +37,15 @@ def flatten_input_data(images):


 def convert_mnist_labels_one_hot(labels, positive, negative):
-    lst = []
+    data = []
     for label in labels:
         label_one_hot = negative * np.ones(10)
         label_one_hot[label] = positive
-        lst.append(np.array([label_one_hot]))
-    return np.array(lst)
+        data.append(np.array([label_one_hot]))
+    return np.array(data)


-def get_prediction_rate(nn, test_input, test_output):
+def get_prediction_accuracy(nn, test_input, test_output):
     prediction = nn.predict(test_input)
     diff = np.argmax(prediction, 2) - np.argmax(test_output, 2)
     error = np.count_nonzero(diff) / diff.size

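The hunk above ends at the hunk boundary, before the function's return; from the error computation shown, get_prediction_accuracy presumably returns 1 - error. A self-contained sketch of the argmax comparison, with a random array standing in for nn.predict(test_input) and shapes matching the (n, 1, 10) one-hot layout produced by convert_mnist_labels_one_hot:

import numpy as np

# Stand-in outputs for 5 examples; the real code calls nn.predict(test_input).
prediction = np.random.rand(5, 1, 10)
test_output = np.zeros((5, 1, 10))
test_output[np.arange(5), 0, [3, 1, 4, 1, 5]] = 1.0   # true labels 3,1,4,1,5

diff = np.argmax(prediction, 2) - np.argmax(test_output, 2)   # shape (5, 1)
error = np.count_nonzero(diff) / diff.size   # fraction misclassified
accuracy = 1 - error                         # assumed return value
print(accuracy)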
src/training.py (+6 -6)
@@ -4,15 +4,15 @@


 def stochastic_gradient_descent(nn, input_vectors, output_vectors, num_iterations=1000,
-                                learning_rate=0.1, learning_decay=1.0, momentum=0.0, batch_size=1,
+                                learning_rate=0.1, layer_decay=1.0, momentum=0.0, batch_size=1,
                                 callback=lambda iteration: None):
     """
     Trains the neural network by using stochastic gradient descent
     with the given training examples.
     """

     delta_weights = [np.zeros(weight.shape) for weight in nn.get_weights()]
-    learning_rates = get_learning_rates(nn, learning_rate, learning_decay)
+    learning_rates = get_learning_rates(nn, learning_rate, layer_decay)
     for iteration in range(num_iterations):
         # Get a random batch of examples.
         random_indices = np.random.randint(input_vectors.shape[0], size=batch_size)
@@ -27,14 +27,14 @@ def stochastic_gradient_descent(nn, input_vectors, output_vectors, num_iteration


 def batch_gradient_descent(nn, input_vectors, output_vectors, num_iterations=1000,
-                           learning_rate=0.1, learning_decay=1.0, momentum=0.0, callback=lambda iteration: None):
+                           learning_rate=0.1, layer_decay=1.0, momentum=0.0, callback=lambda iteration: None):
     """
     Trains the neural network by using standard batch gradient
     descent with the given training examples.
     """

     delta_weights = [np.zeros(weight.shape) for weight in nn.get_weights()]
-    learning_rates = get_learning_rates(nn, learning_rate, learning_decay)
+    learning_rates = get_learning_rates(nn, learning_rate, layer_decay)
     for iteration in range(num_iterations):
         # Update the weights using the examples.
         update_weights(nn, input_vectors, output_vectors, delta_weights, learning_rates, momentum)
@@ -52,11 +52,11 @@ def update_weights(nn, input_vectors, output_vectors, delta_weights, learning_ra
     nn.set_weights(weights)


-def get_learning_rates(nn, learning_rate, learning_decay):
+def get_learning_rates(nn, learning_rate, layer_decay):
     learning_rates = []
     running_learning_rate = learning_rate
     for i in range(nn.num_layers - 1):
         learning_rates.append(running_learning_rate)
-        running_learning_rate *= learning_decay
+        running_learning_rate *= layer_decay
     learning_rates *= 2
     return learning_rates

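The rename clarifies what the parameter does: layer_decay scales the learning rate geometrically from the first layer to the last, and the final learning_rates *= 2 repeats the list, presumably so the weight and bias parameter groups each receive a copy. A sketch of the schedule for a hypothetical 4-layer network:

# Assumed inputs: a network with num_layers = 4 (three weight layers),
# learning_rate = 0.1, layer_decay = 0.5.
num_layers = 4
learning_rate, layer_decay = 0.1, 0.5

learning_rates = []
running_learning_rate = learning_rate
for i in range(num_layers - 1):
    learning_rates.append(running_learning_rate)
    running_learning_rate *= layer_decay
learning_rates *= 2
print(learning_rates)   # [0.1, 0.05, 0.025, 0.1, 0.05, 0.025]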