Skip to content

Commit 0d92eb5

Browse files
committed
Using instance norm and added pad support
1 parent 6522a1a commit 0d92eb5

File tree

3 files changed

+10
-9
lines changed

3 files changed

+10
-9
lines changed

layers.py

+6-6
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,10 @@ def instance_norm(x):
2727
def general_conv2d(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding="VALID", name="conv2d", do_norm=True, do_relu=True, relufactor=0):
2828
with tf.variable_scope(name):
2929

30-
conv = tf.contrib.layers.conv2d(inputconv, o_d, f_w, s_w, padding, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev),biases_initializer=None)
30+
conv = tf.contrib.layers.conv2d(inputconv, o_d, f_w, s_w, padding, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev),biases_initializer=tf.constant_initializer(0.0))
3131
if do_norm:
32-
# conv = instance_norm(conv)
33-
conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope="batch_norm")
32+
conv = instance_norm(conv)
33+
# conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope="batch_norm")
3434

3535
if do_relu:
3636
if(relufactor == 0):
@@ -45,11 +45,11 @@ def general_conv2d(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, p
4545
def general_deconv2d(inputconv, outshape, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding="VALID", name="deconv2d", do_norm=True, do_relu=True, relufactor=0):
4646
with tf.variable_scope(name):
4747

48-
conv = tf.contrib.layers.conv2d_transpose(inputconv, o_d, [f_h, f_w], [s_h, s_w], padding, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev),biases_initializer=None)
48+
conv = tf.contrib.layers.conv2d_transpose(inputconv, o_d, [f_h, f_w], [s_h, s_w], padding, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev),biases_initializer=tf.constant_initializer(0.0))
4949

5050
if do_norm:
51-
# conv = instance_norm(conv)
52-
conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope="batch_norm")
51+
conv = instance_norm(conv)
52+
# conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope="batch_norm")
5353

5454
if do_relu:
5555
if(relufactor == 0):

main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@
4040
batch_size = 1
4141
pool_size = 50
4242
sample_size = 10
43-
ngf = 64
43+
ngf = 32
4444
ndf = 64
4545

4646

model.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
batch_size = 1
2323
pool_size = 50
24-
ngf = 64
24+
ngf = 32
2525
ndf = 64
2626

2727

@@ -59,7 +59,8 @@ def build_generator_resnet_6blocks(inputgen, name="generator"):
5959

6060
o_c4 = general_deconv2d(o_r6, [batch_size,64,64,ngf*2], ngf*2, ks, ks, 2, 2, 0.02,"SAME","c4")
6161
o_c5 = general_deconv2d(o_c4, [batch_size,128,128,ngf], ngf, ks, ks, 2, 2, 0.02,"SAME","c5")
62-
o_c6 = general_conv2d(o_c5, img_layer, f, f, 1, 1, 0.02,"SAME","c6",do_relu=False)
62+
o_c5_pad = tf.pad(inputgen,[[0, 0], [ks, ks], [ks, ks], [0, 0]], "REFLECT")
63+
o_c6 = general_conv2d(o_c5_pad, img_layer, f, f, 1, 1, 0.02,"VALID","c6",do_relu=False)
6364

6465
# Adding the tanh layer
6566

0 commit comments

Comments
 (0)