@@ -11,11 +11,7 @@ def lrelu(x, leak=0.2, name="lrelu", alt_relu_impl=True):
        return tf.maximum(x, leak*x)


-<<<<<<< HEAD
-def general_conv2d(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding=None, name="conv2d", do_norm=True, do_relu=True, relufactor=0):
-=======
def general_conv2d(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding="VALID", name="conv2d", do_norm=True, do_relu=True, relufactor=0):
->>>>>>> 80b993bcd27a00f9bc26de1fbd2250bbc092827d
    with tf.variable_scope(name):
        w = tf.get_variable('w',[f_h, f_w, inputconv.get_shape()[-1], o_d],
                initializer=tf.truncated_normal_initializer(stddev=stddev))
@@ -36,11 +32,7 @@ def general_conv2d(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, p
    return conv


-<<<<<<< HEAD
-def general_deconv2d(inputconv, outshape, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding=None, name="deconv2d", do_norm=True, do_relu=True, relufactor=0):
-=======
def general_deconv2d(inputconv, outshape, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02, padding="VALID", name="deconv2d", do_norm=True, do_relu=True, relufactor=0):
->>>>>>> 80b993bcd27a00f9bc26de1fbd2250bbc092827d
    with tf.variable_scope(name):
        w = tf.get_variable('w',[f_h, f_w, o_d, inputconv.get_shape()[-1]],
                initializer=tf.truncated_normal_initializer(stddev=stddev))
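For reference, a minimal sketch of how the two helpers touched by this diff might be wired together, assuming the TF 1.x API used in this file; the placeholder shape, layer names, and outshape below are hypothetical, chosen only to be self-consistent with the signatures shown above:

import tensorflow as tf

# Hypothetical 256x256 RGB input batch (shapes are illustrative only).
inp = tf.placeholder(tf.float32, [1, 256, 256, 3], name="input")

# 7x7 convolution, 64 output channels, stride 1, using the "VALID"
# default padding that this merge resolution settles on.
c1 = general_conv2d(inp, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1,
                    stddev=0.02, padding="VALID", name="c1")

# Transposed convolution back toward the input resolution; with a 7x7
# kernel, stride 1, and "VALID" padding, the 250x250 conv output maps
# back to 256x256.
d1 = general_deconv2d(c1, outshape=[1, 256, 256, 3], o_d=3, f_h=7, f_w=7,
                      s_h=1, s_w=1, stddev=0.02, padding="VALID", name="d1")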