@@ -63,7 +63,7 @@ def testBaseline(self, cls, num_microbatches, expected_answer):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -87,7 +87,7 @@ def testClippingNorm(self, cls):
       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
@@ -110,7 +110,7 @@ def testNoiseMultiplier(self, cls):
       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
@@ -126,7 +126,7 @@ def testNoiseMultiplier(self, cls):
   @mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):

-    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+    class SimpleOptimizer(tf.train.Optimizer):

       def compute_gradients(self):
         return 0
@@ -153,7 +153,7 @@ def linear_model_fn(features, labels, mode):
           dp_sum_query,
           num_microbatches=1,
           learning_rate=1.0)
-      global_step = tf.compat.v1.train.get_global_step()
+      global_step = tf.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -167,7 +167,7 @@ def linear_model_fn(features, labels, mode):
         true_weights) + true_bias + np.random.normal(
             scale=0.1, size=(200, 1)).astype(np.float32)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
@@ -200,7 +200,7 @@ def testUnrollMicrobatches(self, cls):
           learning_rate=2.0,
           unroll_microbatches=True)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -225,7 +225,7 @@ def testDPGaussianOptimizerClass(self, cls):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))