Commit ff322e0

Merge pull request chainer#6346 from toslunar/fix-f632
Fix F632 (use ==/!= to compare str)

Authored Mar 27, 2019
2 parents: d0829e6 + 0f2e7b8

5 files changed: +10, -11 lines
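For context: F632 flags uses of `is`/`is not` to compare against literals. `is` tests object identity, not value equality, so two equal strings need not match; comparisons like `sp_order is 'C'` only worked because CPython happens to intern short, identifier-like string literals. A minimal sketch of the failure mode (not part of this commit):

    a = 'infinite_wait'                  # a literal: CPython interns it
    b = ''.join(['infinite', '_wait'])   # built at runtime: a fresh, non-interned object

    print(a == b)   # True:  the values are equal
    print(a is b)   # False on CPython: two distinct objects

CPython 3.8+ also warns about this pattern at compile time (SyntaxWarning: "is" with a literal); flake8 reports it statically as F632 via pyflakes.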
 

chainer/functions/math/sparse_matmul.py (+3, -3)

@@ -24,9 +24,9 @@ def _coo_matmul(sp_data, sp_row, sp_col, sp_shape, sp_order,
         A_row = sp_col
         A_col = sp_row
         A_shape = (sp_shape[1], sp_shape[0])
-        if sp_order is 'C':
+        if sp_order == 'C':
             A_order = 'F'
-        elif sp_order is 'F':
+        elif sp_order == 'F':
             A_order = 'C'
         else:
             A_order = sp_order
@@ -99,7 +99,7 @@ def _coo_matmul_gpu(A_data, A_row, A_col, A_shape, A_order, B, dtype):
     nb = B.shape[0]
     C = cuda.cupy.zeros((nb, _m, _n), dtype=cupy_dtype)

-    if A_order is 'C':
+    if A_order == 'C':
         # A chunk is the number of non-zero elements handled by a single GPU
         # thread. If contiguous non-zero elemets are related to the same
         # location of the output matrix and they are processed in the same

chainer/variable.py (+2, -2)

@@ -1798,9 +1798,9 @@ def from_chx(self):

         if isinstance(device, backend.ChainerxDevice):
             backend_name = device.device.backend.name
-            if backend_name is 'native':
+            if backend_name == 'native':
                 self._initial_device = backend.CpuDevice()
-            elif backend_name is 'cuda':
+            elif backend_name == 'cuda':
                 self._initial_device = chainer.get_device(
                     (cuda.cupy, device.device.index))


setup.py (+2, -3)

@@ -33,9 +33,8 @@
     ],
     'stylecheck': [
         'autopep8>=1.4.1,<1.5',
-        'flake8>=3.6,<3.7',
-        'pbr==4.0.4',
-        'pycodestyle>=2.4,<2.5',
+        'flake8>=3.7,<3.8',
+        'pycodestyle>=2.5,<2.6',
     ],
     'test': [
         'pytest<4.2.0',  # 4.2.0 is slow collecting tests and times out on CI.
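The setup.py change is what actually enables the check: F632 comes from pyflakes, which the flake8 3.7 line pulls in (hence the matching pycodestyle bump); the pbr pin, apparently a workaround needed by the older toolchain, is dropped. As a hedged sketch, assuming a flake8>=3.7 install, the same violations can be surfaced from Python through flake8's legacy API:

    # Sketch only: run just the F632 check over the code base.
    from flake8.api import legacy as flake8

    style_guide = flake8.get_style_guide(select=['F632'])
    report = style_guide.check_files(['chainer', 'tests'])
    print(report.total_errors)  # number of remaining F632 violations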

tests/chainer_tests/iterators_tests/test_multiprocess_iterator.py (+1, -1)

@@ -675,7 +675,7 @@ def __getitem__(self, _):
         os.write(fd, six.b(code))
         os.close(fd)

-        if self.shared_mem is not None and dataset is 'infinite_wait':
+        if self.shared_mem is not None and dataset == 'infinite_wait':
             stdout = subprocess.PIPE
         else:
             stdout = None

tests/chainer_tests/optimizers_tests/test_optimizers_by_linear_model.py (+2, -2)

@@ -96,9 +96,9 @@ def _optimizer_loss_scaling(optimizer, loss_scaling):
     if loss_scaling not in [False, 'dynamic', 'static']:
         msg = 'loss_scaling must be False, \'dynamic\' or \'static\'.'
        raise ValueError(msg)
-    if loss_scaling is 'dynamic':
+    if loss_scaling == 'dynamic':
         optimizer.loss_scaling()
-    elif loss_scaling is 'static':
+    elif loss_scaling == 'static':
         optimizer.loss_scaling(scale=10.0)
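A detail in this last hunk: the guard on line 96 was already value-based, since `in` compares list elements with `==` (after an identity fast path), while the branches below used `is`. A loss_scaling string equal to 'dynamic' but constructed at runtime would pass the guard and then match neither branch, silently skipping the optimizer call. A small illustration with a hypothetical runtime-built value:

    loss_scaling = ''.join(['dyn', 'amic'])  # equal to 'dynamic', but a distinct object

    print(loss_scaling in [False, 'dynamic', 'static'])  # True: `in` uses ==
    print(loss_scaling is 'dynamic')   # False on CPython (and a SyntaxWarning on 3.8+)
    print(loss_scaling == 'dynamic')   # True: the comparison this commit adopts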