forked from ROCm/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: lengths_reducer_fused_8bit_rowwise_ops_test.py
127 lines (108 loc) · 4.22 KB
/
lengths_reducer_fused_8bit_rowwise_ops_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
class TestLengthsReducerOpsFused8BitRowwise(hu.HypothesisTestCase):
    """Checks the fused-8-bit-rowwise sparse-lengths reducers against a
    float reference.

    Each test quantizes a random 2D float32 matrix to the fused 8-bit
    rowwise format, runs the fused reducer directly on the quantized blob,
    runs the plain float reducer on the explicitly dequantized blob, and
    asserts both outputs are almost equal.  The reference op reads the
    round-tripped (quantize -> dequantize) data so both paths see the same
    lossy values.
    """

    @staticmethod
    def _make_sparse_inputs(input_data, seed):
        """Build random (indices, weights, lengths) for a lengths reducer.

        indices: int32 row indices into ``input_data`` (possibly empty).
        weights: one float32 weight per index, uniform in [0, 1).
        lengths: equal-sized int32 segments covering a prefix of
            ``indices``; the segment size is ``len(indices) // 2`` clipped
            into [1, 10].  Trailing indices that do not fill a whole
            segment are simply unused by the ops.
        """
        np.random.seed(seed)
        indices = np.random.randint(
            low=0,
            high=len(input_data),
            size=[np.random.randint(len(input_data))],
            dtype=np.int32,
        )
        weights = np.random.uniform(size=[len(indices)]).astype(np.float32)
        # np.clip(a, a_min, a_max): clip the computed segment size into
        # [1, 10].  The original transposed the first two arguments
        # (np.clip(1, size, 10)), which coincidentally yields the same
        # value but misuses the API.
        segment_size = np.clip(len(indices) // 2, 1, 10)
        lengths = (
            np.ones([len(indices) // segment_size], dtype=np.int32)
            * segment_size
        )
        return indices, weights, lengths

    @given(
        input_data=hu.tensor(min_dim=2, max_dim=2),
        weighted=st.booleans(),
        seed=st.integers(0, 2**32 - 1),
    )
    def test_sparse_lengths_sum(self, input_data, weighted, seed):
        """Fused (weighted) sum must match the dequantized float reference."""
        net = core.Net("bench")
        input_data = input_data.astype(np.float32)
        indices, weights, lengths = self._make_sparse_inputs(input_data, seed)

        quantized_data = net.FloatToFused8BitRowwiseQuantized(
            'input_data', 'quantized_data'
        )
        dequantized_data = net.Fused8BitRowwiseQuantizedToFloat(
            quantized_data, 'dequantized_data'
        )

        if weighted:
            net.SparseLengthsWeightedSum(
                [dequantized_data, 'weights', 'indices', 'lengths'],
                'sum_reference',
                engine='fp16',
            )
            net.SparseLengthsWeightedSumFused8BitRowwise(
                [quantized_data, 'weights', 'indices', 'lengths'],
                'sum_quantized',
            )
        else:
            net.SparseLengthsSum(
                [dequantized_data, 'indices', 'lengths'],
                'sum_reference',
                engine='fp16',
            )
            net.SparseLengthsSumFused8BitRowwise(
                [quantized_data, 'indices', 'lengths'], 'sum_quantized'
            )

        workspace.FeedBlob('input_data', input_data)
        workspace.FeedBlob('weights', weights)
        workspace.FeedBlob('indices', indices)
        workspace.FeedBlob('lengths', lengths)

        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.CreateNet(net)
        workspace.RunNetOnce(net)

        np.testing.assert_array_almost_equal(
            workspace.FetchBlob('sum_reference'),
            workspace.FetchBlob('sum_quantized'),
        )

    @given(
        input_data=hu.tensor(min_dim=2, max_dim=2),
        seed=st.integers(0, 2**32 - 1),
    )
    def test_sparse_lengths_mean(self, input_data, seed):
        """Fused mean must match the dequantized float reference."""
        net = core.Net("bench")
        input_data = input_data.astype(np.float32)
        # Mean is unweighted; the generated weights are discarded.
        indices, _, lengths = self._make_sparse_inputs(input_data, seed)

        quantized_data = net.FloatToFused8BitRowwiseQuantized(
            'input_data', 'quantized_data'
        )
        dequantized_data = net.Fused8BitRowwiseQuantizedToFloat(
            quantized_data, 'dequantized_data'
        )

        net.SparseLengthsMean(
            [dequantized_data, 'indices', 'lengths'],
            'mean_reference',
            engine='fp16',
        )
        net.SparseLengthsMeanFused8BitRowwise(
            [quantized_data, 'indices', 'lengths'], 'mean_quantized'
        )

        workspace.FeedBlob('input_data', input_data)
        workspace.FeedBlob('indices', indices)
        workspace.FeedBlob('lengths', lengths)

        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.CreateNet(net)
        workspace.RunNetOnce(net)

        np.testing.assert_array_almost_equal(
            workspace.FetchBlob('mean_reference'),
            workspace.FetchBlob('mean_quantized'),
        )