 #include "zero_backend.hpp"
 #include "zero_tensor.hpp"
 
-using CompilationParams = std::tuple<std::string,  // Device name
-                                     ov::AnyMap    // Config
-                                     >;
+using CompilationParamsAndTensorDataType = std::tuple<std::string,        // Device name
+                                                      ov::AnyMap,         // Config
+                                                      ov::element::Type   // Tensor data type
+                                                      >;
 
 using ::testing::AllOf;
 using ::testing::HasSubstr;
@@ -44,19 +45,21 @@ namespace ov {
 namespace test {
 namespace behavior {
 class ZeroTensorTests : public ov::test::behavior::OVPluginTestBase,
-                        public testing::WithParamInterface<CompilationParams> {
+                        public testing::WithParamInterface<CompilationParamsAndTensorDataType> {
 protected:
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
+    ov::element::Type element_type;
     std::shared_ptr<::intel_npu::ZeroInitStructsHolder> init_struct;
     std::shared_ptr<::intel_npu::OptionsDesc> options = std::make_shared<::intel_npu::OptionsDesc>();
     ::intel_npu::Config npu_config = ::intel_npu::Config(options);
 
 public:
-    static std::string getTestCaseName(testing::TestParamInfo<CompilationParams> obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<CompilationParamsAndTensorDataType>& obj) {
         std::string targetDevice;
         ov::AnyMap configuration;
-        std::tie(targetDevice, configuration) = obj.param;
+        ov::element::Type type;
+        std::tie(targetDevice, configuration, type) = obj.param;
         std::replace(targetDevice.begin(), targetDevice.end(), ':', '_');
         targetDevice = ov::test::utils::getTestsPlatformFromEnvironmentOr(ov::test::utils::DEVICE_NPU);
 
@@ -69,12 +72,15 @@ class ZeroTensorTests : public ov::test::behavior::OVPluginTestBase,
                 configItem.second.print(result);
             }
         }
+        if (!type.get_type_name().empty()) {
+            result << "tensorDataType=" << type.get_type_name() << "_";
+        }
 
         return result.str();
     }
 
     void SetUp() override {
-        std::tie(target_device, configuration) = this->GetParam();
+        std::tie(target_device, configuration, element_type) = this->GetParam();
 
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         OVPluginTestBase::SetUp();
@@ -95,12 +101,12 @@ TEST_P(ZeroTensorTests, AllocateDeleteAllocateZeroTensor) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     auto shape = Shape{1, 2, 2, 2};
-    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element::f32, shape, true);
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, true);
     ASSERT_TRUE(::intel_npu::zeroUtils::memory_was_allocated_in_the_same_l0_context(init_struct->getContext(),
                                                                                     zero_tensor->data()));
 
     zero_tensor = {};
-    zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element::f32, shape, false);
+    zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, false);
     ASSERT_TRUE(::intel_npu::zeroUtils::memory_was_allocated_in_the_same_l0_context(init_struct->getContext(),
                                                                                     zero_tensor->data()));
 
@@ -115,10 +121,10 @@ TEST_P(ZeroTensorTests, CheckSetSmallerShape) {
 
     auto shape = Shape{1, 20, 20, 20};
     auto shape_size = ov::shape_size(shape);
-    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element::f32, shape, true);
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, true);
     EXPECT_EQ(shape, zero_tensor->get_shape());
     EXPECT_EQ(shape_size, zero_tensor->get_size());
-    EXPECT_EQ(shape_size * sizeof(ov::element::f32), zero_tensor->get_byte_size());
+    EXPECT_EQ(shape_size * element_type.size(), zero_tensor->get_byte_size());
 
     auto data = zero_tensor->data();
 
@@ -128,7 +134,7 @@ TEST_P(ZeroTensorTests, CheckSetSmallerShape) {
     zero_tensor->set_shape(new_shape);
     EXPECT_EQ(new_shape, zero_tensor->get_shape());
     EXPECT_EQ(new_shape_size, zero_tensor->get_size());
-    EXPECT_EQ(new_shape_size * sizeof(ov::element::f32), zero_tensor->get_byte_size());
+    EXPECT_EQ(new_shape_size * element_type.size(), zero_tensor->get_byte_size());
     EXPECT_EQ(data, zero_tensor->data());
     ASSERT_TRUE(::intel_npu::zeroUtils::memory_was_allocated_in_the_same_l0_context(init_struct->getContext(),
                                                                                     zero_tensor->data()));
@@ -139,10 +145,10 @@ TEST_P(ZeroTensorTests, CheckSetBiggerShape) {
 
     auto shape = Shape{1, 20, 20, 20};
     auto shape_size = ov::shape_size(shape);
-    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element::f32, shape, false);
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, false);
     EXPECT_EQ(shape, zero_tensor->get_shape());
     EXPECT_EQ(shape_size, zero_tensor->get_size());
-    EXPECT_EQ(shape_size * sizeof(ov::element::f32), zero_tensor->get_byte_size());
+    EXPECT_EQ(shape_size * element_type.size(), zero_tensor->get_byte_size());
 
     auto new_shape = Shape{1, 50, 50, 50};
     auto new_shape_size = ov::shape_size(new_shape);
@@ -152,7 +158,7 @@ TEST_P(ZeroTensorTests, CheckSetBiggerShape) {
     zero_tensor->set_shape(new_shape);
     EXPECT_EQ(new_shape, zero_tensor->get_shape());
     EXPECT_EQ(new_shape_size, zero_tensor->get_size());
-    EXPECT_EQ(new_shape_size * sizeof(ov::element::f32), zero_tensor->get_byte_size());
+    EXPECT_EQ(new_shape_size * element_type.size(), zero_tensor->get_byte_size());
     ASSERT_TRUE(zero_tensor->memory_address_changed());
     ASSERT_TRUE(::intel_npu::zeroUtils::memory_was_allocated_in_the_same_l0_context(init_struct->getContext(),
                                                                                     zero_tensor->data()));
@@ -164,122 +170,118 @@ TEST_P(ZeroTensorTests, CheckSetBiggerShape) {
 TEST_P(ZeroTensorTests, CheckIsContinuousZeroTensorScalar) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
-    auto zero_tensor =
-        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, ov::element::f32, Shape{}, true);
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, Shape{}, true);
     auto data = zero_tensor->data();
     auto strides = zero_tensor->get_strides();
 
     ov::Tensor view_tensor;
 
-    view_tensor = ov::Tensor(ov::element::f32, ov::Shape{}, data, strides);
+    view_tensor = ov::Tensor(element_type, ov::Shape{}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 }
 
 TEST_P(ZeroTensorTests, CheckIsContinuousHostTensor1Dimension) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     auto zero_tensor =
-        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, ov::element::f32, Shape{128}, true);
+        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, Shape{128}, true);
 
     auto data = zero_tensor->data();
     auto strides = zero_tensor->get_strides();
 
     ov::Tensor view_tensor;
 
-    view_tensor = ov::Tensor(ov::element::f32, ov::Shape{128}, data, strides);
+    view_tensor = ov::Tensor(element_type, ov::Shape{128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, ov::Shape{16}, data, strides);
+    view_tensor = ov::Tensor(element_type, ov::Shape{16}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 }
 
 TEST_P(ZeroTensorTests, CheckIsContinuousZeroTensor2Dimensions) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     auto zero_tensor =
-        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, ov::element::f32, Shape{32, 128}, true);
+        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, Shape{32, 128}, true);
     auto data = zero_tensor->data();
     auto strides = zero_tensor->get_strides();
 
     ov::Tensor view_tensor;
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{16, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{16, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 16}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 16}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 16}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 16}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 }
 
 TEST_P(ZeroTensorTests, CheckIsContinuousZeroTensor3Dimensions) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     auto zero_tensor =
-        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, ov::element::f32, Shape{5, 32, 128}, true);
+        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, Shape{5, 32, 128}, true);
     auto data = zero_tensor->data();
     auto strides = zero_tensor->get_strides();
 
     ov::Tensor view_tensor;
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 32, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 32, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 16, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 16, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 1, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 1, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 1, 64}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 1, 64}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 16, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 16, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 }
 
 TEST_P(ZeroTensorTests, CheckIsContinuousZeroTensor4Dimensions) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
-    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct,
-                                                                 npu_config,
-                                                                 ov::element::f32,
-                                                                 Shape{3, 5, 32, 128},
-                                                                 true);
+    auto zero_tensor =
+        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, Shape{3, 5, 32, 128}, true);
     auto data = zero_tensor->data();
     auto strides = zero_tensor->get_strides();
 
     ov::Tensor view_tensor;
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 2, 32, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 2, 32, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 5, 32, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 5, 32, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 2, 32, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 2, 32, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 2, 5, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 2, 5, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{3, 5, 32, 64}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{3, 5, 32, 64}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 1, 16, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 1, 16, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{2, 1, 16, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{2, 1, 16, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), false);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 1, 1, 128}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 1, 1, 128}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 
-    view_tensor = ov::Tensor(ov::element::f32, Shape{1, 1, 1, 32}, data, strides);
+    view_tensor = ov::Tensor(element_type, Shape{1, 1, 1, 32}, data, strides);
     EXPECT_EQ(view_tensor.is_continuous(), true);
 }
 
@@ -289,8 +291,8 @@ TEST_P(ZeroTensorTests, CopyDefaultTensorExpectedThrow) {
     auto shape = Shape{1, 2, 2, 2};
 
     // shape size is unaligned to standard page size, expect to fail
-    auto data = static_cast<float*>(::operator new(ov::shape_size(shape) * sizeof(ov::element::f32)));
-    auto default_tensor = make_tensor(ov::element::f32, shape, data);
+    auto data = static_cast<float*>(::operator new(ov::shape_size(shape) * element_type.size()));
+    auto default_tensor = make_tensor(element_type, shape, data);
     ASSERT_THROW(auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, default_tensor, npu_config),
                  ::intel_npu::ZeroTensorException);
 
@@ -302,8 +304,7 @@ TEST_P(ZeroTensorTests, CopyZeroTensorAndKeepAlive) {
 
     auto shape = Shape{1, 2, 2, 2};
 
-    auto zero_tensor =
-        std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, ov::element::f32, shape, true);
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, true);
 
     auto copy_zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, zero_tensor, npu_config);
 
@@ -325,8 +326,7 @@ TEST_P(ZeroTensorTests, CopyHostTensorAndKeepAlive) {
     auto zero_context = std::make_shared<::intel_npu::RemoteContextImpl>(engine_backend);
     auto shape = Shape{1, 2, 2, 2};
 
-    auto host_tensor =
-        std::make_shared<::intel_npu::ZeroHostTensor>(zero_context, init_struct, ov::element::f32, shape);
+    auto host_tensor = std::make_shared<::intel_npu::ZeroHostTensor>(zero_context, init_struct, element_type, shape);
 
     auto copy_zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, host_tensor, npu_config);
 
@@ -349,7 +349,7 @@ TEST_P(ZeroTensorTests, CopyRemoteTensorAndKeepAlive) {
     auto shape = Shape{1, 2, 2, 2};
 
     auto remote_tensor =
-        std::make_shared<::intel_npu::ZeroRemoteTensor>(zero_context, init_struct, ov::element::f32, shape);
+        std::make_shared<::intel_npu::ZeroRemoteTensor>(zero_context, init_struct, element_type, shape);
 
     auto copy_zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, remote_tensor, npu_config);
 
@@ -375,14 +375,28 @@ TEST_P(ZeroTensorTests, CopyRemoteTensorFromAnotherContextThrow) {
         {ov::intel_npu::tensor_type.name(), {ov::intel_npu::TensorType::INPUT}}};
 
     auto context = core->create_context(target_device, params);
-    auto remote_tensor = context.create_tensor(ov::element::f32, shape);
+    auto remote_tensor = context.create_tensor(element_type, shape);
     auto remote_tensor_impl = get_tensor_impl(remote_tensor);
 
     ASSERT_THROW(
         auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, remote_tensor_impl, npu_config),
         ::intel_npu::ZeroTensorException);
 }
 
+using ZeroTensorTestsCheckDataType = ZeroTensorTests;
+
+TEST_P(ZeroTensorTestsCheckDataType, CopyZeroTensorAndCheckTensorDataType) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    auto shape = Shape{1, 2, 2, 2};
+
+    auto zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, npu_config, element_type, shape, true);
+    EXPECT_EQ(element_type, zero_tensor->get_element_type());
+
+    auto copy_zero_tensor = std::make_shared<::intel_npu::ZeroTensor>(init_struct, zero_tensor, npu_config);
+    EXPECT_EQ(zero_tensor->get_element_type(), copy_zero_tensor->get_element_type());
+}
+
 }  // namespace behavior
 }  // namespace test
 }  // namespace ov
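
Note: the diff above does not show how the test suites are instantiated with the new three-element parameter tuple. The sketch below is only a hedged illustration of how a GoogleTest instantiation could feed CompilationParamsAndTensorDataType to the new ZeroTensorTestsCheckDataType fixture; the suite name, the config list, and the chosen element types are assumptions and are not taken from this patch.

// Hypothetical instantiation sketch (names and values are assumptions, not part of this diff).
const std::vector<ov::AnyMap> illustrativeConfigs = {{}};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
                         ZeroTensorTestsCheckDataType,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU),
                                            ::testing::ValuesIn(illustrativeConfigs),
                                            ::testing::Values(ov::element::f32, ov::element::f16)),
                         ZeroTensorTestsCheckDataType::getTestCaseName);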