ArmNN 20.02 — ArmComputeTensorUtilsTests.cpp File Reference
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (BuildArmComputeTensorInfoTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE()

BOOST_AUTO_TEST_CASE ( BuildArmComputeTensorInfoTest  )

Definition at line 14 of file ArmComputeTensorUtilsTests.cpp.

References armnn::QAsymmU8.

15 {
16 
17  const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
19 
20  const std::vector<float> quantScales = { 1.5f, 2.5f, 3.5f, 4.5f };
21  const float quantScale = quantScales[0];
22  const int32_t quantOffset = 128;
23 
24  // Tensor info with per-tensor quantization
25  const armnn::TensorInfo tensorInfo0(tensorShape, dataType, quantScale, quantOffset);
26  const arm_compute::TensorInfo aclTensorInfo0 = BuildArmComputeTensorInfo(tensorInfo0);
27 
28  const arm_compute::TensorShape& aclTensorShape = aclTensorInfo0.tensor_shape();
29  BOOST_CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
30  for(unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
31  {
32  // NOTE: arm_compute tensor dimensions are stored in the opposite order
33  BOOST_CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
34  }
35 
36  BOOST_CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
37  BOOST_CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);
38 
39  // Tensor info with per-axis quantization
40  const armnn::TensorInfo tensorInfo1(tensorShape, dataType, quantScales, 0);
41  const arm_compute::TensorInfo aclTensorInfo1 = BuildArmComputeTensorInfo(tensorInfo1);
42 
43  BOOST_CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
44 }
Cross-references rendered by Doxygen:
- BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck) — unrelated snippet pulled in by the BOOST_CHECK link.
- DataType — defined at Types.hpp line 32.
- unsigned int GetNumDimensions() const — defined at Tensor.hpp line 43.