ArmNN
 20.11
ConstantTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ConstantTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
20 
21 namespace
22 {
23 
24 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
25 LayerTestResult<T, 4> ConstantTestImpl(
26  armnn::IWorkloadFactory& workloadFactory,
28  const armnn::ITensorHandleFactory& tensorHandleFactory,
29  float qScale,
30  int32_t qOffset)
31 {
32  IgnoreUnused(memoryManager);
33  constexpr unsigned int inputWidth = 3;
34  constexpr unsigned int inputHeight = 4;
35  constexpr unsigned int inputChannels = 3;
36  constexpr unsigned int inputBatchSize = 2;
37 
38  constexpr unsigned int outputWidth = inputWidth;
39  constexpr unsigned int outputHeight = inputHeight;
40  constexpr unsigned int outputChannels = inputChannels;
41  constexpr unsigned int outputBatchSize = inputBatchSize;
42 
43  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
44  ArmnnType, qScale, qOffset);
45 
46  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
47  ArmnnType, qScale, qOffset);
48 
49  // Set quantization parameters if the requested type is a quantized type.
50  if(armnn::IsQuantizedType<T>())
51  {
52  inputTensorInfo.SetQuantizationScale(qScale);
53  inputTensorInfo.SetQuantizationOffset(qOffset);
54  outputTensorInfo.SetQuantizationScale(qScale);
55  outputTensorInfo.SetQuantizationOffset(qOffset);
56  }
57 
58  auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
59  armnnUtils::QuantizedVector<T>(
60  {
61  // Batch 0, Channel 0
62  235.0f, 46.0f, 178.0f,
63  100.0f, 123.0f, 19.0f,
64  172.0f, 74.0f, 250.0f,
65  6.0f, 195.0f, 80.0f,
66 
67  // Batch 0, Channel 1
68  113.0f, 95.0f, 202.0f,
69  77.0f, 114.0f, 71.0f,
70  122.0f, 246.0f, 166.0f,
71  82.0f, 28.0f, 37.0f,
72 
73  // Batch 0, Channel 2
74  56.0f, 170.0f, 162.0f,
75  194.0f, 89.0f, 254.0f,
76  12.0f, 209.0f, 200.0f,
77  1.0f, 64.0f, 54.0f,
78 
79  // Batch 1, Channel 0
80  67.0f, 90.0f, 49.0f,
81  7.0f, 163.0f, 18.0f,
82  25.0f, 117.0f, 103.0f,
83  247.0f, 59.0f, 189.0f,
84 
85  // Batch 1, Channel 1
86  239.0f, 104.0f, 199.0f,
87  17.0f, 124.0f, 153.0f,
88  222.0f, 217.0f, 75.0f,
89  32.0f, 126.0f, 21.0f,
90 
91  // Batch 1, Channel 2
92  97.0f, 145.0f, 215.0f,
93  115.0f, 116.0f, 238.0f,
94  226.0f, 16.0f, 132.0f,
95  92.0f, 125.0f, 88.0f,
96  },
97  qScale, qOffset)));
98 
99  LayerTestResult<T, 4> result(outputTensorInfo);
100  result.outputExpected = input;
101 
102  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
103 
104  armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
105  AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
106 
108  descriptor.m_LayerOutput = &constantTensor;
109 
111  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
112 
113  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
114 
115  outputHandle->Allocate();
116 
117  workload->PostAllocationConfigure();
118  workload->Execute();
119 
120  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
121  return result;
122 }
123 
124 } // anonymous namespace
125 
127  armnn::IWorkloadFactory& workloadFactory,
129  const armnn::ITensorHandleFactory& tensorHandleFactory)
130 {
131  return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
132 }
133 
135  armnn::IWorkloadFactory& workloadFactory,
137  const armnn::ITensorHandleFactory& tensorHandleFactory)
138 {
139  return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
140 }
141 
143  armnn::IWorkloadFactory& workloadFactory,
145  const armnn::ITensorHandleFactory& tensorHandleFactory)
146 {
147  return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
148 }
149 
151  armnn::IWorkloadFactory& workloadFactory,
153  const armnn::ITensorHandleFactory& tensorHandleFactory)
154 {
155  return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
156 }
157 
159  armnn::IWorkloadFactory& workloadFactory,
161  const armnn::ITensorHandleFactory& tensorHandleFactory)
162 {
163  return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
164 }
LayerTestResult< float, 4 > ConstantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ConstantInt16CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const ConstCpuTensorHandle * m_LayerOutput
LayerTestResult< uint8_t, 4 > ConstantUint8CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
void IgnoreUnused(Ts &&...)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > ConstantInt16SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantUint8SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0