//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConstantTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{
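// Creates a Constant workload that holds the given tensor data, executes it,
// and returns the output so callers can check it matches the original data.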
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
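    // The memory manager is part of the shared layer-test signature; this
    // test does not need it.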
    IgnoreUnused(memoryManager);
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;
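    // A Constant layer simply publishes its stored tensor, so the output
    // dimensions must match the input dimensions exactly.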
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);
    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
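    // The constant data held by the layer; QuantizedVector converts the
    // float values using qScale/qOffset when T is a quantized type.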
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        armnnUtils::QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            235.0f,  46.0f, 178.0f,
            100.0f, 123.0f,  19.0f,
            172.0f,  74.0f, 250.0f,
              6.0f, 195.0f,  80.0f,

            // Batch 0, Channel 1
            113.0f,  95.0f, 202.0f,
             77.0f, 114.0f,  71.0f,
            122.0f, 246.0f, 166.0f,
             82.0f,  28.0f,  37.0f,

            // Batch 0, Channel 2
             56.0f, 170.0f, 162.0f,
            194.0f,  89.0f, 254.0f,
             12.0f, 209.0f, 200.0f,
              1.0f,  64.0f,  54.0f,

            // Batch 1, Channel 0
             67.0f,  90.0f,  49.0f,
              7.0f, 163.0f,  18.0f,
             25.0f, 117.0f, 103.0f,
            247.0f,  59.0f, 189.0f,

            // Batch 1, Channel 1
            239.0f, 104.0f, 199.0f,
             17.0f, 124.0f, 153.0f,
            222.0f, 217.0f,  75.0f,
             32.0f, 126.0f,  21.0f,

            // Batch 1, Channel 2
             97.0f, 145.0f, 215.0f,
            115.0f, 116.0f, 238.0f,
            226.0f,  16.0f, 132.0f,
             92.0f, 125.0f,  88.0f,
        },
        qScale, qOffset)));
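    // A Constant workload must reproduce its stored tensor bit-exactly, so
    // the input data doubles as the reference output.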
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
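    // The constant data is owned by the test through a ScopedCpuTensorHandle;
    // the queue descriptor only points at it via m_LayerOutput.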
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
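    // Allocate the output, run the workload, then read the result back for
    // comparison against the expected tensor.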
    outputHandle->Allocate();

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

} // anonymous namespace
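// Public test entry points. Each instantiates ConstantTestImpl for one data
// type and quantization configuration.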
LayerTestResult<float, 4> ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 2e-6f, 1);
}

LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 2e-6f, 1);
}