ArmNN 22.02
ElementwiseTestImpl.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <DataTypeUtils.hpp>

#include <memory>

template<typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const DescriptorType& descriptor) {
    return CreateWorkload(workloadFactory, info, descriptor);
}
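
// The primary template above only forwards to a more specific CreateWorkload;
// each operator's test code is expected to provide an explicit specialization for
// its queue descriptor that calls the matching factory method. A minimal sketch,
// assuming the addition operator and the IWorkloadFactory::CreateAddition overload
// used by the addition tests (adapt the descriptor and factory call for the
// operator under test):
//
// template<>
// std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
//     const armnn::IWorkloadFactory& workloadFactory,
//     const armnn::WorkloadInfo& info,
//     const armnn::AdditionQueueDescriptor& descriptor)
// {
//     return workloadFactory.CreateAddition(descriptor, info);
// }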

// Runs a two-input elementwise workload described by Descriptor on the given
// backend and compares the computed output against outValues. The inputs and the
// output may use different data types and carry their own quantization parameters.
template<std::size_t NumDims,
         typename Descriptor,
         armnn::DataType ArmnnTypeInput,
         armnn::DataType ArmnnTypeOutput,
         typename TInput = armnn::ResolveType<ArmnnTypeInput>,
         typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    std::vector<TInput> values0,
    float quantScale0,
    int quantOffset0,
    const unsigned int shape1[NumDims],
    std::vector<TInput> values1,
    float quantScale1,
    int quantOffset1,
    const unsigned int outShape[NumDims],
    std::vector<TOutput> outValues,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float outQuantScale,
    int outQuantOffset) {
    // Describe both inputs and the output, including per-tensor quantization.
    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};

    inputTensorInfo0.SetQuantizationScale(quantScale0);
    inputTensorInfo0.SetQuantizationOffset(quantOffset0);

    inputTensorInfo1.SetQuantizationScale(quantScale1);
    inputTensorInfo1.SetQuantizationOffset(quantOffset1);

    outputTensorInfo.SetQuantizationScale(outQuantScale);
    outputTensorInfo.SetQuantizationOffset(outQuantOffset);

    std::vector<TOutput> actualOutput(outputTensorInfo.GetNumElements());

    // Comparison-style workloads produce Boolean outputs; record this so the
    // returned LayerTestResult compares the data accordingly.
    bool isBoolean = false;
    if (ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        isBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor, wire up the tensor handles and create the workload.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), values0.data());
    CopyDataToITensorHandle(inputHandle1.get(), values1.data());

    // Execute the workload and read the result back from the output handle.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<TOutput, NumDims>(actualOutput,
                                             outValues,
                                             outputHandle->GetShape(),
                                             outputTensorInfo.GetShape(),
                                             isBoolean);
}
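
// A minimal sketch of calling the overload above for a quantized test, assuming
// armnn::MultiplicationQueueDescriptor as the operator, QAsymmU8 data (which
// ResolveType maps to uint8_t), and placeholder names shape0/shape1/outShape and
// input0/input1/expectedOutput for tensors prepared by the caller; the scales and
// offsets are illustrative only:
//
// auto result = ElementwiseTestHelper<4,
//                                     armnn::MultiplicationQueueDescriptor,
//                                     armnn::DataType::QAsymmU8,
//                                     armnn::DataType::QAsymmU8>(
//     workloadFactory,
//     memoryManager,
//     shape0, input0, /*quantScale0*/ 0.25f, /*quantOffset0*/ 128,
//     shape1, input1, /*quantScale1*/ 0.5f,  /*quantOffset1*/ 0,
//     outShape, expectedOutput,
//     tensorHandleFactory,
//     /*outQuantScale*/ 0.125f, /*outQuantOffset*/ 64);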

// As above, but with a single element type shared by the inputs and the output.
template<std::size_t NumDims,
         typename Descriptor,
         armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    std::vector<T> values0,
    float quantScale0,
    int quantOffset0,
    const unsigned int shape1[NumDims],
    std::vector<T> values1,
    float quantScale1,
    int quantOffset1,
    const unsigned int outShape[NumDims],
    std::vector<T> outValues,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float outQuantScale,
    int outQuantOffset) {
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        shape0,
        values0,
        quantScale0,
        quantOffset0,
        shape1,
        values1,
        quantScale1,
        quantOffset1,
        outShape,
        outValues,
        tensorHandleFactory,
        outQuantScale,
        outQuantOffset);
}

// Convenience overload: one quantization scale and offset shared by both inputs
// and the output.
template<std::size_t NumDims,
         typename Descriptor,
         armnn::DataType ArmnnTypeInput,
         armnn::DataType ArmnnTypeOutput,
         typename TInput = armnn::ResolveType<ArmnnTypeInput>,
         typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    std::vector<TInput> values0,
    const unsigned int shape1[NumDims],
    std::vector<TInput> values1,
    const unsigned int outShape[NumDims],
    std::vector<TOutput> outValues,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float quantScale = 1.0f,
    int quantOffset = 0) {
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
        workloadFactory,
        memoryManager,
        shape0,
        values0,
        quantScale,
        quantOffset,
        shape1,
        values1,
        quantScale,
        quantOffset,
        outShape,
        outValues,
        tensorHandleFactory,
        quantScale,
        quantOffset);
}

// Convenience overload: single element type throughout and shared quantization
// parameters.
template<std::size_t NumDims,
         typename Descriptor,
         armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    std::vector<T> values0,
    const unsigned int shape1[NumDims],
    std::vector<T> values1,
    const unsigned int outShape[NumDims],
    std::vector<T> outValues,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float quantScale = 1.0f,
    int quantOffset = 0) {
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        shape0,
        values0,
        shape1,
        values1,
        outShape,
        outValues,
        tensorHandleFactory,
        quantScale,
        quantOffset);
}
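
// A minimal usage sketch of the simplest overload, assuming a Float32 addition test
// in the style of the ArmNN addition tests (armnn::AdditionQueueDescriptor), with
// workloadFactory, memoryManager and tensorHandleFactory supplied by the calling
// test fixture:
//
// const unsigned int shape[] = { 1, 2, 2, 2 };
// std::vector<float> input0   = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
// std::vector<float> input1   = { 8.f, 7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f };
// std::vector<float> expected = { 9.f, 9.f, 9.f, 9.f, 9.f, 9.f, 9.f, 9.f };
//
// LayerTestResult<float, 4> result =
//     ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
//         workloadFactory,
//         memoryManager,
//         shape, input0,
//         shape, input1,
//         shape, expected,
//         tensorHandleFactory);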