// ArmNN 21.05 — ElementwiseTestImpl.hpp
// (Recovered from the generated ArmNN documentation page for this file.)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include "LayerTestResult.hpp"
9 
10 
11 #include <ResolveType.hpp>
12 
17 
21 
22 #include <test/TensorHelpers.hpp>
23 
24 #include <memory>
25 
26 template<typename DescriptorType>
27 std::unique_ptr<armnn::IWorkload> CreateWorkload(
28  const armnn::IWorkloadFactory& workloadFactory,
29  const armnn::WorkloadInfo& info,
30  const DescriptorType& descriptor)
31 {
32  return CreateWorkload(workloadFactory, info, descriptor);
33 }
34 
35 template <std::size_t NumDims,
36  typename Descriptor,
37  armnn::DataType ArmnnTypeInput,
38  armnn::DataType ArmnnTypeOutput,
39  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
40  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
42  armnn::IWorkloadFactory & workloadFactory,
44  const unsigned int shape0[NumDims],
45  std::vector<TInput> values0,
46  float quantScale0,
47  int quantOffset0,
48  const unsigned int shape1[NumDims],
49  std::vector<TInput> values1,
50  float quantScale1,
51  int quantOffset1,
52  const unsigned int outShape[NumDims],
53  std::vector<TOutput> outValues,
54  const armnn::ITensorHandleFactory& tensorHandleFactory,
55  float outQuantScale,
56  int outQuantOffset)
57 {
58  armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
59  armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
60  armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
61 
62  auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
63  auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);
64 
65  inputTensorInfo0.SetQuantizationScale(quantScale0);
66  inputTensorInfo0.SetQuantizationOffset(quantOffset0);
67 
68  inputTensorInfo1.SetQuantizationScale(quantScale1);
69  inputTensorInfo1.SetQuantizationOffset(quantOffset1);
70 
71  outputTensorInfo.SetQuantizationScale(outQuantScale);
72  outputTensorInfo.SetQuantizationOffset(outQuantOffset);
73 
74  LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);
75 
76  if(ArmnnTypeOutput == armnn::DataType::Boolean)
77  {
78  ret.compareBoolean = true;
79  }
80 
81  std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
82  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
83  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
84 
85  Descriptor data;
87  AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
88  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
89  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
90  auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
91 
92  inputHandle0->Allocate();
93  inputHandle1->Allocate();
94  outputHandle->Allocate();
95 
96  CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
97  CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
98 
99  workload->PostAllocationConfigure();
100  ExecuteWorkload(*workload, memoryManager);
101 
102  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
103 
104  ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
105  return ret;
106 }
107 
108 template <std::size_t NumDims,
109  typename Descriptor,
110  armnn::DataType ArmnnType,
111  typename T = armnn::ResolveType<ArmnnType>>
113  armnn::IWorkloadFactory & workloadFactory,
115  const unsigned int shape0[NumDims],
116  std::vector<T> values0,
117  float quantScale0,
118  int quantOffset0,
119  const unsigned int shape1[NumDims],
120  std::vector<T> values1,
121  float quantScale1,
122  int quantOffset1,
123  const unsigned int outShape[NumDims],
124  std::vector<T> outValues,
125  const armnn::ITensorHandleFactory& tensorHandleFactory,
126  float outQuantScale,
127  int outQuantOffset)
128 {
129  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
130  workloadFactory,
131  memoryManager,
132  shape0,
133  values0,
134  quantScale0,
135  quantOffset0,
136  shape1,
137  values1,
138  quantScale1,
139  quantOffset1,
140  outShape,
141  outValues,
142  tensorHandleFactory,
143  outQuantScale,
144  outQuantOffset);
145 }
146 
147 template <std::size_t NumDims,
148  typename Descriptor,
149  armnn::DataType ArmnnTypeInput,
150  armnn::DataType ArmnnTypeOutput,
151  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
152  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
154  armnn::IWorkloadFactory & workloadFactory,
156  const unsigned int shape0[NumDims],
157  std::vector<TInput> values0,
158  const unsigned int shape1[NumDims],
159  std::vector<TInput> values1,
160  const unsigned int outShape[NumDims],
161  std::vector<TOutput> outValues,
162  const armnn::ITensorHandleFactory& tensorHandleFactory,
163  float quantScale = 1.0f,
164  int quantOffset = 0)
165 {
166  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
167  workloadFactory,
168  memoryManager,
169  shape0,
170  values0,
171  quantScale,
172  quantOffset,
173  shape1,
174  values1,
175  quantScale,
176  quantOffset,
177  outShape,
178  outValues,
179  tensorHandleFactory,
180  quantScale,
181  quantOffset);
182 }
183 
184 template <std::size_t NumDims,
185  typename Descriptor,
186  armnn::DataType ArmnnType,
187  typename T = armnn::ResolveType<ArmnnType>>
189  armnn::IWorkloadFactory & workloadFactory,
191  const unsigned int shape0[NumDims],
192  std::vector<T> values0,
193  const unsigned int shape1[NumDims],
194  std::vector<T> values1,
195  const unsigned int outShape[NumDims],
196  std::vector<T> outValues,
197  const armnn::ITensorHandleFactory& tensorHandleFactory,
198  float quantScale = 1.0f,
199  int quantOffset = 0)
200 {
201  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
202  workloadFactory,
203  memoryManager,
204  shape0,
205  values0,
206  shape1,
207  values1,
208  outShape,
209  outValues,
210  tensorHandleFactory,
211  quantScale,
212  quantOffset);
213 }
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
DataType
Definition: Types.hpp:36
LayerTestResult< TOutput, NumDims > ElementwiseTestHelper(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const unsigned int shape0[NumDims], std::vector< TInput > values0, float quantScale0, int quantOffset0, const unsigned int shape1[NumDims], std::vector< TInput > values1, float quantScale1, int quantOffset1, const unsigned int outShape[NumDims], std::vector< TOutput > outValues, const armnn::ITensorHandleFactory &tensorHandleFactory, float outQuantScale, int outQuantOffset)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
boost::multi_array< T, n > output
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)