ArmNN 20.05 — ElementwiseTestImpl.hpp (Doxygen source listing).
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include "LayerTestResult.hpp"
9 
10 
11 #include <ResolveType.hpp>
12 
17 
21 
22 #include <test/TensorHelpers.hpp>
23 
24 #include <memory>
25 
26 template<typename DescriptorType>
27 std::unique_ptr<armnn::IWorkload> CreateWorkload(
28  const armnn::IWorkloadFactory& workloadFactory,
29  const armnn::WorkloadInfo& info,
30  const DescriptorType& descriptor)
31 {
32  return CreateWorkload(workloadFactory, info, descriptor);
33 }
34 
35 template <std::size_t NumDims,
36  typename Descriptor,
37  armnn::DataType ArmnnTypeInput,
38  armnn::DataType ArmnnTypeOutput,
39  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
40  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
42  armnn::IWorkloadFactory & workloadFactory,
44  const unsigned int shape0[NumDims],
45  std::vector<TInput> values0,
46  float quantScale0,
47  int quantOffset0,
48  const unsigned int shape1[NumDims],
49  std::vector<TInput> values1,
50  float quantScale1,
51  int quantOffset1,
52  const unsigned int outShape[NumDims],
53  std::vector<TOutput> outValues,
54  float outQuantScale,
55  int outQuantOffset)
56 {
57  armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
58  armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
59  armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
60 
61  auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
62  auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);
63 
64  inputTensorInfo0.SetQuantizationScale(quantScale0);
65  inputTensorInfo0.SetQuantizationOffset(quantOffset0);
66 
67  inputTensorInfo1.SetQuantizationScale(quantScale1);
68  inputTensorInfo1.SetQuantizationOffset(quantOffset1);
69 
70  outputTensorInfo.SetQuantizationScale(outQuantScale);
71  outputTensorInfo.SetQuantizationOffset(outQuantOffset);
72 
73  LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);
74 
75  if(ArmnnTypeOutput == armnn::DataType::Boolean)
76  {
77  ret.compareBoolean = true;
78  }
79 
80  std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
81  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
82  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
83 
84  Descriptor data;
86  AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
87  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
88  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
89  auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
90 
91  inputHandle0->Allocate();
92  inputHandle1->Allocate();
93  outputHandle->Allocate();
94 
95  CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
96  CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
97 
98  workload->PostAllocationConfigure();
99  ExecuteWorkload(*workload, memoryManager);
100 
101  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
102 
103  ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
104  return ret;
105 }
106 
107 template <std::size_t NumDims,
108  typename Descriptor,
109  armnn::DataType ArmnnType,
110  typename T = armnn::ResolveType<ArmnnType>>
112  armnn::IWorkloadFactory & workloadFactory,
114  const unsigned int shape0[NumDims],
115  std::vector<T> values0,
116  float quantScale0,
117  int quantOffset0,
118  const unsigned int shape1[NumDims],
119  std::vector<T> values1,
120  float quantScale1,
121  int quantOffset1,
122  const unsigned int outShape[NumDims],
123  std::vector<T> outValues,
124  float outQuantScale,
125  int outQuantOffset)
126 {
127  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
128  workloadFactory,
129  memoryManager,
130  shape0,
131  values0,
132  quantScale0,
133  quantOffset0,
134  shape1,
135  values1,
136  quantScale1,
137  quantOffset1,
138  outShape,
139  outValues,
140  outQuantScale,
141  outQuantOffset);
142 }
143 
144 template <std::size_t NumDims,
145  typename Descriptor,
146  armnn::DataType ArmnnTypeInput,
147  armnn::DataType ArmnnTypeOutput,
148  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
149  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
151  armnn::IWorkloadFactory & workloadFactory,
153  const unsigned int shape0[NumDims],
154  std::vector<TInput> values0,
155  const unsigned int shape1[NumDims],
156  std::vector<TInput> values1,
157  const unsigned int outShape[NumDims],
158  std::vector<TOutput> outValues,
159  float quantScale = 1.0f,
160  int quantOffset = 0)
161 {
162  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
163  workloadFactory,
164  memoryManager,
165  shape0,
166  values0,
167  quantScale,
168  quantOffset,
169  shape1,
170  values1,
171  quantScale,
172  quantOffset,
173  outShape,
174  outValues,
175  quantScale,
176  quantOffset);
177 }
178 
179 template <std::size_t NumDims,
180  typename Descriptor,
181  armnn::DataType ArmnnType,
182  typename T = armnn::ResolveType<ArmnnType>>
184  armnn::IWorkloadFactory & workloadFactory,
186  const unsigned int shape0[NumDims],
187  std::vector<T> values0,
188  const unsigned int shape1[NumDims],
189  std::vector<T> values1,
190  const unsigned int outShape[NumDims],
191  std::vector<T> outValues,
192  float quantScale = 1.0f,
193  int quantOffset = 0)
194 {
195  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
196  workloadFactory,
197  memoryManager,
198  shape0,
199  values0,
200  shape1,
201  values1,
202  outShape,
203  outValues,
204  quantScale,
205  quantOffset);
206 }
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
DataType
Definition: Types.hpp:32
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
boost::multi_array< T, n > output
LayerTestResult< TOutput, NumDims > ElementwiseTestHelper(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const unsigned int shape0[NumDims], std::vector< TInput > values0, float quantScale0, int quantOffset0, const unsigned int shape1[NumDims], std::vector< TInput > values1, float quantScale1, int quantOffset1, const unsigned int outShape[NumDims], std::vector< TOutput > outValues, float outQuantScale, int outQuantOffset)
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)