ArmNN 20.08 — ElementwiseTestImpl.hpp
Source listing extracted from the generated (Doxygen) documentation for this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include "LayerTestResult.hpp"
9 
10 
11 #include <ResolveType.hpp>
12 
17 
21 
22 #include <test/TensorHelpers.hpp>
23 
24 #include <memory>
25 
26 template<typename DescriptorType>
27 std::unique_ptr<armnn::IWorkload> CreateWorkload(
28  const armnn::IWorkloadFactory& workloadFactory,
29  const armnn::WorkloadInfo& info,
30  const DescriptorType& descriptor)
31 {
32  return CreateWorkload(workloadFactory, info, descriptor);
33 }
34 
35 template <std::size_t NumDims,
36  typename Descriptor,
37  armnn::DataType ArmnnTypeInput,
38  armnn::DataType ArmnnTypeOutput,
39  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
40  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
42  armnn::IWorkloadFactory & workloadFactory,
44  const unsigned int shape0[NumDims],
45  std::vector<TInput> values0,
46  float quantScale0,
47  int quantOffset0,
48  const unsigned int shape1[NumDims],
49  std::vector<TInput> values1,
50  float quantScale1,
51  int quantOffset1,
52  const unsigned int outShape[NumDims],
53  std::vector<TOutput> outValues,
54  float outQuantScale,
55  int outQuantOffset)
56 {
57  armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
58  armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
59  armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
60 
61  auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
62  auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);
63 
64  inputTensorInfo0.SetQuantizationScale(quantScale0);
65  inputTensorInfo0.SetQuantizationOffset(quantOffset0);
66 
67  inputTensorInfo1.SetQuantizationScale(quantScale1);
68  inputTensorInfo1.SetQuantizationOffset(quantOffset1);
69 
70  outputTensorInfo.SetQuantizationScale(outQuantScale);
71  outputTensorInfo.SetQuantizationOffset(outQuantOffset);
72 
73  LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);
74 
75  if(ArmnnTypeOutput == armnn::DataType::Boolean)
76  {
77  ret.compareBoolean = true;
78  }
79 
81  std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
82  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
83  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
85 
86  Descriptor data;
88  AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
89  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
90  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
91  auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
92 
93  inputHandle0->Allocate();
94  inputHandle1->Allocate();
95  outputHandle->Allocate();
96 
97  CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
98  CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
99 
100  workload->PostAllocationConfigure();
101  ExecuteWorkload(*workload, memoryManager);
102 
103  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
104 
105  ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
106  return ret;
107 }
108 
109 template <std::size_t NumDims,
110  typename Descriptor,
111  armnn::DataType ArmnnType,
112  typename T = armnn::ResolveType<ArmnnType>>
114  armnn::IWorkloadFactory & workloadFactory,
116  const unsigned int shape0[NumDims],
117  std::vector<T> values0,
118  float quantScale0,
119  int quantOffset0,
120  const unsigned int shape1[NumDims],
121  std::vector<T> values1,
122  float quantScale1,
123  int quantOffset1,
124  const unsigned int outShape[NumDims],
125  std::vector<T> outValues,
126  float outQuantScale,
127  int outQuantOffset)
128 {
129  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
130  workloadFactory,
131  memoryManager,
132  shape0,
133  values0,
134  quantScale0,
135  quantOffset0,
136  shape1,
137  values1,
138  quantScale1,
139  quantOffset1,
140  outShape,
141  outValues,
142  outQuantScale,
143  outQuantOffset);
144 }
145 
146 template <std::size_t NumDims,
147  typename Descriptor,
148  armnn::DataType ArmnnTypeInput,
149  armnn::DataType ArmnnTypeOutput,
150  typename TInput = armnn::ResolveType<ArmnnTypeInput>,
151  typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
153  armnn::IWorkloadFactory & workloadFactory,
155  const unsigned int shape0[NumDims],
156  std::vector<TInput> values0,
157  const unsigned int shape1[NumDims],
158  std::vector<TInput> values1,
159  const unsigned int outShape[NumDims],
160  std::vector<TOutput> outValues,
161  float quantScale = 1.0f,
162  int quantOffset = 0)
163 {
164  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
165  workloadFactory,
166  memoryManager,
167  shape0,
168  values0,
169  quantScale,
170  quantOffset,
171  shape1,
172  values1,
173  quantScale,
174  quantOffset,
175  outShape,
176  outValues,
177  quantScale,
178  quantOffset);
179 }
180 
181 template <std::size_t NumDims,
182  typename Descriptor,
183  armnn::DataType ArmnnType,
184  typename T = armnn::ResolveType<ArmnnType>>
186  armnn::IWorkloadFactory & workloadFactory,
188  const unsigned int shape0[NumDims],
189  std::vector<T> values0,
190  const unsigned int shape1[NumDims],
191  std::vector<T> values1,
192  const unsigned int outShape[NumDims],
193  std::vector<T> outValues,
194  float quantScale = 1.0f,
195  int quantOffset = 0)
196 {
197  return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
198  workloadFactory,
199  memoryManager,
200  shape0,
201  values0,
202  shape1,
203  values1,
204  outShape,
205  outValues,
206  quantScale,
207  quantOffset);
208 }
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
boost::multi_array< T, n > output
LayerTestResult< TOutput, NumDims > ElementwiseTestHelper(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const unsigned int shape0[NumDims], std::vector< TInput > values0, float quantScale0, int quantOffset0, const unsigned int shape1[NumDims], std::vector< TInput > values1, float quantScale1, int quantOffset1, const unsigned int outShape[NumDims], std::vector< TOutput > outValues, float outQuantScale, int outQuantOffset)
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)