template <typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const DescriptorType& descriptor);

template <std::size_t NumDims,
          typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput  = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    std::vector<TInput> values0,
    float quantScale0,
    int quantOffset0,
    const unsigned int shape1[NumDims],
    std::vector<TInput> values1,
    float quantScale1,
    int quantOffset1,
    const unsigned int outShape[NumDims],
    std::vector<TOutput> outValues,
    float outQuantScale,
    int outQuantOffset)
{
    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);

    inputTensorInfo0.SetQuantizationScale(quantScale0);
    inputTensorInfo0.SetQuantizationOffset(quantOffset0);
    inputTensorInfo1.SetQuantizationScale(quantScale1);
    inputTensorInfo1.SetQuantizationOffset(quantOffset1);
    outputTensorInfo.SetQuantizationScale(outQuantScale);
    outputTensorInfo.SetQuantizationOffset(outQuantOffset);

    LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
    return ret;
}

template <std::size_t NumDims, typename Descriptor,
          armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims], std::vector<T> values0, float quantScale0, int quantOffset0,
    const unsigned int shape1[NumDims], std::vector<T> values1, float quantScale1, int quantOffset1,
    const unsigned int outShape[NumDims], std::vector<T> outValues, float outQuantScale, int outQuantOffset)
{
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
        workloadFactory, memoryManager,
        shape0, values0, quantScale0, quantOffset0,
        shape1, values1, quantScale1, quantOffset1,
        outShape, outValues, outQuantScale, outQuantOffset);
}

template <std::size_t NumDims, typename Descriptor,
          armnn::DataType ArmnnTypeInput, armnn::DataType ArmnnTypeOutput,
          typename TInput  = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims], std::vector<TInput> values0,
    const unsigned int shape1[NumDims], std::vector<TInput> values1,
    const unsigned int outShape[NumDims], std::vector<TOutput> outValues,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
        workloadFactory, memoryManager,
        shape0, values0, quantScale, quantOffset,
        shape1, values1, quantScale, quantOffset,
        outShape, outValues, quantScale, quantOffset);
}

template <std::size_t NumDims, typename Descriptor,
          armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims], std::vector<T> values0,
    const unsigned int shape1[NumDims], std::vector<T> values1,
    const unsigned int outShape[NumDims], std::vector<T> outValues,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
        workloadFactory, memoryManager,
        shape0, values0,
        shape1, values1,
        outShape, outValues,
        quantScale, quantOffset);
}
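
For context, a concrete layer test would typically instantiate one of the overloads above with a queue descriptor type and a data type. The sketch below is illustrative only and not part of this header: it assumes armnn::AdditionQueueDescriptor and armnn::DataType::Float32, made-up shapes and values, a hypothetical test name, and that a CreateWorkload overload or specialisation for the chosen descriptor type is provided elsewhere in the test code.

// Hypothetical usage sketch (not part of this header): an elementwise
// addition test built on the convenience overload that applies default
// quantization parameters (scale 1.0, offset 0).
LayerTestResult<float, 4> SimpleAdditionSketchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Illustrative 1x1x2x2 tensors; any shapes and values would do.
    const unsigned int shape[] = { 1, 1, 2, 2 };

    std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> input1 = { 5.f, 6.f, 7.f, 8.f };
    std::vector<float> output = { 6.f, 8.f, 10.f, 12.f }; // element-wise sums

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        shape, input0,
        shape, input1,
        shape, output);
}

The returned LayerTestResult carries both the computed output and the expected values, which the calling test framework can then compare.
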
Referenced symbols:

CreateWorkload: std::unique_ptr<armnn::IWorkload> CreateWorkload(const armnn::IWorkloadFactory& workloadFactory, const armnn::WorkloadInfo& info, const DescriptorType& descriptor)
ARMNN_NO_DEPRECATE_WARN_BEGIN: #define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult::outputExpected: boost::multi_array<T, n> outputExpected
armnn::ResolveType: typename ResolveTypeImpl<DT>::Type ResolveType
ARMNN_NO_DEPRECATE_WARN_END: #define ARMNN_NO_DEPRECATE_WARN_END
armnn::IBackendInternal::IMemoryManagerSharedPtr: std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
armnn::TensorInfo::SetQuantizationScale: void SetQuantizationScale(float scale)
CopyDataFromITensorHandle: void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
armnn::IWorkloadFactory::CreateTensorHandle: virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const = 0
LayerTestResult::output: boost::multi_array<T, n> output
ElementwiseTestHelper: LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const unsigned int shape0[NumDims], std::vector<TInput> values0, float quantScale0, int quantOffset0, const unsigned int shape1[NumDims], std::vector<TInput> values1, float quantScale1, int quantOffset1, const unsigned int outShape[NumDims], std::vector<TOutput> outValues, float outQuantScale, int outQuantOffset)
armnn::WorkloadInfo: Contains information about inputs and outputs to a layer.
CopyDataToITensorHandle: void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
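
The fully-specified ElementwiseTestHelper overload listed above can likewise be exercised with quantized data types, supplying a distinct scale and offset per tensor. Another minimal, hypothetical sketch, assuming the armnn::DataType::QAsymmU8 enumerator name (older Arm NN releases name it QuantisedAsymm8), armnn::AdditionQueueDescriptor, and made-up values:

// Hypothetical usage sketch (not part of this header): the same addition
// test with 8-bit asymmetric-quantized tensors, passing explicit
// per-tensor quantization scale and offset.
LayerTestResult<uint8_t, 4> SimpleAdditionUint8SketchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0 = { 10, 20, 30, 40 };
    std::vector<uint8_t> input1 = {  1,  2,  3,  4 };
    std::vector<uint8_t> output = { 11, 22, 33, 44 };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager,
        shape, input0, 1.0f, 0,    // input 0: quantization scale and offset
        shape, input1, 1.0f, 0,    // input 1: quantization scale and offset
        shape, output, 1.0f, 0);   // output:  quantization scale and offset
}

Because the scale is 1.0 and the offset 0 for every tensor in this sketch, the expected quantized outputs are simply the element-wise sums of the quantized inputs.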