ArmNN 21.02 — ElementwiseUnaryTestImpl.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include "LayerTestResult.hpp"
9 
10 #include <armnn/ArmNN.hpp>
11 
12 #include <ResolveType.hpp>
13 
18 
22 
23 #include <test/TensorHelpers.hpp>
24 
25 #include <memory>
26 
27 std::unique_ptr<armnn::IWorkload> CreateWorkload(
28  const armnn::IWorkloadFactory& workloadFactory,
29  const armnn::WorkloadInfo& info,
30  const armnn::ElementwiseUnaryQueueDescriptor& descriptor);
31 
32 template <std::size_t NumDims,
33  armnn::DataType ArmnnType,
34  typename T = armnn::ResolveType<ArmnnType>>
36  armnn::IWorkloadFactory & workloadFactory,
39  const unsigned int shape[NumDims],
40  std::vector<float> values,
41  float quantScale,
42  int quantOffset,
43  const unsigned int outShape[NumDims],
44  std::vector<float> outValues,
45  const armnn::ITensorHandleFactory& tensorHandleFactory,
46  float outQuantScale,
47  int outQuantOffset)
48 {
49  armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType};
50  armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType};
51 
52  inputTensorInfo.SetQuantizationScale(quantScale);
53  inputTensorInfo.SetQuantizationOffset(quantOffset);
54 
55  outputTensorInfo.SetQuantizationScale(outQuantScale);
56  outputTensorInfo.SetQuantizationOffset(outQuantOffset);
57 
58  auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));
59 
60  LayerTestResult<T, NumDims> ret(outputTensorInfo);
61 
62  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
63  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
64 
67  qDesc.m_Parameters = desc;
69  AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
70  AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
71  auto workload = CreateWorkload(workloadFactory, info, qDesc);
72 
73  inputHandle->Allocate();
74  outputHandle->Allocate();
75 
76  CopyDataToITensorHandle(inputHandle.get(), input.origin());
77 
78  workload->PostAllocationConfigure();
79  ExecuteWorkload(*workload, memoryManager);
80 
81  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
82 
83  ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
84  inputTensorInfo));
85  return ret;
86 }
87 
88 template <std::size_t NumDims,
89  armnn::DataType ArmnnType,
90  typename T = armnn::ResolveType<ArmnnType>>
92  armnn::IWorkloadFactory & workloadFactory,
95  const unsigned int shape[NumDims],
96  std::vector<float> values,
97  const unsigned int outShape[NumDims],
98  std::vector<float> outValues,
99  const armnn::ITensorHandleFactory& tensorHandleFactory,
100  float quantScale = 1.0f,
101  int quantOffset = 0)
102 {
103  return ElementwiseUnaryTestHelper<NumDims, ArmnnType>(
104  workloadFactory,
105  memoryManager,
106  op,
107  shape,
108  values,
109  quantScale,
110  quantOffset,
111  outShape,
112  outValues,
113  tensorHandleFactory,
114  quantScale,
115  quantOffset);
116 }
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
DataType
Definition: Types.hpp:32
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const armnn::ElementwiseUnaryQueueDescriptor &descriptor)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
boost::multi_array< T, n > output
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
UnaryOperation
Definition: Types.hpp:94
LayerTestResult< T, NumDims > ElementwiseUnaryTestHelper(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::UnaryOperation op, const unsigned int shape[NumDims], std::vector< float > values, float quantScale, int quantOffset, const unsigned int outShape[NumDims], std::vector< float > outValues, const armnn::ITensorHandleFactory &tensorHandleFactory, float outQuantScale, int outQuantOffset)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)