// ArmNN 22.05 — LogSoftmaxTestImpl.cpp
// (Source listing captured from the Doxygen documentation pages; the
// "Go to the documentation of this file" text was site navigation,
// not part of the source.)
//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LogSoftmaxTestImpl.hpp"

// NOTE(review): the hyperlinked include lines were dropped by the HTML
// extraction; restored here to match the ArmNN 22.05 sources — verify
// against the upstream file.
#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>
22 namespace
23 {
24 
25 template<armnn::DataType ArmnnType,
26  std::size_t NumDims,
27  typename T = armnn::ResolveType<ArmnnType>>
28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29  armnn::IWorkloadFactory& workloadFactory,
31  const armnn::ITensorHandleFactory& tensorHandleFactory,
32  const armnn::TensorInfo& inputInfo,
33  const armnn::TensorInfo& outputInfo,
34  const std::vector<float>& inputValues,
35  const std::vector<float>& expectedOutputValues,
37  float qScale = 1.0f,
38  int32_t qOffset = 0)
39 {
40  IgnoreUnused(memoryManager);
41 
42  auto inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
43 
44  std::vector<T> actualOutput(outputInfo.GetNumElements());
45  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
46 
47  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
48  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
49 
51 
52  AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
53  AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
54 
55  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::LogSoftmax,
56  descriptor,
57  info);
58 
59  inputHandle->Allocate();
60  outputHandle->Allocate();
61 
62  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
63 
64  ExecuteWorkload(*workload, memoryManager);
65 
66  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
67 
68  return LayerTestResult<T, NumDims>(actualOutput,
69  expectedOutput,
70  outputHandle->GetShape(),
71  outputInfo.GetShape());
72 
73 }
75 } // anonymous namespace
77 template<armnn::DataType ArmnnType, typename T>
79  armnn::IWorkloadFactory& workloadFactory,
81  const armnn::ITensorHandleFactory& tensorHandleFactory)
82 {
83  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
84 
85  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
86  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
87 
88  std::vector<float> inputValues
89  {
90  0.f, -6.f, 2.f, 4.f,
91  3.f, -2.f, 10.f, 1.f
92  };
93 
94  std::vector<float> expectedOutputValues
95  {
96  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
97  -7.00104f, -12.00104f, -0.00105f, -9.00104f
98  };
99 
101  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
102  descriptor.m_Parameters.m_Axis = -1; // default axis
103 
104  return LogSoftmaxTestImpl<ArmnnType, 4>(
105  workloadFactory,
106  memoryManager,
107  tensorHandleFactory,
108  inputTensorInfo,
109  outputTensorInfo,
110  inputValues,
111  expectedOutputValues,
112  descriptor);
113 }
115 template<armnn::DataType ArmnnType, typename T>
117  armnn::IWorkloadFactory& workloadFactory,
119  const armnn::ITensorHandleFactory& tensorHandleFactory)
120 {
121  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
122 
123  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
124  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
125 
126  std::vector<float> inputValues
127  {
128  0.f, -6.f, 2.f, 4.f,
129  3.f, -2.f, 10.f, 1.f
130  };
131 
132  std::vector<float> expectedOutputValues
133  {
134  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
135  -7.00104f, -12.00104f, -0.00105f, -9.00104f
136  };
137 
139  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
140  descriptor.m_Parameters.m_Axis = 3; // positive axis
141 
142  return LogSoftmaxTestImpl<ArmnnType, 4>(
143  workloadFactory,
144  memoryManager,
145  tensorHandleFactory,
146  inputTensorInfo,
147  outputTensorInfo,
148  inputValues,
149  expectedOutputValues,
150  descriptor);
151 }
153 template<armnn::DataType ArmnnType, typename T>
155  armnn::IWorkloadFactory& workloadFactory,
157  const armnn::ITensorHandleFactory& tensorHandleFactory)
158 {
159  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
160 
161  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
162  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
163 
164  std::vector<float> inputValues
165  {
166  0.0f, -0.6f, 0.2f, 0.4f,
167  0.3f, -0.2f, 1.0f, 0.1f
168  };
169 
170  std::vector<float> expectedOutputValues
171  {
172  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
173  -7.00104f, -12.00104f, -0.00105f, -9.00104f
174  };
175 
177  descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
178  descriptor.m_Parameters.m_Axis = 3; // positive axis
179 
180  return LogSoftmaxTestImpl<ArmnnType, 4>(
181  workloadFactory,
182  memoryManager,
183  tensorHandleFactory,
184  inputTensorInfo,
185  outputTensorInfo,
186  inputValues,
187  expectedOutputValues,
188  descriptor);
189 }
191 template<armnn::DataType ArmnnType, typename T>
193  armnn::IWorkloadFactory& workloadFactory,
195  const armnn::ITensorHandleFactory& tensorHandleFactory)
196 {
197  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
198 
199  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
200  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
201 
202  std::vector<float> inputValues
203  {
204  0.f, -6.f, 2.f, 4.f,
205  3.f, -2.f, 10.f, 1.f
206  };
207 
208  std::vector<float> expectedOutputValues
209  {
210  -3.048587f, -4.018149f, -8.000336f, -0.048587f,
211  -0.048587f, -0.018149f, -0.000335f, -3.048587f
212  };
213 
215  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
216  descriptor.m_Parameters.m_Axis = -2; // negative axis
217 
218  return LogSoftmaxTestImpl<ArmnnType, 4>(
219  workloadFactory,
220  memoryManager,
221  tensorHandleFactory,
222  inputTensorInfo,
223  outputTensorInfo,
224  inputValues,
225  expectedOutputValues,
226  descriptor);
227 }
230 LogSoftmaxTest1<armnn::DataType::Float32>(
231  armnn::IWorkloadFactory& workloadFactory,
233  const armnn::ITensorHandleFactory& tensorHandleFactory);
234 
235 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
236 LogSoftmaxTest2<armnn::DataType::Float32>(
237  armnn::IWorkloadFactory& workloadFactory,
239  const armnn::ITensorHandleFactory& tensorHandleFactory);
240 
241 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
242 LogSoftmaxTest3<armnn::DataType::Float32>(
243  armnn::IWorkloadFactory& workloadFactory,
245  const armnn::ITensorHandleFactory& tensorHandleFactory);
246 
247 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
248 LogSoftmaxTest4<armnn::DataType::Float32>(
249  armnn::IWorkloadFactory& workloadFactory,
251  const armnn::ITensorHandleFactory& tensorHandleFactory);
252 
254 LogSoftmaxTest1<armnn::DataType::Float16>(
255  armnn::IWorkloadFactory& workloadFactory,
257  const armnn::ITensorHandleFactory& tensorHandleFactory);
258 
259 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
260 LogSoftmaxTest2<armnn::DataType::Float16>(
261  armnn::IWorkloadFactory& workloadFactory,
263  const armnn::ITensorHandleFactory& tensorHandleFactory);
264 
265 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
266 LogSoftmaxTest3<armnn::DataType::Float16>(
267  armnn::IWorkloadFactory& workloadFactory,
269  const armnn::ITensorHandleFactory& tensorHandleFactory);
270 
271 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
272 LogSoftmaxTest4<armnn::DataType::Float16>(
273  armnn::IWorkloadFactory& workloadFactory,
275  const armnn::ITensorHandleFactory& tensorHandleFactory);
// ---------------------------------------------------------------------------
// Doxygen tooltip text captured with the listing, kept here for reference:
//   int m_Axis — scalar, defaulted to the last index (-1), specifying the
//       dimension the activation will be performed on.
//   float m_Beta — exponentiation value.
//   const TensorShape& GetShape() const — Tensor.hpp:191
//   unsigned int GetNumElements() const — Tensor.hpp:196
//   typename ResolveTypeImpl<DT>::Type ResolveType — ResolveType.hpp:79
//   void IgnoreUnused(Ts&&...)
//   DataType — Types.hpp:48
//   std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
//   void CopyDataToITensorHandle(armnn::ITensorHandle*, const void*)
//   void CopyDataFromITensorHandle(void*, const armnn::ITensorHandle*)
//   WorkloadInfo — contains information about TensorInfos of a layer.
//   virtual std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(
//       LayerType, const QueueDescriptor&, const WorkloadInfo&) const
//   virtual std::unique_ptr<ITensorHandle> ITensorHandleFactory::
//       CreateTensorHandle(const TensorInfo&) const = 0
//   LayerTestResult<T, 4> LogSoftmaxTest1..4(armnn::IWorkloadFactory&,
//       const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
//       const armnn::ITensorHandleFactory&)
// ---------------------------------------------------------------------------