ArmNN
 21.08
LogSoftmaxTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
//
#include "LogSoftmaxTestImpl.hpp"

#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
21 
22 namespace
23 {
24 
25 template<armnn::DataType ArmnnType,
26  std::size_t NumDims,
27  typename T = armnn::ResolveType<ArmnnType>>
28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29  armnn::IWorkloadFactory& workloadFactory,
31  const armnn::ITensorHandleFactory& tensorHandleFactory,
32  const armnn::TensorInfo& inputInfo,
33  const armnn::TensorInfo& outputInfo,
34  const std::vector<float>& inputValues,
35  const std::vector<float>& expectedOutputValues,
37  float qScale = 1.0f,
38  int32_t qOffset = 0)
39 {
40  IgnoreUnused(memoryManager);
41 
42  auto inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
43 
44  std::vector<T> actualOutput(outputInfo.GetNumElements());
45  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
46 
47  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
48  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
49 
51 
52  AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
53  AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
54 
55  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
56 
57  inputHandle->Allocate();
58  outputHandle->Allocate();
59 
60  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
61 
62  ExecuteWorkload(*workload, memoryManager);
63 
64  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
65 
66  return LayerTestResult<T, NumDims>(actualOutput,
67  expectedOutput,
68  outputHandle->GetShape(),
69  outputInfo.GetShape());
70 
71 }
72 
73 } // anonymous namespace
74 
75 template<armnn::DataType ArmnnType, typename T>
77  armnn::IWorkloadFactory& workloadFactory,
79  const armnn::ITensorHandleFactory& tensorHandleFactory)
80 {
81  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
82 
83  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
84  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
85 
86  std::vector<float> inputValues
87  {
88  0.f, -6.f, 2.f, 4.f,
89  3.f, -2.f, 10.f, 1.f
90  };
91 
92  std::vector<float> expectedOutputValues
93  {
94  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
95  -7.00104f, -12.00104f, -0.00105f, -9.00104f
96  };
97 
99  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
100  descriptor.m_Parameters.m_Axis = -1; // default axis
101 
102  return LogSoftmaxTestImpl<ArmnnType, 4>(
103  workloadFactory,
104  memoryManager,
105  tensorHandleFactory,
106  inputTensorInfo,
107  outputTensorInfo,
108  inputValues,
109  expectedOutputValues,
110  descriptor);
111 }
112 
113 template<armnn::DataType ArmnnType, typename T>
115  armnn::IWorkloadFactory& workloadFactory,
117  const armnn::ITensorHandleFactory& tensorHandleFactory)
118 {
119  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
120 
121  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
122  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
123 
124  std::vector<float> inputValues
125  {
126  0.f, -6.f, 2.f, 4.f,
127  3.f, -2.f, 10.f, 1.f
128  };
129 
130  std::vector<float> expectedOutputValues
131  {
132  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
133  -7.00104f, -12.00104f, -0.00105f, -9.00104f
134  };
135 
137  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
138  descriptor.m_Parameters.m_Axis = 3; // positive axis
139 
140  return LogSoftmaxTestImpl<ArmnnType, 4>(
141  workloadFactory,
142  memoryManager,
143  tensorHandleFactory,
144  inputTensorInfo,
145  outputTensorInfo,
146  inputValues,
147  expectedOutputValues,
148  descriptor);
149 }
150 
151 template<armnn::DataType ArmnnType, typename T>
153  armnn::IWorkloadFactory& workloadFactory,
155  const armnn::ITensorHandleFactory& tensorHandleFactory)
156 {
157  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
158 
159  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
160  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
161 
162  std::vector<float> inputValues
163  {
164  0.0f, -0.6f, 0.2f, 0.4f,
165  0.3f, -0.2f, 1.0f, 0.1f
166  };
167 
168  std::vector<float> expectedOutputValues
169  {
170  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
171  -7.00104f, -12.00104f, -0.00105f, -9.00104f
172  };
173 
175  descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
176  descriptor.m_Parameters.m_Axis = 3; // positive axis
177 
178  return LogSoftmaxTestImpl<ArmnnType, 4>(
179  workloadFactory,
180  memoryManager,
181  tensorHandleFactory,
182  inputTensorInfo,
183  outputTensorInfo,
184  inputValues,
185  expectedOutputValues,
186  descriptor);
187 }
188 
189 template<armnn::DataType ArmnnType, typename T>
191  armnn::IWorkloadFactory& workloadFactory,
193  const armnn::ITensorHandleFactory& tensorHandleFactory)
194 {
195  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
196 
197  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
198  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
199 
200  std::vector<float> inputValues
201  {
202  0.f, -6.f, 2.f, 4.f,
203  3.f, -2.f, 10.f, 1.f
204  };
205 
206  std::vector<float> expectedOutputValues
207  {
208  -3.048587f, -4.018149f, -8.000336f, -0.048587f,
209  -0.048587f, -0.018149f, -0.000335f, -3.048587f
210  };
211 
213  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
214  descriptor.m_Parameters.m_Axis = -2; // negative axis
215 
216  return LogSoftmaxTestImpl<ArmnnType, 4>(
217  workloadFactory,
218  memoryManager,
219  tensorHandleFactory,
220  inputTensorInfo,
221  outputTensorInfo,
222  inputValues,
223  expectedOutputValues,
224  descriptor);
225 }
226 
228 LogSoftmaxTest1<armnn::DataType::Float32>(
229  armnn::IWorkloadFactory& workloadFactory,
231  const armnn::ITensorHandleFactory& tensorHandleFactory);
232 
233 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
234 LogSoftmaxTest2<armnn::DataType::Float32>(
235  armnn::IWorkloadFactory& workloadFactory,
237  const armnn::ITensorHandleFactory& tensorHandleFactory);
238 
239 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
240 LogSoftmaxTest3<armnn::DataType::Float32>(
241  armnn::IWorkloadFactory& workloadFactory,
243  const armnn::ITensorHandleFactory& tensorHandleFactory);
244 
245 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
246 LogSoftmaxTest4<armnn::DataType::Float32>(
247  armnn::IWorkloadFactory& workloadFactory,
249  const armnn::ITensorHandleFactory& tensorHandleFactory);
250 
252 LogSoftmaxTest1<armnn::DataType::Float16>(
253  armnn::IWorkloadFactory& workloadFactory,
255  const armnn::ITensorHandleFactory& tensorHandleFactory);
256 
257 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
258 LogSoftmaxTest2<armnn::DataType::Float16>(
259  armnn::IWorkloadFactory& workloadFactory,
261  const armnn::ITensorHandleFactory& tensorHandleFactory);
262 
263 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
264 LogSoftmaxTest3<armnn::DataType::Float16>(
265  armnn::IWorkloadFactory& workloadFactory,
267  const armnn::ITensorHandleFactory& tensorHandleFactory);
268 
269 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
270 LogSoftmaxTest4<armnn::DataType::Float16>(
271  armnn::IWorkloadFactory& workloadFactory,
273  const armnn::ITensorHandleFactory& tensorHandleFactory);
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
virtual std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
float m_Beta
Exponentiation value.
LayerTestResult< T, 4 > LogSoftmaxTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
void IgnoreUnused(Ts &&...)
DataType
Definition: Types.hpp:35
LayerTestResult< T, 4 > LogSoftmaxTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< T, 4 > LogSoftmaxTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
LayerTestResult< T, 4 > LogSoftmaxTest4(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)