// ArmNN 21.05 — source listing of LogSoftmaxTestImpl.cpp
// (extracted from the generated Doxygen documentation page; the
// "Go to the documentation of this file" link refers to that page).
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "LogSoftmaxTestImpl.hpp"
7 
8 #include <Half.hpp>
9 #include <QuantizeHelper.hpp>
10 #include <ResolveType.hpp>
11 
12 
16 
19 
20 #include <test/TensorHelpers.hpp>
21 
22 namespace
23 {
24 
25 template<armnn::DataType ArmnnType,
26  std::size_t NumDims,
27  typename T = armnn::ResolveType<ArmnnType>>
28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29  armnn::IWorkloadFactory& workloadFactory,
31  const armnn::ITensorHandleFactory& tensorHandleFactory,
32  const armnn::TensorInfo& inputInfo,
33  const armnn::TensorInfo& outputInfo,
34  const std::vector<float>& inputValues,
35  const std::vector<float>& expectedOutputValues,
37  float qScale = 1.0f,
38  int32_t qOffset = 0)
39 {
40  IgnoreUnused(memoryManager);
41  LayerTestResult<T, NumDims> result(outputInfo);
42  result.outputExpected =
43  MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
44 
45  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
46  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
47 
49 
50  AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
51  AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
52 
53  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
54 
55  inputHandle->Allocate();
56  outputHandle->Allocate();
57 
58  auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
59  CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
60 
61  ExecuteWorkload(*workload, memoryManager);
62 
63  CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
64 
65  return result;
66 }
67 
68 } // anonymous namespace
69 
70 template<armnn::DataType ArmnnType, typename T>
72  armnn::IWorkloadFactory& workloadFactory,
74  const armnn::ITensorHandleFactory& tensorHandleFactory)
75 {
76  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
77 
78  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
79  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
80 
81  std::vector<float> inputValues
82  {
83  0.f, -6.f, 2.f, 4.f,
84  3.f, -2.f, 10.f, 1.f
85  };
86 
87  std::vector<float> expectedOutputValues
88  {
89  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
90  -7.00104f, -12.00104f, -0.00105f, -9.00104f
91  };
92 
94  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
95  descriptor.m_Parameters.m_Axis = -1; // default axis
96 
97  return LogSoftmaxTestImpl<ArmnnType, 4>(
98  workloadFactory,
99  memoryManager,
100  tensorHandleFactory,
101  inputTensorInfo,
102  outputTensorInfo,
103  inputValues,
104  expectedOutputValues,
105  descriptor);
106 }
107 
108 template<armnn::DataType ArmnnType, typename T>
110  armnn::IWorkloadFactory& workloadFactory,
112  const armnn::ITensorHandleFactory& tensorHandleFactory)
113 {
114  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
115 
116  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
117  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
118 
119  std::vector<float> inputValues
120  {
121  0.f, -6.f, 2.f, 4.f,
122  3.f, -2.f, 10.f, 1.f
123  };
124 
125  std::vector<float> expectedOutputValues
126  {
127  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
128  -7.00104f, -12.00104f, -0.00105f, -9.00104f
129  };
130 
132  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
133  descriptor.m_Parameters.m_Axis = 3; // positive axis
134 
135  return LogSoftmaxTestImpl<ArmnnType, 4>(
136  workloadFactory,
137  memoryManager,
138  tensorHandleFactory,
139  inputTensorInfo,
140  outputTensorInfo,
141  inputValues,
142  expectedOutputValues,
143  descriptor);
144 }
145 
146 template<armnn::DataType ArmnnType, typename T>
148  armnn::IWorkloadFactory& workloadFactory,
150  const armnn::ITensorHandleFactory& tensorHandleFactory)
151 {
152  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
153 
154  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
155  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
156 
157  std::vector<float> inputValues
158  {
159  0.0f, -0.6f, 0.2f, 0.4f,
160  0.3f, -0.2f, 1.0f, 0.1f
161  };
162 
163  std::vector<float> expectedOutputValues
164  {
165  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
166  -7.00104f, -12.00104f, -0.00105f, -9.00104f
167  };
168 
170  descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
171  descriptor.m_Parameters.m_Axis = 3; // positive axis
172 
173  return LogSoftmaxTestImpl<ArmnnType, 4>(
174  workloadFactory,
175  memoryManager,
176  tensorHandleFactory,
177  inputTensorInfo,
178  outputTensorInfo,
179  inputValues,
180  expectedOutputValues,
181  descriptor);
182 }
183 
184 template<armnn::DataType ArmnnType, typename T>
186  armnn::IWorkloadFactory& workloadFactory,
188  const armnn::ITensorHandleFactory& tensorHandleFactory)
189 {
190  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
191 
192  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
193  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
194 
195  std::vector<float> inputValues
196  {
197  0.f, -6.f, 2.f, 4.f,
198  3.f, -2.f, 10.f, 1.f
199  };
200 
201  std::vector<float> expectedOutputValues
202  {
203  -3.048587f, -4.018149f, -8.000336f, -0.048587f,
204  -0.048587f, -0.018149f, -0.000335f, -3.048587f
205  };
206 
208  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
209  descriptor.m_Parameters.m_Axis = -2; // negative axis
210 
211  return LogSoftmaxTestImpl<ArmnnType, 4>(
212  workloadFactory,
213  memoryManager,
214  tensorHandleFactory,
215  inputTensorInfo,
216  outputTensorInfo,
217  inputValues,
218  expectedOutputValues,
219  descriptor);
220 }
221 
223 LogSoftmaxTest1<armnn::DataType::Float32>(
224  armnn::IWorkloadFactory& workloadFactory,
226  const armnn::ITensorHandleFactory& tensorHandleFactory);
227 
228 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
229 LogSoftmaxTest2<armnn::DataType::Float32>(
230  armnn::IWorkloadFactory& workloadFactory,
232  const armnn::ITensorHandleFactory& tensorHandleFactory);
233 
234 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
235 LogSoftmaxTest3<armnn::DataType::Float32>(
236  armnn::IWorkloadFactory& workloadFactory,
238  const armnn::ITensorHandleFactory& tensorHandleFactory);
239 
240 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
241 LogSoftmaxTest4<armnn::DataType::Float32>(
242  armnn::IWorkloadFactory& workloadFactory,
244  const armnn::ITensorHandleFactory& tensorHandleFactory);
245 
247 LogSoftmaxTest1<armnn::DataType::Float16>(
248  armnn::IWorkloadFactory& workloadFactory,
250  const armnn::ITensorHandleFactory& tensorHandleFactory);
251 
252 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
253 LogSoftmaxTest2<armnn::DataType::Float16>(
254  armnn::IWorkloadFactory& workloadFactory,
256  const armnn::ITensorHandleFactory& tensorHandleFactory);
257 
258 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
259 LogSoftmaxTest3<armnn::DataType::Float16>(
260  armnn::IWorkloadFactory& workloadFactory,
262  const armnn::ITensorHandleFactory& tensorHandleFactory);
263 
264 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
265 LogSoftmaxTest4<armnn::DataType::Float16>(
266  armnn::IWorkloadFactory& workloadFactory,
268  const armnn::ITensorHandleFactory& tensorHandleFactory);
// --- Doxygen cross-references scraped with this listing (for reference) ---
// int SoftmaxDescriptor::m_Axis
//     Scalar, defaulted to the last index (-1), specifying the dimension the
//     activation will be performed on.
// float SoftmaxDescriptor::m_Beta
//     Exponentiation value.
// LayerTestResult<T, 4> LogSoftmaxTest1(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory)
// LayerTestResult<T, 4> LogSoftmaxTest2(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory)
// LayerTestResult<T, 4> LogSoftmaxTest3(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory)
// LayerTestResult<T, 4> LogSoftmaxTest4(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory)
// typename ResolveTypeImpl<DT>::Type ResolveType           (ResolveType.hpp:79)
// void IgnoreUnused(Ts&&...)
// enum class DataType                                      (Types.hpp:36)
// std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
// void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
// void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
// armnn::WorkloadInfo: contains information about inputs and outputs to a layer.