ArmNN
 20.02
LogSoftmaxTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "LogSoftmaxTestImpl.hpp"

#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
21 
22 namespace
23 {
24 
25 template<armnn::DataType ArmnnType,
26  std::size_t NumDims,
27  typename T = armnn::ResolveType<ArmnnType>>
28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29  armnn::IWorkloadFactory& workloadFactory,
31  const armnn::TensorInfo& inputInfo,
32  const armnn::TensorInfo& outputInfo,
33  const std::vector<float>& inputValues,
34  const std::vector<float>& expectedOutputValues,
36  float qScale = 1.0f,
37  int32_t qOffset = 0)
38 {
39  IgnoreUnused(memoryManager);
40  LayerTestResult<T, NumDims> result(outputInfo);
41  result.outputExpected =
42  MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
43 
44  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
45  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
46 
48 
49  AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
50  AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
51 
52  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
53 
54  inputHandle->Allocate();
55  outputHandle->Allocate();
56 
57  auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
58  CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
59 
60  workload->Execute();
61 
62  CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
63 
64  return result;
65 }
66 
67 } // anonymous namespace
68 
69 template<armnn::DataType ArmnnType, typename T>
71  armnn::IWorkloadFactory& workloadFactory,
73 {
74  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
75 
76  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
77  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
78 
79  std::vector<float> inputValues
80  {
81  0.f, -6.f, 2.f, 4.f,
82  3.f, -2.f, 10.f, 1.f
83  };
84 
85  std::vector<float> expectedOutputValues
86  {
87  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
88  -7.00104f, -12.00104f, -0.00105f, -9.00104f
89  };
90 
92  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
93  descriptor.m_Parameters.m_Axis = -1; // default axis
94 
95  return LogSoftmaxTestImpl<ArmnnType, 4>(
96  workloadFactory,
97  memoryManager,
98  inputTensorInfo,
99  outputTensorInfo,
100  inputValues,
101  expectedOutputValues,
102  descriptor);
103 }
104 
105 template<armnn::DataType ArmnnType, typename T>
107  armnn::IWorkloadFactory& workloadFactory,
109 {
110  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
111 
112  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
113  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
114 
115  std::vector<float> inputValues
116  {
117  0.f, -6.f, 2.f, 4.f,
118  3.f, -2.f, 10.f, 1.f
119  };
120 
121  std::vector<float> expectedOutputValues
122  {
123  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
124  -7.00104f, -12.00104f, -0.00105f, -9.00104f
125  };
126 
128  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
129  descriptor.m_Parameters.m_Axis = 3; // positive axis
130 
131  return LogSoftmaxTestImpl<ArmnnType, 4>(
132  workloadFactory,
133  memoryManager,
134  inputTensorInfo,
135  outputTensorInfo,
136  inputValues,
137  expectedOutputValues,
138  descriptor);
139 }
140 
141 template<armnn::DataType ArmnnType, typename T>
143  armnn::IWorkloadFactory& workloadFactory,
145 {
146  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
147 
148  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
149  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
150 
151  std::vector<float> inputValues
152  {
153  0.0f, -0.6f, 0.2f, 0.4f,
154  0.3f, -0.2f, 1.0f, 0.1f
155  };
156 
157  std::vector<float> expectedOutputValues
158  {
159  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
160  -7.00104f, -12.00104f, -0.00105f, -9.00104f
161  };
162 
164  descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
165  descriptor.m_Parameters.m_Axis = 3; // positive axis
166 
167  return LogSoftmaxTestImpl<ArmnnType, 4>(
168  workloadFactory,
169  memoryManager,
170  inputTensorInfo,
171  outputTensorInfo,
172  inputValues,
173  expectedOutputValues,
174  descriptor);
175 }
176 
177 template<armnn::DataType ArmnnType, typename T>
179  armnn::IWorkloadFactory& workloadFactory,
181 {
182  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
183 
184  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
185  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
186 
187  std::vector<float> inputValues
188  {
189  0.f, -6.f, 2.f, 4.f,
190  3.f, -2.f, 10.f, 1.f
191  };
192 
193  std::vector<float> expectedOutputValues
194  {
195  -3.048587f, -4.018149f, -8.000336f, -0.048587f,
196  -0.048587f, -0.018149f, -0.000335f, -3.048587f
197  };
198 
200  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
201  descriptor.m_Parameters.m_Axis = -2; // negative axis
202 
203  return LogSoftmaxTestImpl<ArmnnType, 4>(
204  workloadFactory,
205  memoryManager,
206  inputTensorInfo,
207  outputTensorInfo,
208  inputValues,
209  expectedOutputValues,
210  descriptor);
211 }
212 
214 LogSoftmaxTest1<armnn::DataType::Float32>(
215  armnn::IWorkloadFactory& workloadFactory,
217 
218 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
219 LogSoftmaxTest2<armnn::DataType::Float32>(
220  armnn::IWorkloadFactory& workloadFactory,
222 
223 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
224 LogSoftmaxTest3<armnn::DataType::Float32>(
225  armnn::IWorkloadFactory& workloadFactory,
227 
228 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
229 LogSoftmaxTest4<armnn::DataType::Float32>(
230  armnn::IWorkloadFactory& workloadFactory,
232 
234 LogSoftmaxTest1<armnn::DataType::Float16>(
235  armnn::IWorkloadFactory& workloadFactory,
237 
238 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
239 LogSoftmaxTest2<armnn::DataType::Float16>(
240  armnn::IWorkloadFactory& workloadFactory,
242 
243 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
244 LogSoftmaxTest3<armnn::DataType::Float16>(
245  armnn::IWorkloadFactory& workloadFactory,
247 
248 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
249 LogSoftmaxTest4<armnn::DataType::Float16>(
250  armnn::IWorkloadFactory& workloadFactory,
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
float m_Beta
Exponentiation value.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
LayerTestResult< T, 4 > LogSoftmaxTest4(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > LogSoftmaxTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
DataType
Definition: Types.hpp:32
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
LayerTestResult< T, 4 > LogSoftmaxTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > LogSoftmaxTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)