ArmNN
 20.08
LogSoftmaxTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "LogSoftmaxTestImpl.hpp"

#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
21 
22 namespace
23 {
24 
25 template<armnn::DataType ArmnnType,
26  std::size_t NumDims,
27  typename T = armnn::ResolveType<ArmnnType>>
28 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29  armnn::IWorkloadFactory& workloadFactory,
31  const armnn::TensorInfo& inputInfo,
32  const armnn::TensorInfo& outputInfo,
33  const std::vector<float>& inputValues,
34  const std::vector<float>& expectedOutputValues,
36  float qScale = 1.0f,
37  int32_t qOffset = 0)
38 {
39  IgnoreUnused(memoryManager);
40  LayerTestResult<T, NumDims> result(outputInfo);
41  result.outputExpected =
42  MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
43 
45  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
46  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
48 
50 
51  AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
52  AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
53 
54  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
55 
56  inputHandle->Allocate();
57  outputHandle->Allocate();
58 
59  auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
60  CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
61 
62  ExecuteWorkload(*workload, memoryManager);
63 
64  CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
65 
66  return result;
67 }
68 
69 } // anonymous namespace
70 
71 template<armnn::DataType ArmnnType, typename T>
73  armnn::IWorkloadFactory& workloadFactory,
75 {
76  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
77 
78  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
79  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
80 
81  std::vector<float> inputValues
82  {
83  0.f, -6.f, 2.f, 4.f,
84  3.f, -2.f, 10.f, 1.f
85  };
86 
87  std::vector<float> expectedOutputValues
88  {
89  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
90  -7.00104f, -12.00104f, -0.00105f, -9.00104f
91  };
92 
94  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
95  descriptor.m_Parameters.m_Axis = -1; // default axis
96 
97  return LogSoftmaxTestImpl<ArmnnType, 4>(
98  workloadFactory,
99  memoryManager,
100  inputTensorInfo,
101  outputTensorInfo,
102  inputValues,
103  expectedOutputValues,
104  descriptor);
105 }
106 
107 template<armnn::DataType ArmnnType, typename T>
109  armnn::IWorkloadFactory& workloadFactory,
111 {
112  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
113 
114  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
115  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
116 
117  std::vector<float> inputValues
118  {
119  0.f, -6.f, 2.f, 4.f,
120  3.f, -2.f, 10.f, 1.f
121  };
122 
123  std::vector<float> expectedOutputValues
124  {
125  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
126  -7.00104f, -12.00104f, -0.00105f, -9.00104f
127  };
128 
130  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
131  descriptor.m_Parameters.m_Axis = 3; // positive axis
132 
133  return LogSoftmaxTestImpl<ArmnnType, 4>(
134  workloadFactory,
135  memoryManager,
136  inputTensorInfo,
137  outputTensorInfo,
138  inputValues,
139  expectedOutputValues,
140  descriptor);
141 }
142 
143 template<armnn::DataType ArmnnType, typename T>
145  armnn::IWorkloadFactory& workloadFactory,
147 {
148  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
149 
150  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
151  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
152 
153  std::vector<float> inputValues
154  {
155  0.0f, -0.6f, 0.2f, 0.4f,
156  0.3f, -0.2f, 1.0f, 0.1f
157  };
158 
159  std::vector<float> expectedOutputValues
160  {
161  -4.14297f, -10.14297f, -2.14297f, -0.14297f,
162  -7.00104f, -12.00104f, -0.00105f, -9.00104f
163  };
164 
166  descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
167  descriptor.m_Parameters.m_Axis = 3; // positive axis
168 
169  return LogSoftmaxTestImpl<ArmnnType, 4>(
170  workloadFactory,
171  memoryManager,
172  inputTensorInfo,
173  outputTensorInfo,
174  inputValues,
175  expectedOutputValues,
176  descriptor);
177 }
178 
179 template<armnn::DataType ArmnnType, typename T>
181  armnn::IWorkloadFactory& workloadFactory,
183 {
184  const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
185 
186  armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
187  armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
188 
189  std::vector<float> inputValues
190  {
191  0.f, -6.f, 2.f, 4.f,
192  3.f, -2.f, 10.f, 1.f
193  };
194 
195  std::vector<float> expectedOutputValues
196  {
197  -3.048587f, -4.018149f, -8.000336f, -0.048587f,
198  -0.048587f, -0.018149f, -0.000335f, -3.048587f
199  };
200 
202  descriptor.m_Parameters.m_Beta = 1.0f; // default beta
203  descriptor.m_Parameters.m_Axis = -2; // negative axis
204 
205  return LogSoftmaxTestImpl<ArmnnType, 4>(
206  workloadFactory,
207  memoryManager,
208  inputTensorInfo,
209  outputTensorInfo,
210  inputValues,
211  expectedOutputValues,
212  descriptor);
213 }
214 
216 LogSoftmaxTest1<armnn::DataType::Float32>(
217  armnn::IWorkloadFactory& workloadFactory,
219 
220 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
221 LogSoftmaxTest2<armnn::DataType::Float32>(
222  armnn::IWorkloadFactory& workloadFactory,
224 
225 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
226 LogSoftmaxTest3<armnn::DataType::Float32>(
227  armnn::IWorkloadFactory& workloadFactory,
229 
230 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
231 LogSoftmaxTest4<armnn::DataType::Float32>(
232  armnn::IWorkloadFactory& workloadFactory,
234 
236 LogSoftmaxTest1<armnn::DataType::Float16>(
237  armnn::IWorkloadFactory& workloadFactory,
239 
240 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
241 LogSoftmaxTest2<armnn::DataType::Float16>(
242  armnn::IWorkloadFactory& workloadFactory,
244 
245 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
246 LogSoftmaxTest3<armnn::DataType::Float16>(
247  armnn::IWorkloadFactory& workloadFactory,
249 
250 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
251 LogSoftmaxTest4<armnn::DataType::Float16>(
252  armnn::IWorkloadFactory& workloadFactory,
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
float m_Beta
Exponentiation value.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
LayerTestResult< T, 4 > LogSoftmaxTest4(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > LogSoftmaxTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
LayerTestResult< T, 4 > LogSoftmaxTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > LogSoftmaxTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)