33 const std::vector<float>& inputValues,
34 const std::vector<float>& expectedOutputValues,
39 boost::ignore_unused(memoryManager);
41 result.outputExpected =
42 MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
44 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
45 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
49 AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
50 AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
52 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
54 inputHandle->Allocate();
55 outputHandle->Allocate();
57 auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
69 template<armnn::DataType ArmnnType,
typename T>
79 std::vector<float> inputValues
85 std::vector<float> expectedOutputValues
87 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
88 -7.00104f, -12.00104f, -0.00105f, -9.00104f
95 return LogSoftmaxTestImpl<ArmnnType, 4>(
101 expectedOutputValues,
105 template<armnn::DataType ArmnnType,
typename T>
115 std::vector<float> inputValues
121 std::vector<float> expectedOutputValues
123 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
124 -7.00104f, -12.00104f, -0.00105f, -9.00104f
131 return LogSoftmaxTestImpl<ArmnnType, 4>(
137 expectedOutputValues,
141 template<armnn::DataType ArmnnType,
typename T>
151 std::vector<float> inputValues
153 0.0f, -0.6f, 0.2f, 0.4f,
154 0.3f, -0.2f, 1.0f, 0.1f
157 std::vector<float> expectedOutputValues
159 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
160 -7.00104f, -12.00104f, -0.00105f, -9.00104f
167 return LogSoftmaxTestImpl<ArmnnType, 4>(
173 expectedOutputValues,
177 template<armnn::DataType ArmnnType,
typename T>
187 std::vector<float> inputValues
193 std::vector<float> expectedOutputValues
195 -3.048587f, -4.018149f, -8.000336f, -0.048587f,
196 -0.048587f, -0.018149f, -0.000335f, -3.048587f
203 return LogSoftmaxTestImpl<ArmnnType, 4>(
209 expectedOutputValues,
214 LogSoftmaxTest1<armnn::DataType::Float32>(
218 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
219 LogSoftmaxTest2<armnn::DataType::Float32>(
223 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
224 LogSoftmaxTest3<armnn::DataType::Float32>(
228 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
229 LogSoftmaxTest4<armnn::DataType::Float32>(
234 LogSoftmaxTest1<armnn::DataType::Float16>(
238 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
239 LogSoftmaxTest2<armnn::DataType::Float16>(
243 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
244 LogSoftmaxTest3<armnn::DataType::Float16>(
248 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
249 LogSoftmaxTest4<armnn::DataType::Float16>(
LayerTestResult< T, 4 > LogSoftmaxTest4(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerDescriptor m_Parameters
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
typename ResolveTypeImpl< DT >::Type ResolveType
LayerTestResult< T, 4 > LogSoftmaxTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
float m_Beta
Exponentiation value.
LayerTestResult< T, 4 > LogSoftmaxTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > LogSoftmaxTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.