From 10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab Mon Sep 17 00:00:00 2001
From: David Beck
Date: Wed, 19 Sep 2018 12:03:20 +0100
Subject: IVGCVSW-1897 : build infrastructure for the src/backends folder

Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
---
 src/backends/test/SoftmaxTestImpl.hpp | 153 ++++++++++++++++++++++++++++++++++
 1 file changed, 153 insertions(+)
 create mode 100644 src/backends/test/SoftmaxTestImpl.hpp

diff --git a/src/backends/test/SoftmaxTestImpl.hpp b/src/backends/test/SoftmaxTestImpl.hpp
new file mode 100644
index 0000000000..5bc13fa21c
--- /dev/null
+++ b/src/backends/test/SoftmaxTestImpl.hpp
@@ -0,0 +1,153 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+#include <backends/WorkloadInfo.hpp>
+
+#include "test/TensorHelpers.hpp"
+#include "QuantizeHelper.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <algorithm>
+
+template<typename T>
+LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
+{
+    using std::exp;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 4 };
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    float qScale = 1.f / 256.f;
+    int qOffset = 0;
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+    LayerTestResult<T, 2> ret(outputTensorInfo);
+
+    // Each row is independently softmax'd.
+    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, 0, {
+            0.f, 1.f, 0.f, 0.f,
+            .5f, 0.f, 0.f, 0.f,
+        })));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SoftmaxQueueDescriptor data;
+    data.m_Parameters.m_Beta = beta;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
+    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
+                    exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
+    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
+    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
+                    exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
+    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
+
+    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
+            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
+        })));
+
+    return ret;
+}
+
+template<typename T>
+LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                             armnn::IWorkloadFactory& refWorkloadFactory,
+                                             float beta)
+{
+
+    const int batchSize = 20;
+    const int channels = 30;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { batchSize, channels };
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    float qScale = 1.f / 256.f;
+    int qOffset = 0;
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+
+    LayerTestResult<T, 2> ret(outputTensorInfo);
+    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SoftmaxQueueDescriptor data;
+    data.m_Parameters.m_Beta = beta;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+
+    armnn::SoftmaxQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
+
+    return ret;
+}
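
A note on the two helpers in this patch. SimpleSoftmaxTestImpl checks a backend against analytically computed values: the expected outputs use the numerically stable form of softmax, subtracting each row's maximum (1.0 for the first row, 0.5 for the second) before exponentiating, which leaves the result unchanged because the exp(-beta * max) factor cancels between numerator and denominator. CompareSoftmaxTestImpl instead runs the same random input through a second, reference workload factory and stores that backend's output as the expectation. For illustration only, here is a minimal sketch of how a per-backend unit test might drive the first helper; it is not part of this patch, and armnn::RefWorkloadFactory (and its header path), CompareTensors from test/TensorHelpers.hpp, and the Boost.Test macros are assumed to be provided by the surrounding ArmNN test code:

// Hypothetical caller, for illustration only; not part of commit 10b4dfd.
#include <boost/test/unit_test.hpp>

#include <backends/RefWorkloadFactory.hpp> // assumed header for the CPU reference backend

BOOST_AUTO_TEST_CASE(SimpleSoftmaxBeta1)
{
    // Any IWorkloadFactory implementation could be substituted here to
    // exercise a different backend with the same test body.
    armnn::RefWorkloadFactory workloadFactory;

    // Run the 2x4 softmax workload with beta == 1 and compare the backend's
    // output against the analytically computed expectation.
    LayerTestResult<float, 2> result = SimpleSoftmaxTestImpl<float>(workloadFactory, 1.0f);
    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
}

Instantiating the same template with uint8_t would exercise the quantized path, which is why the helper sets a quantization scale of 1/256 even for the float case.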