From c9cc80455ff29fd2c8622c9487ec9c57ade6ea30 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Thu, 1 Nov 2018 16:15:57 +0000
Subject: IVGCVSW-1946: Remove armnn/src from the include paths

Change-Id: I663a0a0fccb43ee960ec070121a59df9db0bb04e
---
 src/backends/test/SoftmaxTestImpl.hpp | 152 ----------------------------------
 1 file changed, 152 deletions(-)
 delete mode 100644 src/backends/test/SoftmaxTestImpl.hpp

diff --git a/src/backends/test/SoftmaxTestImpl.hpp b/src/backends/test/SoftmaxTestImpl.hpp
deleted file mode 100644
index 0bca8be49d..0000000000
--- a/src/backends/test/SoftmaxTestImpl.hpp
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-#include "QuantizeHelper.hpp"
-
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/WorkloadFactory.hpp>
-
-#include <algorithm>
-
-template<typename T>
-LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
-{
-    using std::exp;
-
-    armnn::TensorInfo inputTensorInfo;
-    armnn::TensorInfo outputTensorInfo;
-
-    unsigned int inputShape[] = { 2, 4 };
-
-    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
-    float qScale = 1.f / 256.f;
-    int qOffset = 0;
-    inputTensorInfo.SetQuantizationScale(qScale);
-    inputTensorInfo.SetQuantizationOffset(qOffset);
-
-    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
-    outputTensorInfo.SetQuantizationScale(qScale);
-    outputTensorInfo.SetQuantizationOffset(qOffset);
-
-    LayerTestResult<T, 2> ret(outputTensorInfo);
-
-    // Each row is independently softmax'd.
-    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, 0, {
-            0.f, 1.f, 0.f, 0.f,
-            .5f, 0.f, 0.f, 0.f,
-        })));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::SoftmaxQueueDescriptor data;
-    data.m_Parameters.m_Beta = beta;
-
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
-
-    workloadFactory.Finalize();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
-
-    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
-                    exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
-    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
-    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
-                    exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
-    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
-
-    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-            x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
-            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
-        })));
-
-    return ret;
-}
-
-template<typename T>
-LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                             armnn::IWorkloadFactory& refWorkloadFactory,
-                                             float beta)
-{
-
-    const int batchSize = 20;
-    const int channels = 30;
-
-    armnn::TensorInfo inputTensorInfo;
-    armnn::TensorInfo outputTensorInfo;
-
-    unsigned int inputShape[] = { batchSize, channels };
-
-    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
-    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
-    float qScale = 1.f / 256.f;
-    int qOffset = 0;
-    inputTensorInfo.SetQuantizationScale(qScale);
-    inputTensorInfo.SetQuantizationOffset(qOffset);
-    outputTensorInfo.SetQuantizationScale(qScale);
-    outputTensorInfo.SetQuantizationOffset(qOffset);
-
-
-    LayerTestResult<T, 2> ret(outputTensorInfo);
-    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::SoftmaxQueueDescriptor data;
-    data.m_Parameters.m_Beta = beta;
-
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
-
-
-    armnn::SoftmaxQueueDescriptor refData = data;
-    armnn::WorkloadInfo refInfo = info;
-    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
-    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
-
-    outputHandleRef->Allocate();
-    inputHandleRef->Allocate();
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
-    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
-
-    workloadFactory.Finalize();
-    workload->Execute();
-    refWorkloadFactory.Finalize();
-    workloadRef->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
-    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
-
-    return ret;
-}
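For reference, the expected values that SimpleSoftmaxTestImpl computes by hand implement a numerically stable, beta-scaled softmax over each row: softmax(x)_i = exp((x_i - max(x)) * beta) / sum_j exp((x_j - max(x)) * beta). The 1.0f and 0.5f subtracted inside x0 and x1 are simply the maxima of the two input rows. The following minimal sketch restates that reference math as standalone C++; it deliberately uses no ArmNN types, and SoftmaxRow is a helper written for this note only, not part of the ArmNN API.

// Standalone restatement of the reference math in SimpleSoftmaxTestImpl.
// Illustrative only; SoftmaxRow is not an ArmNN function.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax of one row: subtracting the row maximum before
// exponentiating keeps exp() in a safe range without changing the result,
// because the common factor cancels between numerator and denominator.
std::vector<float> SoftmaxRow(const std::vector<float>& row, float beta)
{
    const float rowMax = *std::max_element(row.begin(), row.end());

    std::vector<float> out(row.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < row.size(); ++i)
    {
        out[i] = std::exp((row[i] - rowMax) * beta);
        sum += out[i];
    }
    for (float& v : out)
    {
        v /= sum;
    }
    return out;
}

int main()
{
    const float beta = 1.0f;

    // The same two rows the deleted test feeds through the Softmax workload;
    // each row is softmax'd independently, mirroring the x0/x1 arrays above.
    const std::vector<std::vector<float>> rows = { { 0.0f, 1.0f, 0.0f, 0.0f },
                                                   { 0.5f, 0.0f, 0.0f, 0.0f } };
    for (const auto& row : rows)
    {
        for (float v : SoftmaxRow(row, beta))
        {
            std::printf("%.5f ", v);
        }
        std::printf("\n");
    }
    return 0;
}

With beta = 1.0f this prints approximately 0.17488 0.47537 0.17488 0.17488 and 0.35466 0.21511 0.21511 0.21511; these are the probabilities the test then passes through QuantizedVector (qScale = 1/256, qOffset = 0) before comparing them against the Softmax workload's output.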