Diffstat (limited to 'delegate/src')
-rw-r--r-- | delegate/src/Gather.hpp                |  96
-rw-r--r-- | delegate/src/test/GatherTest.cpp       | 117
-rw-r--r-- | delegate/src/test/GatherTestHelper.hpp | 181
3 files changed, 378 insertions, 16 deletions
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 98d8dc9656..9ed0fe15c1 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -5,29 +5,93 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
+#include "DelegateUtils.hpp"
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <vector>
 
 namespace armnnDelegate
 {
-
 TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
-                                 int32_t gatherOperatorCode)
+                                 int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        gatherOperatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
 
-    return kTfLiteError;
-}
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* gatherParameters = reinterpret_cast<TfLiteGatherParams*>(tfLiteNode->builtin_data);
+    auto axis = gatherParameters->axis;
 
-} // namespace armnnDelegate
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    armnn::GatherDescriptor gatherDescriptor;
+    gatherDescriptor.m_Axis = axis;
+
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
+    auto outputDimensions = outputTensorInfo.GetNumDimensions();
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+            "TfLiteArmnnDelegate: Operation has invalid axis: %d. It is out of bounds [-%d, %d)",
+            axis, inputDimensions, inputDimensions);
+        return kTfLiteError;
+    }
+    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+            "Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
+            outputDimensions, inputDimensions, indicesDimensions);
+        return kTfLiteError;
+    }
+
+    if (!delegateData.m_Network)
+    {
+        // Check if supported
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsGatherSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   indicesTensorInfo,
+                                   outputTensorInfo,
+                                   gatherDescriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+    ARMNN_ASSERT(layer != nullptr);
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    Connect(layer, tfLiteNode, delegateData);
+
+    return kTfLiteOk;
+}
+} // namespace armnnDelegate
\ No newline at end of file
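The validation in VisitGatherOperator encodes two shape rules: axis must lie in [-inputDimensions, inputDimensions), and the output rank must equal input rank + indices rank - 1. A minimal standalone sketch of those rules (not part of the commit; all names are illustrative):

#include <cassert>
#include <cstdint>

// Gather shape rule: axis within [-inputRank, inputRank) and
// outputRank == inputRank + indicesRank - 1.
bool IsValidGatherShape(int32_t axis, int32_t inputRank,
                        uint32_t indicesRank, uint32_t outputRank)
{
    const bool axisInRange = (axis >= -inputRank) && (axis < inputRank);
    const bool rankMatches =
        outputRank == static_cast<uint32_t>(inputRank) + indicesRank - 1;
    return axisInRange && rankMatches;
}

int main()
{
    assert(IsValidGatherShape(0, 1, 1, 1));   // 1-D gather stays 1-D: 1 + 1 - 1 = 1
    assert(!IsValidGatherShape(-2, 1, 1, 1)); // axis out of bounds for a 1-D input
    assert(!IsValidGatherShape(0, 2, 1, 3));  // output rank must be 2 + 1 - 1 = 2
    return 0;
}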
diff --git a/delegate/src/test/GatherTest.cpp b/delegate/src/test/GatherTest.cpp
new file mode 100644
index 0000000000..6dd015173c
--- /dev/null
+++ b/delegate/src/test/GatherTest.cpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// GATHER Operator
+void GatherUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> paramsShape{8};
+    std::vector<int32_t> indicesShape{3};
+    std::vector<int32_t> expectedOutputShape{3};
+
+    int32_t axis = 0;
+    std::vector<uint8_t> paramsValues{1, 2, 3, 4, 5, 6, 7, 8};
+    std::vector<int32_t> indicesValues{7, 6, 5};
+    std::vector<uint8_t> expectedOutputValues{8, 7, 6};
+
+    GatherTest<uint8_t>(::tflite::TensorType_UINT8,
+                        backends,
+                        paramsShape,
+                        indicesShape,
+                        expectedOutputShape,
+                        axis,
+                        paramsValues,
+                        indicesValues,
+                        expectedOutputValues);
+}
+
+void GatherFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> paramsShape{8};
+    std::vector<int32_t> indicesShape{3};
+    std::vector<int32_t> expectedOutputShape{3};
+
+    int32_t axis = 0;
+    std::vector<float> paramsValues{1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f};
+    std::vector<int32_t> indicesValues{7, 6, 5};
+    std::vector<float> expectedOutputValues{8.8f, 7.7f, 6.6f};
+
+    GatherTest<float>(::tflite::TensorType_FLOAT32,
+                      backends,
+                      paramsShape,
+                      indicesShape,
+                      expectedOutputShape,
+                      axis,
+                      paramsValues,
+                      indicesValues,
+                      expectedOutputValues);
+}
+
+// GATHER Test Suite
+TEST_SUITE("GATHER_CpuRefTests")
+{
+
+TEST_CASE ("GATHER_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_CpuAccTests")
+{
+
+TEST_CASE ("GATHER_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_GpuAccTests")
+{
+
+TEST_CASE ("GATHER_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherFp32Test(backends);
+}
+
+}
+// End of GATHER Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
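The expected outputs in GatherTest.cpp follow from plain gather semantics: with axis 0 and 1-D params, output[i] = params[indices[i]], so indices {7, 6, 5} select {8, 7, 6}. A standalone reference sketch (not part of the commit):

#include <cstdint>
#include <iostream>
#include <vector>

// Reference 1-D gather along axis 0: output[i] = params[indices[i]].
template <typename T>
std::vector<T> GatherAxis0(const std::vector<T>& params,
                           const std::vector<int32_t>& indices)
{
    std::vector<T> output;
    output.reserve(indices.size());
    for (int32_t index : indices)
    {
        output.push_back(params[static_cast<size_t>(index)]);
    }
    return output;
}

int main()
{
    const std::vector<uint8_t> params{1, 2, 3, 4, 5, 6, 7, 8};
    const std::vector<int32_t> indices{7, 6, 5};
    for (auto value : GatherAxis0(params, indices))
    {
        std::cout << +value << ' '; // prints: 8 7 6
    }
    std::cout << '\n';
    return 0;
}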
diff --git a/delegate/src/test/GatherTestHelper.hpp b/delegate/src/test/GatherTestHelper.hpp
new file mode 100644
index 0000000000..d8bfe37842
--- /dev/null
+++ b/delegate/src/test/GatherTestHelper.hpp
@@ -0,0 +1,181 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
+                                          std::vector<int32_t>& paramsShape,
+                                          std::vector<int32_t>& indicesShape,
+                                          const std::vector<int32_t>& expectedOutputShape,
+                                          int32_t axis,
+                                          float quantScale = 1.0f,
+                                          int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({quantScale}),
+                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
+                                                                      paramsShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("params"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              ::tflite::TensorType_INT32,
+                              0,
+                              flatBufferBuilder.CreateString("indices"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
+                                                                      expectedOutputShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateGatherOptions(flatBufferBuilder).Union();
+
+    const std::vector<int> operatorInputs{{0, 1}};
+    const std::vector<int> operatorOutputs{{2}};
+    flatbuffers::Offset<Operator> controlOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                               operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                               operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{{0, 1}};
+    const std::vector<int> subgraphOutputs{{2}};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
+                                                               subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                               subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: GATHER Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, BuiltinOperator_GATHER);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void GatherTest(tflite::TensorType tensorType,
+                std::vector<armnn::BackendId>& backends,
+                std::vector<int32_t>& paramsShape,
+                std::vector<int32_t>& indicesShape,
+                std::vector<int32_t>& expectedOutputShape,
+                int32_t axis,
+                std::vector<T>& paramsValues,
+                std::vector<int32_t>& indicesValues,
+                std::vector<T>& expectedOutputValues,
+                float quantScale = 1.0f,
+                int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateGatherTfLiteModel(tensorType,
+                                                            paramsShape,
+                                                            indicesShape,
+                                                            expectedOutputShape,
+                                                            axis,
+                                                            quantScale,
+                                                            quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 1, indicesValues);
+    armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 1, indicesValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
+}
+} // anonymous namespace
\ No newline at end of file
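The committed tests leave GatherTest's quantScale and quantOffset at their defaults (1.0 and 0). A hypothetical extra case exercising them could look like the following; GatherUint8QuantizedTest is not part of the commit and assumes GatherTestHelper.hpp above is included. Since gather only rearranges stored values, the expected raw outputs are unchanged by the quantisation info:

// Hypothetical, not in the commit: same gather as GatherUint8Test, but with
// non-default quantisation info on the params/output tensors.
void GatherUint8QuantizedTest(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> paramsShape{8};
    std::vector<int32_t> indicesShape{3};
    std::vector<int32_t> expectedOutputShape{3};

    int32_t axis = 0;
    std::vector<uint8_t> paramsValues{1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<int32_t> indicesValues{7, 6, 5};
    std::vector<uint8_t> expectedOutputValues{8, 7, 6};

    GatherTest<uint8_t>(::tflite::TensorType_UINT8,
                        backends,
                        paramsShape,
                        indicesShape,
                        expectedOutputShape,
                        axis,
                        paramsValues,
                        indicesValues,
                        expectedOutputValues,
                        0.1f, // quantScale (assumed example value)
                        2);   // quantOffset (assumed example value)
}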