author     Sadik Armagan <sadik.armagan@arm.com>    2021-02-22 18:09:07 +0000
committer  Jim Flynn <jim.flynn@arm.com>            2021-02-25 11:25:27 +0000
commit     29b49cf1771620233f9c16dfa0214e6d8a64c4d7 (patch)
tree       111ac8261a8ad97e01915d99fce80717facd1b3f
parent     4018b21cd41437f1e1b2e528d5521136f39ff2b1 (diff)
download   armnn-29b49cf1771620233f9c16dfa0214e6d8a64c4d7.tar.gz
IVGCVSW-5401 Implement the FILL operator
* Added FILL operator to TfLite ArmNN Delegate
* Added unit tests

Signed-off-by: David Monahan <david.monahan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I335ef469ff773fa4305eb87f6e93ae9c03fc6997
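For context, TfLite's FILL takes two inputs, a 1-D "dims" tensor and a scalar "value" tensor, and produces a tensor of the requested shape with every element set to that value. A minimal sketch of the semantics the delegate must reproduce (the helper name is hypothetical, not part of this patch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    template <typename T>
    std::vector<T> ReferenceFill(const std::vector<int32_t>& dims, T value)
    {
        // The element count is the product of the requested dimensions.
        std::size_t count = 1;
        for (int32_t d : dims)
        {
            count *= static_cast<std::size_t>(d);
        }
        // Every element takes the scalar fill value.
        return std::vector<T>(count, value);
    }

For example, ReferenceFill<float>({2, 2}, 2.0f) yields the four 2.0f values that Fill2dTest below expects.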
-rw-r--r--  delegate/CMakeLists.txt                2
-rw-r--r--  delegate/src/Fill.hpp                 98
-rw-r--r--  delegate/src/test/FillTest.cpp       203
-rw-r--r--  delegate/src/test/FillTestHelper.hpp 160
-rw-r--r--  delegate/src/test/TestUtils.cpp        9
-rw-r--r--  delegate/src/test/TestUtils.hpp        3
6 files changed, 468 insertions(+), 7 deletions(-)
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 981fc9f0bf..5dbe83e014 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -131,6 +131,8 @@ if(BUILD_UNIT_TESTS)
src/test/ElementwiseBinaryTestHelper.hpp
src/test/ElementwiseUnaryTest.cpp
src/test/ElementwiseUnaryTestHelper.hpp
+ src/test/FillTest.cpp
+ src/test/FillTestHelper.hpp
src/test/FullyConnectedTest.cpp
src/test/FullyConnectedTestHelper.hpp
src/test/GatherTest.cpp
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index 99c3c625c2..c9fd159b3e 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -19,15 +19,99 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
int nodeIndex,
- int32_t operatorCode)
+ int32_t tfLiteFillOperatorCode)
{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- return kTfLiteError;
+ switch(tfLiteFillOperatorCode)
+ {
+ case kTfLiteBuiltinFill:
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ break;
+ default:
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFillTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteFillTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteFillOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& fillTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFillTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ armnn::FillDescriptor descriptor;
+ switch (tfLiteFillTensor.type)
+ {
+ case kTfLiteFloat32:
+ descriptor.m_Value = tflite::GetTensorData<float>(&tfLiteFillTensor)[0];
+ break;
+ case kTfLiteInt32:
+ descriptor.m_Value = tflite::GetTensorData<int32_t>(&tfLiteFillTensor)[0];
+ break;
+ default:
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: FILL value data type is not supported in operator #%d node #%d: ",
+ tfLiteFillOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsFillSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo,
+ outInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(tflite::IsConstantTensor(&tfLiteInputTensor))
+ {
+ auto status = ConnectConstant(layer,
+ inputTensorInfo,
+ tfLiteContext,
+ tfLiteInputTensor,
+ delegateData,
+ tfLiteNode->inputs->data[0]);
+ if (status == kTfLiteError)
+ {
+ return status;
+ }
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
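Note the two-phase pattern in VisitFillOperator above: when delegateData.m_Network is null the call only runs the backend support check (IsFillSupported) and returns kTfLiteOk or kTfLiteError, while on the second pass it adds a real Fill layer and connects it. Outside the delegate, an equivalent layer can be built directly against the ArmNN graph API; a minimal sketch, assuming the public armnn headers (the FillDescriptor usage matches the code above; the function name is illustrative):

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    armnn::INetworkPtr BuildFillNetwork(const armnn::TensorInfo& dimsInfo,
                                        const armnn::TensorInfo& outputInfo)
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        // The descriptor carries the scalar fill value, as in VisitFillOperator.
        armnn::FillDescriptor descriptor;
        descriptor.m_Value = 2.0f;

        armnn::IConnectableLayer* input  = network->AddInputLayer(0, "dims");
        armnn::IConnectableLayer* fill   = network->AddFillLayer(descriptor, "fill");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

        // Propagate tensor metadata, then wire input -> fill -> output.
        input->GetOutputSlot(0).SetTensorInfo(dimsInfo);
        fill->GetOutputSlot(0).SetTensorInfo(outputInfo);
        input->GetOutputSlot(0).Connect(fill->GetInputSlot(0));
        fill->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }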
diff --git a/delegate/src/test/FillTest.cpp b/delegate/src/test/FillTest.cpp
new file mode 100644
index 0000000000..50f7f53d56
--- /dev/null
+++ b/delegate/src/test/FillTest.cpp
@@ -0,0 +1,203 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FillTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Fill2dTest(std::vector<armnn::BackendId>& backends,
+ tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 2.0f )
+{
+ std::vector<int32_t> inputShape { 2 };
+ std::vector<int32_t> tensorShape { 2, 2 };
+ std::vector<float> expectedOutputValues = { fill, fill,
+ fill, fill };
+
+ FillTest<float>(fillOperatorCode,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ tensorShape,
+ expectedOutputValues,
+ fill);
+}
+
+void Fill3dTest(std::vector<armnn::BackendId>& backends,
+ tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 5.0f )
+{
+ std::vector<int32_t> inputShape { 3 };
+ std::vector<int32_t> tensorShape { 3, 3, 3 };
+ std::vector<float> expectedOutputValues = { fill, fill, fill,
+ fill, fill, fill,
+ fill, fill, fill,
+
+ fill, fill, fill,
+ fill, fill, fill,
+ fill, fill, fill,
+
+ fill, fill, fill,
+ fill, fill, fill,
+ fill, fill, fill };
+
+ FillTest<float>(fillOperatorCode,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ tensorShape,
+ expectedOutputValues,
+ fill);
+}
+
+void Fill4dTest(std::vector<armnn::BackendId>& backends,
+ tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ float fill = 3.0f )
+{
+ std::vector<int32_t> inputShape { 4 };
+ std::vector<int32_t> tensorShape { 2, 2, 4, 4 };
+ std::vector<float> expectedOutputValues = { fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill,
+ fill, fill, fill, fill };
+
+ FillTest<float>(fillOperatorCode,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ tensorShape,
+ expectedOutputValues,
+ fill);
+}
+
+void FillInt32Test(std::vector<armnn::BackendId>& backends,
+ tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+ int32_t fill = 2 )
+{
+ std::vector<int32_t> inputShape { 2 };
+ std::vector<int32_t> tensorShape { 2, 2 };
+ std::vector<int32_t> expectedOutputValues = { fill, fill,
+ fill, fill };
+
+ FillTest<int32_t>(fillOperatorCode,
+ ::tflite::TensorType_INT32,
+ backends,
+ inputShape,
+ tensorShape,
+ expectedOutputValues,
+ fill);
+}
+
+TEST_SUITE("Fill_CpuRefTests")
+{
+
+TEST_CASE ("Fill2d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ FillInt32Test(backends);
+}
+
+}
+
+TEST_SUITE("Fill_CpuAccTests")
+{
+
+TEST_CASE ("Fill2d_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ FillInt32Test(backends);
+}
+
+}
+
+TEST_SUITE("Fill_GpuAccTests")
+{
+
+TEST_CASE ("Fill2d_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ FillInt32Test(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/FillTestHelper.hpp b/delegate/src/test/FillTestHelper.hpp
new file mode 100644
index 0000000000..e6890a2b2d
--- /dev/null
+++ b/delegate/src/test/FillTestHelper.hpp
@@ -0,0 +1,160 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector<int32_t>& inputShape,
+ const std::vector <int32_t>& tensorShape,
+ const std::vector<T> fillValue)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector({})));
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
+ sizeof(int32_t) * tensorShape.size())));
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
+ sizeof(T) * fillValue.size())));
+
+ std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+ inputShape.size()),
+ tflite::TensorType_INT32,
+ 1,
+ flatBufferBuilder.CreateString("dims"));
+
+ std::vector<int32_t> fillShape = {};
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(fillShape.data(),
+ fillShape.size()),
+ tensorType,
+ 2,
+ flatBufferBuilder.CreateString("value"));
+
+ tensors[2] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+ tensorShape.size()),
+ tensorType,
+ 0,
+ flatBufferBuilder.CreateString("output"));
+
+ tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
+ flatbuffers::Offset<void> operatorBuiltinOptions = CreateFillOptions(flatBufferBuilder).Union();
+
+ // create operator
+ const std::vector<int> operatorInputs{ {0, 1} };
+ const std::vector<int> operatorOutputs{ 2 };
+ flatbuffers::Offset <Operator> fillOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
+
+ const std::vector<int> subgraphInputs{ {0, 1} };
+ const std::vector<int> subgraphOutputs{ 2 };
+ flatbuffers::Offset <SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&fillOperator, 1));
+
+ flatbuffers::Offset <flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: Fill Operator Model");
+ flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+ fillOperatorCode);
+
+ flatbuffers::Offset <Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+
+}
+
+template <typename T>
+void FillTest(tflite::BuiltinOperator fillOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t >& inputShape,
+ std::vector<int32_t >& tensorShape,
+ std::vector<T>& expectedOutputValues,
+ T fillValue)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
+ tensorType,
+ inputShape,
+ tensorShape,
+ {fillValue});
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ CHECK(tfLiteModel != nullptr);
+
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Run EnqueueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+}
+
+} // anonymous namespace
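In CreateFillTfLiteModel above, buffer 0 is the conventional empty sentinel, while buffers 1 and 2 back the "dims" and "value" tensors; making both inputs constant is what lets the delegate read the fill value at parse time. A direct call to the helper might look like this (a sketch mirroring Fill2dTest, values chosen for illustration):

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> inputShape { 2 };
    std::vector<int32_t> tensorShape { 2, 2 };
    std::vector<float> expectedOutputValues(4, 2.0f);
    FillTest<float>(tflite::BuiltinOperator_FILL,
                    ::tflite::TensorType_FLOAT32,
                    backends,
                    inputShape,
                    tensorShape,
                    expectedOutputValues,
                    2.0f);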
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
index 1bc5786112..bbe89904eb 100644
--- a/delegate/src/test/TestUtils.cpp
+++ b/delegate/src/test/TestUtils.cpp
@@ -52,6 +52,15 @@ void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize)
}
}
+void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize)
+{
+ int32_t tolerance = 1;
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
+ }
+}
+
void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize)
{
int8_t tolerance = 1;
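The new int32_t overload follows the existing int8_t and int16_t ones: the max/min form computes the absolute difference without a separate std::abs call, and the tolerance of 1 absorbs off-by-one rounding differences between the two interpreters. A minimal usage sketch (hypothetical values):

    int32_t reference[] = { 2, 2, 2, 2 };
    int32_t actual[]    = { 2, 3, 2, 2 };
    CompareData(reference, actual, 4);   // passes: each pair differs by at most 1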
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index d14e1edb45..8a2756f4c5 100644
--- a/delegate/src/test/TestUtils.hpp
+++ b/delegate/src/test/TestUtils.hpp
@@ -51,6 +51,9 @@ void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize);
/// Can be used to compare int16_t data coming from a tflite interpreter with a tolerance of 1
void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize);
+/// Can be used to compare int32_t data coming from a tflite interpreter with a tolerance of 1
+void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize);
+
/// Can be used to compare Half (Float16) data with a tolerance of limit_of_float*100
void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize);