Diffstat (limited to 'delegate')
-rw-r--r--  delegate/CMakeLists.txt                                      |   4
-rw-r--r--  delegate/cmake/Modules/FindTfLite.cmake                      |  12
-rw-r--r--  delegate/include/armnn_delegate.hpp                          |   1
-rw-r--r--  delegate/src/test/ActivationTestHelper.hpp                   |   4
-rw-r--r--  delegate/src/test/ArgMinMaxTestHelper.hpp                    |  13
-rw-r--r--  delegate/src/test/BatchMatMulTestHelper.hpp                  | 350
-rw-r--r--  delegate/src/test/BatchSpaceTestHelper.hpp                   |  20
-rw-r--r--  delegate/src/test/CastTestHelper.hpp                         |  10
-rw-r--r--  delegate/src/test/ComparisonTestHelper.hpp                   |  13
-rw-r--r--  delegate/src/test/ControlTestHelper.hpp                      |  12
-rw-r--r--  delegate/src/test/ConvolutionTestHelper.hpp                  |  24
-rw-r--r--  delegate/src/test/DelegateOptionsTestHelper.hpp              |  21
-rw-r--r--  delegate/src/test/ElementwiseBinaryTestHelper.hpp            |  15
-rw-r--r--  delegate/src/test/ElementwiseUnaryTestHelper.hpp             |   4
-rw-r--r--  delegate/src/test/FillTestHelper.hpp                         |   9
-rw-r--r--  delegate/src/test/FullyConnectedTestHelper.hpp               |  26
-rw-r--r--  delegate/src/test/GatherNdTestHelper.hpp                     |  13
-rw-r--r--  delegate/src/test/GatherTestHelper.hpp                       |  13
-rw-r--r--  delegate/src/test/LogicalTestHelper.hpp                      |  13
-rw-r--r--  delegate/src/test/LstmTestHelper.hpp                         |  10
-rw-r--r--  delegate/src/test/NormalizationTestHelper.hpp                |  11
-rw-r--r--  delegate/src/test/PackTestHelper.hpp                         |   7
-rw-r--r--  delegate/src/test/PadTestHelper.hpp                          |   6
-rw-r--r--  delegate/src/test/Pooling2dTestHelper.hpp                    |  44
-rw-r--r--  delegate/src/test/Pooling3dTestHelper.hpp                    |   7
-rw-r--r--  delegate/src/test/PreluTestHelper.hpp                        |  14
-rw-r--r--  delegate/src/test/QuantizationTestHelper.hpp                 |  11
-rw-r--r--  delegate/src/test/RedefineTestHelper.hpp                     | 110
-rw-r--r--  delegate/src/test/ReduceTestHelper.hpp                       |  25
-rw-r--r--  delegate/src/test/ResizeTestHelper.hpp                       |  12
-rw-r--r--  delegate/src/test/RoundTestHelper.hpp                        |  10
-rw-r--r--  delegate/src/test/ShapeTestHelper.hpp                        |  10
-rw-r--r--  delegate/src/test/SliceTestHelper.hpp                        |  31
-rw-r--r--  delegate/src/test/SoftmaxTestHelper.hpp                      |  10
-rw-r--r--  delegate/src/test/SpaceDepthTestHelper.hpp                   |  10
-rw-r--r--  delegate/src/test/SplitTestHelper.hpp                        |  86
-rw-r--r--  delegate/src/test/StridedSliceTestHelper.hpp                 |  47
-rw-r--r--  delegate/src/test/TransposeTestHelper.hpp                    |  65
-rw-r--r--  delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp   | 344
-rw-r--r--  delegate/src/test/UnpackTestHelper.hpp                       |  13
40 files changed, 780 insertions(+), 680 deletions(-)
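In outline: the patch moves the delegate build to C++17, swaps libclog for libpthreadpool in the static TF Lite link set, zero-initialises the new opaque_delegate_builder delegate field, and reworks every test helper for the TF Lite FlatBuffers convention that buffer 0 is a reserved empty sentinel, so each tensor now references its own buffer entry. A minimal sketch of that recurring buffer change, using the generated tflite schema API exactly as the helpers below do (illustrative only):

    // Sketch only: the buffer renumbering applied across the test helpers.
    // Assumes the generated TF Lite schema header is included, as in the tests.
    flatbuffers::FlatBufferBuilder fbb;
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;

    // Before: one shared empty buffer, with every tensor referencing index 0:
    //   buffers.push_back(CreateBuffer(fbb, fbb.CreateVector({})));

    // After: index 0 stays the reserved empty sentinel and each tensor gets
    // its own data-less buffer slot, so a one-input/one-output model carries
    // three entries and its tensors reference indices 1 and 2 instead of 0.
    buffers.push_back(tflite::CreateBuffer(fbb));   // 0: empty sentinel
    buffers.push_back(tflite::CreateBuffer(fbb));   // 1: input tensor buffer
    buffers.push_back(tflite::CreateBuffer(fbb));   // 2: output tensor buffer

Constant tensors (filters, biases, axis values) keep real data in their own buffer entries, which is why the buffer indices passed to CreateTensor shift up by one throughout the hunks below.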
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index d044ed991d..55bdb7ca53 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -5,8 +5,10 @@
cmake_minimum_required (VERSION 3.7.0)
project(armnnDelegate)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion -Wno-comment")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion -Wno-comment")
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/cmake/Modules/")
diff --git a/delegate/cmake/Modules/FindTfLite.cmake b/delegate/cmake/Modules/FindTfLite.cmake
index 338cde1697..634aaea511 100644
--- a/delegate/cmake/Modules/FindTfLite.cmake
+++ b/delegate/cmake/Modules/FindTfLite.cmake
@@ -1,5 +1,5 @@
#
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -38,8 +38,6 @@ if (TfLite_LIB MATCHES .a$)
PATH ${TFLITE_LIB_ROOT}/_deps/flatbuffers-build)
find_library(TfLite_cpuinfo_LIB "libcpuinfo.a" PATH
${TFLITE_LIB_ROOT}/_deps/cpuinfo-build)
- find_library(TfLite_clog_LIB "libclog.a" PATH
- ${TFLITE_LIB_ROOT}/_deps/clog-build)
# All remaining libraries are part of libruy.
find_library(TfLite_ruy_allocator_LIB "libruy_allocator.a" PATH
@@ -100,6 +98,8 @@ if (TfLite_LIB MATCHES .a$)
${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
find_library(TfLite_ruy_profiler_LIB "libruy_profiler_instrumentation.a" PATH
${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy/profiler)
+ find_library(TfLite_pthread_pool_LIB "libpthreadpool.a" PATH
+ ${TFLITE_LIB_ROOT}/pthreadpool)
## Set TFLITE_FOUND if all libraries are satisfied for static lib
find_package_handle_standard_args(TfLite DEFAULT_MSG TfLite_LIB TfLite_abseilstrings_LIB TfLite_farmhash_LIB TfLite_fftsg_LIB TfLite_fftsg2d_LIB
@@ -110,8 +110,8 @@ if (TfLite_LIB MATCHES .a$)
TfLite_ruy_kernel_avx2_fma_LIB TfLite_ruy_kernel_avx512_LIB TfLite_ruy_kernel_avx_LIB TfLite_ruy_pack_arm_LIB
TfLite_ruy_pack_avx2_fma_LIB TfLite_ruy_pack_avx512_LIB TfLite_ruy_pack_avx_LIB TfLite_ruy_prepacked_cache_LIB
TfLite_ruy_prepare_packed_matrices_LIB TfLite_ruy_system_aligned_alloc_LIB TfLite_ruy_threadpool_LIB
- TfLite_ruy_trmul_LIB TfLite_ruy_tune_LIB TfLite_ruy_wait_LIB TfLite_ruy_profiler_LIB TfLite_cpuinfo_LIB TfLite_clog_LIB
- TfLite_abseil_synchronization_LIB)
+ TfLite_ruy_trmul_LIB TfLite_ruy_tune_LIB TfLite_ruy_wait_LIB TfLite_ruy_profiler_LIB TfLite_cpuinfo_LIB
+ TfLite_abseil_synchronization_LIB TfLite_pthread_pool_LIB)
# Set external variables for usage in CMakeLists.txt
if (TFLITE_FOUND)
# WARNING! The order of these libraries is critical. Moving them
@@ -126,7 +126,7 @@ if (TfLite_LIB MATCHES .a$)
${TfLite_ruy_pack_avx2_fma_LIB} ${TfLite_ruy_pack_avx512_LIB} ${TfLite_ruy_pack_avx_LIB} ${TfLite_ruy_prepacked_cache_LIB}
${TfLite_ruy_prepare_packed_matrices_LIB} ${TfLite_ruy_system_aligned_alloc_LIB}
${TfLite_ruy_tune_LIB} ${TfLite_ruy_wait_LIB} ${TfLite_ruy_profiler_LIB}
- ${TfLite_cpuinfo_LIB} ${TfLite_clog_LIB} ${TfLite_abseil_synchronization_LIB})
+ ${TfLite_cpuinfo_LIB} ${TfLite_abseil_synchronization_LIB} ${TfLite_pthread_pool_LIB})
endif ()
elseif (TfLite_LIB MATCHES .so$)
message("-- Dynamic tensorflow lite library found, using for ArmNN build")
diff --git a/delegate/include/armnn_delegate.hpp b/delegate/include/armnn_delegate.hpp
index 159d590423..9cfc397550 100644
--- a/delegate/include/armnn_delegate.hpp
+++ b/delegate/include/armnn_delegate.hpp
@@ -81,6 +81,7 @@ private:
nullptr, // .CopyToBufferHandle
nullptr, // .FreeBufferHandle
kTfLiteDelegateFlagsNone, // .flags
+ nullptr, // .opaque_delegate_builder
};
/// ArmNN Runtime pointer
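The extra nullptr above tracks a trailing member added to TfLiteDelegate in newer TF Lite headers; because the struct is initialised positionally, the new field must be listed to keep the initialisers aligned with the declaration order. A stand-in illustration (field set abridged and hypothetical, not the real TfLiteDelegate definition):

    // Illustrative stand-in only; the real TfLiteDelegate has more members.
    struct DelegateLike
    {
        void* CopyToBufferHandle;
        void* FreeBufferHandle;
        int   flags;
        void* opaque_delegate_builder;   // trailing field in newer headers
    };

    // Positional aggregate initialisation covers members in declaration
    // order, so the new trailing field is zeroed explicitly rather than
    // relying on implicit value-initialisation:
    DelegateLike d = { nullptr, nullptr, 0, nullptr };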
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
index 0f4d944685..6475083da0 100644
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,7 +29,7 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[0] = CreateBuffer(flatBufferBuilder);
std::array<flatbuffers::Offset<Tensor>, 2> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
diff --git a/delegate/src/test/ArgMinMaxTestHelper.hpp b/delegate/src/test/ArgMinMaxTestHelper.hpp
index a734c819f9..3e607d6b2b 100644
--- a/delegate/src/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/src/test/ArgMinMaxTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,7 +46,7 @@ std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOp
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -54,26 +54,27 @@ std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOp
flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
axisTensorShape.size()),
tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("axis"));
auto outputTensor = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
outputType,
- 2,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
sizeof(OutputT))));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::vector<int32_t> operatorInputs = {{ 0, 1 }};
std::vector<int> subgraphInputs = {{ 0, 1 }};
diff --git a/delegate/src/test/BatchMatMulTestHelper.hpp b/delegate/src/test/BatchMatMulTestHelper.hpp
index 42c1ed6a1e..7437064a42 100644
--- a/delegate/src/test/BatchMatMulTestHelper.hpp
+++ b/delegate/src/test/BatchMatMulTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,184 +20,186 @@
namespace
{
-
- std::vector<char> CreateBatchMatMulTfLiteModel(
- tflite::BuiltinOperator bmmOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& LHSInputTensorShape,
- const std::vector <int32_t>& RHSInputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- bool adjX = false,
- bool adjY = false,
- float quantScale = 1.0f,
- int quantOffset = 0)
+std::vector<char> CreateBatchMatMulTfLiteModel(
+ tflite::BuiltinOperator bmmOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector <int32_t>& LHSInputTensorShape,
+ const std::vector <int32_t>& RHSInputTensorShape,
+ const std::vector <int32_t>& outputTensorShape,
+ bool adjX = false,
+ bool adjY = false,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+ auto quantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+ std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
+ LHSInputTensorShape.size()),
+ tensorType,
+ 1,
+ flatBufferBuilder.CreateString("LHSInput"),
+ quantizationParameters);
+
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
+ RHSInputTensorShape.size()),
+ tensorType,
+ 2,
+ flatBufferBuilder.CreateString("RHSInput"),
+ quantizationParameters);
+
+ tensors[2] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ tensorType,
+ 3,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
+
+ // create operator
+ tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
+ flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
+ adjX,
+ adjY).Union();
+
+ const std::vector<int32_t> operatorInputs{{0, 1}};
+ const std::vector<int32_t> operatorOutputs{2};
+ flatbuffers::Offset <Operator> bmmOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+ operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
+
+ const std::vector<int> subgraphInputs{{0, 1}};
+ const std::vector<int> subgraphOutputs{2};
+ flatbuffers::Offset <SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+ subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&bmmOperator, 1));
+
+ flatbuffers::Offset <flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
+ flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
+
+ flatbuffers::Offset <Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
+ tflite::TensorType tensorType,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& LHSInputShape,
+ std::vector<int32_t>& RHSInputShape,
+ std::vector<int32_t>& outputShape,
+ std::vector<T>& LHSInputValues,
+ std::vector<T>& RHSInputValues,
+ std::vector<T>& expectedOutputValues,
+ bool adjX = false,
+ bool adjY = false,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
+ tensorType,
+ LHSInputShape,
+ RHSInputShape,
+ outputShape,
+ adjX,
+ adjY,
+ quantScale,
+ quantOffset);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ CHECK(tfLiteModel != nullptr);
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
+ auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
+ auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
+ auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
+ for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+ {
+ tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
+ }
+ for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
- LHSInputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("LHSInput"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
- RHSInputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("RHSInput"),
- quantizationParameters);
-
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
- adjX,
- adjY).Union();
-
- const std::vector<int32_t> operatorInputs{{0, 1}};
- const std::vector<int32_t> operatorOutputs{2};
- flatbuffers::Offset <Operator> bmmOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{{0, 1}};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
- subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&bmmOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+ tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
}
- template <typename T>
- void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& LHSInputShape,
- std::vector<int32_t>& RHSInputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& LHSInputValues,
- std::vector<T>& RHSInputValues,
- std::vector<T>& expectedOutputValues,
- bool adjX = false,
- bool adjY = false,
- float quantScale = 1.0f,
- int quantOffset = 0)
+ auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
+ auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
+ auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
+ for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+ {
+ armnnDelegateLHSInputData[i] = LHSInputValues[i];
+ }
+ for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
- tensorType,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- adjX,
- adjY,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
- auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
- auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
- }
-
- auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
- auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- armnnDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- armnnDelegateRHSInputData[i] = RHSInputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
- outputShape, expectedOutputValues);
+ armnnDelegateRHSInputData[i] = RHSInputValues[i];
}
+ // Run EnqueueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
+ outputShape, expectedOutputValues);
+}
} // anonymous namespace
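Aside from the buffer renumbering, the BatchMatMul hunk is a pure re-indent (the function bodies move out one level) and the test flow is unchanged. Condensed, and assuming the same headers and parameter names as the helper above, it reads:

    // Condensed sketch of BatchMatMulTest's flow (names as in the helper).
    std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(
        bmmOperatorCode, tensorType, LHSInputShape, RHSInputShape,
        outputShape, adjX, adjY, quantScale, quantOffset);
    const tflite::Model* tfLiteModel = tflite::GetModel(modelBuffer.data());

    // Build two interpreters from the same model buffer.
    std::unique_ptr<tflite::Interpreter> reference;
    std::unique_ptr<tflite::Interpreter> delegated;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder(tfLiteModel, resolver)(&reference);
    tflite::InterpreterBuilder(tfLiteModel, resolver)(&delegated);
    reference->AllocateTensors();
    delegated->AllocateTensors();

    // Attach the ArmNN delegate to one interpreter only, then feed both the
    // same inputs, Invoke() both, and compare the outputs tensor-by-tensor.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    delegated->ModifyGraphWithDelegate(armnnDelegatePtr.get());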
diff --git a/delegate/src/test/BatchSpaceTestHelper.hpp b/delegate/src/test/BatchSpaceTestHelper.hpp
index 464a5d9cbe..d4fa9837e8 100644
--- a/delegate/src/test/BatchSpaceTestHelper.hpp
+++ b/delegate/src/test/BatchSpaceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,14 +33,16 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
+ std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+ buffers[0] = CreateBuffer(flatBufferBuilder);
+ buffers[1] = CreateBuffer(flatBufferBuilder);
+ buffers[2] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(blockData.data()),
sizeof(int32_t) * blockData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
+ buffers[3] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cropsPadData.data()),
sizeof(int64_t) * cropsPadData.size()));
+ buffers[4] = CreateBuffer(flatBufferBuilder);
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -60,7 +62,7 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -68,7 +70,7 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
flatBufferBuilder.CreateVector<int32_t>(blockShape.data(),
blockShape.size()),
::tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("block"),
quantizationParameters);
@@ -76,7 +78,7 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
flatBufferBuilder.CreateVector<int32_t>(cropsOrPaddingShape.data(),
cropsOrPaddingShape.size()),
::tflite::TensorType_INT32,
- 2,
+ 3,
flatBufferBuilder.CreateString(cropsOrPadding),
quantizationParameters);
@@ -85,7 +87,7 @@ std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpace
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 4,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/CastTestHelper.hpp b/delegate/src/test/CastTestHelper.hpp
index 6b1d5ee947..0448e65856 100644
--- a/delegate/src/test/CastTestHelper.hpp
+++ b/delegate/src/test/CastTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,9 @@ std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -44,14 +46,14 @@ std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
inputTensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
outputTensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/ComparisonTestHelper.hpp b/delegate/src/test/ComparisonTestHelper.hpp
index c9ccb778ac..db337f9f8a 100644
--- a/delegate/src/test/ComparisonTestHelper.hpp
+++ b/delegate/src/test/ComparisonTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,10 @@ std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparison
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -47,21 +50,21 @@ std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparison
flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
input0TensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_0"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
input1TensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("input_1"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
::tflite::TensorType_BOOL,
- 0);
+ 3);
// create operator
tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;;
diff --git a/delegate/src/test/ControlTestHelper.hpp b/delegate/src/test/ControlTestHelper.hpp
index 0c9796170d..3e427e60c5 100644
--- a/delegate/src/test/ControlTestHelper.hpp
+++ b/delegate/src/test/ControlTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,9 @@ std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperato
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +59,7 @@ std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperato
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input" + std::to_string(i)),
quantizationParameters);
@@ -71,7 +73,7 @@ std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperato
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
@@ -126,7 +128,7 @@ std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorC
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[0] = CreateBuffer(flatBufferBuilder);
buffers[1] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
sizeof(int32_t) * axisData.size()));
diff --git a/delegate/src/test/ConvolutionTestHelper.hpp b/delegate/src/test/ConvolutionTestHelper.hpp
index ce1f951d21..70c1da6dce 100644
--- a/delegate/src/test/ConvolutionTestHelper.hpp
+++ b/delegate/src/test/ConvolutionTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,15 +50,17 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
+ std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+ buffers[0] = CreateBuffer(flatBufferBuilder);
+ buffers[1] = CreateBuffer(flatBufferBuilder);
+ buffers[2] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
sizeof(T) * filterData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
+ buffers[3] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
sizeof(B) * biasData.size()));
+ buffers[4] = CreateBuffer(flatBufferBuilder);
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -95,14 +97,14 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
filterTensorShape.size()),
tensorType,
- 1,
+ 2,
flatBufferBuilder.CreateString("filter"),
filterQuantizationParameters);
@@ -114,14 +116,14 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
biasTensorType,
- 2,
+ 3,
flatBufferBuilder.CreateString("bias"),
biasQuantizationParameters);
tensors[3] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 4,
flatBufferBuilder.CreateString("output"),
outputQuantizationParameters);
@@ -334,7 +336,7 @@ std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOpe
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[0] = CreateBuffer(flatBufferBuilder);
buffers[1] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
sizeof(T) * filterData.size()));
@@ -581,7 +583,7 @@ std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[0] = CreateBuffer(flatBufferBuilder);
buffers[1] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
sizeof(int32_t) * transposeData.size()));
diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
index 7e147de31f..00a3d95904 100644
--- a/delegate/src/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/src/test/DelegateOptionsTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,7 +44,12 @@ std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -59,35 +64,35 @@ std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_0"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("input_1"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("input_2"),
quantizationParameters);
tensors[3] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 4,
flatBufferBuilder.CreateString("add"),
quantizationParameters);
tensors[4] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 5,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
@@ -157,7 +162,7 @@ std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
index 69b0c88dc8..09a715e7f1 100644
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,8 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
if (constantInput)
{
buffers.push_back(
@@ -47,9 +48,9 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
}
else
{
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
}
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -64,21 +65,21 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
input0TensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_0"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
input1TensorShape.size()),
tensorType,
- 1,
+ 2,
flatBufferBuilder.CreateString("input_1"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 2,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
index dcc7074753..230d0fcca5 100644
--- a/delegate/src/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,7 +29,7 @@ std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unar
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[0] = CreateBuffer(flatBufferBuilder);
std::array<flatbuffers::Offset<Tensor>, 2> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
diff --git a/delegate/src/test/FillTestHelper.hpp b/delegate/src/test/FillTestHelper.hpp
index e6890a2b2d..8479b72730 100644
--- a/delegate/src/test/FillTestHelper.hpp
+++ b/delegate/src/test/FillTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,9 +32,7 @@ std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
@@ -43,6 +41,7 @@ std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
sizeof(T) * fillValue.size())));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::array<flatbuffers::Offset<Tensor>, 3> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
@@ -64,7 +63,7 @@ std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"));
tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
diff --git a/delegate/src/test/FullyConnectedTestHelper.hpp b/delegate/src/test/FullyConnectedTestHelper.hpp
index 37062c3400..a3f009a863 100644
--- a/delegate/src/test/FullyConnectedTestHelper.hpp
+++ b/delegate/src/test/FullyConnectedTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,8 +37,9 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
{
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+ buffers[0] = CreateBuffer(flatBufferBuilder);
+ buffers[1] = CreateBuffer(flatBufferBuilder);
auto biasTensorType = ::tflite::TensorType_FLOAT32;
if (tensorType == ::tflite::TensorType_INT8)
@@ -47,14 +48,14 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
}
if (constantWeights)
{
- buffers[1] = CreateBuffer(flatBufferBuilder,
+ buffers[2] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(weightsData.data()),
sizeof(T) * weightsData.size()));
if (tensorType == ::tflite::TensorType_INT8)
{
std::vector<int32_t> biasData = { 10 };
- buffers[2] = CreateBuffer(flatBufferBuilder,
+ buffers[3] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
sizeof(int32_t) * biasData.size()));
@@ -62,16 +63,17 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
else
{
std::vector<float> biasData = { 10 };
- buffers[2] = CreateBuffer(flatBufferBuilder,
+ buffers[3] = CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
sizeof(float) * biasData.size()));
}
}
else
{
- buffers[1] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[2] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+ buffers[2] = CreateBuffer(flatBufferBuilder);
+ buffers[3] = CreateBuffer(flatBufferBuilder);
}
+ buffers[4] = CreateBuffer(flatBufferBuilder);
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -92,21 +94,21 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_0"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(weightsTensorShape.data(),
weightsTensorShape.size()),
tensorType,
- 1,
+ 2,
flatBufferBuilder.CreateString("weights"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(),
biasTensorShape.size()),
biasTensorType,
- 2,
+ 3,
flatBufferBuilder.CreateString("bias"),
quantizationParameters);
@@ -114,7 +116,7 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 4,
flatBufferBuilder.CreateString("output"),
outputQuantizationParameters);
diff --git a/delegate/src/test/GatherNdTestHelper.hpp b/delegate/src/test/GatherNdTestHelper.hpp
index f475584dc5..c2cf9ffe9d 100644
--- a/delegate/src/test/GatherNdTestHelper.hpp
+++ b/delegate/src/test/GatherNdTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,10 @@ std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -46,21 +49,21 @@ std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
paramsShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("params"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
indicesShape.size()),
::tflite::TensorType_INT32,
- 0,
+ 2,
flatBufferBuilder.CreateString("indices"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
expectedOutputShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/GatherTestHelper.hpp b/delegate/src/test/GatherTestHelper.hpp
index fcacf04134..4763e06c73 100644
--- a/delegate/src/test/GatherTestHelper.hpp
+++ b/delegate/src/test/GatherTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,10 @@ std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -47,21 +50,21 @@ std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
paramsShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("params"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
indicesShape.size()),
::tflite::TensorType_INT32,
- 0,
+ 2,
flatBufferBuilder.CreateString("indices"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
expectedOutputShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/LogicalTestHelper.hpp b/delegate/src/test/LogicalTestHelper.hpp
index d08a1af388..2a1ff2b996 100644
--- a/delegate/src/test/LogicalTestHelper.hpp
+++ b/delegate/src/test/LogicalTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,10 @@ std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logical
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -48,21 +51,21 @@ std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logical
flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
input0TensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_0"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
input1TensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("input_1"),
quantizationParameters);
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/LstmTestHelper.hpp b/delegate/src/test/LstmTestHelper.hpp
index 36a606119a..082d5dea91 100644
--- a/delegate/src/test/LstmTestHelper.hpp
+++ b/delegate/src/test/LstmTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -101,7 +101,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
inputShape.size()),
@@ -388,7 +388,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
operatorInputs.push_back(kTfLiteOptionalTensor);
}
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
outputStateInDimensions.size()),
@@ -399,7 +399,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
true));
operatorInputs.push_back(buffers.size() - 1);
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
cellStateInDimensions.size()),
@@ -493,7 +493,7 @@ std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
operatorInputs.push_back(kTfLiteOptionalTensor);
}
int outputBufferId = buffers.size();
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
outputShape.size()),
diff --git a/delegate/src/test/NormalizationTestHelper.hpp b/delegate/src/test/NormalizationTestHelper.hpp
index ebdfdc1a25..510b578c02 100644
--- a/delegate/src/test/NormalizationTestHelper.hpp
+++ b/delegate/src/test/NormalizationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,7 +46,7 @@ std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normali
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -54,15 +54,16 @@ std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normali
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 1,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, outputTensor };
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::vector<int32_t> operatorInputs = { 0 };
std::vector<int> subgraphInputs = { 0 };
diff --git a/delegate/src/test/PackTestHelper.hpp b/delegate/src/test/PackTestHelper.hpp
index 0869228326..a9e2ee17bc 100644
--- a/delegate/src/test/PackTestHelper.hpp
+++ b/delegate/src/test/PackTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,8 @@ std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +58,7 @@ std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input" + std::to_string(i)),
quantizationParameters);
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
index 5b9a1bcc36..e96bc4bfe3 100644
--- a/delegate/src/test/PadTestHelper.hpp
+++ b/delegate/src/test/PadTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,12 +70,12 @@ std::vector<char> CreatePadTfLiteModel(
std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
sizeof(int32_t) * paddingDim.size())));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::vector<int32_t> operatorInputs;
std::vector<int> subgraphInputs;
diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp
index b5d36b0231..c7457dbb22 100644
--- a/delegate/src/test/Pooling2dTestHelper.hpp
+++ b/delegate/src/test/Pooling2dTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,8 +38,9 @@ std::vector<char> CreatePooling2dTfLiteModel(
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ flatbuffers::Offset<tflite::Buffer> buffers[3] = {CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder)};
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -48,22 +49,21 @@ std::vector<char> CreatePooling2dTfLiteModel(
flatBufferBuilder.CreateVector<float>({ quantScale }),
flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
+ flatbuffers::Offset<Tensor> tensors[2] {
+ CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
- quantizationParameters);
+ quantizationParameters),
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
+ CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
- quantizationParameters);
+ quantizationParameters)
+ };
// create operator
tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
@@ -80,18 +80,18 @@ std::vector<char> CreatePooling2dTfLiteModel(
flatbuffers::Offset <Operator> poolingOperator =
CreateOperator(flatBufferBuilder,
0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
operatorBuiltinOptionsType,
operatorBuiltinOptions);
- const std::vector<int> subgraphInputs{0};
- const std::vector<int> subgraphOutputs{1};
+ const int subgraphInputs[1] = {0};
+ const int subgraphOutputs[1] = {1};
flatbuffers::Offset <SubGraph> subgraph =
CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(tensors, 2),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs, 1),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs, 1),
flatBufferBuilder.CreateVector(&poolingOperator, 1));
flatbuffers::Offset <flatbuffers::String> modelDescription =
@@ -104,7 +104,7 @@ std::vector<char> CreatePooling2dTfLiteModel(
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers, 3));
flatBufferBuilder.Finish(flatbufferModel);
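
Editor's note: besides the buffer renumbering, the Pooling2d helper swaps std::array/std::vector containers for fixed-size C arrays and uses the flatbuffers::FlatBufferBuilder::CreateVector overloads that take a whole std::vector or a pointer plus element count. Both forms serialize identically; a short sketch with illustrative values:

    std::vector<int32_t> shape{ 1, 2, 2, 1 };
    auto fromContainer = fbb.CreateVector<int32_t>(shape);                      // whole container
    auto fromPtrLen    = fbb.CreateVector<int32_t>(shape.data(), shape.size()); // pointer + length

    // A raw array with an explicit count replaces a std::vector of offsets
    // without changing the serialized model.
    flatbuffers::Offset<tflite::Buffer> bufs[3] = { CreateBuffer(fbb),
                                                    CreateBuffer(fbb),
                                                    CreateBuffer(fbb) };
    auto bufferVec = fbb.CreateVector(bufs, 3);
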
diff --git a/delegate/src/test/Pooling3dTestHelper.hpp b/delegate/src/test/Pooling3dTestHelper.hpp
index f5f5cc3809..47e00f7b7f 100644
--- a/delegate/src/test/Pooling3dTestHelper.hpp
+++ b/delegate/src/test/Pooling3dTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,7 +46,10 @@ std::vector<char> CreatePooling3dTfLiteModel(
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
diff --git a/delegate/src/test/PreluTestHelper.hpp b/delegate/src/test/PreluTestHelper.hpp
index b6c18ccdfb..b50c37763f 100644
--- a/delegate/src/test/PreluTestHelper.hpp
+++ b/delegate/src/test/PreluTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,10 +33,12 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector(
reinterpret_cast<const uint8_t *>(alphaData.data()), sizeof(float) * alphaData.size())));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -49,7 +51,7 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
inputShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -57,7 +59,7 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
alphaShape.size()),
tensorType,
- 1,
+ 2,
flatBufferBuilder.CreateString("alpha"),
quantizationParameters);
@@ -65,7 +67,7 @@ std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCo
flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
outputShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/QuantizationTestHelper.hpp b/delegate/src/test/QuantizationTestHelper.hpp
index e4155040cd..a8b102271a 100644
--- a/delegate/src/test/QuantizationTestHelper.hpp
+++ b/delegate/src/test/QuantizationTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,10 @@ std::vector<char> CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantiza
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -46,14 +49,14 @@ std::vector<char> CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantiza
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
inputTensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
outputTensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/RedefineTestHelper.hpp b/delegate/src/test/RedefineTestHelper.hpp
index 6f061572b4..7f811d56dd 100644
--- a/delegate/src/test/RedefineTestHelper.hpp
+++ b/delegate/src/test/RedefineTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,44 +22,36 @@ namespace
{
std::vector<char> CreateRedefineTfLiteModel(
- tflite::BuiltinOperator redefineOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const std::vector<int32_t>& targetShape,
- bool useOption = true,
- float quantScale = 1.0f,
- int quantOffset = 0)
+ tflite::BuiltinOperator redefineOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector<int32_t>& inputTensorShape,
+ const std::vector<int32_t>& outputTensorShape,
+ const std::vector<int32_t>& targetShape,
+ bool useOption = true,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
{
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
auto inputTensor = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<int32_t> operatorInputs;
std::vector<int> subgraphInputs;
@@ -67,25 +59,43 @@ std::vector<char> CreateRedefineTfLiteModel(
if (useOption)
{
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ auto outputTensor = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ tensorType,
+ 2,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
tensors = { inputTensor, outputTensor};
operatorInputs = {0};
subgraphInputs = {0};
operatorBuiltinOptions = CreateReshapeOptions(
- flatBufferBuilder,
- flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
+ flatBufferBuilder,
+ flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
}
else
{
buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
- sizeof(int32_t) * targetShape.size())));
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
+ sizeof(int32_t) * targetShape.size())));
int32_t size = static_cast<int32_t>(targetShape.size());
auto shapeTensor = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>( { size } ),
tflite::TensorType_INT32,
2,
flatBufferBuilder.CreateString("shape"));
+
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ auto outputTensor = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ tensorType,
+ 3,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
+
tensors = { inputTensor, outputTensor, shapeTensor };
operatorInputs = {0, 2};
subgraphInputs = {0, 2};
@@ -97,33 +107,33 @@ std::vector<char> CreateRedefineTfLiteModel(
const std::vector<int32_t> operatorOutputs{1};
flatbuffers::Offset <Operator> redefineOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
const std::vector<int> subgraphOutputs{1};
flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&redefineOperator, 1));
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&redefineOperator, 1));
flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
+ flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
redefineOperatorCode);
flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
flatBufferBuilder.Finish(flatbufferModel);
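
Editor's note: RedefineTestHelper builds the RESHAPE model in two ways. With useOption the target shape travels inside ReshapeOptions and the graph has two tensors, so the output tensor gets buffer 2; without it the shape becomes a third, constant INT32 input tensor whose bytes live in their own buffer, pushing the output tensor to buffer 3. A sketch of the shape-as-input branch, reusing only the calls shown above:

    // Target shape stored as raw bytes in a dedicated buffer, then exposed as a tensor.
    buffers.push_back(CreateBuffer(flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
                                       sizeof(int32_t) * targetShape.size())));
    auto shapeTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(
                                        { static_cast<int32_t>(targetShape.size()) }),
                                    tflite::TensorType_INT32,
                                    2, // the buffer holding the shape bytes
                                    flatBufferBuilder.CreateString("shape"));
    // Operator inputs are tensor indices: {0 /*input*/, 2 /*shape*/}.
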
@@ -172,8 +182,8 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
// Create the ArmNN Delegate
armnnDelegate::DelegateOptions delegateOptions(backends);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
CHECK(theArmnnDelegate != nullptr);
// Modify armnnDelegateInterpreter to use armnnDelegate
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
diff --git a/delegate/src/test/ReduceTestHelper.hpp b/delegate/src/test/ReduceTestHelper.hpp
index 5457adbd0f..f500736080 100644
--- a/delegate/src/test/ReduceTestHelper.hpp
+++ b/delegate/src/test/ReduceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,14 +37,17 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size()));
+ flatbuffers::Offset<tflite::Buffer> buffers[4] = {
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+ sizeof(int32_t) * axisData.size())),
+ CreateBuffer(flatBufferBuilder)
+ };
flatbuffers::Offset<tflite::QuantizationParameters> quantizationParametersAxis
- = CreateQuantizationParameters(flatBufferBuilder);
+ = CreateQuantizationParameters(flatBufferBuilder);
flatbuffers::Offset<tflite::QuantizationParameters> quantizationParameters;
@@ -81,7 +84,7 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
input0TensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -89,7 +92,7 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
input1TensorShape.size()),
::tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("axis"),
quantizationParametersAxis);
@@ -98,7 +101,7 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
@@ -135,7 +138,7 @@ std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperator
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers, 4));
flatBufferBuilder.Finish(flatbufferModel);
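
Editor's note: the axis tensor in the Reduce helper is a genuine constant, so its int32_t data is packed into buffer 2 while the input and output tensors keep empty buffers 1 and 3. FlatBuffers buffers hold untyped bytes, hence the reinterpret_cast and the explicit byte count; a condensed sketch with an illustrative axis value:

    // Typed constant data enters a tflite::Buffer as raw bytes.
    std::vector<int32_t> axisData{ 1, 2 };
    auto axisBuffer = CreateBuffer(flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                       sizeof(int32_t) * axisData.size())); // byte count, not element count
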
diff --git a/delegate/src/test/ResizeTestHelper.hpp b/delegate/src/test/ResizeTestHelper.hpp
index 030b2a7a4a..6937a4ba43 100644
--- a/delegate/src/test/ResizeTestHelper.hpp
+++ b/delegate/src/test/ResizeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,31 +32,33 @@ std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
buffers.push_back(CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(
reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
sizeof(int32_t) * sizeTensorData.size())));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::array<flatbuffers::Offset<Tensor>, 3> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(), inputTensorShape.size()),
inputTensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input_tensor"));
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
sizeTensorShape.size()),
TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("size_input_tensor"));
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
inputTensorType,
- 0,
+ 3,
flatBufferBuilder.CreateString("output_tensor"));
// Create Operator
diff --git a/delegate/src/test/RoundTestHelper.hpp b/delegate/src/test/RoundTestHelper.hpp
index 3a35ee0764..6638607dcf 100644
--- a/delegate/src/test/RoundTestHelper.hpp
+++ b/delegate/src/test/RoundTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,9 @@ std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCo
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -44,14 +46,14 @@ std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCo
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/ShapeTestHelper.hpp b/delegate/src/test/ShapeTestHelper.hpp
index 854c5084aa..9b3d574e23 100644
--- a/delegate/src/test/ShapeTestHelper.hpp
+++ b/delegate/src/test/ShapeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,9 @@ std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -45,14 +47,14 @@ std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
inputTensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
outputTensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/SliceTestHelper.hpp b/delegate/src/test/SliceTestHelper.hpp
index 4a2537feec..94c076b4f7 100644
--- a/delegate/src/test/SliceTestHelper.hpp
+++ b/delegate/src/test/SliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,39 +35,42 @@ std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
- sizeof(int32_t) * beginTensorData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
- sizeof(int32_t) * sizeTensorData.size()));
+ flatbuffers::Offset<tflite::Buffer> buffers[5] = {
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+ sizeof(int32_t) * beginTensorData.size())),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
+ sizeof(int32_t) * sizeTensorData.size())),
+ CreateBuffer(flatBufferBuilder)
+ };
std::array<flatbuffers::Offset<Tensor>, 4> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"));
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
beginTensorShape.size()),
::tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("begin_tensor"));
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
sizeTensorShape.size()),
::tflite::TensorType_INT32,
- 2,
+ 3,
flatBufferBuilder.CreateString("size_tensor"));
tensors[3] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 4,
flatBufferBuilder.CreateString("output"));
@@ -105,7 +108,7 @@ std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers, 5));
flatBufferBuilder.Finish(flatbufferModel);
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
index bd32c212e9..f3367f9d24 100644
--- a/delegate/src/test/SoftmaxTestHelper.hpp
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -28,19 +28,21 @@ std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperat
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::array<flatbuffers::Offset<Tensor>, 2> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0);
+ 1);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
tensorShape.size()),
tensorType,
- 0);
+ 2);
const std::vector<int32_t> operatorInputs({0});
const std::vector<int32_t> operatorOutputs({1});
diff --git a/delegate/src/test/SpaceDepthTestHelper.hpp b/delegate/src/test/SpaceDepthTestHelper.hpp
index d9a783c6a7..737e199ef7 100644
--- a/delegate/src/test/SpaceDepthTestHelper.hpp
+++ b/delegate/src/test/SpaceDepthTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,21 +37,23 @@ std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepth
flatBufferBuilder.CreateVector<int64_t>({ 0 }));
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
std::array<flatbuffers::Offset<Tensor>, 2> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 2,
flatBufferBuilder.CreateString("output"),
quantizationParameters);
diff --git a/delegate/src/test/SplitTestHelper.hpp b/delegate/src/test/SplitTestHelper.hpp
index 31fc7d5e46..3c5f50ffac 100644
--- a/delegate/src/test/SplitTestHelper.hpp
+++ b/delegate/src/test/SplitTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,11 +35,12 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size()));
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+ sizeof(int32_t) * axisData.size())));
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -53,27 +54,28 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
axisTensorShape.size()),
::tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("axis"),
quantizationParameters);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
// Create output tensor
for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
{
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors[i + 2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
- outputTensorShapes[i].size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
+ outputTensorShapes[i].size()),
+ tensorType,
+ (i + 3),
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
}
// create operator. Mean uses ReducerOptions.
@@ -109,7 +111,7 @@ std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers));
flatBufferBuilder.Finish(flatbufferModel);
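
Editor's note: SplitTestHelper changes buffers from a fixed std::array to a std::vector because the number of outputs is a runtime parameter: each loop iteration allocates a fresh empty buffer for one output tensor. With the sentinel at 0, the input backed by buffer 1 and the axis constant in buffer 2, output i lands on buffer i + 3, and the container-taking CreateVector(buffers) overload serializes whatever count resulted. Condensed from the loop above:

    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        buffers.push_back(CreateBuffer(flatBufferBuilder)); // one empty buffer per output
        tensors[i + 2] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      i + 3, // after sentinel(0), input(1), axis(2)
                                      flatBufferBuilder.CreateString("output"));
    }
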
@@ -144,21 +146,21 @@ void SplitTest(tflite::TensorType tensorType,
// Create TfLite Interpreters
std::unique_ptr<Interpreter> armnnDelegate;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
+ (&armnnDelegate) == kTfLiteOk);
CHECK(armnnDelegate != nullptr);
CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
std::unique_ptr<Interpreter> tfLiteDelegate;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
+ (&tfLiteDelegate) == kTfLiteOk);
CHECK(tfLiteDelegate != nullptr);
CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
// Create the ArmNN Delegate
armnnDelegate::DelegateOptions delegateOptions(backends);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
CHECK(theArmnnDelegate != nullptr);
// Modify armnnDelegateInterpreter to use armnnDelegate
@@ -210,11 +212,11 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
sizeof(int32_t) * axisData.size()));
auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
std::array<flatbuffers::Offset<Tensor>, 5> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
@@ -258,33 +260,33 @@ std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
const std::vector<int> operatorInputs{ {0, 1, 2} };
const std::vector<int> operatorOutputs{ {3, 4} };
flatbuffers::Offset <Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
const std::vector<int> subgraphInputs{ {0, 1, 2} };
const std::vector<int> subgraphOutputs{ {3, 4} };
flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&controlOperator, 1));
flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
+ flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);
flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
flatBufferBuilder.Finish(flatbufferModel);
diff --git a/delegate/src/test/StridedSliceTestHelper.hpp b/delegate/src/test/StridedSliceTestHelper.hpp
index 2bca4fdc35..ef944d7e7a 100644
--- a/delegate/src/test/StridedSliceTestHelper.hpp
+++ b/delegate/src/test/StridedSliceTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,48 +43,51 @@ std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 4> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
- sizeof(int32_t) * beginTensorData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
- sizeof(int32_t) * endTensorData.size()));
- buffers[3] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
- sizeof(int32_t) * strideTensorData.size()));
+ flatbuffers::Offset<tflite::Buffer> buffers[6] = {
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+ sizeof(int32_t) * beginTensorData.size())),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
+ sizeof(int32_t) * endTensorData.size())),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
+ sizeof(int32_t) * strideTensorData.size())),
+ CreateBuffer(flatBufferBuilder)
+ };
std::array<flatbuffers::Offset<Tensor>, 5> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"));
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
beginTensorShape.size()),
::tflite::TensorType_INT32,
- 1,
+ 2,
flatBufferBuilder.CreateString("begin_tensor"));
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(endTensorShape.data(),
endTensorShape.size()),
::tflite::TensorType_INT32,
- 2,
+ 3,
flatBufferBuilder.CreateString("end_tensor"));
tensors[3] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(strideTensorShape.data(),
strideTensorShape.size()),
::tflite::TensorType_INT32,
- 3,
+ 4,
flatBufferBuilder.CreateString("stride_tensor"));
tensors[4] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ 5,
flatBufferBuilder.CreateString("output"));
@@ -127,7 +130,7 @@ std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers, 6));
flatBufferBuilder.Finish(flatbufferModel);
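
Editor's note: the StridedSlice helper now allocates six buffers for its five tensors: one sentinel plus one buffer per tensor, with only the begin/end/stride constants actually carrying data. As a rule of thumb across these generated test models, the buffer count is the tensor count plus one, and the length passed to CreateVector(buffers, 6) must match, since a tensor whose buffer index is out of range yields an invalid model.
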
@@ -177,21 +180,21 @@ void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
// Create TfLite Interpreters
std::unique_ptr<Interpreter> armnnDelegate;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
+ (&armnnDelegate) == kTfLiteOk);
CHECK(armnnDelegate != nullptr);
CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
std::unique_ptr<Interpreter> tfLiteDelegate;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
+ (&tfLiteDelegate) == kTfLiteOk);
CHECK(tfLiteDelegate != nullptr);
CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
// Create the ArmNN Delegate
armnnDelegate::DelegateOptions delegateOptions(backends);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
CHECK(theArmnnDelegate != nullptr);
// Modify armnnDelegateInterpreter to use armnnDelegate
diff --git a/delegate/src/test/TransposeTestHelper.hpp b/delegate/src/test/TransposeTestHelper.hpp
index 1d55273b9f..4479c486cb 100644
--- a/delegate/src/test/TransposeTestHelper.hpp
+++ b/delegate/src/test/TransposeTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,53 +26,56 @@ std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
{
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
- sizeof(int32_t) * inputPermVec.size()));
+ flatbuffers::Offset<tflite::Buffer> buffers[4]{
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder),
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
+ sizeof(int32_t) * inputPermVec.size())),
+ CreateBuffer(flatBufferBuilder)
+ };
std::array<flatbuffers::Offset<Tensor>, 3> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
input0TensorShape.size()),
- tensorType, 0);
+ tensorType, 1);
tensors[1] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
inputPermVecShape.size()),
- tflite::TensorType_INT32, 1,
+ tflite::TensorType_INT32, 2,
flatBufferBuilder.CreateString("permutation_vector"));
tensors[2] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
- tensorType);
+ tensorType, 3);
const std::vector<int32_t> operatorInputs{0, 1};
const std::vector<int32_t> operatorOutputs{2};
flatbuffers::Offset <Operator> transposeOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_TransposeOptions,
- CreateTransposeOptions(flatBufferBuilder).Union());
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ BuiltinOptions_TransposeOptions,
+ CreateTransposeOptions(flatBufferBuilder).Union());
const std::vector<int> subgraphInputs{0, 1};
const std::vector<int> subgraphOutputs{2};
flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&transposeOperator, 1));
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&transposeOperator, 1));
flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
+ flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
tflite::BuiltinOperator_TRANSPOSE);
flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers, 4));
flatBufferBuilder.Finish(flatbufferModel);
return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
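
Editor's note: every helper ends the same way; Finish() seals the FlatBuffer and the builder's backing storage is copied out as a std::vector<char>. A minimal usage sketch of what the surrounding tests do with those bytes, assuming the standard TfLite FlatBufferModel::BuildFromBuffer entry point:

    std::vector<char> modelBuffer =
        CreateTransposeTfLiteModel(/* shapes and permutation vector as in the test */);
    std::unique_ptr<tflite::FlatBufferModel> tfLiteModel =
        tflite::FlatBufferModel::BuildFromBuffer(modelBuffer.data(), modelBuffer.size());
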
@@ -104,21 +107,21 @@ void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
// Create TfLite Interpreters
std::unique_ptr<Interpreter> armnnDelegateInterpreter;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
+ (&armnnDelegateInterpreter) == kTfLiteOk);
CHECK(armnnDelegateInterpreter != nullptr);
CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
std::unique_ptr<Interpreter> tfLiteInterpreter;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
+ (&tfLiteInterpreter) == kTfLiteOk);
CHECK(tfLiteInterpreter != nullptr);
CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
// Create the ArmNN Delegate
armnnDelegate::DelegateOptions delegateOptions(backends);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
CHECK(theArmnnDelegate != nullptr);
// Modify armnnDelegateInterpreter to use armnnDelegate
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
index 9d6ef87e3f..10555aca1a 100644
--- a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,6 @@
#include <doctest/doctest.h>
-
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/TypesUtils.hpp>
@@ -33,7 +32,7 @@
namespace
{
-template <typename T>
+template<typename T>
std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType tensorType,
int32_t batchSize,
int32_t timeSize,
@@ -78,7 +77,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
float clippingThresProj,
bool isTimeMajor,
float quantScale,
- int quantOffset = 0)
+ int quantOffset = 0)
{
std::vector<int32_t> tensorInfo0{};
@@ -105,39 +104,41 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
std::vector<int> operatorInputs;
using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- std::vector<flatbuffers::Offset<Tensor>> tensors;
+ std::vector<flatbuffers::Offset<Tensor>> tensors;
auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ 1.0f }),
- flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({1.0f}),
+ flatBufferBuilder.CreateVector<int64_t>({0}));
auto weightQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({quantScale}),
+ flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
inputShape.size()),
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("input_0")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
if (hasInputToInputWeights)
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToInputWeights.data()),
- sizeof(T) * inputToInputWeights.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
+ sizeof(T) * inputToInputWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
tensorInfoInputSize.size()),
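
Editor's note: the substantive fix in this helper is that operatorInputs holds tensor indices, but the old code pushed buffers.size() - 1. The two values only coincided while buffers and tensors grew in lockstep; once the extra empty buffer is pushed before the first tensor, every weight would be wired to the wrong operator input. A sketch of the corrected bookkeeping, with illustrative offsets for the weight bytes and shape vector:

    // A tensor records which buffer backs it; an operator records tensor positions.
    buffers.push_back(CreateBuffer(flatBufferBuilder, weightBytes)); // weightBytes: illustrative
    tensors.push_back(CreateTensor(flatBufferBuilder,
                                   shapeVector, // illustrative
                                   tensorType,
                                   buffers.size() - 1,  // buffer index: backing storage
                                   flatBufferBuilder.CreateString("inputToForgetWeights"),
                                   weightQuantizationParameters));
    operatorInputs.push_back(tensors.size() - 1);        // tensor index: operator wiring
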
@@ -145,7 +146,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("inputToInputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -154,8 +155,9 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToForgetWeights.data()),
- sizeof(T) * inputToForgetWeights.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
+ sizeof(T) * inputToForgetWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
tensorInfoInputSize.size()),
@@ -163,12 +165,13 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("inputToForgetWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToCellWeights.data()),
- sizeof(T) * inputToCellWeights.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
+ sizeof(T) * inputToCellWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
tensorInfoInputSize.size()),
@@ -176,12 +179,13 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("inputToCellWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToOutputWeights.data()),
- sizeof(T) * inputToOutputWeights.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
+ sizeof(T) * inputToOutputWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
tensorInfoInputSize.size()),
@@ -189,7 +193,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("inputToOutputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
if (hasRecurrentToInputWeights)
{
@@ -204,7 +208,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("recurrentToInputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -213,7 +217,8 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToForgetWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ recurrentToForgetWeights.data()),
sizeof(T) * recurrentToForgetWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
@@ -222,11 +227,12 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("recurrentToForgetWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToCellWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ recurrentToCellWeights.data()),
sizeof(T) * recurrentToCellWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
@@ -235,26 +241,28 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("recurrentToCellWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToOutputWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ recurrentToOutputWeights.data()),
sizeof(T) * recurrentToOutputWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
tensorInfoOutputSize.size()),
tensorType,
- buffers.size() - 1 ,
+ buffers.size() - 1,
flatBufferBuilder.CreateString("recurrentToOutputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
if (hasCellToInputWeights)
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ cellToInputWeights.data()),
sizeof(T) * cellToInputWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -263,7 +271,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("cellToInputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -274,7 +282,8 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ cellToForgetWeights.data()),
sizeof(T) * cellToForgetWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -283,7 +292,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("cellToForgetWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -294,7 +303,8 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ cellToOutputWeights.data()),
sizeof(T) * cellToOutputWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -303,7 +313,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("cellToOutputWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -322,7 +332,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("inputGateBias")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -331,7 +341,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(forgetGateBias.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
sizeof(float) * forgetGateBias.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -339,11 +349,11 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("forgetGateBias")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellBias.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellBias.data()),
sizeof(float) * cellBias.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -351,11 +361,11 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("cellBias")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(outputGateBias.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputGateBias.data()),
sizeof(float) * outputGateBias.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -363,14 +373,15 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("outputGateBias")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
if (hasProjectionWeights)
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionWeights.data()),
- sizeof(T) * projectionWeights.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(projectionWeights.data()),
+ sizeof(T) * projectionWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
projectionWeightDimensions.size()),
@@ -378,7 +389,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.size() - 1,
flatBufferBuilder.CreateString("projectionWeights"),
weightQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -389,22 +400,23 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionBias.data()),
- sizeof(float) * projectionBias.size())));
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(projectionBias.data()),
+ sizeof(float) * projectionBias.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
projectionBiasDimensions.size()),
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("projectionBias")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
operatorInputs.push_back(kTfLiteOptionalTensor);
}
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
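
The argument-less CreateBuffer(flatBufferBuilder) is the other pattern this patch standardises on. The generated CreateBuffer takes its data vector as a defaulted parameter, so omitting it emits a buffer with no data, exactly what a runtime-filled tensor needs, while the old flatBufferBuilder.CreateVector({}) spelling leans on an empty brace-init list that the updated FlatBuffers headers appear to reject (an empty list gives the template nothing to deduce an element type from). A side-by-side sketch, assuming the FlatBuffers-generated TfLite schema API:

    // Empty buffer for a tensor whose contents arrive at Invoke() time.
    // CreateBuffer's data argument defaults to a null offset, so this suffices:
    buffers.push_back(tflite::CreateBuffer(flatBufferBuilder));
    // Pre-patch form, dropped throughout this change:
    //   buffers.push_back(CreateBuffer(flatBufferBuilder,
    //                                  flatBufferBuilder.CreateVector({})));
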
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
outputStateInDimensions.size()),
@@ -413,9 +425,9 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
flatBufferBuilder.CreateString("outputStateInInfo"),
quantizationParameters,
true));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
cellStateInDimensions.size()),
@@ -424,22 +436,22 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
flatBufferBuilder.CreateString("cellStateInInfo"),
quantizationParameters,
true));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
if (hasInputLayerNormWeights)
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(inputLayerNormWeights.data()),
- sizeof(float) * inputLayerNormWeights.size())));
+ reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
+ sizeof(float) * inputLayerNormWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
tensorInfoNumUnits.size()),
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("inputLayerNormWeights")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -451,15 +463,15 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(forgetLayerNormWeights.data()),
- sizeof(float) * forgetLayerNormWeights.size())));
+ reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
+ sizeof(float) * forgetLayerNormWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
tensorInfoNumUnits.size()),
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("forgetLayerNormWeights")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -470,7 +482,8 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
{
buffers.push_back(
CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellLayerNormWeights.data()),
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+ cellLayerNormWeights.data()),
sizeof(float) * cellLayerNormWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -478,7 +491,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("cellLayerNormWeights")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
@@ -490,7 +503,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(outputLayerNormWeights.data()),
+ reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
sizeof(float) * outputLayerNormWeights.size())));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -498,58 +511,63 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
::tflite::TensorType_FLOAT32,
buffers.size() - 1,
flatBufferBuilder.CreateString("outputLayerNormWeights")));
- operatorInputs.push_back(buffers.size() - 1);
+ operatorInputs.push_back(tensors.size() - 1);
}
else
{
operatorInputs.push_back(kTfLiteOptionalTensor);
}
- int outputBufferId = buffers.size();
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
tensors.push_back(CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
outputShape.size()),
::tflite::TensorType_FLOAT32,
- outputBufferId,
+ buffers.size() - 1,
flatBufferBuilder.CreateString("output")));
std::vector<int> operatorOutputs;
- operatorOutputs.push_back(buffers.size() - 1);
+ operatorOutputs.push_back(tensors.size() - 1);
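
The same index-space discipline applies on the output side: the outputBufferId temporary is gone, buffers.size() - 1 feeds the tensor's buffer field directly, and operatorOutputs records the tensor index. A cheap guard worth keeping in mind when extending these helpers; illustrative only, not part of this patch:

    // Every operator output must name a valid tensor (optional *inputs* may
    // instead carry the kTfLiteOptionalTensor sentinel, -1). Needs <cassert>.
    for (int idx : operatorOutputs)
    {
        assert(idx >= 0 && static_cast<size_t>(idx) < tensors.size());
    }
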
// create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor).Union();
+ tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
+ flatbuffers::Offset<void> operatorBuiltinOptions =
+ CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor).Union();
flatbuffers::Offset<Operator> lstmOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType, operatorBuiltinOptions);
-
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- flatBufferBuilder.CreateVector(&lstmOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+ operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+ operatorOutputs.size()),
+ operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+ flatbuffers::Offset<SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+ operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+ operatorOutputs.size()),
+ flatBufferBuilder.CreateVector(&lstmOperator, 1));
+
+ flatbuffers::Offset<flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString(
+ "ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
+ flatbuffers::Offset<OperatorCode> operatorCode =
+ CreateOperatorCode(flatBufferBuilder,
+ tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
+
+ flatbuffers::Offset<Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers));
flatBufferBuilder.Finish(flatbufferModel);
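
The one functional change in the assembly above is CreateVector(buffers) in place of CreateVector(buffers.data(), buffers.size()): FlatBuffers offers a std::vector overload that serialises identically but cannot mismatch pointer and length. After Finish(), the model is copied out of the builder as raw bytes, which is roughly what the return statement below does:

    // Equivalent serialisations of the buffer table:
    //   flatBufferBuilder.CreateVector(buffers.data(), buffers.size());
    //   flatBufferBuilder.CreateVector(buffers);  // overload used after this patch
    // Copying the finished model out of the builder:
    return std::vector<char>(
        reinterpret_cast<const char*>(flatBufferBuilder.GetBufferPointer()),
        reinterpret_cast<const char*>(flatBufferBuilder.GetBufferPointer())
            + flatBufferBuilder.GetSize());
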
@@ -557,7 +575,7 @@ std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
-template <typename T>
+template<typename T>
void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
tflite::TensorType tensorType,
int32_t batchSize,
@@ -609,69 +627,69 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
using namespace tflite;
std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
- quantScale);
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor,
+ quantScale);
const Model* tfLiteModel = GetModel(modelBuffer.data());
// Create TfLite Interpreters
std::unique_ptr<Interpreter> armnnDelegateInterpreter;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
+ (&armnnDelegateInterpreter) == kTfLiteOk);
CHECK(armnnDelegateInterpreter != nullptr);
CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
std::unique_ptr<Interpreter> tfLiteInterpreter;
CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
+ (&tfLiteInterpreter) == kTfLiteOk);
CHECK(tfLiteInterpreter != nullptr);
CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
// Create the ArmNN Delegate
armnnDelegate::DelegateOptions delegateOptions(backends);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
CHECK(theArmnnDelegate != nullptr);
// Modify armnnDelegateInterpreter to use armnnDelegate
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
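
The hunks in this function are indentation-only, but the pattern they reformat is worth stating once: each helper builds a single model, runs it through a stock TfLite interpreter as the reference and through a second interpreter with the ArmNN delegate attached, then compares the outputs. A condensed sketch using the same APIs this file already exercises:

    // Two interpreters, one model: the reference stays pure TfLite, the other
    // is handed to ArmNN via ModifyGraphWithDelegate.
    std::unique_ptr<tflite::Interpreter> reference;
    std::unique_ptr<tflite::Interpreter> delegated;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    CHECK(tflite::InterpreterBuilder(tfLiteModel, resolver)(&reference) == kTfLiteOk);
    CHECK(tflite::InterpreterBuilder(tfLiteModel, resolver)(&delegated) == kTfLiteOk);
    CHECK(reference->AllocateTensors() == kTfLiteOk);
    CHECK(delegated->AllocateTensors() == kTfLiteOk);
    CHECK(delegated->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
    // ...fill both inputs with inputValues, Invoke() both, compare outputs.
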
@@ -684,7 +702,7 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
tfLiteDelageInputData[i] = inputValues[i];
}
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
for (unsigned int i = 0; i < inputValues.size(); ++i)
{
@@ -696,10 +714,10 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
// Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
if (tensorType == ::tflite::TensorType_INT8)
{
@@ -713,8 +731,10 @@ void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
}
else
{
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+ expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
+ expectedOutputValues.size());
armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
}
}
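
Note that the float branch makes three comparisons rather than one: delegate output against the expected values, reference output against the expected values, and the two runtimes against each other. That triangulation lets a failing run distinguish a delegate bug from a stale set of hard-coded expectations, a property the re-wrapped lines above preserve:

    // Triangulated checks; all three must pass:
    //   expected  vs delegate   -> ArmNN produced the right values
    //   expected  vs reference  -> the hard-coded expectations are still valid
    //   reference vs delegate   -> the runtimes agree even if expectations drift
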
diff --git a/delegate/src/test/UnpackTestHelper.hpp b/delegate/src/test/UnpackTestHelper.hpp
index 848713498f..0e12d72279 100644
--- a/delegate/src/test/UnpackTestHelper.hpp
+++ b/delegate/src/test/UnpackTestHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,9 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
flatbuffers::FlatBufferBuilder flatBufferBuilder;
std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
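
Here the tensor/buffer split becomes concrete: the unpack model now keeps buffer 0 as the conventional empty sentinel, gives the input tensor its own empty buffer (index 1), and in the loop below allocates one buffer per output (index i + 2), pushing each as its tensor is created. The resulting layout for a hypothetical two-output unpack:

    // Illustrative layout after this change, for two outputs:
    //   buffers[0] : empty sentinel (TfLite convention for tensor.buffer == 0)
    //   buffers[1] : empty, referenced by the "input" tensor
    //   buffers[2] : empty, referenced by "output0"   (i + 2 with i == 0)
    //   buffers[3] : empty, referenced by "output1"   (i + 2 with i == 1)
    // Invariant: one buffer per tensor, plus the sentinel.
    assert(buffers.size() == tensors.size() + 1);   // illustrative; needs <cassert>
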
auto quantizationParameters =
CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +59,7 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
inputTensorShape.size()),
tensorType,
- 0,
+ 1,
flatBufferBuilder.CreateString("input"),
quantizationParameters);
@@ -67,10 +69,11 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
tensorType,
- 0,
+ (i + 2),
flatBufferBuilder.CreateString("output" + std::to_string(i)),
quantizationParameters);
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
operatorOutputs.push_back(i + 1);
subgraphOutputs.push_back(i + 1);
}
@@ -105,7 +108,7 @@ std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperator
flatBufferBuilder.CreateVector(&operatorCode, 1),
flatBufferBuilder.CreateVector(&subgraph, 1),
modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.CreateVector(buffers));
flatBufferBuilder.Finish(flatbufferModel);