author    Matthew Sloyan <matthew.sloyan@arm.com>    2021-04-15 20:46:24 +0100
committer Matthew Sloyan <matthew.sloyan@arm.com>    2021-04-15 20:51:23 +0100
commit    ed7fce413410d15c501ea52f9e6bfbbf71b3daf1 (patch)
tree      f72645fdb44fec0fee95e00bd6c6f51b404ed5c7
parent    86723e67a940ed9f288c3cb59c120b9a10a1d590 (diff)
IVGCVSW-5829 Segfault in tflite-parser, int8 models
* Updated ParseSplit TfLiteParser function to read the correct axis data.
* Improved validation in the ParseSplit and ParseSplitV functions.
* Added TensorFlow BOOL support to TfLiteParser.
* Added supported ElementwiseUnary operators to TfLiteParser, e.g. ABS, LOGICAL_NOT and RSQRT.
* Removed the ParseExp and ParseNeg function implementations in favour of a reusable ParseElementwiseUnary function.
* Removed the Exp.cpp and Neg.cpp files and moved their tests to ElementWiseUnary.cpp.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ibce36e3ce4d95755dda88abc2ddde1e07e62c5e2
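The core of the segfault fix is that the split axis is now read as signed 32-bit data, validated against the tensor rank, and only then converted to an unsigned dimension index. Below is a minimal standalone sketch of that idea, not the Arm NN code itself; the helper name NormalizeSplitAxis is illustrative only and does not exist in the Arm NN API.

    // Sketch: wrap a possibly negative split axis into [0, rank), rejecting
    // out-of-range values before any unsigned conversion can wrap around.
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    unsigned int NormalizeSplitAxis(int32_t axis, unsigned int rank)
    {
        const int32_t signedRank = static_cast<int32_t>(rank);
        // Valid axes lie in [-rank, rank), mirroring the ParseException added here.
        if (axis < -signedRank || axis >= signedRank)
        {
            throw std::invalid_argument(
                "Operation has invalid axis: " + std::to_string(axis) +
                ". Axis must be in range [-n, n)");
        }
        return static_cast<unsigned int>(axis < 0 ? axis + signedRank : axis);
    }

    int main()
    {
        // For a rank-4 tensor: axis -1 maps to dimension 3, axis 2 stays 2.
        unsigned int a = NormalizeSplitAxis(-1, 4);
        unsigned int b = NormalizeSplitAxis(2, 4);
        return (a == 3 && b == 2) ? 0 : 1;
    }

Copying the axis bytes into an int32_t buffer (rather than unsigned int, as before) is what prevents a negative axis from becoming a huge dimension index and reading out of bounds.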
-rw-r--r--  CMakeLists.txt                                   |   3
-rw-r--r--  docs/01_01_parsers.dox                           |   5
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp           | 164
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp           |   4
-rw-r--r--  src/armnnTfLiteParser/test/ElementWiseUnary.cpp  | 148
-rw-r--r--  src/armnnTfLiteParser/test/Exp.cpp               |  85
-rw-r--r--  src/armnnTfLiteParser/test/Neg.cpp               |  85
7 files changed, 263 insertions(+), 231 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7a1db7bf29..8878065478 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -820,7 +820,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Dequantize.cpp
src/armnnTfLiteParser/test/DetectionPostProcess.cpp
src/armnnTfLiteParser/test/Div.cpp
- src/armnnTfLiteParser/test/Exp.cpp
+ src/armnnTfLiteParser/test/ElementWiseUnary.cpp
src/armnnTfLiteParser/test/FullyConnected.cpp
src/armnnTfLiteParser/test/Gather.cpp
src/armnnTfLiteParser/test/L2Normalization.cpp
@@ -831,7 +831,6 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Mean.cpp
src/armnnTfLiteParser/test/Minimum.cpp
src/armnnTfLiteParser/test/Multiplication.cpp
- src/armnnTfLiteParser/test/Neg.cpp
src/armnnTfLiteParser/test/Pack.cpp
src/armnnTfLiteParser/test/Pad.cpp
src/armnnTfLiteParser/test/Reduce.cpp
diff --git a/docs/01_01_parsers.dox b/docs/01_01_parsers.dox
index 05e1225f93..6607921585 100644
--- a/docs/01_01_parsers.dox
+++ b/docs/01_01_parsers.dox
@@ -157,9 +157,10 @@ This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK
### Fully supported
The Arm NN SDK TensorFlow Lite parser currently supports the following operators:
+- ABS
- ADD
- ARG_MAX
-_ ARG_MIN
+- ARG_MIN
- AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
- BATCH_TO_SPACE
- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE
@@ -174,6 +175,7 @@ _ ARG_MIN
- GATHER
- HARD_SWISH
- LEAKY_RELU
+- LOGICAL_NOT
- LOGISTIC
- L2_NORMALIZATION
- MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
@@ -192,6 +194,7 @@ _ ARG_MIN
- RESHAPE
- RESIZE_BILINEAR
- RESIZE_NEAREST_NEIGHBOR
+- RSQRT
- SLICE
- SOFTMAX
- SPACE_TO_BATCH
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5f8b08bf85..a68839c20e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -394,6 +394,9 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
case tflite::TensorType_INT64:
type = armnn::DataType::Signed64;
break;
+ case tflite::TensorType_BOOL:
+ type = armnn::DataType::Boolean;
+ break;
default:
{
CheckLocation location = CHECK_LOCATION();
@@ -603,6 +606,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
// register supported operators
+ m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
@@ -622,6 +626,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
+ m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
@@ -640,6 +645,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
+ m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
@@ -1090,33 +1096,6 @@ void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorInde
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
-void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
-{
- CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
- auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(inputs.size(), 1);
-
- auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(outputs.size(), 1);
-
- auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);
-
- ElementwiseUnaryDescriptor desc;
- desc.m_Operation = UnaryOperation::Exp;
- IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
-
- TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
- auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -1917,31 +1896,6 @@ void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
-void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
-{
- CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
- auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(inputs.size(), 1);
-
- auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(outputs.size(), 1);
-
- auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
- armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
- IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
-
- TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
- auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -2758,15 +2712,35 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), numSplits);
- armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
- armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
+ armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
+ ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
- std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
+ if (axisBufferPtr == nullptr)
+ {
+ throw ParseException(
+ fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+ CHECK_LOCATION().AsString()));
+ }
+
+ std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
+ int32_t axis = axisData[0];
- ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
- const unsigned int splitDim = axisData[0];
+ auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+ if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+ {
+ // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+ // E.g. Rank 4 tensor can have axis in range [-4, 3)
+ // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+ throw ParseException(
+ fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+ axis,
+ CHECK_LOCATION().AsString()));
+ }
+
+ const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
auto inputDimSize = inputTensorInfo.GetNumDimensions();
if (inputDimSize > MaxNumOfTensorDimensions)
@@ -2863,9 +2837,29 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
// Get split axis
BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
+ if (axisBufferPtr == nullptr)
+ {
+ throw ParseException(
+ fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+ CHECK_LOCATION().AsString()));
+ }
+
std::vector<int> axisData(axisTensorInfo.GetNumElements());
::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
- const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
+ int32_t axis = axisData[0];
+
+ auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+ if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+ {
+ // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+ // E.g. Rank 4 tensor can have axis in range [-4, 3)
+ // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+ throw ParseException(
+ fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+ axis,
+ CHECK_LOCATION().AsString()));
+ }
+ const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
// Set split sizes
CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
@@ -2988,6 +2982,7 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
// Check if output tensor type is Signed32 or Signed64
if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
@@ -3210,6 +3205,59 @@ void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, R
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
+void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
+}
+
+void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
+}
+
+void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
+}
+
+void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
+}
+
+void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
+}
+
+void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
+ std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
+
+ ElementwiseUnaryDescriptor desc;
+ desc.m_Operation = unaryOperation;
+ IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
+ ARMNN_ASSERT(layer != nullptr);
+
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
unsigned int outputSlot,
tflite::ActivationFunctionType activationType)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index af844d2ef3..0aee07dc21 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -97,6 +97,7 @@ private:
void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex);
void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
+ void ParseAbs(size_t subgraphIndex, size_t operatorIndex);
void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
void ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, armnn::ArgMinMaxFunction argMinMaxFunction);
@@ -112,12 +113,14 @@ private:
void ParseDequantize(size_t subgraphIndex, size_t operatorIndex);
void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
void ParseDiv(size_t subgraphIndex, size_t operatorIndex);
+ void ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, armnn::UnaryOperation unaryOperation);
void ParseElu(size_t subgraphIndex, size_t operatorIndex);
void ParseExp(size_t subgraphIndex, size_t operatorIndex);
void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
void ParseGather(size_t subgraphIndex, size_t operatorIndex);
void ParseHardSwish(size_t subgraphIndex, size_t operatorIndex);
void ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex);
+ void ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex);
void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
void ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex);
void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
@@ -139,6 +142,7 @@ private:
void ParseResize(size_t subgraphIndex, size_t operatorIndex, armnn::ResizeMethod resizeMethod);
void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
void ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex);
+ void ParseRsqrt(size_t subgraphIndex, size_t operatorIndex);
void ParseSlice(size_t subgraphIndex, size_t operatorIndex);
void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
new file mode 100644
index 0000000000..dc236d2637
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
@@ -0,0 +1,148 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct ElementWiseUnaryFixture : public ParserFlatbuffersFixture
+{
+ explicit ElementWiseUnaryFixture(const std::string& operatorCode,
+ const std::string& dataType,
+ const std::string& inputShape,
+ const std::string& outputShape)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": )" + operatorCode + R"( } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": )" + dataType + R"( ,
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"( ,
+ "type": )" + dataType + R"( ,
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { }
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SimpleAbsFixture : public ElementWiseUnaryFixture
+{
+ SimpleAbsFixture() : ElementWiseUnaryFixture("ABS", "FLOAT32", "[ 2, 2 ]", "[ 2, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAbs, SimpleAbsFixture)
+{
+ std::vector<float> inputValues
+ {
+ -0.1f, 0.2f,
+ 0.3f, -0.4f
+ };
+
+ // Calculate output data
+ std::vector<float> expectedOutputValues(inputValues.size());
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ expectedOutputValues[i] = std::abs(inputValues[i]);
+ }
+
+ RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { inputValues } }},
+ {{ "outputTensor",{ expectedOutputValues } } });
+}
+
+struct SimpleExpFixture : public ElementWiseUnaryFixture
+{
+ SimpleExpFixture() : ElementWiseUnaryFixture("EXP", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f} }},
+ {{ "outputTensor",{ 1.0f, 2.718281f, 7.3890515f,
+ 20.0855185f, 54.5980834f, 148.4129329f} } });
+}
+
+struct SimpleLogicalNotFixture : public ElementWiseUnaryFixture
+{
+ SimpleLogicalNotFixture() : ElementWiseUnaryFixture("LOGICAL_NOT", "BOOL", "[ 1, 1, 1, 4 ]", "[ 1, 1, 1, 4 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseLogicalNot, SimpleLogicalNotFixture)
+{
+ RunTest<4, armnn::DataType::Boolean>(0, {{ "inputTensor", { 0, 1, 0, 1 } }},
+ {{ "outputTensor",{ 1, 0, 1, 0 } } });
+}
+
+struct SimpleNegFixture : public ElementWiseUnaryFixture
+{
+ SimpleNegFixture() : ElementWiseUnaryFixture("NEG", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
+ 20.0855185f, -54.5980834f, 5.0f} }},
+ {{ "outputTensor",{ 0.0f, -1.0f, 2.0f,
+ -20.0855185f, 54.5980834f, -5.0f} }});
+}
+
+struct SimpleRsqrtFixture : public ElementWiseUnaryFixture
+{
+ SimpleRsqrtFixture() : ElementWiseUnaryFixture("RSQRT", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseRsqrt, SimpleRsqrtFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 4.0f, 16.0f,
+ 25.0f, 64.0f, 100.0f } }},
+ {{ "outputTensor",{ 1.0f, 0.5f, 0.25f,
+ 0.2f, 0.125f, 0.1f} }});
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Exp.cpp b/src/armnnTfLiteParser/test/Exp.cpp
deleted file mode 100644
index 168cc45dd9..0000000000
--- a/src/armnnTfLiteParser/test/Exp.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <boost/test/unit_test.hpp>
-#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
-
-#include <string>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-struct ExpFixture : public ParserFlatbuffersFixture
-{
- explicit ExpFixture(const std::string & inputShape,
- const std::string & outputShape)
- {
- m_JsonString = R"(
- {
- "version": 3,
- "operator_codes": [ { "builtin_code": "EXP" } ],
- "subgraphs": [ {
- "tensors": [
- {
- "shape": )" + inputShape + R"(,
- "type": "FLOAT32",
- "buffer": 0,
- "name": "inputTensor",
- "quantization": {
- "min": [ 0.0 ],
- "max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
- }
- },
- {
- "shape": )" + outputShape + R"( ,
- "type": "FLOAT32",
- "buffer": 1,
- "name": "outputTensor",
- "quantization": {
- "min": [ 0.0 ],
- "max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
- }
- }
- ],
- "inputs": [ 0 ],
- "outputs": [ 1 ],
- "operators": [
- {
- "opcode_index": 0,
- "inputs": [ 0 ],
- "outputs": [ 1 ],
- "custom_options_format": "FLEXBUFFERS"
- }
- ],
- } ],
- "buffers" : [
- { },
- { }
- ]
- }
- )";
- Setup();
- }
-};
-
-struct SimpleExpFixture : public ExpFixture
-{
- SimpleExpFixture() : ExpFixture("[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
-{
- using armnn::DataType;
- RunTest<4, DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f} }},
- {{ "outputTensor", { 1.0f, 2.718281f, 7.3890515f,
- 20.0855185f, 54.5980834f, 148.4129329f} } });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Neg.cpp b/src/armnnTfLiteParser/test/Neg.cpp
deleted file mode 100644
index 39e1f9e82c..0000000000
--- a/src/armnnTfLiteParser/test/Neg.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <boost/test/unit_test.hpp>
-#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
-
-#include <string>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-struct NegFixture : public ParserFlatbuffersFixture
-{
- explicit NegFixture(const std::string & inputShape,
- const std::string & outputShape)
- {
- m_JsonString = R"(
- {
- "version": 3,
- "operator_codes": [ { "builtin_code": "NEG" } ],
- "subgraphs": [ {
- "tensors": [
- {
- "shape": )" + inputShape + R"(,
- "type": "FLOAT32",
- "buffer": 0,
- "name": "inputTensor",
- "quantization": {
- "min": [ 0.0 ],
- "max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
- }
- },
- {
- "shape": )" + outputShape + R"( ,
- "type": "FLOAT32",
- "buffer": 1,
- "name": "outputTensor",
- "quantization": {
- "min": [ 0.0 ],
- "max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
- }
- }
- ],
- "inputs": [ 0 ],
- "outputs": [ 1 ],
- "operators": [
- {
- "opcode_index": 0,
- "inputs": [ 0 ],
- "outputs": [ 1 ],
- "custom_options_format": "FLEXBUFFERS"
- }
- ],
- } ],
- "buffers" : [
- { },
- { }
- ]
- }
- )";
- Setup();
- }
-};
-
-struct SimpleNegFixture : public NegFixture
-{
- SimpleNegFixture() : NegFixture("[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
-{
- using armnn::DataType;
- RunTest<4, DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
- 20.0855185f, -54.5980834f, 5.0f} }},
- {{ "outputTensor",{ 0.0f, -1.0f, 2.0f,
- -20.0855185f, 54.5980834f, -5.0f} }});
-}
-
-BOOST_AUTO_TEST_SUITE_END()