From ed7fce413410d15c501ea52f9e6bfbbf71b3daf1 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 15 Apr 2021 20:46:24 +0100
Subject: IVGCVSW-5829 Segfault in tflite-parser, int8 models

* Updated ParseSplit TfLiteParser function to read correct axis data.
* Improved validation in ParseSplit and ParseSplitV functions.
* Added TensorFlow BOOL support to TfLiteParser.
* Added supported ElementWiseUnary operators to TfLiteParser,
  e.g. ABS, LOGICAL_NOT and RSQRT.
* Removed ParseExp and ParseNeg function implementations in favour of
  a reusable ParseElementWiseUnary function.
* Removed Exp.cpp and Neg.cpp files and moved tests to ElementWiseUnary.cpp.

Signed-off-by: Matthew Sloyan
Change-Id: Ibce36e3ce4d95755dda88abc2ddde1e07e62c5e2
---
 src/armnnTfLiteParser/TfLiteParser.cpp          | 164 +++++++++++++++---------
 src/armnnTfLiteParser/TfLiteParser.hpp          |   4 +
 src/armnnTfLiteParser/test/ElementWiseUnary.cpp | 148 +++++++++++++++++++++++
 src/armnnTfLiteParser/test/Exp.cpp              |  85 ------------
 src/armnnTfLiteParser/test/Neg.cpp              |  85 ------------
 5 files changed, 258 insertions(+), 228 deletions(-)
 create mode 100644 src/armnnTfLiteParser/test/ElementWiseUnary.cpp
 delete mode 100644 src/armnnTfLiteParser/test/Exp.cpp
 delete mode 100644 src/armnnTfLiteParser/test/Neg.cpp

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5f8b08bf85..a68839c20e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -394,6 +394,9 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
         case tflite::TensorType_INT64:
             type = armnn::DataType::Signed64;
             break;
+        case tflite::TensorType_BOOL:
+            type = armnn::DataType::Boolean;
+            break;
         default:
         {
             CheckLocation location = CHECK_LOCATION();
@@ -603,6 +606,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
-    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
-
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
-    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
-    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
 void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -1917,31 +1896,6 @@ void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
-{
-    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
-    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
-    CHECK_VALID_SIZE(inputs.size(), 1);
-
-    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
-    CHECK_VALID_SIZE(outputs.size(), 1);
-
-    auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
-    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
-    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
-
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
-    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
-    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
 void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -2758,15 +2712,35 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), numSplits);
 
-    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
-    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
+    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
+    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
 
     BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
-    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
+    if (axisBufferPtr == nullptr)
+    {
+        throw ParseException(
+            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+                        CHECK_LOCATION().AsString()));
+    }
+
+    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
     ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
+    int32_t axis = axisData[0];
 
-    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
-    const unsigned int splitDim = axisData[0];
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 4)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        throw ParseException(
+            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+                        axis,
+                        CHECK_LOCATION().AsString()));
+    }
+
+    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
 
     auto inputDimSize = inputTensorInfo.GetNumDimensions();
     if (inputDimSize > MaxNumOfTensorDimensions)
@@ -2863,9 +2837,29 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
 
     // Get split axis
     BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
+    if (axisBufferPtr == nullptr)
+    {
+        throw ParseException(
+            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+                        CHECK_LOCATION().AsString()));
+    }
+
     std::vector<int> axisData(axisTensorInfo.GetNumElements());
     ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
-    const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
+    int32_t axis = axisData[0];
+
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 4)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        throw ParseException(
+            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+                        axis,
+                        CHECK_LOCATION().AsString()));
+    }
+    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
 
     // Set split sizes
     CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
@@ -2988,6 +2982,7 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
     armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
     armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
 
     // Check if output tensor type is Signed32 or Signed64
     if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
@@ -3210,6 +3205,59 @@ void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, R
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
+void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
+}
+
+void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
+}
+
+void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
+}
+
+void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
+}
+
+void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
+}
+
+void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
+    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
+
+    ElementwiseUnaryDescriptor desc;
+    desc.m_Operation = unaryOperation;
+    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
+    ARMNN_ASSERT(layer != nullptr);
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
                                                                     unsigned int outputSlot,
                                                                     tflite::ActivationFunctionType activationType)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index af844d2ef3..0aee07dc21 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -97,6 +97,7 @@ private:
     void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex);
     void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
+    void ParseAbs(size_t subgraphIndex, size_t operatorIndex);
     void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
     void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
     void ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, armnn::ArgMinMaxFunction argMinMaxFunction);
@@ -112,12 +113,14 @@ private:
     void ParseDequantize(size_t subgraphIndex, size_t operatorIndex);
     void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
     void ParseDiv(size_t subgraphIndex, size_t operatorIndex);
+    void ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, armnn::UnaryOperation unaryOperation);
     void ParseElu(size_t subgraphIndex, size_t operatorIndex);
     void ParseExp(size_t subgraphIndex, size_t operatorIndex);
     void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
     void ParseGather(size_t subgraphIndex, size_t operatorIndex);
     void ParseHardSwish(size_t subgraphIndex, size_t operatorIndex);
     void ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex);
+    void ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex);
     void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
     void ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex);
     void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
@@ -139,6 +142,7 @@ private:
     void ParseResize(size_t subgraphIndex, size_t operatorIndex, armnn::ResizeMethod resizeMethod);
     void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
     void ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex);
+    void ParseRsqrt(size_t subgraphIndex, size_t operatorIndex);
     void ParseSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
new file mode 100644
index 0000000000..dc236d2637
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
@@ -0,0 +1,148 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct ElementWiseUnaryFixture : public ParserFlatbuffersFixture
+{
+    explicit ElementWiseUnaryFixture(const std::string& operatorCode,
+                                     const std::string& dataType,
+                                     const std::string& inputShape,
+                                     const std::string& outputShape)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": )" + operatorCode + R"( } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": )" + dataType + R"( ,
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": )" + dataType + R"( ,
+                            "buffer": 1,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0 ],
+                            "outputs": [ 1 ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimpleAbsFixture : public ElementWiseUnaryFixture
+{
+    SimpleAbsFixture() : ElementWiseUnaryFixture("ABS", "FLOAT32", "[ 2, 2 ]", "[ 2, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAbs, SimpleAbsFixture)
+{
+    std::vector<float> inputValues
+    {
+        -0.1f, 0.2f,
+        0.3f, -0.4f
+    };
+
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+
+    RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { inputValues } }},
+                                            {{ "outputTensor",{ expectedOutputValues } } });
+}
+
+struct SimpleExpFixture : public ElementWiseUnaryFixture
+{
+    SimpleExpFixture() : ElementWiseUnaryFixture("EXP", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, 2.0f,
+                                                                3.0f, 4.0f, 5.0f} }},
+                                            {{ "outputTensor",{ 1.0f, 2.718281f, 7.3890515f,
+                                                                20.0855185f, 54.5980834f, 148.4129329f} } });
+}
+
+struct SimpleLogicalNotFixture : public ElementWiseUnaryFixture
+{
+    SimpleLogicalNotFixture() : ElementWiseUnaryFixture("LOGICAL_NOT", "BOOL", "[ 1, 1, 1, 4 ]", "[ 1, 1, 1, 4 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseLogicalNot, SimpleLogicalNotFixture)
+{
+    RunTest<4, armnn::DataType::Boolean>(0, {{ "inputTensor", { 0, 1, 0, 1 } }},
+                                            {{ "outputTensor",{ 1, 0, 1, 0 } } });
+}
+
+struct SimpleNegFixture : public ElementWiseUnaryFixture
+{
+    SimpleNegFixture() : ElementWiseUnaryFixture("NEG", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
+                                                                20.0855185f, -54.5980834f, 5.0f} }},
+                                            {{ "outputTensor",{ 0.0f, -1.0f, 2.0f,
+                                                                -20.0855185f, 54.5980834f, -5.0f} }});
+}
+
+struct SimpleRsqrtFixture : public ElementWiseUnaryFixture
+{
+    SimpleRsqrtFixture() : ElementWiseUnaryFixture("RSQRT", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseRsqrt, SimpleRsqrtFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 4.0f, 16.0f,
+                                                                25.0f, 64.0f, 100.0f } }},
+                                            {{ "outputTensor",{ 1.0f, 0.5f, 0.25f,
+                                                                0.2f, 0.125f, 0.1f} }});
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Exp.cpp b/src/armnnTfLiteParser/test/Exp.cpp
deleted file mode 100644
index 168cc45dd9..0000000000
--- a/src/armnnTfLiteParser/test/Exp.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <boost/test/unit_test.hpp>
-#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
-
-#include <string>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-struct ExpFixture : public ParserFlatbuffersFixture
-{
-    explicit ExpFixture(const std::string & inputShape,
-                        const std::string & outputShape)
-    {
-        m_JsonString = R"(
-            {
-                "version": 3,
-                "operator_codes": [ { "builtin_code": "EXP" } ],
-                "subgraphs": [ {
-                    "tensors": [
-                        {
-                            "shape": )" + inputShape + R"(,
-                            "type": "FLOAT32",
-                            "buffer": 0,
-                            "name": "inputTensor",
-                            "quantization": {
-                                "min": [ 0.0 ],
-                                "max": [ 255.0 ],
-                                "scale": [ 1.0 ],
-                                "zero_point": [ 0 ],
-                            }
-                        },
-                        {
-                            "shape": )" + outputShape + R"( ,
-                            "type": "FLOAT32",
-                            "buffer": 1,
-                            "name": "outputTensor",
-                            "quantization": {
-                                "min": [ 0.0 ],
-                                "max": [ 255.0 ],
-                                "scale": [ 1.0 ],
-                                "zero_point": [ 0 ],
-                            }
-                        }
-                    ],
-                    "inputs": [ 0 ],
-                    "outputs": [ 1 ],
-                    "operators": [
-                        {
-                            "opcode_index": 0,
-                            "inputs": [ 0 ],
-                            "outputs": [ 1 ],
-                            "custom_options_format": "FLEXBUFFERS"
-                        }
-                    ],
-                } ],
-                "buffers" : [
-                    { },
-                    { }
-                ]
-            }
-        )";
-        Setup();
-    }
-};
-
-struct SimpleExpFixture : public ExpFixture
-{
-    SimpleExpFixture() : ExpFixture("[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
-{
-    using armnn::DataType;
-    RunTest<4, DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, 2.0f,
-                                                         3.0f, 4.0f, 5.0f} }},
-                                     {{ "outputTensor", { 1.0f, 2.718281f, 7.3890515f,
-                                                          20.0855185f, 54.5980834f, 148.4129329f} } });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Neg.cpp b/src/armnnTfLiteParser/test/Neg.cpp
deleted file mode 100644
index 39e1f9e82c..0000000000
--- a/src/armnnTfLiteParser/test/Neg.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <boost/test/unit_test.hpp>
-#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
-
-#include <string>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-struct NegFixture : public ParserFlatbuffersFixture
-{
-    explicit NegFixture(const std::string & inputShape,
-                        const std::string & outputShape)
-    {
-        m_JsonString = R"(
-            {
-                "version": 3,
-                "operator_codes": [ { "builtin_code": "NEG" } ],
-                "subgraphs": [ {
-                    "tensors": [
-                        {
-                            "shape": )" + inputShape + R"(,
-                            "type": "FLOAT32",
-                            "buffer": 0,
-                            "name": "inputTensor",
-                            "quantization": {
-                                "min": [ 0.0 ],
-                                "max": [ 255.0 ],
-                                "scale": [ 1.0 ],
-                                "zero_point": [ 0 ],
-                            }
-                        },
-                        {
-                            "shape": )" + outputShape + R"( ,
-                            "type": "FLOAT32",
-                            "buffer": 1,
-                            "name": "outputTensor",
-                            "quantization": {
-                                "min": [ 0.0 ],
-                                "max": [ 255.0 ],
-                                "scale": [ 1.0 ],
-                                "zero_point": [ 0 ],
-                            }
-                        }
-                    ],
-                    "inputs": [ 0 ],
-                    "outputs": [ 1 ],
-                    "operators": [
-                        {
-                            "opcode_index": 0,
-                            "inputs": [ 0 ],
-                            "outputs": [ 1 ],
-                            "custom_options_format": "FLEXBUFFERS"
-                        }
-                    ],
-                } ],
-                "buffers" : [
-                    { },
-                    { }
-                ]
-            }
-        )";
-        Setup();
-    }
-};
-
-struct SimpleNegFixture : public NegFixture
-{
-    SimpleNegFixture() : NegFixture("[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
-{
-    using armnn::DataType;
-    RunTest<4, DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
-                                                         20.0855185f, -54.5980834f, 5.0f} }},
-                                     {{ "outputTensor",{ 0.0f, -1.0f, 2.0f,
-                                                         -20.0855185f, 54.5980834f, -5.0f} }});
-}
-
-BOOST_AUTO_TEST_SUITE_END()
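
The axis handling this patch adds to ParseSplit and ParseSplitV reduces to one rule: a signed TfLite axis is only valid in the half-open range [-n, n) for a rank-n input, and a negative axis wraps to n + axis. The standalone sketch below illustrates that rule outside the parser; it is illustrative only, the helper name and exception type are placeholders rather than Arm NN APIs, and the patch itself performs the check inline before calling armnnUtils::GetUnsignedAxis (ParseSplit) or ComputeWrappedIndex (ParseSplitV).

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    // Illustrative only: mirrors the validation and wrapping behaviour the patch
    // adds around the SPLIT/SPLIT_V axis input (the real code throws
    // armnn::ParseException and uses armnnUtils::GetUnsignedAxis).
    unsigned int WrapSplitAxis(int32_t axis, unsigned int rank)
    {
        const int32_t signedRank = static_cast<int32_t>(rank);

        // Valid axes for a rank-n tensor lie in the half-open range [-n, n).
        if (axis < -signedRank || axis >= signedRank)
        {
            throw std::invalid_argument("axis outside [-n, n)");
        }

        // Negative axes count back from the last dimension, so -1 maps to n - 1.
        return static_cast<unsigned int>(axis < 0 ? axis + signedRank : axis);
    }

    int main()
    {
        // For a rank-4 tensor: -1 wraps to 3, -4 wraps to 0, and 3 is already valid.
        std::printf("%u %u %u\n", WrapSplitAxis(-1, 4), WrapSplitAxis(-4, 4), WrapSplitAxis(3, 4));
        return 0;
    }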