aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan OShea <ryan.oshea3@arm.com>2023-01-25 18:10:20 +0000
committerryan.oshea3 <ryan.oshea3@arm.com>2023-02-21 14:36:56 +0000
commita544f0f5d01ea980ca86e1e13e2530fea4fddcd2 (patch)
treedead6db771d8d78f1e797d3a556586bd9f5129af
parentb2293702c16d107ac1ad80cfac9bd84d804f55d4 (diff)
downloadarmnn-a544f0f5d01ea980ca86e1e13e2530fea4fddcd2.tar.gz
MLCE-753 Expand Tensorshape for relevant layers before verifying support
Previously we were adding a reshape layer to "broadcast" tensors for elementwise operations. This broadcast was happening too late and was really just an expand dims. This was breaking the constant attributes of tensors and layer support of certain backends. * Remove addition of reshape layer when expanding dimensions * Replace broadcast function with expand dims to equal rank function * Fix some error status checks in various layers * Add new TensorUtil function that expands dims to a defined rank * Add unit tests to new TensorUtil function Signed-off-by: Ryan OShea <ryan.oshea3@arm.com> Change-Id: I31aca47c98075fef4f86864a15470f5faa55ab8d
-rw-r--r--delegate/src/Comparison.hpp23
-rw-r--r--delegate/src/Control.hpp5
-rw-r--r--delegate/src/Convolution.hpp16
-rw-r--r--delegate/src/DelegateUtils.hpp79
-rw-r--r--delegate/src/ElementwiseBinary.hpp15
-rw-r--r--delegate/src/Gather.hpp6
-rw-r--r--delegate/src/GatherNd.hpp6
-rw-r--r--delegate/src/LogicalBinary.hpp22
-rw-r--r--delegate/src/test/ElementwiseBinaryTest.cpp6
-rw-r--r--include/armnnUtils/TensorUtils.hpp2
-rw-r--r--src/armnnUtils/TensorUtils.cpp25
-rw-r--r--src/armnnUtils/test/TensorUtilsTest.cpp85
12 files changed, 158 insertions, 132 deletions
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 80354e835d..688f90c597 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -57,10 +57,17 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
return kTfLiteError;
}
- const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
switch(tfLiteComparisonOperatorCode)
{
@@ -122,17 +129,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
return kTfLiteError;
}
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- comparisonLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
- {
- return kTfLiteError;
- }
- return kTfLiteOk;
+ return Connect(comparisonLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 17f23d81ad..a3ea6e92a7 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -172,7 +172,10 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(concatenationLayer, tfLiteNode, delegateData);
+ if(Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (activationType == kTfLiteActNone)
{
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index a8559e2548..31cb2ab9ac 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -222,7 +222,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (!tfLiteNodeParameters)
{
@@ -408,7 +411,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
if (!tfLiteNodeParameters)
{
@@ -624,7 +630,11 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
if (!tfLiteNodeParameters)
{
// No Activation
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 3e74225b15..1aa9029271 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -13,6 +13,7 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -188,91 +189,25 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer,
return kTfLiteOk;
}
-armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
- const armnn::TensorInfo& inputInfo1,
- armnn::IConnectableLayer* startLayer,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- armnnDelegate::DelegateData& delegateData)
+void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
+ armnn::TensorInfo& inputInfo1)
{
unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
if (inputDimensions0 == inputDimensions1)
{
- auto status = Connect(startLayer, tfLiteNode, delegateData);
- return status == kTfLiteOk ? startLayer : nullptr;
+ return;
}
unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
- unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
- armnn::numeric_cast<int>(inputDimensions1)));
bool input0IsSmaller = inputDimensions0 < inputDimensions1;
- const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
- const armnn::TensorShape& smallShape = smallInfo.GetShape();
-
- std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
- for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
- {
- reshapedDimensions[i] = smallShape[i - dimDifference];
- }
-
- armnn::TensorInfo reshapedInfo = smallInfo;
- reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
- reshapedDimensions.data() });
-
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- smallInfo,
- reshapedInfo,
- reshapeDescriptor);
- if (!isSupported)
- {
- return nullptr;
- }
+ armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
+ const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
- ARMNN_ASSERT(delegateData.m_Network != nullptr);
- // Add Reshape layer
- armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
- reshapeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(reshapeLayer != nullptr);
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+ smallInfo.SetShape(newShape);
- if (input0IsSmaller)
- {
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
- ->Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
- ->Connect(startLayer->GetInputSlot(1));
- }
- else
- {
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
- ->Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
- ->Connect(startLayer->GetInputSlot(0));
- }
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
- delegateData.m_OutputSlotForNode
- [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
-
- return reshapeLayer;
}
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 8096acfefb..52c6b2434b 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -254,6 +254,13 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
TfLiteFusedActivation activationType = kTfLiteActNone;
if (tfLiteNodeParameters)
@@ -363,13 +370,7 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- elementwiseBinaryLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
+ if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 9e98966471..9125997417 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -101,8 +101,6 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- Connect(layer, tfLiteNode, delegateData);
-
- return kTfLiteOk;
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index f2192f77c3..cf526e1995 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -77,8 +77,6 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- Connect(layer, tfLiteNode, delegateData);
-
- return kTfLiteOk;
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index b6a8f5d5f6..d71618ee9c 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,6 +46,13 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check if we need to expand the dims of any of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
// Setup descriptor and assign operation
armnn::LogicalBinaryDescriptor desc;
desc.m_Operation = binaryOperation;
@@ -89,18 +96,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
return inputsTensorsProcess;
}
- // LogicalBinary operators support broadcasting
- auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
- inputTensorInfo1,
- logicalBinaryLayer,
- tfLiteContext,
- tfLiteNode,
- delegateData);
- if (!reshapeLayer)
- {
- return kTfLiteError;
- }
- return kTfLiteOk;
+ return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index 9d03204263..8099efebff 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -885,7 +885,7 @@ TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
AddBroadcastTest(backends);
}
-TEST_CASE ("ADD_Actiation_CpuAcc_Test")
+TEST_CASE ("ADD_Activation_CpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
AddActivationTest(backends);
@@ -1017,7 +1017,7 @@ TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
AddConstInputTest(backends);
}
-TEST_CASE ("ADD_Actiation_CpuRef_Test")
+TEST_CASE ("ADD_Activation_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
AddActivationTest(backends);
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index a2aa9b0a98..7bf41c1ac8 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -48,6 +48,8 @@ armnn::TensorInfo ReduceDims(const armnn::TensorInfo& tensorInfo, unsigned int d
armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis);
+armnn::TensorShape ExpandDimsToRank(const armnn::TensorShape& tensorShape, unsigned int rank);
+
std::vector<unsigned int> SqueezeDims(const armnn::TensorShape& tensorShape);
unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 03109e0cee..cb73d92ef8 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -165,6 +165,31 @@ TensorShape ExpandDims(const TensorShape& tensorShape, int axis)
return { outputDim, outputShape.data() };
}
+TensorShape ExpandDimsToRank(const TensorShape& tensorShape, unsigned int rank)
+{
+ // Can't expand if rank is smaller than current shape
+ if (tensorShape.GetNumDimensions() >= rank)
+ {
+ return tensorShape;
+ }
+
+ std::vector<unsigned int> newShape;
+
+ // First add 1s to the beginning of the tensorInfo to fill in the space
+ for (unsigned int i = 0; i < rank - tensorShape.GetNumDimensions(); ++i)
+ {
+ newShape.push_back(1);
+ }
+
+ // Then iterate through the original shape and append it to the new shape with the added 1s
+ for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
+ {
+ newShape.push_back(tensorShape[i]);
+ }
+
+ return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
+}
+
std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
{
std::vector<unsigned int> squeezedDims;
diff --git a/src/armnnUtils/test/TensorUtilsTest.cpp b/src/armnnUtils/test/TensorUtilsTest.cpp
index a69a0098ce..ed21bbe93c 100644
--- a/src/armnnUtils/test/TensorUtilsTest.cpp
+++ b/src/armnnUtils/test/TensorUtilsTest.cpp
@@ -126,11 +126,79 @@ TEST_CASE("ExpandDimsInvalidAxisTest")
CHECK_THROWS_AS(ExpandDims(inputShape, 4), armnn::InvalidArgumentException);
}
+TEST_CASE("ExpandDimsInvalidNegativeAxisTest")
+{
+ armnn::TensorShape inputShape({ 2, 3, 4 });
+
+ // Invalid expand dimension -5
+ CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("ExpandDimsBy1Rank")
+{
+ armnn::TensorShape inputShape({ 2, 3, 4 });
+
+ // Expand by 1 dimension
+ armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+ CHECK(outputShape.GetNumDimensions() == 4);
+ CHECK(outputShape[0] == 1);
+ CHECK(outputShape[1] == 2);
+ CHECK(outputShape[2] == 3);
+ CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsBy2Ranks")
+{
+ armnn::TensorShape inputShape({ 3, 4 });
+
+ // Expand 2 dimensions
+ armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+ CHECK(outputShape.GetNumDimensions() == 4);
+ CHECK(outputShape[0] == 1);
+ CHECK(outputShape[1] == 1);
+ CHECK(outputShape[2] == 3);
+ CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsBy3Ranks")
+{
+ armnn::TensorShape inputShape({ 4 });
+
+ // Expand 3 dimensions
+ armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+ CHECK(outputShape.GetNumDimensions() == 4);
+ CHECK(outputShape[0] == 1);
+ CHECK(outputShape[1] == 1);
+ CHECK(outputShape[2] == 1);
+ CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsInvalidRankAmount")
+{
+ armnn::TensorShape inputShape({ 2, 3, 4 });
+
+ // Don't expand because target rank is smaller than current rank
+ armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 2);
+ CHECK(outputShape.GetNumDimensions() == 3);
+ CHECK(outputShape[0] == 2);
+ CHECK(outputShape[1] == 3);
+ CHECK(outputShape[2] == 4);
+}
+
+TEST_CASE("ExpandDimsToRankInvalidTensorShape")
+{
+ armnn::TensorShape inputShape({ 2, 3, 4 });
+
+ // Throw exception because rank 6 tensors are unsupported by armnn
+ CHECK_THROWS_AS(ExpandDimsToRank(inputShape, 6), armnn::InvalidArgumentException);
+}
+
+
TEST_CASE("ReduceDimsShapeAll1s")
{
armnn::TensorShape inputShape({ 1, 1, 1 });
- // Invalid expand dimension 4
+ // Reduce dimension 2
armnn::TensorShape outputShape = ReduceDims(inputShape, 2);
CHECK(outputShape.GetNumDimensions() == 2);
CHECK(outputShape[0] == 1);
@@ -141,7 +209,7 @@ TEST_CASE("ReduceDimsShapeNotEnough1s")
{
armnn::TensorShape inputShape({ 1, 2, 1 });
- // Invalid expand dimension 4
+ // Reduce dimension 1
armnn::TensorShape outputShape = ReduceDims(inputShape, 1);
CHECK(outputShape.GetNumDimensions() == 2);
CHECK(outputShape[0] == 2);
@@ -152,7 +220,7 @@ TEST_CASE("ReduceDimsInfoAll1s")
{
armnn::TensorInfo inputInfo({ 1, 1, 1 }, DataType::Float32);
- // Invalid expand dimension 4
+ // Reduce dimension 2
armnn::TensorInfo outputInfo = ReduceDims(inputInfo, 2);
CHECK(outputInfo.GetShape().GetNumDimensions() == 2);
CHECK(outputInfo.GetShape()[0] == 1);
@@ -163,7 +231,7 @@ TEST_CASE("ReduceDimsInfoNotEnough1s")
{
armnn::TensorInfo inputInfo({ 1, 2, 1 }, DataType::Float32);
- // Invalid expand dimension 4
+ // Reduce dimension 1
armnn::TensorInfo outputInfo = ReduceDims(inputInfo, 1);
CHECK(outputInfo.GetNumDimensions() == 2);
CHECK(outputInfo.GetShape()[0] == 2);
@@ -174,7 +242,7 @@ TEST_CASE("ReduceDimsShapeDimensionGreaterThanSize")
{
armnn::TensorShape inputShape({ 1, 1, 1 });
- // Invalid expand dimension 4
+ // Do not reduce because dimension does not exist
armnn::TensorShape outputShape = ReduceDims(inputShape, 4);
CHECK(outputShape.GetNumDimensions() == 3);
CHECK(outputShape[0] == 1);
@@ -182,13 +250,6 @@ TEST_CASE("ReduceDimsShapeDimensionGreaterThanSize")
CHECK(outputShape[2] == 1);
}
-TEST_CASE("ExpandDimsInvalidNegativeAxisTest")
-{
- armnn::TensorShape inputShape({ 2, 3, 4 });
-
- // Invalid expand dimension -5
- CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
-}
TEST_CASE("ToFloatArrayInvalidDataType")
{