-rw-r--r--  1.0/HalPolicy.cpp      61
-rw-r--r--  1.1/HalPolicy.cpp     132
-rw-r--r--  1.2/HalPolicy.cpp     191
-rw-r--r--  Android.mk              3
-rw-r--r--  ConversionUtils.hpp    78
-rw-r--r--  OutputShapeUtils.cpp  199
-rw-r--r--  OutputShapeUtils.hpp   40
7 files changed, 169 insertions(+), 535 deletions(-)
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 8dd603a6..db0e2a2d 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -8,7 +8,6 @@
#include <armnn/Optional.hpp>
#include "FullyConnected.hpp"
-#include "OutputShapeUtils.hpp"
#include "Utils.hpp"
namespace armnn_driver
@@ -122,7 +121,7 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outputInfo))
{
- return Fail("%s: Dynamic output shapes are not supported in this HAL version", __func__);
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -418,11 +417,10 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
return Fail("%s: Operation has invalid outputs", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from input");
- outputInfo.SetShape(input.GetTensorInfo().GetShape());
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -441,12 +439,7 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
@@ -465,13 +458,19 @@ bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, Con
return Fail("%s: Operation has invalid outputs", __func__);
}
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsFloorSupported,
data.m_Backends,
isSupported,
input.GetTensorInfo(),
- GetTensorInfoForOperand(*outputOperand));
+ outputInfo);
if (!isSupported)
{
return false;
@@ -500,13 +499,12 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
return Fail("%s: Could not read output 0", __func__);
}
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(inputInfo.GetShape());
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
// ArmNN does not currently support non-fixed weights or bias
@@ -581,12 +579,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
input.Connect(startLayer->GetInputSlot(0));
}
- return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
}
else
{
@@ -1036,12 +1029,11 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(inputInfo.GetShape());
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
armnn::L2NormalizationDescriptor desc;
@@ -1064,12 +1056,7 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
@@ -1177,11 +1164,10 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return Fail("%s: Operation has no outputs", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from input");
- outputInfo.SetShape(input.GetTensorInfo().GetShape());
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
armnn::SoftmaxDescriptor desc;
@@ -1207,12 +1193,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index bbd289ee..ab8224a0 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -5,7 +5,6 @@
#include "HalPolicy.hpp"
-#include "OutputShapeUtils.hpp"
#include "Utils.hpp"
#include "../1.0/HalPolicy.hpp"
@@ -124,13 +123,17 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!outputOperand)
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
{
- return false;
+ return Fail("%s: Could not read output 0", __func__);
}
- const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
@@ -139,14 +142,14 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
isSupported,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
- outInfo);
+ outputInfo);
if (!isSupported)
{
return false;
}
armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
- armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+ armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
@@ -180,17 +183,16 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!outputOperand)
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
{
- return false;
+ return Fail("%s: Could not read output 0", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -215,12 +217,7 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
if (endLayer)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
}
return Fail("%s: ProcessActivation failed", __func__);
@@ -236,6 +233,18 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
return Fail("%s: Operation has invalid inputs", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
if (!axisOperand)
{
@@ -268,14 +277,6 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
descriptor.m_KeepDims = keepDims > 0;
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMeanSupported,
@@ -321,6 +322,18 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
Fail("%s: Only inputs with rank 4 are supported", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
@@ -363,14 +376,6 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSpaceToBatchNdSupported,
@@ -402,13 +407,23 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
unsigned int rank = inputInfo.GetNumDimensions();
if (rank > 4)
{
Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
// NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
// if the operand index is out of bounds.
const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
@@ -446,12 +461,6 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
armnn::ReshapeDescriptor reshapeDesc;
reshapeDesc.m_TargetShape = outputInfo.GetShape();
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
@@ -488,6 +497,18 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
const Operand* beginOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
const Operand* endOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
@@ -539,13 +560,6 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
return Fail("%s: Operation has invalid inputs", __func__);
}
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsStridedSliceSupported,
@@ -652,6 +666,18 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
return Fail("%s: Operation has invalid inputs", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
if (!blockOperand)
{
@@ -686,14 +712,6 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
// Setting crops to 0,0 0,0 as it is not supported in Android NN API
batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsBatchToSpaceNdSupported,
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 4ef7ea4f..7515eb3e 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -5,7 +5,6 @@
#include "HalPolicy.hpp"
-#include "OutputShapeUtils.hpp"
#include "Utils.hpp"
#include "../1.0/HalPolicy.hpp"
@@ -184,8 +183,13 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
return Fail("%s: Could not read output 0", __func__);
}
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
armnn::Convolution2dDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;
@@ -279,21 +283,6 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
desc.m_BiasEnabled = true;
armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
- if (IsDynamicTensor(outputInfo))
- {
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
- weights.GetInfo().GetShape(),
- desc));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
- }
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsConvolution2dSupported,
@@ -327,12 +316,7 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
@@ -354,6 +338,12 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
// ArmNN does not currently support non-fixed weights or bias
// Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
@@ -460,22 +450,6 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
desc.m_BiasEnabled = true;
armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
- weights.GetInfo().GetShape(),
- desc));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
- }
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDepthwiseConvolutionSupported,
@@ -508,12 +482,7 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
@@ -534,11 +503,10 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
return Fail("%s: Could not read output", __func__);
}
- armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -559,12 +527,7 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
assert(layer != nullptr);
BroadcastTensor(input0, input1, layer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
@@ -585,12 +548,10 @@ bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, C
return Fail("%s: Could not read output 0", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
- input1.GetTensorInfo().GetShape()));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -611,12 +572,7 @@ bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, C
assert(layer != nullptr);
BroadcastTensor(input0, input1, layer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
@@ -650,11 +606,10 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
return Fail("%s: Could not convert paddings", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
// Determine type of padding value
@@ -717,12 +672,7 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
input.Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
@@ -746,12 +696,11 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -776,12 +725,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
BroadcastTensor(input, alpha, layer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
@@ -821,8 +765,13 @@ bool HalPolicy::ConvertResize(const Operation& operation,
return Fail("%s: Could not read output 0", __func__);
}
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
armnn::ResizeDescriptor descriptor;
descriptor.m_Method = resizeMethod;
@@ -890,19 +839,6 @@ bool HalPolicy::ConvertResize(const Operation& operation,
return false;
}
- if (IsDynamicTensor(outputInfo))
- {
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
- }
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsResizeSupported,
@@ -924,12 +860,7 @@ bool HalPolicy::ConvertResize(const Operation& operation,
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
@@ -944,12 +875,23 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
unsigned int rank = inputInfo.GetNumDimensions();
-
if (rank != 4)
{
return Fail("%s: Only inputs with rank 4 are supported", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
armnn::SpaceToDepthDescriptor desc;
GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
@@ -961,26 +903,6 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
- const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferSpaceToDepthOutputShape(inputInfo.GetShape(), desc));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
- }
-
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSpaceToDepthSupported,
@@ -998,12 +920,7 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
@@ -1022,11 +939,10 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return Fail("%s: Operation has no outputs", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from input");
- outputInfo.SetShape(input.GetTensorInfo().GetShape());
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
armnn::SoftmaxDescriptor desc;
@@ -1062,12 +978,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
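The 1.2 hunks above also drop the call to InferSpaceToDepthOutputShape. The rule that helper encoded (see the deletion of OutputShapeUtils.cpp below) is restated here as a standalone sketch, hard-coded to NHWC for brevity; the deleted helper resolved the H/W/C indices from the descriptor's data layout and threw on a zero block size.

#include <cstdio>

// SPACE_TO_DEPTH shape rule: spatial dims shrink by the block size,
// the channel dim grows by its square. NHWC assumed here.
void SpaceToDepthOutputShape(unsigned int shape[4], unsigned int blockSize)
{
    shape[1] /= blockSize;             // H
    shape[2] /= blockSize;             // W
    shape[3] *= blockSize * blockSize; // C
}

int main()
{
    unsigned int shape[4] = {1, 4, 4, 1}; // N, H, W, C
    SpaceToDepthOutputShape(shape, 2);
    std::printf("%u %u %u %u\n", shape[0], shape[1], shape[2], shape[3]); // 1 2 2 4
    return 0;
}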
diff --git a/Android.mk b/Android.mk
index 9bbee43d..bee57dd0 100644
--- a/Android.mk
+++ b/Android.mk
@@ -114,7 +114,6 @@ LOCAL_SRC_FILES := \
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
ModelToINetworkConverter.cpp \
- OutputShapeUtils.cpp \
RequestThread.cpp \
Utils.cpp \
ConversionUtils.cpp
@@ -228,7 +227,6 @@ LOCAL_SRC_FILES := \
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
ModelToINetworkConverter.cpp \
- OutputShapeUtils.cpp \
RequestThread.cpp \
Utils.cpp \
ConversionUtils.cpp
@@ -336,7 +334,6 @@ LOCAL_SRC_FILES := \
ConversionUtils.cpp \
DriverOptions.cpp \
ModelToINetworkConverter.cpp \
- OutputShapeUtils.cpp \
RequestThread.cpp \
Utils.cpp
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 694e1b20..9a2b08f0 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -5,7 +5,6 @@
#pragma once
-#include "OutputShapeUtils.hpp"
#include "Utils.hpp"
#include <armnn/ArmNN.hpp>
@@ -1079,8 +1078,7 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
armnn::IConnectableLayer& layer,
uint32_t layerOutputIndex,
const HalModel& model,
- ConversionData& data,
- const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
+ ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
@@ -1095,15 +1093,7 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
const uint32_t operandIndex = operation.outputs[operationOutputIndex];
data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
- if (outputInfo.has_value())
- {
- outputSlot.SetTensorInfo(outputInfo.value());
- ALOGD("Output info overwritten");
- }
- else
- {
- outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
- }
+ outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
return true;
}
@@ -1152,16 +1142,14 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
uint32_t outputIndex,
armnn::IConnectableLayer& layer,
const HalModel& model,
- ConversionData& data,
- const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
+ ConversionData& data)
{
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
outputIndex,
layer,
outputIndex,
model,
- data,
- outputInfo);
+ data);
}
template<typename HalPolicy,
@@ -1186,18 +1174,11 @@ bool ConvertToActivation(const HalOperation& operation,
{
return false;
}
- armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+
+ const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outInfo))
{
- if (Is12Operand(*outputOperand))
- {
- ALOGD("Output shape not set, will infer from input");
- outInfo.SetShape(input.GetTensorInfo().GetShape());
- }
- else
- {
- return Fail("%s: Dynamic OutputShapes are not supported in this HAL version", __func__);
- }
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -1217,11 +1198,7 @@ bool ConvertToActivation(const HalOperation& operation,
BOOST_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,armnn::Optional<armnn::TensorInfo>(outInfo));
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
template<typename HalPolicy,
@@ -1343,6 +1320,11 @@ bool ConvertPooling2d(const HalOperation& operation,
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
armnn::Pooling2dDescriptor desc;
desc.m_PoolType = poolType;
desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
@@ -1438,7 +1420,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
// ArmNN does not currently support non-fixed weights or bias
const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
@@ -1504,7 +1486,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
if (IsDynamicTensor(outputInfo))
{
- return Fail("%s: Dynamic OutputShapes are not supported", __func__);
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -1539,12 +1521,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
template<typename HalPolicy,
@@ -1570,7 +1547,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
// ArmNN does not currently support non-fixed weights or bias
@@ -1674,7 +1651,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
if (IsDynamicTensor(outputInfo))
{
- return Fail("%s: Dynamic OutputShapes are not supported", __func__);
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -1707,12 +1684,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
input.Connect(startLayer->GetInputSlot(0));
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
- 0,
- *endLayer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
template<typename HalPolicy,
@@ -1750,11 +1722,10 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
return Fail("%s: Could not read output", __func__);
}
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicTensor(outputInfo))
{
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
}
bool isSupported = false;
@@ -1775,12 +1746,7 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
input.Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
} // namespace armnn_driver
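ConvertPad above no longer falls back to InferPadOutputShape for dynamic outputs. The arithmetic that helper performed is simple enough to restate as a self-contained sketch: each output dimension is the input dimension plus the padding applied before and after it.

#include <cstdio>
#include <utility>
#include <vector>

// Standalone restatement of the deleted InferPadOutputShape.
std::vector<unsigned int> PadOutputShape(
    const std::vector<unsigned int>& inputShape,
    const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
    std::vector<unsigned int> out(inputShape.size());
    for (size_t dim = 0; dim < inputShape.size(); ++dim)
    {
        out[dim] = inputShape[dim] + padList[dim].first + padList[dim].second;
    }
    return out;
}

int main()
{
    // Pad a 1x3x3 tensor by 1 on each side of the two trailing dims -> 1x5x5
    for (unsigned int d : PadOutputShape({1, 3, 3}, {{0, 0}, {1, 1}, {1, 1}}))
    {
        std::printf("%u ", d);
    }
    std::printf("\n");
    return 0;
}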
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
deleted file mode 100644
index ecec0b92..00000000
--- a/OutputShapeUtils.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "OutputShapeUtils.hpp"
-
-#include <DataLayoutIndexed.hpp>
-
-#include <algorithm>
-#include <numeric>
-#include <vector>
-
-namespace
-{
-
-using namespace armnn;
-
-TensorShape CalculateMaxShape(const TensorShape& inShape0, const TensorShape& inShape1)
-{
- // NOTE: The inferred output size will be the maximum size along each dimension
- // of inShape0 and inShape1, starting with the trailing dimensions, and working its way forward.
- //
- // Example: inShape0={4, 1, 2}, inShape1={5, 4, 3, 1} => outputShape={5, 4, 3, 2}
-
- const unsigned int numInput0Dims = inShape0.GetNumDimensions();
- const unsigned int numInput1Dims = inShape1.GetNumDimensions();
-
- const unsigned int maxNumDims = std::max(numInput0Dims, numInput1Dims);
-
- TensorShape outputShape = TensorShape(maxNumDims);
- for (unsigned int reverseIdx = 1u; reverseIdx <= maxNumDims; ++reverseIdx)
- {
- const int input0Idx = numInput0Dims - reverseIdx;
- const int input1Idx = numInput1Dims - reverseIdx;
-
- const unsigned int input0DimSize = input0Idx >= 0 ? inShape0[input0Idx] : 0u;
- const unsigned int input1DimSize = input1Idx >= 0 ? inShape1[input1Idx] : 0u;
-
- const unsigned int outputIdx = maxNumDims - reverseIdx;
- outputShape[outputIdx] = std::max(input0DimSize, input1DimSize);
- }
-
- return outputShape;
-}
-
-template<typename ConvolutionDescriptor>
-TensorShape InferConvolution2dOutputShapeImpl(const TensorShape& inputShape,
- const TensorShape& kernelShape,
- const ConvolutionDescriptor& descriptor,
- bool isDepthwiseConvolution)
-{
- if (inputShape.GetNumDimensions() != 4)
- {
- throw InvalidArgumentException("Input shape must be 4D");
- }
-
- armnnUtils::DataLayoutIndexed dataLayoutIndex(descriptor.m_DataLayout);
-
- const unsigned int cIndex = dataLayoutIndex.GetChannelsIndex();
- const unsigned int wIndex = dataLayoutIndex.GetWidthIndex();
- const unsigned int hIndex = dataLayoutIndex.GetHeightIndex();
-
- const unsigned int wInput = inputShape[wIndex];
- const unsigned int hInput = inputShape[hIndex];
-
- const unsigned int wKernel = isDepthwiseConvolution ? kernelShape[2] : kernelShape[wIndex];
- const unsigned int wDilated = wKernel + (descriptor.m_DilationX - 1) * (wKernel - 1);
-
- const unsigned int wRead = (wInput + descriptor.m_PadLeft + descriptor.m_PadRight) - wDilated;
- const unsigned int wOutput = 1 + (wRead / descriptor.m_StrideX);
-
- const unsigned int hKernel = isDepthwiseConvolution ? kernelShape[3] : kernelShape[hIndex];
- const unsigned int hDilated = hKernel + (descriptor.m_DilationY - 1) * (hKernel - 1);
-
- const unsigned int hRead = (hInput + descriptor.m_PadTop + descriptor.m_PadBottom) - hDilated;
- const unsigned int hOutput = 1 + (hRead / descriptor.m_StrideY);
-
- TensorShape outputShape(4);
- outputShape[0] = inputShape[0];
- outputShape[cIndex] = kernelShape[0];
- outputShape[wIndex] = wOutput;
- outputShape[hIndex] = hOutput;
-
- if (isDepthwiseConvolution)
- {
- outputShape[cIndex] *= inputShape[cIndex];
- }
-
- return outputShape;
-}
-
-} // anonymous namespace
-
-namespace armnn_driver
-{
-
-using namespace armnn;
-
-TensorShape InferConvolution2dOutputShape(const TensorShape& inputShape,
- const TensorShape& kernelShape,
- const Convolution2dDescriptor& descriptor)
-{
- return InferConvolution2dOutputShapeImpl(inputShape, kernelShape, descriptor, false);
-}
-
-TensorShape InferDepthwiseConvolution2dOutputShape(const TensorShape& inputShape,
- const TensorShape& kernelShape,
- const DepthwiseConvolution2dDescriptor& descriptor)
-{
- return InferConvolution2dOutputShapeImpl(inputShape, kernelShape, descriptor, true);
-}
-
-TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
- const armnn::TensorShape& input1Shape)
-{
- return CalculateMaxShape(input0Shape, input1Shape);
-}
-
-TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
- const armnn::TensorShape& input1Shape)
-{
- return CalculateMaxShape(input0Shape, input1Shape);
-}
-
-TensorShape InferPadOutputShape(const TensorShape& inputShape,
- const std::vector<std::pair<unsigned int, unsigned int>>& padList)
-{
- const unsigned int numDims = inputShape.GetNumDimensions();
-
- std::vector<unsigned int> outputDims;
- TensorShape outputShape = TensorShape(numDims);
- for (unsigned int dim = 0; dim < numDims; ++dim)
- {
- unsigned int dimSize = inputShape[dim];
- const std::pair<unsigned int, unsigned int>& dimPadding = padList[dim];
- dimSize += dimPadding.first;
- dimSize += dimPadding.second;
- outputShape[dim] = dimSize;
- }
- return outputShape;
-}
-
-TensorShape InferPreluOutputShape(const TensorShape& inputShape, const TensorShape& alphaShape)
-{
- return CalculateMaxShape(inputShape, alphaShape);
-}
-
-TensorShape InferResizeOutputShape(const TensorShape& inputShape, const ResizeDescriptor& descriptor)
-{
- if (inputShape.GetNumDimensions() != 4)
- {
- throw InvalidArgumentException("Input shape for Resize must be 4D");
- }
-
- armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
-
- const unsigned int cIndex = dataLayoutIndexed.GetChannelsIndex();
- const unsigned int wIndex = dataLayoutIndexed.GetWidthIndex();
- const unsigned int hIndex = dataLayoutIndexed.GetHeightIndex();
-
- TensorShape outputShape(4);
- outputShape[0] = inputShape[0];
- outputShape[cIndex] = inputShape[cIndex];
- outputShape[wIndex] = descriptor.m_TargetWidth;
- outputShape[hIndex] = descriptor.m_TargetHeight;
-
- return outputShape;
-}
-
-TensorShape InferSpaceToDepthOutputShape(const TensorShape& inputShape, const SpaceToDepthDescriptor& descriptor)
-{
- TensorShape outputShape(inputShape);
-
- armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
-
- const unsigned int cIndex = dataLayoutIndexed.GetChannelsIndex();
- const unsigned int wIndex = dataLayoutIndexed.GetWidthIndex();
- const unsigned int hIndex = dataLayoutIndexed.GetHeightIndex();
-
- if (descriptor.m_BlockSize == 0)
- {
- throw InvalidArgumentException("Block size must be greater than zero");
- }
-
- outputShape[cIndex] = inputShape[cIndex] * descriptor.m_BlockSize * descriptor.m_BlockSize;
-
- outputShape[hIndex] = inputShape[hIndex] / descriptor.m_BlockSize;
- outputShape[wIndex] = inputShape[wIndex] / descriptor.m_BlockSize;
-
- return outputShape;
-}
-
-TensorShape InferSubOutputShape(const TensorShape& input0Shape, const TensorShape& input1Shape)
-{
- return CalculateMaxShape(input0Shape, input1Shape);
-}
-
-} // namespace armnn_driver
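The convolution output-shape arithmetic deleted above deserves a worked example. The sketch below lifts the per-dimension formula out of InferConvolution2dOutputShapeImpl unchanged: dilate the kernel, add the padding to the input extent, then divide the remaining read window by the stride with floor rounding.

#include <cstdio>

// One spatial dimension of the deleted conv output-shape inference.
unsigned int ConvOutputDim(unsigned int input, unsigned int kernel,
                           unsigned int padFront, unsigned int padBack,
                           unsigned int stride, unsigned int dilation)
{
    const unsigned int dilated = kernel + (dilation - 1) * (kernel - 1);
    const unsigned int read    = (input + padFront + padBack) - dilated;
    return 1 + (read / stride); // integer division == floor
}

int main()
{
    // 224-wide input, 3-wide kernel, pad 1/1, stride 2, no dilation -> 112
    std::printf("%u\n", ConvOutputDim(224, 3, 1, 1, 2, 1));
    return 0;
}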
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
deleted file mode 100644
index 85cafbf6..00000000
--- a/OutputShapeUtils.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/ArmNN.hpp>
-
-namespace armnn_driver
-{
-
-armnn::TensorShape InferConvolution2dOutputShape(const armnn::TensorShape& inputShape,
- const armnn::TensorShape& kernelShape,
- const armnn::Convolution2dDescriptor& descriptor);
-
-armnn::TensorShape InferDepthwiseConvolution2dOutputShape(const armnn::TensorShape& inputShape,
- const armnn::TensorShape& kernelShape,
- const armnn::DepthwiseConvolution2dDescriptor& descriptor);
-
-armnn::TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
- const armnn::TensorShape& input1Shape);
-
-armnn::TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
- const armnn::TensorShape& input1Shape);
-
-armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
- const std::vector<std::pair<unsigned int, unsigned int>>& padList);
-
-armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, const armnn::TensorShape& alphaShape);
-
-armnn::TensorShape InferResizeOutputShape(const armnn::TensorShape& inputShape,
- const armnn::ResizeDescriptor& descriptor);
-
-armnn::TensorShape InferSpaceToDepthOutputShape(const armnn::TensorShape& inputShape,
- const armnn::SpaceToDepthDescriptor& descriptor);
-
-armnn::TensorShape InferSubOutputShape(const armnn::TensorShape& input0Shape, const armnn::TensorShape& input1Shape);
-
-} // namespace armnn_driver
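Several of the deleted helpers (InferMaximumOutputShape, InferMinimumOutputShape, InferPreluOutputShape, InferSubOutputShape) were thin wrappers over CalculateMaxShape, which broadcasts two shapes by aligning them at the trailing dimension and taking the larger extent at each position. A standalone sketch, reproducing the example from the deleted comment:

#include <algorithm>
#include <cstdio>
#include <vector>

// Broadcast two shapes: align at the trailing dimension, take the max
// extent per position. {4,1,2} vs {5,4,3,1} -> {5,4,3,2}.
std::vector<unsigned int> CalculateMaxShape(const std::vector<unsigned int>& a,
                                            const std::vector<unsigned int>& b)
{
    const size_t n = std::max(a.size(), b.size());
    std::vector<unsigned int> out(n);
    for (size_t reverseIdx = 1; reverseIdx <= n; ++reverseIdx)
    {
        const unsigned int da = reverseIdx <= a.size() ? a[a.size() - reverseIdx] : 0u;
        const unsigned int db = reverseIdx <= b.size() ? b[b.size() - reverseIdx] : 0u;
        out[n - reverseIdx] = std::max(da, db);
    }
    return out;
}

int main()
{
    for (unsigned int d : CalculateMaxShape({4, 1, 2}, {5, 4, 3, 1}))
    {
        std::printf("%u ", d); // prints: 5 4 3 2
    }
    std::printf("\n");
    return 0;
}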