author    Tracy Narine <tracy.narine@arm.com>    2023-07-13 16:50:54 +0100
committer Tracy Narine <tracy.narine@arm.com>    2023-07-17 14:19:36 +0100
commit    bb8d7591a35bd95480b39001f8b7e41a6671f3a6 (patch)
tree      abf2871aa1bb86378f423df405164b0d4521db3f
parent    688268328c69e7d4181cdd31fe4717c80a6d1685 (diff)
download  armnn-bb8d7591a35bd95480b39001f8b7e41a6671f3a6.tar.gz
IVGCVSW-7879 Change REVERSE_V2 from LayerWithParameters with 1 input, to Layer with 2 inputs
* Changing ReverseV2 to use two inputs
* This is required by the backends
* The ReverseV2Descriptor was removed
* Tests updated
* Added a Run<> template for inputs with different data types

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: I22f947de829b4b3da6bda3a74f4ffdef4052cc25
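For reference, a minimal sketch (not part of this change) of building a graph with the new two-input AddReverseV2Layer() API, modelled on the updated SerializeReverseV2 test further down: the axis values now arrive through input slot 1 as a Signed32 tensor instead of through a descriptor. The function name BuildReverseV2Network and the tensor shapes are illustrative only.

    // Minimal sketch: ReverseV2 with the axis supplied as a second input tensor.
    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>

    armnn::INetworkPtr BuildReverseV2Network()
    {
        using namespace armnn;

        // Data tensor plus a constant Signed32 axis tensor (one entry per axis to reverse).
        const TensorInfo inputInfo({2, 3, 4}, DataType::Float32);
        const TensorInfo axisInfo({3}, DataType::Signed32, 0.0f, 0, true);
        const TensorInfo outputInfo({2, 3, 4}, DataType::Float32);

        INetworkPtr network = INetwork::Create();

        IConnectableLayer* const inputLayer     = network->AddInputLayer(0);
        IConnectableLayer* const axisLayer      = network->AddInputLayer(1);
        IConnectableLayer* const reverseV2Layer = network->AddReverseV2Layer("reverseV2"); // no descriptor argument any more
        IConnectableLayer* const outputLayer    = network->AddOutputLayer(0);

        // Slot 0 carries the data, slot 1 carries the axis tensor.
        inputLayer->GetOutputSlot(0).Connect(reverseV2Layer->GetInputSlot(0));
        axisLayer->GetOutputSlot(0).Connect(reverseV2Layer->GetInputSlot(1));
        reverseV2Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
        axisLayer->GetOutputSlot(0).SetTensorInfo(axisInfo);
        reverseV2Layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        return network;
    }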
-rw-r--r--  include/armnn/BackendHelper.hpp  4
-rw-r--r--  include/armnn/Descriptors.hpp  24
-rw-r--r--  include/armnn/DescriptorsFwd.hpp  1
-rw-r--r--  include/armnn/INetwork.hpp  4
-rw-r--r--  include/armnn/backends/WorkloadData.hpp  2
-rw-r--r--  src/armnn/BackendHelper.cpp  8
-rw-r--r--  src/armnn/Network.cpp  9
-rw-r--r--  src/armnn/Network.hpp  3
-rw-r--r--  src/armnn/layers/ReverseV2Layer.cpp  38
-rw-r--r--  src/armnn/layers/ReverseV2Layer.hpp  11
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp  11
-rw-r--r--  src/armnnDeserializer/test/DeserializeReverseV2.cpp  442
-rw-r--r--  src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp  79
-rw-r--r--  src/armnnSerializer/ArmnnSchema.fbs  5
-rw-r--r--  src/armnnSerializer/Serializer.cpp  14
-rw-r--r--  src/armnnSerializer/Serializer.hpp  1
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp  11
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp  37
-rw-r--r--  src/armnnTfLiteParser/test/ReverseV2.cpp  24
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp  63
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp  8
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp  241
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp  23
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp  4
-rw-r--r--  src/backends/reference/workloads/RefReverseV2Workload.cpp  9
-rw-r--r--  src/backends/reference/workloads/ReverseV2Impl.cpp  23
-rw-r--r--  src/backends/reference/workloads/ReverseV2Impl.hpp  7
28 files changed, 665 insertions, 443 deletions
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 6f804cbbed..6181ba5c40 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -360,9 +360,9 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- bool IsReverseV2Supported(const TensorInfo& input,
+ bool IsReverseV2Supported(const TensorInfo& input0,
+ const TensorInfo& input1,
const TensorInfo& output,
- const ReverseV2Descriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsShapeSupported(const TensorInfo& input,
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 27ca50123f..9ff894f1b0 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1620,28 +1620,4 @@ struct BatchMatMulDescriptor : BaseDescriptor
const TensorShape& tensorShape);
};
-struct ReverseV2Descriptor : BaseDescriptor
-{
- ReverseV2Descriptor()
- : m_Axis()
- , m_MaxDimension(4)
- {}
-
- ReverseV2Descriptor(std::vector<int32_t> axis)
- : m_Axis(axis)
- , m_MaxDimension(4)
- {}
-
- bool operator ==(const ReverseV2Descriptor& rhs) const
- {
- return m_Axis == rhs.m_Axis;
- }
-
- /// The indices of the dimensions to reverse
- std::vector<int32_t> m_Axis;
- /// The max dimension supported in the lower levels of code
- uint32_t m_MaxDimension;
-
-};
-
} // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 4e9621d020..2c25a49f00 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -42,7 +42,6 @@ struct QLstmDescriptor;
struct ReshapeDescriptor;
struct ResizeDescriptor;
struct ReduceDescriptor;
-struct ReverseV2Descriptor;
struct SliceDescriptor;
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index e311fa8840..e20dd1c348 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -838,11 +838,9 @@ public:
const char* name = nullptr);
/// Add a ReverseV2 layer to the network
- /// @param descriptor - Parameters for the ReverseV2 operation
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer
- IConnectableLayer* AddReverseV2Layer(const ReverseV2Descriptor& descriptor,
- const char* name = nullptr);
+ IConnectableLayer* AddReverseV2Layer(const char* name = nullptr);
void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index fe59fca795..e7d5e0e689 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -750,7 +750,7 @@ struct BatchMatMulQueueDescriptor : QueueDescriptorWithParameters<BatchMatMulDes
void Validate(const WorkloadInfo& workloadInfo) const;
};
-struct ReverseV2QueueDescriptor : QueueDescriptorWithParameters<ReverseV2Descriptor>
+struct ReverseV2QueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 404d278efc..18184fbfb2 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -1211,16 +1211,16 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
reasonIfUnsupported);
}
-bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input,
+bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input0,
+ const armnn::TensorInfo &input1,
const armnn::TensorInfo &output,
- const armnn::ReverseV2Descriptor &descriptor,
Optional<std::string &> reasonIfUnsupported)
{
- TensorInfos infos{input, output};
+ TensorInfos infos{input0, input1, output};
return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2,
infos,
- descriptor,
+ BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
reasonIfUnsupported);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index ae5bde17ca..010fa0076b 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -639,10 +639,9 @@ IConnectableLayer* INetwork::AddBatchMatMulLayer(const BatchMatMulDescriptor &de
return pNetworkImpl->AddBatchMatMulLayer(descriptor, name);
}
-IConnectableLayer* INetwork::AddReverseV2Layer(const ReverseV2Descriptor &descriptor,
- const char *name)
+IConnectableLayer* INetwork::AddReverseV2Layer(const char *name)
{
- return pNetworkImpl->AddReverseV2Layer(descriptor, name);
+ return pNetworkImpl->AddReverseV2Layer(name);
}
void INetwork::ExecuteStrategy(IStrategy& strategy) const
@@ -2930,9 +2929,9 @@ IConnectableLayer* NetworkImpl::AddBatchMatMulLayer(const BatchMatMulDescriptor&
return m_Graph->AddLayer<BatchMatMulLayer>(desc, name);
}
-IConnectableLayer* NetworkImpl::AddReverseV2Layer(const ReverseV2Descriptor &desc, const char *name)
+IConnectableLayer* NetworkImpl::AddReverseV2Layer(const char *name)
{
- return m_Graph->AddLayer<ReverseV2Layer>(desc, name);
+ return m_Graph->AddLayer<ReverseV2Layer>(name);
}
IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index fc3ae42aa9..ae287f32d1 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -188,8 +188,7 @@ public:
IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr);
- IConnectableLayer* AddReverseV2Layer(const ReverseV2Descriptor& ReverseV2Descriptor,
- const char* name = nullptr);
+ IConnectableLayer* AddReverseV2Layer(const char* name = nullptr);
IConnectableLayer* AddShapeLayer(const char* name = nullptr);
diff --git a/src/armnn/layers/ReverseV2Layer.cpp b/src/armnn/layers/ReverseV2Layer.cpp
index 201e19819b..e1160b6e16 100644
--- a/src/armnn/layers/ReverseV2Layer.cpp
+++ b/src/armnn/layers/ReverseV2Layer.cpp
@@ -10,9 +10,10 @@
namespace armnn
{
-ReverseV2Layer::ReverseV2Layer(const armnn::ReverseV2Descriptor &param, const char *name)
- : LayerWithParameters(1, 1, LayerType::ReverseV2, param, name)
-{}
+ReverseV2Layer::ReverseV2Layer(const char* name)
+ : Layer(2, 1, LayerType::ReverseV2, name)
+{
+}
std::unique_ptr<IWorkload> ReverseV2Layer::CreateWorkload(const armnn::IWorkloadFactory &factory) const
{
@@ -24,27 +25,48 @@ std::unique_ptr<IWorkload> ReverseV2Layer::CreateWorkload(const armnn::IWorkload
ReverseV2Layer* ReverseV2Layer::Clone(armnn::Graph &graph) const
{
- auto layer = CloneBase<ReverseV2Layer>(graph, m_Param, GetName());
+ auto layer = CloneBase<ReverseV2Layer>(graph, GetName());
return std::move(layer);
}
-/// Use the default Layer::InferOutputShape method
+std::vector<TensorShape> ReverseV2Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 2);
+
+ const auto inputDims = inputShapes[0].GetNumDimensions();
+
+ std::vector<unsigned int> dimSizes(inputDims);
+ for (unsigned i=0; i<inputDims; i++)
+ {
+ dimSizes[i] = inputShapes[0][i];
+ }
+
+ TensorShape outputShape({ inputDims, dimSizes.data() });
+
+ return std::vector<TensorShape>({ outputShape });
+}
void ReverseV2Layer::ValidateTensorShapesFromInputs()
{
- VerifyLayerConnections(1, CHECK_LOCATION());
+ VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({
- GetInputSlot(0).GetTensorInfo().GetShape() });
+ GetInputSlot(0).GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReverseV2Layer");
}
-}
\ No newline at end of file
+void ReverseV2Layer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
+}
diff --git a/src/armnn/layers/ReverseV2Layer.hpp b/src/armnn/layers/ReverseV2Layer.hpp
index 046670e9de..7dc0656aca 100644
--- a/src/armnn/layers/ReverseV2Layer.hpp
+++ b/src/armnn/layers/ReverseV2Layer.hpp
@@ -11,7 +11,7 @@ namespace armnn
{
/// This layer represents a ReverseV2 operation.
- class ReverseV2Layer : public LayerWithParameters<ReverseV2Descriptor>
+ class ReverseV2Layer : public Layer
{
public:
/// Makes a workload for the ReverseV2 type.
@@ -28,19 +28,18 @@ namespace armnn
/// otherwise infers the output shapes from given input shapes and layer properties.
/// @param [in] inputShapes The vector of input shapes for ReverseV2.
/// @return A vector to the inferred output shape.
-
- /// Use the default Layer::InferOutputShape method
- // std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ReverseV2Layer.
void ValidateTensorShapesFromInputs() override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a ReverseV2Layer.
- /// @param [in] param ReverseV2Descriptor to configure the ReverseV2 operation.
/// @param [in] name Optional name for the layer.
- ReverseV2Layer(const ReverseV2Descriptor& param, const char* name);
+ ReverseV2Layer(const char* name);
/// Default destructor
~ReverseV2Layer() = default;
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index d125ef56dd..1e40c637cc 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -2749,20 +2749,13 @@ void IDeserializer::DeserializerImpl::ParseReverseV2(GraphPtr graph, unsigned in
CHECK_LAYERS(graph, 0, layerIndex);
TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
- CHECK_VALID_SIZE(inputs.size(), 1);
+ CHECK_VALID_SIZE(inputs.size(), 2);
TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ReverseV2Layer()->descriptor();
- auto flatBufferAxis = flatBufferDescriptor->axis();
-
- armnn::ReverseV2Descriptor descriptor;
- descriptor.m_Axis =
- std::vector<int32_t>(flatBufferAxis->begin(), flatBufferAxis->end());
-
auto layerName = GetLayerName(graph, layerIndex);
- IConnectableLayer* layer = m_Network->AddReverseV2Layer(descriptor, layerName.c_str());
+ IConnectableLayer* layer = m_Network->AddReverseV2Layer(layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnDeserializer/test/DeserializeReverseV2.cpp b/src/armnnDeserializer/test/DeserializeReverseV2.cpp
index 73816f8588..b6f207c9d1 100644
--- a/src/armnnDeserializer/test/DeserializeReverseV2.cpp
+++ b/src/armnnDeserializer/test/DeserializeReverseV2.cpp
@@ -14,61 +14,96 @@ TEST_SUITE("Deserializer_ReverseV2")
{
struct ReverseV2Fixture : public ParserFlatbuffersSerializeFixture
{
- explicit ReverseV2Fixture(const std::string& inputShape,
+ explicit ReverseV2Fixture(const std::string& inputShape0,
+ const std::string& inputShape1,
const std::string& outputShape,
- const std::string& dataType,
- const std::string& axis)
+ const std::string& dataType0,
+ const std::string& dataType1)
{
m_JsonString = R"(
{
- inputIds: [0],
- outputIds: [2],
- layers: [
+ inputIds: [0, 1],
+ outputIds: [3],
+ layers:
+ [
{
layer_type: "InputLayer",
layer: {
- base: {
- layerBindingId: 0,
- base: {
- index: 0,
- layerName: "InputLayer",
- layerType: "Input",
- inputSlots: [{
- index: 0,
- connection: {sourceLayerIndex:0, outputSlotIndex:0 },
- }],
- outputSlots: [{
+ base: {
+ layerBindingId: 0,
+ base: {
index: 0,
- tensorInfo: {
- dimensions: )" + inputShape + R"(,
- dataType: )" + dataType + R"(
- }
- }]
- }
- }
+ layerName: "InputLayer0",
+ layerType: "Input",
+ inputSlots:
+ [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots:
+ [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape0 + R"(,
+ dataType: )" + dataType0 + R"(
+ }
+ }]
+ }
+ }
}
},
{
- layer_type: "ReverseV2Layer",
+ layer_type: "InputLayer",
layer: {
- base: {
- index: 1,
- layerName: "ReverseV2Layer",
- layerType: "ReverseV2",
- inputSlots: [{
- index: 0,
- connection: {sourceLayerIndex:0, outputSlotIndex:0 },
- }],
- outputSlots: [{
- index: 0,
- tensorInfo: {
- dimensions: )" + outputShape + R"(,
- dataType: )" + dataType + R"(
+ base: {
+ layerBindingId: 1,
+ base: {
+ index:1,
+ layerName: "InputLayer1",
+ layerType: "Input",
+ inputSlots:
+ [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots:
+ [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType1 + R"(
+ }
+ }]
}
+ }
+ }
+ },
+ {
+ layer_type: "ReverseV2Layer",
+ layer : {
+ base: {
+ index:2,
+ layerName: "ReverseV2Layer",
+ layerType: "ReverseV2",
+ inputSlots:
+ [
+ {
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ },
+ {
+ index: 1,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }
+ ],
+ outputSlots:
+ [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType0 + R"(
+ }
}]
- },
- descriptor: {
- axis: )" + axis + R"(
}
}
},
@@ -76,31 +111,32 @@ TEST_SUITE("Deserializer_ReverseV2")
layer_type: "OutputLayer",
layer: {
base:{
- layerBindingId: 2,
- base: {
- index: 2,
- layerName: "OutputLayer",
- layerType: "Output",
- inputSlots: [{
- index: 0,
- connection: {sourceLayerIndex:1, outputSlotIndex:0 },
- }],
- outputSlots: [{
- index: 0,
- tensorInfo: {
- dimensions: )" + outputShape + R"(,
- dataType: )" + dataType + R"(
- },
- }],
- }
+ layerBindingId: 0,
+ base: {
+ index: 3,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots:
+ [{
+ index: 0,
+ connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+ }],
+ outputSlots:
+ [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType0 + R"(
+ }
+ }]
+ }
}
}
}
]
- }
- )";
+ } )";
- SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+ Setup();
}
};
@@ -109,65 +145,155 @@ TEST_SUITE("Deserializer_ReverseV2")
struct SimpleReverseV2FixtureFloat32 : ReverseV2Fixture
{
SimpleReverseV2FixtureFloat32()
+ : ReverseV2Fixture("[ 2, 2 ]",
+ "[ 1 ]",
+ "[ 2, 2 ]",
+ "Float32",
+ "Signed32")
+ {}
+ };
+
+ TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32, "SimpleReverseV2TestFloat32")
+ {
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
+ 0,
+ {
+ {
+ "InputLayer0",
+ { 1.0f, 2.0f,
+ 3.0f, 4.0f }
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { 1 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 2.0f, 1.0f,
+ 4.0f, 3.0f }
+ }
+ }
+ );
+ }
+
+ struct SimpleReverseV2FixtureFloat32OtherAxis : ReverseV2Fixture
+ {
+ SimpleReverseV2FixtureFloat32OtherAxis()
: ReverseV2Fixture("[ 2, 2 ]",
+ "[ 1 ]",
"[ 2, 2 ]",
"Float32",
- "[1]")
+ "Signed32")
{}
};
- TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32, "SimpleReverseV2TestFloat32")
+ TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32OtherAxis, "SimpleReverseV2FixtureFloat32OtherAxis")
{
- RunTest<4, armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
0,
- { 1.0f, 2.0f,
- 3.0f, 4.0f },
- { 2.0f, 1.0f,
- 4.0f, 3.0f }
- );
+ {
+ {
+ "InputLayer0",
+ { 1.0f, 2.0f,
+ 3.0f, 4.0f }
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { 1 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 2.0f, 1.0f,
+ 4.0f, 3.0f }
+ }
+ }
+ );
}
- struct SimpleReverseV2FixtureFloat32ZeroAxis : ReverseV2Fixture
+ struct SimpleReverseV2FixtureFloat32NegativeFirstAxis : ReverseV2Fixture
{
- SimpleReverseV2FixtureFloat32ZeroAxis()
+ SimpleReverseV2FixtureFloat32NegativeFirstAxis()
: ReverseV2Fixture("[ 2, 2 ]",
+ "[ 1 ]",
"[ 2, 2 ]",
"Float32",
- "[0]")
+ "Signed32")
{}
};
- TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32ZeroAxis, "SimpleReverseV2TestFloat32ZeroAxis")
+ TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32NegativeFirstAxis, "SimpleReverseV2FixtureFloat32NegativeFirstAxis")
{
- RunTest<4, armnn::DataType::Float32>(
- 0,
- { 1.0f, 2.0f,
- 3.0f, 4.0f },
- { 3.0f, 4.0f,
- 1.0f, 2.0f }
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
+ 0,
+ {
+ {
+ "InputLayer0",
+ { 1.0f, 2.0f,
+ 3.0f, 4.0f }
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { -2 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 3.0f, 4.0f,
+ 1.0f, 2.0f }
+ }
+ }
);
}
- struct SimpleReverseV2FixtureFloat32NegativeAxis : ReverseV2Fixture
+ struct SimpleReverseV2FixtureFloat32NegativeSecondAxis : ReverseV2Fixture
{
- SimpleReverseV2FixtureFloat32NegativeAxis()
+ SimpleReverseV2FixtureFloat32NegativeSecondAxis()
: ReverseV2Fixture("[ 3, 3 ]",
+ "[ 1 ]",
"[ 3, 3 ]",
"Float32",
- "[-1]")
+ "Signed32")
{}
};
- TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32NegativeAxis, "SimpleReverseV2TestFloat32NegativeAxis")
+ TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32NegativeSecondAxis,
+ "SimpleReverseV2FixtureFloat32NegativeSecondAxis")
{
- RunTest<4, armnn::DataType::Float32>(
- 0,
- { 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f },
- { 3.0f, 2.0f, 1.0f,
- 6.0f, 5.0f, 4.0f,
- 9.0f, 8.0f, 7.0f }
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
+ 0,
+ {
+ {
+ "InputLayer0",
+ { 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f }
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { -1 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 3.0f, 2.0f, 1.0f,
+ 6.0f, 5.0f, 4.0f,
+ 9.0f, 8.0f, 7.0f }
+ }
+ }
);
}
@@ -175,38 +301,55 @@ TEST_SUITE("Deserializer_ReverseV2")
{
SimpleReverseV2FixtureFloat32ThreeAxis()
: ReverseV2Fixture("[ 3, 3, 3 ]",
+ "[ 3 ]",
"[ 3, 3, 3 ]",
"Float32",
- "[0, 2, 1]")
+ "Signed32")
{}
};
TEST_CASE_FIXTURE(SimpleReverseV2FixtureFloat32ThreeAxis, "SimpleReverseV2TestFloat32ThreeAxis")
{
- RunTest<4, armnn::DataType::Float32>(
- 0,
- { 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f,
-
- 11.0f, 12.0f, 13.0f,
- 14.0f, 15.0f, 16.0f,
- 17.0f, 18.0f, 19.0f,
-
- 21.0f, 22.0f, 23.0f,
- 24.0f, 25.0f, 26.0f,
- 27.0f, 28.0f, 29.0f },
- { 29.0f, 28.0f, 27.0f,
- 26.0f, 25.0f, 24.0f,
- 23.0f, 22.0f, 21.0f,
-
- 19.0f, 18.0f, 17.0f,
- 16.0f, 15.0f, 14.0f,
- 13.0f, 12.0f, 11.0f,
-
- 9.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 4.0f,
- 3.0f, 2.0f, 1.0f }
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
+ 0,
+ {
+ {
+ "InputLayer0",
+ { 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f,
+
+ 11.0f, 12.0f, 13.0f,
+ 14.0f, 15.0f, 16.0f,
+ 17.0f, 18.0f, 19.0f,
+
+ 21.0f, 22.0f, 23.0f,
+ 24.0f, 25.0f, 26.0f,
+ 27.0f, 28.0f, 29.0f },
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { 0, 2, 1 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 29.0f, 28.0f, 27.0f,
+ 26.0f, 25.0f, 24.0f,
+ 23.0f, 22.0f, 21.0f,
+
+ 19.0f, 18.0f, 17.0f,
+ 16.0f, 15.0f, 14.0f,
+ 13.0f, 12.0f, 11.0f,
+
+ 9.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 4.0f,
+ 3.0f, 2.0f, 1.0f }
+ }
+ }
);
}
@@ -214,38 +357,55 @@ TEST_SUITE("Deserializer_ReverseV2")
{
SimpleReverseV2FixtureQuantisedAsymm8ThreeAxis()
: ReverseV2Fixture("[ 3, 3, 3 ]",
+ "[ 3 ]",
"[ 3, 3, 3 ]",
"QuantisedAsymm8",
- "[0, 2, 1]")
+ "Signed32")
{}
};
TEST_CASE_FIXTURE(SimpleReverseV2FixtureQuantisedAsymm8ThreeAxis, "SimpleReverseV2TestQuantisedAsymm8ThreeAxis")
{
- RunTest<4, armnn::DataType::QAsymmU8>(
- 0,
- { 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9,
-
- 11, 12, 13,
- 14, 15, 16,
- 17, 18, 19,
-
- 21, 22, 23,
- 24, 25, 26,
- 27, 28, 29 },
- { 29, 28, 27,
- 26, 25, 24,
- 23, 22, 21,
-
- 19, 18, 17,
- 16, 15, 14,
- 13, 12, 11,
-
- 9, 8, 7,
- 6, 5, 4,
- 3, 2, 1 }
+ RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::Signed32, armnn::DataType::QAsymmU8>(
+ 0,
+ {
+ {
+ "InputLayer0",
+ { 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+
+ 11, 12, 13,
+ 14, 15, 16,
+ 17, 18, 19,
+
+ 21, 22, 23,
+ 24, 25, 26,
+ 27, 28, 29 },
+ }
+ },
+ {
+ {
+ "InputLayer1",
+ { 0, 2, 1 }
+ }
+ },
+ {
+ {
+ "OutputLayer",
+ { 29, 28, 27,
+ 26, 25, 24,
+ 23, 22, 21,
+
+ 19, 18, 17,
+ 16, 15, 14,
+ 13, 12, 11,
+
+ 9, 8, 7,
+ 6, 5, 4,
+ 3, 2, 1 }
+ }
+ }
);
}
}
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 31ff026887..0b717bc0fd 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -150,6 +150,18 @@ struct ParserFlatbuffersSerializeFixture
const std::map<std::string, std::vector<InputDataType>>& inputData,
const std::map<std::string, std::vector<OutputDataType>>& expectedOutputData);
+ template<std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnInputType0,
+ armnn::DataType ArmnnInputType1,
+ armnn::DataType ArmnnOutputType,
+ typename InputDataType0 = armnn::ResolveType<ArmnnInputType0>,
+ typename InputDataType1 = armnn::ResolveType<ArmnnInputType1>,
+ typename OutputDataType = armnn::ResolveType<ArmnnOutputType>>
+ void RunTest(unsigned int layersId,
+ const std::map<std::string, std::vector<InputDataType0>>& inputData0,
+ const std::map<std::string, std::vector<InputDataType1>>& inputData1,
+ const std::map<std::string, std::vector<OutputDataType>>& expectedOutputData);
+
void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
armnnSerializer::TensorInfo tensorType, const std::string& name,
const float scale, const int64_t zeroPoint)
@@ -246,3 +258,68 @@ void ParserFlatbuffersSerializeFixture::RunTest(
CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}
}
+
+template<std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnInputType0,
+ armnn::DataType ArmnnInputType1,
+ armnn::DataType ArmnnOutputType,
+ typename InputDataType0,
+ typename InputDataType1,
+ typename OutputDataType>
+void ParserFlatbuffersSerializeFixture::RunTest(
+ unsigned int layersId,
+ const std::map<std::string, std::vector<InputDataType0>>& inputData0,
+ const std::map<std::string, std::vector<InputDataType1>>& inputData1,
+ const std::map<std::string, std::vector<OutputDataType>>& expectedOutputData)
+{
+ auto ConvertBindingInfo = [](const armnnDeserializer::BindingPointInfo& bindingInfo)
+ {
+ return std::make_pair(bindingInfo.m_BindingId, bindingInfo.m_TensorInfo);
+ };
+
+ // Setup the armnn input tensors from the given vectors.
+ armnn::InputTensors inputTensors;
+ for (auto&& it : inputData0)
+ {
+ armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
+ m_Parser->GetNetworkInputBindingInfo(layersId, it.first));
+ bindingInfo.second.SetConstant(true);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnInputType0);
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ for (auto&& it : inputData1)
+ {
+ armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
+ m_Parser->GetNetworkInputBindingInfo(layersId, it.first));
+ bindingInfo.second.SetConstant(true);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnInputType1);
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+ std::map<std::string, std::vector<OutputDataType>> outputStorage;
+ armnn::OutputTensors outputTensors;
+ for (auto&& it : expectedOutputData)
+ {
+ armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
+ m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnOutputType);
+ outputStorage.emplace(it.first, std::vector<OutputDataType>(bindingInfo.second.GetNumElements()));
+ outputTensors.push_back(
+ { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+ }
+
+ m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+ // Compare each output tensor to the expected values
+ for (auto&& it : expectedOutputData)
+ {
+ armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
+ m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
+ auto outputExpected = it.second;
+ auto result = CompareTensors(outputExpected, outputStorage[it.first],
+ bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
+ }
+}
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 995a6013c1..75dd252e68 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -984,11 +984,6 @@ table ResizeDescriptor {
table ReverseV2Layer {
base:LayerBase;
- descriptor:ReverseV2Descriptor;
-}
-
-table ReverseV2Descriptor {
- axis:[int];
}
table StackLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index e10b66f51d..39a42954a9 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1036,20 +1036,14 @@ void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* la
}
void SerializerStrategy::SerializeReverseV2Layer(const armnn::IConnectableLayer* layer,
- const armnn::ReverseV2Descriptor& reverseV2Descriptor,
- const char* name)
+ const char* name)
{
IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ReverseV2);
- auto flatBufferDescriptor =
- CreateReverseV2Descriptor(m_flatBufferBuilder,
- m_flatBufferBuilder.CreateVector(reverseV2Descriptor.m_Axis));
-
auto flatBufferLayer = serializer::CreateReverseV2Layer(m_flatBufferBuilder,
- flatBufferBaseLayer,
- flatBufferDescriptor);
+ flatBufferBaseLayer);
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ReverseV2Layer);
}
@@ -2357,9 +2351,7 @@ void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
}
case armnn::LayerType::ReverseV2:
{
- const armnn::ReverseV2Descriptor& layerDescriptor =
- static_cast<const armnn::ReverseV2Descriptor&>(descriptor);
- SerializeReverseV2Layer(layer, layerDescriptor, name);
+ SerializeReverseV2Layer(layer, name);
break;
}
case armnn::LayerType::Shape:
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index eb724752f2..9e9eca8e26 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -290,7 +290,6 @@ private:
const char* name = nullptr);
void SerializeReverseV2Layer(const armnn::IConnectableLayer* layer,
- const armnn::ReverseV2Descriptor& reverseV2Descriptor,
const char* name = nullptr);
void SerializeSliceLayer(const armnn::IConnectableLayer* layer,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 49971d2ecc..163e5c8374 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -2457,26 +2457,27 @@ TEST_CASE("SerializeReverseV2")
{
const std::string layerName("reverseV2");
const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 4}, armnn::DataType::Float32);
+ const armnn::TensorInfo axisInfo = armnn::TensorInfo({3}, armnn::DataType::Signed32, 0.0f, 0, true);
const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4}, armnn::DataType::Float32);
- armnn::ReverseV2Descriptor desc;
- desc.m_Axis = {1, 0, 2};
-
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- armnn::IConnectableLayer* const reverseV2Layer = network->AddReverseV2Layer(desc, layerName.c_str());
+ armnn::IConnectableLayer* const axisLayer = network->AddInputLayer(1);
+ armnn::IConnectableLayer* const reverseV2Layer = network->AddReverseV2Layer(layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(reverseV2Layer->GetInputSlot(0));
+ axisLayer->GetOutputSlot(0).Connect(reverseV2Layer->GetInputSlot(1));
reverseV2Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ axisLayer->GetOutputSlot(0).SetTensorInfo(axisInfo);
reverseV2Layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
CHECK(deserializedNetwork);
- LayerVerifierBaseWithDescriptor<armnn::ReverseV2Descriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
+ LayerVerifierBase verifier(layerName, {inputInfo, axisInfo}, {outputInfo});
deserializedNetwork->ExecuteStrategy(verifier);
}
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 078786cb93..77ce565959 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -3294,46 +3294,13 @@ void TfLiteParserImpl::ParseReverseV2(size_t subgraphIndex, size_t operatorIndex
TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
- std::vector<int32_t> axisTensorData(axisTensorInfo.GetNumElements());
-
- BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
- ::memcpy(axisTensorData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
-
- ReverseV2Descriptor descriptor(axisTensorData);
-
- auto inputRank = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
- std::vector<bool> dimFlag(inputRank, false);
-
- for (auto axis : axisTensorData)
- {
- if (axis < -inputRank || axis >= inputRank)
- {
- throw ParseException(
- fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
- axis,
- inputRank, inputRank,
- CHECK_LOCATION().AsString()));
- }
-
- auto posAxis = axis < 0 ? axis + inputRank : axis;
-
- if (dimFlag[posAxis])
- {
- throw ParseException(
- fmt::format("Operation has repeated axis: {} {}",
- axis,
- CHECK_LOCATION().AsString()));
- }
- dimFlag[posAxis] = true;
- }
-
- IConnectableLayer* layer = m_Network->AddReverseV2Layer(descriptor, layerName.c_str());
+ IConnectableLayer* layer = m_Network->AddReverseV2Layer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
diff --git a/src/armnnTfLiteParser/test/ReverseV2.cpp b/src/armnnTfLiteParser/test/ReverseV2.cpp
index 9604a3a1de..7dad19a40d 100644
--- a/src/armnnTfLiteParser/test/ReverseV2.cpp
+++ b/src/armnnTfLiteParser/test/ReverseV2.cpp
@@ -12,7 +12,6 @@ struct ReverseV2Fixture : public ParserFlatbuffersFixture
explicit ReverseV2Fixture(const std::string& inputShape,
const std::string& outputShape,
const std::string& axisShape,
- const std::string& axisData,
const std::string& dataType = "FLOAT32",
const std::string& scale = "1.0",
const std::string& offset = "0")
@@ -75,8 +74,8 @@ struct ReverseV2Fixture : public ParserFlatbuffersFixture
} ],
"buffers" : [
{ },
- { "data": )" + axisData + R"(, },
{ },
+ { }
]
}
)";
@@ -86,15 +85,30 @@ struct ReverseV2Fixture : public ParserFlatbuffersFixture
struct SimpleReverseV2Fixture : public ReverseV2Fixture
{
- SimpleReverseV2Fixture() : ReverseV2Fixture("[ 2, 2, 2 ]", "[ 2, 2, 2 ]", "[ 2 ]", "[ 0,0,0,0, 1,0,0,0 ]") {}
+ SimpleReverseV2Fixture() : ReverseV2Fixture("[ 2, 2, 2 ]", "[ 2, 2, 2 ]", "[ 2 ]" ) {}
};
TEST_CASE_FIXTURE(SimpleReverseV2Fixture, "ParseReverseV2")
{
- RunTest<3, armnn::DataType::Float32>
+ RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>
(0,
{{ "inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8 }}},
+ {{ "axis", { 0, 1 }}},
{{ "outputTensor", { 7, 8, 5, 6, 3, 4, 1, 2 }}});
}
-}
\ No newline at end of file
+struct SimpleReverseV2FixtureNegativeAxis : public ReverseV2Fixture
+{
+ SimpleReverseV2FixtureNegativeAxis() : ReverseV2Fixture("[ 2, 2, 2 ]", "[ 2, 2, 2 ]", "[ 1 ]" ) {}
+};
+
+TEST_CASE_FIXTURE(SimpleReverseV2FixtureNegativeAxis, "ParseReverseV2")
+{
+ RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8 }}},
+ {{ "axis", { -1 }}},
+ {{ "outputTensor", { 2, 1, 4, 3, 6, 5, 8, 7 }}});
+}
+
+}
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index a26aaf490b..bd3c7c2760 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1617,18 +1617,35 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
void ReverseV2QueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const {
const std::string descriptorName{"ReverseV2QueueDescriptor"};
- ValidateNumInputs(workloadInfo, descriptorName, 1);
+ // Backend restriction
+ const unsigned int maxDimensions = 4;
+
+ ValidateNumInputs(workloadInfo, descriptorName, 2);
ValidateNumOutputs(workloadInfo, descriptorName, 1);
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& axisTensorInfo = workloadInfo.m_InputTensorInfos[1];
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
- auto inputTensorNumDimensions = inputTensorInfo.GetNumDimensions();
- if (inputTensorNumDimensions > m_Parameters.m_MaxDimension)
+ const auto inputTensorNumDimensions = inputTensorInfo.GetNumDimensions();
+ if (inputTensorNumDimensions > maxDimensions)
{
throw InvalidArgumentException(descriptorName +
": Input tensors with rank greater than " +
- std::to_string(m_Parameters.m_MaxDimension) + " are not supported.");
+ std::to_string(maxDimensions) + " are not supported.");
+ }
+
+ const auto axisTensorNumDimensions = axisTensorInfo.GetNumDimensions();
+ if (axisTensorNumDimensions > maxDimensions)
+ {
+ throw InvalidArgumentException(descriptorName +
+ ": More than " + std::to_string(maxDimensions) + " axes cannot be specified.");
+ }
+
+ if (axisTensorNumDimensions > inputTensorNumDimensions)
+ {
+ throw InvalidArgumentException(descriptorName +
+ ": More axes specified than the number of axes on the input tensor.");
}
std::vector<DataType> supportedTypes =
@@ -1642,44 +1659,18 @@ void ReverseV2QueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
- ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
- ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
- if (m_Parameters.m_Axis.size() > inputTensorNumDimensions)
- {
- throw InvalidArgumentException(descriptorName + ": More axes specified than is on the input tensor.");
- }
- if (m_Parameters.m_Axis.size() > m_Parameters.m_MaxDimension)
+ std::vector<DataType> axisSupportedTypes =
{
- throw InvalidArgumentException(descriptorName +
- ": More than " + std::to_string(m_Parameters.m_MaxDimension) + " axes cannot be specified.");
- }
+ DataType::Signed32,
+ };
- if (! m_Parameters.m_Axis.empty())
- {
- // First check that we have unique axis values
- auto checkAxis = m_Parameters.m_Axis;
- std::sort(checkAxis.begin(), checkAxis.end());
- auto lastUnique = std::unique(checkAxis.begin(), checkAxis.end());
- if (lastUnique != checkAxis.end())
- {
- throw InvalidArgumentException(descriptorName + ": Axes values must be unique.");
- }
+ ValidateDataTypes(axisTensorInfo, axisSupportedTypes, descriptorName);
- // Next check that the axes values are in range: [-rank, rank]
- const auto minmax =
- std::minmax_element(std::begin(m_Parameters.m_Axis), std::end(m_Parameters.m_Axis));
- if (((*minmax.first) < int32_t(-inputTensorNumDimensions)) ||
- ((*minmax.second) >= int32_t (inputTensorNumDimensions)))
- {
- throw InvalidArgumentException(descriptorName +
- ": Axes values must in range [-" + std::to_string(inputTensorNumDimensions) + "," +
- std::to_string(inputTensorNumDimensions) + "].");
- }
- }
+ ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+ ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
-
void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index ac4bcc90f6..ee797b632c 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1121,12 +1121,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
}
case LayerType::ReverseV2:
{
- auto cLayer = PolymorphicDowncast<const ReverseV2Layer*>(&layer);
- const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input, dataType),
+ result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input0, dataType),
+ OverrideDataType(input1, armnn::DataType::Signed32),
OverrideDataType(output, dataType),
- cLayer->GetParameters(),
reason);
break;
}
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index f7a852f440..182fab97be 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -744,8 +744,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
-DECLARE_LAYER_POLICY_2_PARAM(ReverseV2)
-
DECLARE_LAYER_POLICY_1_PARAM(Shape)
DECLARE_LAYER_POLICY_2_PARAM(Slice)
diff --git a/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp
index 586b831e45..3cfd614336 100644
--- a/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReverseV2TestImpl.cpp
@@ -23,23 +23,25 @@ namespace
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- armnn::ReverseV2Descriptor descriptor,
const std::vector<T>& input,
+ const std::vector<int>& axis,
const std::vector<T>& outputExpected,
const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& axisInfo,
const armnn::TensorInfo& outputInfo)
{
LayerTestResult<T, NumDims> result(outputInfo);
std::vector<T> outputActual(outputInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> axisHandle = tensorHandleFactory.CreateTensorHandle(axisInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
armnn::ReverseV2QueueDescriptor queueDescriptor;
- queueDescriptor.m_Parameters = std::move(descriptor);
armnn::WorkloadInfo workloadInfo;
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
+ AddInputToWorkload(queueDescriptor, workloadInfo, axisInfo, axisHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
// Don't execute if ReverseV2 is not supported, as an exception will be raised.
@@ -47,9 +49,9 @@ namespace
std::string reasonIfUnsupported;
armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
result.m_Supported = handle.IsReverseV2Supported(inputInfo,
- outputInfo,
- queueDescriptor.m_Parameters,
- reasonIfUnsupported);
+ axisInfo,
+ outputInfo,
+ reasonIfUnsupported);
if (!result.m_Supported)
{
return result;
@@ -58,9 +60,11 @@ namespace
auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ReverseV2, queueDescriptor, workloadInfo);
inputHandle->Allocate();
+ axisHandle->Allocate();
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(axisHandle.get(), axis.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
@@ -80,13 +84,13 @@ LayerTestResult<T, 2> ReverseV2SimpleTestEmptyAxis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is the same as input
- auto descriptor = armnn::ReverseV2Descriptor();
+ // Simple test with no axes set so output is the same as input
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2,2}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -94,6 +98,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTestEmptyAxis(
3, 4
}, qScale, qOffset);
+ std::vector<int> axis = armnnUtils::QuantizedVector<int>({}, qScale, qOffset);
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
1, 2,
3, 4
@@ -102,10 +108,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTestEmptyAxis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -115,26 +122,27 @@ LayerTestResult<T, 2> ReverseV2SimpleTestEmptyTensor(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. Empty tensor set so output is the same as input
- auto descriptor = armnn::ReverseV2Descriptor();
+ // Simple test with empty input tensor
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({0}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({0}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({0}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({}, qScale, qOffset);
-
+ std::vector<int> axis = armnnUtils::QuantizedVector<int>({}, qScale, qOffset);
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({}, qScale, qOffset);
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -144,12 +152,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest1Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -157,6 +164,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest1Dim(
3, 4
}, qScale, qOffset);
+ std::vector<int> axis = {0};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3,
2, 1
@@ -165,10 +174,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest1Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -178,12 +188,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim1Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2,2}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -191,6 +200,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim1Axis(
3, 4
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
2, 1,
4, 3
@@ -199,10 +210,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim1Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -212,12 +224,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim2Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1, 0});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2,2}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -225,6 +236,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim2Axis(
3, 4
}, qScale, qOffset);
+ std::vector<int> axis = {1,0};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3,
2, 1
@@ -233,10 +246,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest2Dim2Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -246,12 +260,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim1Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -263,6 +276,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim1Axis(
21, 22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
9, 10, 11, 12,
5, 6, 7, 8,
@@ -275,10 +290,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim1Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -288,12 +304,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim2Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0, 1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -305,6 +320,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim2Axis(
21, 22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {0, 1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
21, 22, 23, 24,
17, 18, 19, 20,
@@ -317,10 +334,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim2Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -330,14 +348,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim3Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1, 0, 2});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({3}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 3, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -349,6 +364,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim3Axis(
21, 22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {1, 0, 2};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
24, 23, 22, 21,
20, 19, 18, 17,
@@ -361,10 +378,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest3Dim3Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -374,14 +392,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim1Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -395,6 +410,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim1Axis(
22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {0};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
13, 14, 15,
16, 17, 18,
@@ -409,10 +426,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim1Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -422,14 +440,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim2Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0, 1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -443,6 +458,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim2Axis(
22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {0, 1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
19, 20, 21,
22, 23, 24,
@@ -457,10 +474,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim2Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -470,14 +488,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim3Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0, 1, 2});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({3}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -491,6 +506,8 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim3Axis(
22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {0, 1, 2};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
22, 23, 24,
19, 20, 21,
@@ -505,10 +522,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim3Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -518,14 +536,11 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim4Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {0, 1, 2, 3});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({4}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 2, 2, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -539,24 +554,27 @@ LayerTestResult<T, 2> ReverseV2SimpleTest4Dim4Axis(
22, 23, 24
}, qScale, qOffset);
+ std::vector<int> axis = {0, 1, 2, 3};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
- 24, 23, 22,
- 21, 20, 19,
- 18, 17, 16,
- 15, 14, 13,
- 12, 11, 10,
- 9, 8, 7,
- 6, 5, 4,
- 3, 2, 1
- }, qScale, qOffset);
+ 24, 23, 22,
+ 21, 20, 19,
+ 18, 17, 16,
+ 15, 14, 13,
+ 12, 11, 10,
+ 9, 8, 7,
+ 6, 5, 4,
+ 3, 2, 1
+ }, qScale, qOffset);
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -566,13 +584,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest2Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -580,6 +596,8 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest2Dim(
4, 5, 6
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
3, 2, 1,
6, 5, 4
@@ -588,10 +606,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest2Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -601,14 +620,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest3Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 3, 1}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 3, 1}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -616,6 +632,8 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest3Dim(
4, 5, 6
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
3, 2, 1,
6, 5, 4
@@ -624,10 +642,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowOddColTest3Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -637,14 +656,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest2Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -652,6 +668,8 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest2Dim(
5, 6, 7, 8
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5
@@ -660,10 +678,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest2Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -673,14 +692,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest3Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 4, 1}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 4, 1}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -688,6 +704,8 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest3Dim(
5, 6, 7, 8
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5
@@ -696,10 +714,11 @@ LayerTestResult<T, 2> ReverseV2EvenRowEvenColTest3Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -709,14 +728,11 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest2Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({3, 3}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({3, 3}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -725,6 +741,8 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest2Dim(
7, 8, 9
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
3, 2, 1,
6, 5, 4,
@@ -734,10 +752,11 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest2Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -747,14 +766,11 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest3Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({3, 3, 1}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({3, 3, 1}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -763,6 +779,8 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest3Dim(
7, 8, 9
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
3, 2, 1,
6, 5, 4,
@@ -772,10 +790,11 @@ LayerTestResult<T, 2> ReverseV2OddRowOddColTest3Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -785,14 +804,11 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest2Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({3, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({3, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -801,6 +817,8 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest2Dim(
9, 10, 11, 12
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5,
@@ -810,10 +828,11 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest2Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -823,14 +842,11 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest3Dim(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({3, 4, 1}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({3, 4, 1}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -839,6 +855,8 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest3Dim(
9, 10, 11, 12
}, qScale, qOffset);
+ std::vector<int> axis = {1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5,
@@ -848,10 +866,11 @@ LayerTestResult<T, 2> ReverseV2OddRowEvenColTest3Dim(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -861,14 +880,11 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest2Dim1Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {-1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 4}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 4}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -876,6 +892,8 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest2Dim1Axis(
5, 6, 7, 8,
}, qScale, qOffset);
+ std::vector<int> axis = {-1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5
@@ -884,10 +902,11 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest2Dim1Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
@@ -897,14 +916,11 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest3Dim2Axis(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- // Simple test with default descriptor. No axes set so output is
- // the same as input
- auto descriptor = armnn::ReverseV2Descriptor(std::vector<int> {1, -1});
-
float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo inputInfo({2, 4, 1}, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo axisInfo({2}, armnn::DataType::Signed32, qScale, qOffset);
armnn::TensorInfo outputInfo({2, 4, 1}, ArmnnType, qScale, qOffset);
std::vector<T> input = armnnUtils::QuantizedVector<T>({
@@ -912,6 +928,8 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest3Dim2Axis(
5, 6, 7, 8,
}, qScale, qOffset);
+ std::vector<int> axis = {1, -1};
+
std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
4, 3, 2, 1,
8, 7, 6, 5
@@ -920,10 +938,11 @@ LayerTestResult<T, 2> ReverseV2NegAxisTest3Dim2Axis(
return ReverseV2TestImpl<ArmnnType, T, 2>(workloadFactory,
memoryManager,
tensorHandleFactory,
- descriptor,
input,
+ axis,
outputExpected,
inputInfo,
+ axisInfo,
outputInfo);
}
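The two negative-axis cases depend on the same normalisation the reference implementation performs further down: a negative axis counts from the back, so on a rank-2 tensor an axis of -1 reverses dimension 1, and on the rank-3 case {1, -1} resolves to dimensions 1 and 2 (the latter a no-op for a size-1 dimension). A one-line sketch of that rule, mirroring the loop in ReverseV2Impl.cpp below:

    // Normalise a possibly negative axis into [0, rank) - e.g. rank 2, axis -1 -> 1.
    int32_t NormaliseAxis(int32_t axis, int32_t rank)
    {
        return axis < 0 ? axis + rank : axis;
    }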
@@ -1081,4 +1100,4 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
ReverseV2SimpleTest2Dim2Axis<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 1d5fab1adc..e94478f088 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -344,7 +344,7 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
case LayerType::ReverseV2:
return IsReverseV2Supported(infos[0],
infos[1],
- *(PolymorphicDowncast<const ReverseV2Descriptor*>(&descriptor)),
+ infos[2],
reasonIfUnsupported);
case LayerType::Reduce:
return IsReduceSupported(infos[0],
@@ -2361,12 +2361,11 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
return supported;
}
-bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input,
+bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input0,
+ const TensorInfo& input1,
const TensorInfo& output,
- const ReverseV2Descriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- IgnoreUnused(descriptor);
bool supported = true;
// ReverseV2 is data type agnostic so it can support all the types in the Reference backend
std::array<DataType,6> supportedTypes =
@@ -2379,14 +2378,22 @@ bool RefLayerSupport::IsReverseV2Supported(const TensorInfo& input,
DataType::QSymmS16
};
- supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference ReverseV2: input type not supported");
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference ReverseV2: input0 type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
"Reference ReverseV2: output type not supported");
- supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference ReverseV2: input and output types not matching");
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference ReverseV2: input0 and output types not matching");
+
+ std::array<DataType,1> axisSupportedTypes =
+ {
+ DataType::Signed32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, axisSupportedTypes), reasonIfUnsupported,
+ "Reference ReverseV2: input1 type not supported");
return supported;
}
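The reference support check now enforces three rules: the data input (input0) must be one of the backend's supported types, the output type must match the data input, and the new axis input (input1) must be Signed32. A simplified, standalone restatement of those rules without the CheckSupportRule machinery; the dataTypes parameter stands in for the backend's supportedTypes array:

    #include <algorithm>
    #include <vector>
    #include <armnn/Types.hpp>

    // Sketch: same checks as above, expressed as plain boolean logic.
    bool ReverseV2TypesOk(armnn::DataType data,
                          armnn::DataType axis,
                          armnn::DataType output,
                          const std::vector<armnn::DataType>& dataTypes)
    {
        const bool dataOk = std::find(dataTypes.begin(), dataTypes.end(), data) != dataTypes.end();
        return dataOk
            && output == data                         // input0 and output types must match
            && axis == armnn::DataType::Signed32;     // the axis input is always Signed32
    }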
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 0afb9c2c94..21d59e27fc 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -299,9 +299,9 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsReverseV2Supported(const TensorInfo& input,
+ bool IsReverseV2Supported(const TensorInfo& input0,
+ const TensorInfo& input1,
const TensorInfo& output,
- const ReverseV2Descriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
bool IsShapeSupported(const TensorInfo& input,
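Callers of the declaration above now pass the axis TensorInfo explicitly. A minimal usage sketch of the new three-TensorInfo overload (RefLayerSupport is an internal backend class, so this is illustration code only, assumed to sit inside a test body):

    armnn::RefLayerSupport support;
    std::string reason;

    armnn::TensorInfo dataInfo({2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32);

    bool ok = support.IsReverseV2Supported(dataInfo, axisInfo, dataInfo,
                                           armnn::Optional<std::string&>(reason));
    // ok is true for Float32 data; 'reason' is only filled in when a rule fails.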
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp
index cd2d9f930b..22d5449466 100644
--- a/src/backends/reference/workloads/RefReverseV2Workload.cpp
+++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp
@@ -32,16 +32,21 @@ namespace armnn
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+ const TensorInfo& axisInfo = GetTensorInfo(inputs[1]);
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
inputs[0]->Map());
+ std::unique_ptr<Decoder<int>> axisDecoder = MakeDecoder<int>(GetTensorInfo(inputs[1]),
+ inputs[1]->Map());
+
std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
outputs[0]->Map());
- ReverseV2(m_Data.m_Parameters,
- inputInfo,
+ ReverseV2(inputInfo,
+ axisInfo,
*inputDecoder,
+ *axisDecoder,
*outputEncoder);
}
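Because the workload now maps a second input, the corresponding queue-descriptor validation (updated in WorkloadData.cpp as part of this change, not shown in this section) would be expected to require exactly two inputs and a Signed32 axis tensor. A hedged sketch of such a check; the function name and structure are illustrative, not copied from WorkloadData.cpp:

    #include <armnn/Exceptions.hpp>
    #include <armnn/Tensor.hpp>
    #include <vector>

    // Hypothetical helper showing the shape of the extra validation.
    void ValidateReverseV2Inputs(const std::vector<armnn::TensorInfo>& inputInfos)
    {
        if (inputInfos.size() != 2)
        {
            throw armnn::InvalidArgumentException("ReverseV2: exactly two inputs expected");
        }
        if (inputInfos[1].GetDataType() != armnn::DataType::Signed32)
        {
            throw armnn::InvalidArgumentException("ReverseV2: axis input must be Signed32");
        }
    }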
diff --git a/src/backends/reference/workloads/ReverseV2Impl.cpp b/src/backends/reference/workloads/ReverseV2Impl.cpp
index f6d5fd74d1..896f9050f5 100644
--- a/src/backends/reference/workloads/ReverseV2Impl.cpp
+++ b/src/backends/reference/workloads/ReverseV2Impl.cpp
@@ -75,13 +75,16 @@ unsigned int ReverseRelocateIdx(unsigned int idx,
return outputIdx;
}
-void ReverseV2(const ReverseV2Descriptor& params,
- const TensorInfo& inputInfo,
+void ReverseV2(const TensorInfo& inputInfo,
+ const TensorInfo& axisInfo,
Decoder<float>& inputDecoder,
+ Decoder<int>& axisDecoder,
Encoder<float>& outputEncoder)
{
+ unsigned int axesRank = static_cast<unsigned int>(axisInfo.GetNumElements());
+
// Empty axis and empty tensor case: copy input to output
- if (params.m_Axis.empty() || inputInfo.GetNumElements() == 0)
+ if ((axesRank == 0) || inputInfo.GetNumElements() == 0)
{
for (unsigned idx = 0; idx < inputInfo.GetNumElements(); idx++)
{
@@ -95,11 +98,19 @@ void ReverseV2(const ReverseV2Descriptor& params,
unsigned int inputRank = static_cast<unsigned int>(inputInfo.GetNumDimensions());
- std::vector<bool>axisFlag(inputRank, false);
- std::vector<unsigned int>dimSize(inputRank, 0);
+ std::vector<bool> axisFlag(inputRank, false);
+ std::vector<unsigned int> dimSize(inputRank, 0);
+ std::vector<int32_t> axis(axesRank, 0);
+
+ // Decode the axis information
+ for (unsigned int i=0; i < axesRank; i++)
+ {
+ axis[i] = axisDecoder.Get();
+ axisDecoder += 1;
+ }
// Make sure the axes are positive
- for (int32_t axisElement: params.m_Axis)
+ for (int32_t axisElement: axis)
{
axisElement = axisElement < 0 ? axisElement + static_cast<int32_t>(inputRank) : axisElement;
axisFlag[static_cast<uint32_t>(axisElement)] = true;
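Taken together, the implementation now (1) reads the axis list out of the second input via the int decoder, (2) normalises negative axes and flags the dimensions to flip, and (3) relocates every element index through ReverseRelocateIdx. A self-contained sketch of the same algorithm on plain std::vector data, with the Decoder/Encoder plumbing and the ArmNN-specific helpers omitted:

    #include <cstdint>
    #include <vector>

    // Sketch: reverse 'input' (flattened, row-major, with 'shape') along 'axes';
    // negative axes count from the back.
    std::vector<float> ReverseV2Sketch(const std::vector<float>& input,
                                       const std::vector<unsigned int>& shape,
                                       const std::vector<int32_t>& axes)
    {
        // Empty axis list (or empty tensor): output is a copy of the input.
        if (axes.empty() || input.empty())
        {
            return input;
        }

        const unsigned int rank = static_cast<unsigned int>(shape.size());

        // Normalise negative axes and flag the dimensions to reverse.
        std::vector<bool> flip(rank, false);
        for (int32_t a : axes)
        {
            a = a < 0 ? a + static_cast<int32_t>(rank) : a;
            flip[static_cast<unsigned int>(a)] = true;
        }

        // Row-major strides for converting between flat and per-dimension indices.
        std::vector<unsigned int> stride(rank, 1);
        for (unsigned int d = rank - 1; d > 0; --d)
        {
            stride[d - 1] = stride[d] * shape[d];
        }

        std::vector<float> output(input.size());
        for (unsigned int idx = 0; idx < input.size(); ++idx)
        {
            unsigned int remaining = idx;
            unsigned int outIdx    = 0;
            for (unsigned int d = 0; d < rank; ++d)
            {
                unsigned int coord = remaining / stride[d];
                remaining         %= stride[d];
                if (flip[d])
                {
                    coord = shape[d] - 1 - coord;   // mirror this coordinate
                }
                outIdx += coord * stride[d];
            }
            output[outIdx] = input[idx];
        }
        return output;
    }

With input {1..6}, shape {2, 3} and axes {1}, this yields {3, 2, 1, 6, 5, 4}, matching the 2-D test data above.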
diff --git a/src/backends/reference/workloads/ReverseV2Impl.hpp b/src/backends/reference/workloads/ReverseV2Impl.hpp
index bc1fe1d432..59407d4a4e 100644
--- a/src/backends/reference/workloads/ReverseV2Impl.hpp
+++ b/src/backends/reference/workloads/ReverseV2Impl.hpp
@@ -13,9 +13,10 @@
namespace armnn
{
-void ReverseV2(const ReverseV2Descriptor& params,
- const TensorInfo& inputInfo,
+void ReverseV2(const TensorInfo& inputInfo,
+ const TensorInfo& axisInfo,
Decoder<float>& inputDecoder,
+ Decoder<int>& axisDecoder,
Encoder<float>& outputEncoder);
-} // namespace armnn \ No newline at end of file
+} // namespace armnn
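For reference, a minimal sketch of driving the new free function directly, assuming the reference-workload headers (ReverseV2Impl.hpp, Decoders.hpp, Encoders.hpp) are on the include path and the snippet sits inside a test body:

    // Reverse a 2x3 tensor along its last dimension.
    armnn::TensorInfo inputInfo({2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo axisInfo({1}, armnn::DataType::Signed32);

    std::vector<float>   input = {1, 2, 3, 4, 5, 6};
    std::vector<int32_t> axis  = {1};
    std::vector<float>   output(input.size());

    auto inputDecoder  = armnn::MakeDecoder<float>(inputInfo, input.data());
    auto axisDecoder   = armnn::MakeDecoder<int>(axisInfo, axis.data());
    auto outputEncoder = armnn::MakeEncoder<float>(inputInfo, output.data());

    armnn::ReverseV2(inputInfo, axisInfo, *inputDecoder, *axisDecoder, *outputEncoder);
    // output is now {3, 2, 1, 6, 5, 4}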