Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.cpp                           |  3
-rw-r--r--  src/armnn/InternalTypes.hpp                           |  5
-rw-r--r--  src/armnn/LayerSupport.cpp                            |  6
-rw-r--r--  src/armnn/LayersFwd.hpp                               |  6
-rw-r--r--  src/armnn/Network.cpp                                 | 10
-rw-r--r--  src/armnn/Network.hpp                                 |  5
-rw-r--r--  src/armnn/QuantizerVisitor.cpp                        | 20
-rw-r--r--  src/armnn/QuantizerVisitor.hpp                        |  7
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.cpp            | 62
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.hpp            | 48
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                     | 37
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                      | 55
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp  |  7
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp  |  1
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.cpp           |  2
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.hpp           |  2
16 files changed, 164 insertions(+), 112 deletions(-)
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 8c2a0f77e0..10e7f501b7 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -14,7 +14,6 @@ char const* GetLayerTypeAsCString(LayerType type)
{
switch (type)
{
- case LayerType::Abs: return "Abs";
case LayerType::Activation: return "Activation";
case LayerType::Addition: return "Addition";
case LayerType::ArgMinMax: return "ArgMinMax";
@@ -32,6 +31,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Dequantize: return "Dequantize";
case LayerType::DetectionPostProcess: return "DetectionPostProcess";
case LayerType::Division: return "Division";
+ case LayerType::ElementwiseUnary: return "ElementwiseUnary";
case LayerType::FakeQuantization: return "FakeQuantization";
case LayerType::Floor: return "Floor";
case LayerType::FullyConnected: return "FullyConnected";
@@ -58,7 +58,6 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Quantize: return "Quantize";
case LayerType::QuantizedLstm: return "QuantizedLstm";
case LayerType::Reshape: return "Reshape";
- case LayerType::Rsqrt: return "Rsqrt";
case LayerType::Resize: return "Resize";
case LayerType::Slice: return "Slice";
case LayerType::Softmax: return "Softmax";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 36e7280e96..2d7be3cac6 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -14,8 +14,7 @@ namespace armnn
enum class LayerType
{
FirstLayer,
- Abs = FirstLayer,
- Activation,
+ Activation = FirstLayer,
Addition,
ArgMinMax,
BatchNormalization,
@@ -32,6 +31,7 @@ enum class LayerType
Dequantize,
DetectionPostProcess,
Division,
+ ElementwiseUnary,
FakeQuantization,
Floor,
FullyConnected,
@@ -59,7 +59,6 @@ enum class LayerType
QuantizedLstm,
Reshape,
Resize,
- Rsqrt,
Slice,
Softmax,
SpaceToBatchNd,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index dac88385b1..08d91fc20b 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -570,7 +570,11 @@ bool IsRsqrtSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsElementwiseUnarySupported,
+ input,
+ output,
+ ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt));
}
bool IsSoftmaxSupported(const BackendId& backend,
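
The deprecated IsRsqrtSupported entry point above now forwards to the unified elementwise-unary check, selecting the operation through the descriptor. A minimal sketch of querying the new path directly, given an ILayerSupport& layerSupport obtained from the backend; the member's parameter order is inferred from the forwarding macro above, so treat the exact signature as an assumption:

    // Sketch only: signature inferred from the FORWARD_LAYER_SUPPORT_FUNC call
    // above; the real ILayerSupport member may differ.
    armnn::TensorInfo input({1, 16}, armnn::DataType::Float32);
    armnn::TensorInfo output({1, 16}, armnn::DataType::Float32);
    armnn::ElementwiseUnaryDescriptor desc(armnn::UnaryOperation::Rsqrt);

    bool supported = layerSupport.IsElementwiseUnarySupported(input, output, desc);
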
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 13bf900dca..2d486f48a6 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -6,7 +6,6 @@
#include "InternalTypes.hpp"
-#include "layers/AbsLayer.hpp"
#include "layers/ActivationLayer.hpp"
#include "layers/AdditionLayer.hpp"
#include "layers/ArgMinMaxLayer.hpp"
@@ -24,6 +23,7 @@
#include "layers/DequantizeLayer.hpp"
#include "layers/DetectionPostProcessLayer.hpp"
#include "layers/DivisionLayer.hpp"
+#include "layers/ElementwiseUnaryLayer.hpp"
#include "layers/FakeQuantizationLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/FullyConnectedLayer.hpp"
@@ -51,7 +51,6 @@
#include "layers/QuantizedLstmLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeLayer.hpp"
-#include "layers/RsqrtLayer.hpp"
#include "layers/SliceLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SpaceToBatchNdLayer.hpp"
@@ -91,7 +90,6 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
#define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
-DECLARE_LAYER(Abs)
DECLARE_LAYER(Activation)
DECLARE_LAYER(Addition)
DECLARE_LAYER(ArgMinMax)
@@ -109,6 +107,7 @@ DECLARE_LAYER(DepthwiseConvolution2d)
DECLARE_LAYER(Dequantize)
DECLARE_LAYER(DetectionPostProcess)
DECLARE_LAYER(Division)
+DECLARE_LAYER(ElementwiseUnary)
DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Floor)
DECLARE_LAYER(FullyConnected)
@@ -136,7 +135,6 @@ DECLARE_LAYER(Quantize)
DECLARE_LAYER(QuantizedLstm)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(Resize)
-DECLARE_LAYER(Rsqrt)
DECLARE_LAYER(Slice)
DECLARE_LAYER(Softmax)
DECLARE_LAYER(SpaceToBatchNd)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 43c79c8479..7edc6240a1 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -967,6 +967,12 @@ IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& compa
return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}
+IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
+}
+
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
@@ -1200,7 +1206,7 @@ IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescrip
IConnectableLayer* Network::AddAbsLayer(const char * name)
{
- return m_Graph->AddLayer<AbsLayer>(name);
+ return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}
IConnectableLayer* Network::AddAdditionLayer(const char* name)
@@ -1475,7 +1481,7 @@ IConnectableLayer* Network::AddEqualLayer(const char* name)
IConnectableLayer* Network::AddRsqrtLayer(const char * name)
{
- return m_Graph->AddLayer<RsqrtLayer>(name);
+ return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}
IConnectableLayer* Network::AddGatherLayer(const char* name)
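
With AddAbsLayer and AddRsqrtLayer reduced to deprecated wrappers, client code migrates to the descriptor-based call. A minimal sketch against the public INetwork API (tensor setup and connections omitted; the layer name is illustrative):

    armnn::INetworkPtr network = armnn::INetwork::Create();

    // Before (deprecated):
    //     armnn::IConnectableLayer* abs = network->AddAbsLayer("abs");
    // After: select the operation through the descriptor.
    armnn::ElementwiseUnaryDescriptor absDesc(armnn::UnaryOperation::Abs);
    armnn::IConnectableLayer* abs = network->AddElementwiseUnaryLayer(absDesc, "abs");
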
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 0a11941340..23a8e47093 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -95,6 +95,9 @@ public:
const ConstTensor& anchors,
const char* name = nullptr) override;
+ IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
@@ -137,6 +140,7 @@ public:
IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
IConnectableLayer* AddAbsLayer(const char* name = nullptr) override;
IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
@@ -208,6 +212,7 @@ public:
ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
IConnectableLayer* AddEqualLayer(const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override;
IConnectableLayer* AddMergeLayer(const char* name = nullptr) override;
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 4b80b02e34..51818ebddd 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -115,9 +115,7 @@ void QuantizerVisitor::RecordLayer(const IConnectableLayer* srcLayer, IConnectab
void QuantizerVisitor::VisitAbsLayer(const IConnectableLayer* layer, const char* name)
{
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddAbsLayer(name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
+ VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}
void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer,
@@ -275,6 +273,15 @@ void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer*
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
+ const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
const FullyConnectedDescriptor& desc,
const ConstTensor& weights,
@@ -450,12 +457,9 @@ void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer,
SetQuantizedInputConnections(layer, newLayer);
}
-void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name)
+void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer, const char* name)
{
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddRsqrtLayer(name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
+ VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}
void QuantizerVisitor::VisitSliceLayer(const IConnectableLayer* layer,
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index db0134d7a4..4013033697 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -32,6 +32,7 @@ public:
~QuantizerVisitor() = default;
/// Functions to quantize the individual layers, overridden from ILayerVisitor
+ ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
void VisitActivationLayer(const IConnectableLayer* layer,
@@ -78,13 +79,16 @@ public:
const DepthToSpaceDescriptor& depthToSpaceDescriptor,
const char* name = nullptr) override;
-
void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
const DepthwiseConvolution2dDescriptor& desc,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
+ const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name = nullptr) override;
+
void VisitFullyConnectedLayer(const IConnectableLayer *layer,
const FullyConnectedDescriptor& desc,
const ConstTensor& weights,
@@ -142,6 +146,7 @@ public:
const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
void VisitRsqrtLayer(const IConnectableLayer*,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
new file mode 100644
index 0000000000..d3843da060
--- /dev/null
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <algorithm>
+
+namespace armnn
+{
+
+ElementwiseUnaryLayer::ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::ElementwiseUnary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ ElementwiseUnaryQueueDescriptor descriptor;
+ return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ElementwiseUnaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ // Should return the shape of the input tensor
+ BOOST_ASSERT(inputShapes.size() == 1);
+ const TensorShape& input = inputShapes[0];
+
+ return std::vector<TensorShape>({ input });
+}
+
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ std::vector<TensorShape> inferredShapes = InferOutputShapes({
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
new file mode 100644
index 0000000000..850a814b6e
--- /dev/null
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an elementwise unary operation.
+class ElementwiseUnaryLayer : public LayerWithParameters<ElementwiseUnaryDescriptor>
+{
+public:
+ /// Makes a workload for the elementwise unary operation this layer represents
+ /// @param [in] factory The workload factory which will create the workload
+ /// @return A pointer to the created workload, or nullptr if not created
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer
+ /// @param [in] graph The graph into which this layer is being cloned
+ ElementwiseUnaryLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shape, which for an elementwise unary operation is the input shape unchanged.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s) will lead to a valid configuration
+ /// of @ref ElementwiseUnaryLayer
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create an ElementwiseUnaryLayer
+ /// @param [in] param ElementwiseUnaryDescriptor to configure the ElementwiseUnaryLayer
+ /// @param [in] name Optional name for the layer
+ ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ElementwiseUnaryLayer() = default;
+};
+
+} // namespace armnn
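
Because the constructor is protected, the layer is created through the graph, exactly as Network::AddElementwiseUnaryLayer above and the test helper below do. A minimal sketch, given an armnn::Graph& graph:

    armnn::ElementwiseUnaryDescriptor desc(armnn::UnaryOperation::Rsqrt);
    armnn::ElementwiseUnaryLayer* layer =
        graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "rsqrt");
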
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 02ce12a304..4782c432a2 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -131,14 +131,15 @@ std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFact
return workload;
}
-template <typename WorkloadType,
+template <typename WorkloadType,
typename DescriptorType,
- typename LayerType,
armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory,
- armnn::Graph & graph)
+ armnn::Graph & graph,
+ armnn::UnaryOperation op)
{
- Layer* const layer = graph.AddLayer<LayerType>("layer");
+ ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op);
+ Layer* const layer = graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "layer");
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
@@ -1059,34 +1060,6 @@ std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloa
return workload;
}
-template <typename RsqrtWorkload, armnn::DataType DataType>
-std::unique_ptr<RsqrtWorkload> CreateRsqrtWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- Layer* const layer = graph.AddLayer<RsqrtLayer>("rsqrt");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({1, 1}, DataType);
-
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
-
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<RsqrtWorkload>(*layer, factory);
-
- RsqrtQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-
- return workload;
-}
-
template <typename BatchToSpaceNdWorkload, armnn::DataType DataType>
std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
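
Backend tests that previously called the removed CreateRsqrtWorkloadTest now instantiate the unified template and select the operation at the call site. A sketch with hypothetical backend types; RefRsqrtWorkload and RsqrtQueueDescriptor stand in for whatever workload and queue-descriptor types the backend under test defines, and only the helper's signature is taken from the template above:

    // Hypothetical workload/descriptor names; signature matches
    // CreateElementwiseUnaryWorkloadTest as declared above.
    auto workload = CreateElementwiseUnaryWorkloadTest<RefRsqrtWorkload,
                                                       RsqrtQueueDescriptor,
                                                       armnn::DataType::Float32>(
        factory, graph, armnn::UnaryOperation::Rsqrt);
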
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 52beb630f9..d568b2cbc0 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1672,61 +1672,6 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
-BOOST_AUTO_TEST_CASE(QuantizeAbs)
-{
- class TestAbsQuantization : public TestLeakyReLuActivationQuantization
- {
- public:
- TestAbsQuantization(const TensorShape& inputShape, const TensorShape& outputShape) :
- TestLeakyReLuActivationQuantization(inputShape, outputShape)
- {}
-
- TestAbsQuantization(const QuantizerOptions& options,
- const TensorShape& inputShape,
- const TensorShape& outputShape) :
- TestLeakyReLuActivationQuantization(options, inputShape, outputShape)
- {}
-
- void VisitAbsLayer(const IConnectableLayer *layer,
- const char *name = nullptr) override
- {
- boost::ignore_unused(name);
- TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
-
- TestQuantizationParams(outputInfo,
- { 30.0f / g_Asymm8QuantizationBase, 128 },
- { 15.0f / g_Symm8QuantizationBase, 0},
- { 15.0f / g_Symm16QuantizationBase, 0 });
- }
- };
-
- INetworkPtr network = INetwork::Create();
-
- //Add the layer being tested
- IConnectableLayer* absLayer = network->AddAbsLayer();
-
- const TensorShape shape{1U};
- TensorInfo info(shape, DataType::Float32);
-
- IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info);
-
- CompleteLeakyReluNetwork(network.get(), activation, absLayer, info);
-
- INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
- TestAbsQuantization validatorQAsymm8(shape, shape);
- VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
-
- const QuantizerOptions qSymm8Options(DataType::QSymmS8);
- INetworkPtr quantizedNetworkQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options)->ExportNetwork();
- TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
- VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
-
- const QuantizerOptions qSymm16options(DataType::QSymmS16);
- INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
- TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
- VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
-}
-
BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
{
class TestArgMinMaxQuantization : public TestQuantization
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 36bbd36792..efe50a5b58 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -86,6 +86,12 @@ armnn::ConcatDescriptor GetDescriptor<armnn::ConcatDescriptor>()
}
template<>
+armnn::ElementwiseUnaryDescriptor GetDescriptor<armnn::ElementwiseUnaryDescriptor>()
+{
+ return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs);
+}
+
+template<>
armnn::InstanceNormalizationDescriptor GetDescriptor<armnn::InstanceNormalizationDescriptor>()
{
armnn::InstanceNormalizationDescriptor descriptor;
@@ -251,6 +257,7 @@ TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(DepthToSpace)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Comparison)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ElementwiseUnary)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index 221057cbdc..f792bc3554 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -48,6 +48,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(BatchToSpaceNd)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(InstanceNormalization)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(L2Normalization)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogSoftmax)
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 32de94e7ef..0653b39e58 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -38,7 +38,6 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name)
BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Abs)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division)
@@ -50,7 +49,6 @@ TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rsqrt)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index c770b5e9e0..84dfdd6539 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -25,7 +25,6 @@ public: \
} // anonymous namespace
-DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Abs)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Addition)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Dequantize)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Division)
@@ -37,6 +36,5 @@ DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Minimum)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize)
-DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rsqrt)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch)