author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-02-11 12:21:27 +0000
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-02-11 13:33:20 +0000
commit    0085978ac40ecd008195d635cd009a1d4f49fb74 (patch)
tree      560c296e74b94826d6338b7d0d92224ae526a426
parent    3dad5acc5d8eda6fc472b9a255c1d893d4e1f942 (diff)
download  armnn-0085978ac40ecd008195d635cd009a1d4f49fb74.tar.gz
IVGCVSW-2676 Make biases optional in ILayerVisitor for Convolution2D, DepthwiseConvolution2D and FullyConnected
Change-Id: I3048504ff699fdb266488e7c07b7262e5843d4b0
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
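
Not part of the commit: a minimal sketch of what a downstream visitor looks like against the new unified signatures, where the bias arrives as a single Optional<ConstTensor> instead of separate with-bias/without-bias overloads. It assumes it is built inside the Arm NN source tree (LayerVisitorBase.hpp lives under src/armnn) and that the no-op default policy defined alongside LayerVisitorBase is named VisitorNoThrowPolicy; the class and helper names here are illustrative only.

#include <armnn/ILayerVisitor.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include "LayerVisitorBase.hpp" // src/armnn header; assumes building within the Arm NN tree

#include <iostream>

// Inspects only the layers whose bias parameter became Optional<ConstTensor>
// in this change; every other visit function falls back to the no-op defaults
// provided by LayerVisitorBase.
class BiasInspectionVisitor : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitConvolution2dLayer(const armnn::IConnectableLayer* /*layer*/,
                                 const armnn::Convolution2dDescriptor& /*descriptor*/,
                                 const armnn::ConstTensor& weights,
                                 const armnn::Optional<armnn::ConstTensor>& biases,
                                 const char* name) override
    {
        Report("Convolution2d", name, weights, biases);
    }

    void VisitFullyConnectedLayer(const armnn::IConnectableLayer* /*layer*/,
                                  const armnn::FullyConnectedDescriptor& /*descriptor*/,
                                  const armnn::ConstTensor& weights,
                                  const armnn::Optional<armnn::ConstTensor>& biases,
                                  const char* name) override
    {
        Report("FullyConnected", name, weights, biases);
    }

    // VisitDepthwiseConvolution2dLayer follows the same pattern.

private:
    static void Report(const char* layerType,
                       const char* name,
                       const armnn::ConstTensor& weights,
                       const armnn::Optional<armnn::ConstTensor>& biases)
    {
        std::cout << layerType << " \"" << (name ? name : "<unnamed>") << "\": "
                  << weights.GetNumElements() << " weights, ";
        if (biases.has_value())
        {
            std::cout << biases.value().GetNumElements() << " bias elements" << std::endl;
        }
        else
        {
            std::cout << "no bias" << std::endl;
        }
    }
};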
-rw-r--r--  include/armnn/ILayerVisitor.hpp                    50
-rw-r--r--  src/armnn/LayerVisitorBase.hpp                    231
-rw-r--r--  src/armnn/QuantizerVisitor.cpp                     67
-rw-r--r--  src/armnn/QuantizerVisitor.hpp                     18
-rw-r--r--  src/armnn/StaticRangeVisitor.cpp                   31
-rw-r--r--  src/armnn/StaticRangeVisitor.hpp                   20
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp            10
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp   12
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp           12
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp        481
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.hpp        117
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                   70
-rw-r--r--  src/armnn/test/TestLayerVisitor.cpp                10
-rw-r--r--  src/armnn/test/TestLayerVisitor.hpp               233
14 files changed, 588 insertions(+), 774 deletions(-)
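
Also not part of the commit: the layer Accept() methods and the tests in the diff below all build the optional bias argument the same way. A standalone sketch of that calling convention, with purely illustrative tensor shapes and data:

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <vector>

int main()
{
    // Illustrative bias tensor; shape and values are placeholders.
    std::vector<float> biasData = {0.1f, 0.2f, 0.3f};
    std::vector<unsigned int> biasDims = {3};
    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims.data(), armnn::DataType::Float32), biasData);

    // Bias-enabled layers wrap the tensor; bias-disabled layers pass EmptyOptional().
    armnn::Optional<armnn::ConstTensor> withBias(bias);
    armnn::Optional<armnn::ConstTensor> noBias = armnn::EmptyOptional();

    // Visitors (and the quantizer) branch on has_value() instead of relying on
    // separate with-bias / without-bias overloads.
    if (withBias.has_value())
    {
        const armnn::ConstTensor& b = withBias.value();
        (void)b; // e.g. quantize it or validate its shape against the output tensor
    }
    return noBias.has_value() ? 1 : 0;
}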
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 3b92733a81..c0350d253a 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -6,6 +6,7 @@
#include <armnn/NetworkFwd.hpp>
#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Types.hpp>
@@ -27,52 +28,30 @@ public:
LayerBindingId id,
const char* name = nullptr) = 0;
- /// Function that a 2D convolution layer without biases should call back to when its Accept(ILayerVisitor&)
+ /// Function that a 2D convolution layer should call back to when its Accept(ILayerVisitor&)
/// function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param convolution2dDescriptor - Description of the 2D convolution layer.
/// @param weights - Tensor for the weights data.
+ /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
/// @param name - Optional name for the layer.
virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
- /// Function that a 2D convolution layer with bias should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D convolution layer.
- /// @param weights - Tensor for the weights data.
- /// @param biases - Tensor for the bias data. Must match the output tensor shape.
- /// @param name - Optional name for the layer.
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D depthwise convolution layer without biases should call back to when its
- /// Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
- /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
- /// @param name - Optional name for the layer.
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) = 0;
-
/// Function that a 2D depthwise convolution layer with biases should call back to when its
/// Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
/// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
- /// @param biases - Tensor for the bias data. Must match the output tensor shape.
+ /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
/// @param name - Optional name for the layer.
virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const ConstTensor& biases,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
/// Function that a Detection PostProcess layer should call back to when its
@@ -86,28 +65,17 @@ public:
const ConstTensor& anchors,
const char* name = nullptr) = 0;
- /// Function that a fully connected layer without biases should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param fullyConnectedDescriptor - Description of the fully connected layer.
- /// @param weights - Tensor for the weights data.
- /// @param name - Optional name for the layer.
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) = 0;
-
- /// Function that a fully connected layer with biases should call back to when its Accept(ILayerVisitor&)
+ /// Function that a fully connected layer should call back to when its Accept(ILayerVisitor&)
/// function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param fullyConnectedDescriptor - Description of the fully connected layer.
/// @param weights - Tensor for the weights data.
- /// @param biases - Tensor for the bias data.
+ /// @param biases - Optional tensor for the bias data.
/// @param name - Optional name for the layer.
virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
- const ConstTensor& biases,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
/// Function that a permute layer should call back to when its Accept(ILayerVisitor&) function is invoked.
diff --git a/src/armnn/LayerVisitorBase.hpp b/src/armnn/LayerVisitorBase.hpp
index 641ca31e2d..2c37a21786 100644
--- a/src/armnn/LayerVisitorBase.hpp
+++ b/src/armnn/LayerVisitorBase.hpp
@@ -29,161 +29,146 @@ protected:
virtual ~LayerVisitorBase() {}
public:
- virtual void VisitInputLayer(const IConnectableLayer*,
- LayerBindingId,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer*,
- const Convolution2dDescriptor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer*,
- const Convolution2dDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
- const DepthwiseConvolution2dDescriptor&,
- const ConstTensor& ,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
- const DepthwiseConvolution2dDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitDetectionPostProcessLayer(const IConnectableLayer*,
- const DetectionPostProcessDescriptor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
-
- virtual void VisitFullyConnectedLayer(const IConnectableLayer*,
- const FullyConnectedDescriptor&,
+ void VisitInputLayer(const IConnectableLayer*,
+ LayerBindingId,
+ const char*) override { DefaultPolicy::Apply(); }
+
+ void VisitConvolution2dLayer(const IConnectableLayer*,
+ const Convolution2dDescriptor&,
+ const ConstTensor&,
+ const Optional<ConstTensor>&,
+ const char*) override { DefaultPolicy::Apply(); }
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
+ const DepthwiseConvolution2dDescriptor&,
const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
+ const Optional<ConstTensor>&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitFullyConnectedLayer(const IConnectableLayer*,
- const FullyConnectedDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitDetectionPostProcessLayer(const IConnectableLayer*,
+ const DetectionPostProcessDescriptor&,
+ const ConstTensor&,
+ const char*) override { DefaultPolicy::Apply(); }
+
+ void VisitFullyConnectedLayer(const IConnectableLayer*,
+ const FullyConnectedDescriptor&,
+ const ConstTensor&,
+ const Optional<ConstTensor>&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitPermuteLayer(const IConnectableLayer*,
- const PermuteDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitPermuteLayer(const IConnectableLayer*,
+ const PermuteDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitBatchToSpaceNdLayer(const IConnectableLayer*,
- const BatchToSpaceNdDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitBatchToSpaceNdLayer(const IConnectableLayer*,
+ const BatchToSpaceNdDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitPooling2dLayer(const IConnectableLayer*,
- const Pooling2dDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitPooling2dLayer(const IConnectableLayer*,
+ const Pooling2dDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitActivationLayer(const IConnectableLayer*,
- const ActivationDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitActivationLayer(const IConnectableLayer*,
+ const ActivationDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitNormalizationLayer(const IConnectableLayer*,
- const NormalizationDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitNormalizationLayer(const IConnectableLayer*,
+ const NormalizationDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitSoftmaxLayer(const IConnectableLayer*,
- const SoftmaxDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitSoftmaxLayer(const IConnectableLayer*,
+ const SoftmaxDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitSplitterLayer(const IConnectableLayer*,
- const ViewsDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitSplitterLayer(const IConnectableLayer*,
+ const ViewsDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitMergerLayer(const IConnectableLayer*,
- const OriginsDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitMergerLayer(const IConnectableLayer*,
+ const OriginsDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitAdditionLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitAdditionLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitMultiplicationLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitMultiplicationLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitBatchNormalizationLayer(const IConnectableLayer*,
- const BatchNormalizationDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
- const ConstTensor&,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitBatchNormalizationLayer(const IConnectableLayer*,
+ const BatchNormalizationDescriptor&,
+ const ConstTensor&,
+ const ConstTensor&,
+ const ConstTensor&,
+ const ConstTensor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitResizeBilinearLayer(const IConnectableLayer*,
- const ResizeBilinearDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitResizeBilinearLayer(const IConnectableLayer*,
+ const ResizeBilinearDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitL2NormalizationLayer(const IConnectableLayer*,
- const L2NormalizationDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitL2NormalizationLayer(const IConnectableLayer*,
+ const L2NormalizationDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitConstantLayer(const IConnectableLayer*,
- const ConstTensor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitConstantLayer(const IConnectableLayer*,
+ const ConstTensor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitReshapeLayer(const IConnectableLayer*,
- const ReshapeDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitReshapeLayer(const IConnectableLayer*,
+ const ReshapeDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer*,
- const SpaceToBatchNdDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitSpaceToBatchNdLayer(const IConnectableLayer*,
+ const SpaceToBatchNdDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitFloorLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitFloorLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitOutputLayer(const IConnectableLayer*,
- LayerBindingId id,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitOutputLayer(const IConnectableLayer*,
+ LayerBindingId id,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitLstmLayer(const IConnectableLayer*,
- const LstmDescriptor&,
- const LstmInputParams&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitLstmLayer(const IConnectableLayer*,
+ const LstmDescriptor&,
+ const LstmInputParams&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitDivisionLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitDivisionLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitSubtractionLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitSubtractionLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitMaximumLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitMaximumLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitMeanLayer(const IConnectableLayer*,
- const MeanDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitMeanLayer(const IConnectableLayer*,
+ const MeanDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitPadLayer(const IConnectableLayer*,
- const PadDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitPadLayer(const IConnectableLayer*,
+ const PadDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitStridedSliceLayer(const IConnectableLayer*,
- const StridedSliceDescriptor&,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitStridedSliceLayer(const IConnectableLayer*,
+ const StridedSliceDescriptor&,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitMinimumLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitMinimumLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitGreaterLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitGreaterLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitEqualLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitEqualLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitRsqrtLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitRsqrtLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
- virtual void VisitGatherLayer(const IConnectableLayer*,
- const char*) { DefaultPolicy::Apply(); }
+ void VisitGatherLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
};
} //namespace armnn
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index c5e203ef86..f5ff83c31f 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -82,29 +82,24 @@ void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer,
void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
const FullyConnectedDescriptor& desc,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char *name)
{
std::vector<uint8_t> weightsBacking;
ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
- const FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const ConstTensor& bias,
- const char *name)
-{
- std::vector<uint8_t> weightsBacking;
- ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
-
- std::vector<uint8_t> biasBacking;
- ConstTensor qBias = CreateQuantizedConst(bias, biasBacking);
+ IConnectableLayer* newLayer;
+ if (biases.has_value())
+ {
+ std::vector<uint8_t> biasBacking;
+ ConstTensor qBias = CreateQuantizedConst(biases.value(), biasBacking);
+ newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
+ }
+ else
+ {
+ newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, name);
+ }
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddFullyConnectedLayer(desc, qWeights, qBias, name);
RecordLayer(layer, newLayer);
SetQuantizedInputConnections(layer, newLayer);
}
@@ -156,36 +151,30 @@ void QuantizerVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* lay
void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name)
{
std::vector<uint8_t> weightsBacking;
ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
- qWeights,
- name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
-}
-
-void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- std::vector<uint8_t> weightsBacking;
- ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+ IConnectableLayer* newLayer;
+ if (biases.has_value())
+ {
+ std::vector<uint8_t> biasesBacking;
+ ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
- std::vector<uint8_t> biasesBacking;
- ConstTensor qBiases = CreateQuantizedConst(weights, biasesBacking);
+ newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
+ qWeights,
+ qBiases,
+ name);
+ }
+ else
+ {
+ newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor, qWeights, name);
+ }
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddConvolution2dLayer(convolution2dDescriptor,
- qWeights,
- qBiases,
- name);
RecordLayer(layer, newLayer);
SetQuantizedInputConnections(layer, newLayer);
}
-} //namespace armnn
+} // namespace armnn
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index fbf9cfa20e..8d8b787d89 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -29,11 +29,15 @@ public:
/// Functions to quantize the individual layers, overridden from ILayerVisitor
void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
+
void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
+
void VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override;
+
void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
+
void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
@@ -41,25 +45,17 @@ public:
const ConstTensor& beta,
const ConstTensor& gamma,
const char* name = nullptr) override;
+
void VisitFullyConnectedLayer(const IConnectableLayer *layer,
const FullyConnectedDescriptor&,
const ConstTensor&,
- const char *name = nullptr) override;
- void VisitFullyConnectedLayer(const IConnectableLayer *layer,
- const FullyConnectedDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
+ const Optional<ConstTensor>&,
const char *name = nullptr) override;
- // Extract the quantized network
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) override;
void VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const ConstTensor& biases,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
/// Extract the quantized network
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index fa95938b37..6ae0d3314b 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -62,24 +62,16 @@ void StaticRangeVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* l
void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name)
{
boost::ignore_unused(convolution2dDescriptor);
boost::ignore_unused(weights);
+ boost::ignore_unused(biases);
boost::ignore_unused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
-void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- boost::ignore_unused(biases);
- VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
-}
-
void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name)
@@ -108,26 +100,15 @@ void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
}
}
-void StaticRangeVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer *layer,
- const armnn::FullyConnectedDescriptor& desc,
+void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer,
+ const FullyConnectedDescriptor& desc,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char *name)
{
boost::ignore_unused(desc);
boost::ignore_unused(weights);
- boost::ignore_unused(name);
- SetRange(layer, 0, -15.0f, 15.0f);
-}
-
-void StaticRangeVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer *layer,
- const armnn::FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const ConstTensor& bias,
- const char *name)
-{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(bias);
+ boost::ignore_unused(biases);
boost::ignore_unused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index 81a0f4aede..887f3e9120 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -28,6 +28,7 @@ public:
/// Functions to set the Range on a per-layer-type basis
void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
+
void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
@@ -35,26 +36,21 @@ public:
const ConstTensor& beta,
const ConstTensor& gamma,
const char* name = nullptr) override;
+
void VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override;
+
void VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override;
- void VisitFullyConnectedLayer(const armnn::IConnectableLayer *layer,
- const armnn::FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const char *name) override;
- void VisitFullyConnectedLayer(const armnn::IConnectableLayer *layer,
- const armnn::FullyConnectedDescriptor& desc,
+
+ void VisitFullyConnectedLayer(const IConnectableLayer *layer,
+ const FullyConnectedDescriptor& desc,
const ConstTensor& weights,
- const ConstTensor& bias,
+ const Optional<ConstTensor>& biases,
const char *name) override;
/// Retrieve the default range
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 2c0997a9d0..243bcc3dfd 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -113,15 +113,15 @@ Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
+ Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
+
if (GetParameters().m_BiasEnabled)
{
ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
- visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
- }
- else
- {
- visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, GetName());
+ optionalBiasTensor = Optional<ConstTensor>(biasTensor);
}
+
+ visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index a17673fc1e..a1ffe91792 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -122,16 +122,16 @@ Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
+ Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
+
if (GetParameters().m_BiasEnabled)
{
ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true));
- visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
- }
- else
- {
- visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, GetName());
+ optionalBiasTensor = Optional<ConstTensor>(biasTensor);
}
+
+ visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 219113bca6..98b81fa6a8 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -88,16 +88,16 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)) ;
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
+ Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
+
if (GetParameters().m_BiasEnabled)
{
ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void>());
- visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, biasTensor, GetName());
- }
- else
- {
- visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, GetName());
+ optionalBiasTensor = Optional<ConstTensor>(biasTensor);
}
+
+ visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
} // namespace armnn
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index 6ab2ea89a2..5b77ddeb97 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -122,11 +122,11 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights);
layer->Accept(visitor);
@@ -146,11 +146,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -170,16 +170,15 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
-
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -200,15 +199,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+ TestConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -227,11 +226,11 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights);
layer->Accept(visitor);
@@ -251,11 +250,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, layerName);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -275,15 +274,15 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -304,15 +303,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestDepthwiseConvolution2dWithBiasLayerVisitor visitor(descriptor, weights, biases, layerName);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -325,11 +324,11 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights);
layer->Accept(visitor);
@@ -343,11 +342,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, layerName);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, layerName);
layer->Accept(visitor);
@@ -361,15 +360,15 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases));
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases);
layer->Accept(visitor);
@@ -384,15 +383,15 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor weights(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor biases(TensorInfo(4, biasDimensions.data(), armnn::DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- TestFullyConnectedLayerWithBiasesVisitor visitor(descriptor, weights, biases, layerName);
+ TestFullyConnectedLayerVistor visitor(descriptor, weights, Optional<ConstTensor>(biases), layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, biases, layerName);
layer->Accept(visitor);
@@ -406,23 +405,23 @@ BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
layer->Accept(visitor);
@@ -437,23 +436,23 @@ BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor mean(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- armnn::ConstTensor variance(TensorInfo(4, varianceDimensions.data(), armnn::DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor beta(TensorInfo(4, betaDimensions.data(), armnn::DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- armnn::ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), armnn::DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
descriptor, mean, variance, beta, gamma, layerName);
@@ -464,11 +463,11 @@ BOOST_AUTO_TEST_CASE(CheckConstLayer)
{
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
TestConstantLayerVisitor visitor(input);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConstantLayer(input);
layer->Accept(visitor);
@@ -479,11 +478,11 @@ BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
const char* layerName = "ConstantLayer";
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- armnn::ConstTensor input(TensorInfo(4, dimensions.data(), armnn::DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
TestConstantLayerVisitor visitor(input, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
layer->Accept(visitor);
@@ -499,48 +498,48 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -555,7 +554,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -572,48 +571,48 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -628,7 +627,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -644,68 +643,68 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+ ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+ ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+ ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -725,7 +724,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -742,68 +741,68 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), armnn::DataType::Float32), inputToInputWeightsData);
+ ConstTensor inputToInputWeights(
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(TensorInfo(
+ 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), armnn::DataType::Float32), cellToInputWeightsData);
+ ConstTensor cellToInputWeights(
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), armnn::DataType::Float32), inputGateBiasData);
+ ConstTensor inputGateBias(
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -823,7 +822,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -841,58 +840,58 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+ ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+ ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -910,7 +909,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -928,58 +927,58 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), armnn::DataType::Float32), cellToForgetWeightsData);
+ ConstTensor cellToForgetWeights(
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), armnn::DataType::Float32), cellToOutputWeightsData);
+ ConstTensor cellToOutputWeights(
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -997,7 +996,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1015,58 +1014,58 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+ ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+ ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1084,7 +1083,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1102,58 +1101,58 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), armnn::DataType::Float32), inputToForgetWeightsData);
+ ConstTensor inputToForgetWeights(
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), armnn::DataType::Float32), inputToCellWeightsData);
+ ConstTensor inputToCellWeights(
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), armnn::DataType::Float32), inputToOutputWeightsData);
+ ConstTensor inputToOutputWeights(
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), armnn::DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(TensorInfo(
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), armnn::DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(TensorInfo(
+ 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), armnn::DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(TensorInfo(
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), armnn::DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(TensorInfo(
+ 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), armnn::DataType::Float32), cellBiasData);
+ ConstTensor cellBias(TensorInfo(
+ 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), armnn::DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(TensorInfo(
+ 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), armnn::DataType::Float32), projectionBiasData);
+ ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
- armnn::ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), armnn::DataType::Float32), projectionWeightsData);
+ ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1171,7 +1170,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- armnn::Network net;
+ Network net;
IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
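
The LSTM test cases above build every weight and bias tensor the same way: a dimensions vector, a flat data vector, then a ConstTensor over a 4D Float32 TensorInfo. The sketch below is illustrative only (MakeConstTensor is not part of this patch) and simply captures that repeated pattern; note that ConstTensor keeps a pointer to the caller's data rather than copying it, so the data vector must outlive the tensor.

    // Illustrative helper (not part of this patch): builds a Float32 ConstTensor
    // from a dimensions vector and a flat data vector, mirroring the pattern above.
    // ConstTensor does not copy the data, so 'data' must outlive the returned tensor.
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <vector>

    armnn::ConstTensor MakeConstTensor(const std::vector<unsigned int>& dimensions,
                                       const std::vector<float>& data)
    {
        // TensorInfo copies the dimension values, so a temporary dimensions vector is fine.
        armnn::TensorInfo info(static_cast<unsigned int>(dimensions.size()),
                               dimensions.data(),
                               armnn::DataType::Float32);
        return armnn::ConstTensor(info, data);
    }

    // Usage, matching the tensors above:
    // std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    // armnn::ConstTensor inputToForgetWeights = MakeConstTensor({1, 1, 3, 3}, inputToForgetWeightsData);
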
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 3b0f723542..513a471465 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -16,58 +16,34 @@ class TestConvolution2dLayerVisitor : public TestLayerVisitor
public:
explicit TestConvolution2dLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(convolution2dDescriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestConvolution2dLayerVisitor() {};
void VisitConvolution2dLayer(const IConnectableLayer* layer,
const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(convolution2dDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
void CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor);
private:
- armnn::Convolution2dDescriptor m_Descriptor;
- armnn::ConstTensor m_Weights;
-};
-
-class TestConvolution2dWithBiasLayerVisitor : public TestConvolution2dLayerVisitor
-{
-public:
- explicit TestConvolution2dWithBiasLayerVisitor(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestConvolution2dLayerVisitor(
- convolution2dDescriptor, weights, name),
- m_Biases(biases) {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestConvolution2dLayerVisitor::VisitConvolution2dLayer;
-
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestConvolution2dLayerVisitor::VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- armnn::ConstTensor m_Biases;
+ Convolution2dDescriptor m_Descriptor;
+ ConstTensor m_Weights;
+ Optional<ConstTensor> m_Biases;
};
class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
@@ -75,60 +51,34 @@ class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
public:
explicit TestDepthwiseConvolution2dLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(descriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestDepthwiseConvolution2dLayerVisitor() {};
void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(convolution2dDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
void CheckDescriptor(const DepthwiseConvolution2dDescriptor& convolution2dDescriptor);
private:
- armnn::DepthwiseConvolution2dDescriptor m_Descriptor;
- armnn::ConstTensor m_Weights;
-};
-
-class TestDepthwiseConvolution2dWithBiasLayerVisitor : public TestDepthwiseConvolution2dLayerVisitor
-{
-public:
- explicit TestDepthwiseConvolution2dWithBiasLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestDepthwiseConvolution2dLayerVisitor(descriptor, weights, name),
- m_Biases(biases) {};
-
- ~TestDepthwiseConvolution2dWithBiasLayerVisitor() {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer;
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestDepthwiseConvolution2dLayerVisitor::VisitDepthwiseConvolution2dLayer(
- layer, convolution2dDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- armnn::ConstTensor m_Biases;
+ DepthwiseConvolution2dDescriptor m_Descriptor;
+ ConstTensor m_Weights;
+ Optional<ConstTensor> m_Biases;
};
class TestFullyConnectedLayerVistor : public TestLayerVisitor
@@ -136,21 +86,25 @@ class TestFullyConnectedLayerVistor : public TestLayerVisitor
public:
explicit TestFullyConnectedLayerVistor(const FullyConnectedDescriptor& descriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor> biases,
const char* name = nullptr) : TestLayerVisitor(name),
m_Descriptor(descriptor),
- m_Weights(weights) {};
+ m_Weights(weights),
+ m_Biases(biases) {};
virtual ~TestFullyConnectedLayerVistor() {};
void VisitFullyConnectedLayer(const IConnectableLayer* layer,
const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(fullyConnectedDescriptor);
CheckConstTensors(m_Weights, weights);
+ CheckOptionalConstTensors(m_Biases, biases);
}
protected:
@@ -158,34 +112,7 @@ protected:
private:
FullyConnectedDescriptor m_Descriptor;
ConstTensor m_Weights;
-};
-
-class TestFullyConnectedLayerWithBiasesVisitor : public TestFullyConnectedLayerVistor
-{
-public:
- explicit TestFullyConnectedLayerWithBiasesVisitor(const FullyConnectedDescriptor& descriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) :
- TestFullyConnectedLayerVistor(descriptor, weights, name),
- m_Biases(biases) {};
-
- // needed to suppress crappy error message about base class function i.e. version
- // without the biases argument being hidden
- using TestFullyConnectedLayerVistor::VisitFullyConnectedLayer;
-
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) override
- {
- TestFullyConnectedLayerVistor::VisitFullyConnectedLayer(layer, fullyConnectedDescriptor, weights, name);
- CheckConstTensors(m_Biases, biases);
- }
-
-private:
- ConstTensor m_Biases;
+ Optional<ConstTensor> m_Biases;
};
class TestBatchNormalizationLayerVisitor : public TestLayerVisitor
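
With the with-bias and without-bias visitors merged, a test constructs a single TestConvolution2dLayerVisitor and passes the bias through an armnn::Optional. A rough sketch of both call sites follows; it assumes the armnn::EmptyOptional helper and made-up weight/bias shapes, neither of which is shown in this diff.

    std::vector<float> weightsData = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<unsigned int> weightsDimensions = {1, 1, 2, 2};
    ConstTensor weights(
        TensorInfo(4, weightsDimensions.data(), DataType::Float32), weightsData);

    Convolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = false;

    // No bias: pass an empty Optional; CheckOptionalConstTensors then only verifies
    // that the visited layer also reports an empty bias.
    TestConvolution2dLayerVisitor visitorNoBias(descriptor, weights, EmptyOptional());

    // With bias: wrap the ConstTensor in an Optional and enable the bias in the descriptor.
    std::vector<float> biasData = {1.0f};
    std::vector<unsigned int> biasDimensions = {1};
    ConstTensor bias(TensorInfo(1, biasDimensions.data(), DataType::Float32), biasData);
    descriptor.m_BiasEnabled = true;
    TestConvolution2dLayerVisitor visitorWithBias(descriptor, weights, Optional<ConstTensor>(bias));
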
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index a960c6b772..90935f37f8 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -500,10 +500,11 @@ INetworkPtr CreateNetworkWithFullyConnectedLayer(const bool biasEnabled)
class TestFullyConnectedQuantization : public TestQuantization
{
public:
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const char* name = nullptr)
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -514,37 +515,17 @@ public:
// Based off current static value [-15.0f, 15.0f]
BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
- //Test constants
+ //Test weights
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
-
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
-
BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
- }
-
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& desc,
- const ConstTensor& weights,
- const ConstTensor& bias,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
-
- BOOST_TEST((info.GetQuantizationOffset() == 128));
-
- // Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
- //Test constants
- BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((bias.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
-
- BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
- BOOST_CHECK_CLOSE(bias.GetInfo().GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
-
- BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
+ // Test biases
+ if (biases.has_value())
+ {
+ BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
+ }
}
};
@@ -570,8 +551,9 @@ class TestConv2dQuantization : public TestQuantization
{
public:
virtual void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor &convolution2dDescriptor,
- const ConstTensor &weights,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char *name = nullptr)
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -581,24 +563,18 @@ public:
// Based off current static value [-15.0f, 15.0f]
BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
- // test weights const
+ // Test weights
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
- }
- virtual void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor &convolution2dDescriptor,
- const ConstTensor &weights,
- const ConstTensor &biases,
- const char *name = nullptr)
- {
- VisitConvolution2dLayer(layer, convolution2dDescriptor, weights, name);
-
- // test biases const
- BOOST_TEST((biases.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(biases.GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
- BOOST_TEST((biases.GetInfo().GetQuantizationOffset() == 85));
+ // Test biases
+ if (biases.has_value())
+ {
+ BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
+ BOOST_TEST((biases.value().GetInfo().GetQuantizationOffset() == 85));
+ }
}
};
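
The quantizer tests above now guard the bias checks with has_value(). The same guard could be factored into a small helper inside QuantizerTest.cpp, which already pulls in the Boost.Test macros; the function below is only a sketch (the name and the expectedScale parameter are not from the patch).

    // Sketch: only inspect quantization parameters on the bias when one was provided.
    void CheckOptionalBiasQuantization(const Optional<ConstTensor>& biases, float expectedScale)
    {
        if (biases.has_value())
        {
            BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
            BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), expectedScale, 0.000001f);
        }
    }
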
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index 932aef6deb..4c028b3709 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -48,4 +48,14 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
}
}
+void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
+ const Optional<ConstTensor>& actual)
+{
+ BOOST_CHECK(expected.has_value() == actual.has_value());
+ if (expected.has_value() && actual.has_value())
+ {
+ CheckConstTensors(expected.value(), actual.value());
+ }
+}
+
} //namespace armnn
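
CheckOptionalConstTensors relies on the basic armnn::Optional semantics: a default-constructed Optional holds no value, has_value() reports presence, and value() returns the wrapped ConstTensor. A minimal illustration (the values are made up):

    std::vector<float> biasData = {0.5f};
    std::vector<unsigned int> biasDimensions = {1};
    ConstTensor bias(TensorInfo(1, biasDimensions.data(), DataType::Float32), biasData);

    Optional<ConstTensor> empty;              // default-constructed: no value
    Optional<ConstTensor> withBias(bias);     // wraps the ConstTensor

    BOOST_CHECK(!empty.has_value());
    BOOST_CHECK(withBias.has_value());
    BOOST_CHECK(withBias.value().GetInfo().GetNumElements() == 1);

CheckOptionalConstTensors(empty, withBias) would therefore fail on the has_value() comparison, while two empty optionals, or two optionals wrapping matching tensors, pass.
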
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index fe2631fa39..8c0da50be3 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -21,6 +21,8 @@ protected:
void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+ void CheckOptionalConstTensors(const Optional<ConstTensor>& expected, const Optional<ConstTensor>& actual);
+
private:
const char* m_LayerName;
@@ -33,161 +35,146 @@ public:
}
}
- virtual void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) {}
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
-
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
-
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
-
- virtual void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr) {}
-
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) {}
+ void VisitInputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override {}
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ void VisitConvolution2dLayer(const IConnectableLayer* layer,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) {}
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
+
+ void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
+ const DetectionPostProcessDescriptor& descriptor,
+ const ConstTensor& anchors,
+ const char* name = nullptr) override {}
+
+ void VisitFullyConnectedLayer(const IConnectableLayer* layer,
+ const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override {}
- virtual void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) {}
+ void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) {}
+ void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
+ const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) {}
+ void VisitPooling2dLayer(const IConnectableLayer* layer,
+ const Pooling2dDescriptor& pooling2dDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) {}
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& activationDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) {}
+ void VisitNormalizationLayer(const IConnectableLayer* layer,
+ const NormalizationDescriptor& normalizationDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) {}
+ void VisitSoftmaxLayer(const IConnectableLayer* layer,
+ const SoftmaxDescriptor& softmaxDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) {}
+ void VisitSplitterLayer(const IConnectableLayer* layer,
+ const ViewsDescriptor& splitterDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) {}
+ void VisitMergerLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& mergerDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitAdditionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMultiplicationLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMultiplicationLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) {}
+ void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
+ const BatchNormalizationDescriptor& desc,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name = nullptr) override {}
- virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) {}
+ void VisitResizeBilinearLayer(const IConnectableLayer* layer,
+ const ResizeBilinearDescriptor& resizeDesc,
+ const char* name = nullptr) override {}
- virtual void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) {}
+ void VisitL2NormalizationLayer(const IConnectableLayer* layer,
+ const L2NormalizationDescriptor& desc,
+ const char* name = nullptr) override {}
- virtual void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr) {}
+ void VisitConstantLayer(const IConnectableLayer* layer,
+ const ConstTensor& input,
+ const char* name = nullptr) override {}
- virtual void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) {}
+ void VisitReshapeLayer(const IConnectableLayer* layer,
+ const ReshapeDescriptor& reshapeDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) {}
+ void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
+ const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitFloorLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitFloorLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) {}
+ void VisitOutputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override {}
- virtual void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) {}
+ void VisitLstmLayer(const IConnectableLayer* layer,
+ const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name = nullptr) override {}
- virtual void VisitDivisionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitDivisionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitSubtractionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitSubtractionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMaximumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMaximumLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) {}
+ void VisitMeanLayer(const IConnectableLayer* layer,
+ const MeanDescriptor& meanDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) {}
+ void VisitPadLayer(const IConnectableLayer* layer,
+ const PadDescriptor& padDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) {}
+ void VisitStridedSliceLayer(const IConnectableLayer* layer,
+ const StridedSliceDescriptor& stridedSliceDescriptor,
+ const char* name = nullptr) override {}
- virtual void VisitMinimumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitMinimumLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitGreaterLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitGreaterLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitEqualLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitEqualLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitRsqrtLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
- virtual void VisitGatherLayer(const IConnectableLayer* layer,
- const char* name = nullptr) {}
+ void VisitGatherLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override {}
};
} //namespace armnn
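
Taken together, a consumer of the updated ILayerVisitor now overrides one function per layer type and branches on the Optional bias instead of choosing between two overloads. A rough sketch that derives from the TestLayerVisitor above; the class name and the counting logic are illustrative only, and it assumes TestLayerVisitor stays constructible with just a name.

    #include "TestLayerVisitor.hpp"

    // Counts how many visited 2D convolution layers carry a bias tensor.
    class BiasCountingVisitor : public armnn::TestLayerVisitor
    {
    public:
        BiasCountingVisitor() : armnn::TestLayerVisitor("BiasCountingVisitor") {}

        void VisitConvolution2dLayer(const armnn::IConnectableLayer* /*layer*/,
                                     const armnn::Convolution2dDescriptor& /*descriptor*/,
                                     const armnn::ConstTensor& /*weights*/,
                                     const armnn::Optional<armnn::ConstTensor>& biases,
                                     const char* /*name*/ = nullptr) override
        {
            if (biases.has_value())
            {
                ++m_LayersWithBiases;
            }
        }

        unsigned int m_LayersWithBiases = 0;
    };
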