aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKeith Davis <keith.davis@arm.com>2020-10-23 17:20:05 +0100
committerKeith Davis <keith.davis@arm.com>2020-11-09 10:54:30 +0000
commitdf04d23a6608fa3d5d1c1ffae4abc43582034d22 (patch)
tree3bb1caa7cb73937f31b77156824872ad78255363
parent90231b8c9f680d323e4b93dcd0820a47925e6d24 (diff)
downloadarmnn-df04d23a6608fa3d5d1c1ffae4abc43582034d22.tar.gz
IVGCVSW-5327 Add to Layer a binary blob to host the activation layer info
Signed-off-by: Keith Davis <keith.davis@arm.com> Change-Id: I0a07dea96a86849701ba387dbea148909a6d729b
-rw-r--r--src/armnn/Layer.cpp5
-rw-r--r--src/armnn/Layer.hpp19
-rw-r--r--src/armnn/layers/AbsLayer.cpp2
-rw-r--r--src/armnn/layers/ActivationLayer.cpp2
-rw-r--r--src/armnn/layers/AdditionLayer.cpp2
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp1
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp1
-rw-r--r--src/armnn/layers/ComparisonLayer.cpp2
-rw-r--r--src/armnn/layers/ConcatLayer.cpp1
-rw-r--r--src/armnn/layers/ConstantLayer.cpp2
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp3
-rw-r--r--src/armnn/layers/DebugLayer.cpp2
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.cpp2
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp3
-rw-r--r--src/armnn/layers/DequantizeLayer.cpp1
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp2
-rw-r--r--src/armnn/layers/DivisionLayer.cpp2
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.cpp1
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.cpp2
-rw-r--r--src/armnn/layers/FillLayer.cpp2
-rw-r--r--src/armnn/layers/FloorLayer.cpp2
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp4
-rw-r--r--src/armnn/layers/GatherLayer.cpp2
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/L2NormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.cpp2
-rw-r--r--src/armnn/layers/LstmLayer.cpp2
-rw-r--r--src/armnn/layers/MapLayer.cpp1
-rw-r--r--src/armnn/layers/MaximumLayer.cpp2
-rw-r--r--src/armnn/layers/MeanLayer.cpp1
-rw-r--r--src/armnn/layers/MemCopyLayer.cpp1
-rw-r--r--src/armnn/layers/MemImportLayer.cpp1
-rw-r--r--src/armnn/layers/MinimumLayer.cpp2
-rw-r--r--src/armnn/layers/MultiplicationLayer.cpp2
-rw-r--r--src/armnn/layers/NormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/PadLayer.cpp1
-rw-r--r--src/armnn/layers/PermuteLayer.cpp2
-rw-r--r--src/armnn/layers/Pooling2dLayer.cpp2
-rw-r--r--src/armnn/layers/PreCompiledLayer.cpp2
-rw-r--r--src/armnn/layers/PreluLayer.cpp1
-rw-r--r--src/armnn/layers/QLstmLayer.cpp2
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp3
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp2
-rw-r--r--src/armnn/layers/RankLayer.cpp2
-rw-r--r--src/armnn/layers/ReshapeLayer.cpp2
-rw-r--r--src/armnn/layers/ResizeLayer.cpp2
-rw-r--r--src/armnn/layers/RsqrtLayer.cpp2
-rw-r--r--src/armnn/layers/SliceLayer.cpp2
-rw-r--r--src/armnn/layers/SoftmaxLayer.cpp2
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.cpp5
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.cpp2
-rw-r--r--src/armnn/layers/SplitterLayer.cpp2
-rw-r--r--src/armnn/layers/StackLayer.cpp2
-rw-r--r--src/armnn/layers/StridedSliceLayer.cpp2
-rw-r--r--src/armnn/layers/SubtractionLayer.cpp2
-rw-r--r--src/armnn/layers/SwitchLayer.cpp2
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/TransposeLayer.cpp2
-rw-r--r--src/armnn/layers/UnmapLayer.cpp1
-rw-r--r--src/armnn/test/CreateWorkload.hpp408
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp10
-rw-r--r--src/backends/reference/test/RefCreateWorkloadTests.cpp101
67 files changed, 659 insertions, 4 deletions
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index d06b0459f6..62f861b4a0 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -242,6 +242,11 @@ void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
}
}
+void Layer::SetAdditionalInfo(QueueDescriptor& descriptor) const
+{
+ descriptor.m_AdditionalInfoObject = m_AdditionalInfoObject.get();
+}
+
void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index fff57732aa..6cb3b8f5bd 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -28,6 +28,7 @@
#include <memory>
#include <string>
#include <vector>
+#include <backendsCommon/WorkloadData.hpp>
namespace armnn
{
@@ -204,6 +205,7 @@ class ScopedCpuTensorHandle;
// Base layer class
using LayerPriority = unsigned int;
+using AdditionalInfoObjectPtr = std::shared_ptr<void>;
class Layer : public IConnectableLayer
{
@@ -333,6 +335,17 @@ public:
m_ShapeInferenceMethod = shapeInferenceMethod;
}
+ template<typename T>
+ std::shared_ptr<T> GetAdditionalInformation()
+ {
+ return std::static_pointer_cast<T>(m_AdditionalInfoObject);
+ }
+
+ void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr& additionalInfo)
+ {
+ m_AdditionalInfoObject = additionalInfo;
+ }
+
protected:
// Graph needs access to the virtual destructor.
friend class Graph;
@@ -377,6 +390,12 @@ protected:
using ConstantTensors = std::vector<std::reference_wrapper<std::unique_ptr<ScopedCpuTensorHandle>>>;
virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
+ // "Blob"
+ AdditionalInfoObjectPtr m_AdditionalInfoObject;
+
+ // Utility method to set a pointer in the queueDescriptor to the "blob" location in the layer
+ void SetAdditionalInfo(QueueDescriptor& descriptor) const;
+
private:
void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index e04fcbba1a..7aa4099641 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -22,6 +22,8 @@ AbsLayer::AbsLayer(const char* name)
std::unique_ptr<IWorkload> AbsLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
AbsQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateAbs(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index d3d02c3c19..7bfa28ef73 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -20,6 +20,8 @@ ActivationLayer::ActivationLayer(const ActivationDescriptor& param, const char*
std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ActivationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index b27f450f73..8b1f2a8dff 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -22,6 +22,8 @@ AdditionLayer::AdditionLayer(const char* name)
std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
AdditionQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index bd914ec245..219f34682c 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -24,6 +24,8 @@ ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* nam
std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ArgMinMaxQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 625e0d472d..ce351a4376 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -27,6 +27,7 @@ std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorklo
ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
BatchNormalizationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
descriptor.m_Mean = m_Mean.get();
descriptor.m_Variance = m_Variance.get();
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 1a5cfa6647..a13b0b731a 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -31,6 +31,7 @@ BatchToSpaceNdLayer::BatchToSpaceNdLayer(const armnn::BatchToSpaceNdDescriptor&
std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
BatchToSpaceNdQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index a9639e8285..399834d72d 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -23,6 +23,8 @@ ComparisonLayer::ComparisonLayer(const ComparisonDescriptor& param, const char*
std::unique_ptr<IWorkload> ComparisonLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ComparisonQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateComparison(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index affbe7148e..238fdb66d9 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -31,6 +31,7 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& f
descriptor.m_ViewOrigins.emplace_back(
std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
}
+ SetAdditionalInfo(descriptor);
return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index cd8a056fb3..76b9997cfe 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -22,6 +22,8 @@ std::unique_ptr<IWorkload> ConstantLayer::CreateWorkload(const IWorkloadFactory&
{
ConstantQueueDescriptor descriptor;
descriptor.m_LayerOutput = m_LayerOutput.get();
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 81bb4d9f1b..3577723a38 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -22,6 +22,8 @@ ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertBf16ToFp32QueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 709ca137f4..3b6f72c440 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -22,6 +22,8 @@ ConvertFp16ToFp32Layer::ConvertFp16ToFp32Layer(const char* name)
std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertFp16ToFp32QueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 9b02b2f64b..f909769b9d 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -22,6 +22,8 @@ ConvertFp32ToBf16Layer::ConvertFp32ToBf16Layer(const char* name)
std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertFp32ToBf16QueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConvertFp32ToBf16(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 7b2df00e0b..3e6f055a4a 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -21,6 +21,8 @@ ConvertFp32ToFp16Layer::ConvertFp32ToFp16Layer(const char* name)
std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
ConvertFp32ToFp16QueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 5fff982ca1..26f11f3d0c 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -60,6 +60,9 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFac
ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
+
+ SetAdditionalInfo(descriptor);
+
return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index c29421fc08..ade09ed3d4 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -26,6 +26,8 @@ std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const IWorkloadFactory& fa
descriptor.m_LayerName = prevLayer.GetNameStr();
descriptor.m_SlotIndex = GetInputSlot(0).GetConnectedOutputSlot()->CalculateIndexOnOwner();
+ SetAdditionalInfo(descriptor);
+
return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index dae557ea7b..dfa575b7a3 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -28,6 +28,8 @@ std::unique_ptr<IWorkload> DepthToSpaceLayer::CreateWorkload(const IWorkloadFact
descriptor.m_Parameters.m_BlockSize = m_Param.m_BlockSize;
descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateDepthToSpace(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 8a7cf23bb7..139d268631 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -62,6 +62,9 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
ARMNN_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
+
+ SetAdditionalInfo(descriptor);
+
return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index f79888260a..cbe9ae17b5 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -20,6 +20,7 @@ std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(
const IWorkloadFactory& factory) const
{
DequantizeQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index b18781b1c0..d54bf26c40 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -24,6 +24,8 @@ std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn
{
DetectionPostProcessQueueDescriptor descriptor;
descriptor.m_Anchors = m_Anchors.get();
+ SetAdditionalInfo(descriptor);
+
return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index 193b96b6ee..5b032ce998 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -22,6 +22,8 @@ DivisionLayer::DivisionLayer(const char* name)
std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
DivisionQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateDivision(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index cf4c2fc36b..74fa16e15f 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -23,6 +23,7 @@ ElementwiseUnaryLayer::ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& p
std::unique_ptr<IWorkload> ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ElementwiseUnaryQueueDescriptor descriptor;
+
return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index ab41324061..a316b2b82a 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -21,6 +21,8 @@ FakeQuantizationLayer::FakeQuantizationLayer(const FakeQuantizationDescriptor& p
std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FakeQuantizationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor) );
}
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 329a30a5bc..41471c3412 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -21,6 +21,8 @@ FillLayer::FillLayer(const FillDescriptor& param, const char* name)
std::unique_ptr<IWorkload> FillLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FillQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateFill(descriptor, PrepInfoAndDesc(descriptor) );
}
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 5ff9a9a1c5..e03bdb16ff 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -21,6 +21,8 @@ FloorLayer::FloorLayer(const char* name)
std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
FloorQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index f10beda72b..0dc138b761 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -26,12 +26,16 @@ std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFa
FullyConnectedQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
+
+ SetAdditionalInfo(descriptor);
+
return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index e5d4a18967..9a4f9bf8f0 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -21,6 +21,8 @@ GatherLayer::GatherLayer(const GatherDescriptor& param, const char* name)
std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
{
GatherQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index eb6fe90767..87c6877df8 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -21,6 +21,8 @@ InstanceNormalizationLayer::InstanceNormalizationLayer(const InstanceNormalizati
std::unique_ptr<IWorkload> InstanceNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
InstanceNormalizationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateInstanceNormalization(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index ab2b094acf..c96e708075 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -21,6 +21,8 @@ L2NormalizationLayer::L2NormalizationLayer(const L2NormalizationDescriptor& para
std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
L2NormalizationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 1620acb166..24e79ce8ae 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -21,6 +21,8 @@ LogSoftmaxLayer::LogSoftmaxLayer(const LogSoftmaxDescriptor &param, const char*
std::unique_ptr<IWorkload> LogSoftmaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
LogSoftmaxQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateLogSoftmax(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 724bd6b780..8e396ab70c 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -72,6 +72,8 @@ std::unique_ptr<IWorkload> LstmLayer::CreateWorkload(const IWorkloadFactory& fac
descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
}
+ SetAdditionalInfo(descriptor);
+
return factory.CreateLstm(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index bc6cbf19e5..608a71eba6 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -28,6 +28,7 @@ std::unique_ptr<IWorkload> MapLayer::CreateWorkload(const IWorkloadFactory& fact
{
IgnoreUnused(factory);
MapQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
//This is different from other workloads. Does not get created by the workload factory.
return std::make_unique<MapWorkload>(descriptor, PrepInfoAndDesc(descriptor));
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index ab7bf88f5f..d57e9e63ab 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -21,6 +21,8 @@ MaximumLayer::MaximumLayer(const char* name)
std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MaximumQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateMaximum(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 0c5959ca0d..b5c7708fc3 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -26,6 +26,7 @@ std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::IWorkloadFacto
MeanQueueDescriptor descriptor;
descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+ SetAdditionalInfo(descriptor);
return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 854b4f669d..d9a802c23c 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -28,6 +28,7 @@ std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory&
{
IgnoreUnused(factory);
MemCopyQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
//This is different from other workloads. Does not get created by the workload factory.
return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index d9148fb579..3d1c702946 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -28,6 +28,7 @@ std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory
{
IgnoreUnused(factory);
MemImportQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
//This is different from other workloads. Does not get created by the workload factory.
return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 9154d788d5..f60815ed6b 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -22,6 +22,8 @@ MinimumLayer::MinimumLayer(const char* name)
std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MinimumQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateMinimum(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index f02ee57a2c..8fc13aca76 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -22,6 +22,8 @@ MultiplicationLayer::MultiplicationLayer(const char* name)
std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
MultiplicationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index b75bb338cc..4bf97edb72 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -21,6 +21,8 @@ NormalizationLayer::NormalizationLayer(const NormalizationDescriptor& param, con
std::unique_ptr<IWorkload> NormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
NormalizationQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index a8c749c570..324dd4a2e7 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -23,6 +23,7 @@ std::unique_ptr<IWorkload> PadLayer::CreateWorkload(const armnn::IWorkloadFactor
{
PadQueueDescriptor descriptor;
descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
+ SetAdditionalInfo(descriptor);
return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 3c4d1ee096..859e687cb3 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -25,6 +25,8 @@ PermuteLayer::PermuteLayer(const PermuteDescriptor& param, const char* name)
std::unique_ptr<IWorkload> PermuteLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
PermuteQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 5411695492..dbeee840e8 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -27,6 +27,8 @@ Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* nam
std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
Pooling2dQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index afc9877928..dbbc1fd716 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -32,6 +32,8 @@ std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::IWorklo
{
PreCompiledQueueDescriptor descriptor;
descriptor.m_PreCompiledObject = m_PreCompiledObject.get();
+ SetAdditionalInfo(descriptor);
+
return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 365dd4fadc..f9f534e648 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -23,6 +23,7 @@ PreluLayer::PreluLayer(const char* name)
std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
PreluQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 4d0d57cc49..85f99bddf9 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -73,6 +73,8 @@ std::unique_ptr<IWorkload> QLstmLayer::CreateWorkload(const IWorkloadFactory& fa
descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
}
+ SetAdditionalInfo(descriptor);
+
return factory.CreateQLstm(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index aad6dd87bf..6ce28c4153 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -19,7 +19,10 @@ QuantizeLayer::QuantizeLayer(const char* name)
std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
QuantizeQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
WorkloadInfo info = PrepInfoAndDesc(descriptor);
+
return factory.CreateQuantize(descriptor, info);
}
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index ad227618a9..624e443064 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -39,6 +39,8 @@ std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const IWorkloadFac
descriptor.m_CellBias = m_QuantizedLstmParameters.m_CellBias.get();
descriptor.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias.get();
+ SetAdditionalInfo(descriptor);
+
return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 2e70134126..2b0dffe370 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -20,6 +20,8 @@ RankLayer::RankLayer(const char* name)
std::unique_ptr<IWorkload> RankLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
RankQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateRank(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 526531604b..f303ff7c68 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -22,6 +22,8 @@ ReshapeLayer::ReshapeLayer(const ReshapeDescriptor& param, const char* name)
std::unique_ptr<IWorkload> ReshapeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ReshapeQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateReshape(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 53af5f9524..3a390d43cd 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -26,6 +26,8 @@ ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name)
std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ResizeQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index e85d865675..9c09701ab8 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -22,6 +22,8 @@ RsqrtLayer::RsqrtLayer(const char* name)
std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
RsqrtQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index bfa16e5f9e..b512ca4915 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -24,6 +24,8 @@ SliceLayer::SliceLayer(const SliceDescriptor& param, const char* name)
std::unique_ptr<IWorkload> SliceLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SliceQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 32d3a1117c..9882da42b0 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -21,6 +21,8 @@ SoftmaxLayer::SoftmaxLayer(const SoftmaxDescriptor &param, const char* name)
std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SoftmaxQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index decb6e61f0..b9e33314ef 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -26,9 +26,10 @@ SpaceToBatchNdLayer::SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, c
std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- SpaceToBatchNdQueueDescriptor descriptor;
+ SpaceToBatchNdQueueDescriptor descriptor;
descriptor.m_Parameters.m_BlockShape = m_Param.m_BlockShape;
- descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
+ descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
+ SetAdditionalInfo(descriptor);
return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 72d82308d7..90ba8fc8c3 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -30,6 +30,8 @@ std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFact
descriptor.m_Parameters.m_BlockSize = m_Param.m_BlockSize;
descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index e5c9903e2f..5e6622e13a 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -29,6 +29,8 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory&
std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
}
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 715057615d..11935a1acf 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -22,6 +22,8 @@ StackLayer::StackLayer(const StackDescriptor& param, const char* name)
std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
StackQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 957f5858b6..c8f36355ae 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -34,6 +34,8 @@ std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const IWorkloadFact
descriptor.m_Parameters.m_NewAxisMask = m_Param.m_NewAxisMask;
descriptor.m_Parameters.m_ShrinkAxisMask = m_Param.m_ShrinkAxisMask;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 82c96428a5..34087bd466 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -22,6 +22,8 @@ SubtractionLayer::SubtractionLayer(const char* name)
std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SubtractionQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index d905f5248a..879263955f 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -19,6 +19,8 @@ SwitchLayer::SwitchLayer(const char* name)
std::unique_ptr<IWorkload> SwitchLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
SwitchQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 92873899b7..1591213d9d 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -35,6 +35,8 @@ std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWo
descriptor.m_Bias = m_Bias.get();
}
+ SetAdditionalInfo(descriptor);
+
return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 61e6863304..8951fe4637 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -25,6 +25,8 @@ TransposeLayer::TransposeLayer(const TransposeDescriptor& param, const char* nam
std::unique_ptr<IWorkload> TransposeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
TransposeQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor));
}
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index d2df9c1bc6..4a43f9ff21 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -28,6 +28,7 @@ std::unique_ptr<IWorkload> UnmapLayer::CreateWorkload(const IWorkloadFactory& fa
{
IgnoreUnused(factory);
UnmapQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
//This is different from other workloads. Does not get created by the workload factory.
return std::make_unique<UnmapWorkload>(descriptor, PrepInfoAndDesc(descriptor));
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 60beb51c32..c07bf6a5bc 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -133,7 +133,177 @@ std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFact
return workload;
}
-template <typename WorkloadType,
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ return workload; // Returns so we can do extra, backend-specific tests.
+}
+
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();
+
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ return workload;
+}
+
+template <typename WorkloadType,
typename DescriptorType,
armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory,
@@ -218,6 +388,87 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
return workload;
}
+template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
+std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlobWorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
+{
+ TensorShape tensorShape;
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ tensorShape = { 2, 4, 4, 3 };
+ break;
+ case DataLayout::NCHW:
+ default:
+ tensorShape = { 2, 3, 4, 4 };
+ }
+
+ // Creates the layer we're testing.
+ BatchNormalizationDescriptor layerDesc;
+ layerDesc.m_Eps = 0.05f;
+ layerDesc.m_DataLayout = dataLayout;
+
+ BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
+
+ armnn::TensorInfo weightInfo({3}, DataType);
+ layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean->Allocate();
+ layer->m_Variance->Allocate();
+ layer->m_Beta->Allocate();
+ layer->m_Gamma->Allocate();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo(tensorShape, DataType);
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
+ BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+ BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph,
@@ -279,6 +530,92 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
return workload;
}
+template<typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlobWorkloadTest(
+ armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW,
+ const ModelOptions& modelOptions = {})
+{
+ // Creates the layer we're testing.
+ Convolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 3;
+ layerDesc.m_PadRight = 3;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 1;
+ layerDesc.m_StrideX = 2;
+ layerDesc.m_StrideY = 4;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_DataLayout = dataLayout;
+
+
+ Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+ TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
+
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
+
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
+ BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
+ TensorInfo({2}, GetBiasDataType(DataType))));
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph,
@@ -893,6 +1230,75 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
return workload;
}
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
+std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
+ (armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ FullyConnectedDescriptor layerDesc;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_TransposeWeightMatrix = true;
+
+ FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;
+
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ BOOST_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
+ armnn::ActivationFunction::BoundedReLu);
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
+
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ BOOST_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+ BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+
template <typename NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph,
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index c563626b28..952ddc323a 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -29,15 +29,23 @@ struct QueueDescriptor
{
std::vector<ITensorHandle*> m_Inputs;
std::vector<ITensorHandle*> m_Outputs;
+ void* m_AdditionalInfoObject;
void ValidateInputsOutputs(const std::string& descName,
unsigned int numExpectedIn,
unsigned int numExpectedOut) const;
+ template<typename T>
+ const T* GetAdditionalInformation()
+ {
+ return static_cast<T*>(m_AdditionalInfoObject);
+ }
protected:
~QueueDescriptor() = default;
- QueueDescriptor() = default;
+ QueueDescriptor()
+ : m_AdditionalInfoObject(nullptr)
+ {}
QueueDescriptor(QueueDescriptor const&) = default;
QueueDescriptor& operator=(QueueDescriptor const&) = default;
};
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 6e2217c919..0f86e7eeff 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -89,6 +89,55 @@ static void RefCreateElementwiseWorkloadTest()
TensorInfo({ 2, 3 }, DataType));
}
+BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ armnn::DataType DataType = armnn::DataType::Float32;
+
+ auto workload = CreateSubtractionWithBlobWorkloadTest<RefSubtractionWorkload<>,
+ SubtractionQueueDescriptor,
+ armnn::DataType::Float32>
+ (factory, graph);
+
+ CheckInputsOutput(std::move(workload),
+ TensorInfo({ 2, 3 }, DataType),
+ TensorInfo({ 2, 3 }, DataType),
+ TensorInfo({ 2, 3 }, DataType));
+}
+
+BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ armnn::DataType DataType = armnn::DataType::Float32;
+
+ auto workload = CreateAdditionWithBlobWorkloadTest<RefAdditionWorkload<>,
+ AdditionQueueDescriptor,
+ armnn::DataType::Float32>(factory, graph);
+
+ CheckInputsOutput(std::move(workload),
+ TensorInfo({ 2, 3 }, DataType),
+ TensorInfo({ 2, 3 }, DataType),
+ TensorInfo({ 2, 3 }, DataType));
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ armnn::DataType DataType = armnn::DataType::Float32;
+
+ auto workload = CreateMultiplicationWithBlobWorkloadTest<RefMultiplicationWorkload<>,
+ MultiplicationQueueDescriptor,
+ armnn::DataType::Float32>(factory, graph);
+
+ CheckInputsOutput(std::move(workload),
+ TensorInfo({2, 3}, DataType),
+ TensorInfo({2, 3}, DataType),
+ TensorInfo({2, 3}, DataType));
+}
+
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
@@ -262,6 +311,24 @@ static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ auto dataType = armnn::DataType::Float32;
+ auto workload = CreateBatchNormalizationWithBlobWorkloadTest<RefBatchNormalizationWorkload,
+ armnn::DataType::Float32>(factory, graph, DataLayout::NHWC);
+
+ TensorShape inputShape;
+ TensorShape outputShape;
+
+ inputShape = { 2, 4, 4, 3 };
+ outputShape = { 2, 4, 4, 3 };
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
+ CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
+}
+
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
@@ -360,6 +427,25 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
+BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
+{
+ DataLayout dataLayout = DataLayout::NHWC;
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ auto workload = CreateConvolution2dFusedActivationWithBlobWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
+ (factory, graph, dataLayout);
+
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
+ : std::initializer_list<unsigned int>({2, 8, 16, 3});
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
+ : std::initializer_list<unsigned int>({2, 2, 10, 2});
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
+ CheckInputOutput(std::move(workload),
+ TensorInfo(inputShape, DataType::Float32),
+ TensorInfo(outputShape, DataType::Float32));
+}
+
static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
Graph graph;
@@ -383,6 +469,21 @@ BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}
+BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+ auto workload = CreateFullyConnectedWithBlobWorkloadTest<RefFullyConnectedWorkload,
+ armnn::DataType::Float32>(factory, graph);
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
+ float inputsQScale = 0.0f;
+ float outputQScale = 0.0f;
+ CheckInputOutput(std::move(workload),
+ TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
+ TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
+}
+
template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{