commit a68241066c3e797dab70f515d2c55aaa74abf564 (patch)
Author:    arovir01 <Aron.Virginas-Tar@arm.com>    2018-08-28 17:40:45 +0100
Committer: Matthew Bentham <matthew.bentham@arm.com>    2018-09-17 17:21:21 +0100
Tree:      9f5b70a02dcd7d5d61d58e2bf235ee473be53ce5
Parent:    c577f2c6a3b4ddb6ba87a882723c53a248afbeba
IVGCVSW-1324: Make biases truly optional for Conv2d and DepthwiseConvolution
!android-nn-driver:145961
Change-Id: I039ab0adc61725859514246365b5e5b5fa603eaa
-rw-r--r--  include/armnn/LayerSupport.hpp                                             |  6
-rw-r--r--  src/armnn/LayerSupport.cpp                                                 |  4
-rw-r--r--  src/armnn/backends/ClLayerSupport.cpp                                      |  4
-rw-r--r--  src/armnn/backends/ClLayerSupport.hpp                                      |  6
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp            |  7
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp            |  4
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp     |  7
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp     |  4
-rw-r--r--  src/armnn/backends/NeonLayerSupport.cpp                                    |  4
-rw-r--r--  src/armnn/backends/NeonLayerSupport.hpp                                    |  6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp        |  7
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp        | 11
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp |  9
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.hpp |  4
-rw-r--r--  src/armnn/backends/RefLayerSupport.cpp                                     |  4
-rw-r--r--  src/armnn/backends/RefLayerSupport.hpp                                     |  6
-rw-r--r--  src/armnn/backends/WorkloadFactory.cpp                                     | 90
17 files changed, 75 insertions(+), 108 deletions(-)
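
Context for the diff below: the IsConvolution2dSupported and IsDepthwiseConvolutionSupported queries previously took a mandatory bias TensorInfo, so callers had to fabricate per-data-type dummy biases when a layer had none. This patch threads boost::optional<TensorInfo> through the public API and every backend instead. A minimal sketch of the calling convention (simplified signatures; the real functions, shown in full in the hunks below, take more parameters):

#include <boost/optional.hpp>

// Before: callers had to invent a dummy TensorInfo for bias-less layers.
bool IsConv2dSupportedOld(const TensorInfo& weights, const TensorInfo& biases);

// After: an empty optional states "no biases" directly.
bool IsConv2dSupportedNew(const TensorInfo& weights,
                          const boost::optional<TensorInfo>& biases);

// Call shapes:
//   IsConv2dSupportedOld(weights, dummyBias);    // dummy tensor required
//   IsConv2dSupportedNew(weights, boost::none);  // no biases, stated plainly
//   IsConv2dSupportedNew(weights, biasInfo);     // TensorInfo converts implicitly
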
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index c875619949..26cbda47e2 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -8,6 +8,8 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -58,7 +60,7 @@ bool IsConvolution2dSupported(Compute compute,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
@@ -67,7 +69,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
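
A sketch of how a call site uses the new public signature; inputInfo, outputInfo, descriptor, weightsInfo and biasInfo are assumed to be armnn objects built elsewhere:

char reason[1024];

// No biases: pass boost::none where a dummy TensorInfo used to go.
bool supported = armnn::IsConvolution2dSupported(
    armnn::Compute::CpuRef, inputInfo, outputInfo, descriptor,
    weightsInfo, boost::none, reason, sizeof(reason));

// With biases: the TensorInfo converts implicitly to the optional.
bool supportedWithBias = armnn::IsConvolution2dSupported(
    armnn::Compute::CpuRef, inputInfo, outputInfo, descriptor,
    weightsInfo, biasInfo, reason, sizeof(reason));
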
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index a734e03a56..8dcb0dc6ee 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -134,7 +134,7 @@ bool IsConvolution2dSupported(Compute compute,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
@@ -146,7 +146,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index 72594ac82b..b00a218a72 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -210,7 +210,7 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
@@ -226,7 +226,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index 791e904616..ae5f4b0c54 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -9,6 +9,8 @@
#include <armnn/Tensor.hpp>
#include <armnn/ArmNN.hpp>
+#include <boost/optional.hpp>
+
namespace armnn
{
bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
@@ -42,14 +44,14 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsFullyConnectedSupportedCl(const TensorInfo& input,
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
index 9851a22dc6..be4c33500e 100644
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
@@ -17,17 +17,20 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases)
+ const boost::optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
+
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
if (descriptor.m_BiasEnabled)
{
- aclBiasesInfo = BuildArmComputeTensorInfo(biases);
+ BOOST_ASSERT(biases.is_initialized());
+
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
optionalAclBiasesInfo = &aclBiasesInfo;
}
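
The validate functions in this patch all lower the optional to arm_compute's nullable-pointer convention. The repeated pattern could be read as the following hypothetical helper (not part of the patch); the named storage argument matters because the pointed-to TensorInfo must outlive the returned pointer, which is why each validate function keeps aclBiasesInfo as a local:

// Hypothetical helper (not in the patch): boost::optional<TensorInfo>
// -> nullable arm_compute::TensorInfo*, as arm_compute's validate()
// functions expect. 'storage' must outlive the returned pointer.
arm_compute::TensorInfo* ToAclOptionalBiases(
    const boost::optional<armnn::TensorInfo>& biases,
    bool biasEnabled,
    arm_compute::TensorInfo& storage)
{
    if (!biasEnabled)
    {
        return nullptr; // arm_compute reads nullptr as "no biases"
    }
    // Contract: m_BiasEnabled implies the caller supplied bias info.
    BOOST_ASSERT(biases.is_initialized());
    storage = armnn::armcomputetensorutils::BuildArmComputeTensorInfo(biases.get());
    return &storage;
}
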
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp
index c4ef152361..c79e476dd3 100644
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp
@@ -7,6 +7,8 @@
#include "backends/ClWorkloadUtils.hpp"
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -14,6 +16,6 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases);
+ const boost::optional<TensorInfo>& biases);
} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp
index cfb8485039..5a5775487b 100644
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp
@@ -21,7 +21,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases)
+ const boost::optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
@@ -29,9 +29,12 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
+
if (descriptor.m_BiasEnabled)
{
- aclBiasesInfo = BuildArmComputeTensorInfo(biases);
+ BOOST_ASSERT(biases.is_initialized());
+
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp
index a879efc89e..502d570552 100644
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp
@@ -7,6 +7,8 @@
#include "backends/ClWorkloadUtils.hpp"
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -14,7 +16,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases);
+ const boost::optional<TensorInfo>& biases);
template<armnn::DataType... dataTypes>
class ClDepthwiseConvolutionBaseWorkload : public TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 3aef4e60aa..8f73b05460 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -197,7 +197,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
@@ -213,7 +213,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index 6f9fe9c20e..45032444a4 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -8,6 +8,8 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -46,7 +48,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
@@ -54,7 +56,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
index e76afb6cf7..e1556c292c 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
@@ -21,17 +21,20 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases)
+ const boost::optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
+
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
if (descriptor.m_BiasEnabled)
{
- aclBiasesInfo = BuildArmComputeTensorInfo(biases);
+ BOOST_ASSERT(biases.is_initialized());
+
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
index 524d2c90b6..de559395c2 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
@@ -5,15 +5,16 @@
#pragma once
-#include <backends/Workload.hpp>
-#include <backends/NeonWorkloadUtils.hpp>
-
-#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
+#include "backends/CpuTensorHandle.hpp"
#include "backends/NeonLayerSupport.hpp"
+#include "backends/NeonWorkloadUtils.hpp"
+#include "backends/Workload.hpp"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include <boost/optional.hpp>
+
#include <memory>
namespace armnn
@@ -23,7 +24,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases);
+ const boost::optional<TensorInfo>& biases);
template<armnn::DataType... dataTypes>
class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataTypes...>
diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
index 58d6061537..26705c01a2 100644
--- a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
@@ -14,7 +14,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases)
+ const boost::optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo =
armcomputetensorutils::BuildArmComputeTensorInfo(input);
@@ -25,9 +25,12 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
+
if (descriptor.m_BiasEnabled)
{
- aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases);
+ BOOST_ASSERT(biases.is_initialized());
+
+ aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.get());
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -43,4 +46,4 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
aclDepthMultiplier);
}
-}
+}
\ No newline at end of file
diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.hpp
index 0cead354f8..b19e77ef95 100644
--- a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.hpp
@@ -7,6 +7,8 @@
#include "backends/NeonWorkloadUtils.hpp"
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -14,6 +16,6 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases);
+ const boost::optional<TensorInfo>& biases);
} // namespace armnn
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index ca4fca6f31..dd89dd51b3 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -87,7 +87,7 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
ignore_unused(descriptor);
@@ -104,7 +104,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
ignore_unused(output);
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index 5e543ac537..fde09685ac 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -10,6 +10,8 @@
#include <layers/LstmLayer.hpp>
#include <boost/optional.hpp>
+#include <boost/optional.hpp>
+
namespace armnn
{
@@ -39,14 +41,14 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const TensorInfo& biases,
+ const boost::optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsFullyConnectedSupportedRef(const TensorInfo& input,
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index 1b3f29421a..5708dc0b0c 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -130,49 +130,20 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
case LayerType::Convolution2d:
{
auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
- const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType);
+
+ const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
+ dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
- TensorInfo biasInfo;
- const TensorInfo * biasInfoPtr = nullptr;
- static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
- static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
- static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
-
- const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
+ const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
+ // Construct optional biases object based on the value of m_BiasEnabled
+ boost::optional<TensorInfo> biases(boost::none);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
- biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
- biasInfoPtr = &biasInfo;
- }
- else
- {
- // If biases are not enabled pass a dummy tensorinfo for the validation.
- switch(input.GetDataType())
- {
- case DataType::Float16:
- {
- biasInfoPtr = &dummyFloat16Bias;
- break;
- }
- case DataType::Float32:
- {
- biasInfoPtr = &dummyFloat32Bias;
- break;
- }
- case DataType::QuantisedAsymm8:
- {
- biasInfoPtr = &dummyQA8Bias;
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unexpected input type");
- }
- }
+ biases = boost::make_optional(
+ OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
}
result = IsConvolution2dSupported(compute,
@@ -180,7 +151,7 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
output,
descriptor,
OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
- *biasInfoPtr,
+ biases,
reason,
reasonCapacity);
break;
@@ -202,53 +173,22 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
- TensorInfo biasInfo;
- const TensorInfo * biasInfoPtr = nullptr;
- static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
- static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
- static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
-
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
+
+ // Construct optional biases object based on the value of m_BiasEnabled
+ boost::optional<TensorInfo> biases(boost::none);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
- biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
- biasInfoPtr = &biasInfo;
- }
- else
- {
- // If biases are not enabled pass a dummy tensorinfo for the validation
- switch(input.GetDataType())
- {
- case DataType::Float16:
- {
- biasInfoPtr = &dummyFloat16Bias;
- break;
- }
- case DataType::Float32:
- {
- biasInfoPtr = &dummyFloat32Bias;
- break;
- }
- case DataType::QuantisedAsymm8:
- {
- biasInfoPtr = &dummyQA8Bias;
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unexpected bias type");
- }
- }
+ biases = boost::make_optional(
+ OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
}
-
result = IsDepthwiseConvolutionSupported(compute,
input,
output,
descriptor,
OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
- *biasInfoPtr,
+ biases,
reason,
reasonCapacity);
break;
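
Taken together, a support query for a bias-less layer no longer needs the per-data-type dummy switch removed above. A minimal end-to-end sketch against the reference backend (shapes and strides here are hypothetical):

armnn::DepthwiseConvolution2dDescriptor desc;
desc.m_StrideX     = 1;
desc.m_StrideY     = 1;
desc.m_BiasEnabled = false; // biases genuinely absent; no dummy tensor needed

armnn::TensorInfo input(armnn::TensorShape({1, 8, 16, 16}), armnn::DataType::Float32);
armnn::TensorInfo weights(armnn::TensorShape({1, 8, 3, 3}), armnn::DataType::Float32);
armnn::TensorInfo output(armnn::TensorShape({1, 8, 14, 14}), armnn::DataType::Float32);

std::string reason;
bool supported = armnn::IsDepthwiseConvolutionSupportedRef(
    input, output, desc, weights, boost::none, &reason);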