aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorarovir01 <Aron.Virginas-Tar@arm.com>2018-10-08 14:48:19 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-10 16:16:58 +0100
commit085f0a42623401f78bd1df34cdcebe8555809410 (patch)
tree6b9e14d10c4969598e0b4dd86dd984e248e76891
parent537a0b676a7499fbf160d9947d78a65d79c9b444 (diff)
downloadarmnn-085f0a42623401f78bd1df34cdcebe8555809410.tar.gz
IVGCVSW-1985: Replace std::string* reasonIfNotSupported with Optional<std::string&> in IsLayerSupported implementations
Change-Id: I2f054f0dcff9bdc86ee90c55b3e94c6b4ae25085
-rw-r--r--src/armnn/LayerSupport.cpp9
-rw-r--r--src/armnn/LayerSupportCommon.hpp34
-rw-r--r--src/backends/cl/ClLayerSupport.cpp203
-rw-r--r--src/backends/cl/ClLayerSupport.hpp114
-rw-r--r--src/backends/cl/workloads/ClAdditionWorkload.cpp15
-rw-r--r--src/backends/cl/workloads/ClAdditionWorkload.hpp7
-rw-r--r--src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp16
-rw-r--r--src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp4
-rw-r--r--src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp16
-rw-r--r--src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp4
-rw-r--r--src/backends/cl/workloads/ClPadWorkload.cpp15
-rw-r--r--src/backends/cl/workloads/ClPadWorkload.hpp7
-rw-r--r--src/backends/cl/workloads/ClSubtractionWorkload.cpp15
-rw-r--r--src/backends/cl/workloads/ClSubtractionWorkload.hpp7
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp129
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp100
-rw-r--r--src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/reference/RefLayerSupport.cpp168
-rw-r--r--src/backends/reference/RefLayerSupport.hpp95
19 files changed, 513 insertions, 447 deletions
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 3758ed40f6..8bad89f070 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
//
#include <armnn/LayerSupport.hpp>
+#include <armnn/Optional.hpp>
#include <backends/reference/RefLayerSupport.hpp>
#include <backends/neon/NeonLayerSupport.hpp>
@@ -36,16 +37,16 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL
switch(compute) \
{ \
case Compute::CpuRef: \
- isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
+ isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
break; \
case Compute::CpuAcc: \
- isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \
+ isSupported = func##Neon(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
break; \
case Compute::GpuAcc: \
- isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \
+ isSupported = func##Cl(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
break; \
default: \
- isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
+ isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
break; \
} \
CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e351cf4f87..d6dda4f93d 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -12,7 +12,7 @@ namespace armnn
{
template<typename Float16Func, typename Float32Func, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeGeneric(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
Float16Func float16FuncPtr,
Float32Func float32FuncPtr,
@@ -33,83 +33,83 @@ bool IsSupportedForDataTypeGeneric(std::string* reasonIfUnsupported,
}
template<typename ... Params>
-bool TrueFunc(std::string* reasonIfUnsupported, Params&&... params)
+bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
return true;
}
template<typename ... Params>
-bool FalseFunc(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
return false;
}
template<typename ... Params>
-bool FalseFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float16 data type";
+ reasonIfUnsupported.value() = "Layer is not supported with float16 data type";
}
return false;
}
template<typename ... Params>
-bool FalseFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float32 data type";
+ reasonIfUnsupported.value() = "Layer is not supported with float32 data type";
}
return false;
}
template<typename ... Params>
-bool FalseFuncU8(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with 8-bit data type";
+ reasonIfUnsupported.value() = "Layer is not supported with 8-bit data type";
}
return false;
}
template<typename ... Params>
-bool FalseInputFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float32 data type input";
+ reasonIfUnsupported.value() = "Layer is not supported with float32 data type input";
}
return false;
}
template<typename ... Params>
-bool FalseInputFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float16 data type input";
+ reasonIfUnsupported.value() = "Layer is not supported with float16 data type input";
}
return false;
}
template<typename ... Params>
-bool FalseOutputFuncF32(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float32 data type output";
+ reasonIfUnsupported.value() = "Layer is not supported with float32 data type output";
}
return false;
}
template<typename ... Params>
-bool FalseOutputFuncF16(std::string* reasonIfUnsupported, Params&&... params)
+bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Layer is not supported with float16 data type output";
+ reasonIfUnsupported.value() = "Layer is not supported with float16 data type output";
}
return false;
}
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 434b069092..494b339952 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -22,16 +22,16 @@
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
+#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
+#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
-#include "workloads/ClFullyConnectedWorkload.hpp"
+#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
-#include "workloads/ClPooling2dBaseWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
-#include "workloads/ClNormalizationFloatWorkload.hpp"
+#include "workloads/ClPooling2dBaseWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
-#include "workloads/ClLstmFloatWorkload.hpp"
#endif
using namespace boost;
@@ -59,14 +59,14 @@ bool IsMatchingStride(uint32_t actualStride)
return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
};
-bool IsClBackendSupported(std::string* reasonIfUnsupported)
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if ARMCOMPUTECL_ENABLED
return true;
#else
- if (reasonIfUnsupported != nullptr)
+ if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "The armnn library has been built without CL support";
+ reasonIfUnsupported.value() = "The armnn library has been built without CL support";
}
return false;
#endif
@@ -80,13 +80,13 @@ bool IsClBackendSupported(std::string* reasonIfUnsupported)
#if ARMCOMPUTECL_ENABLED
template<class FuncType, class... Args>
-inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args)
+inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
if (!supported && reasonIfUnsupported)
{
- *reasonIfUnsupported = aclStatus.error_description();
+ reasonIfUnsupported.value() = aclStatus.error_description();
}
return supported;
}
@@ -101,7 +101,7 @@ inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupporte
} //namespace
template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
FloatFunc floatFuncPtr,
Uint8Func uint8FuncPtr,
@@ -119,7 +119,7 @@ bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported,
bool IsActivationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
reasonIfUnsupported,
@@ -131,12 +131,13 @@ bool IsActivationSupportedCl(const TensorInfo& input,
bool IsAdditionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
- return FORWARD_CL_LAYER_SUPPORT_FUNC(ClAdditionValidate(input0,
- input1,
- output,
- reasonIfUnsupported));
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
}
bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
@@ -146,7 +147,7 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
reasonIfUnsupported,
@@ -160,7 +161,7 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
}
bool IsConstantSupportedCl(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
output.GetDataType(),
@@ -201,10 +202,11 @@ bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convol
return isSupported;
}
-bool IsDirectConvolution2dParamsSupportedCl(std::string* reasonIfUnsupported,
+bool IsDirectConvolution2dParamsSupportedCl(Optional<std::string&> reasonIfUnsupported,
const Convolution2dDescriptor& parameters,
const TensorInfo& weightInfo)
{
+ ignore_unused(reasonIfUnsupported);
return IsClDirectConvolution2dSupported(weightInfo, parameters);
}
@@ -213,7 +215,7 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
reasonIfUnsupported,
@@ -229,7 +231,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
reasonIfUnsupported,
@@ -243,7 +245,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
bool IsDivisionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
reasonIfUnsupported,
@@ -255,12 +257,14 @@ bool IsDivisionSupportedCl(const TensorInfo& input0,
bool IsSubtractionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
- return FORWARD_CL_LAYER_SUPPORT_FUNC(ClSubtractionValidate(input0,
- input1,
- output,
- reasonIfUnsupported));
+
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
}
bool IsFullyConnectedSupportedCl(const TensorInfo& input,
@@ -268,7 +272,7 @@ bool IsFullyConnectedSupportedCl(const TensorInfo& input,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
reasonIfUnsupported,
@@ -280,7 +284,7 @@ bool IsFullyConnectedSupportedCl(const TensorInfo& input,
}
bool IsInputSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
@@ -291,14 +295,14 @@ bool IsInputSupportedCl(const TensorInfo& input,
bool IsL2NormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -310,7 +314,7 @@ bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
bool IsMultiplicationSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
reasonIfUnsupported,
@@ -322,13 +326,13 @@ bool IsMultiplicationSupportedCl(const TensorInfo& input0,
bool IsNormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsOutputSupportedCl(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
output.GetDataType(),
@@ -336,18 +340,10 @@ bool IsOutputSupportedCl(const TensorInfo& output,
&TrueFunc<>);
}
-bool IsPadSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported)
-{
- return FORWARD_CL_LAYER_SUPPORT_FUNC(ClPadValidate(input, output, descriptor, reasonIfUnsupported));
-}
-
bool IsPermuteSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(output);
@@ -357,13 +353,13 @@ bool IsPermuteSupportedCl(const TensorInfo& input,
bool IsPooling2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsResizeBilinearSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
@@ -374,7 +370,7 @@ bool IsResizeBilinearSupportedCl(const TensorInfo& input,
bool IsSoftmaxSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
@@ -382,7 +378,7 @@ bool IsSoftmaxSupportedCl(const TensorInfo& input,
bool IsSplitterSupportedCl(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -393,23 +389,25 @@ bool IsSplitterSupportedCl(const TensorInfo& input,
bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsReshapeSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
+ ignore_unused(reasonIfUnsupported);
return true;
}
bool IsFloorSupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
return IsClBackendSupported(reasonIfUnsupported) &&
@@ -420,59 +418,104 @@ bool IsFloorSupportedCl(const TensorInfo& input,
&FalseFuncU8<>);
}
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
-{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate, reasonIfUnsupported,
- input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut,
- output, descriptor, inputToForgetWeights, inputToCellWeights,
- inputToOutputWeights, recurrentToForgetWeights,
- recurrentToCellWeights, recurrentToOutputWeights,
- forgetGateBias, cellBias, outputGateBias,
- inputToInputWeights, recurrentToInputWeights,
- cellToInputWeights, inputGateBias, projectionWeights,
- projectionBias, cellToForgetWeights, cellToOutputWeights);
+bool IsLstmSupportedCl(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ outputStateIn,
+ cellStateIn,
+ scratchBuffer,
+ outputStateOut,
+ cellStateOut,
+ output,
+ descriptor,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ inputToInputWeights,
+ recurrentToInputWeights,
+ cellToInputWeights,
+ inputGateBias,
+ projectionWeights,
+ projectionBias,
+ cellToForgetWeights,
+ cellToOutputWeights);
}
bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
reasonIfUnsupported,
input,
- output,
- reasonIfUnsupported);
+ output);
}
bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
reasonIfUnsupported,
input,
- output,
- reasonIfUnsupported);
+ output);
}
bool IsMeanSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
+ ignore_unused(input);
+ ignore_unused(output);
+ ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
return false;
}
+bool IsPadSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
}
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 314ac4c73f..5cd756ba1b 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -5,6 +5,7 @@
#pragma once
#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/ArmNN.hpp>
@@ -18,19 +19,19 @@ class ClLayerSupport : public ILayerSupport
};
bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-bool IsClDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsClDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
const DepthwiseConvolution2dDescriptor& parameters,
const TensorInfo& weights);
bool IsActivationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsAdditionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
@@ -39,130 +40,143 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConstantSupportedCl(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvolution2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDivisionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSubtractionSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFullyConnectedSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsInputSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsL2NormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
-
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsLstmSupportedCl(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMultiplicationSupportedCl(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsNormalizationSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsOutputSupportedCl(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
-
-bool IsPadSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPermuteSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPooling2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsResizeBilinearSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSoftmaxSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSplitterSupportedCl(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsReshapeSupportedCl(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFloorSupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
-
-bool IsMeanSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsMeanSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsPadSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
}
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index aa032e872c..ec0dd30275 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -37,10 +37,9 @@ void ClAdditionWorkload<T...>::Execute() const
m_Layer.run();
}
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
{
const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -51,13 +50,7 @@ bool ClAdditionValidate(const TensorInfo& input0,
&aclOutputInfo,
g_AclConvertPolicy);
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
+ return aclStatus;
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.hpp b/src/backends/cl/workloads/ClAdditionWorkload.hpp
index 3e4ee26793..c5e6affac8 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.hpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.hpp
@@ -24,8 +24,7 @@ private:
mutable arm_compute::CLArithmeticAddition m_Layer;
};
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index e7663b4ca4..2c9a0e1fc2 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -32,19 +32,15 @@ void ClConvertFp16ToFp32Workload::Execute() const
m_Layer.run();
}
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
if (input.GetDataType() != DataType::Float16)
{
- *reasonIfUnsupported = "Input should be Float16";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float16");
}
if (output.GetDataType() != DataType::Float32)
{
- *reasonIfUnsupported = "Output should be Float32";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float32");
}
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@ arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
&aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
return aclStatus;
}
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
index b6447488f7..f5f230d869 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
@@ -23,8 +23,6 @@ private:
mutable arm_compute::CLDepthConvertLayer m_Layer;
};
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 2ae4adc424..6758180a6e 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -32,19 +32,15 @@ void ClConvertFp32ToFp16Workload::Execute() const
m_Layer.run();
}
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
if (input.GetDataType() != DataType::Float32)
{
- *reasonIfUnsupported = "Input should be Float32";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float32");
}
if (output.GetDataType() != DataType::Float16)
{
- *reasonIfUnsupported = "Output should be Float16";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float16");
}
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@ arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
&aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
return aclStatus;
}
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
index 95d19905d7..28d0bfa9b0 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
@@ -23,8 +23,6 @@ private:
mutable arm_compute::CLDepthConvertLayer m_Layer;
};
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 45dc5e8be7..89b0d8fde9 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -35,10 +35,9 @@ void ClPadWorkload<T...>::Execute() const
m_Layer.run();
}
-bool ClPadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
@@ -48,13 +47,7 @@ bool ClPadValidate(const TensorInfo& input,
&aclOutputInfo,
padList);
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
+ return aclStatus;
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.hpp b/src/backends/cl/workloads/ClPadWorkload.hpp
index a7ad6670a7..97f57fdeac 100644
--- a/src/backends/cl/workloads/ClPadWorkload.hpp
+++ b/src/backends/cl/workloads/ClPadWorkload.hpp
@@ -23,10 +23,9 @@ private:
mutable arm_compute::CLPadLayer m_Layer;
};
-bool ClPadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported);
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor);
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 8efed94293..1967fae354 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -35,10 +35,9 @@ void ClSubtractionWorkload::Execute() const
m_Layer.run();
}
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
{
const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -49,13 +48,7 @@ bool ClSubtractionValidate(const TensorInfo& input0,
&aclOutputInfo,
g_AclConvertPolicy);
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
+ return aclStatus;
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.hpp b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
index 7dd608bf8a..3a4210da07 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.hpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
@@ -23,8 +23,7 @@ private:
mutable arm_compute::CLArithmeticSubtraction m_Layer;
};
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
} //namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ef70fbd370..b6d5e4854d 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -69,13 +69,14 @@ bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convol
return preferDirectConvolution;
}
-bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, const NormalizationDescriptor& parameters)
+bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
+ const NormalizationDescriptor& parameters)
{
if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Unsupported normalisation method type, only LocalBrightness is supported";
+ reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
}
return false;
}
@@ -83,7 +84,7 @@ bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, co
{
if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "Normalization size must be an odd number.";
+ reasonIfUnsupported.value() = "Normalization size must be an odd number.";
}
return false;
}
@@ -91,21 +92,21 @@ bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, co
return true;
}
-bool IsNeonBackendSupported(std::string* reasonIfUnsupported)
+bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if ARMCOMPUTENEON_ENABLED
return true;
#else
- if (reasonIfUnsupported != nullptr)
+ if (reasonIfUnsupported)
{
- *reasonIfUnsupported = "The armnn library has been built without NEON support";
+ reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
}
return false;
#endif
}
template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeNeon(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
FloatFunc floatFuncPtr,
Uint8Func uint8FuncPtr,
@@ -122,13 +123,13 @@ bool IsSupportedForDataTypeNeon(std::string* reasonIfUnsupported,
#if ARMCOMPUTENEON_ENABLED
template<class FuncType, class... Args>
-inline bool IsWorkloadSupported(FuncType& func, std::string* reasonIfUnsupported, Args&&... args)
+inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
if (!supported && reasonIfUnsupported)
{
- *reasonIfUnsupported = aclStatus.error_description();
+ reasonIfUnsupported.value() = aclStatus.error_description();
}
return supported;
}
@@ -143,7 +144,7 @@ inline bool IsWorkloadSupported(FuncType& func, std::string* reasonIfUnsupported
bool IsActivationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
@@ -156,7 +157,7 @@ bool IsActivationSupportedNeon(const TensorInfo& input,
bool IsAdditionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
reasonIfUnsupported,
@@ -172,7 +173,7 @@ bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
reasonIfUnsupported,
@@ -186,7 +187,7 @@ bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
}
bool IsConstantSupportedNeon(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
output.GetDataType(),
@@ -199,7 +200,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
reasonIfUnsupported,
@@ -215,7 +216,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
reasonIfUnsupported,
@@ -229,16 +230,20 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
bool IsDivisionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
// At the moment division is not supported
+ ignore_unused(input0);
+ ignore_unused(input1);
+ ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsSubtractionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
reasonIfUnsupported,
@@ -252,7 +257,7 @@ bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
// At the moment U8 is unsupported
if (input.GetDataType() == DataType::QuantisedAsymm8)
@@ -269,7 +274,7 @@ bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
}
bool IsInputSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
@@ -280,14 +285,14 @@ bool IsInputSupportedNeon(const TensorInfo& input,
bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
@@ -299,7 +304,7 @@ bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
reasonIfUnsupported,
@@ -311,13 +316,13 @@ bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
bool IsNormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsOutputSupportedNeon(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
output.GetDataType(),
@@ -328,7 +333,7 @@ bool IsOutputSupportedNeon(const TensorInfo& output,
bool IsPermuteSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
@@ -336,29 +341,30 @@ bool IsPermuteSupportedNeon(const TensorInfo& input,
bool IsPooling2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsSoftmaxSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool IsSplitterSupportedNeon(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
@@ -369,15 +375,16 @@ bool IsSplitterSupportedNeon(const TensorInfo& input,
bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsReshapeSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
@@ -387,7 +394,7 @@ bool IsReshapeSupportedNeon(const TensorInfo& input,
bool IsFloorSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
return IsNeonBackendSupported(reasonIfUnsupported) &&
@@ -398,19 +405,32 @@ bool IsFloorSupportedNeon(const TensorInfo& input,
&FalseFuncU8<>);
}
-bool IsLstmSupportedNeon(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedNeon(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(outputStateIn);
@@ -437,40 +457,51 @@ bool IsLstmSupportedNeon(const TensorInfo& input, const TensorInfo& outputStateI
ignore_unused(projectionBias);
ignore_unused(cellToForgetWeights);
ignore_unused(cellToOutputWeights);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
return true;
}
bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
return true;
}
bool IsMeanSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
+ ignore_unused(input);
+ ignore_unused(output);
+ ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsPadSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
+ ignore_unused(input);
+ ignore_unused(output);
+ ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
return false;
}
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 8b674c6460..468cf58393 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -5,6 +5,7 @@
#pragma once
#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
@@ -18,22 +19,22 @@ class NeonLayerSupport : public ILayerSupport
bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
const NormalizationDescriptor& parameters);
bool IsActivationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-bool IsNeonDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsNeonDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
const DepthwiseConvolution2dDescriptor& parameters,
const TensorInfo& weights);
bool IsAdditionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
@@ -42,17 +43,17 @@ bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConstantSupportedNeon(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
@@ -60,113 +61,126 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDivisionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSubtractionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsInputSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsNormalizationSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsOutputSupportedNeon(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPermuteSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPooling2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSoftmaxSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSplitterSupportedNeon(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsReshapeSupportedNeon(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFloorSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
-
-bool IsLstmSupportedNeon(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsLstmSupportedNeon(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMeanSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPadSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
}
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 7019c82582..0deff79dac 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -33,7 +33,7 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
{
m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
std::string reasonIfUnsupported;
- if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters))
+ if (!IsNeonNormalizationDescParamsSupported(Optional<std::string&>(reasonIfUnsupported), m_Data.m_Parameters))
{
throw UnimplementedException(reasonIfUnsupported);
}
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index e6b1442e4d..2ee942cc2e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -17,22 +17,12 @@ using namespace boost;
namespace armnn
{
-namespace
-{
-
-std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
-{
- return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
-}
-
-} // anonymous namespace
-
bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsActivationSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
@@ -40,10 +30,7 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsAdditionSupportedRef(input0,
- input1,
- output,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsAdditionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -62,27 +49,27 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
beta,
gamma,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConstantSupportedRef(output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConvertFp16ToFp32SupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConvertFp32ToFp16SupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -97,7 +84,7 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
descriptor,
weights,
biases,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -112,7 +99,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
descriptor,
weights,
biases,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -120,21 +107,21 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsDivisionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsFakeQuantizationSupportedRef(input, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsFloorSupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
@@ -149,13 +136,13 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
weights,
biases,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsInputSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
@@ -163,10 +150,7 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsL2NormalizationSupportedRef(input,
- output,
- descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsL2NormalizationSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
@@ -221,7 +205,7 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
projectionBias,
cellToForgetWeights,
cellToOutputWeights,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
@@ -229,14 +213,14 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMeanSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMergerSupportedRef(inputs, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
@@ -244,7 +228,7 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMultiplicationSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -255,13 +239,13 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
return armnn::IsNormalizationSupportedRef(input,
output,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsOutputSupportedRef(output, reasonIfUnsupported);
}
bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -269,7 +253,7 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPadSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
@@ -277,7 +261,7 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPermuteSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
@@ -285,19 +269,19 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPooling2dSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsReshapeSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsResizeBilinearSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
@@ -305,14 +289,14 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSoftmaxSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSplitterSupportedRef(input, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
@@ -320,7 +304,7 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSubtractionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
//
@@ -329,7 +313,7 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
template<typename Float32Func, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
Float32Func floatFuncPtr,
Uint8Func uint8FuncPtr,
@@ -346,7 +330,7 @@ bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
bool IsActivationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -359,7 +343,7 @@ bool IsActivationSupportedRef(const TensorInfo& input,
bool IsAdditionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -376,7 +360,7 @@ bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -386,7 +370,7 @@ bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
}
bool IsConstantSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
output.GetDataType(),
@@ -399,7 +383,7 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
ignore_unused(output);
@@ -416,7 +400,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -431,7 +415,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
bool IsDivisionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -444,7 +428,7 @@ bool IsDivisionSupportedRef(const TensorInfo& input0,
bool IsSubtractionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -459,7 +443,7 @@ bool IsFullyConnectedSupportedRef(const TensorInfo& input,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -472,7 +456,7 @@ bool IsFullyConnectedSupportedRef(const TensorInfo& input,
}
bool IsInputSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -483,7 +467,7 @@ bool IsInputSupportedRef(const TensorInfo& input,
bool IsL2NormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -495,7 +479,7 @@ bool IsL2NormalizationSupportedRef(const TensorInfo& input,
bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -507,7 +491,7 @@ bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
bool IsMultiplicationSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -520,7 +504,7 @@ bool IsMultiplicationSupportedRef(const TensorInfo& input0,
bool IsNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -530,7 +514,7 @@ bool IsNormalizationSupportedRef(const TensorInfo& input,
}
bool IsOutputSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
output.GetDataType(),
@@ -541,7 +525,7 @@ bool IsOutputSupportedRef(const TensorInfo& output,
bool IsPermuteSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -553,7 +537,7 @@ bool IsPermuteSupportedRef(const TensorInfo& input,
bool IsPooling2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -563,7 +547,7 @@ bool IsPooling2dSupportedRef(const TensorInfo& input,
}
bool IsResizeBilinearSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -574,7 +558,7 @@ bool IsResizeBilinearSupportedRef(const TensorInfo& input,
bool IsSoftmaxSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -586,7 +570,7 @@ bool IsSoftmaxSupportedRef(const TensorInfo& input,
bool IsSplitterSupportedRef(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -597,7 +581,7 @@ bool IsSplitterSupportedRef(const TensorInfo& input,
bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -607,7 +591,7 @@ bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
}
bool IsReshapeSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -617,7 +601,7 @@ bool IsReshapeSupportedRef(const TensorInfo& input,
bool IsFloorSupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -626,19 +610,32 @@ bool IsFloorSupportedRef(const TensorInfo& input,
&FalseFuncU8<>);
}
-bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedRef(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(outputStateIn);
@@ -665,12 +662,13 @@ bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn
ignore_unused(projectionBias);
ignore_unused(cellToForgetWeights);
ignore_unused(cellToOutputWeights);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
@@ -686,7 +684,7 @@ bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
@@ -703,7 +701,7 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
bool IsMeanSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -716,7 +714,7 @@ bool IsMeanSupportedRef(const TensorInfo& input,
bool IsPadSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 25501fe016..1d0edf6cb3 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -175,12 +175,12 @@ public:
bool IsActivationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsAdditionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
@@ -189,130 +189,143 @@ bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConstantSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvolution2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsDivisionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSubtractionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFullyConnectedSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsInputSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsL2NormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
-
-bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsLstmSupportedRef(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMultiplicationSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsOutputSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPermuteSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPooling2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsResizeBilinearSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSoftmaxSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsSplitterSupportedRef(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsReshapeSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsFloorSupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsMeanSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
bool IsPadSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported = nullptr);
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
}