about summary refs log tree commit diff
path: root/src/backends/reference/RefLayerSupport.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference/RefLayerSupport.cpp')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp  168
1 file changed, 83 insertions, 85 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index e6b1442e4d..2ee942cc2e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -17,22 +17,12 @@ using namespace boost;
namespace armnn
{
-namespace
-{
-
-std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
-{
- return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
-}
-
-} // anonymous namespace
-
bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsActivationSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
@@ -40,10 +30,7 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsAdditionSupportedRef(input0,
- input1,
- output,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsAdditionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -62,27 +49,27 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
beta,
gamma,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConstantSupportedRef(output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConvertFp16ToFp32SupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsConvertFp32ToFp16SupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -97,7 +84,7 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
descriptor,
weights,
biases,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -112,7 +99,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
descriptor,
weights,
biases,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -120,21 +107,21 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsDivisionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsFakeQuantizationSupportedRef(input, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsFloorSupportedRef(input, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
@@ -149,13 +136,13 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
weights,
biases,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsInputSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
@@ -163,10 +150,7 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsL2NormalizationSupportedRef(input,
- output,
- descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsL2NormalizationSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
@@ -221,7 +205,7 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
projectionBias,
cellToForgetWeights,
cellToOutputWeights,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
@@ -229,14 +213,14 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMeanSupportedRef(input, output, descriptor,reasonIfUnsupported);
}
bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMergerSupportedRef(inputs, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
@@ -244,7 +228,7 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsMultiplicationSupportedRef(input0, input1, output, reasonIfUnsupported);
}
bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -255,13 +239,13 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
return armnn::IsNormalizationSupportedRef(input,
output,
descriptor,
- GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ reasonIfUnsupported);
}
bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsOutputSupportedRef(output, reasonIfUnsupported);
}
bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -269,7 +253,7 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPadSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
@@ -277,7 +261,7 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPermuteSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
@@ -285,19 +269,19 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsPooling2dSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsReshapeSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsResizeBilinearSupportedRef(input, reasonIfUnsupported);
}
bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
@@ -305,14 +289,14 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSoftmaxSupportedRef(input, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSplitterSupportedRef(input, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
@@ -320,7 +304,7 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+ return armnn::IsSubtractionSupportedRef(input0, input1, output, reasonIfUnsupported);
}
//
@@ -329,7 +313,7 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
template<typename Float32Func, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
Float32Func floatFuncPtr,
Uint8Func uint8FuncPtr,
@@ -346,7 +330,7 @@ bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
bool IsActivationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -359,7 +343,7 @@ bool IsActivationSupportedRef(const TensorInfo& input,
bool IsAdditionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -376,7 +360,7 @@ bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -386,7 +370,7 @@ bool IsBatchNormalizationSupportedRef(const TensorInfo& input,
}
bool IsConstantSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
output.GetDataType(),
@@ -399,7 +383,7 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
ignore_unused(output);
@@ -416,7 +400,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -431,7 +415,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
bool IsDivisionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -444,7 +428,7 @@ bool IsDivisionSupportedRef(const TensorInfo& input0,
bool IsSubtractionSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -459,7 +443,7 @@ bool IsFullyConnectedSupportedRef(const TensorInfo& input,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -472,7 +456,7 @@ bool IsFullyConnectedSupportedRef(const TensorInfo& input,
}
bool IsInputSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -483,7 +467,7 @@ bool IsInputSupportedRef(const TensorInfo& input,
bool IsL2NormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -495,7 +479,7 @@ bool IsL2NormalizationSupportedRef(const TensorInfo& input,
bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -507,7 +491,7 @@ bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs,
bool IsMultiplicationSupportedRef(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input1);
ignore_unused(output);
@@ -520,7 +504,7 @@ bool IsMultiplicationSupportedRef(const TensorInfo& input0,
bool IsNormalizationSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -530,7 +514,7 @@ bool IsNormalizationSupportedRef(const TensorInfo& input,
}
bool IsOutputSupportedRef(const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
output.GetDataType(),
@@ -541,7 +525,7 @@ bool IsOutputSupportedRef(const TensorInfo& output,
bool IsPermuteSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -553,7 +537,7 @@ bool IsPermuteSupportedRef(const TensorInfo& input,
bool IsPooling2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -563,7 +547,7 @@ bool IsPooling2dSupportedRef(const TensorInfo& input,
}
bool IsResizeBilinearSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -574,7 +558,7 @@ bool IsResizeBilinearSupportedRef(const TensorInfo& input,
bool IsSoftmaxSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -586,7 +570,7 @@ bool IsSoftmaxSupportedRef(const TensorInfo& input,
bool IsSplitterSupportedRef(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -597,7 +581,7 @@ bool IsSplitterSupportedRef(const TensorInfo& input,
bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -607,7 +591,7 @@ bool IsFakeQuantizationSupportedRef(const TensorInfo& input,
}
bool IsReshapeSupportedRef(const TensorInfo& input,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
@@ -617,7 +601,7 @@ bool IsReshapeSupportedRef(const TensorInfo& input,
bool IsFloorSupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
@@ -626,19 +610,32 @@ bool IsFloorSupportedRef(const TensorInfo& input,
&FalseFuncU8<>);
}
-bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
+bool IsLstmSupportedRef(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(input);
ignore_unused(outputStateIn);
@@ -665,12 +662,13 @@ bool IsLstmSupportedRef(const TensorInfo& input, const TensorInfo& outputStateIn
ignore_unused(projectionBias);
ignore_unused(cellToForgetWeights);
ignore_unused(cellToOutputWeights);
+ ignore_unused(reasonIfUnsupported);
return false;
}
bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
@@ -686,7 +684,7 @@ bool IsConvertFp16ToFp32SupportedRef(const TensorInfo& input,
bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
const TensorInfo& output,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
@@ -703,7 +701,7 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
bool IsMeanSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);
@@ -716,7 +714,7 @@ bool IsMeanSupportedRef(const TensorInfo& input,
bool IsPadSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- std::string* reasonIfUnsupported)
+ Optional<std::string&> reasonIfUnsupported)
{
ignore_unused(output);
ignore_unused(descriptor);