aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorramelg01 <ramy.elgammal@arm.com>2021-09-12 23:07:47 +0100
committerramy.elgammal <ramy.elgammal@arm.com>2021-09-16 13:33:41 +0000
commit3ae3d88c1a305ef4fc0beed8fda3cfc39ddb2ae8 (patch)
tree3bab18d64b773a76766844a7e34819443fd7ee8d
parent2ec6163cdbd274ec9207a7b4ee6e144f93440b4f (diff)
downloadComputeLibrary-3ae3d88c1a305ef4fc0beed8fda3cfc39ddb2ae8.tar.gz
Provide logging for configure functions in all cpu operators
Partially Resolves: COMPMID-4718 Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com> Change-Id: I02eabdd6bce8cd561ab2fdfd644a686a3762b817 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6253 Reviewed-by: Giorgio Arena <giorgio.arena@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--src/common/utils/Log.h20
-rw-r--r--src/cpu/operators/CpuActivation.cpp2
-rw-r--r--src/cpu/operators/CpuAdd.cpp2
-rw-r--r--src/cpu/operators/CpuCast.cpp3
-rw-r--r--src/cpu/operators/CpuConcatenate.cpp2
-rw-r--r--src/cpu/operators/CpuConv2d.cpp3
-rw-r--r--src/cpu/operators/CpuConvertFullyConnectedWeights.cpp2
-rw-r--r--src/cpu/operators/CpuCopy.cpp3
-rw-r--r--src/cpu/operators/CpuDepthwiseConv2d.cpp3
-rw-r--r--src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.cpp2
-rw-r--r--src/cpu/operators/CpuDequantize.cpp2
-rw-r--r--src/cpu/operators/CpuDirectConv2d.cpp3
-rw-r--r--src/cpu/operators/CpuElementwise.cpp6
-rw-r--r--src/cpu/operators/CpuElementwiseUnary.cpp2
-rw-r--r--src/cpu/operators/CpuFill.cpp3
-rw-r--r--src/cpu/operators/CpuFlatten.cpp3
-rw-r--r--src/cpu/operators/CpuFloor.cpp3
-rw-r--r--src/cpu/operators/CpuFullyConnected.cpp2
-rw-r--r--src/cpu/operators/CpuGemm.cpp2
-rw-r--r--src/cpu/operators/CpuGemmConv2d.cpp2
-rw-r--r--src/cpu/operators/CpuGemmDirectConv2d.cpp3
-rw-r--r--src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp2
-rw-r--r--src/cpu/operators/CpuGemmLowpOutputStage.cpp2
-rw-r--r--src/cpu/operators/CpuMul.cpp5
-rw-r--r--src/cpu/operators/CpuPermute.cpp3
-rw-r--r--src/cpu/operators/CpuPool2d.cpp3
-rw-r--r--src/cpu/operators/CpuQuantize.cpp2
-rw-r--r--src/cpu/operators/CpuReshape.cpp3
-rw-r--r--src/cpu/operators/CpuScale.cpp2
-rw-r--r--src/cpu/operators/CpuSoftmax.cpp2
-rw-r--r--src/cpu/operators/CpuSub.cpp5
-rw-r--r--src/cpu/operators/CpuTranspose.cpp3
-rw-r--r--src/cpu/operators/CpuWinogradConv2d.cpp2
-rw-r--r--utils/TypePrinter.h494
34 files changed, 588 insertions, 13 deletions
diff --git a/src/common/utils/Log.h b/src/common/utils/Log.h
index 89e86bf2fc..a9d0d79876 100644
--- a/src/common/utils/Log.h
+++ b/src/common/utils/Log.h
@@ -134,15 +134,17 @@ logParamsImpl(std::vector<std::string> &data_registry, const std::tuple<Tp...> &
/** Function Template with variable number of inputs to collect all the passed parameters from
* the logging macro ARM_COMPUTE_LOG_PARAMS(...)
*
- * @param[in] ...ins The input parameters in the variadic template
+ * @param[in] ...ins The input parameters in the variadic template, taken by forwarding reference (not by value)
+ *                   to avoid deducing T as an abstract class type (which cannot be instantiated by value)
+ *                   when an L-value reference to an abstract type is passed.
*
- * @return vector of the parameters' data in a string format
+ * @return Vector of the parameters' data in a string format
*/
template <typename... Ts>
-const std::vector<std::string> logParams(Ts... ins)
+const std::vector<std::string> logParams(Ts &&... ins)
{
std::vector<std::string> data_registry{};
- std::tuple<Ts...> in_params_tuple(ins...);
+ std::tuple<Ts...> in_params_tuple{ ins... };
// Start logging the tuple elements, starting from 0 to tuple_size-1
logParamsImpl<0>(data_registry, in_params_tuple);
@@ -210,8 +212,10 @@ inline const std::string constructDataLog(const std::vector<std::string> &params
*
* @param[in] ... Input parameters
*/
-#define ARM_COMPUTE_LOG_PARAMS(...) \
- ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL(constructDataLog(getParamsNames(#__VA_ARGS__), \
- logParams(__VA_ARGS__)));
-
+#define ARM_COMPUTE_LOG_PARAMS(...) \
+ do \
+ { \
+ ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL(constructDataLog(getParamsNames(#__VA_ARGS__), \
+ logParams(__VA_ARGS__))); \
+ } while(false)
#endif /* SRC_COMMON_LOG_H */
diff --git a/src/cpu/operators/CpuActivation.cpp b/src/cpu/operators/CpuActivation.cpp
index d9330a8156..3945fa59a5 100644
--- a/src/cpu/operators/CpuActivation.cpp
+++ b/src/cpu/operators/CpuActivation.cpp
@@ -25,6 +25,7 @@
#include "src/common/IOperator.h"
#include "src/common/utils/LegacySupport.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/CpuContext.h"
#include "src/cpu/kernels/CpuActivationKernel.h"
@@ -34,6 +35,7 @@ namespace cpu
{
void CpuActivation::configure(const ITensorInfo *input, ITensorInfo *output, const ActivationLayerInfo &activation_info)
{
+ ARM_COMPUTE_LOG_PARAMS(input, output, activation_info);
auto k = std::make_unique<kernels::CpuActivationKernel>();
k->configure(input, output, activation_info);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuAdd.cpp b/src/cpu/operators/CpuAdd.cpp
index 755b1994ae..76ec7d7d8d 100644
--- a/src/cpu/operators/CpuAdd.cpp
+++ b/src/cpu/operators/CpuAdd.cpp
@@ -33,8 +33,8 @@ namespace cpu
{
void CpuAdd::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_LOG_PARAMS(src0, src1, policy, act_info);
ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst, policy, act_info);
auto k = std::make_unique<kernels::CpuAddKernel>();
k->configure(src0, src1, dst, policy);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuCast.cpp b/src/cpu/operators/CpuCast.cpp
index d0980c75b6..1cfd8c1d0e 100644
--- a/src/cpu/operators/CpuCast.cpp
+++ b/src/cpu/operators/CpuCast.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuCastKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuCast::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst, policy);
auto k = std::make_unique<kernels::CpuCastKernel>();
k->configure(src, dst, policy);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuConcatenate.cpp b/src/cpu/operators/CpuConcatenate.cpp
index 92c1ef6bdf..4021fd8ded 100644
--- a/src/cpu/operators/CpuConcatenate.cpp
+++ b/src/cpu/operators/CpuConcatenate.cpp
@@ -36,6 +36,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
namespace arm_compute
@@ -45,6 +46,7 @@ namespace cpu
void CpuConcatenate::configure(const std::vector<const ITensorInfo *> &srcs_vector, ITensorInfo *dst, size_t axis)
{
ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ ARM_COMPUTE_LOG_PARAMS(srcs_vector, dst, axis);
_axis = axis;
_num_srcs = srcs_vector.size();
diff --git a/src/cpu/operators/CpuConv2d.cpp b/src/cpu/operators/CpuConv2d.cpp
index 3878e0de58..fa8a7a185c 100644
--- a/src/cpu/operators/CpuConv2d.cpp
+++ b/src/cpu/operators/CpuConv2d.cpp
@@ -24,6 +24,7 @@
#include "src/cpu/operators/CpuConv2d.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/operators/CpuDirectConv2d.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmConv2d.h"
@@ -50,6 +51,8 @@ void CpuConv2d::configure(ITensorInfo *input, ITensorInfo *weights, const ITenso
ARM_COMPUTE_ERROR_THROW_ON(CpuConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info,
enable_fast_math, num_groups));
+ ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
+
const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
switch(CpuConv2d::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
{
diff --git a/src/cpu/operators/CpuConvertFullyConnectedWeights.cpp b/src/cpu/operators/CpuConvertFullyConnectedWeights.cpp
index da744fc100..810ffb1e4e 100644
--- a/src/cpu/operators/CpuConvertFullyConnectedWeights.cpp
+++ b/src/cpu/operators/CpuConvertFullyConnectedWeights.cpp
@@ -24,6 +24,7 @@
#include "src/cpu/operators/CpuConvertFullyConnectedWeights.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.h"
namespace arm_compute
@@ -32,6 +33,7 @@ namespace cpu
{
void CpuConvertFullyConnectedWeights::configure(const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst, original_src_shape, data_layout);
auto k = std::make_unique<kernels::CpuConvertFullyConnectedWeightsKernel>();
k->configure(src, dst, original_src_shape, data_layout);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuCopy.cpp b/src/cpu/operators/CpuCopy.cpp
index 2eecc2390e..7420ff6240 100644
--- a/src/cpu/operators/CpuCopy.cpp
+++ b/src/cpu/operators/CpuCopy.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuCopyKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuCopy::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuCopyKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuDepthwiseConv2d.cpp b/src/cpu/operators/CpuDepthwiseConv2d.cpp
index 071690e7a6..c93ffb113d 100644
--- a/src/cpu/operators/CpuDepthwiseConv2d.cpp
+++ b/src/cpu/operators/CpuDepthwiseConv2d.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/core/utils/misc/InfoHelpers.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h"
namespace arm_compute
@@ -422,6 +423,8 @@ void CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::prepare(ITensorPack &tensors
void CpuDepthwiseConv2d::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info)
{
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, info);
+
_depth_conv_func = get_depthwiseconvolution_function(src, weights, (biases != nullptr) ? biases : nullptr, dst, info);
switch(_depth_conv_func)
{
diff --git a/src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.cpp b/src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.cpp
index a353a66dc2..e75b082ca5 100644
--- a/src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.cpp
+++ b/src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/AssemblyUtils.h"
@@ -57,6 +58,7 @@ void CpuDepthwiseConv2dAssemblyDispatch::configure(const ITensorInfo *src,
ITensorInfo *dst,
const ConvolutionInfo &info)
{
+ ARM_COMPUTE_LOG_PARAMS(src, weights, bias, dst, info);
const CPUInfo &ci = NEScheduler::get().cpu_info();
const unsigned int num_threads = NEScheduler::get().num_threads();
_pImpl->is_prepared = false;
diff --git a/src/cpu/operators/CpuDequantize.cpp b/src/cpu/operators/CpuDequantize.cpp
index 7c03571f40..12dc136ba3 100644
--- a/src/cpu/operators/CpuDequantize.cpp
+++ b/src/cpu/operators/CpuDequantize.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuDequantizeKernel.h"
namespace arm_compute
@@ -34,6 +35,7 @@ namespace cpu
{
void CpuDequantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuDequantizeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuDirectConv2d.cpp b/src/cpu/operators/CpuDirectConv2d.cpp
index ec52dbf153..9cdbdb61c1 100644
--- a/src/cpu/operators/CpuDirectConv2d.cpp
+++ b/src/cpu/operators/CpuDirectConv2d.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
namespace arm_compute
{
@@ -43,6 +44,8 @@ CpuDirectConv2d::CpuDirectConv2d(std::shared_ptr<IMemoryManager> memory_manager)
void CpuDirectConv2d::configure(ITensorInfo *src, ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *dst, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);
+ ARM_COMPUTE_LOG_PARAMS(src, weights, bias, dst, conv_info, act_info);
+
_output_stage_kernel = std::make_unique<kernels::CpuDirectConv2dOutputStageKernel>();
_conv_kernel = std::make_unique<kernels::CpuDirectConv2dKernel>();
_input_border_handler = std::make_unique<NEFillBorderKernel>();
diff --git a/src/cpu/operators/CpuElementwise.cpp b/src/cpu/operators/CpuElementwise.cpp
index 4f767434f3..b88ae3e514 100644
--- a/src/cpu/operators/CpuElementwise.cpp
+++ b/src/cpu/operators/CpuElementwise.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "src/cpu/operators/CpuElementwise.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/CpuElementwiseKernel.h"
@@ -47,6 +48,7 @@ void CpuElementwiseBase::run(ITensorPack &tensors)
template <ArithmeticOperation op>
void CpuElementwiseArithmetic<op>::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst);
auto k = std::make_unique<kernels::CpuArithmeticKernel>();
k->configure(op, src0, src1, dst);
_kernel = std::move(k);
@@ -65,6 +67,7 @@ template class CpuElementwiseArithmetic<ArithmeticOperation::PRELU>;
void CpuElementwiseDivision::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst);
auto k = std::make_unique<kernels::CpuDivisionKernel>();
k->configure(src0, src1, dst);
_kernel = std::move(k);
@@ -77,6 +80,7 @@ Status CpuElementwiseDivision::validate(const ITensorInfo *src0, const ITensorIn
void CpuElementwisePower::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst);
auto k = std::make_unique<kernels::CpuPowerKernel>();
k->configure(src0, src1, dst);
_kernel = std::move(k);
@@ -90,6 +94,7 @@ Status CpuElementwisePower::validate(const ITensorInfo *src0, const ITensorInfo
template <ComparisonOperation COP>
void CpuElementwiseComparisonStatic<COP>::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst);
auto k = std::make_unique<kernels::CpuComparisonKernel>();
k->configure(COP, src0, src1, dst);
_kernel = std::move(k);
@@ -103,6 +108,7 @@ Status CpuElementwiseComparisonStatic<COP>::validate(const ITensorInfo *src0, co
void CpuElementwiseComparison::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ComparisonOperation op)
{
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst);
auto k = std::make_unique<kernels::CpuComparisonKernel>();
k->configure(op, src0, src1, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuElementwiseUnary.cpp b/src/cpu/operators/CpuElementwiseUnary.cpp
index 7cf1488c44..7fd14dba7d 100644
--- a/src/cpu/operators/CpuElementwiseUnary.cpp
+++ b/src/cpu/operators/CpuElementwiseUnary.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "src/cpu/operators/CpuElementwiseUnary.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/CpuElementwiseUnaryKernel.h"
@@ -33,6 +34,7 @@ using KernelType = kernels::CpuElementwiseUnaryKernel;
void CpuElementwiseUnary::configure(ElementWiseUnary op, const ITensorInfo &src, ITensorInfo &dst)
{
+ ARM_COMPUTE_LOG_PARAMS(op, src, dst);
auto k = std::make_unique<KernelType>();
k->configure(op, src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuFill.cpp b/src/cpu/operators/CpuFill.cpp
index c0b48f5830..3d8f62fe07 100644
--- a/src/cpu/operators/CpuFill.cpp
+++ b/src/cpu/operators/CpuFill.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuFillKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuFill::configure(const ITensorInfo *tensor, PixelValue constant_value)
{
+ ARM_COMPUTE_LOG_PARAMS(tensor, constant_value);
auto k = std::make_unique<kernels::CpuFillKernel>();
k->configure(tensor, constant_value);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuFlatten.cpp b/src/cpu/operators/CpuFlatten.cpp
index 685e5b9238..f6ae139794 100644
--- a/src/cpu/operators/CpuFlatten.cpp
+++ b/src/cpu/operators/CpuFlatten.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuReshapeKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuFlatten::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuReshapeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuFloor.cpp b/src/cpu/operators/CpuFloor.cpp
index 55f645847f..868add7d29 100644
--- a/src/cpu/operators/CpuFloor.cpp
+++ b/src/cpu/operators/CpuFloor.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuFloorKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuFloor::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuFloorKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuFullyConnected.cpp b/src/cpu/operators/CpuFullyConnected.cpp
index cafb3484b6..4133d9e8ca 100644
--- a/src/cpu/operators/CpuFullyConnected.cpp
+++ b/src/cpu/operators/CpuFullyConnected.cpp
@@ -29,6 +29,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuTransposeKernel.h"
@@ -231,6 +232,7 @@ void CpuFullyConnected::configure(const ITensorInfo *src, const ITensorInfo *wei
biases != nullptr ? biases : nullptr,
dst,
fc_info));
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info);
_needs_weights_conversion = false;
_needs_weights_reshape = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false;
diff --git a/src/cpu/operators/CpuGemm.cpp b/src/cpu/operators/CpuGemm.cpp
index f7416315e9..9c7ad92761 100644
--- a/src/cpu/operators/CpuGemm.cpp
+++ b/src/cpu/operators/CpuGemm.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
@@ -58,6 +59,7 @@ void CpuGemm::configure(const ITensorInfo *a, const ITensorInfo *b, const ITenso
{
ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info));
+ ARM_COMPUTE_LOG_PARAMS(a, b, c, d, alpha, beta, gemm_info);
const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
const bool is_c_bias = gemm_info.reshape_b_only_on_first_run();
diff --git a/src/cpu/operators/CpuGemmConv2d.cpp b/src/cpu/operators/CpuGemmConv2d.cpp
index 5010792a28..d925f8edd9 100644
--- a/src/cpu/operators/CpuGemmConv2d.cpp
+++ b/src/cpu/operators/CpuGemmConv2d.cpp
@@ -31,6 +31,7 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuCol2ImKernel.h"
#include "src/cpu/kernels/CpuIm2ColKernel.h"
@@ -226,6 +227,7 @@ void CpuGemmConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights
act_info,
enable_fast_math,
num_groups));
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
const DataType data_type = src->data_type();
const DataLayout data_layout = src->data_layout();
diff --git a/src/cpu/operators/CpuGemmDirectConv2d.cpp b/src/cpu/operators/CpuGemmDirectConv2d.cpp
index 2e17a21462..75c057e455 100644
--- a/src/cpu/operators/CpuGemmDirectConv2d.cpp
+++ b/src/cpu/operators/CpuGemmDirectConv2d.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"
@@ -112,6 +113,8 @@ void CpuGemmDirectConv2d::configure(const ITensorInfo *src, const ITensorInfo *w
biases != nullptr ? biases : nullptr,
dst,
info));
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, info);
+
_run_activation = info.act_info.enabled() && !_gemm_asm_func->is_activation_supported(info.act_info);
_is_prepared = false;
diff --git a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
index 2074a89307..8faa3c217a 100644
--- a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
+++ b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
@@ -35,6 +35,7 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuConvertQuantizedSignednessKernel.h"
#include "src/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
#include "src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.h"
@@ -108,6 +109,7 @@ void CpuGemmLowpMatrixMultiplyCore::configure(const ITensorInfo *a, const ITenso
{
ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, dst);
ARM_COMPUTE_ERROR_THROW_ON(CpuGemmLowpMatrixMultiplyCore::validate(a, b, c, dst, gemm_info));
+ ARM_COMPUTE_LOG_PARAMS(a, b, c, dst, gemm_info);
const ITensorInfo *matrix_a = a;
const ITensorInfo *matrix_b = b;
diff --git a/src/cpu/operators/CpuGemmLowpOutputStage.cpp b/src/cpu/operators/CpuGemmLowpOutputStage.cpp
index ebd3f60280..58f98acff0 100644
--- a/src/cpu/operators/CpuGemmLowpOutputStage.cpp
+++ b/src/cpu/operators/CpuGemmLowpOutputStage.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h"
#include "src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
#include "src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h"
@@ -39,6 +40,7 @@ void CpuGemmLowpOutputStage::configure(ITensorInfo *src, ITensorInfo *bias, ITen
{
// Perform validate step
ARM_COMPUTE_ERROR_THROW_ON(CpuGemmLowpOutputStage::validate(src, bias, dst, info));
+ ARM_COMPUTE_LOG_PARAMS(src, bias, dst, info);
switch(info.type)
{
diff --git a/src/cpu/operators/CpuMul.cpp b/src/cpu/operators/CpuMul.cpp
index 06a68d64a8..9cb93b7784 100644
--- a/src/cpu/operators/CpuMul.cpp
+++ b/src/cpu/operators/CpuMul.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuMulKernel.h"
namespace arm_compute
@@ -43,6 +44,8 @@ void CpuMul::configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, f
const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_LOG_PARAMS(src1, src2, dst, scale, overflow_policy, rounding_policy, act_info);
+
auto k = std::make_unique<kernels::CpuMulKernel>();
k->configure(src1, src2, dst, scale, overflow_policy, rounding_policy);
_kernel = std::move(k);
@@ -63,6 +66,8 @@ Status CpuComplexMul::validate(const ITensorInfo *src1, const ITensorInfo *src2,
void CpuComplexMul::configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_LOG_PARAMS(src1, src2, dst, act_info);
+
auto k = std::make_unique<kernels::CpuComplexMulKernel>();
k->configure(src1, src2, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuPermute.cpp b/src/cpu/operators/CpuPermute.cpp
index d730815313..babaf21b6f 100644
--- a/src/cpu/operators/CpuPermute.cpp
+++ b/src/cpu/operators/CpuPermute.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuPermuteKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuPermute::configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst, perm);
auto k = std::make_unique<kernels::CpuPermuteKernel>();
k->configure(src, dst, perm);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuPool2d.cpp b/src/cpu/operators/CpuPool2d.cpp
index 6059c75dd2..a4ac871d48 100644
--- a/src/cpu/operators/CpuPool2d.cpp
+++ b/src/cpu/operators/CpuPool2d.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/cpu/kernels/CpuPool2dKernel.h"
#include "src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.h"
@@ -50,6 +51,8 @@ CpuPool2d::~CpuPool2d() = default;
void CpuPool2d::configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst, pool_info, indices);
+
// Check if we can run assembly kernels. Currently, indices are not supported by those kernels
const bool run_optimised = bool(kernels::CpuPool2dAssemblyWrapperKernel::validate(src, dst, pool_info)) && (indices == nullptr);
diff --git a/src/cpu/operators/CpuQuantize.cpp b/src/cpu/operators/CpuQuantize.cpp
index 0bfcc21942..f9e14d1f88 100644
--- a/src/cpu/operators/CpuQuantize.cpp
+++ b/src/cpu/operators/CpuQuantize.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/cpu/kernels/CpuQuantizeKernel.h"
namespace arm_compute
@@ -42,6 +43,7 @@ Status CpuQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
void CpuQuantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
// Configure quantize kernel
auto k = std::make_unique<kernels::CpuQuantizeKernel>();
diff --git a/src/cpu/operators/CpuReshape.cpp b/src/cpu/operators/CpuReshape.cpp
index 5d2b052e34..79e7b8fe6e 100644
--- a/src/cpu/operators/CpuReshape.cpp
+++ b/src/cpu/operators/CpuReshape.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuReshapeKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuReshape::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuReshapeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuScale.cpp b/src/cpu/operators/CpuScale.cpp
index 9e35bccec5..27da238c16 100644
--- a/src/cpu/operators/CpuScale.cpp
+++ b/src/cpu/operators/CpuScale.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/utils/ScaleUtils.h"
#include "src/cpu/kernels/CpuScaleKernel.h"
#include "support/Rounding.h"
@@ -90,6 +91,7 @@ void CpuScale::configure(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelIn
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(CpuScale::validate(src, dst, info));
+ ARM_COMPUTE_LOG_PARAMS(src, dst, info);
_scale_info = info;
_is_prepared = false;
diff --git a/src/cpu/operators/CpuSoftmax.cpp b/src/cpu/operators/CpuSoftmax.cpp
index b70ee7e4df..bf4c2fa3a2 100644
--- a/src/cpu/operators/CpuSoftmax.cpp
+++ b/src/cpu/operators/CpuSoftmax.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/core/helpers/SoftmaxHelpers.h"
#include "src/cpu/kernels/CpuSoftmaxKernel.h"
@@ -60,6 +61,7 @@ void CpuSoftmaxGeneric<IS_LOG>::configure(const ITensorInfo *src, ITensorInfo *d
// Perform validation step
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(CpuSoftmaxGeneric::validate(src, dst, beta, axis));
+ ARM_COMPUTE_LOG_PARAMS(src, dst, beta, axis);
const unsigned int actual_axis = static_cast<unsigned int>(wrap_around(axis, static_cast<int32_t>(src->num_dimensions())));
diff --git a/src/cpu/operators/CpuSub.cpp b/src/cpu/operators/CpuSub.cpp
index 0485a595c7..f0a7770cea 100644
--- a/src/cpu/operators/CpuSub.cpp
+++ b/src/cpu/operators/CpuSub.cpp
@@ -25,6 +25,8 @@
#include "src/cpu/kernels/CpuSubKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
@@ -32,6 +34,7 @@ namespace cpu
void CpuSub::configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_LOG_PARAMS(src0, src1, dst, policy);
auto k = std::make_unique<kernels::CpuSubKernel>();
k->configure(src0, src1, dst, policy);
_kernel = std::move(k);
@@ -43,4 +46,4 @@ Status CpuSub::validate(const ITensorInfo *src0, const ITensorInfo *src1, const
return kernels::CpuSubKernel::validate(src0, src1, dst, policy);
}
} // namespace cpu
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/cpu/operators/CpuTranspose.cpp b/src/cpu/operators/CpuTranspose.cpp
index 518227b464..4e7854fd6e 100644
--- a/src/cpu/operators/CpuTranspose.cpp
+++ b/src/cpu/operators/CpuTranspose.cpp
@@ -25,12 +25,15 @@
#include "src/cpu/kernels/CpuTransposeKernel.h"
+#include "src/common/utils/Log.h"
+
namespace arm_compute
{
namespace cpu
{
void CpuTranspose::configure(const ITensorInfo *src, ITensorInfo *dst)
{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
auto k = std::make_unique<kernels::CpuTransposeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
diff --git a/src/cpu/operators/CpuWinogradConv2d.cpp b/src/cpu/operators/CpuWinogradConv2d.cpp
index 8fca836b8e..dcc18ce8fa 100644
--- a/src/cpu/operators/CpuWinogradConv2d.cpp
+++ b/src/cpu/operators/CpuWinogradConv2d.cpp
@@ -29,6 +29,7 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/kernels/convolution/common/utils.hpp"
#include "src/core/NEON/kernels/convolution/winograd/winograd.hpp"
@@ -340,6 +341,7 @@ void CpuWinogradConv2d::configure(const ITensorInfo *src, const ITensorInfo *wei
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv_info));
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, act_info, enable_fast_math);
// Get indices for the width and height
_data_layout = src->data_layout();
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 8cbc8dbd0d..91532bdaac 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -34,6 +34,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTunerTypes.h"
#include "arm_compute/runtime/CL/CLTypes.h"
+#include "arm_compute/runtime/FunctionDescriptors.h"
#include "support/StringSupport.h"
#include <ostream>
@@ -427,6 +428,27 @@ inline std::string to_string(const arm_compute::ActivationLayerInfo &info)
return str.str();
}
+/** Formatted output of the activation function info type.
+ *
+ * @param[in] info Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const arm_compute::ActivationLayerInfo *info)
+{
+ std::string ret_str = "nullptr";
+ if(info != nullptr)
+ {
+ std::stringstream str;
+ if(info->enabled())
+ {
+ str << info->activation();
+ }
+ ret_str = str.str();
+ }
+ return ret_str;
+}
+
/** Formatted output of the activation function type.
*
* @param[in] function Type to output.
@@ -1033,7 +1055,7 @@ inline ::std::ostream &operator<<(std::ostream &os, const ITensorInfo *info)
return os;
}
-/** Formatted output of the TensorInfo type.
+/** Formatted output of the const TensorInfo& type.
*
* @param[out] os Output stream.
* @param[in] info Type to output.
@@ -1046,7 +1068,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const TensorInfo &info)
return os;
}
-/** Formatted output of the TensorInfo type.
+/** Formatted output of the const TensorInfo& type.
*
* @param[in] info Type to output.
*
@@ -1059,16 +1081,101 @@ inline std::string to_string(const TensorInfo &info)
return str.str();
}
+/** Formatted output of the const ITensorInfo& type.
+ *
+ * @param[in] info Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const ITensorInfo &info)
+{
+ std::stringstream str;
+ str << &info;
+ return str.str();
+}
+
/** Formatted output of the ITensorInfo* type.
*
* @param[in] info Type to output.
*
* @return Formatted string.
*/
+inline std::string to_string(ITensorInfo *info)
+{
+ std::string ret_str = "nullptr";
+ if(info != nullptr)
+ {
+ std::stringstream str;
+ str << info;
+ ret_str = str.str();
+ }
+ return ret_str;
+}
+
+/** Formatted output of the const ITensorInfo* type.
+ *
+ * @param[in] info Type to output.
+ *
+ * @return Formatted string.
+ */
inline std::string to_string(const ITensorInfo *info)
{
+ std::string ret_str = "nullptr";
+ if(info != nullptr)
+ {
+ std::stringstream str;
+ str << info;
+ ret_str = str.str();
+ }
+ return ret_str;
+}
+
+/** Formatted output of the const ITensor* type.
+ *
+ * @param[in] tensor Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const ITensor *tensor)
+{
+ std::string ret_str = "nullptr";
+ if(tensor != nullptr)
+ {
+ std::stringstream str;
+ str << tensor->info();
+ ret_str = str.str();
+ }
+ return ret_str;
+}
+
+/** Formatted output of the ITensor* type.
+ *
+ * @param[in] tensor Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(ITensor *tensor)
+{
+ std::string ret_str = "nullptr";
+ if(tensor != nullptr)
+ {
+ std::stringstream str;
+ str << tensor->info();
+ ret_str = str.str();
+ }
+ return ret_str;
+}
+
+/** Formatted output of the ITensor& type.
+ *
+ * @param[in] tensor Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(ITensor &tensor)
+{
std::stringstream str;
- str << info;
+ str << tensor.info();
return str.str();
}
@@ -2206,6 +2313,387 @@ inline ::std::ostream &operator<<(::std::ostream &os, const CLTunerMode &val)
return os;
}
+/** Formatted output of the ConvolutionInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] conv_info ConvolutionInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionInfo &conv_info)
+{
+ os << "PadStrideInfo = " << conv_info.pad_stride_info << ", "
+ << "depth_multiplier = " << conv_info.depth_multiplier << ", "
+ << "act_info = " << to_string(conv_info.act_info) << ", "
+ << "dilation = " << conv_info.dilation;
+ return os;
+}
+
+/** Converts a @ref ConvolutionInfo to string
+ *
+ * @param[in] info ConvolutionInfo value to be converted
+ *
+ * @return String representing the corresponding ConvolutionInfo
+ */
+inline std::string to_string(const ConvolutionInfo &info)
+{
+ std::stringstream str;
+ str << info;
+ return str.str();
+}
+
+/** Formatted output of the FullyConnectedLayerInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] layer_info FullyConnectedLayerInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FullyConnectedLayerInfo &layer_info)
+{
+ os << "activation_info = " << to_string(layer_info.activation_info) << ", "
+ << "weights_trained_layout = " << layer_info.weights_trained_layout << ", "
+ << "transpose_weights = " << layer_info.transpose_weights << ", "
+ << "are_weights_reshaped = " << layer_info.are_weights_reshaped << ", "
+ << "retain_internal_weights = " << layer_info.retain_internal_weights << ", "
+       << "constant_weights = " << layer_info.constant_weights << ", "
+ << "fp_mixed_precision = " << layer_info.fp_mixed_precision;
+ return os;
+}
+
+/** Converts a @ref FullyConnectedLayerInfo to string
+ *
+ * @param[in] info FullyConnectedLayerInfo value to be converted
+ *
+ * @return String representing the corresponding FullyConnectedLayerInfo
+ */
+inline std::string to_string(const FullyConnectedLayerInfo &info)
+{
+ std::stringstream str;
+ str << info;
+ return str.str();
+}
+
+/** Formatted output of the GEMMLowpOutputStageType type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] gemm_type GEMMLowpOutputStageType to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLowpOutputStageType &gemm_type)
+{
+ switch(gemm_type)
+ {
+ case GEMMLowpOutputStageType::NONE:
+ os << "NONE";
+ break;
+ case GEMMLowpOutputStageType::QUANTIZE_DOWN:
+ os << "QUANTIZE_DOWN";
+ break;
+ case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
+ os << "QUANTIZE_DOWN_FIXEDPOINT";
+ break;
+ case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
+ os << "QUANTIZE_DOWN_FLOAT";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+ return os;
+}
+
+/** Converts a @ref GEMMLowpOutputStageType to string
+ *
+ * @param[in] gemm_type GEMMLowpOutputStageType value to be converted
+ *
+ * @return String representing the corresponding GEMMLowpOutputStageType
+ */
+inline std::string to_string(const GEMMLowpOutputStageType &gemm_type)
+{
+ std::stringstream str;
+ str << gemm_type;
+ return str.str();
+}
+
+/** Formatted output of the GEMMLowpOutputStageInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] gemm_info GEMMLowpOutputStageInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const GEMMLowpOutputStageInfo &gemm_info)
+{
+ os << "type = " << gemm_info.type << ", "
+       << "gemmlowp_offset = " << gemm_info.gemmlowp_offset << ", "
+       << "gemmlowp_multiplier = " << gemm_info.gemmlowp_multiplier << ", "
+ << "gemmlowp_shift = " << gemm_info.gemmlowp_shift << ", "
+ << "gemmlowp_min_bound = " << gemm_info.gemmlowp_min_bound << ", "
+ << "gemmlowp_max_bound = " << gemm_info.gemmlowp_max_bound << ", "
+ << "gemmlowp_multipliers = " << gemm_info.gemmlowp_multiplier << ", "
+ << "gemmlowp_shifts = " << gemm_info.gemmlowp_shift << ", "
+ << "gemmlowp_real_multiplier = " << gemm_info.gemmlowp_real_multiplier << ", "
+ << "is_quantized_per_channel = " << gemm_info.is_quantized_per_channel << ", "
+ << "output_data_type = " << gemm_info.output_data_type;
+ return os;
+}
+
+/** Converts a @ref GEMMLowpOutputStageInfo to string
+ *
+ * @param[in] gemm_info GEMMLowpOutputStageInfo value to be converted
+ *
+ * @return String representing the corresponding GEMMLowpOutputStageInfo
+ */
+inline std::string to_string(const GEMMLowpOutputStageInfo &gemm_info)
+{
+ std::stringstream str;
+ str << gemm_info;
+ return str.str();
+}
+
+/** Formatted output of the Conv2dInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] conv_info Conv2dInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const Conv2dInfo &conv_info)
+{
+ os << "conv_info = " << conv_info.conv_info << ", "
+ << "dilation = " << conv_info.dilation << ", "
+ << "act_info = " << to_string(conv_info.act_info) << ", "
+ << "enable_fast_math = " << conv_info.enable_fast_math << ", "
+ << "num_groups = " << conv_info.num_groups;
+ return os;
+}
+
+/** Converts a @ref Conv2dInfo to string
+ *
+ * @param[in] conv_info Conv2dInfo value to be converted
+ *
+ * @return String representing the corresponding Conv2dInfo
+ */
+inline std::string to_string(const Conv2dInfo &conv_info)
+{
+ std::stringstream str;
+ str << conv_info;
+ return str.str();
+}
+
+/** Formatted output of the PixelValue type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] pixel_value PixelValue to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const PixelValue &pixel_value)
+{
+    os << "value.u64 = " << pixel_value.get<uint64_t>();
+ return os;
+}
+
+/** Converts a @ref PixelValue to string
+ *
+ * @param[in] pixel_value PixelValue value to be converted
+ *
+ * @return String representing the corresponding PixelValue
+ */
+inline std::string to_string(const PixelValue &pixel_value)
+{
+ std::stringstream str;
+ str << pixel_value;
+ return str.str();
+}
+
+/** Formatted output of the ScaleKernelInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] scale_info ScaleKernelInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const ScaleKernelInfo &scale_info)
+{
+ os << "interpolation_policy = " << scale_info.interpolation_policy << ", "
+ << "BorderMode = " << scale_info.border_mode << ", "
+ << "PixelValue = " << scale_info.constant_border_value << ", "
+ << "SamplingPolicy = " << scale_info.sampling_policy << ", "
+ << "use_padding = " << scale_info.use_padding << ", "
+ << "align_corners = " << scale_info.align_corners << ", "
+ << "data_layout = " << scale_info.data_layout;
+ return os;
+}
+
+/** Converts a @ref ScaleKernelInfo to string
+ *
+ * @param[in] scale_info ScaleKernelInfo value to be converted
+ *
+ * @return String representing the corresponding ScaleKernelInfo
+ */
+inline std::string to_string(const ScaleKernelInfo &scale_info)
+{
+ std::stringstream str;
+ str << scale_info;
+ return str.str();
+}
+
+/** Formatted output of the FFTDirection type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] fft_dir FFTDirection to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFTDirection &fft_dir)
+{
+ switch(fft_dir)
+ {
+ case FFTDirection::Forward:
+ os << "Forward";
+ break;
+ case FFTDirection::Inverse:
+ os << "Inverse";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+ return os;
+}
+
+/** Converts a @ref FFTDirection to string
+ *
+ * @param[in] fft_dir FFTDirection value to be converted
+ *
+ * @return String representing the corresponding FFTDirection
+ */
+inline std::string to_string(const FFTDirection &fft_dir)
+{
+ std::stringstream str;
+ str << fft_dir;
+ return str.str();
+}
+
+/** Formatted output of the FFT1DInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] fft1d_info FFT1DInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFT1DInfo &fft1d_info)
+{
+ os << "axis = " << fft1d_info.axis << ", "
+ << "direction = " << fft1d_info.direction;
+ return os;
+}
+
+/** Converts a @ref FFT1DInfo to string
+ *
+ * @param[in] fft1d_info FFT1DInfo value to be converted
+ *
+ * @return String representing the corresponding FFT1DInfo
+ */
+inline std::string to_string(const FFT1DInfo &fft1d_info)
+{
+ std::stringstream str;
+ str << fft1d_info;
+ return str.str();
+}
+
+/** Formatted output of the FFT2DInfo type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] fft2d_info FFT2DInfo to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FFT2DInfo &fft2d_info)
+{
+    os << "axis0 = " << fft2d_info.axis0 << ", "
+       << "axis1 = " << fft2d_info.axis1 << ", "
+ << "direction = " << fft2d_info.direction;
+ return os;
+}
+
+/** Converts a @ref FFT2DInfo to string
+ *
+ * @param[in] fft2d_info FFT2DInfo value to be converted
+ *
+ * @return String representing the corresponding FFT2DInfo
+ */
+inline std::string to_string(const FFT2DInfo &fft2d_info)
+{
+ std::stringstream str;
+ str << fft2d_info;
+ return str.str();
+}
+
+/** Formatted output of the Coordinates2D type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] coord_2d Coordinates2D to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const Coordinates2D &coord_2d)
+{
+ os << "x = " << coord_2d.x << ", "
+ << "y = " << coord_2d.y;
+ return os;
+}
+
+/** Converts a @ref Coordinates2D to string
+ *
+ * @param[in] coord_2d Coordinates2D value to be converted
+ *
+ * @return String representing the corresponding Coordinates2D
+ */
+inline std::string to_string(const Coordinates2D &coord_2d)
+{
+ std::stringstream str;
+ str << coord_2d;
+ return str.str();
+}
+
+/** Formatted output of the FuseBatchNormalizationType type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] fuse_type FuseBatchNormalizationType to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const FuseBatchNormalizationType &fuse_type)
+{
+ switch(fuse_type)
+ {
+ case FuseBatchNormalizationType::CONVOLUTION:
+ os << "CONVOLUTION";
+ break;
+ case FuseBatchNormalizationType::DEPTHWISECONVOLUTION:
+ os << "DEPTHWISECONVOLUTION";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+ return os;
+}
+
+/** Converts a @ref FuseBatchNormalizationType to string
+ *
+ * @param[in] fuse_type FuseBatchNormalizationType value to be converted
+ *
+ * @return String representing the corresponding FuseBatchNormalizationType
+ */
+inline std::string to_string(const FuseBatchNormalizationType &fuse_type)
+{
+ std::stringstream str;
+ str << fuse_type;
+ return str.str();
+}
+
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TYPE_PRINTER_H__ */