author    Michele Di Giorgio <michele.digiorgio@arm.com>  2021-07-02 15:17:08 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2021-07-23 18:50:22 +0000
commit    d9cdf1402fb7e1231f56c1d5549639b423e4e323 (patch)
tree      8fe0438c9006bd0efddf591c92c162b12a1a48b7
parent    a8297fb7433e1acfb7bb37703ab93b7ec4c39335 (diff)
download  ComputeLibrary-d9cdf1402fb7e1231f56c1d5549639b423e4e323.tar.gz
Port NEFullyConnectedLayer to memory injecting interface
Resolves: COMPMID-4501
Change-Id: Ib61b3d06974009e501b3fb86467735427e13a94a
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5931
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
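In the memory-injecting interface this patch adopts, operators are configured on ITensorInfo descriptors only and receive concrete tensors at run time through an ITensorPack. A minimal sketch of the calling pattern, assuming tensors named src/weights/bias/dst (illustrative names, not part of this patch):

// Configuration sees only metadata; no tensor memory is captured by the operator.
cpu::CpuFullyConnected op;
op.configure(src.info(), weights.info(), bias.info(), dst.info(), FullyConnectedLayerInfo());

// The operator reports its auxiliary memory needs instead of allocating them itself.
experimental::MemoryRequirements aux = op.workspace();

// Concrete tensors (plus any workspace tensors) are injected on each call.
ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &weights }, { ACL_SRC_2, &bias }, { ACL_DST, &dst } };
op.run(pack);

In the function-level code below, manage_workspace() allocates the workspace tensors from the reported requirements and adds them to the pack automatically.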
-rw-r--r--  Android.bp                                                    |   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h    |  45
-rw-r--r--  filelist.json                                                 |  16
-rw-r--r--  src/core/helpers/MemoryHelpers.h                              |  20
-rw-r--r--  src/runtime/NEON/functions/NEFullyConnectedLayer.cpp          | 442
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp                         |  14
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConv2d.cpp                   |  14
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp         |  16
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp   |  14
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp     |  14
-rw-r--r--  src/runtime/cpu/operators/CpuFullyConnected.cpp               | 483
-rw-r--r--  src/runtime/cpu/operators/CpuFullyConnected.h                 | 145
-rw-r--r--  src/runtime/cpu/operators/CpuGemm.cpp                         |   2
-rw-r--r--  src/runtime/cpu/operators/CpuWinogradConv2d.cpp               | 166
-rw-r--r--  src/runtime/cpu/operators/CpuWinogradConv2d.h                 |   2
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp                 | 193
16 files changed, 975 insertions, 612 deletions
diff --git a/Android.bp b/Android.bp
index 91dda75f51..6507f7037a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -641,6 +641,7 @@ cc_library_static {
"src/runtime/cpu/operators/CpuFill.cpp",
"src/runtime/cpu/operators/CpuFlatten.cpp",
"src/runtime/cpu/operators/CpuFloor.cpp",
+ "src/runtime/cpu/operators/CpuFullyConnected.cpp",
"src/runtime/cpu/operators/CpuGemm.cpp",
"src/runtime/cpu/operators/CpuGemmConvolution.cpp",
"src/runtime/cpu/operators/CpuGemmDirectConv2d.cpp",
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index 43f1d4cc05..aa96716d38 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -25,15 +25,14 @@
#define ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/IWeightsManager.h"
-#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
-#include "arm_compute/runtime/NEON/functions/NEFlattenLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
-#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
#include "arm_compute/runtime/Tensor.h"
+#include <memory>
+
namespace arm_compute
{
namespace weights_transformations
@@ -129,17 +128,7 @@ public:
FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. The weights must be 2 dimensional.
- * If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
- * If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
- * Data type supported: Same as @p input.
- * @param[in] biases Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED.
- * @param[in] output Destination tensor info. Its shape should be equal to the output of a matrix multiplication between:
- * - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
- * - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
- * Data type supported: Same as @p input.
- * @param[in] fc_info (Optional) Fully connected layer additional info
+ * Similar to @ref NEFullyConnectedLayer
*
* @return a status
*/
@@ -151,28 +140,8 @@ public:
void prepare() override;
private:
- void configure_fc_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act);
- void configure_conv_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act);
- void configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act);
-
- MemoryGroup _memory_group;
- IWeightsManager *_weights_manager;
- NEFlattenLayer _flatten;
- NEConvertFullyConnectedWeights _convert_weights;
- weights_transformations::NEConvertFullyConnectedWeightsManaged _convert_weights_managed;
- NETranspose _reshape_weights_function;
- weights_transformations::NEFullyConnectedLayerReshapeWeightsManaged _reshape_weights_managed_function;
- NEGEMM _mm_gemm;
- NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
- Tensor _flatten_output;
- Tensor _converted_weights_output;
- Tensor _reshape_weights_output;
- const ITensor *_original_weights;
- bool _are_weights_converted;
- bool _are_weights_reshaped;
- bool _is_fc_after_conv;
- bool _is_quantized_asymmetric;
- bool _is_prepared;
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H */
diff --git a/filelist.json b/filelist.json
index 914abc2ac3..56633e64d1 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1139,6 +1139,22 @@
}
}
},
+ "FullyConnected": {
+ "deps": [
+ "CpuFlatten",
+ "CpuConvertFullyConnectedWeights",
+ "CpuGemm",
+ "CpuGemmLowpMatrixMultiplyCore"
+ ],
+ "files": {
+ "operator": [
+ "src/runtime/cpu/operators/CpuFullyConnected.cpp"
+ ]
+ },
+ "kernel": [
+ "CpuTransposeKernel"
+ ]
+ },
"FuseBatchNormalization": {
"files": {
"kernel": [
diff --git a/src/core/helpers/MemoryHelpers.h b/src/core/helpers/MemoryHelpers.h
index 60a2dbfff7..a41052687b 100644
--- a/src/core/helpers/MemoryHelpers.h
+++ b/src/core/helpers/MemoryHelpers.h
@@ -116,5 +116,25 @@ void release_prepare_tensors(WorkspaceData<TensorType> &workspace, ITensorPack &
}),
workspace.end());
}
+
+/** Utility function to release tensors with lifetime marked as Prepare */
+template <typename TensorType>
+void release_temporaries(const experimental::MemoryRequirements &mem_reqs,
+ WorkspaceData<TensorType> &workspace)
+{
+ for(auto &ws : workspace)
+ {
+ const int slot = ws.slot;
+ for(auto &m : mem_reqs)
+ {
+ if(m.slot == slot && m.lifetime == experimental::MemoryLifetime::Prepare)
+ {
+ auto tensor = ws.tensor.get();
+ tensor->allocator()->free();
+ break;
+ }
+ }
+ }
+}
} // namespace arm_compute
#endif /* SRC_COMMON_MEMORY_HELPERS_H */
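release_temporaries() factors out a loop that previously appeared verbatim in five functions (see the hunks that follow). A short sketch of the call site, using the Impl member names from those hunks:

// Inside a function's prepare(): free every workspace tensor whose slot is
// declared with MemoryLifetime::Prepare, i.e. scratch that is only needed
// for one-off work such as weight reshaping.
release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
_impl->is_prepared = true;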
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index daa14b1b3a..d815a73b93 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -23,187 +23,41 @@
*/
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
-#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
-#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuTransposeKernel.h"
-
-#include <cmath>
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuFullyConnected.h"
namespace arm_compute
{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-// Get min, max bound of a quantized assymetric output tensor, with the effect of fused activation
-std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
-{
- PixelValue type_min{};
- PixelValue type_max{};
- std::tie(type_min, type_max) = get_min_max(data_type);
- const UniformQuantizationInfo q_unif = q_info.uniform();
-
- if(act_info.enabled())
- {
- switch(act_info.activation())
- {
- case ActivationLayerInfo::ActivationFunction::RELU:
- type_min = PixelValue(q_unif.offset);
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- type_min = PixelValue(q_unif.offset);
- type_max = PixelValue(act_info.a(), data_type, q_info);
- break;
- case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
- type_min = PixelValue(act_info.b(), data_type, q_info);
- type_max = PixelValue(act_info.a(), data_type, q_info);
- break;
- default:
- ARM_COMPUTE_ERROR("Activation function not supported.");
- break;
- }
- }
-
- return std::make_pair(type_min, type_max);
-}
+using namespace arm_compute::experimental;
-Status get_gemmlowp_output_stage_info(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const ActivationLayerInfo &act,
- GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
+struct NEFullyConnectedLayer::Impl
{
- const auto data_type = input->data_type();
- const QuantizationInfo oq_info = output->quantization_info();
- const UniformQuantizationInfo iq_unif = input->quantization_info().uniform();
- const UniformQuantizationInfo wq_unif = weights->quantization_info().uniform();
- const UniformQuantizationInfo oq_unif = oq_info.uniform();
-
- float multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
- int32_t output_multiplier;
- int32_t output_shift;
+ MemoryGroup memory_group{};
+ IWeightsManager *weights_manager{ nullptr };
- ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
+ std::unique_ptr<cpu::CpuFullyConnected> op{ nullptr };
- PixelValue type_min{};
- PixelValue type_max{};
- std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);
+ const ITensor *original_weights{ nullptr };
- gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
- gemmlowp_output_stage_info.gemmlowp_shift = output_shift;
- gemmlowp_output_stage_info.gemmlowp_offset = oq_unif.offset;
- gemmlowp_output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- gemmlowp_output_stage_info.gemmlowp_min_bound = type_min.get<int32_t>();
- gemmlowp_output_stage_info.gemmlowp_max_bound = type_max.get<int32_t>();
+ ITensorPack run_pack{};
+ ITensorPack prep_pack{};
+ WorkspaceData<Tensor> workspace{};
+ experimental::MemoryRequirements aux_mem_req{};
- return Status{};
-}
-
-Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act)
-{
- if(is_data_type_quantized_asymmetric(input->data_type()))
- {
- // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
- // Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info(input->quantization_info().uniform().scale, -input->quantization_info().uniform().offset);
- const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);
-
- GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
- ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(input, weights, output, act, gemmlowp_output_stage_info));
-
- GEMMInfo gemm_info;
- gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
-
- // Validate gemmlowp function
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_quantization_info(input_quantization_info),
- &weights->clone()->set_quantization_info(weights_quantization_info),
- biases,
- output,
- gemm_info));
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(input, weights, biases, output, 1.f, 1.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
- }
-
- return Status{};
-}
-} // namespace
+ bool is_prepared{ false };
+};
NEFullyConnectedLayer::~NEFullyConnectedLayer() = default;
NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
- : _memory_group(std::move(memory_manager)), _weights_manager(weights_manager), _flatten(), _convert_weights(), _convert_weights_managed(), _reshape_weights_function(),
- _reshape_weights_managed_function(), _mm_gemm(nullptr, weights_manager), _mm_gemmlowp(nullptr, weights_manager), _flatten_output(), _converted_weights_output(), _reshape_weights_output(),
- _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false), _is_quantized_asymmetric(false), _is_prepared(false)
+ : _impl(std::make_unique<Impl>())
{
-}
-
-void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
-{
- if(_is_quantized_asymmetric)
- {
- // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
- // Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->info()->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
-
- input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
- weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
-
- // Configure gemmlowp function and output stage for asymmetric quantized types
- GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
- const Status status = get_gemmlowp_output_stage_info(input->info(), weights->info(), output->info(), act, gemmlowp_output_stage_info);
- ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);
-
- GEMMInfo gemm_info;
- gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
- gemm_info.set_activation_info(act);
- _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);
-
- // Revert back QuantizatioInfo as input and weights could be used in other fully connected layers
- input->info()->set_quantization_info(input_quantization_info);
- weights->info()->set_quantization_info(weights_quantization_info);
- }
- else
- {
- // Configure matrix multiply kernel
- GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
- gemm_info.set_activation_info(act);
- _mm_gemm.configure(input, weights, biases, output, 1.f, 1.0f, gemm_info);
- }
-}
-
-void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
-{
- ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));
-
- // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
-
- // Initialize output tensor for flatten
- TensorShape shape_flatten = compute_flatten_shape(input->info());
- _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
-
- // Configure flatten kernel
- _memory_group.manage(&_flatten_output);
-
- _flatten.configure(input, &_flatten_output);
-
- // Configure matrix multiply kernel
- configure_mm(&_flatten_output, weights, biases, output, act);
-
- // Allocate the output tensor for flatten once all the configure methods have been called
- _flatten_output.allocator()->allocate();
-}
-
-void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
-{
- ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));
-
- // Configure matrix multiply kernel
- configure_mm(input, weights, biases, output, act);
+ _impl->memory_group = MemoryGroup(std::move(memory_manager));
+ _impl->weights_manager = weights_manager;
}
void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
@@ -217,266 +71,54 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
output->info(),
fc_info));
- _are_weights_converted = true;
- _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
- _is_fc_after_conv = true;
- _is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->info()->data_type());
- _original_weights = weights;
-
- if(_weights_manager)
- {
- _weights_manager->manage(weights);
- }
-
- // With the Fully Connected layer we can have 4 different cases:
- // 1) Convolution layer -> Fully Connected layer without batches
- // 2) Fully Connected layer -> Fully Connected layer without batches
- // 3) Convolution layer -> Fully Connected layer with batches
- // 4) Fully Connected layer -> Fully Connected layer with batches
-
- const ITensor *weights_to_use = weights;
-
- // Check if we have a fully connected layer with batches
- const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
- if(is_batched_fc_layer)
- {
- _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
- input->info()->tensor_shape().cend(),
- output->info()->tensor_shape().cbegin() + 1));
- }
- else
- {
- _is_fc_after_conv = input->info()->num_dimensions() > 1;
- }
-
- // Reshape weights if needed
- if(!_are_weights_reshaped)
- {
- if(_weights_manager && _weights_manager->are_weights_managed(weights))
- {
- _reshape_weights_managed_function.configure(weights);
- weights_to_use = _weights_manager->acquire(weights, &_reshape_weights_managed_function);
- }
- else
- {
- // Reshape the weights
- _reshape_weights_function.configure(weights, &_reshape_weights_output);
- weights_to_use = &_reshape_weights_output;
- }
- }
-
- // Convert weights if needed
- if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
- {
- if(_weights_manager && _weights_manager->are_weights_managed(weights_to_use))
- {
- _convert_weights_managed.configure(weights_to_use,
- input->info()->tensor_shape(),
- fc_info.weights_trained_layout);
- weights_to_use = _weights_manager->acquire(weights, &_convert_weights_managed);
- }
- else
- {
- // Convert weights
- _convert_weights.configure(weights_to_use,
- &_converted_weights_output,
- input->info()->tensor_shape(),
- fc_info.weights_trained_layout);
+ _impl->op = std::make_unique<cpu::CpuFullyConnected>();
+ _impl->original_weights = weights;
+ _impl->is_prepared = false;
- weights_to_use = &_converted_weights_output;
- }
- _are_weights_converted = false;
- }
+ _impl->op->configure(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), fc_info);
- if(_is_fc_after_conv)
- {
- // Fully Connected layer after a Convolution Layer without batches
- configure_conv_fc(input, weights_to_use, biases, output, fc_info.activation_info);
- }
- else
- {
- // Fully Connected layer after a Fully Connected Layer without batches
- configure_fc_fc(input, weights_to_use, biases, output, fc_info.activation_info);
- }
-
- _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
+ _impl->aux_mem_req = _impl->op->workspace();
+ _impl->run_pack = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } };
+ _impl->prep_pack = { { ACL_SRC_1, weights }, { ACL_SRC_2, biases } };
+ _impl->workspace = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}
Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
FullyConnectedLayerInfo fc_info)
{
- ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
- ARM_COMPUTE_RETURN_ERROR_ON(biases != nullptr && biases->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(input->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
- && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
-
- bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
- bool is_fc_after_conv = true;
-
- const ITensorInfo &flatten_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
- const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
- const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
-
- // With the Fully Connected layer we can have 4 different cases:
- // 1) Convolution layer -> Fully Connected layer without batches
- // 2) Fully Connected layer -> Fully Connected layer without batches
- // 3) Convolution layer -> Fully Connected layer with batches
- // 4) Fully Connected layer -> Fully Connected layer with batches
-
- const ITensorInfo *input_to_use = input;
- const ITensorInfo *weights_to_use = weights;
-
- // Check if we have a fully connected layer with batches
- const bool is_batched_fc_layer = output->dimension(1) > 1;
-
- if(is_batched_fc_layer)
- {
- is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
- input->tensor_shape().cend(),
- output->tensor_shape().cbegin() + 1));
- }
- else
- {
- is_fc_after_conv = input->num_dimensions() > 1;
- }
-
- if(!weights_reshaped)
- {
- // Validate reshape weights kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NETranspose::validate(weights, &reshaped_weights));
- weights_to_use = &reshaped_weights;
- }
-
- if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
- {
- // Validate convert weights kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(weights_to_use,
- &converted_weights,
- input->tensor_shape(),
- fc_info.weights_trained_layout));
- weights_to_use = &converted_weights;
- }
-
- if(is_fc_after_conv)
- {
- // Fully Connected layer after a Convolution Layer without batches
- ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));
-
- // Validate flatten kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayer::validate(input, &flatten_input));
- input_to_use = &flatten_input;
- }
- else
- {
- // Fully Connected layer after a Fully Connected Layer without batches
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
- }
- // Validate matrix multiply kernel
- ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(input_to_use, weights_to_use, biases, output, fc_info.activation_info));
-
- return Status{};
+ return cpu::CpuFullyConnected::validate(input, weights, biases, output, fc_info);
}
void NEFullyConnectedLayer::run()
{
prepare();
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- // Linearize input if it comes from a convolutional layer
- if(_is_fc_after_conv)
- {
- _flatten.run();
- }
-
- // Run matrix multiply
- if(_is_quantized_asymmetric)
- {
- _mm_gemmlowp.run();
- }
- else
- {
- _mm_gemm.run();
- }
+ MemoryGroupResourceScope scope_mg(_impl->memory_group);
+ _impl->op->run(_impl->run_pack);
}
void NEFullyConnectedLayer::prepare()
{
- if(!_is_prepared)
+ if(!_impl->is_prepared)
{
- if(!_weights_manager)
- {
- ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
- }
-
- auto release_unused = [](Tensor * w)
- {
- if(!w->is_used())
- {
- w->allocator()->free();
- }
- };
+ _impl->op->prepare(_impl->prep_pack);
- // Pointer to current weights
- const ITensor *cur_weights = _original_weights;
+ auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
+ _impl->aux_mem_req.end(),
+ [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
- // Reshape of the weights (happens only once)
- if(!_are_weights_reshaped)
+ if(has_reshape != std::end(_impl->aux_mem_req))
{
- if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
- {
- cur_weights = _weights_manager->run(cur_weights, &_reshape_weights_managed_function);
- }
- else
- {
- // Reshape of the weights (happens only once)
- if(!_are_weights_reshaped)
- {
- // Run reshape weights kernel and mark weights as unused
- _reshape_weights_output.allocator()->allocate();
- _reshape_weights_function.run();
- }
- cur_weights->mark_as_unused();
- cur_weights = &_reshape_weights_output;
- }
- _are_weights_reshaped = true;
+ _impl->original_weights->mark_as_unused();
}
-
- // Convert weights if needed (happens only once)
- if(!_are_weights_converted)
- {
- if(_weights_manager && _weights_manager->are_weights_managed(cur_weights))
- {
- _weights_manager->run(cur_weights, &_convert_weights_managed);
- }
- else
- {
- _converted_weights_output.allocator()->allocate();
- _convert_weights.run();
- cur_weights->mark_as_unused();
- }
-
- _are_weights_converted = true;
- }
-
- // Release reshaped weights if unused
- release_unused(&_reshape_weights_output);
-
- // Prepare GEMM prepare and release unused weights
- if(!_is_quantized_asymmetric)
+ else
{
- _mm_gemm.prepare();
+ _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->original_weights);
}
- // Release converted weights if unused
- release_unused(&_reshape_weights_output);
- release_unused(&_converted_weights_output);
-
- _is_prepared = true;
+ // Release temporary tensors that are only used in prepare stage
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
+ _impl->is_prepared = true;
}
}
} // namespace arm_compute
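For reference, the lifetime values that steer the prepare() logic above, summarised as used in this patch (the summary is ours, not part of the commit):

// MemoryLifetime semantics driving prepare():
//  - Temporary:  needed only while run() executes; managed by the memory group.
//  - Prepare:    needed only during prepare(); freed by release_temporaries().
//  - Persistent: must survive across runs, e.g. reshaped weights.
//
// The find_if over aux_mem_req therefore asks "does the operator keep its own
// persistent copy of the weights?". If yes, the original weights can be marked
// unused; if not, they are re-added to the run pack so every run() can reach them.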
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 4bf330fa1e..b470afe1c6 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -112,19 +112,7 @@ void NEGEMM::prepare()
}
// Release temporary tensors that are only used in prepare stage
- for(auto &ws : _impl->workspace)
- {
- const int slot = ws.slot;
- for(auto &m : _impl->aux_mem_req)
- {
- if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
- {
- auto tensor = ws.tensor.get();
- tensor->allocator()->free();
- break;
- }
- }
- }
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
_impl->is_prepared = true;
}
}
diff --git a/src/runtime/NEON/functions/NEGEMMConv2d.cpp b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
index 7e2ce70444..2230e80e4b 100644
--- a/src/runtime/NEON/functions/NEGEMMConv2d.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
@@ -102,19 +102,7 @@ void NEGEMMConv2d::prepare()
}
// Release temporary tensors that are only used in prepare stage
- for(auto &ws : _impl->workspace)
- {
- const int slot = ws.slot;
- for(auto &m : _impl->aux_mem_req)
- {
- if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
- {
- auto tensor = ws.tensor.get();
- tensor->allocator()->free();
- break;
- }
- }
- }
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
_impl->is_prepared = true;
}
}
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index c405786c80..f63fcb02fd 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -105,19 +105,9 @@ void NEGEMMConvolutionLayer::prepare()
{
_impl->weights->mark_as_unused();
}
- for(auto &ws : _impl->workspace_tensors)
- {
- const int slot = ws.slot;
- for(auto &m : _impl->aux_mem_req)
- {
- if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
- {
- auto tensor = ws.tensor.get();
- tensor->allocator()->free();
- break;
- }
- }
- }
+
+ // Release temporary tensors that are only used in prepare stage
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace_tensors);
_impl->is_prepared = true;
}
}
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 64507495ca..b85530c70f 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -108,19 +108,7 @@ void NEGEMMLowpMatrixMultiplyCore::prepare()
}
// Release temporary tensors that are only used in prepare stage
- for(auto &ws : _impl->workspace_tensors)
- {
- const int slot = ws.slot;
- for(auto &m : _impl->aux_mem_req)
- {
- if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
- {
- auto tensor = ws.tensor.get();
- tensor->allocator()->free();
- break;
- }
- }
- }
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace_tensors);
_impl->is_prepared = true;
}
}
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index b91048a426..98ff12590b 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -97,19 +97,7 @@ void NEWinogradConvolutionLayer::prepare()
_impl->original_weights->mark_as_unused();
// Release temporary tensors that are only used in prepare stage
- for(auto &ws : _impl->workspace)
- {
- const int slot = ws.slot;
- for(auto &m : _impl->aux_mem_req)
- {
- if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
- {
- auto tensor = ws.tensor.get();
- tensor->allocator()->free();
- break;
- }
- }
- }
+ release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
_impl->is_prepared = true;
}
diff --git a/src/runtime/cpu/operators/CpuFullyConnected.cpp b/src/runtime/cpu/operators/CpuFullyConnected.cpp
new file mode 100644
index 0000000000..2b6d051482
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuFullyConnected.cpp
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuFullyConnected.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/cpu/kernels/CpuTransposeKernel.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h"
+#include "src/runtime/cpu/operators/CpuFlatten.h"
+#include "src/runtime/cpu/operators/CpuGemm.h"
+#include "src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
+#include "src/runtime/cpu/utils/CpuAuxTensorHandler.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+using namespace arm_compute::experimental;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+// Get min, max bound of a quantized asymmetric dst tensor, with the effect of fused activation
+std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
+{
+ PixelValue type_min{};
+ PixelValue type_max{};
+ std::tie(type_min, type_max) = get_min_max(data_type);
+ const UniformQuantizationInfo q_unif = q_info.uniform();
+
+ if(act_info.enabled())
+ {
+ switch(act_info.activation())
+ {
+ case ActivationLayerInfo::ActivationFunction::RELU:
+ type_min = PixelValue(q_unif.offset);
+ break;
+ case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+ type_min = PixelValue(q_unif.offset);
+ type_max = PixelValue(act_info.a(), data_type, q_info);
+ break;
+ case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+ type_min = PixelValue(act_info.b(), data_type, q_info);
+ type_max = PixelValue(act_info.a(), data_type, q_info);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Activation function not supported.");
+ break;
+ }
+ }
+
+ return std::make_pair(type_min, type_max);
+}
+
+Status get_gemmlowp_output_stage_info(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act,
+ GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
+{
+ const auto data_type = src->data_type();
+ const QuantizationInfo oq_info = dst->quantization_info();
+ const UniformQuantizationInfo iq_unif = src->quantization_info().uniform();
+ const UniformQuantizationInfo wq_unif = weights->quantization_info().uniform();
+ const UniformQuantizationInfo oq_unif = oq_info.uniform();
+
+ float multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
+ int32_t output_multiplier;
+ int32_t output_shift;
+
+ ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
+
+ PixelValue type_min{};
+ PixelValue type_max{};
+ std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);
+
+ gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
+ gemmlowp_output_stage_info.gemmlowp_shift = output_shift;
+ gemmlowp_output_stage_info.gemmlowp_offset = oq_unif.offset;
+ gemmlowp_output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ gemmlowp_output_stage_info.gemmlowp_min_bound = type_min.get<int32_t>();
+ gemmlowp_output_stage_info.gemmlowp_max_bound = type_max.get<int32_t>();
+
+ return Status{};
+}
+
+Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ActivationLayerInfo &act)
+{
+ if(is_data_type_quantized_asymmetric(src->data_type()))
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate src and weights offset
+ const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
+ const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);
+
+ GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
+ ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(src, weights, dst, act, gemmlowp_output_stage_info));
+
+ GEMMInfo gemm_info;
+ gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
+
+ // Validate gemmlowp function
+ TensorInfo src_info = src->clone()->set_quantization_info(src_quantization_info);
+ TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmLowpMatrixMultiplyCore::validate(&src_info,
+ &weights_info,
+ biases,
+ dst,
+ gemm_info));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuGemm::validate(src, weights, biases, dst, 1.f, 1.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
+ }
+
+ return Status{};
+}
+} // namespace
+
+CpuFullyConnected::CpuFullyConnected()
+ : _flatten(nullptr),
+ _convert_weights(nullptr),
+ _transpose_weights(nullptr),
+ _mm_gemm(nullptr),
+ _mm_gemmlowp(nullptr),
+ _flattened_src(),
+ _converted_weights(),
+ _reshaped_weights(),
+ _aux_mem(Count),
+ _are_weights_converted(false),
+ _are_weights_reshaped(false),
+ _is_fc_after_conv(false),
+ _is_quantized_asymmetric(false),
+ _is_prepared(false)
+
+{
+}
+
+CpuFullyConnected::~CpuFullyConnected() = default;
+
+void CpuFullyConnected::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
+{
+ if(_is_quantized_asymmetric)
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate src and weights offset
+ const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
+ const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);
+
+ TensorInfo src_info = src->clone()->set_quantization_info(src_quantization_info);
+ TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);
+
+ // Configure gemmlowp function and output stage for asymmetric quantized types
+ GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
+ const Status status = get_gemmlowp_output_stage_info(&src_info, &weights_info, dst, act, gemmlowp_output_stage_info);
+ ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);
+
+ GEMMInfo gemm_info;
+ gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
+ gemm_info.set_activation_info(act);
+ _mm_gemmlowp = std::make_unique<CpuGemmLowpMatrixMultiplyCore>();
+ _mm_gemmlowp->configure(&src_info, &weights_info, biases, dst, gemm_info);
+ }
+ else
+ {
+ // Configure matrix multiply kernel
+ GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
+ gemm_info.set_activation_info(act);
+ _mm_gemm = std::make_unique<CpuGemm>();
+ _mm_gemm->configure(src, weights, biases, dst, 1.f, 1.0f, gemm_info);
+ }
+}
+
+void CpuFullyConnected::configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
+{
+ ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));
+
+ // If the fully connected layer is called after a convolution layer, the src tensor must be linearized
+
+ // Initialize output tensor for flatten
+ auto_init_if_empty(_flattened_src, src->clone()->set_tensor_shape(compute_flatten_shape(src)));
+
+ _flatten = std::make_unique<CpuFlatten>();
+ _flatten->configure(src, &_flattened_src);
+
+ // Configure matrix multiply kernel
+ configure_mm(&_flattened_src, weights, biases, dst, act);
+}
+
+void CpuFullyConnected::configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
+{
+ ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));
+
+ // Configure matrix multiply kernel
+ configure_mm(src, weights, biases, dst, act);
+}
+
+void CpuFullyConnected::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
+ FullyConnectedLayerInfo fc_info)
+{
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(CpuFullyConnected::validate(src,
+ weights,
+ biases != nullptr ? biases : nullptr,
+ dst,
+ fc_info));
+
+ _are_weights_converted = true;
+ _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
+ _is_fc_after_conv = true;
+ _is_quantized_asymmetric = is_data_type_quantized_asymmetric(src->data_type());
+ _is_prepared = false;
+
+ // With the Fully Connected layer we can have 4 different cases:
+ // 1) Convolution layer -> Fully Connected layer without batches
+ // 2) Fully Connected layer -> Fully Connected layer without batches
+ // 3) Convolution layer -> Fully Connected layer with batches
+ // 4) Fully Connected layer -> Fully Connected layer with batches
+
+ const ITensorInfo *weights_to_use = weights;
+
+ // Check if we have a fully connected layer with batches
+ const bool is_batched_fc_layer = dst->dimension(1) > 1;
+ if(is_batched_fc_layer)
+ {
+ _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
+ src->tensor_shape().cend(),
+ dst->tensor_shape().cbegin() + 1));
+ }
+ else
+ {
+ _is_fc_after_conv = src->num_dimensions() > 1;
+ }
+
+ // Reshape weights if needed
+ if(!_are_weights_reshaped)
+ {
+ // Reshape the weights
+ _transpose_weights = std::make_unique<kernels::CpuTransposeKernel>();
+ _transpose_weights->configure(weights, &_reshaped_weights);
+ weights_to_use = &_reshaped_weights;
+ }
+
+ // Convert weights if needed
+ if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
+ {
+ // Convert weights
+ _convert_weights = std::make_unique<CpuConvertFullyConnectedWeights>();
+ _convert_weights->configure(weights_to_use,
+ &_converted_weights,
+ src->tensor_shape(),
+ fc_info.weights_trained_layout);
+
+ weights_to_use = &_converted_weights;
+ _are_weights_converted = false;
+ }
+
+ if(_is_fc_after_conv)
+ {
+ // Fully Connected layer after a Convolution Layer without batches
+ configure_conv_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
+ }
+ else
+ {
+ // Fully Connected layer after a Fully Connected Layer without batches
+ configure_fc_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
+ }
+
+ _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
+
+ // Set auxiliary memory requirements
+ auto gemm_mem_req = (_is_quantized_asymmetric) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
+ for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
+ {
+ _aux_mem[i] = gemm_mem_req[i];
+ }
+
+ if(_aux_mem[Pretranspose].size > 0)
+ {
+ // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
+ _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Prepare, _reshaped_weights.total_size());
+ _aux_mem[ConvertedWeights] = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
+ }
+ else
+ {
+ _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Persistent, _reshaped_weights.total_size());
+ _aux_mem[ConvertedWeights] = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Persistent, _converted_weights.total_size());
+ }
+ _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
+}
+
+Status CpuFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ FullyConnectedLayerInfo fc_info)
+{
+ ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
+ ARM_COMPUTE_RETURN_ERROR_ON(biases != nullptr && biases->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
+ && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+
+ bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
+ bool is_fc_after_conv = true;
+
+ const ITensorInfo &flatten_src = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)));
+ const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
+ const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
+
+ // With the Fully Connected layer we can have 4 different cases:
+ // 1) Convolution layer -> Fully Connected layer without batches
+ // 2) Fully Connected layer -> Fully Connected layer without batches
+ // 3) Convolution layer -> Fully Connected layer with batches
+ // 4) Fully Connected layer -> Fully Connected layer with batches
+
+ const ITensorInfo *src_to_use = src;
+ const ITensorInfo *weights_to_use = weights;
+
+ // Check if we have a fully connected layer with batches
+ const bool is_batched_fc_layer = dst->dimension(1) > 1;
+
+ if(is_batched_fc_layer)
+ {
+ is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
+ src->tensor_shape().cend(),
+ dst->tensor_shape().cbegin() + 1));
+ }
+ else
+ {
+ is_fc_after_conv = src->num_dimensions() > 1;
+ }
+
+ if(!weights_reshaped)
+ {
+ // Validate reshape weights kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuTransposeKernel::validate(weights, &reshaped_weights));
+ weights_to_use = &reshaped_weights;
+ }
+
+ if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
+ {
+ // Validate convert weights kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuConvertFullyConnectedWeights::validate(weights_to_use,
+ &converted_weights,
+ src->tensor_shape(),
+ fc_info.weights_trained_layout));
+ weights_to_use = &converted_weights;
+ }
+
+ if(is_fc_after_conv)
+ {
+ // Fully Connected layer after a Convolution Layer without batches
+ ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));
+
+ // Validate flatten kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuFlatten::validate(src, &flatten_src));
+ src_to_use = &flatten_src;
+ }
+ else
+ {
+ // Fully Connected layer after a Fully Connected Layer without batches
+ ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
+ }
+ // Validate matrix multiply kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(src_to_use, weights_to_use, biases, dst, fc_info.activation_info));
+
+ return Status{};
+}
+
+void CpuFullyConnected::run(ITensorPack &tensors)
+{
+ prepare(tensors);
+
+ auto src = tensors.get_const_tensor(ACL_SRC_0);
+
+ CpuAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
+
+ // Linearize src if it comes from a convolutional layer
+ if(_is_fc_after_conv)
+ {
+ ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
+ _flatten->run(flatten_pack);
+ }
+
+ ITensorPack gemm_pack = tensors;
+ gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
+
+ // Run matrix multiply
+ if(_is_quantized_asymmetric)
+ {
+ _mm_gemmlowp->run(gemm_pack);
+ }
+ else
+ {
+ _mm_gemm->run(gemm_pack);
+ }
+}
+
+void CpuFullyConnected::prepare(ITensorPack &tensors)
+{
+ if(!_is_prepared)
+ {
+ auto weights = tensors.get_const_tensor(ACL_SRC_1);
+
+ CpuAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
+ CpuAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);
+
+ // Pointer to current weights
+ const ITensor *cur_weights = weights;
+
+ // Reshape of the weights (happens only once)
+ if(!_are_weights_reshaped)
+ {
+ // Run reshape weights kernel and mark weights as unused
+ ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
+ NEScheduler::get().schedule_op(_transpose_weights.get(), Window::DimY, _transpose_weights->window(), transpose_pack);
+
+ cur_weights->mark_as_unused();
+ cur_weights = reshaped_weights.get();
+
+ _are_weights_reshaped = true;
+ }
+
+ // Convert weights if needed (happens only once)
+ if(!_are_weights_converted)
+ {
+ ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
+ _convert_weights->run(convert_pack);
+
+ cur_weights->mark_as_unused();
+ cur_weights = converted_weights.get();
+
+ _are_weights_converted = true;
+ }
+
+ tensors.add_const_tensor(ACL_SRC_1, cur_weights);
+
+ // Run GEMM prepare and release unused weights
+ if(!_is_quantized_asymmetric)
+ {
+ _mm_gemm->prepare(tensors);
+ }
+ else
+ {
+ _mm_gemmlowp->prepare(tensors);
+ }
+
+ _is_prepared = true;
+ }
+}
+
+experimental::MemoryRequirements CpuFullyConnected::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace cpu
+} // namespace arm_compute
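The fixed-point output stage that get_gemmlowp_output_stage_info() fills in can be sanity-checked with a small worked example (the numbers are illustrative; the shift convention and rounding follow the library's quantization helpers):

// Worked example for the requantization parameters:
//   src scale s_i = 0.02, weights scale s_w = 0.005, dst scale s_o = 0.01
//   real multiplier = (s_i * s_w) / s_o = 0.0001 / 0.01 = 0.01
// calculate_quantized_multiplier() normalises this into m * 2^-shift with
// m in [0.5, 1):   0.01 = 0.64 * 2^-6
//   gemmlowp_multiplier = round(0.64 * 2^31) = 1374389535
//   gemmlowp_shift      = 6 (applied as a right shift)
// QUANTIZE_DOWN_FIXEDPOINT then maps each int32 accumulator acc roughly as
//   dst = clamp(round(acc * m * 2^-shift) + gemmlowp_offset, min_bound, max_bound)
// with min/max taken from get_quantized_asymmetric_output_min_max().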
diff --git a/src/runtime/cpu/operators/CpuFullyConnected.h b/src/runtime/cpu/operators/CpuFullyConnected.h
new file mode 100644
index 0000000000..954a7b7ffc
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuFullyConnected.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_FULLY_CONNECTED_H
+#define ARM_COMPUTE_CPU_FULLY_CONNECTED_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+#include "arm_compute/core/TensorInfo.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace cpu
+{
+// Forward declarations
+class CpuConvertFullyConnectedWeights;
+class CpuFlatten;
+class CpuGemm;
+class CpuGemmLowpMatrixMultiplyCore;
+namespace kernels
+{
+class CpuTransposeKernel;
+} // namespace kernels
+/** Basic function to compute a Fully Connected layer. This function calls the following kernels:
+ * -# @ref kernels::CpuIm2ColKernel (called when the input comes from a convolutional layer)
+ * -# @ref kernels::CpuTransposeKernel (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once)
+ * -# @ref CpuGemm or @ref CpuGemmLowpMatrixMultiplyCore (if quantized asymmetric)
+ * -# @ref kernels::CpuGemmMatrixAdditionKernel or @ref CpuGemmLowpOutputStage (if quantized asymmetric) (if @p biases is not equal to nullptr)
+ *
+ * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
+ */
+class CpuFullyConnected : public ICpuOperator
+{
+public:
+ /** Constructor */
+ CpuFullyConnected();
+ /** Destructor */
+ ~CpuFullyConnected();
+ /** Set the input and output tensors.
+ *
+ * Valid data layouts:
+ * - NHWC
+ * - NCHW
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:--------------|:------------------|:------|:--------------|
+ * |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |
+ * |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
+ *
+ * @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] weights Weights tensor info. The weights must be 2 dimensional.
+ * If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
+ * If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
+ * Data type supported: Same as @p src.
+ * @param[in] biases Bias tensor info. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED.
+ * @param[out] dst Destination tensor info. Its shape should be equal to the output of a matrix multiplication between:
+ * - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
+ * - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
+ * Data type supported: Same as @p src.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
+ */
+ void configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuFullyConnected
+ *
+ * Similar to @ref CpuFullyConnected
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
+
+ //Inherited methods override
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &tensors) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ void configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);
+ void configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);
+ void configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);
+
+ enum AuxTensorIdx
+ {
+ AsmGemmWorkspace = 0,
+ Pretranspose,
+ GemmTemp1, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
+ GemmTemp2, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
+ GemmTemp3, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
+ GemmTemp4, // CpuGemmLowpMatrixMultiplyCore only
+ GemmTemp5, // CpuGemmLowpMatrixMultiplyCore only
+ GemmTemp6, // CpuGemmLowpMatrixMultiplyCore only
+ GemmTemp7, // CpuGemmLowpMatrixMultiplyCore only
+ TransposedWeights,
+ ConvertedWeights,
+ FlattenedSrc,
+ Count
+ };
+
+ std::unique_ptr<CpuFlatten> _flatten;
+ std::unique_ptr<CpuConvertFullyConnectedWeights> _convert_weights;
+ std::unique_ptr<kernels::CpuTransposeKernel> _transpose_weights;
+ std::unique_ptr<CpuGemm> _mm_gemm;
+ std::unique_ptr<CpuGemmLowpMatrixMultiplyCore> _mm_gemmlowp;
+
+ TensorInfo _flattened_src;
+ TensorInfo _converted_weights;
+ TensorInfo _reshaped_weights;
+
+ experimental::MemoryRequirements _aux_mem;
+
+ bool _are_weights_converted;
+ bool _are_weights_reshaped;
+ bool _is_fc_after_conv;
+ bool _is_quantized_asymmetric;
+ bool _is_prepared;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_FULLY_CONNECTED_H */
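The AuxTensorIdx slots above map one-to-one onto the MemoryRequirements returned by workspace(). A hedged sketch of what a caller (or manage_workspace() in the runtime layer) does with them; the loop body mirrors the helper rather than prescribing new API:

// Bind one auxiliary tensor per requested slot before running the operator.
std::vector<std::unique_ptr<Tensor>> workspace_memory;
for(const auto &req : op.workspace())
{
    if(req.size == 0) { continue; } // slot unused for this configuration
    auto aux = std::make_unique<Tensor>();
    aux->allocator()->init(TensorInfo(TensorShape(req.size), 1, DataType::U8));
    aux->allocator()->allocate(); // Temporary slots can instead go to a memory group
    pack.add_tensor(req.slot, aux.get()); // req.slot encodes the AuxTensorIdx via offset_int_vec()
    workspace_memory.emplace_back(std::move(aux)); // keep alive per req.lifetime
}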
diff --git a/src/runtime/cpu/operators/CpuGemm.cpp b/src/runtime/cpu/operators/CpuGemm.cpp
index c6abe1f893..bd3f231001 100644
--- a/src/runtime/cpu/operators/CpuGemm.cpp
+++ b/src/runtime/cpu/operators/CpuGemm.cpp
@@ -128,7 +128,7 @@ void CpuGemm::configure(const ITensorInfo *a, const ITensorInfo *b, const ITenso
{
_add_bias = std::make_unique<cpu::CpuAdd>();
_add_bias->configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
- _aux_mem[TempResult] = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Persistent, _tmp_d.total_size());
+ _aux_mem[TempResult] = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Temporary, _tmp_d.total_size());
}
}
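For context on the one-line lifetime fix above: Temporary auxiliary buffers only need to be alive inside run(), so their workspace slots can be recycled, while Persistent ones must survive across calls (e.g. data produced in prepare() and read by later run() calls). A rough sketch of the distinction, with illustrative indices and sizes:

    // Scratch consumed within a single run(); the memory manager may reuse the slot.
    _aux_mem[TempResult]   = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Temporary, _tmp_d.total_size());
    // Output of prepare() that later run() calls depend on must persist.
    _aux_mem[Pretranspose] = MemoryInfo(offset_int_vec(Pretranspose), MemoryLifetime::Persistent, pretranspose_size);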
diff --git a/src/runtime/cpu/operators/CpuWinogradConv2d.cpp b/src/runtime/cpu/operators/CpuWinogradConv2d.cpp
index bf105d5880..a734e1797c 100644
--- a/src/runtime/cpu/operators/CpuWinogradConv2d.cpp
+++ b/src/runtime/cpu/operators/CpuWinogradConv2d.cpp
@@ -71,164 +71,164 @@ arm_gemm::Activation arm_gemm_activation_from_acl_activation(const ActivationLay
}
}
-inline Status validate_kernel_3x3(const Size2D input_dims, const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_3x3(const Size2D input_dims, const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
- if(input->data_type() == DataType::F32)
+ if(src->data_type() == DataType::F32)
{
if(input_dims.width > 4 && input_dims.height > 4)
{
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 4, 4, 3, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 4, 4, 3, 3>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 4, 4, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 4, 4, 3, 3>::validate(batched_mm_output, biases, dst, winograd_info)));
}
else
{
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 2, 3, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 2, 3, 3>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 2, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 2, 3, 3>::validate(batched_mm_output, biases, dst, winograd_info)));
}
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- else if(input->data_type() == DataType::F16)
+ else if(src->data_type() == DataType::F16)
{
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<__fp16, 4, 4, 3, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<__fp16, 4, 4, 3, 3>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<__fp16, 4, 4, 3, 3>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<__fp16, 4, 4, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<__fp16, 4, 4, 3, 3>::validate(batched_mm_output, biases, dst, winograd_info)));
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_5x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_5x5(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 2, 5, 5>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 2, 5, 5>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 2, 5, 5>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 2, 5, 5>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_3x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_3x1(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 6, 1, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 6, 1, 3>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 1, 6, 1, 3>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 6, 1, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 6, 1, 3>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_1x3(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_1x3(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 6, 1, 3, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 6, 1, 3, 1>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 6, 1, 3, 1>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 6, 1, 3, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 6, 1, 3, 1>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_5x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_5x1(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 4, 1, 5>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 4, 1, 5>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 1, 4, 1, 5>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 4, 1, 5>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 4, 1, 5>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_1x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_1x5(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 4, 1, 5, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 4, 1, 5, 1>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 4, 1, 5, 1>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 4, 1, 5, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 4, 1, 5, 1>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_7x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_7x1(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 2, 1, 7>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 1, 2, 1, 7>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 1, 2, 1, 7>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 2, 1, 7>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 1, 2, 1, 7>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Status validate_kernel_1x7(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
- const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+inline Status validate_kernel_1x7(const ITensorInfo *src, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 1, 7, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformInputKernel<float, 2, 1, 7, 1>::validate(src, input0, winograd_info)));
ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformWeightsKernel<float, 2, 1, 7, 1>::validate(weights, input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 1, 7, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((CpuWinogradConv2dTransformOutputKernel<float, 2, 1, 7, 1>::validate(batched_mm_output, biases, dst, winograd_info)));
if(act_info.enabled())
{
- CpuActivation::validate(output, nullptr, act_info);
+ CpuActivation::validate(dst, nullptr, act_info);
}
return Status{};
}
-inline Tensor4DShape internal_get_input_shape(const ITensorInfo *input)
+inline Tensor4DShape internal_get_input_shape(const ITensorInfo *src)
{
- const DataLayout data_layout = input->data_layout();
- const int in_width = input->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH));
- const int in_height = input->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT));
- const int in_channels = input->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
- const int in_batches = input->dimension(3);
+ const DataLayout data_layout = src->data_layout();
+ const int in_width = src->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH));
+ const int in_height = src->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT));
+ const int in_channels = src->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
+ const int in_batches = src->dimension(3);
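+ // Width/height/channel are fetched by semantic index, so the returned shape is N, H, W, C regardless of the tensor's data layout.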
return Tensor4DShape{ in_batches, in_height, in_width, in_channels };
}
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_UNUSED(output);
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_UNUSED(dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd layer only supports unit strides.");
if(biases != nullptr)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
}
- return ICpuWinogradConv2dTransformWeightsKernel::validate(input, weights);
+ return ICpuWinogradConv2dTransformWeightsKernel::validate(src, weights);
}
Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataType data_type)
{
@@ -647,20 +647,20 @@ void CpuWinogradConv2d::configure(const ITensorInfo *src, const ITensorInfo *wei
_aux_mem[OutputWorkspace] = MemoryInfo(offset_int_vec(OutputWorkspace), MemoryLifetime::Persistent, output_workspace_size);
}
-Status CpuWinogradConv2d::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+Status CpuWinogradConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, bool enable_fast_math)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info));
// Get indices for the width and height
- const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+ const size_t idx_width = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->dimension(idx_width), input->dimension(idx_height));
+ const Size2D input_dims = Size2D(src->dimension(idx_width), src->dimension(idx_height));
const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
- const DataType data_type = input->data_type();
+ const DataType data_type = src->data_type();
const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, data_type);
// Check if the Winograd configuration requires fast math
@@ -674,11 +674,11 @@ Status CpuWinogradConv2d::validate(const ITensorInfo *input, const ITensorInfo *
kernel_size,
input_dims,
conv_info,
- input->data_layout());
+ src->data_layout());
// Validate input transform
- const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
- const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
+ const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);
+ const TensorInfo input0 = src->clone()->set_tensor_shape(input0_shape);
// Validate filter transform
const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
@@ -696,7 +696,7 @@ Status CpuWinogradConv2d::validate(const ITensorInfo *input, const ITensorInfo *
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != conv_info.pad_left(), "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_bottom(), "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_left(), "Only SAME or VALID padding supported");
- return validate_kernel_3x3(input_dims, input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_3x3(input_dims, src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(5, 5))
{
@@ -707,49 +707,49 @@ Status CpuWinogradConv2d::validate(const ITensorInfo *input, const ITensorInfo *
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != conv_info.pad_left(), "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_bottom(), "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_left(), "Only SAME or VALID padding supported");
- return validate_kernel_5x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_5x5(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
if(kernel_size == Size2D(3, 1))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 1, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 1, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_bottom() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_3x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_3x1(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(1, 3))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 1, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 1, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_right() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_1x3(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_1x3(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(5, 1))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 2, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 2, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_bottom() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_5x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_5x1(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(1, 5))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 2, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 2, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_right() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_1x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_1x5(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(7, 1))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 3, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 3, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_bottom() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_7x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_7x1(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else if(kernel_size == Size2D(1, 7))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 3, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 3, "Only SAME or VALID padding supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_right() != 0, "Only SAME or VALID padding supported");
- return validate_kernel_1x7(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ return validate_kernel_1x7(src, &input0, &input1, &batched_mm_output, weights, biases, dst, winograd_info, act_info);
}
else
{
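 // Note: across all the kernel-size branches above, each pad value must be either 0 (VALID) or (kernel_dim - 1) / 2 (SAME): {0,1} for 3, {0,2} for 5, {0,3} for 7.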
diff --git a/src/runtime/cpu/operators/CpuWinogradConv2d.h b/src/runtime/cpu/operators/CpuWinogradConv2d.h
index 14c61f7355..ae705ac86b 100644
--- a/src/runtime/cpu/operators/CpuWinogradConv2d.h
+++ b/src/runtime/cpu/operators/CpuWinogradConv2d.h
@@ -81,7 +81,7 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info = ActivationLayerInfo(),
bool enable_fast_math = false);
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 4bb48bf42c..8ba0f1f771 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,8 @@
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuFullyConnected.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/FullyConnectedLayerDataset.h"
@@ -94,6 +96,149 @@ const auto ActivationFunctionsQuantizedDataset = framework::dataset::make("Activ
TEST_SUITE(NEON)
TEST_SUITE(FullyConnectedLayer)
+/** Test case for memory injection in @ref cpu::CpuFullyConnected.
+ *
+ * Configure the operator once and inject memory at run-time in multiple executions.
+ *
+ * Checks performed in order:
+ * - Both runs compute the same output
+ */
+TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<cpu::CpuFullyConnected>();
+ const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
+ const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto fc_info = FullyConnectedLayerInfo{};
+ fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
+
+ // Source, weight and bias tensors are created and allocated once, then reused across the runs below
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+
+ ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+ ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+
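+ // manage_workspace allocates the auxiliary tensors reported by fc->workspace()
+ // and registers them in the packs: memory is injected by the caller rather
+ // than owned by the operator.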
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
+
+ auto run_conv = [&]() -> Tensor
+ {
+ auto dst = create_tensor<Tensor>(dst_info);
+ dst.allocator()->allocate();
+ run_pack.add_tensor(TensorType::ACL_DST, &dst);
+
+ library->fill_tensor_value(Accessor(src), 1.f);
+ library->fill_tensor_value(Accessor(weight), 2.f);
+ library->fill_tensor_value(Accessor(bias), 3.f);
+ // This operator is configured once and captured by this lambda.
+ fc->prepare(prep_pack);
+ fc->run(run_pack);
+ return dst;
+ };
+ auto result_0 = run_conv();
+ auto result_1 = run_conv();
+ for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
+ }
+}
+
+/** Test case for memory injection in @ref NEFullyConnectedLayer.
+ *
+ * Make sure @ref NEFullyConnectedLayer still works through injecting the memory at configure time using the old API.
+ *
+ * Checks performed in order:
+ * - Both runs compute the same output
+ */
+TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<NEFullyConnectedLayer>();
+ const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
+ const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto fc_info = FullyConnectedLayerInfo{};
+ auto run_conv = [&]()
+ {
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+ fc->configure(&src, &weight, &bias, &dst, fc_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+ library->fill_tensor_value(Accessor(src), 1.f);
+ library->fill_tensor_value(Accessor(weight), 2.f);
+ library->fill_tensor_value(Accessor(bias), 3.f);
+ fc->run();
+ return dst;
+ };
+ auto result_0 = run_conv();
+ auto result_1 = run_conv();
+ for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
+ }
+}
+
+/** Unit test for @ref cpu::CpuFullyConnected with quantized multiplier > 1
+ *
+ * Tests output correctness.
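+ *
+ * With src/weights scales of 0.5 and a dst scale of 0.1, the effective
+ * requantization multiplier is (0.5 * 0.5) / 0.1 = 2.5 > 1.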
+ */
+TEST_CASE(Quant8_Signed_Mult_gt_1, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<cpu::CpuFullyConnected>();
+ const auto src_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, -1));
+ const auto weight_info = TensorInfo(TensorShape(1U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5, -8));
+ const auto bias_info = TensorInfo(TensorShape(1U), 1, DataType::S32);
+ auto dst_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 0));
+ const auto fc_info = FullyConnectedLayerInfo{};
+ fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
+
+ // Create and allocate all the tensors used by the operator
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &dst } };
+ ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
+
+ // Initialize input values
+ const std::vector<int8_t> src_values = { 3, 63, 31 };
+ const std::vector<int8_t> weight_values = { -4 };
+ const std::vector<int32_t> bias_values = { 16 };
+ const std::vector<int32_t> expected = { 80, 127, 127 };
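+ // Worked example for the first element: src 3 dequantizes to 0.5 * (3 - (-1)) = 2.0,
+ // weight -4 to 0.5 * (-4 - (-8)) = 2.0, bias 16 to (0.5 * 0.5) * 16 = 4.0;
+ // 2.0 * 2.0 + 4.0 = 8.0, requantized as 8.0 / 0.1 = 80. The other two
+ // elements saturate at the QASYMM8_SIGNED maximum of 127.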
+ library->fill_static_values(Accessor(src), src_values);
+ library->fill_static_values(Accessor(weight), weight_values);
+ library->fill_static_values(Accessor(bias), bias_values);
+
+ // Run FC layer
+ fc->prepare(prep_pack);
+ fc->run(run_pack);
+
+ auto dst_ptr = reinterpret_cast<int8_t *>(dst.buffer());
+ for(size_t i = 0; i < dst.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(dst_ptr[i] == expected[i], framework::LogLevel::ERRORS);
+ }
+}
+
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
@@ -186,13 +331,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework:
validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
@@ -235,14 +380,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>,
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- QuantizationData),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -282,14 +427,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>,
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- QuantizationData),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);