author    Giorgio Arena <giorgio.arena@arm.com>      2018-04-05 17:20:34 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:49:37 +0000
commit    bb54e4e40b7b08c509e234cd91ebd3087af66c23 (patch)
tree      5e0b6bdf58bb129ef2b3b26e6e65515bc8b76f83 /src
parent    4d33630096c769dd43716dd5607f151e3d5abef7 (diff)
COMPMID-797 Integrate Mobilenet QASYMM8 with new graph.
Change-Id: I4df63ec2f4eb27a8a6eec2bea27741bf8dec6910
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126966
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/TensorInfo.cpp                                     |  2
-rw-r--r--  src/graph/GraphBuilder.cpp                                  | 24
-rw-r--r--  src/graph/backends/CL/CLDeviceBackend.cpp                   |  2
-rw-r--r--  src/graph/backends/CL/CLFunctionsFactory.cpp                | 32
-rw-r--r--  src/graph/backends/GLES/GCDeviceBackend.cpp                 |  2
-rw-r--r--  src/graph/backends/GLES/GCFunctionsFactory.cpp              | 32
-rw-r--r--  src/graph/backends/NEON/NEDeviceBackend.cpp                 |  2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp               | 32
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp                    | 10
-rw-r--r--  src/graph/nodes/SoftmaxLayerNode.cpp                        |  5
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp   | 10
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp                       |  5
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp      | 16
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 20
14 files changed, 138 insertions(+), 56 deletions(-)
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index 539d0f84b3..676938a231 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -322,7 +322,7 @@ ITensorInfo &TensorInfo::set_data_type(DataType data_type)
{
_data_type = data_type;
_format = Format::UNKNOWN;
- return *this;
+ return set_tensor_shape(tensor_shape()); // Force total size and strides to update
}
ITensorInfo &TensorInfo::set_num_channels(int num_channels)
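Note: the set_data_type() fix above matters because byte strides and total size depend on the element size; re-applying the current shape forces them to be recomputed. A minimal standalone sketch of the expected behaviour (assumes the public TensorInfo API, not part of this patch):

    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    int main()
    {
        TensorInfo info(TensorShape(16U), 1, DataType::QASYMM8); // 16 elements * 1 byte
        // Switching the data type changes element_size(); the patched set_data_type()
        // re-applies the shape so strides and total size are recomputed.
        info.set_data_type(DataType::S32);                       // 16 elements * 4 bytes
        return info.total_size() == 64U ? 0 : 1;                 // expected 64 bytes after the fix
    }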
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 0d1bdc3596..4ad34e789c 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -206,7 +206,9 @@ NodeID GraphBuilder::add_batch_normalization_node(Graph &g, NodeParams params, N
NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
unsigned int num_groups, ConvolutionMethod method,
- ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+ ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor,
+ const QuantizationInfo weights_quant_info,
+ const QuantizationInfo out_quant_info)
{
CHECK_NODEIDX_PAIR(input, g);
ARM_COMPUTE_ERROR_ON(depth == 0);
@@ -220,7 +222,13 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
w_desc.shape = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z() / num_groups, depth);
- NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+ if(!weights_quant_info.empty())
+ {
+ w_desc.quant_info = weights_quant_info;
+ }
+
+ NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
// Create bias nodes
NodeID b_nid = EmptyNodeID;
@@ -234,7 +242,7 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa
if(num_groups == 1)
{
// Create convolution node and connect
- NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method);
+ NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method, out_quant_info);
g.add_connection(input.node_id, input.index, conv_nid, 0);
g.add_connection(w_nid, 0, conv_nid, 1);
if(has_bias)
@@ -270,7 +278,7 @@ NodeID GraphBuilder::add_depth_concatenate_node(Graph &g, NodeParams params, std
NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input, Size2D kernel_spatial_extend, PadStrideInfo conv_info,
DepthwiseConvolutionMethod method,
- ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+ ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor, const QuantizationInfo quant_info)
{
CHECK_NODEIDX_PAIR(input, g);
ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0));
@@ -283,7 +291,13 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params,
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
w_desc.shape = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z());
- NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+ if(!quant_info.empty())
+ {
+ w_desc.quant_info = quant_info;
+ }
+
+ NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
// Create bias nodes
NodeID b_nid = EmptyNodeID;
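Note: a hypothetical call site for the extended builder (the graph, params, node ids, accessors and the scale/offset values are placeholders, and the ConvolutionMethod enum value spelling is assumed from this revision). A non-empty weights_quant_info attaches its (scale, offset) to the weights tensor, while out_quant_info is forwarded to the ConvolutionLayerNode to fix the output quantization:

    NodeID conv_nid = GraphBuilder::add_convolution_node(
        g, params, NodeIdxPair{ prev_nid, 0 },
        Size2D(3U, 3U), /* depth: output feature maps */ 32U,
        PadStrideInfo(1, 1, 1, 1),
        /* num_groups */ 1, ConvolutionMethod::DEFAULT,
        std::move(weights_accessor), std::move(bias_accessor),
        QuantizationInfo(0.030f, 122),  // weights_quant_info (example values)
        QuantizationInfo(0.050f, 128)); // out_quant_info (example values)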
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index f10eb33a98..92cb6936c3 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -126,7 +126,7 @@ std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tens
ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::CL);
// Create backend tensor handle
- TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+ TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
auto backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
return std::move(backend_tensor_handle);
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index 1b448fefd2..ad73a797e3 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -154,10 +154,16 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- ICLTensor *input = get_backing_tensor(node.input(0));
- ICLTensor *weights = get_backing_tensor(node.input(1));
- ICLTensor *biases = get_backing_tensor(node.input(2));
- ICLTensor *output = get_backing_tensor(node.output(0));
+ ICLTensor *input = get_backing_tensor(node.input(0));
+ ICLTensor *weights = get_backing_tensor(node.input(1));
+ ICLTensor *biases = get_backing_tensor(node.input(2));
+ ICLTensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const ConvolutionMethod conv_algorithm = node.convolution_method();
@@ -190,6 +196,8 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -251,10 +259,16 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- ICLTensor *input = get_backing_tensor(node.input(0));
- ICLTensor *weights = get_backing_tensor(node.input(1));
- ICLTensor *biases = get_backing_tensor(node.input(2));
- ICLTensor *output = get_backing_tensor(node.output(0));
+ ICLTensor *input = get_backing_tensor(node.input(0));
+ ICLTensor *weights = get_backing_tensor(node.input(1));
+ ICLTensor *biases = get_backing_tensor(node.input(2));
+ ICLTensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
@@ -275,6 +289,8 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 8cd9994744..a55215f058 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -87,7 +87,7 @@ std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tens
ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::GC);
// Create backend tensor handle
- TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+ TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
auto backend_tensor_handle = support::cpp14::make_unique<GCTensorHandle>(info);
return std::move(backend_tensor_handle);
diff --git a/src/graph/backends/GLES/GCFunctionsFactory.cpp b/src/graph/backends/GLES/GCFunctionsFactory.cpp
index 12e7c042d4..d3c5737e68 100644
--- a/src/graph/backends/GLES/GCFunctionsFactory.cpp
+++ b/src/graph/backends/GLES/GCFunctionsFactory.cpp
@@ -154,10 +154,16 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- IGCTensor *input = get_backing_tensor(node.input(0));
- IGCTensor *weights = get_backing_tensor(node.input(1));
- IGCTensor *biases = get_backing_tensor(node.input(2));
- IGCTensor *output = get_backing_tensor(node.output(0));
+ IGCTensor *input = get_backing_tensor(node.input(0));
+ IGCTensor *weights = get_backing_tensor(node.input(1));
+ IGCTensor *biases = get_backing_tensor(node.input(2));
+ IGCTensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const ConvolutionMethod conv_algorithm = node.convolution_method();
@@ -180,6 +186,8 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -241,10 +249,16 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- IGCTensor *input = get_backing_tensor(node.input(0));
- IGCTensor *weights = get_backing_tensor(node.input(1));
- IGCTensor *biases = get_backing_tensor(node.input(2));
- IGCTensor *output = get_backing_tensor(node.output(0));
+ IGCTensor *input = get_backing_tensor(node.input(0));
+ IGCTensor *weights = get_backing_tensor(node.input(1));
+ IGCTensor *biases = get_backing_tensor(node.input(2));
+ IGCTensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
@@ -264,6 +278,8 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index aaf05829bb..9123196540 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -93,7 +93,7 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tens
ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::NEON);
// Create backend tensor handle
- TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+ TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
auto backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
return std::move(backend_tensor_handle);
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 228af9ca6f..906378c565 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -140,10 +140,16 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- ITensor *input = get_backing_tensor(node.input(0));
- ITensor *weights = get_backing_tensor(node.input(1));
- ITensor *biases = get_backing_tensor(node.input(2));
- ITensor *output = get_backing_tensor(node.output(0));
+ ITensor *input = get_backing_tensor(node.input(0));
+ ITensor *weights = get_backing_tensor(node.input(1));
+ ITensor *biases = get_backing_tensor(node.input(2));
+ ITensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const ConvolutionMethod conv_algorithm = node.convolution_method();
@@ -175,6 +181,8 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -234,10 +242,16 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
- ITensor *input = get_backing_tensor(node.input(0));
- ITensor *weights = get_backing_tensor(node.input(1));
- ITensor *biases = get_backing_tensor(node.input(2));
- ITensor *output = get_backing_tensor(node.output(0));
+ ITensor *input = get_backing_tensor(node.input(0));
+ ITensor *weights = get_backing_tensor(node.input(1));
+ ITensor *biases = get_backing_tensor(node.input(2));
+ ITensor *output = get_backing_tensor(node.output(0));
+
+ if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+ {
+ biases->info()->set_data_type(DataType::S32);
+ }
+
const PadStrideInfo conv_info = node.convolution_info();
const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
@@ -258,6 +272,8 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
<< " Data Type: " << input->info()->data_type()
+ << " Input QuantInfo: " << input->info()->quantization_info()
+ << " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index 461728487f..eb0c6a1c1a 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -31,8 +31,8 @@ namespace arm_compute
{
namespace graph
{
-ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
- : _info(std::move(info)), _method(method)
+ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method, QuantizationInfo out_quant_info)
+ : _info(std::move(info)), _method(method), _out_quant_info(out_quant_info)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -90,6 +90,12 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
TensorDescriptor output_info = src->desc();
TensorShape output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
output_info.shape = output_shape;
+
+ if(!_out_quant_info.empty())
+ {
+ output_info.quant_info = _out_quant_info;
+ }
+
return output_info;
}
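Note: the override relies on QuantizationInfo distinguishing "not provided" from an explicit value; assuming empty() reports a default-constructed object, existing callers that pass no out_quant_info keep inheriting the input's quantization:

    QuantizationInfo keep_input_quant;           // empty() == true  -> output keeps src->desc().quant_info
    QuantizationInfo force_output(0.05f, 128);   // empty() == false -> output_info.quant_info is overridden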
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index 4c21ac6ad0..b6241e6654 100644
--- a/src/graph/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -63,7 +63,10 @@ TensorDescriptor SoftmaxLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- return src->desc();
+ TensorDescriptor out_desc = src->desc();
+ out_desc.quant_info = QuantizationInfo(1.f / 256.f, 0);
+
+ return out_desc;
}
Status SoftmaxLayerNode::validate()
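Note: the fixed (1/256, 0) quantization suits softmax because its outputs are probabilities in [0, 1]: with scale 1/256 and zero offset, a quantized value q in [0, 255] dequantizes to q/256, a uniform step of ~0.0039 (an exact probability of 1.0 saturates to 255). A minimal sketch:

    #include <cstdint>

    // Dequantize a QASYMM8 softmax output produced with QuantizationInfo(1.f / 256.f, 0).
    float dequantize_softmax(uint8_t q)
    {
        return q * (1.0f / 256.0f); // 0 -> 0.0, 128 -> 0.5, 255 -> ~0.996
    }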
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 5f747bc477..711b006ede 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -102,7 +102,10 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
matrix_b = &_tmp_b;
_memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
+ if(!_reshape_b_only_on_first_run)
+ {
+ _memory_group.manage(&_tmp_b);
+ }
// Configure interleave kernel
_mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height);
@@ -119,7 +122,10 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
{
TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
_vector_sum_col.allocator()->init(info_vector_sum_col);
- _memory_group.manage(&_vector_sum_col);
+ if(!_reshape_b_only_on_first_run)
+ {
+ _memory_group.manage(&_vector_sum_col);
+ }
// Configure Matrix B reduction kernel
_mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
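Note: the guards above follow from how memory-group-managed buffers are recycled: they are acquired at the start of run() and released at the end, so they cannot carry data across calls. When reshape_b_only_on_first_run is set, the reshaped B matrix (and its column-sum) produced on the first run must survive into later runs and therefore has to stay outside the memory group. A simplified sketch of the configure-time pattern (placeholder names and types, not the library code):

    void configure_b_buffer(CLMemoryGroup &memory_group, CLTensor &tmp_b, bool reshape_b_only_on_first_run)
    {
        if(!reshape_b_only_on_first_run)
        {
            memory_group.manage(&tmp_b);    // scratch buffer: recycled after every run()
        }
        // ... configure the kernels that write to tmp_b ...
        tmp_b.allocator()->allocate();      // allocated either way; persistent if it must outlive a run
    }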
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 18e6e919c3..e0859be93e 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -107,7 +107,10 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
// Manage intermediate buffers
_memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
+ if(!_reshape_b_only_on_first_run)
+ {
+ _memory_group.manage(&_tmp_b);
+ }
int m = a->info()->dimension(1);
int n = b->info()->dimension(0);
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 7f25c2e717..3c48d691ed 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -109,14 +109,6 @@ Status NEConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, co
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
}
- // Checks performed when biases are present
- if(append_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
if(transpose1xW)
{
TensorInfo weights_reshaped = weights->clone()->set_tensor_shape(get_reshaped_weights_shape(weights, append_bias));
@@ -344,7 +336,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
_memory_group.manage(&_input_im2col_reshaped);
// Create tensor (interleave) to prepare input tensor for GEMM
- if(!_is_fully_connected_convolution && !run_optimised)
+ if(!_is_fully_connected_convolution && !run_optimised && _is_interleaved)
{
TensorShape shape_interleaved(shape_im2col);
shape_interleaved.set(0, shape_interleaved.x() * 4);
@@ -362,7 +354,9 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
info_gemm.set_quantization_info(output->info()->quantization_info());
_gemm_output.allocator()->init(info_gemm);
- _memory_group.manage(&_gemm_output);
+
+ // FIXME: enabling memory manager for _gemm_output gives incorrect results (maybe bound to the assembly kernel in GEMMLowp?)
+ // _memory_group.manage(&_gemm_output);
// Configure kernels
// Configure im2col
@@ -491,7 +485,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
reshaped_weights->set_tensor_shape(get_reshaped_weights_shape_conv(weights, append_bias, is_fully_connected_convolution));
ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, biases, reshaped_weights.get(), !is_fully_connected_convolution /* 1xW transpose */));
}
- else
+ else if(!is_quantized)
{
TensorShape reshaped_weights_shape;
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 7372c6ca57..cbec73fc31 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -43,19 +43,19 @@ using namespace arm_compute::misc::shape_calculator;
NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _asm_glue_unsigned(), _asm_glue_signed(), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(),
_mtx_b_reduction_kernel(), _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _workspace(), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false),
- _dot_product_path(false)
+ _dot_product_path(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
- ARM_COMPUTE_UNUSED(gemm_info);
ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));
_a_offset = a->info()->quantization_info().offset;
_b_offset = b->info()->quantization_info().offset;
_run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
+ _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
#ifdef __aarch64__
switch(a->info()->data_type())
@@ -98,7 +98,10 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_tmp_a.allocator()->init(info_a);
_tmp_b.allocator()->init(info_b);
_memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
+ if(!_reshape_b_only_on_first_run)
+ {
+ _memory_group.manage(&_tmp_b);
+ }
// Configure interleave kernel
{
@@ -129,7 +132,10 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
_vector_sum_col.allocator()->init(info_vector_sum_col);
- _memory_group.manage(&_vector_sum_col);
+ if(!_reshape_b_only_on_first_run)
+ {
+ _memory_group.manage(&_vector_sum_col);
+ }
// Configure Matrix B reduction kernel
_mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a->info()->dimension(0), false);
@@ -252,7 +258,7 @@ void NEGEMMLowpMatrixMultiplyCore::run()
NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
}
- if(_mtx_b_reshape_kernel)
+ if(_mtx_b_reshape_kernel && (_is_first_run || !_reshape_b_only_on_first_run))
{
NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
}
@@ -278,7 +284,7 @@ void NEGEMMLowpMatrixMultiplyCore::run()
}
// Run matrix B reduction kernel only if _a_offset is not equal to 0
- if(_a_offset != 0)
+ if(_a_offset != 0 && (_is_first_run || !_reshape_b_only_on_first_run))
{
NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX);
}
@@ -287,4 +293,6 @@ void NEGEMMLowpMatrixMultiplyCore::run()
NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
_memory_group.release();
+
+ _is_first_run = false;
}
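Note: taken together, the new flags give run() the control flow sketched below (a condensed illustration of the pattern added by this patch, not a verbatim copy of the function): the B-reshape and B-reduction kernels execute on the first run, or on every run when the caller did not request reshape_b_only_on_first_run, while everything else is scheduled unconditionally.

    void run_sketch(bool &is_first_run, bool reshape_b_only_on_first_run)
    {
        const bool process_b = is_first_run || !reshape_b_only_on_first_run;
        // A-side reshape/reduction and the matrix multiply are scheduled every run.
        if(process_b)
        {
            // schedule the matrix B reshape kernel and the matrix B reduction kernel here
        }
        // the offset contribution kernel runs every time
        is_first_run = false; // later runs reuse the reshaped B matrix and its column sums
    }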