author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-07-17 12:28:42 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:54 +0000
commit     7d66a8e3f603f2cd363f04a750847e3f9eabdfd4 (patch)
tree       0d7e1ad5bf0ecd32cd919074f756d27c351d7638
parent     ae54e026c86aec7d6819ee3ef76372c1a3c92467 (diff)
download   ComputeLibrary-7d66a8e3f603f2cd363f04a750847e3f9eabdfd4.tar.gz
COMPMID-1386: Add support for converting weights for CL.
Change-Id: I62e3ead903366baeeb1488f233a9b8b0c388c9de
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140403
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h  4
-rw-r--r--  arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h  4
-rw-r--r--  arm_compute/core/Types.h  9
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h  3
-rw-r--r--  arm_compute/graph/nodes/FullyConnectedLayerNode.h  11
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h  39
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.h  15
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h  4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h  28
-rw-r--r--  examples/graph_lenet.cpp  19
-rw-r--r--  examples/graph_mobilenet.cpp  10
-rw-r--r--  examples/graph_vgg16.cpp  45
-rw-r--r--  examples/graph_vgg19.cpp  50
-rw-r--r--  src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp  43
-rw-r--r--  src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp  41
-rw-r--r--  src/graph/GraphBuilder.cpp  9
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp  9
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp  157
-rw-r--r--  src/runtime/CL/functions/CLLSTMLayer.cpp  20
-rw-r--r--  src/runtime/CL/functions/CLRNNLayer.cpp  4
-rw-r--r--  src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp  8
-rw-r--r--  src/runtime/NEON/functions/NEFullyConnectedLayer.cpp  21
-rw-r--r--  src/runtime/NEON/functions/NERNNLayer.cpp  4
-rw-r--r--  tests/networks/AlexNetNetwork.h  9
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp  15
-rw-r--r--  tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp  7
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp  14
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h  7
-rw-r--r--  tests/validation/fixtures/UNIT/MemoryManagerFixture.h  14
-rw-r--r--  tests/validation/reference/ConvertFullyConnectedWeights.cpp  10
-rw-r--r--  utils/GraphUtils.cpp  4
-rw-r--r--  utils/GraphUtils.h  18
33 files changed, 421 insertions, 238 deletions
diff --git a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
index b85f93e992..40c9dc826f 100644
--- a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
@@ -57,7 +57,7 @@ public:
*
* @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
@@ -65,7 +65,7 @@ public:
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
diff --git a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
index 1a276c353e..5b8d7fd457 100644
--- a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
@@ -61,7 +61,7 @@ public:
*
* @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
@@ -69,7 +69,7 @@ public:
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 1363324e3b..343952f0b2 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -682,6 +682,15 @@ private:
DimensionRoundingType _round_type;
};
+/** Fully connected layer info */
+struct FullyConnectedLayerInfo
+{
+ DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
+ bool transpose_weights{ true }; /**< Transpose weights if true. */
+ bool are_weights_reshaped{ false }; /**< Reshape the weights tensor if false. */
+ bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */
+};
+
/** Pooling Layer Information class */
class PoolingLayerInfo
{
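The struct above gathers the fully connected options in one place. Because every field has an in-class initialiser, a default-constructed instance matches the previous parameter defaults, so callers only set what differs. A minimal sketch (not from the diff):

    // Only override the fields that differ from the defaults; here, weights
    // trained in NCHW that were already reshaped by an earlier run.
    FullyConnectedLayerInfo fc_info;
    fc_info.weights_trained_layout = DataLayout::NCHW;
    fc_info.are_weights_reshaped   = true;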
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 16536bcb65..978d3bc1a8 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -524,10 +524,11 @@ std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode
typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+ const FullyConnectedLayerInfo fc_info = node.info();
// Create and configure function
auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
- func->configure(input, weights, biases, output);
+ func->configure(input, weights, biases, output, fc_info);
ARM_COMPUTE_ERROR_ON(input == nullptr);
ARM_COMPUTE_ERROR_ON(weights == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr);
diff --git a/arm_compute/graph/nodes/FullyConnectedLayerNode.h b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
index 3d1b68909a..79201c8bdc 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayerNode.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
@@ -37,8 +37,9 @@ public:
/** Constructor
*
* @param[in] num_outputs Number of neurons in the layer
+ * @param[in] fc_info (Optional) Additional information about the fully connected layer
*/
- FullyConnectedLayerNode(unsigned int num_outputs);
+ FullyConnectedLayerNode(unsigned int num_outputs, FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
/** Computes weights descriptor
*
* @warning Works for inputs with 1D batch space
@@ -59,6 +60,11 @@ public:
* @return Output descriptor
*/
static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, unsigned int num_outputs);
+ /** Fully connected layer additional information
+ *
+ * @return Additional information about the fully connected layer
+ */
+ FullyConnectedLayerInfo info() const;
// Inherited overridden methods:
NodeType type() const override;
@@ -67,7 +73,8 @@ public:
void accept(INodeVisitor &v) override;
private:
- unsigned int _num_outputs;
+ unsigned int _num_outputs;
+ FullyConnectedLayerInfo _info;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
index 77e9d15e7f..9bfade4894 100644
--- a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
@@ -39,7 +39,7 @@ public:
*
* @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
@@ -47,7 +47,7 @@ public:
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 3357868968..6b8d7a97ec 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -31,6 +31,7 @@
#include "arm_compute/core/CL/kernels/CLTransposeKernel.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
@@ -86,32 +87,26 @@ public:
CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
- * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
- * Used for reconfiguration purposes.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
*/
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false,
- bool retain_internal_weights = false);
+ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[in] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
- * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
- * Used for reconfiguration purposes.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[in] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false,
- bool retain_internal_weights = false);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
//Inherited methods override
void run() override;
@@ -124,6 +119,7 @@ private:
CLMemoryGroup _memory_group;
CLIm2ColKernel _im2col_kernel;
+ CLConvertFullyConnectedWeights _convert_weights;
CLFullyConnectedLayerReshapeWeights _reshape_weights_kernel;
CLGEMM _mm_gemm;
CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
@@ -131,11 +127,14 @@ private:
CLGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel;
CLTensor _im2col_output;
CLTensor _gemmlowp_output;
+ CLTensor _converted_weights_output;
CLTensor _reshape_weights_output;
+ bool _are_weights_converted;
bool _are_weights_reshaped;
bool _is_fc_after_conv;
bool _accumulate_biases;
bool _is_quantized;
+ bool _is_prepared;
const ICLTensor *_original_weights;
};
}
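For existing callers the migration is mechanical: each boolean of the old overload maps one-to-one onto a field of the struct. A sketch with placeholder tensor names:

    // Before: fc.configure(&input, &weights, &biases, &output,
    //                      true  /* transpose_weights */,
    //                      false /* are_weights_reshaped */,
    //                      false /* retain_internal_weights */);
    // After, the same options travel in one struct:
    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights       = true;
    fc_info.are_weights_reshaped    = false;
    fc_info.retain_internal_weights = false;

    CLFullyConnectedLayer fc;
    fc.configure(&input, &weights, &biases, &output, fc_info);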
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.h
index cd108c3eab..63565df1a7 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.h
@@ -75,17 +75,14 @@ public:
GCFullyConnectedLayer &operator=(GCFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
- * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
- * Used for reconfiguration purposes.
+ * @param[in] input Source tensor. Data type supported: F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
*/
void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output,
- bool transpose_weights = true, bool are_weights_reshaped = false, bool retain_internal_weights = false);
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
//Inherited methods override
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
index acbba28040..8f261421e6 100644
--- a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
@@ -42,7 +42,7 @@ public:
*
* @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
@@ -50,7 +50,7 @@ public:
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
- * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+ * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index 33ac8ecb8a..ea0762ea79 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -104,26 +104,26 @@ public:
NEFullyConnectedLayer &operator=(NEFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input.
- * @param[in] biases Bias tensor. Can be nullptr. Data type supported:Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose the weights tensor if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor. Data type supported: F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input.
+ * @param[in] biases Bias tensor. Can be nullptr. Data type supported:Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
*/
- void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
*
- * @param[in] input Source tensor info. Data type supported: F16/F32.
- * @param[in] weights Weights tensor info. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor info. It can be nullptr. Data type supported:Same as @p input.
- * @param[in] output Destination tensor info. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor info. Data type supported: F16/F32.
+ * @param[in] weights Weights tensor info. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor info. It can be nullptr. Data type supported:Same as @p input.
+ * @param[in] output Destination tensor info. Data type supported: Same as @p input.
+ * @param[in] fc_info (Optional) Fully connected layer additional info
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
//Inherited methods override
void run() override;
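The NEON function takes the same info struct in both validate() and configure(). A sketch of the validate-then-configure pattern, assuming tensors whose infos were initialised elsewhere:

    FullyConnectedLayerInfo fc_info; // defaults: transpose, not yet reshaped

    // Check the configuration before committing to it.
    const Status status = NEFullyConnectedLayer::validate(input.info(), weights.info(),
                                                          biases.info(), output.info(), fc_info);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    NEFullyConnectedLayer fc;
    fc.configure(&input, &weights, &biases, &output, fc_info);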
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 0d8a943737..f3aa266c50 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -60,7 +60,7 @@ public:
// Checks
ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
- ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC, "Unsupported data layout!");
+ ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC && common_params.target != Target::CL, "Unsupported data layout!");
// Print parameter values
std::cout << common_params << std::endl;
@@ -69,33 +69,40 @@ public:
std::string data_path = common_params.data_path;
unsigned int batches = 4; /** Number of batches */
+ // Create input descriptor
+ const TensorShape tensor_shape = permute_shape(TensorShape(28U, 28U, 1U, batches), DataLayout::NCHW, common_params.data_layout);
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set weights trained layout
+ const DataLayout weights_layout = DataLayout::NCHW;
+
//conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
graph << common_params.target
<< common_params.fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), common_params.data_type), get_input_accessor(common_params))
+ << InputLayer(input_descriptor, get_input_accessor(common_params))
<< ConvolutionLayer(
5U, 5U, 20U,
- get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_b.npy"),
PadStrideInfo(1, 1, 0, 0))
.set_name("conv1")
<< PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0))).set_name("pool1")
<< ConvolutionLayer(
5U, 5U, 50U,
- get_weights_accessor(data_path, "/cnn_data/lenet_model/conv2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/lenet_model/conv2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/lenet_model/conv2_b.npy"),
PadStrideInfo(1, 1, 0, 0))
.set_name("conv2")
<< PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0))).set_name("pool2")
<< FullyConnectedLayer(
500U,
- get_weights_accessor(data_path, "/cnn_data/lenet_model/ip1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/lenet_model/ip1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip1_b.npy"))
.set_name("ip1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu")
<< FullyConnectedLayer(
10U,
- get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_b.npy"))
.set_name("ip2")
<< SoftmaxLayer().set_name("prob")
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index b30f4c5fd3..a747b3cd11 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -78,12 +78,10 @@ public:
// Create input descriptor
unsigned int spatial_size = (model_id == 0 || common_params.data_type == DataType::QASYMM8) ? 224 : 160;
- TensorShape tensor_shape = TensorShape(spatial_size, spatial_size, 3U, 1U);
- if(common_params.data_layout == DataLayout::NHWC)
- {
- arm_compute::permute(tensor_shape, arm_compute::PermutationVector(2U, 0U, 1U));
- }
- TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Create input descriptor
+ const TensorShape tensor_shape = permute_shape(TensorShape(spatial_size, spatial_size, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
// Set graph hints
graph << common_params.target
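The permute_shape helper adopted by the examples encapsulates the hand-rolled permutation removed above. A self-contained illustration of the NCHW-to-NHWC case (nchw_to_nhwc is an illustrative stand-in, not a library function), keeping in mind that the library stores NCHW shapes as (W, H, C, N) and NHWC shapes as (C, W, H, N):

    #include <array>

    // Reorder a shape stored in NCHW order (W, H, C, N) into NHWC order
    // (C, W, H, N) -- the effect of PermutationVector(2U, 0U, 1U) above.
    std::array<unsigned int, 4> nchw_to_nhwc(const std::array<unsigned int, 4> &s)
    {
        return { s[2], s[0], s[1], s[3] };
    }

    // nchw_to_nhwc({224U, 224U, 3U, 1U}) yields {3U, 224U, 224U, 1U}.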
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index e677650d04..e23ea65dd7 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -60,7 +60,7 @@ public:
// Checks
ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
- ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC, "Unsupported data layout!");
+ ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC && common_params.target != Target::CL, "Unsupported data layout!");
// Print parameter values
std::cout << common_params << std::endl;
@@ -72,14 +72,21 @@ public:
const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ // Create input descriptor
+ const TensorShape tensor_shape = permute_shape(TensorShape(224U, 224U, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set weights trained layout
+ const DataLayout weights_layout = DataLayout::NCHW;
+
+ // Create graph
graph << common_params.target
<< common_params.fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
- get_input_accessor(common_params, std::move(preprocessor)))
+ << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv1_1")
@@ -87,7 +94,7 @@ public:
// Layer 2
<< ConvolutionLayer(
3U, 3U, 64U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv1_2")
@@ -96,7 +103,7 @@ public:
// Layer 3
<< ConvolutionLayer(
3U, 3U, 128U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv2_1")
@@ -104,7 +111,7 @@ public:
// Layer 4
<< ConvolutionLayer(
3U, 3U, 128U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv2_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv2_2")
@@ -113,7 +120,7 @@ public:
// Layer 5
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_1")
@@ -121,7 +128,7 @@ public:
// Layer 6
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_2")
@@ -129,7 +136,7 @@ public:
// Layer 7
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv3_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_3")
@@ -138,7 +145,7 @@ public:
// Layer 8
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_1")
@@ -146,7 +153,7 @@ public:
// Layer 9
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_2")
@@ -154,7 +161,7 @@ public:
// Layer 10
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv4_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_3")
@@ -163,7 +170,7 @@ public:
// Layer 11
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_1")
@@ -171,7 +178,7 @@ public:
// Layer 12
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_2")
@@ -179,7 +186,7 @@ public:
// Layer 13
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv5_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_3")
@@ -188,21 +195,21 @@ public:
// Layer 14
<< FullyConnectedLayer(
4096U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc6_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc6_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc6_b.npy"))
.set_name("fc6")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Relu")
// Layer 15
<< FullyConnectedLayer(
4096U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc7_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc7_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc7_b.npy"))
.set_name("fc7")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Relu_1")
// Layer 16
<< FullyConnectedLayer(
1000U,
- get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc8_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc8_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc8_b.npy"))
.set_name("fc8")
// Softmax
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index c7fc333e0a..6cb6b1fae2 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -59,7 +59,7 @@ public:
// Checks
ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
- ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC, "Unsupported data layout!");
+ ARM_COMPUTE_EXIT_ON_MSG(common_params.data_layout == DataLayout::NHWC && common_params.target != Target::CL, "Unsupported data layout!");
// Print parameter values
std::cout << common_params << std::endl;
@@ -71,21 +71,27 @@ public:
const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ // Create input descriptor
+ const TensorShape tensor_shape = permute_shape(TensorShape(224U, 224U, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set weights trained layout
+ const DataLayout weights_layout = DataLayout::NCHW;
+
graph << common_params.target
<< common_params.fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
- get_input_accessor(common_params, std::move(preprocessor)))
+ << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv1_1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_1/Relu")
<< ConvolutionLayer(
3U, 3U, 64U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv1_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv1_2")
@@ -94,14 +100,14 @@ public:
// Layer 2
<< ConvolutionLayer(
3U, 3U, 128U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv2_1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_1/Relu")
<< ConvolutionLayer(
3U, 3U, 128U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv2_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv2_2")
@@ -110,28 +116,28 @@ public:
// Layer 3
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_1/Relu")
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_2")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_2/Relu")
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_3")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_3/Relu")
<< ConvolutionLayer(
3U, 3U, 256U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_4_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_4_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv3_4_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv3_4")
@@ -140,28 +146,28 @@ public:
// Layer 4
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_1/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_2")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_2/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_3")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_3/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_4_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_4_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv4_4_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv4_4")
@@ -170,28 +176,28 @@ public:
// Layer 5
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_1_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_1_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_1")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv5_1/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_2_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_2_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_2_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_2")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv5_2/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_3_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_3_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_3_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_3")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv5_3/Relu")
<< ConvolutionLayer(
3U, 3U, 512U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_4_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_4_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/conv5_4_b.npy"),
PadStrideInfo(1, 1, 1, 1))
.set_name("conv5_4")
@@ -200,21 +206,21 @@ public:
// Layer 6
<< FullyConnectedLayer(
4096U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc6_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc6_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc6_b.npy"))
.set_name("fc6")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Relu")
// Layer 7
<< FullyConnectedLayer(
4096U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc7_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc7_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc7_b.npy"))
.set_name("fc7")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Relu_1")
// Layer 8
<< FullyConnectedLayer(
1000U,
- get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc8_w.npy"),
+ get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc8_w.npy", weights_layout),
get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc8_b.npy"))
.set_name("fc8")
// Softmax
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
index 86858d0c03..69ab590540 100644
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
@@ -41,27 +41,32 @@ void CLConvertFullyConnectedWeightsKernel::configure(const ICLTensor *input, ICL
DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ // Output tensor auto initialisation if not yet initialized
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+
ARM_COMPUTE_ERROR_THROW_ON(CLConvertFullyConnectedWeightsKernel::validate(input->info(), output->info(), original_input_shape, data_layout));
_input = input;
_output = output;
- const unsigned int num_elems_per_input_plane = original_input_shape.x() * original_input_shape.y();
- const unsigned int num_channels = original_input_shape.z();
+ const DataLayout input_data_layout = (data_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
+
+ const int width_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::WIDTH);
+ const int height_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::HEIGHT);
+ const int channel_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::CHANNEL);
+
+ const unsigned int num_elems_per_input_plane = original_input_shape[width_idx] * original_input_shape[height_idx];
+ const unsigned int num_channels = original_input_shape[channel_idx];
+
+ const unsigned int factor_1 = (data_layout == DataLayout::NCHW) ? num_elems_per_input_plane : num_channels;
+ const unsigned int factor_2 = (data_layout == DataLayout::NCHW) ? num_channels : num_elems_per_input_plane;
// Set build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- if(data_layout == DataLayout::NCHW)
- {
- build_opts.add_option("-DFACTOR_1=" + support::cpp11::to_string(num_elems_per_input_plane));
- build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(num_channels));
- }
- else
- {
- build_opts.add_option("-DFACTOR_1=" + support::cpp11::to_string(num_channels));
- build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(num_elems_per_input_plane));
- }
+ build_opts.add_option("-DFACTOR_1=" + support::cpp11::to_string(factor_1));
+ build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(factor_2));
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convert_fc_weights", build_opts.options()));
@@ -75,14 +80,22 @@ Status CLConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input,
DataLayout data_layout)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8,
+ DataType::U16, DataType::S16,
+ DataType::U32, DataType::S32,
DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != original_input_shape.total_size_lower(3));
ARM_COMPUTE_RETURN_ERROR_ON(data_layout == DataLayout::UNKNOWN);
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Status{};
}
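Two details are worth unpacking here. First, the width/height/channel indices are taken from the layout opposite to data_layout: data_layout names the layout the weights were trained in, while original_input_shape now arrives in the execution layout, which is why the "Must be in NCHW format" notes disappear from the headers. Second, the two factors drive a per-row permutation performed by the convert_fc_weights kernel, which is not part of this diff; a standalone sketch of that mapping, under the assumption that the kernel moves source row r to row (r % FACTOR_1) * FACTOR_2 + r / FACTOR_1:

    // Illustration only, not the library kernel. For NCHW-trained weights a
    // flattened input index is c * HW + hw, while NHWC execution wants
    // hw * C + c. With factor_1 = HW and factor_2 = C the remap below does
    // exactly that conversion.
    unsigned int remap_row(unsigned int r, unsigned int factor_1, unsigned int factor_2)
    {
        return (r % factor_1) * factor_2 + r / factor_1;
    }

    // Example with HW = 4, C = 2: NCHW row 5 (c = 1, hw = 1) lands on
    // remap_row(5, 4, 2) == 1 * 2 + 1 == 3, i.e. NHWC row hw * C + c.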
diff --git a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
index be5e6436b3..198565b1d5 100644
--- a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
@@ -37,25 +37,26 @@ void NEConvertFullyConnectedWeightsKernel::configure(const ITensor *input, ITens
DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ // Output tensor auto initialisation if not yet initialized
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+
ARM_COMPUTE_ERROR_THROW_ON(NEConvertFullyConnectedWeightsKernel::validate(input->info(), output->info(), original_input_shape, data_layout));
_input = input;
_output = output;
- const unsigned int num_elems_per_input_plane = original_input_shape.x() * original_input_shape.y();
- const unsigned int num_channels = original_input_shape.z();
+ const DataLayout input_data_layout = (data_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
- // Set build options
- if(data_layout == DataLayout::NCHW)
- {
- _factor1 = num_elems_per_input_plane;
- _factor2 = num_channels;
- }
- else
- {
- _factor1 = num_channels;
- _factor2 = num_elems_per_input_plane;
- }
+ const int width_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::WIDTH);
+ const int height_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::HEIGHT);
+ const int channel_idx = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::CHANNEL);
+
+ const unsigned int num_elems_per_input_plane = original_input_shape[width_idx] * original_input_shape[height_idx];
+ const unsigned int num_channels = original_input_shape[channel_idx];
+
+ _factor1 = (data_layout == DataLayout::NCHW) ? num_elems_per_input_plane : num_channels;
+ _factor2 = (data_layout == DataLayout::NCHW) ? num_channels : num_elems_per_input_plane;
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
@@ -65,14 +66,22 @@ void NEConvertFullyConnectedWeightsKernel::configure(const ITensor *input, ITens
Status NEConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape,
DataLayout data_layout)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8,
+ DataType::U16, DataType::S16,
+ DataType::U32, DataType::S32,
DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != original_input_shape.total_size_lower(3));
ARM_COMPUTE_RETURN_ERROR_ON(data_layout == DataLayout::UNKNOWN);
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Status{};
}
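On both backends the output is now auto-initialised from the input inside configure(), and validate() only cross-checks the output once it carries a shape. A sketch of what this enables, assuming NEON tensors with placeholder names:

    // The destination can be left uninitialised; configure() derives its
    // info from the weights via auto_init_if_empty().
    Tensor converted_weights;
    NEConvertFullyConnectedWeights convert;
    convert.configure(&weights, &converted_weights, original_input_shape, DataLayout::NCHW);
    converted_weights.allocator()->allocate();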
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 3b906c0ffd..d26039ec35 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -446,8 +446,13 @@ NodeID GraphBuilder::add_fully_connected_layer(Graph &g, NodeParams params, Node
b_nid = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor));
}
- // Create convolution node and connect
- NodeID fc_nid = g.add_node<FullyConnectedLayerNode>(num_outputs);
+ // Add fully connected info
+ // FIXME (COMPMID-1367) : Expose weights layout
+ FullyConnectedLayerInfo fc_info;
+ fc_info.weights_trained_layout = DataLayout::NCHW;
+
+ // Create fully connected node and connect
+ NodeID fc_nid = g.add_node<FullyConnectedLayerNode>(num_outputs, fc_info);
g.add_connection(input.node_id, input.index, fc_nid, 0);
g.add_connection(w_nid, 0, fc_nid, 1);
if(has_bias)
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index d94a7851ff..6ba4eefa88 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -31,8 +31,8 @@ namespace arm_compute
{
namespace graph
{
-FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
- : _num_outputs(num_outputs)
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs, FullyConnectedLayerInfo fc_info)
+ : _num_outputs(num_outputs), _info(fc_info)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -75,6 +75,11 @@ TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const Tensor
return output_descriptor;
}
+FullyConnectedLayerInfo FullyConnectedLayerNode::info() const
+{
+ return _info;
+}
+
bool FullyConnectedLayerNode::forward_descriptors()
{
if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 273ef96a03..ccd7813fbc 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -73,8 +73,9 @@ Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, c
}
CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _im2col_kernel(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
- _im2col_output(), _gemmlowp_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _original_weights(nullptr)
+ : _memory_group(memory_manager), _im2col_kernel(), _convert_weights(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
+ _accumulate_biases_kernel(), _im2col_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
+ _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}
@@ -112,7 +113,7 @@ void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLT
// Initialize output tensor for im2col
TensorShape shape_im2col = compute_im2col_fc_shape(input->info());
- _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));
+ _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col).set_data_layout(DataLayout::NCHW));
// Configure im2col kernel
_memory_group.manage(&_im2col_output);
@@ -134,8 +135,8 @@ void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTen
configure_mm(input, weights, output);
}
-void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights, bool are_weights_reshaped,
- bool retain_internal_weights)
+void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ FullyConnectedLayerInfo fc_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
@@ -144,15 +145,15 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
weights->info(),
biases != nullptr ? biases->info() : nullptr,
output->info(),
- transpose_weights,
- are_weights_reshaped,
- retain_internal_weights));
+ fc_info));
- _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
- _is_fc_after_conv = true;
- _accumulate_biases = false;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- _original_weights = weights;
+ _are_weights_converted = true;
+ _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
+ _is_fc_after_conv = true;
+ _accumulate_biases = false;
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+ _is_prepared = false;
+ _original_weights = weights;
// Configure gemmlowp output
if(_is_quantized)
@@ -172,25 +173,16 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
_accumulate_biases_kernel.configure(output, biases);
}
+ const ICLTensor *weights_to_use = weights;
+
// With the Fully Connected layer we can have 4 different cases:
// 1) Convolution layer -> Fully Connected layer without batches
// 2) Fully Connected layer -> Fully Connected layer without batches
// 3) Convolution layer -> Fully Connected layer with batches
// 4) Fully Connected layer -> Fully Connected layer with batches
- const ICLTensor *weights_to_use = weights;
-
- if(!_are_weights_reshaped)
- {
- weights_to_use = &_reshape_weights_output;
-
- // Reshape the weights
- _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
- }
-
// Check if we have a fully connected layer with batches
const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
-
if(is_batched_fc_layer)
{
_is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
@@ -202,6 +194,28 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
_is_fc_after_conv = input->info()->num_dimensions() > 1;
}
+ // Reshape weights if needed
+ if(!_are_weights_reshaped)
+ {
+ // Reshape the weights
+ _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
+ weights_to_use = &_reshape_weights_output;
+ }
+
+ // Convert weights if needed
+ if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
+ {
+ // Convert weights
+ _convert_weights.configure(weights_to_use,
+ &_converted_weights_output,
+ input->info()->tensor_shape(),
+ fc_info.weights_trained_layout);
+
+ weights_to_use = &_converted_weights_output;
+ _are_weights_converted = false;
+ }
+
+ // Configure fc core
ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
if(_is_fc_after_conv)
{
@@ -224,26 +238,26 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
_gemmlowp_output.allocator()->allocate();
}
- _are_weights_reshaped = _are_weights_reshaped || retain_internal_weights;
+ _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}
-Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights, bool are_weights_reshaped,
- bool retain_internal_weights)
+Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+ FullyConnectedLayerInfo fc_info)
{
- ARM_COMPUTE_UNUSED(retain_internal_weights);
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
- bool weights_reshaped = transpose_weights ? are_weights_reshaped : true;
+ bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
bool is_fc_after_conv = true;
bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
const GPUTarget gpu_target = CLScheduler::get().target();
- const ITensorInfo &im2col_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)));
- const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
- const ITensorInfo &gemmlowp_output = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
+ const ITensorInfo &im2col_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)).set_data_layout(DataLayout::NCHW));
+ const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
+ const ITensorInfo &converted_weights = TensorInfo(reshaped_weights.clone()->set_is_resizable(true).reset_padding());
+ const ITensorInfo &gemmlowp_output = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
// Configure accumulate biases kernel for non quantized asymmetric types
if(biases != nullptr && !is_quantized)
@@ -262,16 +276,8 @@ Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
const ITensorInfo *weights_to_use = weights;
const ITensorInfo *tmp_output = (is_quantized) ? &gemmlowp_output : output;
- if(!weights_reshaped)
- {
- // Validate reshape weights kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
- weights_to_use = &reshaped_weights;
- }
-
// Check if we have a fully connected layer with batches
const bool is_batched_fc_layer = output->dimension(1) > 1;
-
if(is_batched_fc_layer)
{
is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
@@ -283,6 +289,23 @@ Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
is_fc_after_conv = input->num_dimensions() > 1;
}
+ if(!weights_reshaped)
+ {
+ // Validate reshape weights kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
+ weights_to_use = &reshaped_weights;
+ }
+
+ if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
+ {
+ // Validate convert weights kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
+ &converted_weights,
+ input->tensor_shape(),
+ fc_info.weights_trained_layout));
+ weights_to_use = &converted_weights;
+ }
+
if(is_fc_after_conv)
{
// Fully Connected layer after a Convolution Layer without batches
@@ -349,27 +372,57 @@ void CLFullyConnectedLayer::run()
void CLFullyConnectedLayer::prepare()
{
- // Reshape of the weights (happens only once)
- if(!_are_weights_reshaped)
+ if(!_is_prepared)
{
ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
- // Run reshape weights kernel and mark weights as unused
- _reshape_weights_output.allocator()->allocate();
- _reshape_weights_kernel.run();
- _original_weights->mark_as_unused();
+ auto release_unused = [](CLTensor * w)
+ {
+ if(!w->is_used())
+ {
+ CLScheduler::get().queue().finish();
+ w->allocator()->free();
+ }
+ };
+
+ // Pointer to current weights
+ const ICLTensor *cur_weights = _original_weights;
+
+ // Reshape the weights if needed (happens only once)
+ if(!_are_weights_reshaped)
+ {
+ // Run reshape weights kernel and mark weights as unused
+ _reshape_weights_output.allocator()->allocate();
+ _reshape_weights_kernel.run();
+
+ cur_weights->mark_as_unused();
+ cur_weights = &_reshape_weights_output;
+ _are_weights_reshaped = true;
+ }
+
+ // Convert weights if needed (happens only once)
+ if(!_are_weights_converted)
+ {
+ _converted_weights_output.allocator()->allocate();
+ _convert_weights.run();
+
+ cur_weights->mark_as_unused();
+ _are_weights_converted = true;
+ }
+
+ // Release reshaped weights if unused
+ release_unused(&_reshape_weights_output);
// Prepare GEMM and release its unused weights
if(!_is_quantized)
{
_mm_gemm.prepare();
- if(!_reshape_weights_output.is_used())
- {
- _reshape_weights_output.allocator()->free();
- }
}
- CLScheduler::get().queue().finish();
- _are_weights_reshaped = true;
+ // Release reshaped and converted weights if unused
+ release_unused(&_reshape_weights_output);
+ release_unused(&_converted_weights_output);
+
+ _is_prepared = true;
}
}
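For reference, a minimal caller-side sketch of the reworked CL entry point (tensor names are hypothetical; the FullyConnectedLayerInfo fields are the ones this patch introduces):

    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights      = true;             // weights arrive non-transposed
    fc_info.are_weights_reshaped   = false;            // let the function reshape them
    fc_info.weights_trained_layout = DataLayout::NCHW; // layout the weights were trained in

    // If the network executes in NHWC, configure() now inserts the
    // CLConvertFullyConnectedWeights step shown above; prepare() then runs the
    // reshape/convert kernels once and frees whichever intermediate goes unused.
    CLFullyConnectedLayer fc;
    fc.configure(&src, &weights, &bias, &dst, fc_info);
    fc.run();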
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index 872325175d..d384400ed3 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -90,7 +90,7 @@ void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_fo
// Configure block that calculates the forget gate
// forget_gate = Activation(input * input_to_forget_weights + output_state * recurrent_to_forget_weights + PixelWiseMul(cell_state, cell_to_forget_weights) + forget_gate_bias)
_memory_group.manage(&_forget_gate_out1);
- _fully_connected_forget_gate.configure(input, input_to_forget_weights, forget_gate_bias, &_forget_gate_out1, true, false);
+ _fully_connected_forget_gate.configure(input, input_to_forget_weights, forget_gate_bias, &_forget_gate_out1);
_memory_group.manage(&_forget_gate_out2);
_transpose_forget_gate.configure(recurrent_to_forget_weights, &_forget_gate_out2);
_memory_group.manage(&_forget_gate_out3);
@@ -142,7 +142,7 @@ void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_fo
_input_gate_out5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
_memory_group.manage(&_input_gate_out1);
- _fully_connected_input_gate.configure(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), &_input_gate_out1, true, false);
+ _fully_connected_input_gate.configure(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), &_input_gate_out1);
_memory_group.manage(&_input_gate_out2);
_transpose_input_gate.configure(lstm_params.recurrent_to_input_weights(), &_input_gate_out2);
_memory_group.manage(&_input_gate_out3);
@@ -169,7 +169,7 @@ void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_fo
// Configure block that calculates the cell state
// cell_state = Clip((PixelwiseMul(input_gate, Activation(input * input_to_cell_weights + output_state * recurrent_to_cell_weights + cell_bias)) + PixelwiseMul(forget_gate, cell_state)), cell_threshold)
_memory_group.manage(&_cell_state_out1);
- _fully_connected_cell_state.configure(input, input_to_cell_weights, cell_bias, &_cell_state_out1, true, false);
+ _fully_connected_cell_state.configure(input, input_to_cell_weights, cell_bias, &_cell_state_out1);
_memory_group.manage(&_cell_state_out2);
_transpose_cell_state.configure(recurrent_to_cell_weights, &_cell_state_out2);
_memory_group.manage(&_cell_state_out3);
@@ -204,7 +204,7 @@ void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_fo
// Configure block that calculates the output
// output_state = Activation(input * input_to_output_weights + output_state * recurrent_to_output_weights + PixelWiseMul(cell_state, cell_to_output_weights) + output_gate_bias)
_memory_group.manage(&_output1);
- _fully_connected_output.configure(input, input_to_output_weights, output_gate_bias, &_output1, true, false);
+ _fully_connected_output.configure(input, input_to_output_weights, output_gate_bias, &_output1);
_memory_group.manage(&_output2);
_transpose_output.configure(recurrent_to_output_weights, &_output2);
_memory_group.manage(&_output3);
@@ -255,7 +255,7 @@ void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_fo
_has_projection_weights = true;
_output_projection1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
_memory_group.manage(&_output_projection1);
- _fully_connected_output_state.configure(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), &_output_projection1, true, false);
+ _fully_connected_output_state.configure(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), &_output_projection1);
// Perform clipping
if(projection_threshold != 0.f)
{
@@ -326,7 +326,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_
const TensorInfo num_units_transposed_info = TensorInfo(num_units_transposed_shape, 1, input->data_type());
// Validate forget gate
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_forget_weights, forget_gate_bias, cell_state, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_forget_weights, forget_gate_bias, cell_state));
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(output_state, &units_out_transposed_info, nullptr, cell_state, 1.f, 0.f, GEMMInfo()));
ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(cell_state, cell_state, cell_state, ConvertPolicy::SATURATE));
if(lstm_params.has_peephole_opt())
@@ -344,7 +344,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_
ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.recurrent_to_input_weights()->num_dimensions() > 2);
ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_input_weights()->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.input_gate_bias()->num_dimensions() > 1);
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), cell_state, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), cell_state));
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(cell_state, &num_units_transposed_info, nullptr, &gemmv_shape_info, 1.f, 0.f, GEMMInfo()));
ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(cell_state, &gemmv_shape_info, cell_state, ConvertPolicy::SATURATE));
ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));
@@ -355,7 +355,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_
}
// Validate cell state
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_cell_weights, cell_bias, cell_state, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_cell_weights, cell_bias, cell_state));
ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, activation_info));
ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(cell_state, cell_state, cell_state, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN));
@@ -364,7 +364,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_
ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold, cell_threshold)));
}
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_output_weights, output_gate_bias, cell_state, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_output_weights, output_gate_bias, cell_state));
if(lstm_params.has_peephole_opt())
{
ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(cell_state, cell_state, cell_state, ConvertPolicy::SATURATE));
@@ -376,7 +376,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_
ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(cell_state, output, output_state, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN));
if(lstm_params.has_projection())
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), cell_state, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), cell_state));
if(projection_threshold != 0.f)
{
ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, output_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold,
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index 0e1b9d5b58..1809e6e64e 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -58,7 +58,7 @@ Status CLRNNLayer::validate(const ITensorInfo *input, const ITensorInfo *weights
auto shape_info = TensorInfo(compute_rnn_shape(recurrent_weights, hidden_state->dimension(idx_height)), 1, input->data_type());
- ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, weights, bias, &shape_info, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, weights, bias, &shape_info));
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(hidden_state, recurrent_weights, nullptr, &shape_info, 1.f, 0.f));
ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(&shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE));
ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(&shape_info, &shape_info, info));
@@ -82,7 +82,7 @@ void CLRNNLayer::configure(const ICLTensor *input, const ICLTensor *weights, con
// Manage intermediate buffers and configure
_memory_group.manage(&_fully_connected_out);
- _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out, true, false);
+ _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out);
_memory_group.manage(&_gemm_output);
_gemm_state_f.configure(hidden_state, recurrent_weights, nullptr, &_gemm_output, 1.f, 0.f);
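The LSTM and RNN call sites above simply drop the old (true, false) flag pair; a default-constructed FullyConnectedLayerInfo is assumed to encode the same behaviour. Roughly, per the Types.h addition in this patch (paraphrased, not verbatim):

    struct FullyConnectedLayerInfo
    {
        DataLayout weights_trained_layout{ DataLayout::NCHW }; // layout the weights were trained with
        bool       transpose_weights{ true };                  // transpose weights if true
        bool       are_weights_reshaped{ false };              // weights already reshaped?
        bool       retain_internal_weights{ false };           // keep internal weights on reconfigure
    };

so fc.configure(input, weights, bias, output) is equivalent to the previous fc.configure(input, weights, bias, output, true, false).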
diff --git a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
index ab2c6c2813..6b8e341b14 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
@@ -80,14 +80,14 @@ void GCFullyConnectedLayer::configure_fc_fc(const IGCTensor *input, const IGCTen
}
void GCFullyConnectedLayer::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output,
- bool transpose_weights, bool are_weights_reshaped, bool retain_internal_weights)
+ FullyConnectedLayerInfo fc_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 2);
_original_weights = weights;
- _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
+ _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
_is_fc_after_conv = true;
_accumulate_biases = false;
@@ -142,8 +142,8 @@ void GCFullyConnectedLayer::configure(const IGCTensor *input, const IGCTensor *w
configure_fc_fc(input, weights_to_use, output);
}
- ARM_COMPUTE_ERROR_ON(retain_internal_weights && _reshape_weights_output.gc_buffer() == 0);
- _are_weights_reshaped = _are_weights_reshaped || retain_internal_weights;
+ ARM_COMPUTE_ERROR_ON(fc_info.retain_internal_weights && _reshape_weights_output.gc_buffer() == 0);
+ _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}
void GCFullyConnectedLayer::run()
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 3126823e9c..1aab3a05e0 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -136,7 +136,8 @@ NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> mem
{
}
-void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights, bool are_weights_reshaped)
+void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
+ FullyConnectedLayerInfo fc_info)
{
// With the Fully Connected layer we can have 4 different cases:
// 1) Convolution layer -> Fully Connected layer without batches
@@ -156,8 +157,7 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
weights->info(),
biases != nullptr ? biases->info() : nullptr,
output->info(),
- transpose_weights,
- are_weights_reshaped));
+ fc_info));
const int num_batch_dimensions = std::max(0, static_cast<int>(output->info()->tensor_shape().num_dimensions()) - 1);
const int num_input_dimensions = input->info()->tensor_shape().num_dimensions() - num_batch_dimensions;
@@ -167,7 +167,7 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
_linearize_input = (input->info()->tensor_shape().x() != linear_input_size) || (num_input_dimensions > 1 && linear_input_size == 1);
_accumulate_biases = biases != nullptr;
_is_batched_fc_layer = num_batch_dimensions > 0;
- _is_prepared = are_weights_reshaped || (!transpose_weights && !_is_batched_fc_layer);
+ _is_prepared = fc_info.are_weights_reshaped || (!fc_info.transpose_weights && !_is_batched_fc_layer);
const size_t interleave_width = 16 / input->info()->element_size();
const ITensor *weights_to_use = weights;
@@ -177,11 +177,11 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
weights_to_use = &_reshape_weights_output;
_reshape_weights_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_fully_connected_reshaped_weights_shape(weights->info(),
- transpose_weights,
+ fc_info.transpose_weights,
_is_batched_fc_layer, interleave_width)));
// Reshape the weights
- _reshape_weights_function.configure(weights, &_reshape_weights_output, transpose_weights, _is_batched_fc_layer);
+ _reshape_weights_function.configure(weights, &_reshape_weights_output, fc_info.transpose_weights, _is_batched_fc_layer);
}
const ITensor *multiply_input = input;
@@ -231,7 +231,8 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
}
}
-Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights, bool are_weights_reshaped)
+Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+ FullyConnectedLayerInfo fc_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
@@ -251,11 +252,11 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
const ITensorInfo *weights_to_use = weights;
std::unique_ptr<ITensorInfo> reshape_weights_output = input->clone();
- if(!are_weights_reshaped && (transpose_weights || is_batched_fc_layer))
+ if(!fc_info.are_weights_reshaped && (fc_info.transpose_weights || is_batched_fc_layer))
{
- reshape_weights_output->set_tensor_shape(compute_fully_connected_reshaped_weights_shape(weights, transpose_weights, is_batched_fc_layer, interleave_width));
+ reshape_weights_output->set_tensor_shape(compute_fully_connected_reshaped_weights_shape(weights, fc_info.transpose_weights, is_batched_fc_layer, interleave_width));
- ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, reshape_weights_output.get(), transpose_weights, is_batched_fc_layer));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, reshape_weights_output.get(), fc_info.transpose_weights, is_batched_fc_layer));
weights_to_use = reshape_weights_output.get();
}
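As on the CL side, a caller-side sketch of the NEON validate-then-configure pattern (hypothetical tensor names):

    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights    = true;
    fc_info.are_weights_reshaped = false;

    // validate() checks the whole configuration without allocating anything.
    Status status = NEFullyConnectedLayer::validate(src.info(), weights.info(),
                                                    bias.info(), dst.info(), fc_info);
    if(bool(status))
    {
        NEFullyConnectedLayer fc;
        fc.configure(&src, &weights, &bias, &dst, fc_info);
    }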
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index 08017e20c3..f77566a108 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -57,7 +57,7 @@ Status NERNNLayer::validate(const ITensorInfo *input, const ITensorInfo *weights
auto shape_info = TensorInfo(misc::shape_calculator::compute_rnn_shape(recurrent_weights, hidden_state->dimension(idx_height)), 1, input->data_type());
- ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, weights, bias, &shape_info, true, false));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, weights, bias, &shape_info));
ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate(&shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE));
ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&shape_info, &shape_info, info));
@@ -79,7 +79,7 @@ void NERNNLayer::configure(const ITensor *input, const ITensor *weights, const I
// Manage intermediate buffers and configure
_fully_connected_out.allocator()->init(TensorInfo(shape, 1, input->info()->data_type()));
_memory_group.manage(&_fully_connected_out);
- _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out, true, false);
+ _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out);
_gemm_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type()));
_memory_group.manage(&_gemm_output);
diff --git a/tests/networks/AlexNetNetwork.h b/tests/networks/AlexNetNetwork.h
index e92affe954..e15db2a110 100644
--- a/tests/networks/AlexNetNetwork.h
+++ b/tests/networks/AlexNetNetwork.h
@@ -193,6 +193,9 @@ public:
/** Build the network */
void build()
{
+ FullyConnectedLayerInfo fc_info;
+ fc_info.are_weights_reshaped = _reshaped_weights;
+
input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type));
output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type));
@@ -265,13 +268,13 @@ public:
act5.configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
pool5.configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
// Layer 6
- fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, true, _reshaped_weights);
+ fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, fc_info);
act6.configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
// Layer 7
- fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, true, _reshaped_weights);
+ fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, fc_info);
act7.configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
// Layer 8
- fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, true, _reshaped_weights);
+ fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, fc_info);
// Softmax
smx.configure(&fc8_out, &output);
}
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 9958a88419..cd050e378e 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -96,9 +96,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
const QuantizationInfo src_quantization_info = src.info()->quantization_info();
const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = !reshape_weights;
+
// Create and configure function.
CLFullyConnectedLayer fc;
- fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+ fc.configure(&src, &weights, &bias, &dst, fc_info);
// Validate valid region
const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
@@ -141,12 +146,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("Expected", { false, true, true, false, false })),
input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = reshaped_weights;
+
Status status = CLFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false),
&weights_info.clone()->set_is_resizable(false),
&bias_info.clone()->set_is_resizable(false),
&output_info.clone()->set_is_resizable(false),
- transpose_weights,
- reshaped_weights);
+ fc_info);
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
index 49716dc946..c82a8a1a43 100644
--- a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
@@ -87,9 +87,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = !reshape_weights;
+
// Create and configure function.
GCFullyConnectedLayer fc;
- fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+ fc.configure(&src, &weights, &bias, &dst, fc_info);
// Validate valid region
const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 174778b8ef..80fdf1784e 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -99,9 +99,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = !reshape_weights;
+
// Create and configure function.
NEFullyConnectedLayer fc;
- fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+ fc.configure(&src, &weights, &bias, &dst, fc_info);
// Validate valid region
const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
@@ -144,7 +149,12 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("Expected", { false, true, true, false, false, true })),
input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
- Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), transpose_weights, reshaped_weights);
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = reshaped_weights;
+
+ Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), fc_info);
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 895e43b735..18321480f8 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -130,9 +130,14 @@ protected:
TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);
+ // Create Fully Connected layer info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.transpose_weights = transpose_weights;
+ fc_info.are_weights_reshaped = !reshape_weights;
+
// Create and configure function.
FunctionType fc;
- fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+ fc.configure(&src, &weights, &bias, &dst, fc_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
index 21ad42bf77..d8e2b0b427 100644
--- a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
+++ b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
@@ -239,9 +239,13 @@ protected:
dst.allocator()->info().set_tensor_shape(TensorShape(24U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
dst.allocator()->info().set_is_resizable(false);
+ // Configure FC info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.retain_internal_weights = true;
+
// Configure functions (2nd iteration)
- fc_layer_1.configure(&src, &w1, &b1, &fc1, true, false, true);
- fc_layer_2.configure(&fc1, &w2, &b2, &dst, true, false, true);
+ fc_layer_1.configure(&src, &w1, &b1, &fc1, fc_info);
+ fc_layer_2.configure(&fc1, &w2, &b2, &dst, fc_info);
// Fill tensors (2nd iteration)
fill(AccessorType(src), 5);
@@ -357,6 +361,10 @@ protected:
// Get padding requirements
auto fc_padding = fc.allocator()->info().padding();
+ // Configure FC info
+ FullyConnectedLayerInfo fc_info;
+ fc_info.retain_internal_weights = true;
+
// Run remaining iterations
for(int i = _max_batches; i >= static_cast<int>(_cur_batches); --i)
{
@@ -368,7 +376,7 @@ protected:
dst.allocator()->info().set_tensor_shape(TensorShape(8U, i));
// Configure functions
- fc_layer.configure(&src, &w, &b, &fc, true, false, true);
+ fc_layer.configure(&src, &w, &b, &fc, fc_info);
smx_layer.configure(&fc, &dst);
// Fill tensors
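retain_internal_weights is what lets these fixtures reconfigure a live function for a new batch size without redoing the weight preparation. A hedged sketch of the intended sequence (names hypothetical):

    // First configuration: weights get reshaped during prepare()/run() as usual.
    FullyConnectedLayerInfo fc_info;
    fc_layer.configure(&src, &w, &b, &fc, fc_info);
    fc_layer.run();

    // Reconfigure for a smaller batch, keeping the weights already reshaped
    // inside the function instead of recomputing them.
    fc_info.retain_internal_weights = true;
    fc_layer.configure(&src, &w, &b, &fc, fc_info);
    fc_layer.run();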
diff --git a/tests/validation/reference/ConvertFullyConnectedWeights.cpp b/tests/validation/reference/ConvertFullyConnectedWeights.cpp
index b0f537fa0c..e27846c726 100644
--- a/tests/validation/reference/ConvertFullyConnectedWeights.cpp
+++ b/tests/validation/reference/ConvertFullyConnectedWeights.cpp
@@ -36,9 +36,15 @@ SimpleTensor<T> convert_fully_connected_weights(const SimpleTensor<T> &src, cons
{
SimpleTensor<T> dst(src.shape(), src.data_type());
+ const DataLayout original_input_data_layout = (training_data_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
+
+ const int width_idx = get_data_layout_dimension_index(original_input_data_layout, DataLayoutDimension::WIDTH);
+ const int height_idx = get_data_layout_dimension_index(original_input_data_layout, DataLayoutDimension::HEIGHT);
+ const int channel_idx = get_data_layout_dimension_index(original_input_data_layout, DataLayoutDimension::CHANNEL);
+
const bool is_nchw_to_nhwc = training_data_layout == DataLayout::NCHW;
- const unsigned int num_elems_per_input_plane = original_input_shape.x() * original_input_shape.y();
- const unsigned int num_channels = original_input_shape.z();
+ const unsigned int num_elems_per_input_plane = original_input_shape[width_idx] * original_input_shape[height_idx];
+ const unsigned int num_channels = original_input_shape[channel_idx];
const unsigned int factor_1 = is_nchw_to_nhwc ? num_elems_per_input_plane : num_channels;
const unsigned int factor_2 = is_nchw_to_nhwc ? num_channels : num_elems_per_input_plane;
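The factor_1/factor_2 pair collapses both conversion directions into one index formula. Treating a weight row index as a flattened input position, NCHW stores an element at c * (W*H) + p and NHWC at p * C + c, where p is the offset within one W*H plane; either remapping reduces to the sketch below (illustrative, not the verbatim reference loop):

    // NCHW -> NHWC: factor_1 = W*H, factor_2 = C, so
    //   y_in = c * (W*H) + p   maps to   y_out = p * C + c.
    // NHWC -> NCHW swaps the factors and the same expression applies.
    unsigned int remap_row(unsigned int y_in, unsigned int factor_1, unsigned int factor_2)
    {
        return (y_in % factor_1) * factor_2 + y_in / factor_1;
    }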
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index 0ce6076f6e..46b748bce6 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -85,9 +85,11 @@ void CaffePreproccessor::preprocess(ITensor &tensor)
Window window;
window.use_tensor_dimensions(tensor.info()->tensor_shape());
+ const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);
+
execute_window_loop(window, [&](const Coordinates & id)
{
- const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - _mean[id.z()];
+ const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - _mean[id[channel_idx]];
*reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value;
});
}
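The mean subtraction is now layout-aware: instead of hard-coding id.z(), the channel coordinate is looked up from the tensor's data layout. Assuming arm_compute's innermost-first shape convention of [W, H, C, N] for NCHW and [C, W, H, N] for NHWC, this resolves as:

    // NCHW: channel_idx == 2, i.e. the old id.z()
    // NHWC: channel_idx == 0
    const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(),
                                                            DataLayoutDimension::CHANNEL);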
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 8558b9066c..6d537865ba 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -474,6 +474,24 @@ inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std
}
}
+/** Permutes a given tensor shape according to the input and output data layouts
+ *
+ * @param[in] tensor_shape Tensor shape to permute
+ * @param[in] in_data_layout Input tensor shape data layout
+ * @param[in] out_data_layout Output tensor shape data layout
+ *
+ * @return Permuted tensor shape
+ */
+inline TensorShape permute_shape(TensorShape tensor_shape, DataLayout in_data_layout, DataLayout out_data_layout)
+{
+ if(in_data_layout != out_data_layout)
+ {
+ arm_compute::PermutationVector perm_vec = (in_data_layout == DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
+ arm_compute::permute(tensor_shape, perm_vec);
+ }
+ return tensor_shape;
+}
+
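A quick usage sketch for the new helper (values hypothetical; assumes the innermost-first shape convention noted above):

    // An NCHW shape is stored innermost-first as [W, H, C]:
    TensorShape nchw_shape(224U, 224U, 3U);

    // Re-expressed for an NHWC graph: [C, W, H] = [3, 224, 224].
    TensorShape nhwc_shape = permute_shape(nchw_shape, DataLayout::NCHW, DataLayout::NHWC);

    // Identical layouts return the shape unchanged.
    TensorShape same = permute_shape(nchw_shape, DataLayout::NCHW, DataLayout::NCHW);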
/** Utility function to return the TargetHint
*
 * @param[in] target Integer value expressing the selected target. Must be 0 for NEON, 1 for OpenCL or 2 for OpenCL with the tuner