diff options
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/TensorShape.h | 15 | +++++++++++++--
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h | 7 | +------
2 files changed, 14 insertions, 8 deletions
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h index 6cf08de114..8d15c50220 100644 --- a/arm_compute/core/TensorShape.h +++ b/arm_compute/core/TensorShape.h @@ -138,17 +138,28 @@ public: } /** Collapses given dimension and above. * - * @note Precondition: dimension < TensorShape::num_max_dimensions - * * @param[in] dimension Size of the wanted dimension * * @return The linear size of the collapsed dimensions */ size_t total_size_upper(size_t dimension) const { + ARM_COMPUTE_ERROR_ON(dimension >= TensorShape::num_max_dimensions); return std::accumulate(_id.begin() + dimension, _id.end(), 1, std::multiplies<size_t>()); } + /** Compute size of dimensions lower than the given one. + * + * @param[in] dimension Upper boundary. + * + * @return The linear size of the collapsed dimensions. + */ + size_t total_size_lower(size_t dimension) const + { + ARM_COMPUTE_ERROR_ON(dimension > TensorShape::num_max_dimensions); + return std::accumulate(_id.begin(), _id.begin() + dimension, 1, std::multiplies<size_t>()); + } + private: /** Remove trailing dimensions of size 1 from the reported number of dimensions. 
*/ void apply_dimension_correction() diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h index af571d1057..08099b8539 100644 --- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h +++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h @@ -97,11 +97,6 @@ public: void run() override; private: - void configure_fc_fc_wb(const ITensor *input, const ITensor *weights, ITensor *output); - void configure_fc_fc_nb(const ITensor *input, const ITensor *weights, ITensor *output); - void configure_conv_fc_wb(const ITensor *input, const ITensor *weights, ITensor *output); - void configure_conv_fc_nb(const ITensor *input, const ITensor *weights, ITensor *output); - NEIm2ColKernel _im2col_kernel; NEFullyConnectedLayerReshapeWeights _reshape_weights_kernel; NEGEMMInterleave4x4Kernel _interleave4x4_kernel; @@ -111,8 +106,8 @@ private: Tensor _interleave4x4_output; Tensor _reshape_weights_output; bool _are_weights_reshaped; - bool _is_fc_after_conv; bool _is_batched_fc_layer; + bool _linearize_input; bool _accumulate_biases; }; } |