author     Pablo Tello <pablo.tello@arm.com>          2018-03-14 17:55:27 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:49:16 +0000
commit     7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7 (patch)
tree       ded71e1cfa8e0c085f8bce5dfc26a99786d60e52 /tests/validation/fixtures/ConvolutionLayerFixture.h
parent     1562be3e8a449360a90af75f6f1481a30d41be75 (diff)
download   ComputeLibrary-7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7.tar.gz
COMPMID-1021: CPUInfo refactoring.
Removed CPUTarget in favor of the CPUModel type. CPUInfo now holds a vector of N CPUs.
CPUInfo auto-initialises upon construction with 1 GENERIC CPU. CPPScheduler fills CPUInfo's
vector upon construction (at runtime). IScheduler has a single CPUInfo object and ThreadInfo
always gets a pointer to it (to avoid copying the vector).

Change-Id: I30f293258c959c87f6bac5eac8b963beb6a4d365
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/124626
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
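For orientation, a minimal sketch of the shape this commit message describes, assuming
illustrative names (the enum values beyond GENERIC, the setter names and the ThreadInfo
fields are placeholders, not the library's actual declarations):

    #include <cstddef>
    #include <vector>

    // Sketch only: CPUModel replaces the removed CPUTarget; GENERIC is the default model.
    enum class CPUModel
    {
        GENERIC
        // further per-core models would be listed here
    };

    class CPUInfo
    {
    public:
        CPUInfo() : _cpus(1, CPUModel::GENERIC) {} // auto-initialise with 1 GENERIC CPU
        // Filled in by the scheduler at runtime once the real core count/models are known.
        void     set_cpu_num(std::size_t n)                 { _cpus.resize(n, CPUModel::GENERIC); }
        void     set_cpu_model(std::size_t cpu, CPUModel m) { _cpus[cpu] = m; }
        CPUModel cpu_model(std::size_t cpu) const           { return _cpus[cpu]; }

    private:
        std::vector<CPUModel> _cpus; // one entry per CPU
    };

    // IScheduler owns one CPUInfo; ThreadInfo only carries a pointer so the vector is never copied.
    struct ThreadInfo
    {
        int            thread_id{ 0 };
        int            num_threads{ 1 };
        const CPUInfo *cpu_info{ nullptr };
    };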
Diffstat (limited to 'tests/validation/fixtures/ConvolutionLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h  |  17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 6a100acef3..3d073e3f79 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -100,6 +100,8 @@ protected:
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
bool reshape_weights, const Size2D &dilation)
{
+ const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && _data_type == DataType::F32;
+
WeightsInfo weights_info(!reshape_weights, weights_shape.x(), weights_shape.y(), weights_shape[3]);
TensorShape reshaped_weights_shape(weights_shape);
@@ -107,12 +109,6 @@ protected:
{
// Check if its a "fully connected" convolution
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
- bool is_optimised = false;
-#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
-#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
-#endif /* defined(__arm__) || defined(__aarch64__) */
reshaped_weights_shape.collapse(3);
@@ -167,14 +163,7 @@ protected:
if(!reshape_weights)
{
- const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
- bool is_optimised = false;
-#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
-#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
-#endif /* defined(__arm__) || defined(__aarch64__) */
-
+ const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
TensorShape tmp_weights_shape(weights_shape);
SimpleTensor<T> tmp_weights(tmp_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);