author    Moritz Pflanzer <moritz.pflanzer@arm.com>    2017-09-15 10:42:58 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    80373f607cb12693824411510c39e367a4dfbdb5 (patch)
tree      ddc4d038783ed91ff227fb259a85fefc09e46319 /tests
parent    c09314a288dc2aa7ef75a09a8ff5dede3f80974a (diff)
download  ComputeLibrary-80373f607cb12693824411510c39e367a4dfbdb5.tar.gz
COMPMID-481: Add AArch32 GEMM
Change-Id: Idba0b30bfb27866a46a22388014ab81432ea28dc
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86196
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/networks/AlexNetNetwork.h                      | 2 +-
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h  | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/networks/AlexNetNetwork.h b/tests/networks/AlexNetNetwork.h
index 0c06c1860f..448cf31914 100644
--- a/tests/networks/AlexNetNetwork.h
+++ b/tests/networks/AlexNetNetwork.h
@@ -100,7 +100,7 @@ public:
{
auto reshape = [&](unsigned int width, unsigned int height, bool convolution_layer) -> TensorShape
{
- const bool is_optimised = std::is_same<ITensorType, ITensor>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ const bool is_optimised = std::is_same<ITensorType, ITensor>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV7 && data_type == DataType::F32;
if(convolution_layer && is_optimised)
{
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index fcaf4ef42b..434291b58e 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -88,7 +88,7 @@ protected:
{
// Check if its a "fully connected" convolution
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
- const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV7 && data_type == DataType::F32;
reshaped_weights_shape.collapse(3);
@@ -143,7 +143,7 @@ protected:
if(!reshape_weights)
{
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
- const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV7 && data_type == DataType::F32;
TensorShape tmp_weights_shape(weights_shape);
SimpleTensor<T> tmp_weights(tmp_weights_shape, data_type, 1, fixed_point_position);
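For context, both files relax the same guard: the optimised GEMM weight-reshape path, previously reserved for ARMv8 CPUs, is now also taken on ARMv7 (AArch32) targets for F32 data, matching the new AArch32 GEMM kernels. Below is a minimal, self-contained C++ sketch of that decision; the enum and function names are stand-ins for illustration and are not the actual arm_compute CPUTarget/NEScheduler API.

```cpp
// Hypothetical, self-contained sketch of the check this patch relaxes.
// The real code queries NEScheduler::get().cpu_info().CPU and compares it
// against arm_compute's CPUTarget enum; the names below are stand-ins.
#include <iostream>

enum class CpuTarget { ARMV7 = 1, ARMV8 = 2 }; // ordered so ">=" reflects capability
enum class DataType  { F16, F32 };

// Before the patch: only ARMv8 CPUs took the optimised GEMM weight layout.
// After the patch: ARMv7 (AArch32) qualifies as well, still F32 only.
bool uses_optimised_gemm(CpuTarget cpu, DataType dt)
{
    return cpu >= CpuTarget::ARMV7 && dt == DataType::F32;
}

int main()
{
    std::cout << std::boolalpha
              << uses_optimised_gemm(CpuTarget::ARMV7, DataType::F32) << '\n'  // true after the patch
              << uses_optimised_gemm(CpuTarget::ARMV7, DataType::F16) << '\n'; // false: F32 only
}
```

Because the tests mirror this check when computing the expected reshaped weight shapes, relaxing it in the fixtures keeps the reference shapes in sync with the library's new AArch32 behaviour.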