Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/GlobalPoolingLayer.cpp                    |  6
-rw-r--r--  tests/validation/CL/Permute.cpp                               |  5
-rw-r--r--  tests/validation/CPP/Permute.cpp                              |  7
-rw-r--r--  tests/validation/NEON/Permute.cpp                             |  7
-rw-r--r--  tests/validation/UNIT/LifetimeManager.cpp                     |  1
-rw-r--r--  tests/validation/fixtures/DropoutLayerFixture.h               |  3
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h        |  7
-rw-r--r--  tests/validation/fixtures/FuseBatchNormalizationFixture.h     |  4
-rw-r--r--  tests/validation/fixtures/GEMMFixture.h                       | 53
-rw-r--r--  tests/validation/fixtures/GEMMTranspose1xWFixture.h           |  6
-rw-r--r--  tests/validation/fixtures/InstanceNormalizationLayerFixture.h |  4
-rw-r--r--  tests/validation/fixtures/PermuteFixture.h                    |  8
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h               |  2
-rw-r--r--  tests/validation/fixtures/UNIT/DynamicTensorFixture.h         |  1
-rw-r--r--  tests/validation/fixtures/WarpPerspectiveFixture.h            | 12
-rw-r--r--  tests/validation/fixtures/WinogradConvolutionLayerFixture.h   | 22
-rw-r--r--  tests/validation/reference/ColorConvert.cpp                   |  6
-rw-r--r--  tests/validation/reference/ColorConvertHelper.h               |  4
-rw-r--r--  tests/validation/reference/ROIAlignLayer.cpp                  |  2
19 files changed, 77 insertions(+), 83 deletions(-)
diff --git a/tests/validation/CL/GlobalPoolingLayer.cpp b/tests/validation/CL/GlobalPoolingLayer.cpp
index 586be5e041..bd4fb68c77 100644
--- a/tests/validation/CL/GlobalPoolingLayer.cpp
+++ b/tests/validation/CL/GlobalPoolingLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,7 +61,7 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunGlobalPooling, CLGlobalPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(GlobalPoolingLayerDataset, framework::dataset::make("DataType",
DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -71,7 +71,7 @@ TEST_SUITE_END()
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunGlobalPooling, CLGlobalPoolingLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(GlobalPoolingLayerDataset, framework::dataset::make("DataType",
DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
diff --git a/tests/validation/CL/Permute.cpp b/tests/validation/CL/Permute.cpp
index e1908abe2f..8eb302adce 100644
--- a/tests/validation/CL/Permute.cpp
+++ b/tests/validation/CL/Permute.cpp
@@ -62,9 +62,8 @@ const auto PermuteVectors4 = framework::dataset::make("PermutationVector",
PermutationVector(0U, 3U, 2U, 1U)
});
const auto PermuteVectors = concat(PermuteVectors3, PermuteVectors4);
-const auto PermuteInputLayout = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
-const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteInputLayout * PermuteVectors;
-const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteInputLayout * PermuteVectors;
+const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteVectors;
+const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteVectors;
} // namespace
TEST_SUITE(CL)
TEST_SUITE(Permute)
diff --git a/tests/validation/CPP/Permute.cpp b/tests/validation/CPP/Permute.cpp
index 2ba10ec651..3d28df17b0 100644
--- a/tests/validation/CPP/Permute.cpp
+++ b/tests/validation/CPP/Permute.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,9 +51,8 @@ const auto PermuteVectors = framework::dataset::make("PermutationVector",
PermutationVector(1U, 0U, 2U),
PermutationVector(2U, 1U, 0U),
});
-const auto PermuteInputLayout = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
-const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteInputLayout * PermuteVectors;
-const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteInputLayout * PermuteVectors;
+const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteVectors;
+const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteVectors;
} // namespace
TEST_SUITE(CPP)
diff --git a/tests/validation/NEON/Permute.cpp b/tests/validation/NEON/Permute.cpp
index a5a81b7ac3..07578d3896 100644
--- a/tests/validation/NEON/Permute.cpp
+++ b/tests/validation/NEON/Permute.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,9 +62,8 @@ const auto PermuteVectors4 = framework::dataset::make("PermutationVector",
PermutationVector(0U, 3U, 2U, 1U)
});
const auto PermuteVectors = concat(PermuteVectors3, PermuteVectors4);
-const auto PermuteInputLayout = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
-const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteInputLayout * PermuteVectors;
-const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteInputLayout * PermuteVectors;
+const auto PermuteParametersSmall = concat(concat(datasets::Small2DShapes(), datasets::Small3DShapes()), datasets::Small4DShapes()) * PermuteVectors;
+const auto PermuteParametersLarge = datasets::Large4DShapes() * PermuteVectors;
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(Permute)
diff --git a/tests/validation/UNIT/LifetimeManager.cpp b/tests/validation/UNIT/LifetimeManager.cpp
index d4c0a51346..44a52aa5e3 100644
--- a/tests/validation/UNIT/LifetimeManager.cpp
+++ b/tests/validation/UNIT/LifetimeManager.cpp
@@ -46,6 +46,7 @@ class MockMemoryManageable : public IMemoryManageable
public:
void associate_memory_group(IMemoryGroup *memory_group) override
{
+ ARM_COMPUTE_UNUSED(memory_group);
}
};
/** Creates a lifetime of three objects where the two of them can share the same underlying within the given scope
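The empty override above exists only to satisfy the IMemoryManageable interface, so its parameter is deliberately unused; ARM_COMPUTE_UNUSED silences the resulting -Wunused-parameter warning without changing the signature. A minimal sketch of how such a macro is commonly implemented (illustrative only; the names below are hypothetical and may differ from the library's actual definition):

    // Hypothetical sketch: a variadic helper that consumes its arguments and does
    // nothing, so passing a parameter through it counts as a "use" for the compiler.
    namespace example
    {
    template <typename... Ts>
    inline void ignore_unused(Ts &&...)
    {
    }
    } // namespace example
    #define EXAMPLE_UNUSED(...) ::example::ignore_unused(__VA_ARGS__)

    // Usage: keeps the interface signature intact while silencing -Wunused-parameter.
    void associate_memory_group_example(int *memory_group)
    {
        EXAMPLE_UNUSED(memory_group);
    }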
diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h
index 771de30917..be25802650 100644
--- a/tests/validation/fixtures/DropoutLayerFixture.h
+++ b/tests/validation/fixtures/DropoutLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -93,6 +93,7 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
{
+ ARM_COMPUTE_UNUSED(shape, data_type);
}
TensorType _target{};
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 1e4a74445f..0449d80de8 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,7 +64,7 @@ public:
_quantization_info = quantization_info;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape);
}
protected:
@@ -181,8 +181,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
- bool reshape_weights)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape)
{
// Create reference
SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
index 4a81fb0823..780b4a0fb3 100644
--- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h
+++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
@@ -51,7 +51,7 @@ public:
void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta)
{
std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta);
- std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, data_layout, with_bias, with_gamma, with_beta);
+ std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, with_bias, with_gamma, with_beta);
}
protected:
@@ -138,7 +138,7 @@ protected:
return std::make_pair(std::move(in_place_w ? w : w_fused), std::move(in_place_b ? b : b_fused));
}
- std::pair<SimpleTensor<T>, SimpleTensor<T>> compute_reference(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool with_bias, bool with_gamma, bool with_beta)
+ std::pair<SimpleTensor<T>, SimpleTensor<T>> compute_reference(TensorShape shape_w, DataType data_type, bool with_bias, bool with_gamma, bool with_beta)
{
const TensorShape shape_v(shape_w[dims_weights - 1]);
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index bf919c9b09..efe7567075 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -51,8 +51,9 @@ public:
template <typename...>
void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type)
{
- _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, pretranspose, data_type);
- _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
+ ARM_COMPUTE_UNUSED(pretranspose);
+ _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
+ _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type);
}
protected:
@@ -74,7 +75,7 @@ protected:
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
- bool pretranspose, DataType data_type)
+ DataType data_type)
{
// Create tensors
TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
@@ -124,7 +125,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, float alpha, float beta,
DataType data_type)
{
TensorShape shape_a_to_use = shape_a;
@@ -183,7 +184,7 @@ public:
broadcast_bias ? 1 : batch_size);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, fp16_mixed_precision, act_info, gpu_arch);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
}
protected:
@@ -244,7 +245,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -289,6 +290,8 @@ public:
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision,
const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch)
{
+ ARM_COMPUTE_UNUSED(broadcast_bias);
+
// In case of GEMM3D, m is the product between m_w and m_h
const unsigned int m = m_w * m_h;
@@ -298,7 +301,7 @@ public:
const TensorShape bias_shape(n, 1, 1);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, fp16_mixed_precision, act_info, gpu_arch);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
protected:
@@ -355,7 +358,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -421,7 +424,7 @@ public:
broadcast_bias ? 1 : batch_size);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, fp16_mixed_precision, act_info, gpu_arch);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
}
protected:
@@ -494,7 +497,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -539,6 +542,8 @@ public:
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias,
bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch)
{
+ ARM_COMPUTE_UNUSED(broadcast_bias);
+
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = 4;
lhs_info.k0 = 4;
@@ -562,7 +567,7 @@ public:
const TensorShape bias_shape(n, 1, 1);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, fp16_mixed_precision, act_info, gpu_arch);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
protected:
@@ -631,7 +636,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -697,7 +702,7 @@ public:
broadcast_bias ? 1 : batch_size);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
}
protected:
@@ -778,7 +783,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -854,7 +859,7 @@ public:
const TensorShape bias_shape(n, 1, 1);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
protected:
@@ -931,7 +936,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -1001,7 +1006,7 @@ public:
broadcast_bias ? 1 : batch_size);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
}
protected:
@@ -1075,7 +1080,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -1140,7 +1145,7 @@ public:
const TensorShape bias_shape(n, 1, 1);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
protected:
@@ -1211,7 +1216,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -1271,7 +1276,7 @@ public:
broadcast_bias ? 1 : batch_size);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
}
protected:
@@ -1337,7 +1342,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
@@ -1399,7 +1404,7 @@ public:
const TensorShape bias_shape(n, 1, 1);
_target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
protected:
@@ -1463,7 +1468,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
const ActivationLayerInfo &act_info)
{
TensorShape dst_shape = lhs_shape;
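The GEMMFixture.h changes all follow one pattern: arguments the reference path never reads (bias_shape, shape_c, data_layout and similar) are dropped from the compute_reference/compute_target signatures, while dataset values that must remain in setup() are marked with ARM_COMPUTE_UNUSED. The sketch below condenses that shape; ExampleGEMMValidationFixture is a hypothetical, simplified class (bodies omitted), not the library's actual code:

    template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
    class ExampleGEMMValidationFixture : public framework::Fixture
    {
    public:
        template <typename...>
        void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape,
                   float alpha, float beta, bool pretranspose, DataType data_type)
        {
            ARM_COMPUTE_UNUSED(pretranspose); // still supplied by the dataset, but no longer forwarded

            _target    = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
            _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type);
        }

    protected:
        // Only the arguments that are actually consumed remain in the helper signatures.
        TensorType      compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
                                       const TensorShape &output_shape, float alpha, float beta, DataType data_type);
        SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b,
                                          const TensorShape &output_shape, float alpha, float beta, DataType data_type);

        TensorType      _target{};
        SimpleTensor<T> _reference{};
    };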
diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
index af2a3b278d..89d2238344 100644
--- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h
+++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
const unsigned int transpose_w = 16 / data_size_from_type(data_type);
const TensorShape shape_b(static_cast<size_t>(y * transpose_w), static_cast<size_t>(std::ceil(x / static_cast<float>(transpose_w))));
_target = compute_target(shape_a, shape_b, data_type);
- _reference = compute_reference(shape_a, shape_b, data_type);
+ _reference = compute_reference(shape_a, data_type);
}
protected:
@@ -106,7 +106,7 @@ protected:
return b;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a, DataType data_type)
{
// Create reference
SimpleTensor<T> a{ shape_a, data_type, 1 };
diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
index 175ef2fb90..5e230d4430 100644
--- a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
@@ -48,7 +48,7 @@ public:
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place)
{
_target = compute_target(shape, data_type, data_layout, in_place);
- _reference = compute_reference(shape, data_type, data_layout);
+ _reference = compute_reference(shape, data_type);
}
protected:
@@ -118,7 +118,7 @@ protected:
}
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, DataLayout data_layout)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
{
std::mt19937 gen(library->seed());
std::uniform_real_distribution<float> dist_gamma(1.f, 2.f);
diff --git a/tests/validation/fixtures/PermuteFixture.h b/tests/validation/fixtures/PermuteFixture.h
index 92d01a5654..76351734d5 100644
--- a/tests/validation/fixtures/PermuteFixture.h
+++ b/tests/validation/fixtures/PermuteFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,9 +46,9 @@ class PermuteValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, DataLayout input_layout, PermutationVector perm, DataType data_type)
+ void setup(TensorShape input_shape, PermutationVector perm, DataType data_type)
{
- _target = compute_target(input_shape, input_layout, data_type, perm);
+ _target = compute_target(input_shape, data_type, perm);
_reference = compute_reference(input_shape, data_type, perm);
}
@@ -59,7 +59,7 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &input_shape, DataLayout input_layout, DataType data_type, PermutationVector perm)
+ TensorType compute_target(const TensorShape &input_shape, DataType data_type, PermutationVector perm)
{
// Permute shapes
TensorShape output_shape = input_shape;
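With the DataLayout argument removed, the permute datasets defined earlier in CL/Permute.cpp, CPP/Permute.cpp and NEON/Permute.cpp (shapes combined with permutation vectors) map directly onto the three-argument setup(). A hedged sketch of matching test-case wiring; the fixture alias and the exact dataset combination below are assumptions for illustration, not copied from the test files:

    // Hypothetical wiring: each (shape, perm, data_type) tuple produced by the
    // combined dataset is forwarded to PermuteValidationFixture::setup().
    template <typename T>
    using CLPermuteFixture = PermuteValidationFixture<CLTensor, CLAccessor, CLPermute, T>;

    FIXTURE_DATA_TEST_CASE(RunSmall, CLPermuteFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                           combine(PermuteParametersSmall, framework::dataset::make("DataType", DataType::U8)))
    {
        // Validate output against the reference permute
        validate(CLAccessor(_target), _reference);
    }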
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index cdc2cae584..18577edc66 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -182,7 +182,7 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW);
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, data_layout);
}
};
diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
index b2600f13f0..08b90c5b52 100644
--- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
+++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
@@ -104,6 +104,7 @@ public:
}
void configure(ITensorType *src, ITensorType *dst)
{
+ ARM_COMPUTE_UNUSED(src, dst);
}
void run()
{
diff --git a/tests/validation/fixtures/WarpPerspectiveFixture.h b/tests/validation/fixtures/WarpPerspectiveFixture.h
index 0eba97c47c..aa84946e94 100644
--- a/tests/validation/fixtures/WarpPerspectiveFixture.h
+++ b/tests/validation/fixtures/WarpPerspectiveFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,14 +58,12 @@ public:
constant_border_value = distribution_u8(gen);
}
- const TensorShape vmask_shape(input_shape);
-
// Create the matrix
std::array<float, 9> matrix = { { 0 } };
fill_warp_matrix<9>(matrix);
- _target = compute_target(input_shape, vmask_shape, matrix, policy, border_mode, constant_border_value, data_type);
- _reference = compute_reference(input_shape, vmask_shape, matrix, policy, border_mode, constant_border_value, data_type);
+ _target = compute_target(input_shape, matrix, policy, border_mode, constant_border_value, data_type);
+ _reference = compute_reference(input_shape, matrix, policy, border_mode, constant_border_value, data_type);
}
protected:
@@ -75,7 +73,7 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, const TensorShape &vmask_shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
+ TensorType compute_target(const TensorShape &shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
uint8_t constant_border_value,
DataType data_type)
{
@@ -106,7 +104,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &vmask_shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
+ SimpleTensor<T> compute_reference(const TensorShape &shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
uint8_t constant_border_value,
DataType data_type)
{
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index c0ba57a828..9c2df9ef4b 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -81,8 +81,6 @@ protected:
default:
{
ARM_COMPUTE_ERROR("Not supported");
- library->fill_tensor_uniform(tensor, i);
- break;
}
}
}
@@ -168,7 +166,7 @@ public:
{
ARM_COMPUTE_UNUSED(dilation);
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
@@ -192,8 +190,6 @@ protected:
default:
{
ARM_COMPUTE_ERROR("Not supported");
- library->fill_tensor_uniform(tensor, i);
- break;
}
}
}
@@ -247,7 +243,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info,
DataType data_type, ActivationLayerInfo act_info)
{
// Create reference
@@ -332,7 +328,7 @@ public:
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
@@ -351,8 +347,6 @@ protected:
default:
{
ARM_COMPUTE_ERROR("Not supported");
- library->fill_tensor_uniform(tensor, i);
- break;
}
}
}
@@ -390,7 +384,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
{
// Create reference
SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
@@ -416,7 +410,7 @@ public:
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
@@ -435,8 +429,6 @@ protected:
default:
{
ARM_COMPUTE_ERROR("Not supported");
- library->fill_tensor_uniform(tensor, i);
- break;
}
}
}
@@ -474,7 +466,7 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
{
// Create reference
SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
@@ -516,8 +508,6 @@ protected:
default:
{
ARM_COMPUTE_ERROR("Not supported");
- library->fill_tensor_uniform(tensor, i);
- break;
}
}
}
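The fill_tensor_uniform/break statements deleted from the default cases above were unreachable: ARM_COMPUTE_ERROR reports the failure and does not return, so nothing after it in the same branch can execute. A simplified sketch of the resulting pattern, assuming a non-returning error macro (the case labels here are illustrative):

    switch(data_type)
    {
        case DataType::F16:
        case DataType::F32:
            library->fill_tensor_uniform(tensor, i);
            break;
        default:
            // Assumption: ARM_COMPUTE_ERROR never returns, so no fallback fill or
            // trailing break is needed in this branch.
            ARM_COMPUTE_ERROR("Not supported");
    }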
diff --git a/tests/validation/reference/ColorConvert.cpp b/tests/validation/reference/ColorConvert.cpp
index 9090319a86..a759594cfa 100644
--- a/tests/validation/reference/ColorConvert.cpp
+++ b/tests/validation/reference/ColorConvert.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -170,7 +170,7 @@ std::vector<SimpleTensor<T>> color_convert(const TensorShape &shape, const std::
{
case Format::RGB888:
case Format::RGBA8888:
- colorconvert_helper::detail::colorconvert_iyuv_to_rgb(shape, tensor_planes, dst[0]);
+ colorconvert_helper::detail::colorconvert_iyuv_to_rgb(tensor_planes, dst[0]);
break;
default:
ARM_COMPUTE_ERROR("Not Supported");
@@ -185,7 +185,7 @@ std::vector<SimpleTensor<T>> color_convert(const TensorShape &shape, const std::
{
case Format::RGB888:
case Format::RGBA8888:
- colorconvert_helper::detail::colorconvert_nv12_to_rgb(shape, src_format, tensor_planes, dst[0]);
+ colorconvert_helper::detail::colorconvert_nv12_to_rgb(src_format, tensor_planes, dst[0]);
break;
case Format::IYUV:
colorconvert_helper::detail::colorconvert_nv_to_iyuv(tensor_planes, src_format, dst);
diff --git a/tests/validation/reference/ColorConvertHelper.h b/tests/validation/reference/ColorConvertHelper.h
index abd1f5d1fe..8dd961c0f4 100644
--- a/tests/validation/reference/ColorConvertHelper.h
+++ b/tests/validation/reference/ColorConvertHelper.h
@@ -306,7 +306,7 @@ inline void colorconvert_yuyv_to_rgb(const SimpleTensor<T> src, const Format for
}
template <typename T>
-inline void colorconvert_iyuv_to_rgb(const TensorShape &shape, const std::vector<SimpleTensor<T>> &tensor_planes, SimpleTensor<T> &dst)
+inline void colorconvert_iyuv_to_rgb(const std::vector<SimpleTensor<T>> &tensor_planes, SimpleTensor<T> &dst)
{
SimpleTensor<T> yvec(TensorShape{ tensor_planes[0].shape().x() / 2, tensor_planes[0].shape().y() }, Format::U8);
SimpleTensor<T> uvec(TensorShape{ tensor_planes[0].shape().x() / 2, tensor_planes[0].shape().y() }, Format::U8);
@@ -361,7 +361,7 @@ inline void colorconvert_iyuv_to_rgb(const TensorShape &shape, const std::vector
}
template <typename T>
-inline void colorconvert_nv12_to_rgb(const TensorShape &shape, const Format format, const std::vector<SimpleTensor<T>> &tensor_planes, SimpleTensor<T> &dst)
+inline void colorconvert_nv12_to_rgb(const Format format, const std::vector<SimpleTensor<T>> &tensor_planes, SimpleTensor<T> &dst)
{
SimpleTensor<T> yvec(TensorShape{ tensor_planes[0].shape().x() / 2, tensor_planes[0].shape().y() }, Format::U8);
SimpleTensor<T> uvec(TensorShape{ tensor_planes[0].shape().x() / 2, tensor_planes[0].shape().y() }, Format::U8);
diff --git a/tests/validation/reference/ROIAlignLayer.cpp b/tests/validation/reference/ROIAlignLayer.cpp
index 415b483bc0..c32dce72e1 100644
--- a/tests/validation/reference/ROIAlignLayer.cpp
+++ b/tests/validation/reference/ROIAlignLayer.cpp
@@ -132,6 +132,8 @@ SimpleTensor<float> convert_rois_from_asymmetric(SimpleTensor<uint16_t> rois)
template <typename T, typename TRois>
SimpleTensor<T> roi_align_layer(const SimpleTensor<T> &src, const SimpleTensor<TRois> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo)
{
+ ARM_COMPUTE_UNUSED(output_qinfo);
+
const size_t values_per_roi = rois.shape()[0];
const size_t num_rois = rois.shape()[1];
DataType dst_data_type = src.data_type();