author    Moritz Pflanzer <moritz.pflanzer@arm.com>    2017-07-21 17:19:58 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-09-17 14:16:42 +0100
commit    f6ad98a95cc4a638e133538ae682185032c16201 (patch)
tree      7940632c316c141bc0bb2557578b09f86ca7ca73 /tests/validation
parent    f5d76f28b51e93447273d1f7fa7512b3e0a54166 (diff)
download  ComputeLibrary-f6ad98a95cc4a638e133538ae682185032c16201.tar.gz
COMPMID-415: Move SoftmaxLayer to new validation
Change-Id: I68bb359021256e67892e4fc00d436f9027a3bd07
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80942
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp     226
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp   248
-rw-r--r--  tests/validation/Reference.cpp            25
-rw-r--r--  tests/validation/Reference.h               9
-rw-r--r--  tests/validation/ReferenceCPP.cpp          8
-rw-r--r--  tests/validation/ReferenceCPP.h            6
-rw-r--r--  tests/validation/TensorOperations.h       79
-rw-r--r--  tests/validation/TensorVisitors.h         19
8 files changed, 0 insertions, 620 deletions
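For context, the reference softmax removed from tests/validation/TensorOperations.h below applies the standard numerically-stable formulation along the innermost dimension: subtract the row maximum, exponentiate, then normalize by the sum of exponentials. The following is a minimal, standalone float-only sketch of that computation; the function name softmax_row, the use of std::vector, and the assumption of a non-empty row are illustrative and not library code.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical helper (not part of the library): numerically-stable softmax over one row.
// Assumes the input row is non-empty.
std::vector<float> softmax_row(const std::vector<float> &in)
{
    // Subtract the row maximum before exponentiating to avoid overflow.
    const float max_val = *std::max_element(in.begin(), in.end());

    std::vector<float> out(in.size());
    float              sum = 0.f;
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        out[i] = std::exp(in[i] - max_val);
        sum += out[i];
    }

    // Normalize so that the row sums to 1.
    for(float &v : out)
    {
        v /= sum;
    }

    return out;
}

The removed fixed-point overload follows the same max/exp/normalize structure using the library's fixed_point arithmetic helpers, which is why the removed QS8/QS16 tests restrict the fixed point position to the range supported by the reciprocal.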
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
deleted file mode 100644
index 5e03785231..0000000000
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "AssetsLibrary.h"
-#include "CL/CLAccessor.h"
-#include "Globals.h"
-#include "PaddingCalculator.h"
-#include "TypePrinter.h"
-#include "Utils.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-
-#include "boost_wrapper.h"
-
-#include <random>
-#include <string>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance = 0.000001f; /** Tolerance for float operations */
-const float tolerance_qs8 = 2.f; /** Tolerance for QS8 fixed point operations */
-const float tolerance_qs16 = 2.f; /** Tolerance for QS16 fixed point operations */
-
-/** Compute OpenCL softmax layer function.
- *
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
- *
- * @return Computed output tensor.
- */
-CLTensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0)
-{
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-
- // Create and configure function
- CLSoftmaxLayer smx_layer;
- smx_layer.configure(&src, &dst);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- BOOST_TEST(!src.info()->is_resizable());
- BOOST_TEST(!dst.info()->is_resizable());
-
- // Fill tensors
- if(arm_compute::is_data_type_float(dt))
- {
- std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
- library->fill(CLAccessor(src), distribution, 0);
- }
- else
- {
- int one_fixed = 1 << fixed_point_position;
- std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
- library->fill(CLAccessor(src), distribution, 0);
- }
-
- // Compute function
- smx_layer.run();
-
- return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(CL)
-BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
-{
- // Set fixed point position if the data type allows it
- int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-
- BOOST_TEST(src.info()->is_resizable());
- BOOST_TEST(dst.info()->is_resizable());
-
- // Create and configure function
- CLSoftmaxLayer smx_layer;
- smx_layer.configure(&src, &dst);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(src.info()->valid_region(), valid_region);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, dt);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, dt);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 6),
- shape, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs8);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 6),
- shape, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs8);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 13
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 14),
- shape, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs16);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 14),
- shape, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs16);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
deleted file mode 100644
index 8422ba363c..0000000000
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "AssetsLibrary.h"
-#include "Globals.h"
-#include "NEON/Accessor.h"
-#include "PaddingCalculator.h"
-#include "TypePrinter.h"
-#include "Utils.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-
-#include "boost_wrapper.h"
-
-#include <random>
-#include <string>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-/** Tolerance for float operations */
-const float tolerance_f32 = 0.000001f;
-#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.0001f;
-#endif /* ARM_COMPUTE_ENABLE_FP16*/
-/** Tolerance for fixed point operations */
-const float tolerance_fixed_point = 2.f;
-
-/** Compute Neon softmax layer function.
- *
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
- *
- * @return Computed output tensor.
- */
-Tensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0)
-{
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-
- // Create and configure function
- NESoftmaxLayer smx_layer;
- smx_layer.configure(&src, &dst);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- BOOST_TEST(!src.info()->is_resizable());
- BOOST_TEST(!dst.info()->is_resizable());
-
- // Fill tensors
- if(arm_compute::is_data_type_float(dt))
- {
- std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
- library->fill(Accessor(src), distribution, 0);
- }
- else
- {
- int one_fixed = 1 << fixed_point_position;
- std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
- library->fill(Accessor(src), distribution, 0);
- }
-
- // Compute function
- smx_layer.run();
-
- return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(NEON)
-BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
-
-#ifdef ARM_COMPUTE_ENABLE_FP16
-BOOST_AUTO_TEST_SUITE(Float16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, DataType::F16);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::F16);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f16);
-}
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* ARM_COMPUTE_ENABLE_FP16*/
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
-{
- // Set fixed point position if the data type allows it
- int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-
- BOOST_TEST(src.info()->is_resizable());
- BOOST_TEST(dst.info()->is_resizable());
-
- // Create and configure function
- NESoftmaxLayer smx_layer;
- smx_layer.configure(&src, &dst);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(src.info()->valid_region(), valid_region);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const int step = 16 / arm_compute::data_size_from_type(dt);
- const PaddingSize padding = PaddingCalculator(shape.x(), step).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, dt);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f32);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, dt);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f32);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 6),
- shape, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 6),
- shape, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 13
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 14),
- shape, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 14),
- shape, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index b94a0e5195..5c669903c8 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -738,31 +738,6 @@ RawTensor Reference::compute_reference_roi_pooling_layer(const TensorShape &shap
return ref_dst;
}
-RawTensor Reference::compute_reference_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position)
-{
- // Create reference
- RawTensor ref_src(shape, dt, 1, fixed_point_position);
- RawTensor ref_dst(shape, dt, 1, fixed_point_position);
-
- // Fill reference
- if(arm_compute::is_data_type_float(dt))
- {
- std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
- library->fill(ref_src, distribution, 0);
- }
- else
- {
- int one_fixed = 1 << fixed_point_position;
- std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
- library->fill(ref_src, distribution, 0);
- }
-
- // Compute reference
- ReferenceCPP::softmax_layer(ref_src, ref_dst);
-
- return ref_dst;
-}
-
RawTensor Reference::compute_reference_fixed_point_operation(const TensorShape &shape, DataType dt_in, DataType dt_out, FixedPointOp op, int fixed_point_position)
{
// Create reference
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index c540ec48a1..034a308327 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -382,15 +382,6 @@ public:
* @param[in] pool_info ROI Pooling Layer information.
*/
static RawTensor compute_reference_roi_pooling_layer(const TensorShape &shape, DataType dt, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info);
- /** Compute reference softmax layer.
- *
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
- *
- * @return Computed raw tensor.
- */
- static RawTensor compute_reference_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0);
/** Compute reference fixed point operation.
*
* @param[in] shape Shape of the input and output tensors.
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp
index 07801aba1c..dd2437195e 100644
--- a/tests/validation/ReferenceCPP.cpp
+++ b/tests/validation/ReferenceCPP.cpp
@@ -361,14 +361,6 @@ void ReferenceCPP::roi_pooling_layer(const RawTensor &src, RawTensor &dst, const
boost::apply_visitor(tensor_visitors::roi_pooling_layer_visitor(s, rois, pool_info), d);
}
-// Softmax Layer
-void ReferenceCPP::softmax_layer(const RawTensor &src, RawTensor &dst)
-{
- const TensorVariant s = TensorFactory::get_tensor(src);
- TensorVariant d = TensorFactory::get_tensor(dst);
- boost::apply_visitor(tensor_visitors::softmax_layer_visitor(s), d);
-}
-
// Fixed point operation
void ReferenceCPP::fixed_point_operation(const RawTensor &src, RawTensor &dst, FixedPointOp op)
{
diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h
index 248cdc4cc1..6d4d243c95 100644
--- a/tests/validation/ReferenceCPP.h
+++ b/tests/validation/ReferenceCPP.h
@@ -319,12 +319,6 @@ public:
* @param[in] pool_info ROI Pooling Layer information.
*/
static void roi_pooling_layer(const RawTensor &src, RawTensor &dst, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info);
- /** Softmax Layer of @p src.
- *
- * @param[in] src Input tensor.
- * @param[out] dst Result tensor.
- */
- static void softmax_layer(const RawTensor &src, RawTensor &dst);
/** Fixed point operations of @p src
*
* @param[in] src Input tensor.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 359dfe8d03..5018bfdb2b 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1606,85 +1606,6 @@ void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<RO
}
}
-// Softmax Layer
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
- const int cols = static_cast<int>(in.shape()[0]);
- const int upper_dims = in.shape().total_size() / cols;
- for(int r = 0; r < upper_dims; ++r)
- {
- // Find max
- T max = std::numeric_limits<T>::lowest();
- for(int c = 0; c < cols; ++c)
- {
- const T x = in[r * cols + c];
- if(x > max)
- {
- max = x;
- }
- }
-
- // Regularize
- T sum(0);
- for(int c = 0; c < cols; ++c)
- {
- const T res = exp(in[r * cols + c] - max);
- out[r * cols + c] = res;
- sum += res;
- }
-
- // Normalize
- const T norm_val = static_cast<T>(1) / sum;
- for(int c = 0; c < cols; ++c)
- {
- out[r * cols + c] *= norm_val;
- }
- }
-}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
- using namespace fixed_point_arithmetic;
- using promoted_T = typename test::traits::promote<T>::type;
-
- const int fixed_point_position = in.fixed_point_position();
- const int cols = static_cast<int>(in.shape()[0]);
- const int upper_dims = in.shape().total_size() / cols;
-
- for(int r = 0; r < upper_dims; ++r)
- {
- // Find max
- fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
- if(x > max)
- {
- max = x;
- }
- }
-
- // Regularize
- fixed_point<promoted_T> sum(0, fixed_point_position);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
- fixed_point<T> res = exp(x - max);
- out[r * cols + c] = res.raw();
- sum = add(sum, static_cast<fixed_point<promoted_T>>(res));
- }
-
- // Normalize
- fixed_point<T> sat_sum(sum);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
- out[r * cols + c] = div(x, sat_sum).raw();
- }
- }
-}
-
// Fixed point operations
template <typename T>
void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)
diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h
index 223b76faaa..bccb70a1d3 100644
--- a/tests/validation/TensorVisitors.h
+++ b/tests/validation/TensorVisitors.h
@@ -409,25 +409,6 @@ private:
ROIPoolingLayerInfo _pool_info;
};
-// Softmax Layer visitor
-struct softmax_layer_visitor : public boost::static_visitor<>
-{
-public:
- explicit softmax_layer_visitor(const TensorVariant &in)
- : _in(in)
- {
- }
-
- template <typename T>
- void operator()(Tensor<T> &out) const
- {
- const auto &in = boost::get<Tensor<T>>(_in);
- tensor_operations::softmax_layer(in, out);
- }
-
-private:
- const TensorVariant &_in;
-};
// Fixed Point operations visitor
struct fixed_point_operation_visitor : public boost::static_visitor<>
{