path: root/tests/validation/reference/SoftmaxLayer.cpp
author     giuros01 <giuseppe.rossini@arm.com>  2018-09-03 09:53:53 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:54 +0000
commit     efbf6c8fd54159b26eda43eea7a12fce491ca13a (patch)
tree       f24f63d73703ddcb5fe0ea3ccef101660a9eb9a4 /tests/validation/reference/SoftmaxLayer.cpp
parent     477531c258801caf3cce44eb3e43df611b42fc6d (diff)
[COMPMID-386] Github: Support SoftmaxLayer on different number of dimensions?
Change-Id: I7422b977538ff29930a90f078badc2edee78af93
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146638
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests/validation/reference/SoftmaxLayer.cpp')
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp  32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 7f2c36ecef..f1b94c0a02 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -34,18 +34,26 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
- const bool is_4D_input = (src.shape().num_dimensions() > 2);
+ // Compute reference. Lower dims are the collapsing of the first axis
+ // dimensions (i.e., the flattened dimension of each batch). The upper dims are
+ // instead the batches we want to normalize
- // Compute reference. Lower dims are
- // - the number of columns for the 2D case
- // - the collapsing of the first three dimensions (i.e., the flattened dimension of each batch) in the 4D case
- const int lower_dims = (is_4D_input ? src.shape()[2] * src.shape()[1] * src.shape()[0] : src.shape()[0]);
- const int upper_dims = src.num_elements() / lower_dims;
+ int lower_dims = 1;
+ for(size_t i = 0; i < axis; i++)
+ {
+ lower_dims *= src.shape()[i];
+ }
+
+ int upper_dims = 1;
+ for(size_t i = axis; i < TensorShape::num_max_dimensions; i++)
+ {
+ upper_dims *= src.shape()[i];
+ }
for(int r = 0; r < upper_dims; ++r)
{
@@ -75,20 +83,20 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
}
template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
{
// Note: Output quantization info should always have scale = 1/256 and offset = 0
const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0);
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta);
+ SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis);
SimpleTensor<T> dst = convert_to_asymmetric(dst_tmp, output_quantization_info);
return dst;
}
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, size_t axis);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, size_t axis);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, size_t axis);
} // namespace reference
} // namespace validation
} // namespace test
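
For context, the patch replaces the hard-coded 2D/4D split with a split at an arbitrary axis: every dimension below `axis` is collapsed into one normalization window (`lower_dims`), and every dimension from `axis` upwards indexes an independent slice (`upper_dims`). The sketch below reproduces that indexing on a plain flat buffer. `Tensor` and `softmax_reference` are illustrative names, not part of the library, and the per-window max-shift/exp/normalize steps follow the standard softmax-with-beta formulation rather than copying the library's inner loop, which is elided from the hunk above.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for SimpleTensor: a flat buffer plus its shape.
// Dimension 0 is the innermost (fastest-varying) dimension, as in the diff above.
struct Tensor
{
    std::vector<float>  data;
    std::vector<size_t> shape;
};

// Reference-style softmax over the first `axis` dimensions of `src`.
// Dimensions below `axis` form one normalization window (lower_dims);
// dimensions from `axis` upwards select independent slices (upper_dims),
// mirroring the two loops introduced in the patch.
Tensor softmax_reference(const Tensor &src, float beta, size_t axis)
{
    size_t lower_dims = 1;
    for(size_t i = 0; i < axis; ++i)
    {
        lower_dims *= src.shape[i];
    }

    size_t upper_dims = 1;
    for(size_t i = axis; i < src.shape.size(); ++i)
    {
        upper_dims *= src.shape[i];
    }

    assert(lower_dims * upper_dims == src.data.size());

    Tensor dst{ std::vector<float>(src.data.size()), src.shape };

    for(size_t r = 0; r < upper_dims; ++r)
    {
        const float *src_row = src.data.data() + r * lower_dims;
        float       *dst_row = dst.data.data() + r * lower_dims;

        // Subtract the window maximum for numerical stability, scale by beta,
        // exponentiate, then normalize so the window sums to 1.
        const float max_val = *std::max_element(src_row, src_row + lower_dims);

        float sum = 0.f;
        for(size_t i = 0; i < lower_dims; ++i)
        {
            dst_row[i] = std::exp((src_row[i] - max_val) * beta);
            sum += dst_row[i];
        }
        for(size_t i = 0; i < lower_dims; ++i)
        {
            dst_row[i] /= sum;
        }
    }
    return dst;
}

int main()
{
    // Shape { 4, 2 } with axis = 1: each row of 4 elements is normalized
    // independently, so both rows printed below sum to 1.
    Tensor src{ { 1.f, 2.f, 3.f, 4.f, 1.f, 1.f, 1.f, 1.f }, { 4, 2 } };
    Tensor dst = softmax_reference(src, 1.f, 1);

    for(size_t r = 0; r < 2; ++r)
    {
        for(size_t c = 0; c < 4; ++c)
        {
            std::printf("%.4f ", dst.data[r * 4 + c]);
        }
        std::printf("\n");
    }
    return 0;
}

With shape { 4, 2 } and axis = 1 the window size is 4, so each of the two rows sums to 1; raising axis to 2 would instead normalize all 8 elements together.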
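
The QASYMM8 overload in the second hunk only dequantizes, calls the float reference with the same axis, and requantizes with the fixed output quantization (scale = 1/256, offset = 0), which maps the softmax output range [0, 1) onto the full uint8 range. A minimal sketch of that round trip, assuming standard asymmetric quantization; dequantize_asymm and quantize_asymm are hypothetical helpers standing in for the library's convert_from_asymmetric/convert_to_asymmetric:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Dequantize a uint8 value with its (scale, offset) pair.
float dequantize_asymm(uint8_t v, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(v) - offset);
}

// Quantize a float with the given (scale, offset), clamping to [0, 255].
uint8_t quantize_asymm(float x, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + offset;
    return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}

int main()
{
    // A softmax probability of 0.5 requantized with the fixed output
    // quantization (scale = 1/256, offset = 0) becomes 128.
    const float   prob = 0.5f;
    const uint8_t q    = quantize_asymm(prob, 1.f / 256, 0);
    std::printf("0.5 -> %u -> %.4f\n", q, dequantize_asymm(q, 1.f / 256, 0));
    return 0;
}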