diff options
Diffstat (limited to 'src/backends/reference')
-rw-r--r-- | src/backends/reference/test/RefLayerTests.cpp | 24
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxWorkload.cpp | 3
-rw-r--r-- | src/backends/reference/workloads/Softmax.cpp | 83
-rw-r--r-- | src/backends/reference/workloads/Softmax.hpp | 2
4 files changed, 83 insertions, 29 deletions
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 8af42eaba2..5cb804225b 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -374,6 +374,30 @@ ARMNN_AUTO_TEST_CASE(SimpleSoftmaxUint16, SimpleSoftmaxUint16Test, 1.0f) ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint16, Simple3dSoftmaxUint16Test, 1.0f) ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint16, Simple4dSoftmaxUint16Test, 1.0f) +ARMNN_AUTO_TEST_CASE(Simple2dAxis0Softmax, SimpleAxisSoftmaxTest, 1.0f, 0) +ARMNN_AUTO_TEST_CASE(Simple2dAxis1Softmax, SimpleAxisSoftmaxTest, 1.0f, 1) + +ARMNN_AUTO_TEST_CASE(Simple2dAxis0NegSoftmax, SimpleAxisSoftmaxTest, 1.0f, -2) +ARMNN_AUTO_TEST_CASE(Simple2dAxis1NegSoftmax, SimpleAxisSoftmaxTest, 1.0f, -1) + +ARMNN_AUTO_TEST_CASE(Simple3dAxis0Softmax, Simple3dAxisSoftmaxTest, 1.0f, 0) +ARMNN_AUTO_TEST_CASE(Simple3dAxis1Softmax, Simple3dAxisSoftmaxTest, 1.0f, 1) +ARMNN_AUTO_TEST_CASE(Simple3dAxis2Softmax, Simple3dAxisSoftmaxTest, 1.0f, 2) + +ARMNN_AUTO_TEST_CASE(Simple3dAxis0NegSoftmax, Simple3dAxisSoftmaxTest, 1.0f, -3) +ARMNN_AUTO_TEST_CASE(Simple3dAxis1NegSoftmax, Simple3dAxisSoftmaxTest, 1.0f, -2) +ARMNN_AUTO_TEST_CASE(Simple3dAxis2NegSoftmax, Simple3dAxisSoftmaxTest, 1.0f, -1) + +ARMNN_AUTO_TEST_CASE(Simple4dAxis0Softmax, Simple4dAxisSoftmaxTest, 1.0f, 0) +ARMNN_AUTO_TEST_CASE(Simple4dAxis1Softmax, Simple4dAxisSoftmaxTest, 1.0f, 1) +ARMNN_AUTO_TEST_CASE(Simple4dAxis2Softmax, Simple4dAxisSoftmaxTest, 1.0f, 2) +ARMNN_AUTO_TEST_CASE(Simple4dAxis3Softmax, Simple4dAxisSoftmaxTest, 1.0f, 3) + +ARMNN_AUTO_TEST_CASE(Simple4dAxis0NegSoftmax, Simple4dAxisSoftmaxTest, 1.0f, -4) +ARMNN_AUTO_TEST_CASE(Simple4dAxis1NegSoftmax, Simple4dAxisSoftmaxTest, 1.0f, -3) +ARMNN_AUTO_TEST_CASE(Simple4dAxis2NegSoftmax, Simple4dAxisSoftmaxTest, 1.0f, -2) +ARMNN_AUTO_TEST_CASE(Simple4dAxis3NegSoftmax, Simple4dAxisSoftmaxTest, 1.0f, -1) + // Sigmoid Activation ARMNN_AUTO_TEST_CASE(SimpleSigmoid, 
SimpleSigmoidTest) ARMNN_AUTO_TEST_CASE(SimpleSigmoidUint8, SimpleSigmoidUint8Test) diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp index b17666738f..0f6f837785 100644 --- a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp +++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp @@ -34,6 +34,7 @@ void RefSoftmaxWorkload::Execute() const Softmax(decoder, encoder, inputTensorInfo, - m_Data.m_Parameters.m_Beta); + m_Data.m_Parameters.m_Beta, + m_Data.m_Parameters.m_Axis); } } //namespace armnn diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp index 6cb219a6cc..ec4fdb8839 100644 --- a/src/backends/reference/workloads/Softmax.cpp +++ b/src/backends/reference/workloads/Softmax.cpp @@ -11,42 +11,71 @@ namespace armnn { +unsigned int GetNumElementsBetween(const TensorShape& shape, + unsigned int firstAxisInclusive, + unsigned int lastAxisExclusive) +{ + BOOST_ASSERT(0 <= firstAxisInclusive); + BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive); + BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions()); + unsigned int count = 1; + for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++) + { + count *= shape[i]; + } + return count; +} + /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo. 
-void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta) +void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis) { - unsigned int numChannels = inputTensorInfo.GetShape()[1]; + BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()), + "Required axis index greater than number of dimensions."); + BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()), + "Required axis index lower than negative of the number of dimensions"); + + unsigned int uAxis = axis < 0 ? + inputTensorInfo.GetNumDimensions() - static_cast<unsigned int>(abs(axis)) + : static_cast<unsigned int>(axis); - for (unsigned int n = 0; n < inputTensorInfo.GetShape()[0]; n++) + const TensorShape& inputShape = inputTensorInfo.GetShape(); + const unsigned int outerSize = GetNumElementsBetween(inputShape, 0, uAxis); + const unsigned int axisSize = inputShape[uAxis]; + const unsigned int innerSize = GetNumElementsBetween(inputShape, uAxis + 1, inputShape.GetNumDimensions()); + + for (unsigned int outer = 0; outer < outerSize; ++outer) { - // Find maximum channel. - in[n * numChannels]; - float max = in.Get(); - for (unsigned int c = 1; c < numChannels; c++) + unsigned int inputBeginIdx = outer * axisSize * innerSize; + unsigned int inputEndIdx = inputBeginIdx + axisSize * innerSize; + unsigned int outputBeginIdx = outer * axisSize * innerSize; + + for (unsigned int inner = 0; inner < innerSize; ++inner, ++inputBeginIdx, ++inputEndIdx, ++outputBeginIdx) { - in[n * numChannels + c]; - float val = in.Get(); - if (val > max) + // Find max + float maxValue = std::numeric_limits<float>::lowest(); + for (unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize) { - max = val; + in[iter]; + maxValue = std::max(maxValue, in.Get()); } - } - // Exponentiate all values and sum. 
- std::vector<float> exponentials(numChannels); - float sum = 0.0f; - for (unsigned int c = 0; c < numChannels; c++) - { - in[n * numChannels + c]; - float val = in.Get(); - exponentials[c] = expf((val - max) * beta); - sum += exponentials[c]; - } + // Compute sum + float sum = 0.0f; + for (unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize) + { + in[iter]; + sum += std::exp((in.Get() - maxValue) * beta); + } - // Divide exponentials by sum to give outputs. - for (unsigned int c = 0; c < numChannels; c++) - { - out[n * numChannels + c]; - out.Set(exponentials[c] / sum); + // Compute result + unsigned int outputIter = outputBeginIdx; + out[outputIter]; + for (unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize, outputIter += innerSize) + { + out[outputIter]; + in[iter]; + out.Set(std::exp((in.Get() - maxValue) * beta) / sum); + } } } } diff --git a/src/backends/reference/workloads/Softmax.hpp b/src/backends/reference/workloads/Softmax.hpp index 3876293957..25c7449474 100644 --- a/src/backends/reference/workloads/Softmax.hpp +++ b/src/backends/reference/workloads/Softmax.hpp @@ -12,6 +12,6 @@ namespace armnn { /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo. -void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta); +void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis = -1); } //namespace armnn