diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-04-07 10:23:21 +0100 |
---|---|---|
committer | Finn Williams <Finn.Williams@arm.com> | 2021-04-14 15:18:38 +0100 |
commit | b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0 (patch) | |
tree | 04cc91a6efb7e2601f80e4213a747938165b7184 /src/backends/reference/workloads/RefSoftmaxWorkload.cpp | |
parent | b898222a8856475f0217be5e78b4816aa1914f15 (diff) | |
download | armnn-b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0.tar.gz |
IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload
* Added multithreaded StridedSliceEndToEndTest
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
Diffstat (limited to 'src/backends/reference/workloads/RefSoftmaxWorkload.cpp')
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxWorkload.cpp | 18 |
1 file changed, 14 insertions, 4 deletions
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp index 2e4d811674..9733cbc859 100644 --- a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp +++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp @@ -19,16 +19,26 @@ namespace armnn void RefSoftmaxWorkload::Execute() const { + Execute(m_Data.m_Inputs, m_Data.m_Outputs); +} + +void RefSoftmaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) +{ + Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs); +} + +void RefSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const +{ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute"); - const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]); - std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map()); + std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map()); Decoder<float> &decoder = *decoderPtr; - const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); + const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]); - std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map()); + std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, outputs[0]->Map()); Encoder<float> &encoder = *encoderPtr; Softmax(decoder, |