diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-04-07 10:23:21 +0100 |
---|---|---|
committer | Finn Williams <Finn.Williams@arm.com> | 2021-04-14 15:18:38 +0100 |
commit | b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0 (patch) | |
tree | 04cc91a6efb7e2601f80e4213a747938165b7184 /src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp | |
parent | b898222a8856475f0217be5e78b4816aa1914f15 (diff) | |
download | armnn-b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0.tar.gz |
IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload
* Added multithreaded StridedSliceEndToEndTest
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
Diffstat (limited to 'src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp')
-rw-r--r-- | src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp | 19 |
1 file changed, 15 insertions, 4 deletions
diff --git a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp index 150f0cb017..daee97ae3e 100644 --- a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp +++ b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp @@ -20,12 +20,23 @@ RefInstanceNormalizationWorkload::RefInstanceNormalizationWorkload( void RefInstanceNormalizationWorkload::Execute() const { + Execute(m_Data.m_Inputs, m_Data.m_Outputs); +} + +void RefInstanceNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) +{ + Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs); +} + +void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, + std::vector<ITensorHandle*> outputs) const +{ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefInstanceNormalizationWorkload_Execute"); - std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[0]), - m_Data.m_Inputs[0]->Map()); - std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]), - m_Data.m_Outputs[0]->Map()); + std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), + inputs[0]->Map()); + std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), + outputs[0]->Map()); InstanceNorm(m_Data, *inputDecoder, *outputEncoder); } |