about summary refs log tree commit diff
path: root/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp  31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
index 1b4e8f9aa0..f187e0ca31 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -22,32 +22,31 @@ RefLogicalBinaryWorkload::RefLogicalBinaryWorkload(const LogicalBinaryQueueDescr
: BaseWorkload<LogicalBinaryQueueDescriptor>(desc, info)
{}
-void RefLogicalBinaryWorkload::PostAllocationConfigure()
+void RefLogicalBinaryWorkload::Execute() const
{
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+ Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
- m_Input0 = MakeDecoder<InType>(inputInfo0);
- m_Input1 = MakeDecoder<InType>(inputInfo1);
- m_Output = MakeEncoder<OutType>(outputInfo);
+void RefLogicalBinaryWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+ Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}
-void RefLogicalBinaryWorkload::Execute() const
+void RefLogicalBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+ const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
const TensorShape& inShape0 = inputInfo0.GetShape();
const TensorShape& inShape1 = inputInfo1.GetShape();
const TensorShape& outShape = outputInfo.GetShape();
- m_Input0->Reset(m_Data.m_Inputs[0]->Map());
- m_Input1->Reset(m_Data.m_Inputs[1]->Map());
- m_Output->Reset(m_Data.m_Outputs[0]->Map());
+ std::unique_ptr<Decoder<InType>> input0 = MakeDecoder<InType>(inputInfo0, inputs[0]->Map());
+ std::unique_ptr<Decoder<InType>> input1 = MakeDecoder<InType>(inputInfo1, inputs[1]->Map());
+ std::unique_ptr<Encoder<OutType>> output = MakeEncoder<OutType>(outputInfo, outputs[0]->Map());
using AndFunction = LogicalBinaryFunction<std::logical_and<bool>>;
using OrFunction = LogicalBinaryFunction<std::logical_or<bool>>;
@@ -56,12 +55,12 @@ void RefLogicalBinaryWorkload::Execute() const
{
case LogicalBinaryOperation::LogicalAnd:
{
- AndFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+ AndFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
break;
}
case LogicalBinaryOperation::LogicalOr:
{
- OrFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+ OrFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
break;
}
default: