ArmNN 22.05.01
RefCastWorkload.cpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefCastWorkload.hpp"
#include "RefWorkloadUtils.hpp"

#include <ResolveType.hpp>
#include "Encoders.hpp"
#include "Decoders.hpp"

namespace
{
    void Cast(armnn::Decoder<float>& in, armnn::Encoder<float>& out, const uint32_t numElements)
    {
        for (unsigned int i = 0; i < numElements; i++)
        {
            out.Set(in.Get());
            ++in;
            ++out;
        }
    }
}
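The Cast helper is deliberately type-blind: it only ever sees the float view that a Decoder/Encoder pair exposes over the underlying buffers, so one loop serves every source/target element-type combination. A minimal, self-contained sketch of the same pattern, using hypothetical MiniDecoder/MiniEncoder stand-ins rather than ArmNN's real virtual Decoder<float>/Encoder<float> interfaces:

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-ins for armnn::Decoder<float> / armnn::Encoder<float>;
    // the real classes are abstract interfaces obtained via MakeDecoder/MakeEncoder.
    struct MiniDecoder                 // reads int8 storage, presents it as float
    {
        const int8_t* ptr;
        float Get() const { return static_cast<float>(*ptr); }
        MiniDecoder& operator++() { ++ptr; return *this; }
    };

    struct MiniEncoder                 // accepts float values, writes float storage
    {
        float* ptr;
        void Set(float v) { *ptr = v; }
        MiniEncoder& operator++() { ++ptr; return *this; }
    };

    int main()
    {
        const int8_t in[4] = { -2, -1, 3, 7 };
        float out[4] = {};

        MiniDecoder dec{in};
        MiniEncoder enc{out};

        // Same loop shape as the anonymous-namespace Cast() above.
        for (unsigned int i = 0; i < 4; i++)
        {
            enc.Set(dec.Get());
            ++dec;
            ++enc;
        }

        for (float v : out) { std::cout << v << ' '; }   // prints: -2 -1 3 7
        std::cout << '\n';
    }

In the real workload the two views are produced by MakeDecoder<float>/MakeEncoder<float> from the tensor infos, so all per-type conversion logic lives in those adapters rather than in the loop.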

namespace armnn
{

void RefCastWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefCastWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

void RefCastWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefCastWorkload_Execute");

    TensorInfo inputTensorInfo(GetTensorInfo(inputs[0]));
    TensorInfo outputTensorInfo(GetTensorInfo(outputs[0]));

    // Quantization info should be set to default values.
    if (inputTensorInfo.IsQuantized())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }
    if (outputTensorInfo.IsQuantized())
    {
        outputTensorInfo.SetQuantizationScale(1.0f);
        outputTensorInfo.SetQuantizationOffset(0);
    }

    Cast(*MakeDecoder<float>(inputTensorInfo, inputs[0]->Map()),
         *MakeEncoder<float>(outputTensorInfo, outputs[0]->Map()),
         inputTensorInfo.GetNumElements());
}

} //namespace armnn
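The quantization reset in Execute() is what makes a cast of a quantized tensor copy the stored integer values rather than their dequantized real-world values: with scale 1.0 and offset 0, the usual affine rule real = scale * (q - offset) becomes the identity. A small sketch of that arithmetic, assuming the standard affine quantization scheme (Dequantize here is a hypothetical helper for illustration, not an ArmNN function):

    #include <cstdint>
    #include <iostream>

    // Affine dequantization as commonly defined for quantized tensors:
    // real = scale * (q - offset). Assumed here for illustration only.
    float Dequantize(int8_t q, float scale, int32_t offset)
    {
        return scale * static_cast<float>(q - offset);
    }

    int main()
    {
        const int8_t q = 10;

        // With the tensor's own parameters the decoder would yield the real value:
        std::cout << Dequantize(q, 0.5f, 4) << '\n';   // 0.5 * (10 - 4) = 3

        // With the defaults forced in Execute(), the stored value passes through:
        std::cout << Dequantize(q, 1.0f, 0) << '\n';   // 1.0 * (10 - 0) = 10
    }

The same reasoning applies on the encoder side: with the default parameters, the quantize step stores the cast value numerically unchanged.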