ArmNN
 22.02
RefInstanceNormalizationWorkload.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefInstanceNormalizationWorkload.hpp"

#include "InstanceNorm.hpp"
#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

namespace armnn
{

RefInstanceNormalizationWorkload::RefInstanceNormalizationWorkload(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<InstanceNormalizationQueueDescriptor>(descriptor, info) {}

// Synchronous execution uses the tensor handles stored in the queue descriptor.
void RefInstanceNormalizationWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

// Asynchronous execution uses the tensor handles supplied by the working memory descriptor.
void RefInstanceNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                               std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefInstanceNormalizationWorkload_Execute");

    // Wrap the mapped input and output tensors in float decoders/encoders so the
    // reference kernel can read and write the supported tensor data types as float32.
    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(GetTensorInfo(inputs[0]),
                                                                       inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
                                                                       outputs[0]->Map());

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);

    // Delegate the actual computation to the reference InstanceNorm kernel.
    InstanceNorm(m_Data, inputInfo, *inputDecoder, *outputEncoder);
}

} // namespace armnn
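The reference kernel invoked on the last line, InstanceNorm, normalises each (batch, channel) plane of the input independently, using the scale (gamma), offset (beta) and epsilon values carried by the workload's queue descriptor. As a rough illustration of that arithmetic, the standalone sketch below applies instance normalization to an NCHW float buffer. It is a minimal sketch only: the function name InstanceNormNchw, the flat std::vector layout and the scalar gamma/beta are assumptions made for the example, not the ArmNN API, and the real reference kernel also handles NHWC layouts and non-float tensor types through the Decoder/Encoder pair created above.

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative sketch of instance normalization over an NCHW float buffer.
// Not the ArmNN reference kernel; hypothetical helper for explanation only.
void InstanceNormNchw(const std::vector<float>& input,
                      std::vector<float>& output,
                      std::size_t batches, std::size_t channels,
                      std::size_t height, std::size_t width,
                      float gamma, float beta, float eps)
{
    const std::size_t planeSize = height * width;
    for (std::size_t n = 0; n < batches; ++n)
    {
        for (std::size_t c = 0; c < channels; ++c)
        {
            const std::size_t base = (n * channels + c) * planeSize;

            // Mean and variance are computed independently per (batch, channel) plane.
            float mean = 0.0f;
            for (std::size_t i = 0; i < planeSize; ++i)
            {
                mean += input[base + i];
            }
            mean /= static_cast<float>(planeSize);

            float variance = 0.0f;
            for (std::size_t i = 0; i < planeSize; ++i)
            {
                const float diff = input[base + i] - mean;
                variance += diff * diff;
            }
            variance /= static_cast<float>(planeSize);

            // Normalize, then apply the learned scale (gamma) and offset (beta).
            const float invStdDev = 1.0f / std::sqrt(variance + eps);
            for (std::size_t i = 0; i < planeSize; ++i)
            {
                output[base + i] = gamma * (input[base + i] - mean) * invStdDev + beta;
            }
        }
    }
}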