ArmNN 22.08
ClNormalizationFloatWorkload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ClBaseWorkload.hpp"

#include <arm_compute/runtime/CL/functions/CLNormalizationLayer.h>

namespace armnn
{

arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const NormalizationDescriptor& descriptor);

class ClNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
{
public:
    ClNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
                                 const WorkloadInfo& info,
                                 const arm_compute::CLCompileContext& clCompileContext);
    void Execute() const override;

    // Replaces the input tensor handle at the given slot with the given TensorHandle.
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replaces the output tensor handle at the given slot with the given TensorHandle.
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    mutable arm_compute::CLNormalizationLayer m_NormalizationLayer;

    virtual void Reconfigure();
};

} // namespace armnn
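ClNormalizationWorkloadValidate is typically called to ask the Arm Compute Library whether the CL backend can execute a given normalization configuration before the workload is created. The sketch below is illustrative and not part of this header: the function name IsClNormalizationSupported, the include path, the 1x8x16x16 tensor shapes and the Across/LocalBrightness descriptor settings are assumptions chosen for the example.

// Minimal sketch: query CL backend support for a normalization configuration.
// Include path, shapes and descriptor values are assumed for illustration only.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "ClNormalizationFloatWorkload.hpp" // assumed relative include path

bool IsClNormalizationSupported()
{
    using namespace armnn;

    // Example 1x8x16x16 float tensors; the validate helper only needs TensorInfos.
    TensorInfo input({ 1, 8, 16, 16 }, DataType::Float32);
    TensorInfo output({ 1, 8, 16, 16 }, DataType::Float32);

    // Cross-channel local brightness normalization over a window of 5 channels
    // (assumed example configuration).
    NormalizationDescriptor descriptor;
    descriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = NormalizationAlgorithmMethod::LocalBrightness;
    descriptor.m_NormSize        = 5;

    // Returns an arm_compute::Status; ErrorCode::OK means the CL backend
    // can execute this configuration.
    arm_compute::Status status = ClNormalizationWorkloadValidate(input, output, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}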