ArmNN
 22.05.01
NeonNormalizationFloatWorkload.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include <armnn/backends/Workload.hpp>

10 #include <arm_compute/core/Error.h>
11 #include <arm_compute/runtime/IFunction.h>
12 #include <arm_compute/runtime/MemoryManagerOnDemand.h>
13 
14 #include <memory>
15 
16 namespace armnn
17 {
18 
20  const TensorInfo& output,
21  const NormalizationDescriptor& descriptor);
22 
23 class NeonNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
24 {
25 public:
27  std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
28  virtual void Execute() const override;
29  // Replace input tensor handle with the given TensorHandle
30  void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
31 
32  // Replace output tensor handle with the given TensorHandle
33  void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
34 private:
35  std::unique_ptr<arm_compute::IFunction> m_NormalizationLayer;
36  virtual void Reconfigure();
37 };
38 
39 } //namespace armnn
40 
41 
42 
43 
Copyright (c) 2021 ARM Limited and Contributors.
void ReplaceOutputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceInputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
Status
enumeration
Definition: Types.hpp:42
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
Contains information about TensorInfos of a layer.