ArmNN
 20.05
ConvertFp32NetworkToFp16.hpp
Go to the documentation of this file.
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Optimization.hpp"
#include "NetworkUtils.hpp"

namespace armnn
{
namespace optimizations
{
14 
16 {
17 public:
18  void Run(Graph& graph, Layer& layer) const
19  {
20  if(layer.GetType() == LayerType::Input)
21  {
22  // if the outputs of this layer are DataType::Float32
23  // add a ConvertFloat32ToFloat16 layer after each of the outputs
24  if (layer.GetDataType() == DataType::Float32)
25  {
27  }
28  }
29  else if (layer.GetType() == LayerType::Output)
30  {
31  // if the inputs of this layer are DataType::Float32
32  // add a ConvertFloat16ToFloat32 layer before each of the inputs
33  if (layer.GetDataType() == DataType::Float32)
34  {
35  // NOTE: We need to call InsertConvertFp16ToFp32LayersBefore with expectCorrectInputType = false
36  // here, otherwise it will expect the inputs to be DataType::Float16
37  InsertConvertFp16ToFp32LayersBefore(graph, layer, false);
38  }
39  }
41  {
42  // if the inputs/outputs of this layer are DataType::Float32
43  // change the data type for all inputs and outputs to DataType::Float16
44  for (auto&& input = layer.BeginInputSlots(); input != layer.EndInputSlots(); ++input)
45  {
46  // if it is connected to OutputSlot of the InputLayer do not change the DataType of connection
47  // InputSlots of the current layer will be updated when conversion layer is inserted after InputLayer
48  Layer& base = input->GetConnectedOutputSlot()->GetOwningLayer();
49  if (base.GetType() != LayerType::Input)
50  {
51  TensorInfo convertInfo = input->GetConnection()->GetTensorInfo();
52  if (convertInfo.GetDataType() == DataType::Float32)
53  {
54  convertInfo.SetDataType(DataType::Float16);
55  input->GetConnection()->SetTensorInfo(convertInfo);
56  }
57  }
58  }
59 
60  // change outputs to DataType::Float16
61  for (auto&& output = layer.BeginOutputSlots(); output != layer.EndOutputSlots(); ++output)
62  {
63  TensorInfo convertInfo = output->GetTensorInfo();
64  if (convertInfo.GetDataType() == DataType::Float32)
65  {
66  convertInfo.SetDataType(DataType::Float16);
67  output->SetTensorInfo(convertInfo);
68  }
69  }
70  }
71  }
72 
73 protected:
74  ConvertFp32NetworkToFp16Impl() = default;
76 };
77 
79 
} // namespace optimizations
} // namespace armnn
std::vector< InputSlot >::iterator EndInputSlots()
Definition: Layer.hpp:236
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
Copyright (c) 2020 ARM Limited.
std::vector< InputSlot >::iterator BeginInputSlots()
Definition: Layer.hpp:235
DataType GetDataType() const
Definition: Tensor.hpp:95
void SetDataType(DataType type)
Definition: Tensor.hpp:96
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:239
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:240
DataType GetDataType() const
Definition: Layer.cpp:274
LayerType GetType() const
Definition: Layer.hpp:259