author | Ryan OShea <ryan.oshea3@arm.com> | 2022-11-07 16:20:48 +0000 |
---|---|---|
committer | ryan.oshea3 <ryan.oshea3@arm.com> | 2022-11-16 15:22:50 +0000 |
commit | 31441595009182c985dacbedc70c41ee6664d070 (patch) | |
tree | 248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/armnn | |
parent | bd18eab07a8f30492de1e462b1815189014cb8d5 (diff) | |
download | armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz |
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
* Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
* Provide comments explaining that fast math must be enabled in order to use bf16
* Update docs to inform users to enable fast math for bf16
ExecuteNetwork changes:
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting m_ReduceFp32ToBf16 optimizer option
Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
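
For reference, a minimal sketch of how a caller would request bf16 execution after this change. This is illustrative only and not part of the commit: the helper name `OptimizeWithBf16FastMath`, its arguments, and the CpuAcc backend choice are placeholder assumptions; the mechanism it exercises (the FastMathEnabled backend option, with m_ReduceFp32ToBf16 left unset) is the one named in the commit message.

```cpp
#include <armnn/ArmNN.hpp>

// Hypothetical helper (not part of this commit). With the Bf16 optimizer
// option disabled, bf16 execution is requested through the backend's
// FastMathEnabled option instead.
armnn::IOptimizedNetworkPtr OptimizeWithBf16FastMath(const armnn::INetwork& network,
                                                     armnn::IRuntime& runtime)
{
    armnn::OptimizerOptions optimizerOptions;
    // Leave m_ReduceFp32ToBf16 at its default (false); setting it to true
    // now throws InvalidArgumentException (see the Network.cpp hunk below).

    // Ask CpuAcc for fast math; the backend may then choose bf16 kernels.
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("CpuAcc", {{"FastMathEnabled", true}}));

    return armnn::Optimize(network,
                           {armnn::Compute::CpuAcc},
                           runtime.GetDeviceSpec(),
                           optimizerOptions);
}
```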
Diffstat (limited to 'src/armnn')
20 files changed, 6 insertions, 1428 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 6638709d6f..ff899d49ea 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -307,34 +307,6 @@ bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             reasonIfUnsupported);
 }
 
-bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                      const TensorInfo& output,
-                                                      Optional<std::string&> reasonIfUnsupported)
-{
-    TensorInfos infos{input, output};
-
-    return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
-                                            infos,
-                                            BaseDescriptor(),
-                                            EmptyOptional(),
-                                            EmptyOptional(),
-                                            reasonIfUnsupported);
-}
-
-bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                      const TensorInfo& output,
-                                                      Optional<std::string&> reasonIfUnsupported)
-{
-    TensorInfos infos{input, output};
-
-    return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
-                                            infos,
-                                            BaseDescriptor(),
-                                            EmptyOptional(),
-                                            EmptyOptional(),
-                                            reasonIfUnsupported);
-}
-
 bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/ILayerSupport.cpp b/src/armnn/ILayerSupport.cpp
index 8099782750..3ef367ee16 100644
--- a/src/armnn/ILayerSupport.cpp
+++ b/src/armnn/ILayerSupport.cpp
@@ -77,18 +77,10 @@ bool ILayerSupport::IsLayerSupported(const LayerType& type,
         case LayerType::Constant:
             return IsConstantSupported(infos[0],
                                        reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return IsConvertBf16ToFp32Supported(infos[0],
-                                                infos[1],
-                                                reasonIfUnsupported);
         case LayerType::ConvertFp16ToFp32:
             return IsConvertFp16ToFp32Supported(infos[0],
                                                 infos[1],
                                                 reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return IsConvertFp32ToBf16Supported(infos[0],
-                                                infos[1],
-                                                reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0],
                                                 infos[1],
@@ -634,22 +626,6 @@ bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
     return false;
 }
 
-bool ILayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(input, output, reasonIfUnsupported);
-    return false;
-}
-
-bool ILayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(input, output, reasonIfUnsupported);
-    return false;
-}
-
 bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index acac1f9988..43862d5072 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -17,9 +17,7 @@
 #include "layers/ComparisonLayer.hpp"
 #include "layers/ConcatLayer.hpp"
 #include "layers/ConstantLayer.hpp"
-#include "layers/ConvertBf16ToFp32Layer.hpp"
 #include "layers/ConvertFp16ToFp32Layer.hpp"
-#include "layers/ConvertFp32ToBf16Layer.hpp"
 #include "layers/ConvertFp32ToFp16Layer.hpp"
 #include "layers/Convolution2dLayer.hpp"
 #include "layers/Convolution3dLayer.hpp"
@@ -119,9 +117,7 @@ DECLARE_LAYER(ChannelShuffle)
 DECLARE_LAYER(Comparison)
 DECLARE_LAYER(Concat)
 DECLARE_LAYER(Constant)
-DECLARE_LAYER(ConvertBf16ToFp32)
 DECLARE_LAYER(ConvertFp16ToFp32)
-DECLARE_LAYER(ConvertFp32ToBf16)
 DECLARE_LAYER(ConvertFp32ToFp16)
 DECLARE_LAYER(Convolution2d)
 DECLARE_LAYER(Convolution3d)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9d00a69518..6d3058c670 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -604,30 +604,6 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
     return noErrors;
 }
 
-template <typename LayerT>
-LayerT* ConvertBf16ToFp32Weight(Layer* l)
-{
-    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
-    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-        && layer->m_Weight)
-    {
-        const TensorInfo& info = layer->m_Weight->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
-                layer->m_Weight->template GetConstTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::Float32);
-            ConstTensor newInput(newInfo, newValues);
-            layer->m_Weight.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-    return layer;
-}
-
 OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                             Graph& graph,
                                             Layer* layer,
@@ -772,98 +748,6 @@ OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                 return result;
             }
         }
-        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
-        {
-            const auto layerType = layer->GetType();
-            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
-                && layerType != LayerType::ConvertFp32ToBf16
-                && layerType != LayerType::ConvertBf16ToFp32)
-            {
-                bool revertConstantWeightsConversion = RevertConstantWeightsToFP32(layer);
-
-                // Insert BF16 -> FP32 conversion layer before current layer.
-                // Unless we have reverted Constant Weights Type above.
-                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
-                if (dataTypeIn == DataType::BFloat16 && dataTypeOut != DataType::BFloat16
-                    && !revertConstantWeightsConversion)
-                {
-                    convertBf16ToFp32Layers =
-                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
-                    if (layer->GetType() == LayerType::Convolution2d)
-                    {
-                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
-                    }
-                    else if (layer->GetType() == LayerType::FullyConnected)
-                    {
-                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
-                    }
-                }
-
-                // Insert FP32 -> BF16 conversion layer after current layer
-                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
-                if (dataTypeOut == DataType::BFloat16)
-                {
-                    convertFp32ToBf16Layers =
-                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
-                }
-
-                // Assign a supported backend to the newly introduced conversion layers
-                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
-                {
-                    bool supportedBackendFound = false;
-                    std::string reasonIfUnsupported;
-
-                    // Try preferred backend first
-                    layer->SetBackendId(preferredBackend);
-                    if (IWorkloadFactory::IsLayerSupported(*layer,
-                                                           EmptyOptional(),
-                                                           reasonIfUnsupported))
-                    {
-                        supportedBackendFound = true;
-                    }
-                    else
-                    {
-                        for (const auto& backend : availablePreferredBackends)
-                        {
-                            // Skip preferred backend (we already determined that it is not supported)
-                            if (backend == preferredBackend)
-                            {
-                                continue;
-                            }
-
-                            layer->SetBackendId(backend);
-                            if (IWorkloadFactory::IsLayerSupported(*layer,
-                                                                   EmptyOptional(),
-                                                                   reasonIfUnsupported))
-                            {
-                                supportedBackendFound = true;
-                                break;
-                            }
-                        }
-                    }
-
-                    return supportedBackendFound;
-                };
-
-                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
-                {
-                    if (!AssignFirstSupportedBackend(convertLayer, backend))
-                    {
-                        return ReturnError(convertLayer);
-                    }
-                }
-
-                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
-                {
-                    if (!AssignFirstSupportedBackend(convertLayer, backend))
-                    {
-                        return ReturnError(convertLayer);
-                    }
-                }
-
-                return result;
-            }
-        }
 
         std::stringstream warningMsg;
         warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
@@ -1669,6 +1553,12 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
         throw InvalidArgumentException("Invoked Optimize with no backends specified");
     }
 
+    if (options.m_ReduceFp32ToBf16)
+    {
+        throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization "
+                                       "Please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
+    }
+
     if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
     {
         throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
@@ -1745,17 +1635,6 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
         Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
     }
 
-    // If Fp32 to Bf16 optimization is set convert Fp32 network to Bf16
-    // Convert input of Convolution2d and FullyConnected from Fp32 to Bf16
-    // Only Constant weight of Convolution2d and FullyConnected are converted from Fp32 to Bf16
-    // Constant and Fp32ToBf16 layers will also be fused so conversion is no longer needed at inference time
-    if (options.m_ReduceFp32ToBf16)
-    {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ReduceFp32ToBf16");
-        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
-        Optimizer::Pass(optGraph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-    }
-
     // Initialize backend settings
     BackendSettings backendSettings(backendPreferences, deviceSpec);
     if (backendSettings.GetAvailablePreferredBackends().empty())
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index aaee4eba1a..1d46f029dc 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -5,8 +5,6 @@
 
 #include "NetworkUtils.hpp"
 
-#include <armnnUtils/FloatingPointConverter.hpp>
-#include <BFloat16.hpp>
 #include "SubgraphViewSelector.hpp"
 
 #include <armnn/Exceptions.hpp>
@@ -26,17 +24,6 @@ void UpdateOutputSlotToFp32(OutputSlot& outputSlot)
     outputSlot.SetTensorInfo(newTensorInfo);
 }
 
-void ChangeOutputBf16ToFp32(Layer& layer)
-{
-    for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
-    {
-        if (outputSlot->GetTensorInfo().GetDataType() == DataType::BFloat16)
-        {
-            UpdateOutputSlotToFp32(*outputSlot);
-        }
-    }
-}
-
 void ChangeOutputFp16ToFp32(Layer& layer)
 {
     for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
@@ -50,93 +37,6 @@ void ChangeOutputFp16ToFp32(Layer& layer)
 
 } // anonymous namespace
 
-std::vector<ConvertBf16ToFp32Layer*> InsertConvertBf16ToFp32LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType)
-{
-    std::vector<ConvertBf16ToFp32Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumInputSlots());
-
-    // Insert a ConvertBf16ToFp32Layer before each input slot
-    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
-    {
-        bool allowInsert = true;
-        if (expectCorrectInputType)
-        {
-            // Only insert ConvertBf16ToFp32Layer before BF16 input slots
-            OutputSlot* connectedOutputSlot = inputSlot->GetConnectedOutputSlot();
-            allowInsert =
-                connectedOutputSlot && connectedOutputSlot->GetTensorInfo().GetDataType() == DataType::BFloat16;
-        }
-
-        if (allowInsert)
-        {
-            const std::string name =
-                std::string("convert_bf16_to_fp32-" + std::to_string(inputSlot->GetSlotIndex()) + "-") +
-                layer.GetName();
-            ConvertBf16ToFp32Layer* convertLayer =
-                graph.InsertNewLayer<ConvertBf16ToFp32Layer>(*inputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::Float32);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType)
-{
-    std::vector<ConvertFp32ToBf16Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumInputSlots());
-
-    // Insert a ConvertFp32ToBf16Layer before each input slot
-    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
-    {
-        bool allowInsert = true;
-
-        if ((layer.GetType() == LayerType::Convolution2d ||
-             layer.GetType() == LayerType::FullyConnected ||
-             layer.GetType() == LayerType::DepthwiseConvolution2d)
-            && inputSlot->GetSlotIndex() == 2)
-        {
-            // Refrain from reducing bias to Bf16
-            continue;
-        }
-        if (expectCorrectInputType)
-        {
-            // Only insert ConvertFp32ToBf16Layer before FP32 input slots
-            OutputSlot* connectedOutputSlot = inputSlot->GetConnectedOutputSlot();
-            allowInsert =
-                connectedOutputSlot && connectedOutputSlot->GetTensorInfo().GetDataType() == DataType::Float32;
-        }
-
-        if (allowInsert)
-        {
-            const std::string name =
-                std::string("convert_fp32_to_bf16-" + std::to_string(inputSlot->GetSlotIndex()) + "-") +
-                layer.GetName();
-            ConvertFp32ToBf16Layer* convertLayer =
-                graph.InsertNewLayer<ConvertFp32ToBf16Layer>(*inputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::BFloat16);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
 std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph,
                                                                          Layer& layer,
                                                                          bool expectCorrectInputType)
@@ -176,39 +76,6 @@ std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph&
     return convertLayers;
 }
 
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersAfter(Graph& graph, Layer& layer)
-{
-    const unsigned int numOutputSlots = layer.GetNumOutputSlots();
-
-    std::vector<ConvertFp32ToBf16Layer*> convertLayers;
-    convertLayers.reserve(numOutputSlots);
-
-    // Update Bf16 output slots to FP32 on current layer
-    ChangeOutputBf16ToFp32(layer);
-
-    // Insert a ConvertFp32ToBf16Layer after each FP32 output slot
-    for (unsigned int slotIndex = 0u; slotIndex < numOutputSlots; ++slotIndex)
-    {
-        OutputSlot& outputSlot = layer.GetOutputSlot(slotIndex);
-        if(outputSlot.GetTensorInfo().GetDataType() == DataType::Float32)
-        {
-            const std::string name =
-                std::string("convert_fp32_to_bf16-" + std::to_string(slotIndex) + "-") + layer.GetName();
-            ConvertFp32ToBf16Layer* convertLayer =
-                graph.InsertNewLayer<ConvertFp32ToBf16Layer>(outputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::BFloat16);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
 std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer)
 {
     const unsigned int numOutputSlots = layer.GetNumOutputSlots();
@@ -274,50 +141,4 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer, bool
     return debugLayers;
 }
 
-bool RevertConstantWeightsToFP32(Layer* layer)
-{
-    if (layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-    {
-        // Revert Weights on Constant Layer to FP32 so they can be accessed by Conv2d or FullyConnected
-        // This prevents a conversion layer being added in during backend assignment which blocks
-        // the RedirectMembersToConstantInputs backward compatibility workaround/optimization.
-        auto constantLayerInfo = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
-
-        if (constantLayerInfo.IsConstant() && constantLayerInfo.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(constantLayerInfo.GetNumElements());
-
-            auto weightLayer = PolymorphicDowncast<ConstantLayer*>(
-                &layer->GetInputSlot(1).GetConnection()->GetOwningIConnectableLayer());
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
-                weightLayer->m_LayerOutput->GetConstTensor<BFloat16>(),
-                constantLayerInfo.GetNumElements(),
-                newValues.data());
-
-            TensorInfo newInfo(constantLayerInfo.GetShape(), DataType::Float32);
-            newInfo.SetConstant(true);
-            ConstTensor newInput(newInfo, newValues);
-            weightLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));
-            weightLayer->GetOutputSlot(0).SetTensorInfo(newInfo);
-
-            // Connect Conv2d/FullyConnected to InputLayer directly leaving out
-            // the ConversionLayer to be cleaned up later
-            auto& conversionLayer = layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer();
-            auto actualInputOutputSlot = conversionLayer.GetInputSlot(0).GetConnection();
-
-            auto& conversionLayerOutputSlot =
-                layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer().GetOutputSlot(0);
-            auto& conversionLayerInputSlot =
-                layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer().GetInputSlot(0);
-            actualInputOutputSlot->Disconnect(conversionLayerInputSlot);
-            conversionLayerOutputSlot.Disconnect(layer->GetInputSlot(0));
-
-            actualInputOutputSlot->Connect(layer->GetInputSlot(0));
-
-            return true;
-        }
-    }
-    return false;
-}
-
 } // namespace armnn
diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp
index 38e0aabaf9..74e872cfbc 100644
--- a/src/armnn/NetworkUtils.hpp
+++ b/src/armnn/NetworkUtils.hpp
@@ -11,16 +11,6 @@
 namespace armnn
 {
 
-std::vector<ConvertBf16ToFp32Layer*> InsertConvertBf16ToFp32LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType = true);
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType = true);
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersAfter(Graph& graph, Layer& layer);
-
 std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph,
                                                                          Layer& layer,
                                                                          bool expectCorrectInputType = true);
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
deleted file mode 100644
index a0958e36cb..0000000000
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertBf16ToFp32Layer.hpp"
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/backends/WorkloadData.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-namespace armnn
-{
-
-ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
-    : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
-{
-}
-
-std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
-{
-    ConvertBf16ToFp32QueueDescriptor descriptor;
-    SetAdditionalInfo(descriptor);
-
-    return factory.CreateWorkload(LayerType::ConvertBf16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
-}
-
-ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
-{
-    return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
-}
-
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
-{
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
-
-    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-
-    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
-}
-
-void ConvertBf16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
-{
-    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
-}
-
-} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
deleted file mode 100644
index 71312758e4..0000000000
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-namespace armnn
-{
-
-/// This layer converts data type BFloat16 to Float32.
-class ConvertBf16ToFp32Layer : public Layer
-{
-public:
-    /// Makes a workload for the ConvertBf16ToFp32 type.
-    /// @param [in] factory The workload factory which will create the workload.
-    /// @return A pointer to the created workload, or nullptr if not created.
-    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
-
-    /// Creates a dynamically-allocated copy of this layer.
-    /// @param [in] graph The graph into which this layer is being cloned.
-    ConvertBf16ToFp32Layer* Clone(Graph& graph) const override;
-
-    /// Check if the input tensor shape(s)
-    /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs() override;
-
-    void ExecuteStrategy(IStrategy& strategy) const override;
-
-protected:
-    /// Constructor to create a ConvertBf16ToFp32Layer.
-    /// @param [in] name Optional name for the layer.
-    ConvertBf16ToFp32Layer(const char* name);
-
-    /// Default destructor
-    ~ConvertBf16ToFp32Layer() = default;
-};
-
-} // namespace
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
deleted file mode 100644
index 7c98eea239..0000000000
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertFp32ToBf16Layer.hpp"
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/backends/WorkloadData.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-namespace armnn
-{
-
-ConvertFp32ToBf16Layer::ConvertFp32ToBf16Layer(const char* name)
-    : Layer(1, 1, LayerType::ConvertFp32ToBf16, name)
-{
-}
-
-std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloadFactory& factory) const
-{
-    ConvertFp32ToBf16QueueDescriptor descriptor;
-    SetAdditionalInfo(descriptor);
-
-    return factory.CreateWorkload(LayerType::ConvertFp32ToBf16, descriptor, PrepInfoAndDesc(descriptor));
-}
-
-ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
-{
-    return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
-}
-
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
-{
-
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
-
-    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-
-    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
-}
-
-void ConvertFp32ToBf16Layer::ExecuteStrategy(IStrategy& strategy) const
-{
-    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
-}
-
-} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
deleted file mode 100644
index 71de4fbcda..0000000000
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-namespace armnn
-{
-
-/// This layer converts data type Float32 to BFloat16.
-class ConvertFp32ToBf16Layer : public Layer
-{
-public:
-    /// Makes a workload for the ConvertFp32ToBf16Layer type.
-    /// @param [in] factory The workload factory which will create the workload.
-    /// @return A pointer to the created workload, or nullptr if not created.
-    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
-
-    /// Creates a dynamically-allocated copy of this layer.
-    /// @param [in] graph The graph into which this layer is being cloned.
-    ConvertFp32ToBf16Layer* Clone(Graph& graph) const override;
-
-    /// Check if the input tensor shape(s)
-    /// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs() override;
-
-    void ExecuteStrategy(IStrategy& strategy) const override;
-
-protected:
-    /// Constructor to create a ConvertFp32ToBf16Layer.
-    /// @param [in] name Optional name for the layer.
-    ConvertFp32ToBf16Layer(const char* name);
-
-    /// Default destructor
-    ~ConvertFp32ToBf16Layer() = default;
-};
-
-} // namespace
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index 0421f31973..a11dec9446 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -9,8 +9,6 @@
 #include "ConvertConstants.hpp"
 #include "ConvertConstDequantisationLayersToConstLayers.hpp"
 #include "ConvertConstPermuteLayersToConstLayers.hpp"
-#include "FuseConvertFp32ToBf16IntoConstLayers.hpp"
-#include "ConvertFp32NetworkToBf16.hpp"
 #include "ConvertFp32NetworkToFp16.hpp"
 #include "FoldPadIntoLayer2d.hpp"
 #include "FuseBatchNorm.hpp"
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index 54c14e5c89..7b2f1fd291 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,6 @@
 #include <armnn/backends/TensorHandle.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <BFloat16.hpp>
 #include <Half.hpp>
 
 namespace armnn
@@ -19,27 +18,6 @@ namespace armnn
 namespace optimizations
 {
 
-struct BFloat16ToFloat32
-{
-    static void Func(std::shared_ptr<ConstTensorHandle>& handle)
-    {
-        const TensorInfo& info = handle->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetConstTensor<BFloat16>(),
-                                                                         info.GetNumElements(),
-                                                                         newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
-            ConstTensor newInput(newInfo, newValues);
-            handle.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-};
-
 struct Float16ToFloat32
 {
     static void Func(std::shared_ptr<ConstTensorHandle>& handle)
@@ -61,27 +39,6 @@ struct Float16ToFloat32
     }
 };
 
-struct Float32ToBFloat16
-{
-    static void Func(std::shared_ptr<ConstTensorHandle>& handle)
-    {
-        const TensorInfo& info = handle->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::Float32)
-        {
-            std::vector<BFloat16> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetConstTensor<float>(),
-                                                                         info.GetNumElements(),
-                                                                         newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true);
-            ConstTensor newInput(newInfo, newValues);
-            handle.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-};
-
 struct Float32ToFloat16
 {
     static void Func(std::shared_ptr<ConstTensorHandle>& handle)
@@ -138,17 +95,6 @@ struct IsFloat16Layer
     }
 };
 
-struct IsBFloat16Layer
-{
-    static bool Test(const Layer& layer)
-    {
-        return layer.GetDataType() == DataType::BFloat16;
-    }
-};
-
-using ConvertConstantsBFloatToFloat = ConvertConstants<BFloat16ToFloat32, IsFloat32Layer>;
-using ConvertConstantsFloatToBFloat = ConvertConstants<Float32ToBFloat16, IsBFloat16Layer>;
-
 using ConvertConstantsHalfToFloat = ConvertConstants<Float16ToFloat32, IsFloat32Layer>;
 using ConvertConstantsFloatToHalf = ConvertConstants<Float32ToFloat16, IsFloat16Layer>;
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
deleted file mode 100644
index 6c80e740be..0000000000
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "NetworkUtils.hpp"
-#include "Optimization.hpp"
-
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-
-template <typename LayerT>
-inline LayerT* ConvertWeight(Layer* l)
-{
-    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
-    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-        && layer->m_Weight)
-    {
-        const TensorInfo& info = layer->m_Weight->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::Float32)
-        {
-            std::vector<BFloat16> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(
-                layer->m_Weight->template GetConstTensor<float>(),
-                info.GetNumElements(),
-                newValues.data());
-
-            TensorInfo newInfo(info);
-            newInfo.SetDataType(DataType::BFloat16);
-            ConstTensor newInput(newInfo, newValues);
-            layer->m_Weight.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-    return layer;
-}
-
-class ConvertFp32NetworkToBf16Impl
-{
-public:
-
-    void Run(Graph& graph, Layer& layer) const
-    {
-        // Only convert Float32 To BFloat16 for the Input of Convolution2d layer and FullyConnected layer.
-        // And also convert weight data type from Float32 to Bfloat16.
-        // Do not convert bias data type.
-        if (layer.GetType() == LayerType::Convolution2d)
-        {
-            if (layer.GetDataType() == DataType::Float32)
-            {
-                InsertConvertFp32ToBf16LayersBefore(graph,layer);
-                ConvertWeight<Convolution2dLayer>(&layer);
-            }
-        }
-        else if (layer.GetType() == LayerType::FullyConnected)
-        {
-            if (layer.GetDataType() == DataType::Float32)
-            {
-                InsertConvertFp32ToBf16LayersBefore(graph,layer);
-                ConvertWeight<FullyConnectedLayer>(&layer);
-            }
-        }
-    }
-
-protected:
-    ConvertFp32NetworkToBf16Impl() = default;
-    ~ConvertFp32NetworkToBf16Impl() = default;
-};
-
-using Fp32NetworkToBf16Converter = OptimizeForType<Layer, ConvertFp32NetworkToBf16Impl>;
-
-} // namespace optimizations
-} // namespace armnn
diff --git a/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp b/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp
deleted file mode 100644
index d112010539..0000000000
--- a/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Optimization.hpp"
-#include <armnnUtils/Permute.hpp>
-#include <ResolveType.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-
-class FuseConvertFp32ToBf16IntoConstLayers
-{
-public:
-    void Run(Graph& graph, InputSlot& connection) const
-    {
-        Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
-        Layer& child = connection.GetOwningLayer();
-
-        ARMNN_ASSERT(base.GetType() == LayerType::Constant);
-        ARMNN_ASSERT(child.GetType() == LayerType::ConvertFp32ToBf16);
-
-        auto dataType = base.GetDataType();
-        switch (dataType)
-        {
-            case DataType::Float32:
-                ReplaceConvertFp32ToBf16Layer<DataType::BFloat16>(
-                    graph,
-                    PolymorphicDowncast<ConstantLayer*>(&base),
-                    PolymorphicDowncast<ConvertFp32ToBf16Layer*>(&child));
-                break;
-            default:
-                throw InvalidArgumentException(GetDataTypeName(dataType) +
-                                               std::string(" Constant Layer cannot be fused into ") +
-                                               GetDataTypeName(child.GetDataType()) +
-                                               std::string(" conversion layer."));
-        }
-    }
-protected:
-    FuseConvertFp32ToBf16IntoConstLayers()  = default;
-    ~FuseConvertFp32ToBf16IntoConstLayers() = default;
-private:
-    template<armnn::DataType ArmnnType,
-             typename T = armnn::ResolveType<ArmnnType>>
-    static void ReplaceConvertFp32ToBf16Layer(Graph& graph,
-                                              ConstantLayer* constantLayer,
-                                              ConvertFp32ToBf16Layer* convertFp32ToBf16layer)
-    {
-        IgnoreUnused(graph);
-        /**
-         * This optimisation is to find situations where a constant set of inputs is being provided to a
-         * ConvertFp32ToBf16 layer. In this case we don't want the overhead of Converting the values on
-         * every inference, instead we want to Convert them once and store them in a Const layer to be
-         * used everytime as they will not change.
-         */
-        TensorInfo outputConvertFp32ToBf16Info = convertFp32ToBf16layer->GetOutputSlot(0).GetTensorInfo();
-        std::vector<T> newValues(outputConvertFp32ToBf16Info.GetNumElements());
-
-        armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(
-            constantLayer->m_LayerOutput->GetConstTensor<float>(),
-            outputConvertFp32ToBf16Info.GetNumElements(),
-            newValues.data());
-        TensorInfo newInfo = outputConvertFp32ToBf16Info;
-        newInfo.SetConstant(true);
-        ConstTensor newInput(newInfo, newValues);
-
-        constantLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));
-
-        // Moves connections in convertFp32ToBf16layer output slot to the constant layer.
-        // ConvertFp32ToBf16layer layer will be removed if left unconnected.
-        convertFp32ToBf16layer->GetOutputSlot().MoveAllConnections(constantLayer->GetOutputSlot());
-
-        // Updating the output tensor
-        constantLayer->GetOutputSlot(0).SetTensorInfo(newInfo);
-        ARMNN_ASSERT(constantLayer->GetOutputSlot(0).GetTensorInfo().IsConstant() == true);
-    }
-};
-
-using FuseConversionLayersIntoConstLayers = OptimizeForConnection<ConstantLayer,
-                                                                  ConvertFp32ToBf16Layer,
-                                                                  FuseConvertFp32ToBf16IntoConstLayers>;
-
-} // namespace optimizations
-} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/test/FloatingPointConverterTest.cpp b/src/armnn/test/FloatingPointConverterTest.cpp
index 21a16a3cc0..81384cefae 100644
--- a/src/armnn/test/FloatingPointConverterTest.cpp
+++ b/src/armnn/test/FloatingPointConverterTest.cpp
@@ -5,7 +5,6 @@
 
 #include <armnnUtils/FloatingPointConverter.hpp>
 
-#include <BFloat16.hpp>
 #include <Half.hpp>
 
 #include <vector>
@@ -55,73 +54,4 @@ TEST_CASE("TestConvertFp16ToFp32")
     }
 }
 
-TEST_CASE("TestConvertFloat32ToBFloat16")
-{
-    float floatArray[] = { 1.704735E38f,   // 0x7F004000 round down
-                           0.0f,           // 0x00000000 round down
-                           2.2959E-41f,    // 0x00004000 round down
-                           1.7180272E38f,  // 0x7F014000 round down
-                           9.18355E-41f,   // 0x00010000 round down
-                           1.14794E-40f,   // 0x00014000 round down
-                           4.5918E-41f,    // 0x00008000 round down
-                           -1.708058E38f,  // 0xFF008000 round down
-                           -4.3033756E37f, // 0xFE018000 round up
-                           1.60712E-40f,   // 0x0001C000 round up
-                           -2.0234377f,    // 0xC0018001 round up
-                           -1.1800863E-38f,// 0x80808001 round up
-                           4.843037E-35f,  // 0x0680C000 round up
-                           3.9999998f,     // 0x407FFFFF round up
-                           std::numeric_limits<float>::max(),    // 0x7F7FFFFF max positive value
-                           std::numeric_limits<float>::lowest(), // 0xFF7FFFFF max negative value
-                           1.1754942E-38f, // 0x007FFFFF min positive value
-                           -1.1754942E-38f // 0x807FFFFF min negative value
-                         };
-    uint16_t expectedResult[] = { 0x7F00,
-                                  0x0000,
-                                  0x0000,
-                                  0x7F01,
-                                  0x0001,
-                                  0x0001,
-                                  0x0000,
-                                  0xFF00,
-                                  0xFE02,
-                                  0x0002,
-                                  0xC002,
-                                  0x8081,
-                                  0x0681,
-                                  0x4080,
-                                  0x7F80,
-                                  0xFF80,
-                                  0x0080,
-                                  0x8080
-                                };
-    size_t numFloats = sizeof(floatArray) / sizeof(floatArray[0]);
-
-    std::vector<armnn::BFloat16> convertedBuffer(numFloats);
-
-    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(floatArray, numFloats, convertedBuffer.data());
-
-    for (size_t i = 0; i < numFloats; i++)
-    {
-        armnn::BFloat16 actual = convertedBuffer[i];
-        CHECK_EQ(expectedResult[i], actual.Val());
-    }
-}
-
-TEST_CASE("TestConvertBFloat16ToFloat32")
-{
-    uint16_t bf16Array[] = { 16256, 16320, 38699, 16384, 49156, 32639 };
-    size_t numFloats = sizeof(bf16Array) / sizeof(bf16Array[0]);
-    float expectedResult[] = { 1.0f, 1.5f, -5.525308E-25f, 2.0f, -2.0625f, 3.3895314E38f };
-    std::vector<float> convertedBuffer(numFloats, 0.0f);
-
-    armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(bf16Array, numFloats, convertedBuffer.data());
-
-    for (size_t i = 0; i < numFloats; i++)
-    {
-        float actual = convertedBuffer[i];
-        CHECK_EQ(expectedResult[i], actual);
-    }
-}
-
 }
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index a3800ade09..1035a3b6fd 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -250,17 +250,6 @@ TEST_CASE("ConstantTest")
     CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
 }
 
-TEST_CASE("ConvertBf16ToFp32Test")
-{
-    CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
-TEST_CASE("ConvertFp16ToBf16Test")
-{
-    const TensorShape tensorShape{5, 7, 6, 2};
-    CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
 TEST_CASE("ConvertFp16ToFp32Test")
 {
     CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 63884374b3..067c8612fe 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -123,54 +123,6 @@ TEST_CASE("BFloatType")
     CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
 }
 
-TEST_CASE("Float32ToBFloat16Test")
-{
-    // LSB = 0, R = 0 -> round down
-    armnn::BFloat16 roundDown0 = armnn::BFloat16::Float32ToBFloat16(1.704735E38f); // 0x7F004000
-    CHECK_EQ(roundDown0.Val(), 0x7F00);
-    // LSB = 1, R = 0 -> round down
-    armnn::BFloat16 roundDown1 = armnn::BFloat16::Float32ToBFloat16(9.18355E-41f); // 0x00010000
-    CHECK_EQ(roundDown1.Val(), 0x0001);
-    // LSB = 0, R = 1 all 0 -> round down
-    armnn::BFloat16 roundDown2 = armnn::BFloat16::Float32ToBFloat16(1.14794E-40f); // 0x00014000
-    CHECK_EQ(roundDown2.Val(), 0x0001);
-    // LSB = 1, R = 1 -> round up
-    armnn::BFloat16 roundUp = armnn::BFloat16::Float32ToBFloat16(-2.0234377f); // 0xC0018001
-    CHECK_EQ(roundUp.Val(), 0xC002);
-    // LSB = 0, R = 1 -> round up
-    armnn::BFloat16 roundUp1 = armnn::BFloat16::Float32ToBFloat16(4.843037E-35f); // 0x0680C000
-    CHECK_EQ(roundUp1.Val(), 0x0681);
-    // Max positive value -> infinity
-    armnn::BFloat16 maxPositive = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::max()); // 0x7F7FFFFF
-    CHECK_EQ(maxPositive, armnn::BFloat16::Inf());
-    // Max negative value -> -infinity
-    armnn::BFloat16 maxNeg = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::lowest()); // 0xFF7FFFFF
-    CHECK_EQ(maxNeg.Val(), 0xFF80);
-    // Min positive value
-    armnn::BFloat16 minPositive = armnn::BFloat16::Float32ToBFloat16(1.1754942E-38f); // 0x007FFFFF
-    CHECK_EQ(minPositive.Val(), 0x0080);
-    // Min negative value
-    armnn::BFloat16 minNeg = armnn::BFloat16::Float32ToBFloat16(-1.1754942E-38f); // 0x807FFFFF
-    CHECK_EQ(minNeg.Val(), 0x8080);
-}
-
-TEST_CASE("BFloat16ToFloat32Test")
-{
-    armnn::BFloat16 bf0(1.5f);
-    CHECK_EQ(bf0.ToFloat32(), 1.5f);
-    armnn::BFloat16 bf1(-5.525308E-25f);
-    CHECK_EQ(bf1.ToFloat32(), -5.525308E-25f);
-    armnn::BFloat16 bf2(-2.0625f);
-    CHECK_EQ(bf2.ToFloat32(), -2.0625f);
-    uint16_t v = 32639;
-    armnn::BFloat16 bf3(v);
-    CHECK_EQ(bf3.ToFloat32(), 3.3895314E38f);
-    // Infinity
-    CHECK_EQ(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
-    // NaN
-    CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
-}
-
 TEST_CASE("GraphTopologicalSortSimpleTest")
 {
     std::map<int, std::vector<int>> graph;
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
deleted file mode 100644
index 4aacf7f4fe..0000000000
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <BFloat16.hpp>
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-using namespace armnn;
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("ConvertConstantsFloatToBFloatTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);
-
-    // Create const tensor from fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f,           // 0x40733333 Round down
-                                     3.1055E+29f,    // 0x707ADC3C Round up
-                                     9.149516E-10f,  // 0x307B7FFF Round down
-                                     -3.8f,          // 0xC0733333 Round down
-                                     -3.1055E+29f,   // 0xF07ADC3C Round up
-                                     -9.149516E-10f  // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    // Check tensor data type before conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
-
-    // Check tensor data type after conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
-    // Check whether data matches expected Bf16 data
-    const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
-    CHECK(data[0] == BFloat16(0.0f));
-    CHECK(data[1] == BFloat16(-1.0f));
-    CHECK(data[2] == BFloat16(3.796875f));      // 0x4073
-    CHECK(data[3] == BFloat16(3.1072295E29f));  // 0x707B
-    CHECK(data[4] == BFloat16(9.131327E-10f));  // 0x307B
-    CHECK(data[5] == BFloat16(-3.796875f));     // 0xC073
-    CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("ConvertConstantsBFloatToFloatTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
-
-    // Create the BFloat16 precision input data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> convWeightsData{ 0.f, -1.f,
-                                        3.796875f,      // 0x4073
-                                        3.1072295E29f,  // 0x707B
-                                        9.131327E-10f,  // 0x307B
-                                        -3.796875f,     // 0xC073
-                                        -3.1072295E29f, // 0xF07B
-                                        -9.131327E-10f  // 0xB07B
-                                      };
-    std::vector<uint16_t> bfWeights(8);
-    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
-                                                                 bfWeights.data());
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
-
-    //Create the simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    //Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
-
-    //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Now test the data matches float32 data
-    const float* data = fc->m_Weight->GetConstTensor<float>();
-    CHECK(data[0] == 0.0f);
-    CHECK(data[1] == -1.0f);
-    CHECK(data[2] == 3.796875f);
-    CHECK(data[3] == 3.1072295E29f);
-    CHECK(data[4] == 9.131327E-10f);
-    CHECK(data[5] == -3.796875f);
-    CHECK(data[6] == -3.1072295E29f);
-    CHECK(data[7] == -9.131327E-10f);
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
deleted file mode 100644
index 66893ce1f5..0000000000
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
-
-    // Create the simple test network without Conv2D/FullyConnected.
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
-    floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
-    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
-    // Create const tensor fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f,           // 0x40733333 Round down
-                                     3.1055E+29f,    // 0x707ADC3C Round up
-                                     9.149516E-10f,  // 0x307B7FFF Round down
-                                     -3.8f,          // 0xC0733333 Round down
-                                     -3.1055E+29f,   // 0xF07ADC3C Round up
-                                     -9.149516E-10f  // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create const bias fp32 data
-    unsigned int biasDims[] {4};
-    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
-    // A network with Convolution2d layer
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-    auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
-    conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
-    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
-    auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
-    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
-    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(conv->GetInputSlot(0));
-    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
-    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
-    conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::Convolution2dLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
-                                                           Fp32NetworkToBf16Converter()));
-
-    CHECK(7 == graph.GetNumLayers());
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<armnn::Convolution2dLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
-    CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
-    CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
-    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
-    // Check whether data matches expected Bf16 data
-    const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
-    CHECK(data[0] == armnn::BFloat16(0.0f));
-    CHECK(data[1] == armnn::BFloat16(-1.0f));
-    CHECK(data[2] == armnn::BFloat16(3.796875f));      // 0x4073
-    CHECK(data[3] == armnn::BFloat16(3.1072295E29f));  // 0x707B
-    CHECK(data[4] == armnn::BFloat16(9.131327E-10f));  // 0x307B
-    CHECK(data[5] == armnn::BFloat16(-3.796875f));     // 0xC073
-    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
-    // Create const tensor fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f,           // 0x40733333 Round down
-                                     3.1055E+29f,    // 0x707ADC3C Round up
-                                     9.149516E-10f,  // 0x307B7FFF Round down
-                                     -3.8f,          // 0xC0733333 Round down
-                                     -3.1055E+29f,   // 0xF07ADC3C Round up
-                                     -9.149516E-10f  // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create const bias fp32 data
-    unsigned int biasDims[] {4};
-    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
-    // A network with FullyConnected layer
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    armnn::FullyConnectedDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
-    fc->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
-    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
-    auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
-    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
-    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
-    biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::FullyConnectedLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
-                                                           Fp32NetworkToBf16Converter()));
-
-    CHECK(7 == graph.GetNumLayers());
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConstantLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<armnn::FullyConnectedLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
-    CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
-    CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
-    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
-    // Check whether data matches expected Bf16 data
-    const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
-    CHECK(data[0] == armnn::BFloat16(0.0f));
-    CHECK(data[1] == armnn::BFloat16(-1.0f));
-    CHECK(data[2] == armnn::BFloat16(3.796875f));      // 0x4073
-    CHECK(data[3] == armnn::BFloat16(3.1072295E29f));  // 0x707B
-    CHECK(data[4] == armnn::BFloat16(9.131327E-10f));  // 0x307B
-    CHECK(data[5] == armnn::BFloat16(-3.796875f));     // 0xC073
-    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp b/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
deleted file mode 100644
index 93d5948d61..0000000000
--- a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <LayersFwd.hpp>
-#include <Network.hpp>
-#include <NetworkUtils.hpp>
-#include <Optimizer.hpp>
-#include <TestUtils.hpp>
-
-#include <armnn/backends/TensorHandle.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn;
-using namespace armnn::optimizations;
-
-TEST_CASE("FuseConvertFp32Fp16intoConst")
-{
-    Graph graph;
-    const unsigned int shape[] = {1, 2, 2, 3};
-
-    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
-    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
-    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
-    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
-    ConstTensor constTensor(constTensorInfo, constantValues.data());
-    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
-    convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
-    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up constant -> convert -> output
-    constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
-    convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::Float32);
-    };
-    auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::BFloat16);
-    };
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        checkConstantFloat32,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        checkConstantBFloat16,
-                        &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("RevertConstantWeightsToFP32")
-{
-    Graph graph;
-    const unsigned int shape[] = {1, 2, 2, 3};
-
-    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
-    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
-    TensorInfo inputInfo(4, shape, DataType::Float32);
-    auto* input = graph.AddLayer<InputLayer>(0, "input0");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
-    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
-    ConstTensor constTensor(constTensorInfo, constantValues.data());
-    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
-    convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-    ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
-    convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-    ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
-    convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
-    auto* biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
-    biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(constTensor);
-    biases->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-    auto* conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-    conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up Input    -> Convert ->
-    //            Constant -> Convert -> Conv2d -> Output
-    //            Constant -> Convert ->
-    input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
-    constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
-    biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));
-
-    convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
-    convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
-    convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));
-
-    conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::Float32);
-    };
-    auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::BFloat16);
-    };
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        checkConstantFloat32,
-                        checkConstantFloat32,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<Convolution2dLayer>,
-                        &IsLayerOfType<OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
-    bool revert = RevertConstantWeightsToFP32(conv);
-
-    // Erase unconnected layer as occurs during Topological Sort.
-    graph.EraseLayer(convertLayerInputs);
-
-    CHECK(revert);
-    CHECK(constantLayer->GetDataType() == DataType::Float32);
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        checkConstantBFloat16,
-                        checkConstantFloat32,
-                        &IsLayerOfType<Convolution2dLayer>,
-                        &IsLayerOfType<OutputLayer>));
-}
-}