#include <TestUtils.hpp>

#include <doctest/doctest.h>

TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
{
    floor->GetOutputSlot().Connect(output->GetInputSlot(0));

    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
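
    // Run the optimizer. The exact call is elided in this excerpt; this reconstruction assumes the
    // Optimizer::Pass, MakeOptimizations and Fp32NetworkToBf16Converter symbols referenced by the test.
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));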

    // A network without Convolution2d or FullyConnected layers is left unchanged:
    // no ConvertFp32ToBf16 layers are inserted.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
}

TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
{
    // ... (graph creation and input layer setup elided in this excerpt)

    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     /* remaining weight values elided in this excerpt */ };

    unsigned int biasDims[] { 4 };
    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
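
    // The ConstTensor and ConstantLayer setup is elided in this excerpt. A hedged sketch, assuming the
    // usual constant-weights pattern; the TensorInfo arguments here are illustrative.
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true),
                               floatWeights);
    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true),
                            floatBias);

    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
    auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("bias");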

    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());

    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());

    // ... (Convolution2dLayer "conv" and OutputLayer "output" creation, plus the input connection,
    //      elided in this excerpt)
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
    conv->GetOutputSlot().Connect(output->GetInputSlot(0));

    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
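
    // Run the optimizer. The call is elided in this excerpt; this reconstruction assumes the
    // Fp32NetworkToBf16Converter and RedirectMembersToConstantInputs optimizations referenced alongside
    // this listing are applied together.
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter(),
                                                           RedirectMembersToConstantInputs()));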

    // After the optimization pass, ConvertFp32ToBf16 layers are inserted ahead of the Convolution2d layer.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<armnn::Convolution2dLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));

    armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
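
    // The final assertions are elided in this excerpt. Presumably the test verifies the data types of
    // the tensors feeding the convolution, along the lines of (assumed, not from the source):
    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
    CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
    CHECK((biasTensor.GetDataType() == armnn::DataType::Float32)); // bias data is typically kept as Float32
}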

TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
{
    // ... (graph creation and input layer setup elided in this excerpt)

    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     /* remaining weight values elided in this excerpt */ };

    unsigned int biasDims[] { 4 };
    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
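
    // (ConstTensor and ConstantLayer setup for weightsLayer/biasLayer elided here; it presumably
    //  mirrors the Conv2D test above.)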

    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());

    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());

    // ... (FullyConnectedLayer "fc" and OutputLayer "output" creation, plus the input connection,
    //      elided in this excerpt)
    weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::FullyConnectedLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));
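
    // Assumed, as in the Conv2D test: run the conversion pass over the graph.
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter(),
                                                           RedirectMembersToConstantInputs()));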

    // After the optimization pass, ConvertFp32ToBf16 layers are inserted ahead of the FullyConnected layer.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConstantLayer>,
                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<armnn::FullyConnectedLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));

    armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
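
    // Presumably followed by the same BFloat16/Float32 data-type assertions as in the Conv2D test above.
}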