16 TEST_CASE(
"Fp32NetworkToBf16OptimizationNoConversionTest")
33 floor->GetOutputSlot().Connect(output->GetInputSlot(0));
36 &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
42 &IsLayerOfType<armnn::FloorLayer>,
43 &IsLayerOfType<armnn::OutputLayer>));
46 TEST_CASE(
"Fp32NetworkToBf16OptimizationConv2DTest")
53 unsigned int dims[] = { 4, 2, 1, 1 };
54 std::vector<float> floatWeights{ 0.0f, -1.0f,
65 unsigned int biasDims[] {4};
66 std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
76 conv->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
77 conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
78 conv->GetOutputSlot().SetTensorInfo(infoFP32);
84 conv->GetOutputSlot().Connect(output->GetInputSlot(0));
87 &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
93 &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
94 &IsLayerOfType<armnn::OutputLayer>));
96 armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
116 TEST_CASE(
"Fp32NetworkToBf16OptimizationFullyConnectedTest")
123 unsigned int dims[] = { 4, 2, 1, 1 };
124 std::vector<float> floatWeights{ 0.0f, -1.0f,
135 unsigned int biasDims[] {4};
136 std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
146 fc->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
147 fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
148 fc->GetOutputSlot().SetTensorInfo(infoFP32);
154 fc->GetOutputSlot().Connect(output->GetInputSlot(0));
157 &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
163 &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
164 &IsLayerOfType<armnn::OutputLayer>));
166 armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
Optimizer::Optimizations MakeOptimizations(Args &&... args)
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
static void Pass(Graph &graph, const Optimizations &optimizations)
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a fully connected operation.
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
DataType GetDataType() const
A FullyConnectedDescriptor for the FullyConnectedLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
This layer represents a floor operation.
void SetTensorInfo(const TensorInfo &tensorInfo) override
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
This layer represents a convolution 2d operation.
OptimizeForType< Layer, ConvertFp32NetworkToBf16Impl > Fp32NetworkToBf16Converter