16 TEST_CASE(
"ConvertConstantsHalfToFloatTest")
23 unsigned int dims[] = { 4, 1, 1, 1 };
24 std::vector<float> convWeightsData{ 1.f, 2.f, 3.f, 4.f };
25 std::vector<uint16_t> halfWeights(4);
35 fc->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
36 fc->GetOutputSlot().SetTensorInfo(info);
42 fc->GetOutputSlot().Connect(output->GetInputSlot(0));
54 const float* data = fc->m_Weight->GetConstTensor<
float>();
55 CHECK(1.0f == data[0]);
56 CHECK(2.0f == data[1]);
57 CHECK(3.0f == data[2]);
58 CHECK(4.0f == data[3]);
Optimizer::Optimizations MakeOptimizations(Args &&... args)
LayerT * AddLayer(Args &&... args)
Adds a new layer of type LayerT, constructed with the arguments passed, to the graph.
int Connect(InputSlot &destination)
static void Pass(Graph &graph, const Optimizations &optimizations)
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a fully connected operation.
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
A FullyConnectedDescriptor for the FullyConnectedLayer.
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores them in the given dstFloat16Buffer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
ConvertConstants< Float16ToFloat32, IsFloat32Layer > ConvertConstantsHalfToFloat
void SetTensorInfo(const TensorInfo &tensorInfo) override
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.