//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "SchemaSerialize.hpp"
#include "TensorHelpers.hpp"

#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"

#include <ArmnnSchema_generated.h>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ResolveType.hpp>

#include <doctest/doctest.h>

#include <fmt/format.h>

#include <map>
#include <string>
#include <vector>

using armnnDeserializer::IDeserializer;
using TensorRawPtr = armnnSerializer::TensorInfo*;

struct ParserFlatbuffersSerializeFixture
{
    ParserFlatbuffersSerializeFixture() :
        m_Parser(IDeserializer::Create()),
        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
        m_NetworkIdentifier(-1)
    {
    }

    std::vector<uint8_t> m_GraphBinary;
    std::string m_JsonString;
    std::unique_ptr<IDeserializer, void (*)(IDeserializer* parser)> m_Parser;
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetworkIdentifier;

    /// If SetupSingleInputSingleOutput() is called, these store the input and output name
    /// so they don't need to be passed to the single-input-single-output overloads of RunTest().
    std::string m_SingleInputName;
    std::string m_SingleOutputName;

    void Setup()
    {
        bool ok = ReadStringToBinary();
        if (!ok)
        {
            throw armnn::Exception("LoadNetwork failed while reading binary input");
        }

        armnn::INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
        if (!network)
        {
            throw armnn::Exception("The parser failed to create an ArmNN network");
        }

        auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());

        std::string errorMessage;
        armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
        if (ret != armnn::Status::Success)
        {
            throw armnn::Exception(fmt::format("The runtime failed to load the network. "
                                               "Error was: {0}. in {1} [{2}:{3}]",
                                               errorMessage,
                                               __func__,
                                               __FILE__,
                                               __LINE__));
        }
    }

    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
    {
        // Store the input and output name so they don't need to be passed to the
        // single-input-single-output overload of RunTest().
        m_SingleInputName = inputName;
        m_SingleOutputName = outputName;
        Setup();
    }

    bool ReadStringToBinary()
    {
        std::string schemafile(&deserialize_schema_start, &deserialize_schema_end);

        // Parse the schema first, so it can be used to parse the JSON data afterwards.
        flatbuffers::Parser parser;

        bool ok = parser.Parse(schemafile.c_str());
        CHECK_MESSAGE(ok, std::string("Failed to parse schema file. Error was: " + parser.error_).c_str());

        ok &= parser.Parse(m_JsonString.c_str());
        CHECK_MESSAGE(ok, std::string("Failed to parse json input. Error was: " + parser.error_).c_str());

        if (!ok)
        {
            return false;
        }

        {
            const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
            size_t size = static_cast<size_t>(parser.builder_.GetSize());
            m_GraphBinary.assign(bufferPtr, bufferPtr + size);
        }
        return ok;
    }

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// This overload assumes the network has a single input and a single output.
    template<std::size_t NumOutputDimensions,
             armnn::DataType ArmnnType,
             typename DataType = armnn::ResolveType<ArmnnType>>
    void RunTest(unsigned int layersId,
                 const std::vector<DataType>& inputData,
                 const std::vector<DataType>& expectedOutputData);

    /// As above, but allows the input and output tensors to have different data types.
    template<std::size_t NumOutputDimensions,
             armnn::DataType ArmnnInputType,
             armnn::DataType ArmnnOutputType,
             typename InputDataType = armnn::ResolveType<ArmnnInputType>,
             typename OutputDataType = armnn::ResolveType<ArmnnOutputType>>
    void RunTest(unsigned int layersId,
                 const std::vector<InputDataType>& inputData,
                 const std::vector<OutputDataType>& expectedOutputData);
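    // Note on the RunTest() template parameters used above and below: NumOutputDimensions is the
    // expected rank of each output tensor, and the armnn::DataType non-type parameters select the
    // tensor element type. armnn::ResolveType maps a DataType enum value to the matching C++ type
    // (for example, armnn::DataType::Float32 resolves to float), so the trailing typename
    // parameters default automatically and callers normally supply only the rank and the enum
    // values, e.g. RunTest<4, armnn::DataType::Float32>(...).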
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name.
    template<std::size_t NumOutputDimensions,
             armnn::DataType ArmnnType,
             typename DataType = armnn::ResolveType<ArmnnType>>
    void RunTest(unsigned int layersId,
                 const std::map<std::string, std::vector<DataType>>& inputData,
                 const std::map<std::string, std::vector<DataType>>& expectedOutputData);

    /// As above, but allows the input and output tensors to have different data types.
    template<std::size_t NumOutputDimensions,
             armnn::DataType ArmnnInputType,
             armnn::DataType ArmnnOutputType,
             typename InputDataType = armnn::ResolveType<ArmnnInputType>,
             typename OutputDataType = armnn::ResolveType<ArmnnOutputType>>
    void RunTest(unsigned int layersId,
                 const std::map<std::string, std::vector<InputDataType>>& inputData,
                 const std::map<std::string, std::vector<OutputDataType>>& expectedOutputData);

    void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
                      armnnSerializer::TensorInfo tensorType, const std::string& name,
                      const float scale, const int64_t zeroPoint)
    {
        armnn::IgnoreUnused(name);
        CHECK_EQ(shapeSize, tensors->dimensions()->size());
        CHECK(std::equal(shape.begin(), shape.end(),
                         tensors->dimensions()->begin(), tensors->dimensions()->end()));
        CHECK_EQ(tensorType.dataType(), tensors->dataType());
        CHECK_EQ(scale, tensors->quantizationScale());
        CHECK_EQ(zeroPoint, tensors->quantizationOffset());
    }
};

template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType, typename DataType>
void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
                                                const std::vector<DataType>& inputData,
                                                const std::vector<DataType>& expectedOutputData)
{
    // Forward to the overload that allows different input and output types.
    RunTest<NumOutputDimensions, ArmnnType, ArmnnType>(layersId, inputData, expectedOutputData);
}

template<std::size_t NumOutputDimensions,
         armnn::DataType ArmnnInputType,
         armnn::DataType ArmnnOutputType,
         typename InputDataType,
         typename OutputDataType>
void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
                                                const std::vector<InputDataType>& inputData,
                                                const std::vector<OutputDataType>& expectedOutputData)
{
    // Forward to the multiple-input/multiple-output overload, using the names stored by
    // SetupSingleInputSingleOutput().
    RunTest<NumOutputDimensions, ArmnnInputType, ArmnnOutputType>(layersId,
                                                                  { { m_SingleInputName, inputData } },
                                                                  { { m_SingleOutputName, expectedOutputData } });
}

template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType, typename DataType>
void ParserFlatbuffersSerializeFixture::RunTest(
    unsigned int layersId,
    const std::map<std::string, std::vector<DataType>>& inputData,
    const std::map<std::string, std::vector<DataType>>& expectedOutputData)
{
    RunTest<NumOutputDimensions, ArmnnType, ArmnnType>(layersId, inputData, expectedOutputData);
}

template<std::size_t NumOutputDimensions,
         armnn::DataType ArmnnInputType,
         armnn::DataType ArmnnOutputType,
         typename InputDataType,
         typename OutputDataType>
void ParserFlatbuffersSerializeFixture::RunTest(
    unsigned int layersId,
    const std::map<std::string, std::vector<InputDataType>>& inputData,
    const std::map<std::string, std::vector<OutputDataType>>& expectedOutputData)
{
    auto ConvertBindingInfo = [](const armnnDeserializer::BindingPointInfo& bindingInfo)
    {
        return std::make_pair(bindingInfo.m_BindingId, bindingInfo.m_TensorInfo);
    };

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    for (auto&& it : inputData)
    {
        armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
            m_Parser->GetNetworkInputBindingInfo(layersId, it.first));
        bindingInfo.second.SetConstant(true);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnInputType);
        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
    }

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<OutputDataType>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
            m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
        armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnOutputType);
        outputStorage.emplace(it.first, std::vector<OutputDataType>(bindingInfo.second.GetNumElements()));
        outputTensors.push_back(
            { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
            m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
}
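// A minimal usage sketch, kept as a comment so this header stays declaration-only. The doctest
// macro TEST_CASE_FIXTURE is real, but "DummyLayerFixture", the layer names and the tensor data
// are hypothetical placeholders, and the FlatBuffers JSON body is elided:
//
//     struct DummyLayerFixture : public ParserFlatbuffersSerializeFixture
//     {
//         DummyLayerFixture()
//         {
//             m_JsonString = R"({ ... FlatBuffers JSON for a serialized ArmNN graph ... })";
//             SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
//         }
//     };
//
//     TEST_CASE_FIXTURE(DummyLayerFixture, "RunDummyLayer")
//     {
//         // Rank-4 Float32 input and expected output, run against subgraph 0.
//         RunTest<4, armnn::DataType::Float32>(0,
//                                              { 1.0f, 2.0f, 3.0f, 4.0f },
//                                              { 1.0f, 2.0f, 3.0f, 4.0f });
//     }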