From 7cf0eaa26c1fb29ca9df97e4734ec7c1e10f81c4 Mon Sep 17 00:00:00 2001 From: Aron Virginas-Tar Date: Thu, 24 Jan 2019 17:05:36 +0000 Subject: IVGCVSW-2564 Add support for multiple input and output bindings in InferenceModel Change-Id: I64d724367d42dca4b768b6c6e42acda714985950 --- tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp | 6 +- tests/ExecuteNetwork/ExecuteNetwork.cpp | 37 ++- tests/InferenceModel.hpp | 341 ++++++++++++++------- tests/InferenceTest.hpp | 31 +- tests/InferenceTest.inl | 22 +- .../MultipleNetworksCifar10.cpp | 17 +- tests/YoloInferenceTest.hpp | 6 +- 7 files changed, 310 insertions(+), 150 deletions(-) (limited to 'tests') diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp index 98db023e97..b752c7c98e 100644 --- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp +++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp @@ -31,9 +31,9 @@ int main(int argc, char* argv[]) typename YoloInferenceModel::Params modelParams; modelParams.m_ModelPath = modelOptions.m_ModelDir + "yolov1_tiny_voc2007_model.caffemodel"; - modelParams.m_InputBinding = "data"; - modelParams.m_OutputBinding = "fc12"; - modelParams.m_InputTensorShape = &inputTensorShape; + modelParams.m_InputBindings = { "data" }; + modelParams.m_OutputBindings = { "fc12" }; + modelParams.m_InputShapes = { inputTensorShape }; modelParams.m_IsModelBinary = true; modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice; modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel; diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index dd769755b4..d783a0e2cf 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -137,13 +137,12 @@ std::vector ParseArray(std::istream& stream) [](const std::string& s) { return boost::numeric_cast(std::stoi(s)); }); } -void PrintArray(const std::vector& v) +void PrintOutputData(const std::string& outputLayerName, const std::vector& data) { - for (size_t i = 0; i < v.size(); i++) - { - printf("%f ", v[i]); - } - printf("\n"); + std::cout << outputLayerName << ": "; + std::copy(data.begin(), data.end(), + std::ostream_iterator(std::cout, " ")); + std::cout << std::endl; } void RemoveDuplicateDevices(std::vector& computeDevices) @@ -179,8 +178,10 @@ int MainImpl(const char* modelPath, const size_t subgraphId, const std::shared_ptr& runtime = nullptr) { + using TContainer = std::vector; + // Loads input tensor. - std::vector input; + TContainer inputDataContainer; { std::ifstream inputTensorFile(inputTensorDataFilePath); if (!inputTensorFile.good()) @@ -188,7 +189,7 @@ int MainImpl(const char* modelPath, BOOST_LOG_TRIVIAL(fatal) << "Failed to load input tensor data file from " << inputTensorDataFilePath; return EXIT_FAILURE; } - input = ParseArray(inputTensorFile); + inputDataContainer = ParseArray(inputTensorFile); } try @@ -198,19 +199,23 @@ int MainImpl(const char* modelPath, params.m_ModelPath = modelPath; params.m_IsModelBinary = isModelBinary; params.m_ComputeDevice = computeDevice; - params.m_InputBinding = inputName; - params.m_InputTensorShape = inputTensorShape; - params.m_OutputBinding = outputName; + params.m_InputBindings = { inputName }; + params.m_InputShapes = { *inputTensorShape }; + params.m_OutputBindings = { outputName }; params.m_EnableProfiling = enableProfiling; params.m_SubgraphId = subgraphId; InferenceModel model(params, runtime); - // Executes the model. 
- std::vector output(model.GetOutputSize()); - model.Run(input, output); + // Executes the model + const size_t numOutputs = params.m_OutputBindings.size(); + std::vector outputDataContainers(numOutputs); + model.Run({ inputDataContainer }, outputDataContainers); - // Prints the output tensor. - PrintArray(output); + // Print output tensors + for (size_t i = 0; i < numOutputs; i++) + { + PrintOutputData(params.m_OutputBindings[i], outputDataContainers[i]); + } } catch (armnn::Exception const& e) { diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp index 5fefd05619..1c89238d32 100644 --- a/tests/InferenceModel.hpp +++ b/tests/InferenceModel.hpp @@ -27,6 +27,7 @@ #include #include #include +#include #include namespace @@ -73,20 +74,19 @@ using QuantizationParams = std::pair; struct Params { - std::string m_ModelPath; - std::string m_InputBinding; - std::string m_OutputBinding; - const armnn::TensorShape* m_InputTensorShape; - std::vector m_ComputeDevice; - bool m_EnableProfiling; - size_t m_SubgraphId; - bool m_IsModelBinary; - bool m_VisualizePostOptimizationModel; - bool m_EnableFp16TurboMode; + std::string m_ModelPath; + std::vector m_InputBindings; + std::vector m_InputShapes; + std::vector m_OutputBindings; + std::vector m_ComputeDevice; + bool m_EnableProfiling; + size_t m_SubgraphId; + bool m_IsModelBinary; + bool m_VisualizePostOptimizationModel; + bool m_EnableFp16TurboMode; Params() - : m_InputTensorShape(nullptr) - , m_ComputeDevice{armnn::Compute::CpuRef} + : m_ComputeDevice{armnn::Compute::CpuRef} , m_EnableProfiling(false) , m_SubgraphId(0) , m_IsModelBinary(true) @@ -105,33 +105,54 @@ public: using BindingPointInfo = InferenceModelInternal::BindingPointInfo; static armnn::INetworkPtr Create(const Params& params, - BindingPointInfo& inputBindings, - BindingPointInfo& outputBindings) + std::vector& inputBindings, + std::vector& outputBindings) { - const std::string& modelPath = params.m_ModelPath; - - // Create a network from a file on disk - auto parser(IParser::Create()); - - std::map inputShapes; - if (params.m_InputTensorShape) - { - inputShapes[params.m_InputBinding] = *params.m_InputTensorShape; - } - std::vector requestedOutputs{ params.m_OutputBinding }; - armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; - - { - ARMNN_SCOPED_HEAP_PROFILING("Parsing"); - // Handle text and binary input differently by calling the corresponding parser function - network = (params.m_IsModelBinary ? 
- parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) : - parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs)); - } - - inputBindings = parser->GetNetworkInputBindingInfo(params.m_InputBinding); - outputBindings = parser->GetNetworkOutputBindingInfo(params.m_OutputBinding); - return network; + const std::string& modelPath = params.m_ModelPath; + + // Create a network from a file on disk + auto parser(IParser::Create()); + + std::map inputShapes; + if (!params.m_InputShapes.empty()) + { + const size_t numInputShapes = params.m_InputShapes.size(); + const size_t numInputBindings = params.m_InputBindings.size(); + if (numInputShapes < numInputBindings) + { + throw armnn::Exception(boost::str(boost::format( + "Not every input has its tensor shape specified: expected=%1%, got=%2%") + % numInputBindings % numInputShapes)); + } + + for (size_t i = 0; i < numInputShapes; i++) + { + inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i]; + } + } + + std::vector requestedOutputs = params.m_OutputBindings; + armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; + + { + ARMNN_SCOPED_HEAP_PROFILING("Parsing"); + // Handle text and binary input differently by calling the corresponding parser function + network = (params.m_IsModelBinary ? + parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) : + parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs)); + } + + for (const std::string& inputLayerName : params.m_InputBindings) + { + inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName)); + } + + for (const std::string& outputLayerName : params.m_OutputBindings) + { + outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName)); + } + + return network; } }; @@ -145,24 +166,36 @@ public: using BindingPointInfo = InferenceModelInternal::BindingPointInfo; static armnn::INetworkPtr Create(const Params& params, - BindingPointInfo& inputBindings, - BindingPointInfo& outputBindings) + std::vector& inputBindings, + std::vector& outputBindings) { - const std::string& modelPath = params.m_ModelPath; + const std::string& modelPath = params.m_ModelPath; - // Create a network from a file on disk - auto parser(IParser::Create()); + // Create a network from a file on disk + auto parser(IParser::Create()); - armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; + armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; - { - ARMNN_SCOPED_HEAP_PROFILING("Parsing"); - network = parser->CreateNetworkFromBinaryFile(modelPath.c_str()); - } + { + ARMNN_SCOPED_HEAP_PROFILING("Parsing"); + network = parser->CreateNetworkFromBinaryFile(modelPath.c_str()); + } + + for (const std::string& inputLayerName : params.m_InputBindings) + { + BindingPointInfo inputBinding = + parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName); + inputBindings.push_back(inputBinding); + } - inputBindings = parser->GetNetworkInputBindingInfo(params.m_SubgraphId, params.m_InputBinding); - outputBindings = parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, params.m_OutputBinding); - return network; + for (const std::string& outputLayerName : params.m_OutputBindings) + { + BindingPointInfo outputBinding = + parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName); + outputBindings.push_back(outputBinding); + } + + return network; } }; #endif @@ -177,67 +210,111 @@ public: using BindingPointInfo = 
InferenceModelInternal::BindingPointInfo; static armnn::INetworkPtr Create(const Params& params, - BindingPointInfo& inputBindings, - BindingPointInfo& outputBindings) + std::vector& inputBindings, + std::vector& outputBindings) { - const std::string& modelPath = params.m_ModelPath; + const std::string& modelPath = params.m_ModelPath; + + // Create a network from a file on disk + auto parser(IParser::Create()); + + armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; - // Create a network from a file on disk - auto parser(IParser::Create()); + { + ARMNN_SCOPED_HEAP_PROFILING("Parsing"); + network = (params.m_IsModelBinary ? + parser->CreateNetworkFromBinaryFile(modelPath.c_str()) : + parser->CreateNetworkFromTextFile(modelPath.c_str())); + } - armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}}; + for (const std::string& inputLayerName : params.m_InputBindings) + { + BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName); + inputBindings.push_back(inputBinding); + } - { - ARMNN_SCOPED_HEAP_PROFILING("Parsing"); - network = (params.m_IsModelBinary ? - parser->CreateNetworkFromBinaryFile(modelPath.c_str()) : - parser->CreateNetworkFromTextFile(modelPath.c_str())); - } + for (const std::string& outputLayerName : params.m_OutputBindings) + { + BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName); + outputBindings.push_back(outputBinding); + } - inputBindings = parser->GetNetworkInputBindingInfo(params.m_InputBinding); - outputBindings = parser->GetNetworkOutputBindingInfo(params.m_OutputBinding); - return network; + return network; } }; #endif template -inline armnn::InputTensors MakeInputTensors(const InferenceModelInternal::BindingPointInfo& input, - const TContainer& inputTensorData) +inline armnn::InputTensors MakeInputTensors( + const std::vector& inputBindings, + const std::vector& inputDataContainers) { - if (inputTensorData.size() != input.second.GetNumElements()) + armnn::InputTensors inputTensors; + + const size_t numInputs = inputBindings.size(); + if (numInputs != inputDataContainers.size()) { - try - { - throw armnn::Exception(boost::str(boost::format("Input tensor has incorrect size. Expected %1% elements " - "but got %2%.") % input.second.GetNumElements() % inputTensorData.size())); - } catch (const boost::exception& e) + throw armnn::Exception(boost::str(boost::format("Number of inputs does not match number of " + "tensor data containers: %1% != %2%") % numInputs % inputDataContainers.size())); + } + + for (size_t i = 0; i < numInputs; i++) + { + const InferenceModelInternal::BindingPointInfo& inputBinding = inputBindings[i]; + const TContainer& inputData = inputDataContainers[i]; + + if (inputData.size() != inputBinding.second.GetNumElements()) { - // Coverity fix: it should not be possible to get here but boost::str and boost::format can both - // throw uncaught exceptions, convert them to armnn exceptions and rethrow. 
- throw armnn::Exception(diagnostic_information(e)); + throw armnn::Exception("Input tensor has incorrect size"); } + + armnn::ConstTensor inputTensor(inputBinding.second, inputData.data()); + inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor)); } - return { { input.first, armnn::ConstTensor(input.second, inputTensorData.data()) } }; + + return inputTensors; } template -inline armnn::OutputTensors MakeOutputTensors(const InferenceModelInternal::BindingPointInfo& output, - TContainer& outputTensorData) +inline armnn::OutputTensors MakeOutputTensors( + const std::vector& outputBindings, + std::vector& outputDataContainers) { - if (outputTensorData.size() != output.second.GetNumElements()) + armnn::OutputTensors outputTensors; + + const size_t numOutputs = outputBindings.size(); + if (numOutputs != outputDataContainers.size()) { - throw armnn::Exception("Output tensor has incorrect size"); + throw armnn::Exception(boost::str(boost::format("Number of outputs does not match number of " + "tensor data containers: %1% != %2%") % numOutputs % outputDataContainers.size())); + } + + for (size_t i = 0; i < numOutputs; i++) + { + const InferenceModelInternal::BindingPointInfo& outputBinding = outputBindings[i]; + TContainer& outputData = outputDataContainers[i]; + + if (outputData.size() != outputBinding.second.GetNumElements()) + { + throw armnn::Exception("Output tensor has incorrect size"); + } + + armnn::Tensor outputTensor(outputBinding.second, outputData.data()); + outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor)); } - return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } }; + + return outputTensors; } template class InferenceModel { public: - using DataType = TDataType; - using Params = InferenceModelInternal::Params; + using DataType = TDataType; + using Params = InferenceModelInternal::Params; + using BindingPointInfo = InferenceModelInternal::BindingPointInfo; + using QuantizationParams = InferenceModelInternal::QuantizationParams; + using TContainer = std::vector; struct CommandLineOptions { @@ -290,8 +367,8 @@ public: throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends); } - armnn::INetworkPtr network = CreateNetworkImpl::Create(params, m_InputBindingInfo, - m_OutputBindingInfo); + armnn::INetworkPtr network = + CreateNetworkImpl::Create(params, m_InputBindings, m_OutputBindings); armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}}; { @@ -327,14 +404,41 @@ public: } } - unsigned int GetOutputSize() const + void CheckInputIndexIsValid(unsigned int inputIndex) const + { + if (m_InputBindings.size() < inputIndex + 1) + { + throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex)); + } + } + + void CheckOutputIndexIsValid(unsigned int outputIndex) const + { + if (m_OutputBindings.size() < outputIndex + 1) + { + throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex)); + } + } + + unsigned int GetOutputSize(unsigned int outputIndex = 0u) const { - return m_OutputBindingInfo.second.GetNumElements(); + CheckOutputIndexIsValid(outputIndex); + return m_OutputBindings[outputIndex].second.GetNumElements(); } - void Run(const std::vector& input, std::vector& output) + void Run(const std::vector& inputContainers, std::vector& outputContainers) { - BOOST_ASSERT(output.size() == GetOutputSize()); + for (unsigned int i = 0; i < outputContainers.size(); i++) + { + const unsigned int expectedOutputDataSize = 
GetOutputSize(i); + const unsigned int actualOutputDataSize = boost::numeric_cast(outputContainers[i].size()); + if (actualOutputDataSize < expectedOutputDataSize) + { + unsigned int outputIndex = boost::numeric_cast(i); + throw armnn::Exception(boost::str(boost::format("Not enough data for output #%1%: expected " + "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize)); + } + } std::shared_ptr profiler = m_Runtime->GetProfiler(m_NetworkIdentifier); if (profiler) @@ -343,8 +447,8 @@ public: } armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier, - MakeInputTensors(input), - MakeOutputTensors(output)); + MakeInputTensors(inputContainers), + MakeOutputTensors(outputContainers)); // if profiling is enabled print out the results if (profiler && profiler->IsProfilingEnabled()) @@ -358,39 +462,62 @@ public: } } - const InferenceModelInternal::BindingPointInfo & GetInputBindingInfo() const + const BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const + { + CheckInputIndexIsValid(inputIndex); + return m_InputBindings[inputIndex]; + } + + const std::vector& GetInputBindingInfos() const + { + return m_InputBindings; + } + + const BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const + { + CheckOutputIndexIsValid(outputIndex); + return m_OutputBindings[outputIndex]; + } + + const std::vector& GetOutputBindingInfos() const { - return m_InputBindingInfo; + return m_OutputBindings; } - const InferenceModelInternal::BindingPointInfo & GetOutputBindingInfo() const + QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const { - return m_OutputBindingInfo; + CheckOutputIndexIsValid(outputIndex); + return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(), + m_OutputBindings[outputIndex].second.GetQuantizationOffset()); } - InferenceModelInternal::QuantizationParams GetQuantizationParams() const + std::vector GetAllQuantizationParams() const { - return std::make_pair(m_OutputBindingInfo.second.GetQuantizationScale(), - m_OutputBindingInfo.second.GetQuantizationOffset()); + std::vector quantizationParams; + for (unsigned int i = 0u; i < m_OutputBindings.size(); i++) + { + quantizationParams.push_back(GetQuantizationParams(i)); + } + return quantizationParams; } private: armnn::NetworkId m_NetworkIdentifier; std::shared_ptr m_Runtime; - InferenceModelInternal::BindingPointInfo m_InputBindingInfo; - InferenceModelInternal::BindingPointInfo m_OutputBindingInfo; + std::vector m_InputBindings; + std::vector m_OutputBindings; bool m_EnableProfiling; template - armnn::InputTensors MakeInputTensors(const TContainer& inputTensorData) + armnn::InputTensors MakeInputTensors(const std::vector& inputDataContainers) { - return ::MakeInputTensors(m_InputBindingInfo, inputTensorData); + return ::MakeInputTensors(m_InputBindings, inputDataContainers); } template - armnn::OutputTensors MakeOutputTensors(TContainer& outputTensorData) + armnn::OutputTensors MakeOutputTensors(std::vector& outputDataContainers) { - return ::MakeOutputTensors(m_OutputBindingInfo, outputTensorData); + return ::MakeOutputTensors(m_OutputBindings, outputDataContainers); } -}; +}; \ No newline at end of file diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp index 3ea70962d2..3c22df9a5e 100644 --- a/tests/InferenceTest.hpp +++ b/tests/InferenceTest.hpp @@ -100,31 +100,40 @@ template class InferenceModelTestCase : public IInferenceTestCase { public: + using TContainer = std::vector; + 
InferenceModelTestCase(TModel& model, - unsigned int testCaseId, - std::vector modelInput, - unsigned int outputSize) + unsigned int testCaseId, + const std::vector& inputs, + const std::vector& outputSizes) : m_Model(model) , m_TestCaseId(testCaseId) - , m_Input(std::move(modelInput)) + , m_Inputs(std::move(inputs)) { - m_Output.resize(outputSize); + // Initialize output vector + const size_t numOutputs = outputSizes.size(); + m_Outputs.resize(numOutputs); + + for (size_t i = 0; i < numOutputs; i++) + { + m_Outputs[i].resize(outputSizes[i]); + } } virtual void Run() override { - m_Model.Run(m_Input, m_Output); + m_Model.Run(m_Inputs, m_Outputs); } protected: unsigned int GetTestCaseId() const { return m_TestCaseId; } - const std::vector& GetOutput() const { return m_Output; } + const std::vector& GetOutputs() const { return m_Outputs; } private: - TModel& m_Model; - unsigned int m_TestCaseId; - std::vector m_Input; - std::vector m_Output; + TModel& m_Model; + unsigned int m_TestCaseId; + std::vector m_Inputs; + std::vector m_Outputs; }; template diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl index 7ce017c6cd..4dde35403d 100644 --- a/tests/InferenceTest.inl +++ b/tests/InferenceTest.inl @@ -1,4 +1,4 @@ -// +// // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // @@ -39,7 +39,7 @@ ClassifierTestCase::ClassifierTestCase( unsigned int testCaseId, unsigned int label, std::vector modelInput) - : InferenceModelTestCase(model, testCaseId, std::move(modelInput), model.GetOutputSize()) + : InferenceModelTestCase(model, testCaseId, { std::move(modelInput) }, { model.GetOutputSize() }) , m_Label(label) , m_QuantizationParams(model.GetQuantizationParams()) , m_NumInferencesRef(numInferencesRef) @@ -52,7 +52,7 @@ ClassifierTestCase::ClassifierTestCase( template TestCaseResult ClassifierTestCase::ProcessResult(const InferenceTestOptions& params) { - auto& output = this->GetOutput(); + auto& output = this->GetOutputs()[0]; const auto testCaseId = this->GetTestCaseId(); std::map resultMap; @@ -309,7 +309,12 @@ int ClassifierInferenceTestMain(int argc, const std::vector& defaultTestCaseIds, TConstructDatabaseCallable constructDatabase, const armnn::TensorShape* inputTensorShape) + { + BOOST_ASSERT(modelFilename); + BOOST_ASSERT(inputBindingName); + BOOST_ASSERT(outputBindingName); + return InferenceTestMain(argc, argv, defaultTestCaseIds, [=] () @@ -328,9 +333,14 @@ int ClassifierInferenceTestMain(int argc, typename InferenceModel::Params modelParams; modelParams.m_ModelPath = modelOptions.m_ModelDir + modelFilename; - modelParams.m_InputBinding = inputBindingName; - modelParams.m_OutputBinding = outputBindingName; - modelParams.m_InputTensorShape = inputTensorShape; + modelParams.m_InputBindings = { inputBindingName }; + modelParams.m_OutputBindings = { outputBindingName }; + + if (inputTensorShape) + { + modelParams.m_InputShapes.push_back(*inputTensorShape); + } + modelParams.m_IsModelBinary = isModelBinary; modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice; modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel; diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp index f31e0c95a9..44b8890fc2 100644 --- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp +++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp @@ -173,14 +173,23 @@ int main(int argc, char* argv[]) // Loads test case data (including image data). 
std::unique_ptr testCaseData = cifar10.GetTestCaseData(i); - // Tests inference. - std::vector> outputs(networksCount); + using TInputContainer = std::vector; + using TOutputContainer = std::array; + // Tests inference. + std::vector outputs(networksCount); for (unsigned int k = 0; k < networksCount; ++k) { + using BindingPointInfo = InferenceModelInternal::BindingPointInfo; + std::vector inputBindings = { networks[k].m_InputBindingInfo }; + std::vector outputBindings = { networks[k].m_OutputBindingInfo }; + + std::vector inputData = { testCaseData->m_InputImage }; + std::vector outputData = { outputs[k] }; + status = runtime->EnqueueWorkload(networks[k].m_Network, - MakeInputTensors(networks[k].m_InputBindingInfo, testCaseData->m_InputImage), - MakeOutputTensors(networks[k].m_OutputBindingInfo, outputs[k])); + MakeInputTensors(inputBindings, inputData), + MakeOutputTensors(outputBindings, outputData)); if (status == armnn::Status::Failure) { BOOST_LOG_TRIVIAL(fatal) << "armnn::IRuntime: Failed to enqueue workload"; diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp index 5e2a4820fa..98a9d2f106 100644 --- a/tests/YoloInferenceTest.hpp +++ b/tests/YoloInferenceTest.hpp @@ -24,7 +24,7 @@ public: YoloTestCase(Model& model, unsigned int testCaseId, YoloTestCaseData& testCaseData) - : InferenceModelTestCase(model, testCaseId, std::move(testCaseData.m_InputImage), YoloOutputSize) + : InferenceModelTestCase(model, testCaseId, { std::move(testCaseData.m_InputImage) }, { YoloOutputSize }) , m_FloatComparer(boost::math::fpc::percent_tolerance(1.0f)) , m_TopObjectDetections(std::move(testCaseData.m_TopObjectDetections)) { @@ -34,7 +34,7 @@ public: { using Boost3dArray = boost::multi_array; - const std::vector& output = this->GetOutput(); + const std::vector& output = this->GetOutputs()[0]; BOOST_ASSERT(output.size() == YoloOutputSize); constexpr Boost3dArray::index gridSize = 7; @@ -178,7 +178,7 @@ class YoloTestCaseProvider : public IInferenceTestCaseProvider { public: template - YoloTestCaseProvider(TConstructModelCallable constructModel) + explicit YoloTestCaseProvider(TConstructModelCallable constructModel) : m_ConstructModel(constructModel) { } -- cgit v1.2.1
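
To illustrate the reworked API, a minimal usage sketch follows (not part of the patch itself): it drives the new vector-based m_InputBindings / m_InputShapes / m_OutputBindings fields and the multi-container Run() overload for a hypothetical model with one input and two outputs. The parser type, model file name, layer names and tensor shape are placeholder assumptions; only the Params fields and the Run()/GetOutputSize() signatures come from the change above.

// Illustrative sketch only: the model path, layer names and shape below are placeholders,
// and InferenceModel<armnnCaffeParser::ICaffeParser, float> mirrors the existing
// Caffe-based instantiations in these tests rather than anything added by this patch.
#include "InferenceModel.hpp"

#include "armnnCaffeParser/ICaffeParser.hpp"

#include <vector>

void RunTwoOutputModel(const std::vector<float>& inputImageData)
{
    using Model = InferenceModel<armnnCaffeParser::ICaffeParser, float>;

    Model::Params params;
    params.m_ModelPath      = "example_two_output_model.caffemodel"; // hypothetical model file
    params.m_InputBindings  = { "data" };                            // one input layer
    const unsigned int inputDims[] = { 1, 3, 224, 224 };             // placeholder NCHW shape
    params.m_InputShapes    = { armnn::TensorShape(4, inputDims) };
    params.m_OutputBindings = { "prob", "fc7" };                     // two output layers
    params.m_IsModelBinary  = true;

    // A null runtime mirrors the default argument used by ExecuteNetwork's MainImpl.
    Model model(params, nullptr);

    // One data container per binding; Run() validates the container counts and element sizes.
    std::vector<Model::TContainer> inputs  = { inputImageData };
    std::vector<Model::TContainer> outputs(params.m_OutputBindings.size());
    for (size_t i = 0; i < outputs.size(); ++i)
    {
        outputs[i].resize(model.GetOutputSize(static_cast<unsigned int>(i)));
    }

    model.Run(inputs, outputs); // outputs[i] now holds the data for output binding i
}

As in the patched ExecuteNetwork, each output container is sized from GetOutputSize(i) before the call; Run() throws if a container is smaller than the corresponding binding expects.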