aboutsummaryrefslogtreecommitdiff
path: root/src/armnnUtils
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-03-09 14:13:49 +0000
committertelsoa01 <telmo.soares@arm.com>2018-03-09 14:13:49 +0000
commit4fcda0101ec3d110c1d6d7bee5c83416b645528a (patch)
treec9a70aeb2887006160c1b3d265c27efadb7bdbae /src/armnnUtils
downloadarmnn-4fcda0101ec3d110c1d6d7bee5c83416b645528a.tar.gz
Release 18.02
Change-Id: Id3c11dc5ee94ef664374a988fcc6901e9a232fa6
Diffstat (limited to 'src/armnnUtils')
-rw-r--r--src/armnnUtils/GraphTopologicalSort.hpp90
-rw-r--r--src/armnnUtils/Logging.cpp99
-rw-r--r--src/armnnUtils/Logging.hpp21
-rw-r--r--src/armnnUtils/ParserPrototxtFixture.hpp134
-rw-r--r--src/armnnUtils/Permute.cpp118
-rw-r--r--src/armnnUtils/Permute.hpp20
6 files changed, 482 insertions, 0 deletions
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
new file mode 100644
index 0000000000..f455289567
--- /dev/null
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -0,0 +1,90 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include <boost/assert.hpp>
+
+#include <functional>
+#include <map>
+#include <vector>
+
+namespace armnnUtils
+{
+
namespace
{

// Traversal state of a node during the depth-first search.
enum class NodeState
{
    Visiting, // Currently on the DFS stack - reaching this again means a cycle.
    Visited,  // Fully processed and already appended to the output list.
};

// Depth-first visit of 'current' and, transitively, everything reachable via
// its incoming edges. Nodes are appended to 'outSorted' in post-order, i.e.
// each node is emitted only after all of its inputs.
// Returns false if a cycle is detected, true otherwise.
template<typename TNodeId>
bool Visit(
    TNodeId current,
    std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
    std::vector<TNodeId>& outSorted,
    std::map<TNodeId, NodeState>& nodeStates)
{
    auto currentStateIt = nodeStates.find(current);
    if (currentStateIt != nodeStates.end())
    {
        // Visited: this node was already emitted, nothing more to do.
        // Visiting: we followed an edge back onto the DFS stack, i.e. the
        // graph contains a cycle.
        return currentStateIt->second == NodeState::Visited;
    }

    nodeStates[current] = NodeState::Visiting;

    for (TNodeId inputNode : getIncomingEdges(current))
    {
        // Propagate failure from the recursive call. Previously the return
        // value was discarded here, so a cycle detected deeper in the graph
        // was silently ignored and the sort reported success.
        if (!Visit(inputNode, getIncomingEdges, outSorted, nodeStates))
        {
            return false;
        }
    }

    nodeStates[current] = NodeState::Visited;

    outSorted.push_back(current);
    return true;
}

}
+
+// Sorts an directed acyclic graph (DAG) into a flat list such that all inputs to a node are before the node itself.
+// Returns true if successful or false if there is an error in the graph structure (e.g. it contains a cycle).
+// The graph is defined entirely by the "getIncomingEdges" function which the user provides. For a given node,
+// it must return the list of nodes which are required to come before it.
+// "targetNodes" is the list of nodes where the search begins - i.e. the nodes that you want to evaluate.
+// The implementation is based on https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
+template<typename TNodeId, typename TTargetNodes>
+bool GraphTopologicalSort(
+ const TTargetNodes& targetNodes,
+ std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
+ std::vector<TNodeId>& outSorted)
+{
+ outSorted.clear();
+ std::map<TNodeId, NodeState> nodeStates;
+
+ for (TNodeId targetNode : targetNodes)
+ {
+ if (!Visit(targetNode, getIncomingEdges, outSorted, nodeStates))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} \ No newline at end of file
diff --git a/src/armnnUtils/Logging.cpp b/src/armnnUtils/Logging.cpp
new file mode 100644
index 0000000000..95978d437e
--- /dev/null
+++ b/src/armnnUtils/Logging.cpp
@@ -0,0 +1,99 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "Logging.hpp"
+
+#include <string>
+#include <iostream>
+
+#if defined(_MSC_VER)
+#include <Windows.h>
+#endif
+
+#if defined(__ANDROID__)
+#include <android/log.h>
+#endif
+
+#include <boost/make_shared.hpp>
+#include <boost/log/core.hpp>
+#include <boost/log/sinks.hpp>
+#include <boost/log/sinks/debug_output_backend.hpp>
+#include <boost/log/sinks/basic_sink_backend.hpp>
+#include <boost/log/sinks/text_ostream_backend.hpp>
+#include <boost/log/utility/setup/console.hpp>
+
+namespace armnnUtils
+{
+
+struct DebugOutputSink : boost::log::sinks::basic_formatted_sink_backend<char, boost::log::sinks::concurrent_feeding>
+{
+ void consume(boost::log::record_view const& rec, std::string const& formatted_message)
+ {
+#if defined(_MSC_VER)
+ OutputDebugString(formatted_message.c_str());
+ OutputDebugString("\n");
+#endif
+#if defined(__ANDROID__)
+ __android_log_write(ANDROID_LOG_DEBUG, "armnn", formatted_message.c_str());
+#endif
+ }
+};
+
// Applies the given logging configuration to a Boost Log core: enables or
// disables logging, installs a severity filter and attaches the requested
// sinks (standard output and/or platform debug output).
void ConfigureLogging(boost::log::core* core, bool printToStandardOutput, bool printToDebugOutput,
    armnn::LogSeverity severity)
{
    // Even if we remove all the sinks, Boost will fallback to the 'default sink' and still print stuff to
    // stdout, so we have to explicitly disable logging in this case.
    core->set_logging_enabled(printToStandardOutput || printToDebugOutput);

    // Setup severity filter: map armnn's severity enum onto Boost's trivial severity levels.
    boost::log::trivial::severity_level boostSeverity;
    switch (severity)
    {
        case armnn::LogSeverity::Trace:
            boostSeverity = boost::log::trivial::trace;
            break;
        case armnn::LogSeverity::Debug:
            boostSeverity = boost::log::trivial::debug;
            break;
        case armnn::LogSeverity::Info:
            boostSeverity = boost::log::trivial::info;
            break;
        case armnn::LogSeverity::Warning:
            boostSeverity = boost::log::trivial::warning;
            break;
        case armnn::LogSeverity::Error:
            boostSeverity = boost::log::trivial::error;
            break;
        case armnn::LogSeverity::Fatal:
            boostSeverity = boost::log::trivial::fatal;
            break;
        default:
            BOOST_ASSERT_MSG(false, "Invalid severity");
    }
    core->set_filter(boost::log::trivial::severity >= boostSeverity);

    // Start from a clean slate so repeated calls replace, rather than stack, sinks.
    core->remove_all_sinks();
    if (printToStandardOutput)
    {
        typedef boost::log::sinks::basic_text_ostream_backend<char> backend_t;
        boost::shared_ptr<backend_t> backend = boost::make_shared<backend_t>();

        // std::cout is a global with static storage duration, so the sink must
        // never delete it - hence the null deleter.
        boost::shared_ptr<std::basic_ostream<char>> stream(&std::cout, boost::null_deleter());
        backend->add_stream(stream);

        typedef boost::log::sinks::synchronous_sink<backend_t> sink_t;
        boost::shared_ptr<sink_t> standardOutputSink = boost::make_shared<sink_t>(backend);

        core->add_sink(standardOutputSink);
    }
    if (printToDebugOutput)
    {
        typedef boost::log::sinks::synchronous_sink<DebugOutputSink> sink_t;
        boost::shared_ptr<sink_t> debugOutputSink(new sink_t());
        core->add_sink(debugOutputSink);
    }
}
+
+}
diff --git a/src/armnnUtils/Logging.hpp b/src/armnnUtils/Logging.hpp
new file mode 100644
index 0000000000..5669fcaebf
--- /dev/null
+++ b/src/armnnUtils/Logging.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+
+#include "armnn/Utils.hpp"
+
+#include <boost/log/trivial.hpp>
+
+namespace armnnUtils
+{
+
// Configures logging for the given Boost Log Core object:
// enables/disables the standard-output and debug-output sinks and sets the
// minimum severity at which messages are emitted.
void ConfigureLogging(boost::log::core* core,
    bool printToStandardOutput,
    bool printToDebugOutput,
    armnn::LogSeverity severity);
+
+} \ No newline at end of file
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
new file mode 100644
index 0000000000..0e34477a96
--- /dev/null
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
#include "armnn/IRuntime.hpp"
#include "test/TensorHelpers.hpp"

#include <map>
#include <string>
#include <utility>
#include <vector>
+
/// Test fixture that parses the network described by the m_Prototext string using TParser,
/// loads it into a reference (CpuRef) armnn runtime, and runs it against expected outputs.
template<typename TParser>
struct ParserPrototxtFixture
{
    ParserPrototxtFixture()
        : m_Parser(TParser::Create())
        , m_Runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef))
        , m_NetworkIdentifier(-1) // Overwritten by LoadNetwork() during Setup().
    {}

    /// Parses and loads the network defined by the m_Prototext string.
    /// @{
    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName);
    void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
        const std::string& inputName,
        const std::string& outputName);
    void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
        const std::vector<std::string>& requestedOutputs);
    /// @}

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// This overload assumes the network has a single input and a single output.
    template <std::size_t NumOutputDimensions>
    void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);

    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name.
    template <std::size_t NumOutputDimensions>
    void RunTest(const std::map<std::string, std::vector<float>>& inputData,
        const std::map<std::string, std::vector<float>>& expectedOutputData);

    std::string m_Prototext;              // Protobuf text format description of the network under test.
    std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetworkIdentifier; // Id assigned by the runtime when the network is loaded.

    /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
    /// @{
    std::string m_SingleInputName;
    std::string m_SingleOutputName;
    /// @}
};
+
+template<typename TParser>
+void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const std::string& inputName,
+ const std::string& outputName)
+{
+ // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+ m_SingleInputName = inputName;
+ m_SingleOutputName = outputName;
+ Setup({ }, { outputName });
+}
+
+template<typename TParser>
+void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
+ const std::string& inputName,
+ const std::string& outputName)
+{
+ // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+ m_SingleInputName = inputName;
+ m_SingleOutputName = outputName;
+ Setup({ { inputName, inputTensorShape } }, { outputName });
+}
+
+template<typename TParser>
+void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
+ const std::vector<std::string>& requestedOutputs)
+{
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
+
+ auto optimized = Optimize(*network, m_Runtime->GetDeviceSpec());
+ armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized));
+ if (ret != armnn::Status::Success)
+ {
+ throw armnn::Exception("LoadNetwork failed");
+ }
+}
+
// Single-input/single-output convenience overload: forwards to the
// multi-tensor RunTest() using the names remembered by
// SetupSingleInputSingleOutput().
template<typename TParser>
template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
    const std::vector<float>& expectedOutputData)
{
    RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } });
}
+
// Runs the loaded network with the named input buffers and compares every
// named output against its expected values with BOOST_TEST.
template<typename TParser>
template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
    const std::map<std::string, std::vector<float>>& expectedOutputData)
{
    using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;

    // Setup the armnn input tensors from the given vectors.
    for (auto&& it : inputData)
    {
        // it.first is the layer name; the parser provides the binding id and tensor info for it.
        BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
    }

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    // outputStorage must stay alive until after EnqueueWorkload(), which writes into it.
    std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
        outputStorage.emplace(it.first, MakeTensor<float, NumOutputDimensions>(bindingInfo.second));
        outputTensors.push_back(
            { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
    }

    // Run inference on the network loaded by Setup().
    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Compare each output tensor to the expected values
    for (auto&& it : expectedOutputData)
    {
        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
        auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second);
        BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
    }
}
diff --git a/src/armnnUtils/Permute.cpp b/src/armnnUtils/Permute.cpp
new file mode 100644
index 0000000000..58e58583fc
--- /dev/null
+++ b/src/armnnUtils/Permute.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "Permute.hpp"
+
+#include <armnn/Tensor.hpp>
+
+#include <cassert>
+
+namespace
+{
+
// Helper that copies a tensor into permuted order by walking the destination
// shape dimension by dimension, advancing precomputed per-dimension strides
// into both the source and destination buffers.
class PermuteLoop
{
public:
    using size_type = unsigned int;

    // Precomputes the source and destination strides for each destination
    // dimension. mappings[i] is the destination position of source dimension i
    // (see Permuted() below), so when dstShape was produced by
    // Permuted(srcShape, mappings), dstShape[mappings[i]] equals srcShape[i].
    PermuteLoop(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings)
        : m_DstShape(dstShape)
    {
        assert(dstShape.GetNumDimensions() == mappings.GetSize());

        const size_type numDims = dstShape.GetNumDimensions();

        size_type srcStride = 1U;
        size_type dstStride = 1U;

        // Walk from the innermost (fastest-varying) dimension outwards,
        // accumulating row-major strides. The source stride for source
        // dimension i is stored at its destination position mappings[i], so
        // both arrays are indexed by destination dimension in Unroll().
        for (size_type i = numDims - 1U, k = 0U; k < numDims; ++k, --i)
        {
            m_SrcStrides[mappings[i]] = srcStride;
            m_DstStrides[i] = dstStride;

            srcStride *= dstShape[mappings[i]];
            dstStride *= dstShape[i];
        }
    }

    // Copies every element of srcData into dstData in permuted order.
    // Both buffers must hold m_DstShape.GetNumElements() elements.
    template <typename T>
    void Unroll(const T* srcData, T* dstData)
    {
        const T* const srcEnd = srcData + m_DstShape.GetNumElements();
        T* const dstEnd = dstData + m_DstShape.GetNumElements();
        Unroll(0, srcData, dstData, srcEnd, dstEnd);
    }

private:
    // Recursively iterates over destination dimension 'dimension', stepping
    // the source and destination pointers by that dimension's strides; once
    // past the last dimension a single element is copied.
    // srcEnd/dstEnd are only used for bounds assertions.
    template <typename T>
    void Unroll(size_type dimension, const T* srcData, T* dstData, const T* srcEnd, T* dstEnd)
    {
        assert(srcData < srcEnd);
        assert(dstData < dstEnd);

        if (dimension >= m_DstShape.GetNumDimensions())
        {
            *dstData = *srcData;
        }
        else
        {
            for (size_type i = 0; i < m_DstShape[dimension]; i++)
            {
                Unroll(dimension + 1, srcData, dstData, srcEnd, dstEnd);

                srcData += m_SrcStrides[dimension];
                dstData += m_DstStrides[dimension];
            }
        }
    }

    armnn::TensorShape m_DstShape;
    // Per-destination-dimension element strides into the source and
    // destination buffers. Only the first GetNumDimensions() entries are
    // initialised and used.
    std::array<size_type, armnn::MaxNumOfTensorDimensions> m_SrcStrides;
    std::array<size_type, armnn::MaxNumOfTensorDimensions> m_DstStrides;
};
+
+} // namespace
+
+namespace armnnUtils
+{
+
+armnn::TensorShape Permuted(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings)
+{
+ assert(srcShape.GetNumDimensions() == mappings.GetSize());
+
+ const unsigned int numDims = mappings.GetSize();
+ unsigned int outDims[armnn::MaxNumOfTensorDimensions];
+
+ for (unsigned int i = 0U; i < numDims; ++i)
+ {
+ outDims[mappings[i]] = srcShape[i];
+ }
+
+ armnn::TensorShape permutedShape(numDims, outDims);
+ return permutedShape;
+}
+
+armnn::TensorInfo Permuted(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings)
+{
+ armnn::TensorInfo outInfo(info);
+ outInfo.SetShape(Permuted(info.GetShape(), mappings));
+ return outInfo;
+}
+
// Copies src into dst in permuted order. dstShape must describe the
// destination layout (as produced by Permuted()) and mappings gives the
// destination position of each source dimension; both buffers must hold
// dstShape.GetNumElements() elements.
template <typename T>
void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, const T* src, T* dst)
{
    PermuteLoop(dstShape, mappings).Unroll(src, dst);
}

// Explicit instantiations for the element types exported by this translation unit.
template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
                      const float* src, float* dst);
template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
                      const uint8_t* src, uint8_t* dst);
template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
                      const int32_t* src, int32_t* dst);
+
+} // namespace armnnUtils
diff --git a/src/armnnUtils/Permute.hpp b/src/armnnUtils/Permute.hpp
new file mode 100644
index 0000000000..44f7a281bb
--- /dev/null
+++ b/src/armnnUtils/Permute.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include <armnn/TensorFwd.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnnUtils
+{
+
// Returns a copy of srcShape with each dimension i moved to position mappings[i].
armnn::TensorShape Permuted(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings);

// Returns a copy of info whose shape has been permuted according to mappings.
armnn::TensorInfo Permuted(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings);

// Copies src into dst in permuted order; dstShape describes the destination
// layout and mappings gives the destination position of each source dimension.
template <typename T>
void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, const T* src, T* dst);
+
+} // namespace armnnUtils \ No newline at end of file