path: root/src/armnnUtils
author     telsoa01 <telmo.soares@arm.com>  2018-08-31 09:22:23 +0100
committer  telsoa01 <telmo.soares@arm.com>  2018-08-31 09:22:23 +0100
commit     c577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
tree       bd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnnUtils
parent     4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
download   armnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnnUtils')
-rw-r--r--  src/armnnUtils/CsvReader.cpp                 63
-rw-r--r--  src/armnnUtils/CsvReader.hpp                 25
-rw-r--r--  src/armnnUtils/FloatingPointConverter.cpp    44
-rw-r--r--  src/armnnUtils/FloatingPointConverter.hpp    21
-rw-r--r--  src/armnnUtils/GraphTopologicalSort.hpp      86
-rw-r--r--  src/armnnUtils/HeapProfiling.hpp             10
-rw-r--r--  src/armnnUtils/LeakChecking.cpp              19
-rw-r--r--  src/armnnUtils/LeakChecking.hpp              21
-rw-r--r--  src/armnnUtils/Logging.cpp                    2
-rw-r--r--  src/armnnUtils/ParserFlatbuffersFixture.hpp  11
-rw-r--r--  src/armnnUtils/ParserPrototxtFixture.hpp     76
-rw-r--r--  src/armnnUtils/Permute.cpp                    2
-rw-r--r--  src/armnnUtils/VerificationHelpers.cpp       74
-rw-r--r--  src/armnnUtils/VerificationHelpers.hpp       35
14 files changed, 424 insertions, 65 deletions
diff --git a/src/armnnUtils/CsvReader.cpp b/src/armnnUtils/CsvReader.cpp
new file mode 100644
index 0000000000..5b66c942ba
--- /dev/null
+++ b/src/armnnUtils/CsvReader.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "CsvReader.hpp"
+
+#include <boost/algorithm/string.hpp>
+#include <boost/tokenizer.hpp>
+
+#include <fstream>
+#include <string>
+#include <vector>
+
+using Tokenizer = boost::tokenizer<boost::escaped_list_separator<char>>;
+
+namespace armnnUtils
+{
+
+CsvRow ParseLine(const std::string& csvLine)
+{
+ Tokenizer tokenizer(csvLine);
+ CsvRow entry;
+
+ for (const auto &token : tokenizer)
+ {
+ entry.values.push_back(boost::trim_copy(token));
+ }
+ return entry;
+}
+
+std::vector<CsvRow> CsvReader::ParseFile(const std::string& csvFile)
+{
+ std::vector<CsvRow> result;
+
+ std::ifstream in(csvFile.c_str());
+ if (!in.is_open())
+ return result;
+
+ std::string line;
+ while (getline(in, line))
+ {
+ if(!line.empty())
+ {
+ CsvRow entry = ParseLine(line);
+ result.push_back(entry);
+ }
+ }
+ return result;
+}
+
+std::vector<CsvRow> CsvReader::ParseVector(const std::vector<std::string>& csvVector)
+{
+ std::vector<CsvRow> result;
+
+ for (auto const& line: csvVector)
+ {
+ CsvRow entry = ParseLine(line);
+ result.push_back(entry);
+ }
+ return result;
+}
+} // namespace armnnUtils
\ No newline at end of file
diff --git a/src/armnnUtils/CsvReader.hpp b/src/armnnUtils/CsvReader.hpp
new file mode 100644
index 0000000000..0d529804b6
--- /dev/null
+++ b/src/armnnUtils/CsvReader.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include <vector>
+#include <string>
+
+namespace armnnUtils
+{
+
+struct CsvRow
+{
+ std::vector<std::string> values;
+};
+
+class CsvReader
+{
+public:
+ static std::vector<CsvRow> ParseFile(const std::string& csvFile);
+
+ static std::vector<CsvRow> ParseVector(const std::vector<std::string>& csvVector);
+};
+} // namespace armnnUtils
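
For reference, a minimal sketch of how the new CsvReader API can be exercised; the file name and the sample rows below are illustrative, not part of the commit:

    #include "CsvReader.hpp"

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // Parses a CSV file on disk; an empty result means the file could not be opened.
        std::vector<armnnUtils::CsvRow> rows = armnnUtils::CsvReader::ParseFile("networks.csv");

        // Alternatively, parses CSV lines already held in memory; tokens are trimmed of whitespace.
        std::vector<std::string> lines = { "model.pb, input, output" };
        std::vector<armnnUtils::CsvRow> moreRows = armnnUtils::CsvReader::ParseVector(lines);
        std::cout << moreRows.size() << " in-memory rows parsed\n";

        for (const armnnUtils::CsvRow& row : rows)
        {
            for (const std::string& value : row.values)
            {
                std::cout << value << " ";
            }
            std::cout << "\n";
        }
        return 0;
    }
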
diff --git a/src/armnnUtils/FloatingPointConverter.cpp b/src/armnnUtils/FloatingPointConverter.cpp
new file mode 100644
index 0000000000..5c1a43193e
--- /dev/null
+++ b/src/armnnUtils/FloatingPointConverter.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "FloatingPointConverter.hpp"
+#include "../armnn/Half.hpp"
+
+#include <boost/assert.hpp>
+
+namespace armnnUtils
+{
+
+void FloatingPointConverter::ConvertFloat32To16(const float* srcFloat32Buffer,
+ size_t numElements,
+ void* dstFloat16Buffer)
+{
+ BOOST_ASSERT(srcFloat32Buffer != nullptr);
+ BOOST_ASSERT(dstFloat16Buffer != nullptr);
+
+ armnn::Half* pHalf = reinterpret_cast<armnn::Half*>(dstFloat16Buffer);
+
+ for (size_t i = 0; i < numElements; i++)
+ {
+ pHalf[i] = armnn::Half(srcFloat32Buffer[i]);
+ }
+}
+
+void FloatingPointConverter::ConvertFloat16To32(const void* srcFloat16Buffer,
+ size_t numElements,
+ float* dstFloat32Buffer)
+{
+ BOOST_ASSERT(srcFloat16Buffer != nullptr);
+ BOOST_ASSERT(dstFloat32Buffer != nullptr);
+
+ const armnn::Half* pHalf = reinterpret_cast<const armnn::Half*>(srcFloat16Buffer);
+
+ for (size_t i = 0; i < numElements; i++)
+ {
+ dstFloat32Buffer[i] = pHalf[i];
+ }
+}
+
+} //namespace armnnUtils
diff --git a/src/armnnUtils/FloatingPointConverter.hpp b/src/armnnUtils/FloatingPointConverter.hpp
new file mode 100644
index 0000000000..e879c819f4
--- /dev/null
+++ b/src/armnnUtils/FloatingPointConverter.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <cstddef>
+
+namespace armnnUtils
+{
+class FloatingPointConverter
+{
+public:
+    // Converts a buffer of FP32 values to FP16 and stores them in the given dstFloat16Buffer.
+    // dstFloat16Buffer should be at least (numElements * 2) bytes in size.
+ static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer);
+
+ static void ConvertFloat16To32(const void *srcFloat16Buffer, size_t numElements, float *dstFloat32Buffer);
+};
+} //namespace armnnUtils
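
A short sketch of the intended round trip through the converter; the input values and buffer handling are illustrative:

    #include "FloatingPointConverter.hpp"

    #include <cstdint>
    #include <vector>

    void RoundTripExample()
    {
        const std::vector<float> input = { 1.0f, 0.5f, -2.25f, 100.0f };

        // FP16 values occupy two bytes each, hence numElements * 2 bytes of destination storage.
        std::vector<uint8_t> halfBuffer(input.size() * 2);
        armnnUtils::FloatingPointConverter::ConvertFloat32To16(input.data(), input.size(), halfBuffer.data());

        // Converting back may lose precision, since FP16 has fewer mantissa bits than FP32.
        std::vector<float> output(input.size());
        armnnUtils::FloatingPointConverter::ConvertFloat16To32(halfBuffer.data(), input.size(), output.data());
    }
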
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
index f455289567..86eb4cc030 100644
--- a/src/armnnUtils/GraphTopologicalSort.hpp
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -5,11 +5,14 @@
#pragma once
#include <boost/assert.hpp>
+#include <boost/optional.hpp>
#include <functional>
#include <map>
+#include <stack>
#include <vector>
+
namespace armnnUtils
{
@@ -22,51 +25,88 @@ enum class NodeState
Visited,
};
-template<typename TNodeId>
-bool Visit(
- TNodeId current,
- std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
- std::vector<TNodeId>& outSorted,
- std::map<TNodeId, NodeState>& nodeStates)
+
+template <typename TNodeId>
+boost::optional<TNodeId> GetNextChild(TNodeId node,
+ std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
+ std::map<TNodeId, NodeState>& nodeStates)
{
- auto currentStateIt = nodeStates.find(current);
- if (currentStateIt != nodeStates.end())
+ for (TNodeId childNode : getIncomingEdges(node))
{
- if (currentStateIt->second == NodeState::Visited)
- {
- return true;
- }
- if (currentStateIt->second == NodeState::Visiting)
+ if (nodeStates.find(childNode) == nodeStates.end())
{
- return false;
+ return childNode;
}
else
{
- BOOST_ASSERT(false);
+ if (nodeStates.find(childNode)->second == NodeState::Visiting)
+ {
+ return childNode;
+ }
}
}
- nodeStates[current] = NodeState::Visiting;
+ return {};
+}
- for (TNodeId inputNode : getIncomingEdges(current))
+template<typename TNodeId>
+bool TopologicallySort(
+ TNodeId initialNode,
+ std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
+ std::vector<TNodeId>& outSorted,
+ std::map<TNodeId, NodeState>& nodeStates)
+{
+ std::stack<TNodeId> nodeStack;
+
+    // If the node has never been visited, we should search it.
+ if (nodeStates.find(initialNode) == nodeStates.end())
{
- Visit(inputNode, getIncomingEdges, outSorted, nodeStates);
+ nodeStack.push(initialNode);
}
- nodeStates[current] = NodeState::Visited;
+ while (!nodeStack.empty())
+ {
+ TNodeId current = nodeStack.top();
+
+ nodeStates[current] = NodeState::Visiting;
+
+ boost::optional<TNodeId> nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
+
+ if (nextChildOfCurrent)
+ {
+ TNodeId nextChild = nextChildOfCurrent.get();
+
+            // If the child has not been searched yet, add it to the stack and iterate from it.
+ if (nodeStates.find(nextChild) == nodeStates.end())
+ {
+ nodeStack.push(nextChild);
+ continue;
+ }
+
+            // If we re-encounter a node that is being visited, there is a cycle.
+ if (nodeStates[nextChild] == NodeState::Visiting)
+ {
+ return false;
+ }
+ }
+
+ nodeStack.pop();
+
+ nodeStates[current] = NodeState::Visited;
+ outSorted.push_back(current);
+ }
- outSorted.push_back(current);
return true;
}
}
-// Sorts an directed acyclic graph (DAG) into a flat list such that all inputs to a node are before the node itself.
+// Sorts a directed acyclic graph (DAG) into a flat list such that all inputs to a node are before the node itself.
// Returns true if successful or false if there is an error in the graph structure (e.g. it contains a cycle).
// The graph is defined entirely by the "getIncomingEdges" function which the user provides. For a given node,
// it must return the list of nodes which are required to come before it.
// "targetNodes" is the list of nodes where the search begins - i.e. the nodes that you want to evaluate.
-// The implementation is based on https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
+// This is an iterative implementation based on https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
template<typename TNodeId, typename TTargetNodes>
bool GraphTopologicalSort(
const TTargetNodes& targetNodes,
@@ -78,7 +118,7 @@ bool GraphTopologicalSort(
for (TNodeId targetNode : targetNodes)
{
- if (!Visit(targetNode, getIncomingEdges, outSorted, nodeStates))
+ if (!TopologicallySort(targetNode, getIncomingEdges, outSorted, nodeStates))
{
return false;
}
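
To illustrate the sorted output, a small usage sketch follows. The full parameter list of GraphTopologicalSort is not visible in this hunk, so the call below assumes it takes the target nodes, the getIncomingEdges functor and the output vector, mirroring the inner TopologicallySort helper:

    #include "GraphTopologicalSort.hpp"

    #include <functional>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Small DAG: nodes 0 and 1 feed node 2, and node 2 feeds node 3.
        std::function<std::vector<int>(int)> getIncomingEdges = [](int node) -> std::vector<int>
        {
            switch (node)
            {
                case 2:  return { 0, 1 };
                case 3:  return { 2 };
                default: return { };
            }
        };

        std::vector<int> sorted;
        bool ok = armnnUtils::GraphTopologicalSort<int>(std::vector<int>{ 3 }, getIncomingEdges, sorted);

        // On success every node appears after all of its inputs, e.g. 0 1 2 3 (or 1 0 2 3).
        std::cout << (ok ? "sorted" : "cycle detected") << "\n";
        return 0;
    }
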
diff --git a/src/armnnUtils/HeapProfiling.hpp b/src/armnnUtils/HeapProfiling.hpp
index febcbfe2b3..4ba38f5a1a 100644
--- a/src/armnnUtils/HeapProfiling.hpp
+++ b/src/armnnUtils/HeapProfiling.hpp
@@ -9,8 +9,8 @@
#include <string>
#include <cstddef>
-// this is conditional so we can change the environment variable
-// at build time
+// This is conditional so we can change the environment variable
+// at build time.
#ifndef ARMNN_HEAP_PROFILE_DUMP_DIR
#define ARMNN_HEAP_PROFILE_DUMP_DIR "ARMNN_HEAP_PROFILE_DUMP_DIR"
#endif // ARMNN_HEAP_PROFILE_DUMP_DIR
@@ -24,12 +24,12 @@ public:
~ScopedHeapProfiler();
private:
- // Location comes from the ARMNN_HEAP_PROFILE_DUMP_DIR
- // if not available then it dumps to /tmp
+ // Location comes from the ARMNN_HEAP_PROFILE_DUMP_DIR.
+ // If it is not available then it dumps to /tmp.
std::string m_Location;
std::string m_Tag;
- // No default construction and copying
+ // No default construction and copying.
ScopedHeapProfiler() = delete;
ScopedHeapProfiler(const ScopedHeapProfiler &) = delete;
ScopedHeapProfiler & operator=(const ScopedHeapProfiler &) = delete;
diff --git a/src/armnnUtils/LeakChecking.cpp b/src/armnnUtils/LeakChecking.cpp
index ac12fe01de..83aa5d8ceb 100644
--- a/src/armnnUtils/LeakChecking.cpp
+++ b/src/armnnUtils/LeakChecking.cpp
@@ -8,6 +8,9 @@
#include "LeakChecking.hpp"
#include "gperftools/heap-checker.h"
+namespace armnnUtils
+{
+
struct ScopedLeakChecker::Impl
{
HeapLeakChecker m_LeakChecker;
@@ -59,4 +62,20 @@ ScopedDisableLeakChecking::~ScopedDisableLeakChecking()
{
}
+void LocalLeakCheckingOnly()
+{
+ auto * globalChecker = HeapLeakChecker::GlobalChecker();
+ if (globalChecker)
+ {
+        // We don't care about global leaks and want to make sure we won't report any.
+        // This is because leak checking is supposed to run in well-defined
+        // contexts through the ScopedLeakChecker; otherwise we risk false
+ // positives because of external factors.
+ globalChecker->NoGlobalLeaks();
+ globalChecker->CancelGlobalCheck();
+ }
+}
+
+} // namespace armnnUtils
+
#endif // ARMNN_LEAK_CHECKING_ENABLED
diff --git a/src/armnnUtils/LeakChecking.hpp b/src/armnnUtils/LeakChecking.hpp
index b65befe940..22b3b67f88 100644
--- a/src/armnnUtils/LeakChecking.hpp
+++ b/src/armnnUtils/LeakChecking.hpp
@@ -19,7 +19,7 @@ public:
ScopedLeakChecker(const std::string & name);
~ScopedLeakChecker();
- // forwarding these to Google Performance Tools
+ // Forwarding these to Google Performance Tools.
static bool IsActive();
bool NoLeaks();
// Note that the following two functions only work after
@@ -29,12 +29,12 @@ public:
ssize_t ObjectsLeaked() const;
private:
- // hide imlementation so we don't litter other's namespaces
- // with heap checker related stuff
+    // Hides the implementation so we don't litter others' namespaces
+    // with heap checker related stuff.
struct Impl;
std::unique_ptr<Impl> m_Impl;
- // No default construction and copying
+ // No default construction and copying.
ScopedLeakChecker() = delete;
ScopedLeakChecker(const ScopedLeakChecker &) = delete;
ScopedLeakChecker & operator=(const ScopedLeakChecker &) = delete;
@@ -47,16 +47,19 @@ public:
~ScopedDisableLeakChecking();
private:
- // hide imlementation so we don't litter other's namespaces
- // with heap checker related stuff
+    // Hides the implementation so we don't litter others' namespaces
+    // with heap checker related stuff.
struct Impl;
std::unique_ptr<Impl> m_Impl;
- // No copying
+ // No copying.
ScopedDisableLeakChecking(const ScopedDisableLeakChecking &) = delete;
ScopedDisableLeakChecking & operator=(const ScopedDisableLeakChecking &) = delete;
};
+// Disables global leak checks starting from 'main'.
+void LocalLeakCheckingOnly();
+
} // namespace armnnUtils
#define ARMNN_SCOPED_LEAK_CHECKER(TAG) \
@@ -77,6 +80,9 @@ private:
#define ARMNN_DISABLE_LEAK_CHECKING_IN_SCOPE() \
armnnUtils::ScopedDisableLeakChecking __disable_leak_checking_in_scope__
+#define ARMNN_LOCAL_LEAK_CHECKING_ONLY() \
+ armnnUtils::LocalLeakCheckingOnly()
+
#else // ARMNN_LEAK_CHECKING_ENABLED
#define ARMNN_SCOPED_LEAK_CHECKER(TAG)
@@ -85,5 +91,6 @@ private:
#define ARMNN_BYTES_LEAKED_IN_SCOPE() 0
#define ARMNN_OBJECTS_LEAKED_IN_SCOPE() 0
#define ARMNN_DISABLE_LEAK_CHECKING_IN_SCOPE()
+#define ARMNN_LOCAL_LEAK_CHECKING_ONLY()
#endif // ARMNN_LEAK_CHECKING_ENABLED
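
A hedged sketch of where the new macro is intended to sit; the scope tag is a placeholder:

    #include "LeakChecking.hpp"

    int main()
    {
        // Turns off whole-program leak reporting right at the start of main();
        // when ARMNN_LEAK_CHECKING_ENABLED is not defined this expands to nothing.
        ARMNN_LOCAL_LEAK_CHECKING_ONLY();

        {
            // Leaks are still tracked inside explicitly instrumented scopes.
            ARMNN_SCOPED_LEAK_CHECKER("ExampleScope");
            // ... run the code under test here ...
        }

        return 0;
    }
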
diff --git a/src/armnnUtils/Logging.cpp b/src/armnnUtils/Logging.cpp
index 95978d437e..4d759a3f89 100644
--- a/src/armnnUtils/Logging.cpp
+++ b/src/armnnUtils/Logging.cpp
@@ -47,7 +47,7 @@ void ConfigureLogging(boost::log::core* core, bool printToStandardOutput, bool p
// stdout, so we have to explicitly disable logging in this case.
core->set_logging_enabled(printToStandardOutput || printToDebugOutput);
- // Setup severity filter
+ // Sets up severity filter.
boost::log::trivial::severity_level boostSeverity;
switch (severity)
{
diff --git a/src/armnnUtils/ParserFlatbuffersFixture.hpp b/src/armnnUtils/ParserFlatbuffersFixture.hpp
deleted file mode 100644
index 16f9620ce2..0000000000
--- a/src/armnnUtils/ParserFlatbuffersFixture.hpp
+++ /dev/null
@@ -1,11 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#pragma once
-
-namespace armnnUtils
-{
-
-}
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 81e3057c80..e2e6459bcf 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -6,11 +6,15 @@
#pragma once
#include "armnn/IRuntime.hpp"
+#include "armnnOnnxParser/IOnnxParser.hpp"
#include "test/TensorHelpers.hpp"
-#include <string>
+#include "VerificationHelpers.hpp"
+#include <boost/format.hpp>
+#include <string>
-// TODO davbec01 (14/05/18) : put these into armnnUtils namespace
+namespace armnnUtils
+{
template<typename TParser>
struct ParserPrototxtFixture
@@ -19,14 +23,15 @@ struct ParserPrototxtFixture
: m_Parser(TParser::Create())
, m_NetworkIdentifier(-1)
{
- m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::CpuRef));
+ armnn::IRuntime::CreationOptions options;
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuRef));
#if ARMCOMPUTENEON_ENABLED
- m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::CpuAcc));
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuAcc));
#endif
#if ARMCOMPUTECL_ENABLED
- m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::GpuAcc));
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::GpuAcc));
#endif
}
@@ -38,10 +43,11 @@ struct ParserPrototxtFixture
const std::string& outputName);
void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs);
+ void Setup();
/// @}
/// Executes the network with the given input tensor and checks the result against the given output tensor.
- /// This overload assumes the network has a single input and a single output.
+ /// This overload assumes that the network has a single input and a single output.
template <std::size_t NumOutputDimensions>
void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);
@@ -53,7 +59,7 @@ struct ParserPrototxtFixture
std::string m_Prototext;
std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
- std::vector<armnn::IRuntimePtr> m_Runtimes;
+ std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
armnn::NetworkId m_NetworkIdentifier;
/// If the single-input-single-output overload of Setup() is called, these will store the input and output name
@@ -68,7 +74,7 @@ template<typename TParser>
void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const std::string& inputName,
const std::string& outputName)
{
- // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+ // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
m_SingleInputName = inputName;
m_SingleOutputName = outputName;
Setup({ }, { outputName });
@@ -79,7 +85,7 @@ void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::T
const std::string& inputName,
const std::string& outputName)
{
- // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+ // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
m_SingleInputName = inputName;
m_SingleOutputName = outputName;
Setup({ { inputName, inputTensorShape } }, { outputName });
@@ -91,16 +97,39 @@ void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::Te
{
for (auto&& runtime : m_Runtimes)
{
+ std::string errorMessage;
+
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
+ auto optimized = Optimize(*network, { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
+ armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+ if (ret != armnn::Status::Success)
+ {
+ throw armnn::Exception(boost::str(
+ boost::format("LoadNetwork failed with error: '%1%' %2%")
+ % errorMessage
+ % CHECK_LOCATION().AsString()));
+ }
+ }
+}
- auto optimized = Optimize(*network, runtime->GetDeviceSpec());
-
- armnn::Status ret = runtime->LoadNetwork(m_NetworkIdentifier, move(optimized));
+template<typename TParser>
+void ParserPrototxtFixture<TParser>::Setup()
+{
+ for (auto&& runtime : m_Runtimes)
+ {
+ std::string errorMessage;
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromString(m_Prototext.c_str());
+ auto optimized = Optimize(*network, { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
+ armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
- throw armnn::Exception("LoadNetwork failed");
+ throw armnn::Exception(boost::str(
+ boost::format("LoadNetwork failed with error: '%1%' %2%")
+ % errorMessage
+ % CHECK_LOCATION().AsString()));
}
}
}
@@ -122,7 +151,7 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
{
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
- // Setup the armnn input tensors from the given vectors.
+ // Sets up the armnn input tensors from the given vectors.
armnn::InputTensors inputTensors;
for (auto&& it : inputData)
{
@@ -130,7 +159,7 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
- // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+ // Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
@@ -141,14 +170,27 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
}
- runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+ runtime.first->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
- // Compare each output tensor to the expected values
+ // Compares each output tensor to the expected values.
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
+ if (bindingInfo.second.GetNumElements() != it.second.size())
+ {
+ throw armnn::Exception(
+ boost::str(
+ boost::format("Output tensor %1% is expected to have %2% elements. "
+ "%3% elements supplied. %4%") %
+ it.first %
+ bindingInfo.second.GetNumElements() %
+ it.second.size() %
+ CHECK_LOCATION().AsString()));
+ }
auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second);
BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
}
}
}
+
+} // namespace armnnUtils
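
The fixture now creates runtimes with IRuntime::CreationOptions, passes a backend preference list to Optimize() and surfaces the LoadNetwork error message. A minimal sketch of that pattern for a single runtime follows; the helper name and the error text are illustrative, and the network is assumed to come from a parser:

    #include <armnn/ArmNN.hpp>

    #include <string>
    #include <utility>

    // Hypothetical helper mirroring the fixture's new Setup() flow for one runtime.
    armnn::NetworkId LoadOnReferenceBackend(armnn::IRuntime& runtime, armnn::INetwork& network)
    {
        std::string errorMessage;

        armnn::IOptimizedNetworkPtr optimized =
            armnn::Optimize(network, { armnn::Compute::CpuRef }, runtime.GetDeviceSpec());

        armnn::NetworkId networkId;
        if (runtime.LoadNetwork(networkId, std::move(optimized), errorMessage) != armnn::Status::Success)
        {
            throw armnn::Exception("LoadNetwork failed with error: '" + errorMessage + "'");
        }
        return networkId;
    }
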
diff --git a/src/armnnUtils/Permute.cpp b/src/armnnUtils/Permute.cpp
index 58e58583fc..ba842dbc33 100644
--- a/src/armnnUtils/Permute.cpp
+++ b/src/armnnUtils/Permute.cpp
@@ -107,7 +107,7 @@ void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector&
PermuteLoop(dstShape, mappings).Unroll(src, dst);
}
-// Instantiate for types
+// Explicitly instantiates the template for the supported types.
template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
const float* src, float* dst);
template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings,
diff --git a/src/armnnUtils/VerificationHelpers.cpp b/src/armnnUtils/VerificationHelpers.cpp
new file mode 100644
index 0000000000..301aa4c8c5
--- /dev/null
+++ b/src/armnnUtils/VerificationHelpers.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "VerificationHelpers.hpp"
+#include <boost/format.hpp>
+#include <armnn/Exceptions.hpp>
+
+using namespace armnn;
+
+namespace armnnUtils
+{
+
+void CheckValidSize(std::initializer_list<size_t> validInputCounts,
+ size_t actualValue,
+ const char* validExpr,
+ const char* actualExpr,
+ const CheckLocation& location)
+{
+ bool isValid = std::any_of(validInputCounts.begin(),
+ validInputCounts.end(),
+ [&actualValue](size_t x) { return x == actualValue; } );
+ if (!isValid)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% = %2% is not valid, not in {%3%}. %4%") %
+ actualExpr %
+ actualValue %
+ validExpr %
+ location.AsString()));
+ }
+}
+
+uint32_t NonNegative(const char* expr,
+ int32_t value,
+ const CheckLocation& location)
+{
+ if (value < 0)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("'%1%' must be non-negative, received: %2% at %3%") %
+ expr %
+ value %
+ location.AsString() ));
+ }
+ else
+ {
+ return static_cast<uint32_t>(value);
+ }
+}
+
+int32_t VerifyInt32(const char* expr,
+ int64_t value,
+ const armnn::CheckLocation& location)
+{
+ if (value < std::numeric_limits<int>::min() || value > std::numeric_limits<int>::max())
+ {
+ throw ParseException(
+ boost::str(
+                boost::format("'%1%' must fit into an int32 (ArmNN does not support int64), received: %2% at %3%") %
+ expr %
+ value %
+ location.AsString() ));
+ }
+ else
+ {
+ return static_cast<int32_t>(value);
+ }
+}
+
+}// armnnUtils
diff --git a/src/armnnUtils/VerificationHelpers.hpp b/src/armnnUtils/VerificationHelpers.hpp
new file mode 100644
index 0000000000..8e3550c70f
--- /dev/null
+++ b/src/armnnUtils/VerificationHelpers.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+
+#include <armnn/Exceptions.hpp>
+
+namespace armnnUtils
+{
+
+void CheckValidSize(std::initializer_list<size_t> validInputCounts,
+ size_t actualValue,
+ const char* validExpr,
+ const char* actualExpr,
+ const armnn::CheckLocation& location);
+
+uint32_t NonNegative(const char* expr,
+ int32_t value,
+ const armnn::CheckLocation& location);
+
+int32_t VerifyInt32(const char* expr,
+ int64_t value,
+ const armnn::CheckLocation& location);
+
+}//armnnUtils
+
+#define CHECKED_INT32(VALUE) armnnUtils::VerifyInt32(#VALUE, VALUE, CHECK_LOCATION())
+
+#define CHECK_VALID_SIZE(ACTUAL, ...) \
+armnnUtils::CheckValidSize({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, #ACTUAL, CHECK_LOCATION())
+
+#define CHECKED_NON_NEGATIVE(VALUE) armnnUtils::NonNegative(#VALUE, VALUE, CHECK_LOCATION())
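
A short, hypothetical sketch of how the new checking macros are meant to be used from a parser; the function and variable names below are made up for illustration:

    #include "VerificationHelpers.hpp"

    #include <cstdint>
    #include <vector>

    // Hypothetical parser-style validation; throws armnn::ParseException on bad metadata.
    void ValidateTensorMetadata(const std::vector<int64_t>& dims, int32_t axis)
    {
        // Throws unless dims.size() is exactly 1 or 4.
        CHECK_VALID_SIZE(dims.size(), 1, 4);

        // Throws if 'axis' is negative; otherwise returns it as an unsigned value.
        uint32_t checkedAxis = CHECKED_NON_NEGATIVE(axis);

        // Throws if the 64-bit value does not fit into an int32.
        int32_t firstDim = CHECKED_INT32(dims[0]);

        (void)checkedAxis;
        (void)firstDim;
    }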