aboutsummaryrefslogtreecommitdiff
path: root/src/armnnUtils
diff options
context:
space:
mode:
authorsurmeh01 <surabhi.mehta@arm.com>2018-05-18 16:31:43 +0100
committertelsoa01 <telmo.soares@arm.com>2018-05-23 13:09:07 +0100
commit3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0 (patch)
tree5950603ad78ec3fe56fb31ddc7f4d52a19f5bc60 /src/armnnUtils
parentbceff2fb3fc68bb0aa88b886900c34b77340c826 (diff)
downloadarmnn-3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0.tar.gz
Release 18.05
Diffstat (limited to 'src/armnnUtils')
-rw-r--r--src/armnnUtils/DotSerializer.cpp13
-rw-r--r--src/armnnUtils/HeapProfiling.cpp38
-rw-r--r--src/armnnUtils/HeapProfiling.hpp47
-rw-r--r--src/armnnUtils/LeakChecking.cpp62
-rw-r--r--src/armnnUtils/LeakChecking.hpp89
-rw-r--r--src/armnnUtils/ParserFlatbuffersFixture.hpp11
-rw-r--r--src/armnnUtils/ParserPrototxtFixture.hpp102
7 files changed, 319 insertions, 43 deletions
diff --git a/src/armnnUtils/DotSerializer.cpp b/src/armnnUtils/DotSerializer.cpp
index 1feea54dbd..3a9df42fbc 100644
--- a/src/armnnUtils/DotSerializer.cpp
+++ b/src/armnnUtils/DotSerializer.cpp
@@ -69,7 +69,7 @@ DotAttributeSet::DotAttributeSet(std::ostream& stream)
DotAttributeSet::~DotAttributeSet()
{
bool doSpace=false;
- for (auto attrib : m_Attributes)
+ for (auto&& attrib : m_Attributes)
{
if (doSpace)
{
@@ -155,7 +155,16 @@ NodeContent::~NodeContent()
ss << "\\l";
}
ss << "}\"";
- GetStream() << ss.str();
+
+ std::string s;
+ try
+ {
+ // Coverity fix: std::stringstream::str() may throw an exception of type std::length_error.
+ s = ss.str();
+ }
+ catch (const std::exception&) { } // Swallow any exception.
+
+ GetStream() << s;
}
DotNode::DotNode(std::ostream& stream, unsigned int nodeId, const char* label)
diff --git a/src/armnnUtils/HeapProfiling.cpp b/src/armnnUtils/HeapProfiling.cpp
new file mode 100644
index 0000000000..7f99927511
--- /dev/null
+++ b/src/armnnUtils/HeapProfiling.cpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#ifdef ARMNN_HEAP_PROFILING_ENABLED
+
+#include "HeapProfiling.hpp"
+#include "gperftools/heap-profiler.h"
+#include <sstream>
+#include <cstdlib>
+
+namespace armnnUtils
+{
+
+ScopedHeapProfiler::ScopedHeapProfiler(const std::string & tag)
+: m_Location("/tmp")
+, m_Tag(tag)
+{
+ char * locationFromEnv = ::getenv(ARMNN_HEAP_PROFILE_DUMP_DIR);
+ if (locationFromEnv)
+ {
+ m_Location = locationFromEnv;
+ }
+ std::stringstream ss;
+ ss << m_Location << "/" << m_Tag << ".hprof";
+ HeapProfilerStart(ss.str().c_str());
+ HeapProfilerDump(m_Tag.c_str());
+}
+
+ScopedHeapProfiler::~ScopedHeapProfiler()
+{
+ HeapProfilerDump(m_Tag.c_str());
+}
+
+} // namespace armnnUtils
+
+#endif // ARMNN_HEAP_PROFILING_ENABLED
diff --git a/src/armnnUtils/HeapProfiling.hpp b/src/armnnUtils/HeapProfiling.hpp
new file mode 100644
index 0000000000..febcbfe2b3
--- /dev/null
+++ b/src/armnnUtils/HeapProfiling.hpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#ifdef ARMNN_HEAP_PROFILING_ENABLED
+
+#include <string>
+#include <cstddef>
+
+// this is conditional so we can change the environment variable
+// at build time
+#ifndef ARMNN_HEAP_PROFILE_DUMP_DIR
+#define ARMNN_HEAP_PROFILE_DUMP_DIR "ARMNN_HEAP_PROFILE_DUMP_DIR"
+#endif // ARMNN_HEAP_PROFILE_DUMP_DIR
+
+namespace armnnUtils
+{
+class ScopedHeapProfiler final
+{
+public:
+ ScopedHeapProfiler(const std::string & tag);
+ ~ScopedHeapProfiler();
+
+private:
+ // Location comes from the ARMNN_HEAP_PROFILE_DUMP_DIR
+ // if not available then it dumps to /tmp
+ std::string m_Location;
+ std::string m_Tag;
+
+ // No default construction and copying
+ ScopedHeapProfiler() = delete;
+ ScopedHeapProfiler(const ScopedHeapProfiler &) = delete;
+ ScopedHeapProfiler & operator=(const ScopedHeapProfiler &) = delete;
+};
+
+} // namespace armnnUtils
+
+#define ARMNN_SCOPED_HEAP_PROFILING(TAG) \
+ armnnUtils::ScopedHeapProfiler __scoped_armnn_heap_profiler__(TAG)
+
+#else // ARMNN_HEAP_PROFILING_ENABLED
+
+#define ARMNN_SCOPED_HEAP_PROFILING(TAG)
+
+#endif // ARMNN_HEAP_PROFILING_ENABLED
diff --git a/src/armnnUtils/LeakChecking.cpp b/src/armnnUtils/LeakChecking.cpp
new file mode 100644
index 0000000000..ac12fe01de
--- /dev/null
+++ b/src/armnnUtils/LeakChecking.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+
+#include "LeakChecking.hpp"
+#include "gperftools/heap-checker.h"
+
+struct ScopedLeakChecker::Impl
+{
+ HeapLeakChecker m_LeakChecker;
+
+ Impl(const std::string & name)
+ : m_LeakChecker(name.c_str())
+ {
+ }
+};
+
+ScopedLeakChecker::ScopedLeakChecker(const std::string & name)
+: m_Impl(new Impl(name))
+{
+}
+
+ScopedLeakChecker::~ScopedLeakChecker() {}
+
+bool ScopedLeakChecker::IsActive()
+{
+ return HeapLeakChecker::IsActive();
+}
+
+bool ScopedLeakChecker::NoLeaks()
+{
+ return (IsActive() ? m_Impl->m_LeakChecker.NoLeaks() : true);
+}
+
+ssize_t ScopedLeakChecker::BytesLeaked() const
+{
+ return (IsActive() ? m_Impl->m_LeakChecker.BytesLeaked(): 0);
+}
+
+ssize_t ScopedLeakChecker::ObjectsLeaked() const
+{
+ return (IsActive() ? m_Impl->m_LeakChecker.ObjectsLeaked(): 0 );
+}
+
+struct ScopedDisableLeakChecking::Impl
+{
+ HeapLeakChecker::Disabler m_Disabler;
+};
+
+ScopedDisableLeakChecking::ScopedDisableLeakChecking()
+: m_Impl(new Impl)
+{
+}
+
+ScopedDisableLeakChecking::~ScopedDisableLeakChecking()
+{
+}
+
+#endif // ARMNN_LEAK_CHECKING_ENABLED
diff --git a/src/armnnUtils/LeakChecking.hpp b/src/armnnUtils/LeakChecking.hpp
new file mode 100644
index 0000000000..b65befe940
--- /dev/null
+++ b/src/armnnUtils/LeakChecking.hpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+
+#include <string>
+#include <cstddef>
+#include <memory>
+
+namespace armnnUtils
+{
+
+class ScopedLeakChecker final
+{
+public:
+ ScopedLeakChecker(const std::string & name);
+ ~ScopedLeakChecker();
+
+ // forwarding these to Google Performance Tools
+ static bool IsActive();
+ bool NoLeaks();
+ // Note that the following two functions only work after
+ // NoLeaks() has been called. See explanations in
+ // heap-checker.h
+ ssize_t BytesLeaked() const;
+ ssize_t ObjectsLeaked() const;
+
+private:
+ // hide implementation so we don't litter others' namespaces
+ // with heap checker related stuff
+ struct Impl;
+ std::unique_ptr<Impl> m_Impl;
+
+ // No default construction and copying
+ ScopedLeakChecker() = delete;
+ ScopedLeakChecker(const ScopedLeakChecker &) = delete;
+ ScopedLeakChecker & operator=(const ScopedLeakChecker &) = delete;
+};
+
+class ScopedDisableLeakChecking final
+{
+public:
+ ScopedDisableLeakChecking();
+ ~ScopedDisableLeakChecking();
+
+private:
+ // hide implementation so we don't litter others' namespaces
+ // with heap checker related stuff
+ struct Impl;
+ std::unique_ptr<Impl> m_Impl;
+
+ // No copying
+ ScopedDisableLeakChecking(const ScopedDisableLeakChecking &) = delete;
+ ScopedDisableLeakChecking & operator=(const ScopedDisableLeakChecking &) = delete;
+};
+
+} // namespace armnnUtils
+
+#define ARMNN_SCOPED_LEAK_CHECKER(TAG) \
+ armnnUtils::ScopedLeakChecker __scoped_armnn_leak_checker__(TAG)
+
+#define ARMNN_LEAK_CHECKER_IS_ACTIVE() \
+ armnnUtils::ScopedLeakChecker::IsActive()
+
+#define ARMNN_NO_LEAKS_IN_SCOPE() \
+ __scoped_armnn_leak_checker__.NoLeaks()
+
+#define ARMNN_BYTES_LEAKED_IN_SCOPE() \
+ __scoped_armnn_leak_checker__.BytesLeaked()
+
+#define ARMNN_OBJECTS_LEAKED_IN_SCOPE() \
+ __scoped_armnn_leak_checker__.ObjectsLeaked()
+
+#define ARMNN_DISABLE_LEAK_CHECKING_IN_SCOPE() \
+ armnnUtils::ScopedDisableLeakChecking __disable_leak_checking_in_scope__
+
+#else // ARMNN_LEAK_CHECKING_ENABLED
+
+#define ARMNN_SCOPED_LEAK_CHECKER(TAG)
+#define ARMNN_LEAK_CHECKER_IS_ACTIVE() false
+#define ARMNN_NO_LEAKS_IN_SCOPE() true
+#define ARMNN_BYTES_LEAKED_IN_SCOPE() 0
+#define ARMNN_OBJECTS_LEAKED_IN_SCOPE() 0
+#define ARMNN_DISABLE_LEAK_CHECKING_IN_SCOPE()
+
+#endif // ARMNN_LEAK_CHECKING_ENABLED
diff --git a/src/armnnUtils/ParserFlatbuffersFixture.hpp b/src/armnnUtils/ParserFlatbuffersFixture.hpp
new file mode 100644
index 0000000000..16f9620ce2
--- /dev/null
+++ b/src/armnnUtils/ParserFlatbuffersFixture.hpp
@@ -0,0 +1,11 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+namespace armnnUtils
+{
+
+}
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 0e34477a96..81e3057c80 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -9,14 +9,26 @@
#include "test/TensorHelpers.hpp"
#include <string>
+
+// TODO davbec01 (14/05/18) : put these into armnnUtils namespace
+
template<typename TParser>
struct ParserPrototxtFixture
{
ParserPrototxtFixture()
: m_Parser(TParser::Create())
- , m_Runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef))
, m_NetworkIdentifier(-1)
- {}
+ {
+ m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::CpuRef));
+
+#if ARMCOMPUTENEON_ENABLED
+ m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::CpuAcc));
+#endif
+
+#if ARMCOMPUTECL_ENABLED
+ m_Runtimes.push_back(armnn::IRuntime::Create(armnn::Compute::GpuAcc));
+#endif
+ }
/// Parses and loads the network defined by the m_Prototext string.
/// @{
@@ -39,10 +51,10 @@ struct ParserPrototxtFixture
void RunTest(const std::map<std::string, std::vector<float>>& inputData,
const std::map<std::string, std::vector<float>>& expectedOutputData);
- std::string m_Prototext;
- std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
- armnn::IRuntimePtr m_Runtime;
- armnn::NetworkId m_NetworkIdentifier;
+ std::string m_Prototext;
+ std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
+ std::vector<armnn::IRuntimePtr> m_Runtimes;
+ armnn::NetworkId m_NetworkIdentifier;
/// If the single-input-single-output overload of Setup() is called, these will store the input and output name
/// so they don't need to be passed to the single-input-single-output overload of RunTest().
@@ -77,14 +89,19 @@ template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs)
{
- armnn::INetworkPtr network =
- m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
-
- auto optimized = Optimize(*network, m_Runtime->GetDeviceSpec());
- armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized));
- if (ret != armnn::Status::Success)
+ for (auto&& runtime : m_Runtimes)
{
- throw armnn::Exception("LoadNetwork failed");
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
+
+ auto optimized = Optimize(*network, runtime->GetDeviceSpec());
+
+ armnn::Status ret = runtime->LoadNetwork(m_NetworkIdentifier, move(optimized));
+
+ if (ret != armnn::Status::Success)
+ {
+ throw armnn::Exception("LoadNetwork failed");
+ }
}
}
@@ -101,34 +118,37 @@ template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
const std::map<std::string, std::vector<float>>& expectedOutputData)
{
- using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
-
- // Setup the armnn input tensors from the given vectors.
- armnn::InputTensors inputTensors;
- for (auto&& it : inputData)
- {
- BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
- inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
- }
-
- // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
- std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage;
- armnn::OutputTensors outputTensors;
- for (auto&& it : expectedOutputData)
- {
- BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
- outputStorage.emplace(it.first, MakeTensor<float, NumOutputDimensions>(bindingInfo.second));
- outputTensors.push_back(
- { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
- }
-
- m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
-
- // Compare each output tensor to the expected values
- for (auto&& it : expectedOutputData)
+ for (auto&& runtime : m_Runtimes)
{
- BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
- auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second);
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+ // Setup the armnn input tensors from the given vectors.
+ armnn::InputTensors inputTensors;
+ for (auto&& it : inputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+ std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage;
+ armnn::OutputTensors outputTensors;
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
+ outputStorage.emplace(it.first, MakeTensor<float, NumOutputDimensions>(bindingInfo.second));
+ outputTensors.push_back(
+ { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+ }
+
+ runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+ // Compare each output tensor to the expected values
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
+ auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second);
+ BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ }
}
}