about summary refs log tree commit diff
path: root/src/armnn
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/Layer.cpp                    |   2
-rw-r--r--  src/armnn/LayerSupport.cpp             | 239
-rw-r--r--  src/armnn/Profiling.cpp                |   8
-rw-r--r--  src/armnn/Profiling.hpp                |  18
-rw-r--r--  src/armnn/ProfilingEvent.cpp           |  12
-rw-r--r--  src/armnn/ProfilingEvent.hpp           |  18
-rw-r--r--  src/armnn/test/ProfilingEventTest.cpp  |  20
7 files changed, 169 insertions, 148 deletions
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index cf825e39fd..85e1de0b09 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -132,7 +132,7 @@ Layer::Layer(unsigned int numInputSlots,
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
-, m_BackendId(UninitializedBackendId())
+, m_BackendId()
, m_Guid(GenerateLayerGuid())
{
m_InputSlots.reserve(numInputSlots);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index cf6ce27dda..b0b3eccb02 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -16,11 +16,9 @@
#include <unordered_map>
#include <armnn/ArmNN.hpp>
-namespace armnn
-{
-
namespace
{
+
/// Helper function to copy a full string to a truncated version.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
@@ -33,10 +31,13 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL
}
}
-}
+} // anonymous namespace
+
+namespace armnn
+{
// Helper macro to avoid code duplication.
-// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
+// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
std::string reasonIfUnsupportedFull; \
bool isSupported; \
@@ -177,6 +178,18 @@ bool IsDebugSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output, descriptor);
}
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+}
+
bool IsDivisionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -187,36 +200,39 @@ bool IsDivisionSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
-bool IsSubtractionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsEqualSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
}
-bool IsDepthwiseConvolutionSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsFakeQuantizationSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
-bool IsInputSupported(const BackendId& backend,
+bool IsFloorSupported(const BackendId& backend,
const TensorInfo& input,
+ const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
-}
+ // By definition (that is, regardless of compute device), shapes and data type must match.
+ if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
+ {
+ return false;
+ }
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
+}
bool IsFullyConnectedSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -229,6 +245,25 @@ bool IsFullyConnectedSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
+}
+
+bool IsInputSupported(const BackendId& backend,
+ const TensorInfo& input,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
+}
+
+
bool IsL2NormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -276,6 +311,25 @@ bool IsMaximumSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
}
+bool IsMeanSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
+}
+
+bool IsMemCopySupported(const BackendId &backend,
+ const TensorInfo &input,
+ const TensorInfo &output,
+ char *reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
+}
+
bool IsMergerSupported(const BackendId& backend,
std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
@@ -287,6 +341,16 @@ bool IsMergerSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
}
+bool IsMinimumSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
+}
+
bool IsMultiplicationSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -315,6 +379,17 @@ bool IsOutputSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
+bool IsPadSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
+}
+
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -335,6 +410,15 @@ bool IsPooling2dSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
+bool IsReshapeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
+}
+
bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported,
@@ -343,6 +427,15 @@ bool IsResizeBilinearSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
+bool IsRsqrtSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
+}
+
bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -372,68 +465,6 @@ bool IsSplitterSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
-bool IsFakeQuantizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
-}
-
-bool IsReshapeSupported(const BackendId& backend,
- const TensorInfo& input,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
-}
-
-bool IsRsqrtSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
-}
-
-bool IsFloorSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- // By definition (that is, regardless of compute device), shapes and data type must match.
- if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
- {
- return false;
- }
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
-}
-
-bool IsMeanSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
-}
-
-bool IsPadSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
-}
-
bool IsStridedSliceSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -444,34 +475,14 @@ bool IsStridedSliceSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
}
-bool IsMinimumSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
-}
-
-bool IsGreaterSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
-}
-
-bool IsEqualSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsSubtractionSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
-}
+} // namespace armnn
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 4caa31905c..c153eb67fa 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -132,7 +132,7 @@ void Profiler::AnalyzeEventSequenceAndWriteResults(ItertType first, ItertType la
<< std::setw(20) << durationMs
<< std::setw(20) << startTimeMs
<< std::setw(20) << stopTimeMs
- << std::setw(20) << GetComputeDeviceAsCString(eventPtr->GetComputeDevice())
+ << std::setw(20) << eventPtr->GetBackendId().Get()
<< std::endl;
}
outStream << std::endl;
@@ -194,10 +194,12 @@ void Profiler::EnableProfiling(bool enableProfiling)
m_ProfilingEnabled = enableProfiling;
}
-Event* Profiler::BeginEvent(Compute compute, const std::string& label, std::vector<InstrumentPtr>&& instruments)
+Event* Profiler::BeginEvent(const BackendId& backendId,
+ const std::string& label,
+ std::vector<InstrumentPtr>&& instruments)
{
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- m_EventSequence.push_back(std::make_unique<Event>(label, this, parent, compute, std::move(instruments)));
+ m_EventSequence.push_back(std::make_unique<Event>(label, this, parent, backendId, std::move(instruments)));
Event* event = m_EventSequence.back().get();
event->Start();
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index ef6bfd5ffb..0fb60d346a 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -35,7 +35,7 @@ public:
// Marks the beginning of a user-defined event.
// No attempt will be made to copy the name string: it must be known at compile time.
- Event* BeginEvent(Compute compute, const std::string& name, std::vector<InstrumentPtr>&& instruments);
+ Event* BeginEvent(const BackendId& backendId, const std::string& name, std::vector<InstrumentPtr>&& instruments);
// Marks the end of a user-defined event.
void EndEvent(Event* event);
@@ -117,7 +117,7 @@ public:
using InstrumentPtr = std::unique_ptr<Instrument>;
template<typename... Args>
- ScopedProfilingEvent(Compute compute, const std::string& name, Args... args)
+ ScopedProfilingEvent(const BackendId& backendId, const std::string& name, Args... args)
: m_Event(nullptr)
, m_Profiler(ProfilerManager::GetInstance().GetProfiler())
{
@@ -126,7 +126,7 @@ public:
std::vector<InstrumentPtr> instruments(0);
instruments.reserve(sizeof...(args)); //One allocation
ConstructNextInVector(instruments, args...);
- m_Event = m_Profiler->BeginEvent(compute, name, std::move(instruments));
+ m_Event = m_Profiler->BeginEvent(backendId, name, std::move(instruments));
}
}
@@ -152,15 +152,15 @@ private:
ConstructNextInVector(instruments, args...);
}
- Event* m_Event; ///< Event to track
- Profiler* m_Profiler; ///< Profiler used
+ Event* m_Event; ///< Event to track
+ Profiler* m_Profiler; ///< Profiler used
};
} // namespace armnn
// The event name must be known at compile time
-#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(compute, /*name,*/ ...) \
- armnn::ScopedProfilingEvent e_##__FILE__##__LINE__(compute, /*name,*/ __VA_ARGS__);
+#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, /*name,*/ ...) \
+ armnn::ScopedProfilingEvent e_##__FILE__##__LINE__(backendId, /*name,*/ __VA_ARGS__);
-#define ARMNN_SCOPED_PROFILING_EVENT(compute, name) \
- ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(compute, name, armnn::WallClockTimer())
+#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, name, armnn::WallClockTimer())
diff --git a/src/armnn/ProfilingEvent.cpp b/src/armnn/ProfilingEvent.cpp
index 55e931a163..60fb2f79c6 100644
--- a/src/armnn/ProfilingEvent.cpp
+++ b/src/armnn/ProfilingEvent.cpp
@@ -11,12 +11,12 @@ namespace armnn
Event::Event(const std::string& eventName,
Profiler* profiler,
Event* parent,
- const Compute computeDevice,
+ const BackendId backendId,
std::vector<InstrumentPtr>&& instruments)
: m_EventName(eventName)
, m_Profiler(profiler)
, m_Parent(parent)
- , m_ComputeDevice(computeDevice)
+ , m_BackendId(backendId)
, m_Instruments(std::move(instruments))
{
}
@@ -25,7 +25,7 @@ Event::Event(Event&& other) noexcept
: m_EventName(std::move(other.m_EventName))
, m_Profiler(other.m_Profiler)
, m_Parent(other.m_Parent)
- , m_ComputeDevice(other.m_ComputeDevice)
+ , m_BackendId(other.m_BackendId)
, m_Instruments(std::move(other.m_Instruments))
{
@@ -79,9 +79,9 @@ const Event* Event::GetParentEvent() const
return m_Parent;
}
-Compute Event::GetComputeDevice() const
+BackendId Event::GetBackendId() const
{
- return m_ComputeDevice;
+ return m_BackendId;
}
Event& Event::operator=(Event&& other) noexcept
@@ -94,7 +94,7 @@ Event& Event::operator=(Event&& other) noexcept
m_EventName = other.m_EventName;
m_Profiler = other.m_Profiler;
m_Parent = other.m_Parent;
- m_ComputeDevice = other.m_ComputeDevice;
+ m_BackendId = other.m_BackendId;
other.m_Profiler = nullptr;
other.m_Parent = nullptr;
return *this;
diff --git a/src/armnn/ProfilingEvent.hpp b/src/armnn/ProfilingEvent.hpp
index 134735530f..9f57753585 100644
--- a/src/armnn/ProfilingEvent.hpp
+++ b/src/armnn/ProfilingEvent.hpp
@@ -27,10 +27,10 @@ public:
using Instruments = std::vector<InstrumentPtr>;
Event(const std::string& eventName,
- Profiler* profiler,
- Event* parent,
- const Compute computeDevice,
- std::vector<InstrumentPtr>&& instrument);
+ Profiler* profiler,
+ Event* parent,
+ const BackendId backendId,
+ std::vector<InstrumentPtr>&& instrument);
Event(const Event& other) = delete;
@@ -62,9 +62,9 @@ public:
/// \return Pointer of the parent event
const Event* GetParentEvent() const;
- /// Get the compute device of the event
- /// \return Compute device of the event
- Compute GetComputeDevice() const;
+ /// Get the backend id of the event
+ /// \return Backend id of the event
+ BackendId GetBackendId() const;
/// Assignment operator
Event& operator=(const Event& other) = delete;
@@ -82,8 +82,8 @@ private:
/// Stores optional parent event
Event* m_Parent;
- /// Compute device
- Compute m_ComputeDevice;
+ /// Backend id
+ BackendId m_BackendId;
/// Instruments to use
Instruments m_Instruments;
diff --git a/src/armnn/test/ProfilingEventTest.cpp b/src/armnn/test/ProfilingEventTest.cpp
index 9e31ccb323..0add8365e9 100644
--- a/src/armnn/test/ProfilingEventTest.cpp
+++ b/src/armnn/test/ProfilingEventTest.cpp
@@ -2,10 +2,12 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include <boost/test/unit_test.hpp>
#include "ProfilingEvent.hpp"
#include "Profiling.hpp"
+
#include <thread>
using namespace armnn;
@@ -24,7 +26,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
Event testEvent(eventName,
nullptr,
nullptr,
- armnn::Compute::Undefined,
+ BackendId(),
std::move(insts1));
BOOST_CHECK_EQUAL(testEvent.GetName(), "EventName");
@@ -41,17 +43,18 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event with CpuAcc
+ BackendId cpuAccBackendId(Compute::CpuAcc);
Event::Instruments insts2;
insts2.emplace_back(std::make_unique<WallClockTimer>());
Event testEvent2(eventName,
profileManager.GetProfiler(),
&testEvent,
- Compute::CpuAcc,
+ cpuAccBackendId,
std::move(insts2));
BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK_EQUAL(Compute::CpuAcc, testEvent2.GetComputeDevice());
+ BOOST_CHECK(cpuAccBackendId == testEvent2.GetBackendId());
}
BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
@@ -66,7 +69,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
Event testEvent(eventName,
nullptr,
nullptr,
- armnn::Compute::Undefined,
+ BackendId(),
std::move(insts1));
BOOST_CHECK_EQUAL(testEvent.GetName(), "GPUEvent");
@@ -83,13 +86,18 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event
+ BackendId gpuAccBackendId(Compute::GpuAcc);
Event::Instruments insts2;
insts2.emplace_back(std::make_unique<WallClockTimer>());
- Event testEvent2(eventName, profileManager.GetProfiler(), &testEvent, Compute::GpuAcc, std::move(insts2));
+ Event testEvent2(eventName,
+ profileManager.GetProfiler(),
+ &testEvent,
+ gpuAccBackendId,
+ std::move(insts2));
BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK_EQUAL(Compute::GpuAcc, testEvent2.GetComputeDevice());
+ BOOST_CHECK(gpuAccBackendId == testEvent2.GetBackendId());
}
BOOST_AUTO_TEST_SUITE_END()