diff options
author | David Beck <david.beck@arm.com> | 2018-09-19 12:03:20 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:56 +0100 |
commit | 10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab (patch) | |
tree | 1ac5b4f415531e2ef759439ab8e113f177bea7c5 /src/backends/Workload.hpp | |
parent | a3f165624b2cdfbced674af5a6e11856b1e746d9 (diff) | |
download | armnn-10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab.tar.gz |
IVGCVSW-1897 : build infrastructure for the src/backends folder
Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
Diffstat (limited to 'src/backends/Workload.hpp')
-rw-r--r-- | src/backends/Workload.hpp | 147 |
1 file changed, 147 insertions, 0 deletions
diff --git a/src/backends/Workload.hpp b/src/backends/Workload.hpp new file mode 100644 index 0000000000..cf9c6f21e5 --- /dev/null +++ b/src/backends/Workload.hpp @@ -0,0 +1,147 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "WorkloadData.hpp" +#include "WorkloadInfo.hpp" +#include <algorithm> +#include "Profiling.hpp" + +namespace armnn +{ + +// Workload interface to enqueue a layer computation. +class IWorkload +{ +public: + virtual ~IWorkload() {} + + virtual void Execute() const = 0; +}; + +// NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template +// in the various workload factories. +// There should never be an instantiation of a NullWorkload. +class NullWorkload : public IWorkload +{ + NullWorkload()=delete; +}; + +template <typename QueueDescriptor> +class BaseWorkload : public IWorkload +{ +public: + + BaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : m_Data(descriptor) + { + m_Data.Validate(info); + } + + const QueueDescriptor& GetData() const { return m_Data; } + +protected: + const QueueDescriptor m_Data; +}; + +// TypedWorkload used +template <typename QueueDescriptor, armnn::DataType... 
DataTypes> +class TypedWorkload : public BaseWorkload<QueueDescriptor> +{ +public: + + TypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<QueueDescriptor>(descriptor, info) + { + std::vector<armnn::DataType> dataTypes = {DataTypes...}; + armnn::DataType expectedInputType; + + if (!info.m_InputTensorInfos.empty()) + { + expectedInputType = info.m_InputTensorInfos.front().GetDataType(); + + if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end()) + { + BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + } + BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()), + info.m_InputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == expectedInputType; + }), + "Trying to create workload with incorrect type"); + } + armnn::DataType expectedOutputType; + + if (!info.m_OutputTensorInfos.empty()) + { + expectedOutputType = info.m_OutputTensorInfos.front().GetDataType(); + + if (!info.m_InputTensorInfos.empty()) + { + if (expectedOutputType != expectedInputType) + { + BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + } + } + else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end()) + { + BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + } + BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()), + info.m_OutputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == expectedOutputType; + }), + "Trying to create workload with incorrect type"); + } + } +}; + +template <typename QueueDescriptor, armnn::DataType InputDataType, armnn::DataType OutputDataType> +class MultiTypedWorkload : public BaseWorkload<QueueDescriptor> +{ +public: + + MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<QueueDescriptor>(descriptor, info) + { + BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(), + 
info.m_InputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == InputDataType; + }), + "Trying to create workload with incorrect type"); + BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(), + info.m_OutputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == OutputDataType; + }), + "Trying to create workload with incorrect type"); + } +}; + +template <typename QueueDescriptor> +using FloatWorkload = TypedWorkload<QueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Float32>; + +template <typename QueueDescriptor> +using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>; + +template <typename QueueDescriptor> +using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QuantisedAsymm8>; + +template <typename QueueDescriptor> +using Float16ToFloat32Workload = MultiTypedWorkload<QueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Float32>; + +template <typename QueueDescriptor> +using Float32ToFloat16Workload = MultiTypedWorkload<QueueDescriptor, + armnn::DataType::Float32, + armnn::DataType::Float16>; + +} //namespace armnn |