author     Georgios Pinitas <georgios.pinitas@arm.com>   2021-02-23 10:01:33 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>   2021-04-06 12:48:34 +0000
commit     3f26ef4f9a2d447adb324dd69aec7c49cf7905fc (patch)
tree       7f0e38f2f1675cfa97644f3309a20e296b6cddfd
parent     7a452fe8630b3ce0a58f63869178d06aaba325fc (diff)
download   ComputeLibrary-3f26ef4f9a2d447adb324dd69aec7c49cf7905fc.tar.gz
Add tensor related data structures for the new API
Adds the following:
 - TensorDescriptor: responsible for holding the information needed to represent a tensor (e.g. shape, dimensions, etc.)
 - Tensor: an aggregate object of a descriptor and a backing memory
 - TensorPack: a map of tensors that can be passed to operators as inputs/outputs

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I02734ac6ad85700d91d6e73217b4637adbf5d177
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5260
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
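For illustration, a minimal sketch of how the three new constructs are meant to compose through the public C++ API added in arm_compute/Acl.hpp (acl::Target::Cpu and the shape are assumptions made for this example; error handling is elided):

    #include "arm_compute/Acl.hpp"

    int main()
    {
        using namespace acl;

        // Create a context on the CPU target with default options (Target::Cpu assumed)
        StatusCode status = StatusCode::Success;
        Context    ctx(Target::Cpu, Context::Options(), &status);

        // TensorDescriptor: meta-data only (shape + data type)
        TensorDescriptor desc({ 32, 16 }, DataType::Float32);

        // Tensor: descriptor plus allocated backing memory
        Tensor tensor(ctx, desc, &status);

        // TensorPack: map of tensors, keyed by operator slot ids
        TensorPack pack(ctx, &status);
        pack.add(tensor, AclSrc0);

        return status == StatusCode::Success ? 0 : 1;
    }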
-rw-r--r--   Android.bp                                     6
-rw-r--r--   SConscript                                     1
-rw-r--r--   arm_compute/Acl.hpp                          252
-rw-r--r--   arm_compute/AclEntrypoints.h                 127
-rw-r--r--   arm_compute/AclOpenClExt.h                     9
-rw-r--r--   arm_compute/AclTypes.h                        61
-rw-r--r--   arm_compute/core/utils/logging/Macros.h       15
-rw-r--r--   src/c/AclContext.cpp                          11
-rw-r--r--   src/c/AclTensor.cpp                          148
-rw-r--r--   src/c/AclTensorPack.cpp                      105
-rw-r--r--   src/c/cl/AclOpenClExt.cpp                     29
-rw-r--r--   src/common/IContext.h                         11
-rw-r--r--   src/common/ITensor.h                         128
-rw-r--r--   src/common/TensorPack.cpp                     74
-rw-r--r--   src/common/TensorPack.h                      130
-rw-r--r--   src/common/Types.h                             5
-rw-r--r--   src/common/utils/LegacySupport.cpp            66
-rw-r--r--   src/common/utils/LegacySupport.h              44
-rw-r--r--   src/common/utils/Log.h                        11
-rw-r--r--   src/common/utils/Utils.h                      16
-rw-r--r--   src/cpu/CpuContext.cpp                        11
-rw-r--r--   src/cpu/CpuContext.h                           3
-rw-r--r--   src/cpu/CpuTensor.cpp                         80
-rw-r--r--   src/cpu/CpuTensor.h                           63
-rw-r--r--   src/gpu/cl/ClContext.cpp                      19
-rw-r--r--   src/gpu/cl/ClContext.h                         9
-rw-r--r--   src/gpu/cl/ClTensor.cpp                       92
-rw-r--r--   src/gpu/cl/ClTensor.h                         66
-rw-r--r--   tests/validation/cpu/unit/Context.cpp         94
-rw-r--r--   tests/validation/cpu/unit/Tensor.cpp          69
-rw-r--r--   tests/validation/cpu/unit/TensorPack.cpp      57
-rw-r--r--   tests/validation/fixtures/UNIT/Context.h     148
-rw-r--r--   tests/validation/fixtures/UNIT/Tensor.h      298
-rw-r--r--   tests/validation/fixtures/UNIT/TensorPack.h  184
-rw-r--r--   tests/validation/gpu/unit/Context.cpp         70
-rw-r--r--   tests/validation/gpu/unit/Tensor.cpp          63
-rw-r--r--   tests/validation/gpu/unit/TensorPack.cpp      57
37 files changed, 2449 insertions, 183 deletions
diff --git a/Android.bp b/Android.bp
index 219836f812..dd8509ebcd 100644
--- a/Android.bp
+++ b/Android.bp
@@ -52,9 +52,13 @@ cc_library_static {
export_include_dirs: [".", "./include"],
srcs: [
"src/c/AclContext.cpp",
+ "src/c/AclTensor.cpp",
+ "src/c/AclTensorPack.cpp",
"src/c/AclVersion.cpp",
"src/c/cl/AclOpenClExt.cpp",
"src/common/AllocatorWrapper.cpp",
+ "src/common/TensorPack.cpp",
+ "src/common/utils/LegacySupport.cpp",
"src/core/AccessWindowAutoPadding.cpp",
"src/core/AccessWindowStatic.cpp",
"src/core/AccessWindowTranspose.cpp",
@@ -386,7 +390,9 @@ cc_library_static {
"src/core/utils/misc/MMappedFile.cpp",
"src/core/utils/quantization/AsymmHelpers.cpp",
"src/cpu/CpuContext.cpp",
+ "src/cpu/CpuTensor.cpp",
"src/gpu/cl/ClContext.cpp",
+ "src/gpu/cl/ClTensor.cpp",
"src/runtime/Allocator.cpp",
"src/runtime/BlobLifetimeManager.cpp",
"src/runtime/BlobMemoryPool.cpp",
diff --git a/SConscript b/SConscript
index 9894e30481..936090581c 100644
--- a/SConscript
+++ b/SConscript
@@ -190,6 +190,7 @@ runtime_files += Glob('src/runtime/CPP/functions/*.cpp')
runtime_files += Glob('src/c/*.cpp')
runtime_files += Glob('src/common/*.cpp')
+runtime_files += Glob('src/common/utils/*.cpp')
runtime_files += Glob('src/cpu/*.cpp')
# CLHarrisCorners uses the Scheduler to run CPP kernels
diff --git a/arm_compute/Acl.hpp b/arm_compute/Acl.hpp
index b74e65430c..a009894438 100644
--- a/arm_compute/Acl.hpp
+++ b/arm_compute/Acl.hpp
@@ -29,6 +29,7 @@
#include <cstdlib>
#include <memory>
#include <string>
+#include <vector>
#if defined(ARM_COMPUTE_EXCEPTIONS_ENABLED)
#include <exception>
@@ -41,6 +42,8 @@ namespace acl
{
// Forward declarations
class Context;
+class Tensor;
+class TensorPack;
/**< Status code enum */
enum class StatusCode
@@ -80,6 +83,8 @@ struct ObjectDeleter
};
OBJECT_DELETER(AclContext, AclDestroyContext)
+OBJECT_DELETER(AclTensor, AclDestroyTensor)
+OBJECT_DELETER(AclTensorPack, AclDestroyTensorPack)
#undef OBJECT_DELETER
@@ -256,13 +261,12 @@ private:
*
* @return Status code
*/
-static inline StatusCode report_status(StatusCode status, const std::string &msg)
+static inline void report_status(StatusCode status, const std::string &msg)
{
if(status != StatusCode::Success)
{
throw Status(status, msg);
}
- return status;
}
#else /* defined(ARM_COMPUTE_EXCEPTIONS_ENABLED) */
/** Reports a status code
@@ -275,10 +279,10 @@ static inline StatusCode report_status(StatusCode status, const std::string &msg
*
* @return Status code
*/
-static inline StatusCode report_status(StatusCode status, const std::string &msg)
+static inline void report_status(StatusCode status, const std::string &msg)
{
+ ARM_COMPUTE_IGNORE_UNUSED(status);
ARM_COMPUTE_IGNORE_UNUSED(msg);
- return status;
}
#endif /* defined(ARM_COMPUTE_EXCEPTIONS_ENABLED) */
@@ -313,12 +317,22 @@ public:
/**< Context options */
struct Options
{
+ static constexpr int32_t num_threads_auto = -1; /**< Allow runtime to specify number of threads */
+
/** Default Constructor
*
* @note By default no precision loss is enabled for operators
* @note By default the preferred execution mode is to favor multiple consecutive reruns of an operator
*/
- Options() = default;
+ Options()
+ : Options(ExecutionMode::FastRerun /* mode */,
+ AclCpuCapabilitiesAuto /* caps */,
+ false /* enable_fast_math */,
+ nullptr /* kernel_config */,
+ num_threads_auto /* max_compute_units */,
+ nullptr /* allocator */)
+ {
+ }
/** Constructor
*
* @param[in] mode Execution mode to be used
@@ -335,14 +349,15 @@ public:
int32_t max_compute_units,
AclAllocator *allocator)
{
- opts.mode = detail::as_cenum<AclExecutionMode>(mode);
- opts.capabilities = caps;
- opts.enable_fast_math = enable_fast_math;
- opts.kernel_config_file = kernel_config;
- opts.max_compute_units = max_compute_units;
- opts.allocator = allocator;
+ copts.mode = detail::as_cenum<AclExecutionMode>(mode);
+ copts.capabilities = caps;
+ copts.enable_fast_math = enable_fast_math;
+ copts.kernel_config_file = kernel_config;
+ copts.max_compute_units = max_compute_units;
+ copts.allocator = allocator;
}
- AclContextOptions opts{ acl_default_ctx_options };
+
+ AclContextOptions copts{};
};
public:
@@ -367,14 +382,223 @@ public:
Context(Target target, const Options &options, StatusCode *status = nullptr)
{
AclContext ctx;
- const auto st = detail::as_enum<StatusCode>(AclCreateContext(&ctx, detail::as_cenum<AclTarget>(target), &options.opts));
+ const auto st = detail::as_enum<StatusCode>(AclCreateContext(&ctx, detail::as_cenum<AclTarget>(target), &options.copts));
reset(ctx);
- report_status(st, "Failure during context creation");
+ report_status(st, "[Arm Compute Library] Failed to create context");
+ if(status)
+ {
+ *status = st;
+ }
+ }
+};
+
+/**< Data type enumeration */
+enum class DataType
+{
+ Unknown = AclDataTypeUnknown,
+ UInt8 = AclUInt8,
+ Int8 = AclInt8,
+ UInt16 = AclUInt16,
+ Int16 = AclInt16,
+ UInt32 = AclUint32,
+ Int32 = AclInt32,
+ Float16 = AclFloat16,
+ BFloat16 = AclBFloat16,
+ Float32 = AclFloat32,
+};
+
+/** Tensor Descriptor class
+ *
+ * Structure that contains all the required meta-data to represent a tensor
+ */
+class TensorDescriptor
+{
+public:
+ /** Constructor
+ *
+ * @param[in] shape Shape of the tensor
+ * @param[in] data_type Data type of the tensor
+ */
+ TensorDescriptor(const std::vector<int32_t> &shape, DataType data_type)
+ : _shape(shape), _data_type(data_type)
+ {
+ _cdesc.ndims = _shape.size();
+ _cdesc.shape = _shape.data();
+ _cdesc.data_type = detail::as_cenum<AclDataType>(_data_type);
+ _cdesc.strides = nullptr;
+ _cdesc.boffset = 0;
+ }
+ /** Get underlying C tensor descriptor
+ *
+ * @return Underlying structure
+ */
+ const AclTensorDescriptor *get() const
+ {
+ return &_cdesc;
+ }
+
+private:
+ std::vector<int32_t> _shape{};
+ DataType _data_type{};
+ AclTensorDescriptor _cdesc{};
+};
+
+/** Import memory types */
+enum class ImportType
+{
+ Host = AclImportMemoryType::AclHostPtr
+};
+
+/** Tensor class
+ *
+ * Tensor is a mathematical construct that can represent an N-dimensional space.
+ *
+ * @note Maximum dimensionality support is 6 internally at the moment
+ */
+class Tensor : public detail::ObjectBase<AclTensor_>
+{
+public:
+ /** Constructor
+ *
+ * @note Tensor memory is allocated
+ *
+ * @param[in] ctx Context from which the tensor will be created
+ * @param[in] desc Tensor descriptor to be used
+ * @param[out] status Status information if requested
+ */
+ Tensor(Context &ctx, const TensorDescriptor &desc, StatusCode *status = nullptr)
+ : Tensor(ctx, desc, true, status)
+ {
+ }
+ /** Constructor
+ *
+ * @param[in] ctx Context from which the tensor will be created
+ * @param[in] desc Tensor descriptor to be used
+ * @param[in] allocate Flag to indicate if the tensor needs to be allocated
+ * @param[out] status Status information if requested
+ */
+ Tensor(Context &ctx, const TensorDescriptor &desc, bool allocate, StatusCode *status)
+ {
+ AclTensor tensor;
+ const auto st = detail::as_enum<StatusCode>(AclCreateTensor(&tensor, ctx.get(), desc.get(), allocate));
+ reset(tensor);
+ report_status(st, "[Arm Compute Library] Failed to create tensor!");
if(status)
{
*status = st;
}
}
+ /** Maps the backing memory of a given tensor that can be used by the host to access any contents
+ *
+ * @return A valid non-zero pointer in case of success else nullptr
+ */
+ void *map()
+ {
+ void *handle = nullptr;
+ const auto st = detail::as_enum<StatusCode>(AclMapTensor(_object.get(), &handle));
+ report_status(st, "[Arm Compute Library] Failed to map the tensor and extract the tensor's backing memory!");
+ return handle;
+ }
+ /** Unmaps tensor's memory
+ *
+ * @param[in] handle Handle to unmap
+ *
+ * @return Status code
+ */
+ StatusCode unmap(void *handle)
+ {
+ const auto st = detail::as_enum<StatusCode>(AclUnmapTensor(_object.get(), handle));
+ report_status(st, "[Arm Compute Library] Failed to unmap the tensor!");
+ return st;
+ }
+ /** Import external memory to a given tensor object
+ *
+ * @param[in] handle External memory handle
+ * @param[in] type Type of memory to be imported
+ *
+ * @return Status code
+ */
+ StatusCode import(void *handle, ImportType type)
+ {
+ const auto st = detail::as_enum<StatusCode>(AclTensorImport(_object.get(), handle, detail::as_cenum<AclImportMemoryType>(type)));
+ report_status(st, "[Arm Compute Library] Failed to import external memory to tensor!");
+ return st;
+ }
+};
+
+/** Tensor pack class
+ *
+ * Pack is a utility construct that is used to create a collection of tensors that can then
+ * be passed into operators as inputs.
+ */
+class TensorPack : public detail::ObjectBase<AclTensorPack_>
+{
+public:
+ /** Pack pair construct */
+ struct PackPair
+ {
+ /** Constructor
+ *
+ * @param[in] tensor_ Tensor to pack
+ * @param[in] slot_id_ Slot identification of the tensor with respect to the operator
+ */
+ PackPair(Tensor *tensor_, int32_t slot_id_)
+ : tensor(tensor_), slot_id(slot_id_)
+ {
+ }
+
+ Tensor *tensor{ nullptr }; /**< Tensor object */
+ int32_t slot_id{ AclSlotUnknown }; /**< Slot id with respect to the operator */
+ };
+
+public:
+ /** Constructor
+ *
+ * @param[in] ctx Context from which the tensor pack will be created
+ * @param[out] status Status information if requested
+ */
+ explicit TensorPack(Context &ctx, StatusCode *status = nullptr)
+ {
+ AclTensorPack pack;
+ const auto st = detail::as_enum<StatusCode>(AclCreateTensorPack(&pack, ctx.get()));
+ reset(pack);
+ report_status(st, "[Arm Compute Library] Failure during tensor pack creation");
+ if(status)
+ {
+ *status = st;
+ }
+ }
+ /** Add tensor to tensor pack
+ *
+ * @param[in] slot_id Slot id of the tensor with respect to the operator
+ * @param[in] tensor Tensor to be added in the pack
+ *
+ * @return Status code
+ */
+ StatusCode add(Tensor &tensor, int32_t slot_id)
+ {
+ return detail::as_enum<StatusCode>(AclPackTensor(_object.get(), tensor.get(), slot_id));
+ }
+ /** Add a list of tensors to a tensor pack
+ *
+ * @param[in] packed Pack pairs to be added
+ *
+ * @return Status code
+ */
+ StatusCode add(std::initializer_list<PackPair> packed)
+ {
+ const size_t size = packed.size();
+ std::vector<int32_t> slots(size);
+ std::vector<AclTensor> tensors(size);
+ int i = 0;
+ for(auto &p : packed)
+ {
+ slots[i] = p.slot_id;
+ tensors[i] = AclTensor(p.tensor->get());
+ ++i;
+ }
+ return detail::as_enum<StatusCode>(AclPackTensors(_object.get(), tensors.data(), slots.data(), size));
+ }
};
} // namespace acl
#undef ARM_COMPUTE_IGNORE_UNUSED
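To make the mapping and packing flow above concrete, a hedged sketch using the acl::Tensor and acl::TensorPack wrappers just defined (the context, shapes and slot ids are illustrative; a real call site would also check the returned status codes):

    // Assumes ctx is a valid acl::Context created as shown earlier.
    void prepare_pack_example(acl::Context &ctx)
    {
        acl::TensorDescriptor desc({ 8, 8 }, acl::DataType::Float32);
        acl::Tensor           src(ctx, desc);
        acl::Tensor           dst(ctx, desc);

        // Host-side access to the backing memory
        void *data = src.map();
        // ... fill data ...
        src.unmap(data);

        // Register several tensors against operator slots in one call
        acl::TensorPack pack(ctx);
        pack.add({ { &src, AclSrc0 }, { &dst, AclDst } });
    }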
diff --git a/arm_compute/AclEntrypoints.h b/arm_compute/AclEntrypoints.h
index 02e072f826..cd974341c2 100644
--- a/arm_compute/AclEntrypoints.h
+++ b/arm_compute/AclEntrypoints.h
@@ -62,6 +62,133 @@ AclStatus AclCreateContext(AclContext *ctx,
*/
AclStatus AclDestroyContext(AclContext ctx);
+/** Create a Tensor object
+ *
+ * Tensor is a generalized matrix construct that can represent data of up to N dimensions (where N = 6 for Compute Library)
+ * The object holds backing memory alongside it to operate on
+ *
+ * @param[in, out] tensor A valid non-zero tensor object if no failures occur
+ * @param[in] ctx Context to be used
+ * @param[in] desc Tensor representation meta-data
+ * @param[in] allocate Instructs allocation of the tensor objects
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclOutOfMemory if there was a failure allocating memory resources
+ * - @ref AclUnsupportedTarget if the requested target is unsupported
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclCreateTensor(AclTensor *tensor, AclContext ctx, const AclTensorDescriptor *desc, bool allocate);
+
+/** Map a tensor's backing memory to the host
+ *
+ * @param[in] tensor Tensor to be mapped
+ * @param[in, out] handle A handle to the underlying backing memory
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclMapTensor(AclTensor tensor, void **handle);
+
+/** Unmap the tensor's backing memory
+ *
+ * @param[in] tensor Tensor to unmap memory from
+ * @param[in] handle Backing memory to be unmapped
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclUnmapTensor(AclTensor tensor, void *handle);
+
+/** Import external memory to a given tensor object
+ *
+ * @param[in, out] tensor Tensor to import memory to
+ * @param[in] handle Backing memory to be imported
+ * @param[in] type Type of the imported memory
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclTensorImport(AclTensor tensor, void *handle, AclImportMemoryType type);
+
+/** Destroy a given tensor object
+ *
+ * @param[in,out] tensor A valid tensor object to be destroyed
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclInvalidArgument if the provided tensor is invalid
+ */
+AclStatus AclDestroyTensor(AclTensor tensor);
+
+/** Creates a tensor pack
+ *
+ * Tensor packs are used to create a collection of tensors that can be passed around for operator execution
+ *
+ * @param[in,out] pack A valid non-zero tensor pack object if no failures occur
+ * @param[in] ctx Context to be used
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclOutOfMemory if there was a failure allocating memory resources
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclCreateTensorPack(AclTensorPack *pack, AclContext ctx);
+
+/** Add a tensor to a tensor pack
+ *
+ * @param[in,out] pack Pack to append a tensor to
+ * @param[in] tensor Tensor to pack
+ * @param[in] slot_id Slot of the operator that the tensor corresponds to
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclOutOfMemory if there was a failure allocating memory resources
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclPackTensor(AclTensorPack pack, AclTensor tensor, int32_t slot_id);
+
+/** Add a list of tensors to a tensor pack
+ *
+ * @param[in,out] pack Pack to append the tensors to
+ * @param[in] tensors Tensors to append to the pack
+ * @param[in] slot_ids Slot IDs of each tensor with respect to the operator
+ * @param[in] num_tensors Number of tensors that are passed
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclOutOfMemory if there was a failure allocating memory resources
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclPackTensors(AclTensorPack pack, AclTensor *tensors, int32_t *slot_ids, size_t num_tensors);
+
+/** Destroy a given tensor pack object
+ *
+ * @param[in,out] pack A valid tensor pack object to destroy
+ *
+ * @return Status code
+ *
+ * Returns:
+ * - @ref AclSuccess if function was completed successfully
+ * - @ref AclInvalidArgument if the provided tensor pack is invalid
+ */
+AclStatus AclDestroyTensorPack(AclTensorPack pack);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
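A sketch of driving these entrypoints directly from C/C++; the context is assumed to have been created with AclCreateContext beforehand, and the shape is illustrative:

    #include "arm_compute/AclEntrypoints.h"

    AclStatus create_map_destroy(AclContext ctx)
    {
        int32_t             shape[2] = { 3, 3 };
        AclTensorDescriptor desc{ 2 /* ndims */, shape, AclFloat32, nullptr /* strides */, 0 /* boffset */ };

        AclTensor tensor;
        AclStatus st = AclCreateTensor(&tensor, ctx, &desc, true /* allocate */);
        if(st != AclSuccess)
        {
            return st;
        }

        void *data = nullptr;
        AclMapTensor(tensor, &data); // host-visible pointer to the backing memory
        // ... read/write data ...
        AclUnmapTensor(tensor, data);

        return AclDestroyTensor(tensor);
    }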
diff --git a/arm_compute/AclOpenClExt.h b/arm_compute/AclOpenClExt.h
index f71cd37299..15b233ca12 100644
--- a/arm_compute/AclOpenClExt.h
+++ b/arm_compute/AclOpenClExt.h
@@ -63,6 +63,15 @@ AclStatus AclGetClContext(AclContext ctx, cl_context *opencl_context);
*/
AclStatus AclSetClContext(AclContext ctx, cl_context opencl_context);
+/** Extract the underlying OpenCL memory object of a given Compute Library tensor object
+ *
+ * @param[in] tensor A valid non-zero tensor
+ * @param[out] opencl_mem Underlying OpenCL memory object
+ *
+ * @return Status code
+ */
+AclStatus AclGetClMem(AclTensor tensor, cl_mem *opencl_mem);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
diff --git a/arm_compute/AclTypes.h b/arm_compute/AclTypes.h
index bee6d1a8d7..69717ec8a8 100644
--- a/arm_compute/AclTypes.h
+++ b/arm_compute/AclTypes.h
@@ -33,6 +33,10 @@ extern "C" {
/**< Opaque Context object */
typedef struct AclContext_ *AclContext;
+/**< Opaque Tensor object */
+typedef struct AclTensor_ *AclTensor;
+/**< Opaque Tensor pack object */
+typedef struct AclTensorPack_ *AclTensorPack;
// Capabilities bitfield (Note: if multiple are enabled ComputeLibrary will pick the best possible)
typedef uint64_t AclTargetCapabilities;
@@ -134,16 +138,55 @@ typedef struct AclContextOptions
AclAllocator *allocator; /**< Allocator to be used by all the memory internally */
} AclContextOptions;
-/** Default context */
-const AclContextOptions acl_default_ctx_options =
+/**< Supported data types */
+typedef enum AclDataType
{
- AclPreferFastRerun, /* mode */
- AclCpuCapabilitiesAuto, /* capabilities */
- false, /* enable_fast_math */
- "default.mlgo", /* kernel_config_file */
- -1, /* max_compute_units */
- nullptr /* allocator */
-};
+ AclDataTypeUnknown = 0, /**< Unknown data type */
+ AclUInt8 = 1, /**< 8-bit unsigned integer */
+ AclInt8 = 2, /**< 8-bit signed integer */
+ AclUInt16 = 3, /**< 16-bit unsigned integer */
+ AclInt16 = 4, /**< 16-bit signed integer */
+ AclUint32 = 5, /**< 32-bit unsigned integer */
+ AclInt32 = 6, /**< 32-bit signed integer */
+ AclFloat16 = 7, /**< 16-bit floating point */
+ AclBFloat16 = 8, /**< 16-bit brain floating point */
+ AclFloat32 = 9, /**< 32-bit floating point */
+} AclDataType;
+
+/**< Supported data layouts for operations */
+typedef enum AclDataLayout
+{
+ AclDataLayoutUnknown = 0, /**< Unknown data layout */
+ AclNhwc = 1, /**< Native, performant, Compute Library data layout */
+ AclNchw = 2, /**< Data layout where width is the fastest changing dimension */
+} AclDataLayout;
+
+/** Type of memory to be imported */
+typedef enum AclImportMemoryType
+{
+ AclHostPtr = 0 /**< Host allocated memory */
+} AclImportMemoryType;
+
+/**< Tensor Descriptor */
+typedef struct AclTensorDescriptor
+{
+ int32_t ndims; /**< Number of dimensions */
+ int32_t *shape; /**< Tensor Shape */
+ AclDataType data_type; /**< Tensor Data type */
+ int64_t *strides; /**< Strides on each dimension. Linear memory is assumed if nullptr */
+ int64_t boffset; /**< Offset in terms of bytes for the first element */
+} AclTensorDescriptor;
+
+/**< Slot type of a tensor */
+typedef enum
+{
+ AclSlotUnknown = -1,
+ AclSrc = 0,
+ AclSrc0 = 0,
+ AclSrc1 = 1,
+ AclDst = 30,
+ AclSrcVec = 256,
+} AclTensorSlot;
#ifdef __cplusplus
}
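As a sketch of how the descriptor and slot types above combine with the pack entrypoints (src0, src1 and dst are assumed to be valid AclTensor handles and ctx a valid AclContext):

    #include "arm_compute/AclEntrypoints.h"

    // Register tensors against well-known operator slots.
    AclStatus pack_example(AclContext ctx, AclTensor src0, AclTensor src1, AclTensor dst)
    {
        AclTensorPack pack;
        AclStatus     st = AclCreateTensorPack(&pack, ctx);
        if(st != AclSuccess)
        {
            return st;
        }

        AclTensor tensors[]  = { src0, src1, dst };
        int32_t   slot_ids[] = { AclSrc0, AclSrc1, AclDst };
        AclPackTensors(pack, tensors, slot_ids, 3);

        return AclDestroyTensorPack(pack);
    }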
diff --git a/arm_compute/core/utils/logging/Macros.h b/arm_compute/core/utils/logging/Macros.h
index 21ed721eb1..1108dd3800 100644
--- a/arm_compute/core/utils/logging/Macros.h
+++ b/arm_compute/core/utils/logging/Macros.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,6 +40,18 @@
} \
} while(false)
+#define ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME(logger_name, log_level, msg) \
+ do \
+ { \
+ auto __logger = arm_compute::logging::LoggerRegistry::get().logger(logger_name); \
+ if(__logger != nullptr) \
+ { \
+ std::ostringstream s; \
+ s << __func__ << ":" << msg; \
+ __logger->log(log_level, s.str()); \
+ } \
+ } while(false)
+
#define ARM_COMPUTE_LOG_MSG_WITH_FORMAT(logger_name, log_level, fmt, ...) \
do \
{ \
@@ -68,6 +80,7 @@
#else /* ARM_COMPUTE_LOGGING_ENABLED */
#define ARM_COMPUTE_LOG_MSG(logger_name, log_level, msg)
+#define ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME(logger_name, log_level, msg)
#define ARM_COMPUTE_LOG_MSG_WITH_FORMAT(logger_name, log_level, fmt, ...)
#define ARM_COMPUTE_LOG_STREAM(logger_name, log_level, stream)
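For reference, a hypothetical call site for the new macro (it requires ARM_COMPUTE_LOGGING_ENABLED and a logger registered under the given name; "ComputeLibrary" is the logger name used by src/common/utils/Log.h below):

    // Prepends the calling function's name, e.g. "AclCreateContext:Target is invalid!"
    ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME("ComputeLibrary", arm_compute::logging::LogLevel::ERROR, "Target is invalid!");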
diff --git a/src/c/AclContext.cpp b/src/c/AclContext.cpp
index e88995bcff..bff70f3baf 100644
--- a/src/c/AclContext.cpp
+++ b/src/c/AclContext.cpp
@@ -79,20 +79,20 @@ extern "C" AclStatus AclCreateContext(AclContext *ctx,
{
if(!is_target_valid(target))
{
- ARM_COMPUTE_LOG_ERROR_ACL("Target is invalid");
+ ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Target is invalid!");
return AclUnsupportedTarget;
}
if(options != nullptr && !are_context_options_valid(options))
{
- ARM_COMPUTE_LOG_ERROR_ACL("Context options are invalid");
+ ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context options are invalid!");
return AclInvalidArgument;
}
auto acl_ctx = create_context(target, options);
if(ctx == nullptr)
{
- ARM_COMPUTE_LOG_ERROR_ACL("Couldn't allocate internal resources for context creation");
+ ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources for context creation!");
return AclOutOfMemory;
}
*ctx = acl_ctx;
@@ -106,13 +106,12 @@ extern "C" AclStatus AclDestroyContext(AclContext external_ctx)
IContext *ctx = get_internal(external_ctx);
- StatusCode status = StatusCode::Success;
- status = detail::validate_internal_context(ctx);
+ StatusCode status = detail::validate_internal_context(ctx);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
if(ctx->refcount() != 0)
{
- ARM_COMPUTE_LOG_ERROR_ACL("Context has references on it that haven't been released");
+ ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context has references on it that haven't been released!");
// TODO: Fix the refcount with callback when reaches 0
}
diff --git a/src/c/AclTensor.cpp b/src/c/AclTensor.cpp
new file mode 100644
index 0000000000..58b17ff70e
--- /dev/null
+++ b/src/c/AclTensor.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/AclEntrypoints.h"
+#include "src/common/ITensor.h"
+#include "src/common/utils/Macros.h"
+
+namespace
+{
+/**< Maximum allowed dimensions by Compute Library */
+constexpr int32_t max_allowed_dims = 6;
+
+/** Check if a descriptor is valid
+ *
+ * @param desc Descriptor to validate
+ *
+ * @return true in case of success else false
+ */
+bool is_desc_valid(const AclTensorDescriptor &desc)
+{
+ if(desc.data_type > AclFloat32)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Unknown data type!");
+ return false;
+ }
+ if(desc.ndims > max_allowed_dims)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions surpass the maximum allowed value!");
+ return false;
+ }
+ if(desc.ndims > 0 && desc.shape == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions values are empty while dimensionality is > 0!");
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+extern "C" AclStatus AclCreateTensor(AclTensor *external_tensor,
+ AclContext external_ctx,
+ const AclTensorDescriptor *desc,
+ bool allocate)
+{
+ using namespace arm_compute;
+
+ IContext *ctx = get_internal(external_ctx);
+
+ StatusCode status = detail::validate_internal_context(ctx);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ if(desc == nullptr || !is_desc_valid(*desc))
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Descriptor is invalid!");
+ return AclInvalidArgument;
+ }
+
+ auto tensor = ctx->create_tensor(*desc, allocate);
+ if(tensor == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Couldn't allocate internal resources for tensor creation!");
+ return AclOutOfMemory;
+ }
+ *external_tensor = tensor;
+
+ return AclSuccess;
+}
+
+extern "C" AclStatus AclMapTensor(AclTensor external_tensor, void **handle)
+{
+ using namespace arm_compute;
+
+ auto tensor = get_internal(external_tensor);
+ StatusCode status = detail::validate_internal_tensor(tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ if(handle == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[AclMapTensor]: Handle object is nullptr!");
+ return AclInvalidArgument;
+ }
+
+ *handle = tensor->map();
+
+ return AclSuccess;
+}
+
+extern "C" AclStatus AclUnmapTensor(AclTensor external_tensor, void *handle)
+{
+ ARM_COMPUTE_UNUSED(handle);
+
+ using namespace arm_compute;
+
+ auto tensor = get_internal(external_tensor);
+ StatusCode status = detail::validate_internal_tensor(tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ status = tensor->unmap();
+ return AclSuccess;
+}
+
+extern "C" AclStatus AclTensorImport(AclTensor external_tensor, void *handle, AclImportMemoryType type)
+{
+ using namespace arm_compute;
+
+ auto tensor = get_internal(external_tensor);
+ StatusCode status = detail::validate_internal_tensor(tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ status = tensor->import(handle, utils::as_enum<ImportMemoryType>(type));
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ return AclSuccess;
+}
+
+extern "C" AclStatus AclDestroyTensor(AclTensor external_tensor)
+{
+ using namespace arm_compute;
+
+ auto tensor = get_internal(external_tensor);
+
+ StatusCode status = detail::validate_internal_tensor(tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ delete tensor;
+
+ return AclSuccess;
+}
diff --git a/src/c/AclTensorPack.cpp b/src/c/AclTensorPack.cpp
new file mode 100644
index 0000000000..6700ef464c
--- /dev/null
+++ b/src/c/AclTensorPack.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/AclEntrypoints.h"
+#include "src/common/ITensor.h"
+#include "src/common/TensorPack.h"
+#include "src/common/utils/Macros.h"
+
+namespace
+{
+using namespace arm_compute;
+StatusCode PackTensorInternal(TensorPack &pack, AclTensor external_tensor, int32_t slot_id)
+{
+ auto status = StatusCode::Success;
+ auto tensor = get_internal(external_tensor);
+
+ status = detail::validate_internal_tensor(tensor);
+
+ if(status != StatusCode::Success)
+ {
+ return status;
+ }
+
+ pack.add_tensor(tensor, slot_id);
+
+ return status;
+}
+} // namespace
+
+extern "C" AclStatus AclCreateTensorPack(AclTensorPack *external_pack, AclContext external_ctx)
+{
+ using namespace arm_compute;
+
+ IContext *ctx = get_internal(external_ctx);
+
+ const StatusCode status = detail::validate_internal_context(ctx);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ auto pack = new TensorPack(ctx);
+ if(pack == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources!");
+ return AclOutOfMemory;
+ }
+ *external_pack = pack;
+
+ return AclSuccess;
+}
+
+extern "C" AclStatus AclPackTensor(AclTensorPack external_pack, AclTensor external_tensor, int32_t slot_id)
+{
+ using namespace arm_compute;
+
+ auto pack = get_internal(external_pack);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(detail::validate_internal_pack(pack));
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(PackTensorInternal(*pack, external_tensor, slot_id));
+ return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclPackTensors(AclTensorPack external_pack, AclTensor *external_tensors, int32_t *slot_ids, size_t num_tensors)
+{
+ using namespace arm_compute;
+
+ auto pack = get_internal(external_pack);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(detail::validate_internal_pack(pack));
+
+ for(unsigned i = 0; i < num_tensors; ++i)
+ {
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(PackTensorInternal(*pack, external_tensors[i], slot_ids[i]));
+ }
+ return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclDestroyTensorPack(AclTensorPack external_pack)
+{
+ using namespace arm_compute;
+
+ auto pack = get_internal(external_pack);
+ StatusCode status = detail::validate_internal_pack(pack);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ delete pack;
+
+ return AclSuccess;
+}
diff --git a/src/c/cl/AclOpenClExt.cpp b/src/c/cl/AclOpenClExt.cpp
index 5f2bb47c16..a144f97f55 100644
--- a/src/c/cl/AclOpenClExt.cpp
+++ b/src/c/cl/AclOpenClExt.cpp
@@ -23,9 +23,12 @@
*/
#include "arm_compute/AclOpenClExt.h"
+#include "src/common/ITensor.h"
#include "src/common/Types.h"
#include "src/gpu/cl/ClContext.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+
#include "support/Cast.h"
extern "C" AclStatus AclGetClContext(AclContext external_ctx, cl_context *opencl_context)
@@ -80,4 +83,30 @@ extern "C" AclStatus AclSetClContext(AclContext external_ctx, cl_context opencl_
}
return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclGetClMem(AclTensor external_tensor, cl_mem *opencl_mem)
+{
+ using namespace arm_compute;
+ ITensorV2 *tensor = get_internal(external_tensor);
+
+ if(detail::validate_internal_tensor(tensor) != StatusCode::Success)
+ {
+ return AclStatus::AclInvalidArgument;
+ }
+
+ if(tensor->header.ctx->type() != Target::GpuOcl)
+ {
+ return AclStatus::AclInvalidTarget;
+ }
+
+ if(opencl_mem == nullptr)
+ {
+ return AclStatus::AclInvalidArgument;
+ }
+
+ auto cl_tensor = utils::cast::polymorphic_downcast<arm_compute::ICLTensor *>(tensor->tensor());
+ *opencl_mem = cl_tensor->cl_buffer().get();
+
+ return AclStatus::AclSuccess;
} \ No newline at end of file
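A brief sketch of the new extension point: retrieving the backing cl_mem of a tensor that lives on the OpenCL target (the tensor handle is assumed to come from AclCreateTensor on a GpuOcl context):

    #include "arm_compute/AclOpenClExt.h"

    // Returns the OpenCL buffer backing the tensor, or nullptr on failure.
    cl_mem get_cl_buffer(AclTensor tensor)
    {
        cl_mem buffer = nullptr;
        if(AclGetClMem(tensor, &buffer) != AclSuccess)
        {
            return nullptr; // e.g. AclInvalidTarget for a CPU tensor
        }
        return buffer;
    }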
diff --git a/src/common/IContext.h b/src/common/IContext.h
index 0d23abd2be..ee234795cf 100644
--- a/src/common/IContext.h
+++ b/src/common/IContext.h
@@ -41,6 +41,9 @@ protected:
namespace arm_compute
{
+// Forward declarations
+class ITensorV2;
+
/**< Context interface */
class IContext : public AclContext_
{
@@ -88,6 +91,14 @@ public:
{
return header.type == detail::ObjectType::Context;
}
+ /** Create a tensor object
+ *
+ * @param[in] desc Descriptor to use
+ * @param[in] allocate Flag to allocate tensor
+ *
+ * @return A pointer to the created tensor object
+ */
+ virtual ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) = 0;
private:
Target _target; /**< Target type of context */
diff --git a/src/common/ITensor.h b/src/common/ITensor.h
new file mode 100644
index 0000000000..ee7eac7688
--- /dev/null
+++ b/src/common/ITensor.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_ITENSOR_H_
+#define SRC_COMMON_ITENSOR_H_
+
+#include "src/common/IContext.h"
+#include "src/common/utils/Validate.h"
+
+struct AclTensor_
+{
+ arm_compute::detail::Header header{ arm_compute::detail::ObjectType::Tensor, nullptr };
+
+protected:
+ AclTensor_() = default;
+ ~AclTensor_() = default;
+};
+
+namespace arm_compute
+{
+// Forward declaration
+class ITensor;
+
+/** Base class specifying the tensor interface */
+class ITensorV2 : public AclTensor_
+{
+public:
+ /** Explicit Constructor
+ *
+ * @param[in] ctx Context to be used by the tensor
+ */
+ explicit ITensorV2(IContext *ctx)
+ : AclTensor_()
+ {
+ ARM_COMPUTE_ASSERT_NOT_NULLPTR(ctx);
+ this->header.ctx = ctx;
+ this->header.ctx->inc_ref();
+ }
+ /** Destructor */
+ virtual ~ITensorV2()
+ {
+ this->header.ctx->dec_ref();
+ this->header.type = detail::ObjectType::Invalid;
+ };
+ /** Checks if a tensor is valid
+ *
+ * @return True if successful otherwise false
+ */
+ bool is_valid() const
+ {
+ return this->header.type == detail::ObjectType::Tensor;
+ };
+ /** Map tensor to a host pointer
+ *
+ * @return A pointer to the underlying backing memory if successful else nullptr
+ */
+ virtual void *map() = 0;
+ /** Unmap tensor
+ *
+ * @return A status code
+ */
+ virtual StatusCode unmap() = 0;
+ /** Import external memory handle
+ *
+ * @param[in] handle Memory to import
+ * @param[in] type Type of imported memory
+ *
+ * @return Status code
+ */
+ virtual StatusCode import(void *handle, ImportMemoryType type) = 0;
+ /** Get the legacy tensor object
+ *
+ * @return The legacy underlying tensor object
+ */
+ virtual arm_compute::ITensor *tensor() = 0;
+};
+
+/** Extract internal representation of a Tensor
+ *
+ * @param[in] tensor Opaque tensor pointer
+ *
+ * @return The internal representation as an ITensorV2
+ */
+inline ITensorV2 *get_internal(AclTensor tensor)
+{
+ return static_cast<ITensorV2 *>(tensor);
+}
+
+namespace detail
+{
+/** Check if an internal tensor is valid
+ *
+ * @param[in] tensor Internal tensor to check
+ *
+ * @return A status code
+ */
+inline StatusCode validate_internal_tensor(const ITensorV2 *tensor)
+{
+ if(tensor == nullptr || !tensor->is_valid())
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[ITensorV2]: Invalid tensor object");
+ return StatusCode::InvalidArgument;
+ }
+ return StatusCode::Success;
+}
+} // namespace detail
+} // namespace arm_compute
+#endif /* SRC_COMMON_ITENSOR_H_ */
diff --git a/src/common/TensorPack.cpp b/src/common/TensorPack.cpp
new file mode 100644
index 0000000000..c582c7b106
--- /dev/null
+++ b/src/common/TensorPack.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/common/TensorPack.h"
+#include "src/common/ITensor.h"
+#include "src/common/utils/Validate.h"
+
+namespace arm_compute
+{
+TensorPack::TensorPack(IContext *ctx)
+ : AclTensorPack_(), _pack()
+{
+ ARM_COMPUTE_ASSERT_NOT_NULLPTR(ctx);
+ this->header.ctx = ctx;
+ this->header.ctx->inc_ref();
+}
+
+TensorPack::~TensorPack()
+{
+ this->header.ctx->dec_ref();
+ this->header.type = detail::ObjectType::Invalid;
+}
+
+AclStatus TensorPack::add_tensor(ITensorV2 *tensor, int32_t slot_id)
+{
+ _pack.add_tensor(slot_id, tensor->tensor());
+ return AclStatus::AclSuccess;
+}
+
+size_t TensorPack::size() const
+{
+ return _pack.size();
+}
+
+bool TensorPack::empty() const
+{
+ return _pack.empty();
+}
+
+bool TensorPack::is_valid() const
+{
+ return this->header.type == detail::ObjectType::TensorPack;
+}
+
+arm_compute::ITensor *TensorPack::get_tensor(int32_t slot_id)
+{
+ return _pack.get_tensor(slot_id);
+}
+
+arm_compute::ITensorPack &TensorPack::get_tensor_pack()
+{
+ return _pack;
+}
+} // namespace arm_compute
diff --git a/src/common/TensorPack.h b/src/common/TensorPack.h
new file mode 100644
index 0000000000..f330eee740
--- /dev/null
+++ b/src/common/TensorPack.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_ITENSORPACK_H_
+#define SRC_COMMON_ITENSORPACK_H_
+
+#include "arm_compute/core/ITensorPack.h"
+#include "src/common/IContext.h"
+
+struct AclTensorPack_
+{
+ arm_compute::detail::Header header{ arm_compute::detail::ObjectType::TensorPack, nullptr };
+
+protected:
+ AclTensorPack_() = default;
+ ~AclTensorPack_() = default;
+};
+
+namespace arm_compute
+{
+// Forward declaration
+class ITensor;
+class ITensorV2;
+
+/** Tensor packing service
+ *
+ * Class is responsible for creating and managing a collection of tensors.
+ * Tensor packs can be passed to operators to be part of the mutable data of the execution.
+ */
+class TensorPack : public AclTensorPack_
+{
+public:
+ /** Constructor
+ *
+ * @param[in] ctx Context to be used
+ */
+ explicit TensorPack(IContext *ctx);
+ /** Destructor */
+ ~TensorPack();
+ /** Add tensor to the pack
+ *
+ * @param[in] tensor Tensor to add
+ * @param[in] slot_id Slot identification of the tensor to add, with respect to the operator
+ *
+ * @return Status code
+ */
+ AclStatus add_tensor(ITensorV2 *tensor, int32_t slot_id);
+ /** Pack size accessor
+ *
+ * @return Number of tensors registered to the pack
+ */
+ size_t size() const;
+ /** Checks if pack is empty
+ *
+ * @return True if empty else false
+ */
+ bool empty() const;
+ /** Checks if an object is valid
+ *
+ * @return True if valid else false
+ */
+ bool is_valid() const;
+ /** Get tensor of a given id from the pack
+ *
+ * @param[in] slot_id Slot identification of tensor to extract
+ *
+ * @return The pointer to the tensor if it exists and is non-const, else nullptr
+ */
+ arm_compute::ITensor *get_tensor(int32_t slot_id);
+ /** Get legacy tensor pack
+ *
+ * @return Legacy tensor pack
+ */
+ arm_compute::ITensorPack &get_tensor_pack();
+
+private:
+ arm_compute::ITensorPack _pack; /**< Pack that currently redirects to the existing TensorPack */
+};
+
+/** Extract internal representation of a TensorPack
+ *
+ * @param[in] pack Opaque tensor pack pointer
+ *
+ * @return The internal representation as a TensorPack
+ */
+inline TensorPack *get_internal(AclTensorPack pack)
+{
+ return static_cast<TensorPack *>(pack);
+}
+
+namespace detail
+{
+/** Check if an internal TensorPack is valid
+ *
+ * @param[in] pack Internal tensor pack to check
+ *
+ * @return A status code
+ */
+inline StatusCode validate_internal_pack(const TensorPack *pack)
+{
+ if(pack == nullptr || !pack->is_valid())
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[TensorPack]: Invalid tensor pack object");
+ return StatusCode::InvalidArgument;
+ }
+ return StatusCode::Success;
+}
+} // namespace detail
+} // namespace arm_compute
+#endif /* SRC_COMMON_ITENSORPACK_H_ */
diff --git a/src/common/Types.h b/src/common/Types.h
index 60a11b04ec..ba07b51d55 100644
--- a/src/common/Types.h
+++ b/src/common/Types.h
@@ -52,5 +52,10 @@ enum class ExecutionMode
FastRerun = AclPreferFastRerun,
FastStart = AclPreferFastStart,
};
+
+enum class ImportMemoryType
+{
+ HostPtr = AclImportMemoryType::AclHostPtr
+};
} // namespace arm_compute
#endif /* SRC_COMMON_TYPES_H_ */
diff --git a/src/common/utils/LegacySupport.cpp b/src/common/utils/LegacySupport.cpp
new file mode 100644
index 0000000000..5981c657bd
--- /dev/null
+++ b/src/common/utils/LegacySupport.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+namespace
+{
+DataType data_type_mapper(AclDataType data_type)
+{
+ switch(data_type)
+ {
+ case AclDataType::AclFloat32:
+ return DataType::F32;
+ case AclDataType::AclFloat16:
+ return DataType::F16;
+ case AclDataType::AclBFloat16:
+ return DataType::BFLOAT16;
+ default:
+ return DataType::UNKNOWN;
+ ;
+ }
+}
+
+TensorShape tensor_shape_mapper(int32_t ndims, int32_t *shape)
+{
+ TensorShape legacy_shape{};
+ for(int32_t d = 0; d < ndims; ++d)
+ {
+ legacy_shape.set(d, shape[d], false);
+ }
+ return legacy_shape;
+}
+} // namespace
+
+TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc)
+{
+ TensorInfo legacy_desc;
+ legacy_desc.init(tensor_shape_mapper(desc.ndims, desc.shape), 1, data_type_mapper(desc.data_type));
+ return legacy_desc;
+}
+} // namespace detail
+} // namespace arm_compute
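A sketch of how this helper bridges a new-API descriptor to the legacy runtime types (the shape and data type are illustrative; the header path is the one added in this patch):

    #include "src/common/utils/LegacySupport.h"

    // Build a descriptor and convert it to a legacy TensorInfo.
    int32_t             shape[3] = { 224, 224, 3 };
    AclTensorDescriptor desc{ 3, shape, AclFloat32, nullptr, 0 };

    arm_compute::TensorInfo info = arm_compute::detail::convert_to_legacy_tensor_info(desc);
    // info now describes a (224, 224, 3) tensor with DataType::F32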
diff --git a/src/common/utils/LegacySupport.h b/src/common/utils/LegacySupport.h
new file mode 100644
index 0000000000..37329b747c
--- /dev/null
+++ b/src/common/utils/LegacySupport.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_LEGACY_SUPPORT_H
+#define SRC_COMMON_LEGACY_SUPPORT_H
+
+#include "arm_compute/Acl.h"
+#include "arm_compute/core/TensorInfo.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+/** Convert a descriptor to a legacy format one
+ *
+ * @param[in] desc Descriptor to convert
+ *
+ * @return Legacy tensor meta-data
+ */
+TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc);
+} // namespace detail
+} // namespace arm_compute
+
+#endif /* SRC_COMMON_LEGACY_SUPPORT_H */
diff --git a/src/common/utils/Log.h b/src/common/utils/Log.h
index 0d6a50da92..496ee74a16 100644
--- a/src/common/utils/Log.h
+++ b/src/common/utils/Log.h
@@ -77,4 +77,15 @@
ARM_COMPUTE_LOG_MSG("ComputeLibrary", arm_compute::logging::LogLevel::ERROR, msg); \
} while(false)
+/** Log an error message to the logger with function name before the message
+ *
+ * @param[in] msg Message to log
+ */
+#define ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL(msg) \
+ do \
+ { \
+ ARM_COMPUTE_CREATE_ACL_LOGGER(); \
+ ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME("ComputeLibrary", arm_compute::logging::LogLevel::ERROR, msg); \
+ } while(false)
+
#endif /* SRC_COMMON_LOG_H */
diff --git a/src/common/utils/Utils.h b/src/common/utils/Utils.h
index 9602c32f62..87be9df509 100644
--- a/src/common/utils/Utils.h
+++ b/src/common/utils/Utils.h
@@ -44,6 +44,22 @@ constexpr E as_cenum(SE v) noexcept
{
return static_cast<E>(static_cast<std::underlying_type_t<SE>>(v));
}
+
+/** Convert plain old enumeration to a strongly typed enum
+ *
+ * @tparam SE Strongly typed resulting enum
+ * @tparam E Plain old C enum
+ *
+ * @param[in] val Value to convert
+ *
+ * @return A corresponding strongly typed enumeration
+ */
+template <typename SE, typename E>
+constexpr SE as_enum(E val) noexcept
+{
+ return static_cast<SE>(val);
+}
+
/** Check if the given value is in the given enum value list
*
* @tparam E The type of the enum
diff --git a/src/cpu/CpuContext.cpp b/src/cpu/CpuContext.cpp
index 6ff35602f3..d62c1b6310 100644
--- a/src/cpu/CpuContext.cpp
+++ b/src/cpu/CpuContext.cpp
@@ -24,6 +24,7 @@
#include "src/cpu/CpuContext.h"
#include "arm_compute/core/CPP/CPPTypes.h"
+#include "src/cpu/CpuTensor.h"
#include "src/runtime/CPUUtils.h"
#include <cstdlib>
@@ -185,5 +186,15 @@ AllocatorWrapper &CpuContext::allocator()
{
return _allocator;
}
+
+ITensorV2 *CpuContext::create_tensor(const AclTensorDescriptor &desc, bool allocate)
+{
+ CpuTensor *tensor = new CpuTensor(this, desc);
+ if(tensor != nullptr && allocate)
+ {
+ tensor->allocate();
+ }
+ return tensor;
+}
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/CpuContext.h b/src/cpu/CpuContext.h
index 81bab97b8e..d2062e4bdd 100644
--- a/src/cpu/CpuContext.h
+++ b/src/cpu/CpuContext.h
@@ -67,6 +67,9 @@ public:
*/
AllocatorWrapper &allocator();
+ // Inherited methods overridden
+ ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) override;
+
private:
AllocatorWrapper _allocator;
CpuCapabilities _caps;
diff --git a/src/cpu/CpuTensor.cpp b/src/cpu/CpuTensor.cpp
new file mode 100644
index 0000000000..79dc812c58
--- /dev/null
+++ b/src/cpu/CpuTensor.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/CpuTensor.h"
+
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuTensor::CpuTensor(IContext *ctx, const AclTensorDescriptor &desc)
+ : ITensorV2(ctx), _legacy_tensor()
+{
+ ARM_COMPUTE_ASSERT((ctx != nullptr) && (ctx->type() == Target::Cpu));
+ _legacy_tensor = std::make_unique<Tensor>();
+ _legacy_tensor->allocator()->init(arm_compute::detail::convert_to_legacy_tensor_info(desc));
+}
+
+void *CpuTensor::map()
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+ if(_legacy_tensor == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[CpuTensor:map]: Backing tensor does not exist!");
+ return nullptr;
+ }
+ return _legacy_tensor->buffer();
+}
+
+StatusCode CpuTensor::allocate()
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+ _legacy_tensor->allocator()->allocate();
+ return StatusCode::Success;
+}
+
+StatusCode CpuTensor::unmap()
+{
+ // No-op
+ return StatusCode::Success;
+}
+
+StatusCode CpuTensor::import(void *handle, ImportMemoryType type)
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+ ARM_COMPUTE_UNUSED(type);
+
+ const auto st = _legacy_tensor->allocator()->import_memory(handle);
+ return bool(st) ? StatusCode::Success : StatusCode::RuntimeError;
+}
+
+arm_compute::ITensor *CpuTensor::tensor()
+{
+ return _legacy_tensor.get();
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/CpuTensor.h b/src/cpu/CpuTensor.h
new file mode 100644
index 0000000000..a46f1a26cb
--- /dev/null
+++ b/src/cpu/CpuTensor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CPU_CPUTENSOR_H
+#define SRC_CPU_CPUTENSOR_H
+
+#include "src/common/ITensor.h"
+
+#include "arm_compute/runtime/Tensor.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** CPU tensor implementation class */
+class CpuTensor final : public ITensorV2
+{
+public:
+ /** Construct a new Cpu Tensor object
+ *
+ * @param[in] ctx Context to be used
+ * @param[in] desc Tensor descriptor
+ */
+ CpuTensor(IContext *ctx, const AclTensorDescriptor &desc);
+ /** Allocates tensor
+ *
+ * @return StatusCode A status code
+ */
+ StatusCode allocate();
+
+ // Inherited functions overridden
+ void *map() override;
+ StatusCode unmap() override;
+ arm_compute::ITensor *tensor() override;
+ StatusCode import(void *handle, ImportMemoryType type) override;
+
+private:
+ std::unique_ptr<Tensor> _legacy_tensor;
+};
+} // namespace cpu
+} // namespace arm_compute
+
+#endif /* SRC_CPU_CPUTENSOR_H */ \ No newline at end of file
diff --git a/src/gpu/cl/ClContext.cpp b/src/gpu/cl/ClContext.cpp
index 2bd8b8dd0e..2e04e1d593 100644
--- a/src/gpu/cl/ClContext.cpp
+++ b/src/gpu/cl/ClContext.cpp
@@ -23,6 +23,8 @@
*/
#include "src/gpu/cl/ClContext.h"
+#include "src/gpu/cl/ClTensor.h"
+
namespace arm_compute
{
namespace gpu
@@ -33,8 +35,13 @@ namespace
{
mlgo::MLGOHeuristics populate_mlgo(const char *filename)
{
+ bool status = false;
mlgo::MLGOHeuristics heuristics;
- bool status = heuristics.reload_from_file(filename);
+
+ if(filename != nullptr)
+ {
+ status = heuristics.reload_from_file(filename);
+ }
return status ? std::move(heuristics) : mlgo::MLGOHeuristics();
}
} // namespace
@@ -69,6 +76,16 @@ bool ClContext::set_cl_ctx(::cl::Context ctx)
}
return false;
}
+
+ITensorV2 *ClContext::create_tensor(const AclTensorDescriptor &desc, bool allocate)
+{
+ ClTensor *tensor = new ClTensor(this, desc);
+ if(tensor != nullptr && allocate)
+ {
+ tensor->allocate();
+ }
+ return tensor;
+}
} // namespace opencl
} // namespace gpu
} // namespace arm_compute
diff --git a/src/gpu/cl/ClContext.h b/src/gpu/cl/ClContext.h
index e3f16b1c3f..dd6699a0c9 100644
--- a/src/gpu/cl/ClContext.h
+++ b/src/gpu/cl/ClContext.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_GPU_CL_CPUCONTEXT_H
-#define SRC_GPU_CL_CPUCONTEXT_H
+#ifndef SRC_GPU_CLCONTEXT_H
+#define SRC_GPU_CLCONTEXT_H
#include "src/common/IContext.h"
#include "src/runtime/CL/mlgo/MLGOHeuristics.h"
@@ -65,6 +65,9 @@ public:
*/
bool set_cl_ctx(::cl::Context ctx);
+ // Inherited methods overridden
+ ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) override;
+
private:
mlgo::MLGOHeuristics _mlgo_heuristics;
::cl::Context _cl_context;
@@ -73,4 +76,4 @@ private:
} // namespace gpu
} // namespace arm_compute
-#endif /* SRC_GPU_CL_CPUCONTEXT_H */ \ No newline at end of file
+#endif /* SRC_GPU_CLCONTEXT_H */ \ No newline at end of file
diff --git a/src/gpu/cl/ClTensor.cpp b/src/gpu/cl/ClTensor.cpp
new file mode 100644
index 0000000000..db2081c4ed
--- /dev/null
+++ b/src/gpu/cl/ClTensor.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/ClTensor.h"
+
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+ClTensor::ClTensor(IContext *ctx, const AclTensorDescriptor &desc)
+ : ITensorV2(ctx), _legacy_tensor()
+{
+ ARM_COMPUTE_ASSERT((ctx != nullptr) && (ctx->type() == Target::GpuOcl));
+ _legacy_tensor = std::make_unique<CLTensor>();
+ _legacy_tensor->allocator()->init(arm_compute::detail::convert_to_legacy_tensor_info(desc));
+}
+
+void *ClTensor::map()
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+ if(_legacy_tensor == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:map]: Backing tensor does not exist!");
+ return nullptr;
+ }
+
+ _legacy_tensor->map();
+ return _legacy_tensor->buffer();
+}
+
+StatusCode ClTensor::unmap()
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+ if(_legacy_tensor == nullptr)
+ {
+ ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:unmap]: Backing tensor does not exist!");
+ return StatusCode::RuntimeError;
+ }
+ _legacy_tensor->unmap();
+
+ return StatusCode::Success;
+}
+
+StatusCode ClTensor::allocate()
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+ _legacy_tensor->allocator()->allocate();
+ return StatusCode::Success;
+}
+
+StatusCode ClTensor::import(void *handle, ImportMemoryType type)
+{
+ ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+ ARM_COMPUTE_UNUSED(type, handle);
+
+ return StatusCode::Success;
+}
+
+arm_compute::ITensor *ClTensor::tensor()
+{
+ return _legacy_tensor.get();
+}
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
diff --git a/src/gpu/cl/ClTensor.h b/src/gpu/cl/ClTensor.h
new file mode 100644
index 0000000000..4188f622d6
--- /dev/null
+++ b/src/gpu/cl/ClTensor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_GPU_CLTENSOR_H
+#define SRC_GPU_CLTENSOR_H
+
+#include "src/common/ITensor.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+/** OpenCL tensor implementation class */
+class ClTensor final : public ITensorV2
+{
+public:
+ /** Construct a new OpenCL Tensor object
+ *
+ * @param[in] ctx Context to be used
+ * @param[in] desc Tensor descriptor
+ */
+ ClTensor(IContext *ctx, const AclTensorDescriptor &desc);
+ /** Allocates tensor
+ *
+ * @return StatusCode A status code
+ */
+ StatusCode allocate();
+
+ // Inherited functions overridden
+ void *map() override;
+ StatusCode unmap() override;
+ arm_compute::ITensor *tensor() override;
+ StatusCode import(void *handle, ImportMemoryType type) override;
+
+private:
+ std::unique_ptr<CLTensor> _legacy_tensor;
+};
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
+
+#endif /* SRC_GPU_CLTENSOR_H */ \ No newline at end of file
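Illustrative usage sketch (not part of the patch): the same wrapper calls against an OpenCL context, mirroring the MapAllocatedTensor fixture added further down. Here ClTensor::map() first maps the underlying CLTensor into host address space before returning the buffer, and unmap() releases that mapping again, so each map() is paired with an unmap() before the tensor is used on the device; note that ClTensor::import() is still a stub in this change. The allocating constructor is assumed to route through ClContext::create_tensor() with allocate set; the shape is illustrative only.

#include "arm_compute/Acl.hpp"

int main()
{
    acl::StatusCode err = acl::StatusCode::Success;
    acl::Context    ctx(acl::Target::GpuOcl, &err);   // backed by gpu::opencl::ClContext

    // Allocating constructor, as used by the MapAllocatedTensor fixture
    acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), &err);

    void *host_ptr = tensor.map();   // ClTensor::map(): CLTensor::map() + buffer()
    // ... read or write host_ptr while the mapping is valid ...
    tensor.unmap(host_ptr);          // ClTensor::unmap(): CLTensor::unmap()
    return (err == acl::StatusCode::Success && host_ptr != nullptr) ? 0 : 1;
}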
diff --git a/tests/validation/cpu/unit/Context.cpp b/tests/validation/cpu/unit/Context.cpp
index bf2a02df5d..519a7bee5f 100644
--- a/tests/validation/cpu/unit/Context.cpp
+++ b/tests/validation/cpu/unit/Context.cpp
@@ -21,11 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/validation/Validation.h"
-
-#include "arm_compute/Acl.hpp"
+#include "tests/validation/fixtures/UNIT/Context.h"
#include "src/cpu/CpuContext.h"
@@ -78,91 +74,17 @@ TEST_CASE(CreateContextWithInvalidOptions, framework::DatasetMode::ALL)
ARM_COMPUTE_ASSERT(ctx == nullptr);
}
-/** Test-case for AclDestroyContext
- *
- * Validate that AclDestroyContext behaves as expected when invalid inputs as context are given
- *
- * Test Steps:
- * - Call AclDestroyContext with null context
- * - Confirm that AclInvalidArgument is reported
- * - Call AclDestroyContext on empty array
- * - Confirm that AclInvalidArgument is reported
- * - Call AclDestroyContext on an ACL object other than AclContext
- * - Confirm that AclInvalidArgument is reported
- * - Confirm that context is still nullptr
- */
-TEST_CASE(DestroyInvalidContext, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(DestroyInvalidContext, DestroyInvalidContextFixture<AclTarget::AclCpu>, framework::DatasetMode::ALL)
{
- AclContext ctx = nullptr;
- std::array<char, 256> empty_array{};
- AclContext valid_ctx = nullptr;
- ARM_COMPUTE_ASSERT(AclCreateContext(&valid_ctx, AclCpu, nullptr) == AclStatus::AclSuccess);
- ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclInvalidArgument);
- ARM_COMPUTE_ASSERT(AclDestroyContext(reinterpret_cast<AclContext>(empty_array.data())) == AclStatus::AclInvalidArgument);
- ARM_COMPUTE_ASSERT(ctx == nullptr);
- ARM_COMPUTE_ASSERT(AclDestroyContext(valid_ctx) == AclStatus::AclSuccess);
}
-
-/** Test-case for AclCreateContext and AclDestroy Context
- *
- * Validate that AclCreateContext can create and destroy a context
- *
- * Test Steps:
- * - Call AclCreateContext with valid target
- * - Confirm that context is not nullptr and error code is AclSuccess
- * - Destroy context
- * - Confirm that AclSuccess is reported
- */
-TEST_CASE(SimpleContextCApi, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(SimpleContextCApi, SimpleContextCApiFixture<AclTarget::AclCpu>, framework::DatasetMode::ALL)
{
- AclContext ctx = nullptr;
- ARM_COMPUTE_ASSERT(AclCreateContext(&ctx, AclCpu, nullptr) == AclStatus::AclSuccess);
- ARM_COMPUTE_ASSERT(ctx != nullptr);
- ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclSuccess);
}
-
-/** Test-case for Context from the C++ interface
- *
- * Test Steps:
- * - Create a Context obejct
- * - Confirm that StatusCode::Success is reported
- * - Confirm that equality operator works
- * - Confirm that inequality operator works
- */
-TEST_CASE(SimpleContextCppApi, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(SimpleContextCppApi, SimpleContextCppApiFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
{
- acl::StatusCode status = acl::StatusCode::Success;
- acl::Context ctx(acl::Target::Cpu, &status);
- ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
-
- auto ctx_eq = ctx;
- ARM_COMPUTE_ASSERT(ctx_eq == ctx);
-
- acl::Context ctx_ienq(acl::Target::Cpu, &status);
- ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
- ARM_COMPUTE_ASSERT(ctx_ienq != ctx);
}
-
-/** Test-case for CpuCapabilities
- *
- * Validate that AclCreateContext can create/destroy multiple contexts with different options
- *
- * Test Steps:
- * - Call AclCreateContext with different targets
- * - Confirm that AclSuccess is reported
- * - Destroy all contexts
- * - Confirm that AclSuccess is reported
- */
-TEST_CASE(MultipleContexts, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(MultipleContexts, MultipleContextsFixture<AclTarget::AclCpu>, framework::DatasetMode::ALL)
{
- const unsigned int num_tests = 5;
- std::array<AclContext, num_tests> ctxs{};
- for(unsigned int i = 0; i < num_tests; ++i)
- {
- ARM_COMPUTE_ASSERT(AclCreateContext(&ctxs[i], AclTarget::AclCpu, nullptr) == AclStatus::AclSuccess);
- ARM_COMPUTE_ASSERT(ctxs[i] != nullptr);
- ARM_COMPUTE_ASSERT(AclDestroyContext(ctxs[i]) == AclStatus::AclSuccess);
- }
}
/** Test-case for CpuCapabilities
@@ -176,9 +98,9 @@ TEST_CASE(MultipleContexts, framework::DatasetMode::ALL)
*/
TEST_CASE(CpuCapabilities, framework::DatasetMode::ALL)
{
- AclContextOptions opts = acl_default_ctx_options;
- opts.capabilities = AclCpuCapabilitiesDot | AclCpuCapabilitiesMmlaInt8 | AclCpuCapabilitiesSve2;
- arm_compute::cpu::CpuContext ctx(&opts);
+ acl::Context::Options opts;
+ opts.copts.capabilities = AclCpuCapabilitiesDot | AclCpuCapabilitiesMmlaInt8 | AclCpuCapabilitiesSve2;
+ arm_compute::cpu::CpuContext ctx(&opts.copts);
ARM_COMPUTE_ASSERT(ctx.capabilities().dot == true);
ARM_COMPUTE_ASSERT(ctx.capabilities().mmla_int8 == true);
diff --git a/tests/validation/cpu/unit/Tensor.cpp b/tests/validation/cpu/unit/Tensor.cpp
new file mode 100644
index 0000000000..8fad7fa3ae
--- /dev/null
+++ b/tests/validation/cpu/unit/Tensor.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "tests/validation/fixtures/UNIT/Tensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CPU)
+TEST_SUITE(UNIT)
+TEST_SUITE(Tensor)
+
+FIXTURE_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MapNotAllocatedTensor, MapNotAllocatedTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(ImportMemory, ImportMemoryFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+
+TEST_SUITE_END() // Tensor
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CPU
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/cpu/unit/TensorPack.cpp b/tests/validation/cpu/unit/TensorPack.cpp
new file mode 100644
index 0000000000..5436ceb0c1
--- /dev/null
+++ b/tests/validation/cpu/unit/TensorPack.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "tests/validation/fixtures/UNIT/TensorPack.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CPU)
+TEST_SUITE(UNIT)
+TEST_SUITE(TensorPack)
+
+FIXTURE_TEST_CASE(CreateTensorPackWithInvalidContext, CreateTensorPackWithInvalidContextFixture, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(DestroyInvalidTensorPack, DestroyInvalidTensorPackFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(AddInvalidObjectToTensorPack, AddInvalidObjectToTensorPackFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(SimpleTensorPack, SimpleTensorPackFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MultipleTensorsInPack, MultipleTensorsInPackFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
+{
+}
+
+TEST_SUITE_END() // TensorPack
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CPU
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/UNIT/Context.h b/tests/validation/fixtures/UNIT/Context.h
new file mode 100644
index 0000000000..afa49e00e0
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/Context.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_CONTEXT
+#define ARM_COMPUTE_TEST_UNIT_CONTEXT
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test-case for AclDestroyContext
+ *
+ * Validate that AclDestroyContext behaves as expected when invalid inputs as context are given
+ *
+ * Test Steps:
+ * - Call AclDestroyContext with null context
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyContext on empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyContext on an ACL object other than AclContext
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that context is still nullptr
+ */
+template <AclTarget Target>
+class DestroyInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclContext ctx = nullptr;
+ std::array<char, 256> empty_array{};
+ AclContext valid_ctx = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateContext(&valid_ctx, Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(reinterpret_cast<AclContext>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(ctx == nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(valid_ctx) == AclStatus::AclSuccess);
+ };
+};
+
+/** Test-case for AclCreateContext and AclDestroyContext
+ *
+ * Validate that AclCreateContext can create and destroy a context through the C API
+ *
+ * Test Steps:
+ * - Call AclCreateContext with valid target
+ * - Confirm that context is not nullptr and error code is AclSuccess
+ * - Destroy context
+ * - Confirm that AclSuccess is reported
+ */
+template <AclTarget Target>
+class SimpleContextCApiFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclContext ctx = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateContext(&ctx, Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(ctx != nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclSuccess);
+ };
+};
+
+/** Test-case for Context from the C++ interface
+ *
+ * Test Steps:
+ * - Create a Context object
+ * - Confirm that StatusCode::Success is reported
+ * - Confirm that equality operator works
+ * - Confirm that inequality operator works
+ */
+template <acl::Target Target>
+class SimpleContextCppApiFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode status = acl::StatusCode::Success;
+ acl::Context ctx(Target, &status);
+ ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
+
+ auto ctx_eq = ctx;
+ ARM_COMPUTE_ASSERT(ctx_eq == ctx);
+
+ acl::Context ctx_ienq(Target, &status);
+ ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(ctx_ienq != ctx);
+ };
+};
+
+/** Test-case for multiple contexts
+ *
+ * Validate that AclCreateContext can create/destroy multiple contexts with different options
+ *
+ * Test Steps:
+ * - Call AclCreateContext with different targets
+ * - Confirm that AclSuccess is reported
+ * - Destroy all contexts
+ * - Confirm that AclSuccess is reported
+ */
+template <AclTarget Target>
+class MultipleContextsFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ const unsigned int num_tests = 5;
+ std::array<AclContext, num_tests> ctxs{};
+ for(unsigned int i = 0; i < num_tests; ++i)
+ {
+ ARM_COMPUTE_ASSERT(AclCreateContext(&ctxs[i], Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(ctxs[i] != nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctxs[i]) == AclStatus::AclSuccess);
+ }
+ };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_CONTEXT */
diff --git a/tests/validation/fixtures/UNIT/Tensor.h b/tests/validation/fixtures/UNIT/Tensor.h
new file mode 100644
index 0000000000..acd10c91fe
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/Tensor.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_TENSOR
+#define ARM_COMPUTE_TEST_UNIT_TENSOR
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test case for AclCreateTensor
+ *
+ * Validate that AclCreateTensor behaves as expected with an invalid context
+ *
+ * Test Steps:
+ * - Call AclCreateTensor with an invalid context
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor is still nullptr
+ */
+class CreateTensorWithInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclTensor tensor = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, nullptr, nullptr, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test-case for AclCreateTensor
+ *
+ * Validate that AclCreateTensor behaves as expected with an invalid descriptor
+ *
+ * Test Steps:
+ * - Call AclCreateTensor with valid context but invalid descriptor
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that tensor is still nullptr
+ */
+template <acl::Target Target>
+class CreateTensorWithInvalidDescriptorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ AclTensor tensor = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), nullptr, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+
+ // Check invalid data type
+ AclTensorDescriptor invalid_desc;
+ invalid_desc.ndims = 4;
+ invalid_desc.data_type = static_cast<AclDataType>(-1);
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+
+ // Check invalid number of dimensions
+ invalid_desc.data_type = AclDataType::AclFloat32;
+ invalid_desc.ndims = 15;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test case for AclDestroyTensor
+ *
+ * Validate that AclDestroyTensor behaves as expected when an invalid tensor is given
+ *
+ * Test Steps:
+ * - Call AclDestroyTensor with null tensor
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensor on empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensor on an ACL object other than AclTensor
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that tensor is still nullptr
+ */
+template <acl::Target Target>
+class DestroyInvalidTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ std::array<char, 256> empty_array{};
+ AclTensor tensor = nullptr;
+
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(tensor) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(ctx.get())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test case for AclCreateTensor
+ *
+ * Validate that a tensor can be created successfully
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class SimpleTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ };
+};
+
+/** Test case for AclTensor
+ *
+ * Validate that multiple tensors can be created successfully
+ * Also acts as a stress test for potential memory leaks
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a lot of tensors
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class TensorStressFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ const unsigned int num_tensors = 1024;
+ for(unsigned int i = 0; i < num_tensors; ++i)
+ {
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 1024, 1024 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ }
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that map on an invalid object fails
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Pass an invalid object for mapping
+ * - Confirm that AclInvalidArgument is returned
+ */
+template <acl::Target Target>
+class MapInvalidTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ void *handle = nullptr;
+ ARM_COMPUTE_ASSERT(AclMapTensor(reinterpret_cast<AclTensor>(ctx.get()), &handle) == AclStatus::AclInvalidArgument);
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that mapping a tensor that has not been allocated returns nullptr
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor without allocating
+ * - Map tensor
+ * - Check that mapping is nullptr
+ */
+template <acl::Target Target>
+class MapNotAllocatedTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), false /* allocate */, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(tensor.map() == nullptr);
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that mapping an allocated tensor returns a non-nullptr value
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor while allocating
+ * - Map tensor
+ * - Check that mapping is not nullptr
+ */
+template <acl::Target Target>
+class MapAllocatedTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ void *handle = tensor.map();
+ ARM_COMPUTE_ASSERT(handle != nullptr);
+ ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success);
+ };
+};
+
+/** Test case for AclTensorImport
+ *
+ * Validate that externally allocated memory can be successfully imported
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor without allocating
+ * - Allocate external memory
+ * - Import memory to the tensor
+ * - Check that imported pointer matches
+ */
+template <acl::Target Target>
+class ImportMemoryFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ const int32_t size = 8;
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ size }, acl::DataType::Float32), false /* allocate */, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ std::vector<float> data(size);
+ ARM_COMPUTE_ASSERT(tensor.import(data.data(), acl::ImportType::Host) == acl::StatusCode::Success);
+
+ void *handle = tensor.map();
+ ARM_COMPUTE_ASSERT(handle == data.data());
+ ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_TENSOR */
diff --git a/tests/validation/fixtures/UNIT/TensorPack.h b/tests/validation/fixtures/UNIT/TensorPack.h
new file mode 100644
index 0000000000..98bffb1665
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/TensorPack.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_TENSORPACK
+#define ARM_COMPUTE_TEST_UNIT_TENSORPACK
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test case for AclCreateTensorPack
+ *
+ * Validate that AclCreateTensorPack behaves as expected with an invalid context
+ *
+ * Test Steps:
+ * - Call AclCreateTensorPack with an invalid context
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor pack is still nullptr
+ */
+class CreateTensorPackWithInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclTensorPack pack = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensorPack(&pack, nullptr) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(pack == nullptr);
+ };
+};
+
+/** Test case for AclDestroyTensorPack
+ *
+ * Validate that AclDestroyTensorPack behaves as expected when an invalid tensor pack is given
+ *
+ * Test Steps:
+ * - Call AclDestroyTensorPack with null tensor pack
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensorPack on empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensorPack on an ACL object other than AclTensorPack
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that tensor pack is still nullptr
+ */
+template <acl::Target Target>
+class DestroyInvalidTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ std::array<char, 256> empty_array{};
+ AclTensorPack pack = nullptr;
+
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(pack) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(ctx.get())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(pack == nullptr);
+ };
+};
+
+/** Test case for AclPackTensor
+ *
+ * Validate that AclPackTensor behaves as expected when an invalid object is passed for packing
+ *
+ * Test Steps:
+ * - Create a valid TensorPack
+ * - Try to pack an empty object
+ * - Confirm that AclInvalidArgument is reported
+ * - Try to pack another API object other than tensor
+ * - Confirm that AclInvalidArgument is reported
+ */
+template <acl::Target Target>
+class AddInvalidObjectToTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ auto err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::TensorPack pack(ctx, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ auto status = AclPackTensor(pack.get(),
+ reinterpret_cast<AclTensor>(ctx.get()),
+ AclTensorSlot::AclSrc);
+ ARM_COMPUTE_ASSERT(status == AclInvalidArgument);
+
+ status = AclPackTensor(pack.get(), nullptr, AclTensorSlot::AclSrc);
+ ARM_COMPUTE_ASSERT(status == AclInvalidArgument);
+ };
+};
+
+/** Test case for AclPackTensor
+ *
+ * Validate that a tensor can be added successfully to the TensorPack
+ *
+ * Test Steps:
+ * - Create a valid tensor pack
+ * - Create a valid tensor
+ * - Add tensor to the tensor pack
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class SimpleTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ acl::TensorPack pack(ctx);
+ acl::Tensor t(ctx, acl::TensorDescriptor({ 3, 3, 5, 7 }, acl::DataType::Float32));
+
+ ARM_COMPUTE_ASSERT(pack.add(t, AclTensorSlot::AclSrc) == acl::StatusCode::Success);
+ };
+};
+
+/** Test case for AclPackTensor
+ *
+ * Validate that multiple tensors can be added successfully to the TensorPack
+ *
+ * Test Steps:
+ * - Create a valid tensor pack
+ * - Create a list of valid tensors
+ * - Add tensors to the tensor pack
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class MultipleTensorsInPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ acl::TensorPack pack(ctx);
+
+ const acl::TensorDescriptor desc(acl::TensorDescriptor({ 3, 3, 5, 7 }, acl::DataType::Float32));
+ const size_t num_tensors = 256;
+
+ std::vector<acl::Tensor> tensors;
+ for(unsigned int i = 0; i < num_tensors; ++i)
+ {
+ auto err = acl::StatusCode::Success;
+ tensors.emplace_back(acl::Tensor(ctx, desc, &err));
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(pack.add(tensors.back(), static_cast<int32_t>(AclTensorSlot::AclSrcVec) + i) == acl::StatusCode::Success);
+ }
+ };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_TENSORPACK */
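Illustrative usage sketch (not part of the patch): a TensorPack collects tensors under slot identifiers so that an operator can later look them up as inputs/outputs. The sketch follows the SimpleTensorPack and MultipleTensorsInPack fixtures above; the slot values (AclSrc, AclSrcVec + i) come from AclTensorSlot, and the shapes and counts are illustrative only.

#include "arm_compute/Acl.hpp"

#include <cstdint>
#include <vector>

int main()
{
    acl::Context    ctx(acl::Target::Cpu);
    acl::TensorPack pack(ctx);

    const acl::TensorDescriptor desc({ 3, 3, 5, 7 }, acl::DataType::Float32);

    // A single tensor bound to a named slot
    acl::Tensor src(ctx, desc);
    pack.add(src, AclTensorSlot::AclSrc);

    // Several tensors bound to consecutive vector slots
    std::vector<acl::Tensor> inputs;
    for(int32_t i = 0; i < 4; ++i)
    {
        inputs.emplace_back(acl::Tensor(ctx, desc));
        pack.add(inputs.back(), static_cast<int32_t>(AclTensorSlot::AclSrcVec) + i);
    }
    return 0;
}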
diff --git a/tests/validation/gpu/unit/Context.cpp b/tests/validation/gpu/unit/Context.cpp
index 06b4a83925..523a0283a7 100644
--- a/tests/validation/gpu/unit/Context.cpp
+++ b/tests/validation/gpu/unit/Context.cpp
@@ -21,11 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/validation/Validation.h"
-
-#include "arm_compute/Acl.hpp"
+#include "tests/validation/fixtures/UNIT/Context.h"
#include "src/gpu/cl/ClContext.h"
@@ -41,66 +37,14 @@ TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(Context)
-/** Test-case for AclCreateContext and AclDestroy Context
- *
- * Validate that AclCreateContext can create and destroy a context
- *
- * Test Steps:
- * - Call AclCreateContext with valid target
- * - Confirm that context is not nullptr and error code is AclSuccess
- * - Destroy context
- * - Confirm that AclSuccess is reported
- */
-TEST_CASE(SimpleContextCApi, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(SimpleContextCApi, SimpleContextCApiFixture<AclTarget::AclGpuOcl>, framework::DatasetMode::ALL)
{
- AclContext ctx = nullptr;
- ARM_COMPUTE_ASSERT(AclCreateContext(&ctx, AclGpuOcl, nullptr) == AclStatus::AclSuccess);
- ARM_COMPUTE_ASSERT(ctx != nullptr);
- ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclSuccess);
}
-
-/** Test-case for Context from the C++ interface
- *
- * Test Steps:
- * - Create a Context obejct
- * - Confirm that StatusCode::Success is reported
- * - Confirm that equality operator works
- * - Confirm that inequality operator works
- */
-TEST_CASE(SimpleContextCppApi, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(SimpleContextCppApi, SimpleContextCppApiFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
{
- acl::StatusCode status = acl::StatusCode::Success;
- acl::Context ctx(acl::Target::GpuOcl, &status);
- ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
-
- auto ctx_eq = ctx;
- ARM_COMPUTE_ASSERT(ctx_eq == ctx);
-
- acl::Context ctx_ienq(acl::Target::GpuOcl, &status);
- ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
- ARM_COMPUTE_ASSERT(ctx_ienq != ctx);
}
-
-/** Test-case for CpuCapabilities
- *
- * Validate that AclCreateContext can create/destroy multiple contexts with different options
- *
- * Test Steps:
- * - Call AclCreateContext with different targets
- * - Confirm that AclSuccess is reported
- * - Destroy all contexts
- * - Confirm that AclSuccess is reported
- */
-TEST_CASE(MultipleContexts, framework::DatasetMode::ALL)
+FIXTURE_TEST_CASE(MultipleContexts, MultipleContextsFixture<AclTarget::AclGpuOcl>, framework::DatasetMode::ALL)
{
- const unsigned int num_tests = 5;
- std::array<AclContext, num_tests> ctxs{};
- for(unsigned int i = 0; i < num_tests; ++i)
- {
- ARM_COMPUTE_ASSERT(AclCreateContext(&ctxs[i], AclTarget::AclGpuOcl, nullptr) == AclStatus::AclSuccess);
- ARM_COMPUTE_ASSERT(ctxs[i] != nullptr);
- ARM_COMPUTE_ASSERT(AclDestroyContext(ctxs[i]) == AclStatus::AclSuccess);
- }
}
/** Test-case for MLGO kernel configuration file
@@ -148,9 +92,9 @@ TEST_CASE(CheckMLGO, framework::DatasetMode::ALL)
ofs << mlgo_str;
ofs.close();
- AclContextOptions opts = acl_default_ctx_options;
- opts.kernel_config_file = mlgo_filename.c_str();
- arm_compute::gpu::opencl::ClContext ctx(&opts);
+ acl::Context::Options opts;
+ opts.copts.kernel_config_file = mlgo_filename.c_str();
+ arm_compute::gpu::opencl::ClContext ctx(&opts.copts);
const MLGOHeuristics &heuristics = ctx.mlgo();
diff --git a/tests/validation/gpu/unit/Tensor.cpp b/tests/validation/gpu/unit/Tensor.cpp
new file mode 100644
index 0000000000..f8278afe25
--- /dev/null
+++ b/tests/validation/gpu/unit/Tensor.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "tests/validation/fixtures/UNIT/Tensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(UNIT)
+TEST_SUITE(Tensor)
+
+FIXTURE_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+
+TEST_SUITE_END() // Tensor
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/gpu/unit/TensorPack.cpp b/tests/validation/gpu/unit/TensorPack.cpp
new file mode 100644
index 0000000000..b057db44ae
--- /dev/null
+++ b/tests/validation/gpu/unit/TensorPack.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "tests/validation/fixtures/UNIT/TensorPack.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(UNIT)
+TEST_SUITE(TensorPack)
+
+FIXTURE_TEST_CASE(CreateTensorPackWithInvalidContext, CreateTensorPackWithInvalidContextFixture, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(DestroyInvalidTensorPack, DestroyInvalidTensorPackFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(AddInvalidObjectToTensorPack, AddInvalidObjectToTensorPackFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(SimpleTensorPack, SimpleTensorPackFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+FIXTURE_TEST_CASE(MultipleTensorsInPack, MultipleTensorsInPackFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
+{
+}
+
+TEST_SUITE_END() // TensorPack
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute