author     Sang-Hoon Park <sang-hoon.park@arm.com>      2021-03-31 15:18:16 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2021-04-19 07:35:54 +0000
commit     c6fcfb4adc37a6cf09472168dc177234d4fabdfa (patch)
tree       b67afd4c8d1594053395394b24406334e66e0791
parent     fe56edb4fd7a620fea4b6002d87a9763bdf8791a (diff)
download   ComputeLibrary-c6fcfb4adc37a6cf09472168dc177234d4fabdfa.tar.gz
Add Tensor-related utilities to the new API
A couple of utility functions for retrieving information about tensors are added. They are placed in a separate header file for better grouping. Related test cases are also added.

Resolves: COMPMID-4376
Change-Id: I6bd09cbf60fddcf4fe651906982397afb0451392
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5405
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  Android.bp                                                    1
-rw-r--r--  arm_compute/Acl.h                                             1
-rw-r--r--  arm_compute/Acl.hpp                                          61
-rw-r--r--  arm_compute/AclUtils.h                                       63
-rw-r--r--  src/c/AclTensor.cpp                                          44
-rw-r--r--  src/c/AclTensorPack.cpp                                       2
-rw-r--r--  src/c/cl/AclOpenClExt.cpp                                     2
-rw-r--r--  src/common/ITensorV2.cpp                                     39
-rw-r--r--  src/common/ITensorV2.h (renamed from src/common/ITensor.h)  14
-rw-r--r--  src/common/TensorPack.cpp                                     2
-rw-r--r--  src/common/utils/LegacySupport.cpp                           53
-rw-r--r--  src/common/utils/LegacySupport.h                              7
-rw-r--r--  src/common/utils/Utils.h                                      4
-rw-r--r--  src/cpu/CpuTensor.cpp                                         2
-rw-r--r--  src/cpu/CpuTensor.h                                           4
-rw-r--r--  src/gpu/cl/ClTensor.cpp                                       2
-rw-r--r--  src/gpu/cl/ClTensor.h                                         4
-rw-r--r--  tests/validation/cpu/unit/Tensor.cpp                         47
-rw-r--r--  tests/validation/fixtures/UNIT/Tensor.h                     126
-rw-r--r--  tests/validation/gpu/unit/Tensor.cpp                         39
20 files changed, 452 insertions(+), 65 deletions(-)
diff --git a/Android.bp b/Android.bp
index 17281a49d1..3736ef754f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -57,6 +57,7 @@ cc_library_static {
"src/c/AclVersion.cpp",
"src/c/cl/AclOpenClExt.cpp",
"src/common/AllocatorWrapper.cpp",
+ "src/common/ITensorV2.cpp",
"src/common/TensorPack.cpp",
"src/common/utils/LegacySupport.cpp",
"src/core/AccessWindowAutoPadding.cpp",
diff --git a/arm_compute/Acl.h b/arm_compute/Acl.h
index 6958f60bfd..316407c02e 100644
--- a/arm_compute/Acl.h
+++ b/arm_compute/Acl.h
@@ -31,6 +31,7 @@ extern "C" {
/* Core headers */
#include "arm_compute/AclEntrypoints.h"
#include "arm_compute/AclTypes.h"
+#include "arm_compute/AclUtils.h"
#include "arm_compute/AclVersion.h"
#ifdef __cplusplus
diff --git a/arm_compute/Acl.hpp b/arm_compute/Acl.hpp
index a009894438..01f7179c2f 100644
--- a/arm_compute/Acl.hpp
+++ b/arm_compute/Acl.hpp
@@ -428,6 +428,20 @@ public:
_cdesc.strides = nullptr;
_cdesc.boffset = 0;
}
+ /** Constructor
+ *
+ * @param[in] desc C-type descriptor
+ */
+ explicit TensorDescriptor(const AclTensorDescriptor &desc)
+ {
+ _cdesc = desc;
+ _data_type = detail::as_enum<DataType>(desc.data_type);
+ _shape.reserve(desc.ndims);
+ for(int32_t d = 0; d < desc.ndims; ++d)
+ {
+ _shape.emplace_back(desc.shape[d]);
+ }
+ }
/** Get underlying C tensor descriptor
*
* @return Underlying structure
@@ -436,6 +450,29 @@ public:
{
return &_cdesc;
}
+ /** Operator to compare two TensorDescriptor objects
+ *
+ * @param[in] other The instance to compare against
+ *
+ * @return True if the two instances have the same shape and data type
+ */
+ bool operator==(const TensorDescriptor &other)
+ {
+ bool is_same = true;
+
+ is_same &= _data_type == other._data_type;
+ is_same &= _shape.size() == other._shape.size();
+
+ if(is_same)
+ {
+ for(uint32_t d = 0; d < _shape.size(); ++d)
+ {
+ is_same &= _shape[d] == other._shape[d];
+ }
+ }
+
+ return is_same;
+ }
private:
std::vector<int32_t> _shape{};
@@ -524,6 +561,30 @@ public:
report_status(st, "[Arm Compute Library] Failed to import external memory to tensor!");
return st;
}
+ /** Get the size of the tensor in bytes
+ *
+ * @note The size is not based on allocated memory, but on the information in the tensor's descriptor (dimensions, data type, etc.).
+ *
+ * @return The size of the tensor in bytes
+ */
+ uint64_t get_size()
+ {
+ uint64_t size{ 0 };
+ const auto st = detail::as_enum<StatusCode>(AclGetTensorSize(_object.get(), &size));
+ report_status(st, "[Arm Compute Library] Failed to get the size of the tensor");
+ return size;
+ }
+ /** Get the descriptor of this tensor
+ *
+ * @return The descriptor describing the characteristics of this tensor
+ */
+ TensorDescriptor get_descriptor()
+ {
+ AclTensorDescriptor desc;
+ const auto st = detail::as_enum<StatusCode>(AclGetTensorDescriptor(_object.get(), &desc));
+ report_status(st, "[Arm Compute Library] Failed to get the descriptor of the tensor");
+ return TensorDescriptor(desc);
+ }
};
/** Tensor pack class
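For reference, a minimal usage sketch of the new C++ accessors added above (not part of the patch; error handling is omitted for brevity, and the 24-byte figure follows from the 2x3 float32 descriptor used in the tests, i.e. 6 elements x 4 bytes):

    #include "arm_compute/Acl.hpp"
    #include <cstdint>

    int main()
    {
        acl::StatusCode err = acl::StatusCode::Success;
        acl::Context    ctx(acl::Target::Cpu, &err);

        // 2x3 float32 tensor: 6 elements * 4 bytes = 24 bytes
        acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);

        const uint64_t        size = tensor.get_size();      // expected: 24
        acl::TensorDescriptor desc = tensor.get_descriptor();

        // operator== compares shape and data type only
        const bool same = (desc == acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32));
        return (size == 24 && same) ? 0 : 1;
    }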
diff --git a/arm_compute/AclUtils.h b/arm_compute/AclUtils.h
new file mode 100644
index 0000000000..2e75772ee8
--- /dev/null
+++ b/arm_compute/AclUtils.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_ACLUTILS_H_
+#define ARM_COMPUTE_ACLUTILS_H_
+
+#include "arm_compute/AclTypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/** Get the size of an existing tensor in bytes
+ *
+ * @note The size is not based on allocated memory, but on the information in the tensor's descriptor (dimensions, data type, etc.).
+ *
+ * @param[in]  tensor The tensor of interest
+ * @param[out] size   The size of the tensor
+ *
+ * @return Status code
+ *
+ * - @ref AclSuccess if the function completed successfully
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclGetTensorSize(AclTensor tensor, uint64_t *size);
+
+/** Get the descriptor of an existing tensor
+ *
+ * @param[in]  tensor The tensor of interest
+ * @param[out] desc   The descriptor of the tensor
+ *
+ * @return Status code
+ *
+ * - @ref AclSuccess if the function completed successfully
+ * - @ref AclInvalidArgument if a given argument is invalid
+ */
+AclStatus AclGetTensorDescriptor(AclTensor tensor, AclTensorDescriptor *desc);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* ARM_COMPUTE_ACLUTILS_H_ */
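A rough sketch of how the two C entrypoints above might be called (not part of the patch; the helper name print_tensor_info is hypothetical, and the tensor is assumed to have been created earlier with AclCreateTensor):

    #include "arm_compute/Acl.h"
    #include <stdint.h>
    #include <stdio.h>

    /* Print basic information about an already-created tensor */
    void print_tensor_info(AclTensor tensor)
    {
        uint64_t size = 0;
        if(AclGetTensorSize(tensor, &size) != AclSuccess)
        {
            return;
        }

        AclTensorDescriptor desc;
        if(AclGetTensorDescriptor(tensor, &desc) != AclSuccess)
        {
            return;
        }

        /* desc.shape is allocated by the library; releasing it is left to the caller
           (see the note after src/common/utils/LegacySupport.cpp below) */
        printf("ndims: %d, size: %llu bytes\n", (int)desc.ndims, (unsigned long long)size);
    }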
diff --git a/src/c/AclTensor.cpp b/src/c/AclTensor.cpp
index 0d884b1ec3..8f6ce45628 100644
--- a/src/c/AclTensor.cpp
+++ b/src/c/AclTensor.cpp
@@ -22,11 +22,13 @@
* SOFTWARE.
*/
#include "arm_compute/AclEntrypoints.h"
-#include "src/common/ITensor.h"
+#include "arm_compute/AclUtils.h"
+#include "src/common/ITensorV2.h"
#include "src/common/utils/Macros.h"
namespace
{
+using namespace arm_compute;
/**< Maximum allowed dimensions by Compute Library */
constexpr int32_t max_allowed_dims = 6;
@@ -55,6 +57,12 @@ bool is_desc_valid(const AclTensorDescriptor &desc)
}
return true;
}
+
+StatusCode convert_and_validate_tensor(AclTensor tensor, ITensorV2 **internal_tensor)
+{
+ *internal_tensor = get_internal(tensor);
+ return detail::validate_internal_tensor(*internal_tensor);
+}
} // namespace
extern "C" AclStatus AclCreateTensor(AclTensor *external_tensor,
@@ -146,3 +154,37 @@ extern "C" AclStatus AclDestroyTensor(AclTensor external_tensor)
return AclSuccess;
}
+
+extern "C" AclStatus AclGetTensorSize(AclTensor tensor, uint64_t *size)
+{
+ using namespace arm_compute;
+
+ if(size == nullptr)
+ {
+ return AclStatus::AclInvalidArgument;
+ }
+
+ ITensorV2 *internal_tensor{ nullptr };
+ auto status = convert_and_validate_tensor(tensor, &internal_tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ *size = internal_tensor->get_size();
+ return utils::as_cenum<AclStatus>(status);
+}
+
+extern "C" AclStatus AclGetTensorDescriptor(AclTensor tensor, AclTensorDescriptor *desc)
+{
+ using namespace arm_compute;
+
+ if(desc == nullptr)
+ {
+ return AclStatus::AclInvalidArgument;
+ }
+
+ ITensorV2 *internal_tensor{ nullptr };
+ const auto status = convert_and_validate_tensor(tensor, &internal_tensor);
+ ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+ *desc = internal_tensor->get_descriptor();
+ return utils::as_cenum<AclStatus>(status);
+}
\ No newline at end of file
diff --git a/src/c/AclTensorPack.cpp b/src/c/AclTensorPack.cpp
index 6700ef464c..6202524ca7 100644
--- a/src/c/AclTensorPack.cpp
+++ b/src/c/AclTensorPack.cpp
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/AclEntrypoints.h"
-#include "src/common/ITensor.h"
+#include "src/common/ITensorV2.h"
#include "src/common/TensorPack.h"
#include "src/common/utils/Macros.h"
diff --git a/src/c/cl/AclOpenClExt.cpp b/src/c/cl/AclOpenClExt.cpp
index a144f97f55..ce6d2969de 100644
--- a/src/c/cl/AclOpenClExt.cpp
+++ b/src/c/cl/AclOpenClExt.cpp
@@ -23,7 +23,7 @@
*/
#include "arm_compute/AclOpenClExt.h"
-#include "src/common/ITensor.h"
+#include "src/common/ITensorV2.h"
#include "src/common/Types.h"
#include "src/gpu/cl/ClContext.h"
diff --git a/src/common/ITensorV2.cpp b/src/common/ITensorV2.cpp
new file mode 100644
index 0000000000..39bf1c6fb3
--- /dev/null
+++ b/src/common/ITensorV2.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/common/ITensorV2.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+size_t ITensorV2::get_size() const
+{
+ return tensor()->info()->total_size();
+}
+
+AclTensorDescriptor ITensorV2::get_descriptor() const
+{
+ return detail::convert_to_descriptor(*tensor()->info());
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/common/ITensor.h b/src/common/ITensorV2.h
index ee7eac7688..965aacea23 100644
--- a/src/common/ITensor.h
+++ b/src/common/ITensorV2.h
@@ -92,7 +92,19 @@ public:
*
* @return The legacy underlying tensor object
*/
- virtual arm_compute::ITensor *tensor() = 0;
+ virtual arm_compute::ITensor *tensor() const = 0;
+ /** Get the size of the tensor in bytes
+ *
+ * @note The size is not based on allocated memory, but on the information in the tensor's descriptor (dimensions, data type, etc.).
+ *
+ * @return The size of the tensor in bytes
+ */
+ size_t get_size() const;
+ /** Get the descriptor of this tensor
+ *
+ * @return The descriptor describing the characteristics of this tensor
+ */
+ AclTensorDescriptor get_descriptor() const;
};
/** Extract internal representation of a Tensor
diff --git a/src/common/TensorPack.cpp b/src/common/TensorPack.cpp
index c582c7b106..6c2c7f9622 100644
--- a/src/common/TensorPack.cpp
+++ b/src/common/TensorPack.cpp
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "src/common/TensorPack.h"
-#include "src/common/ITensor.h"
+#include "src/common/ITensorV2.h"
#include "src/common/utils/Validate.h"
namespace arm_compute
diff --git a/src/common/utils/LegacySupport.cpp b/src/common/utils/LegacySupport.cpp
index 5981c657bd..569b2abd89 100644
--- a/src/common/utils/LegacySupport.cpp
+++ b/src/common/utils/LegacySupport.cpp
@@ -29,7 +29,7 @@ namespace detail
{
namespace
{
-DataType data_type_mapper(AclDataType data_type)
+DataType convert_to_legacy_data_type(AclDataType data_type)
{
switch(data_type)
{
@@ -41,11 +41,25 @@ DataType data_type_mapper(AclDataType data_type)
return DataType::BFLOAT16;
default:
return DataType::UNKNOWN;
- ;
}
}
-TensorShape tensor_shape_mapper(int32_t ndims, int32_t *shape)
+AclDataType convert_to_c_data_type(DataType data_type)
+{
+ switch(data_type)
+ {
+ case DataType::F32:
+ return AclDataType::AclFloat32;
+ case DataType::F16:
+ return AclDataType::AclFloat16;
+ case DataType::BFLOAT16:
+ return AclDataType::AclBFloat16;
+ default:
+ return AclDataType::AclDataTypeUnknown;
+ }
+}
+
+TensorShape create_legacy_tensor_shape(int32_t ndims, int32_t *shape)
{
TensorShape legacy_shape{};
for(int32_t d = 0; d < ndims; ++d)
@@ -54,13 +68,44 @@ TensorShape tensor_shape_mapper(int32_t ndims, int32_t *shape)
}
return legacy_shape;
}
+int32_t *create_tensor_shape_array(const TensorInfo &info)
+{
+ const auto num_dims = info.num_dimensions();
+ if(num_dims <= 0)
+ {
+ return nullptr;
+ }
+
+ int32_t *shape_array = new int32_t[num_dims];
+
+ for(size_t d = 0; d < num_dims; ++d)
+ {
+ shape_array[d] = info.tensor_shape()[d];
+ }
+
+ return shape_array;
+}
} // namespace
TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc)
{
TensorInfo legacy_desc;
- legacy_desc.init(tensor_shape_mapper(desc.ndims, desc.shape), 1, data_type_mapper(desc.data_type));
+ legacy_desc.init(create_legacy_tensor_shape(desc.ndims, desc.shape), 1, convert_to_legacy_data_type(desc.data_type));
return legacy_desc;
}
+
+AclTensorDescriptor convert_to_descriptor(const TensorInfo &info)
+{
+ const auto num_dims = info.num_dimensions();
+ AclTensorDescriptor desc
+ {
+ static_cast<int32_t>(num_dims),
+ create_tensor_shape_array(info),
+ convert_to_c_data_type(info.data_type()),
+ nullptr,
+ 0
+ };
+ return desc;
+}
} // namespace detail
} // namespace arm_compute
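Note that convert_to_descriptor() heap-allocates the shape array with new[], and nothing in this patch releases it; the test fixture added below treats clean-up as the caller's responsibility. A hedged sketch of what that clean-up could look like on the receiving side (the delete[] pairing is inferred from the allocation above, not a documented contract; "tensor" stands for any valid AclTensor):

    AclTensorDescriptor desc;
    if(AclGetTensorDescriptor(tensor, &desc) == AclSuccess)
    {
        /* ... use desc ... */
        delete[] desc.shape; // pairs with the new int32_t[] in create_tensor_shape_array()
        desc.shape = nullptr;
    }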
diff --git a/src/common/utils/LegacySupport.h b/src/common/utils/LegacySupport.h
index 37329b747c..c2cc1bc182 100644
--- a/src/common/utils/LegacySupport.h
+++ b/src/common/utils/LegacySupport.h
@@ -38,6 +38,13 @@ namespace detail
* @return Legacy tensor meta-data
*/
TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc);
+/** Convert a legacy tensor meta-data to a descriptor
+ *
+ * @param[in] info Legacy tensor meta-data
+ *
+ * @return A converted descriptor
+ */
+AclTensorDescriptor convert_to_descriptor(const TensorInfo &info);
} // namespace detail
} // namespace arm_compute
diff --git a/src/common/utils/Utils.h b/src/common/utils/Utils.h
index 87be9df509..79f4f39c47 100644
--- a/src/common/utils/Utils.h
+++ b/src/common/utils/Utils.h
@@ -40,7 +40,7 @@ namespace utils
* @return A corresponding plain old C enumeration
*/
template <typename E, typename SE>
-constexpr E as_cenum(SE v) noexcept
+constexpr E as_cenum(const SE v) noexcept
{
return static_cast<E>(static_cast<std::underlying_type_t<SE>>(v));
}
@@ -55,7 +55,7 @@ constexpr E as_cenum(SE v) noexcept
* @return A corresponding strongly typed enumeration
*/
template <typename SE, typename E>
-constexpr SE as_enum(E val) noexcept
+constexpr SE as_enum(const E val) noexcept
{
return static_cast<SE>(val);
}
diff --git a/src/cpu/CpuTensor.cpp b/src/cpu/CpuTensor.cpp
index 79dc812c58..6dd6d9c31b 100644
--- a/src/cpu/CpuTensor.cpp
+++ b/src/cpu/CpuTensor.cpp
@@ -72,7 +72,7 @@ StatusCode CpuTensor::import(void *handle, ImportMemoryType type)
return bool(st) ? StatusCode::Success : StatusCode::RuntimeError;
}
-arm_compute::ITensor *CpuTensor::tensor()
+arm_compute::ITensor *CpuTensor::tensor() const
{
return _legacy_tensor.get();
}
diff --git a/src/cpu/CpuTensor.h b/src/cpu/CpuTensor.h
index a46f1a26cb..b078774c99 100644
--- a/src/cpu/CpuTensor.h
+++ b/src/cpu/CpuTensor.h
@@ -24,7 +24,7 @@
#ifndef SRC_CPU_CPUTENSOR_H
#define SRC_CPU_CPUTENSOR_H
-#include "src/common/ITensor.h"
+#include "src/common/ITensorV2.h"
#include "arm_compute/runtime/Tensor.h"
@@ -51,7 +51,7 @@ public:
// Inherrited functions overriden
void *map() override;
StatusCode unmap() override;
- arm_compute::ITensor *tensor() override;
+ arm_compute::ITensor *tensor() const override;
StatusCode import(void *handle, ImportMemoryType type) override;
private:
diff --git a/src/gpu/cl/ClTensor.cpp b/src/gpu/cl/ClTensor.cpp
index db2081c4ed..0df07813e3 100644
--- a/src/gpu/cl/ClTensor.cpp
+++ b/src/gpu/cl/ClTensor.cpp
@@ -83,7 +83,7 @@ StatusCode ClTensor::import(void *handle, ImportMemoryType type)
return StatusCode::Success;
}
-arm_compute::ITensor *ClTensor::tensor()
+arm_compute::ITensor *ClTensor::tensor() const
{
return _legacy_tensor.get();
}
diff --git a/src/gpu/cl/ClTensor.h b/src/gpu/cl/ClTensor.h
index 4188f622d6..99d228c0b8 100644
--- a/src/gpu/cl/ClTensor.h
+++ b/src/gpu/cl/ClTensor.h
@@ -24,7 +24,7 @@
#ifndef SRC_GPU_CLTENSOR_H
#define SRC_GPU_CLTENSOR_H
-#include "src/common/ITensor.h"
+#include "src/common/ITensorV2.h"
#include "arm_compute/runtime/CL/CLTensor.h"
@@ -53,7 +53,7 @@ public:
// Inherrited functions overriden
void *map() override;
StatusCode unmap() override;
- arm_compute::ITensor *tensor() override;
+ arm_compute::ITensor *tensor() const override;
StatusCode import(void *handle, ImportMemoryType type) override;
private:
diff --git a/tests/validation/cpu/unit/Tensor.cpp b/tests/validation/cpu/unit/Tensor.cpp
index 8fad7fa3ae..aa2e3abdf1 100644
--- a/tests/validation/cpu/unit/Tensor.cpp
+++ b/tests/validation/cpu/unit/Tensor.cpp
@@ -33,33 +33,26 @@ TEST_SUITE(CPU)
TEST_SUITE(UNIT)
TEST_SUITE(Tensor)
-FIXTURE_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(MapNotAllocatedTensor, MapNotAllocatedTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(ImportMemory, ImportMemoryFixture<acl::Target::Cpu>, framework::DatasetMode::ALL)
-{
-}
+#define TENSOR_TEST_CASE(name, fixture) \
+ FIXTURE_TEST_CASE(name, fixture, framework::DatasetMode::ALL) \
+ { \
+ }
+
+TENSOR_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture)
+TENSOR_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(MapNotAllocatedTensor, MapNotAllocatedTensorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(ImportMemory, ImportMemoryFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(GetSize, TensorSizeFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(GetInvalidSize, InvalidTensorSizeFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(GetDescriptor, DescriptorConversionFixture<acl::Target::Cpu>)
+TENSOR_TEST_CASE(GetInvalidDescriptor, InvalidDescriptorConversionFixture<acl::Target::Cpu>)
+
+#undef TENSOR_TEST_CASE
TEST_SUITE_END() // Tensor
TEST_SUITE_END() // UNIT
diff --git a/tests/validation/fixtures/UNIT/Tensor.h b/tests/validation/fixtures/UNIT/Tensor.h
index acd10c91fe..32260cb431 100644
--- a/tests/validation/fixtures/UNIT/Tensor.h
+++ b/tests/validation/fixtures/UNIT/Tensor.h
@@ -292,6 +292,132 @@ public:
ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success);
}
};
+/** Test case for get_size() interface of Tensor
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor
+ * - Compare the size value returned with the expected value
+ */
+template <acl::Target Target>
+class TensorSizeFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // size should be 6 elements (2x3) times 4 bytes (float32) = 24 bytes
+ constexpr size_t expected_size = 24;
+ ARM_COMPUTE_ASSERT(tensor.get_size() == expected_size);
+ };
+};
+/** Test case for get_size() dealing with invalid arguments
+ *
+ * Test Steps:
+ * - Check that a null tensor returns the correct error
+ * - Create a valid tensor
+ * - Check that the C interface returns the correct error for a null size argument
+ */
+template <acl::Target Target>
+class InvalidTensorSizeFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ // Null tensor
+ AclTensor null_tensor = nullptr;
+ uint64_t size{ 0 };
+ ARM_COMPUTE_ASSERT(AclGetTensorSize(null_tensor, &size) == AclStatus::AclInvalidArgument);
+
+ // Create valid tensor
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // Null size argument
+ ARM_COMPUTE_ASSERT(AclGetTensorSize(tensor.get(), nullptr) == AclStatus::AclInvalidArgument);
+ };
+};
+
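+/** Test case for the get_descriptor() interface of Tensor
+ *
+ * Test Steps:
+ * - Create a valid context and a valid tensor
+ * - Compare the descriptor returned by get_descriptor() with the descriptor used at creation
+ * - Repeat the comparison through the C interface (AclGetTensorDescriptor) with a prepopulated descriptor
+ */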
+template <acl::Target Target>
+class DescriptorConversionFixture : public framework::Fixture
+{
+ bool compare_descriptor(const AclTensorDescriptor &desc_a, const AclTensorDescriptor &desc_b)
+ {
+ auto are_descriptors_same = true;
+
+ are_descriptors_same &= desc_a.ndims == desc_b.ndims;
+ are_descriptors_same &= desc_a.data_type == desc_b.data_type;
+ are_descriptors_same &= desc_a.shape != nullptr && desc_b.shape != nullptr;
+
+ for(int32_t d = 0; d < desc_a.ndims; ++d)
+ {
+ are_descriptors_same &= desc_a.shape[d] == desc_b.shape[d];
+ }
+
+ // other attributes should be added here
+
+ return are_descriptors_same;
+ }
+
+public:
+ void setup()
+ {
+ auto err{ acl::StatusCode::Success };
+ auto ctx{ acl::Context(Target, &err) };
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ auto desc{ acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32) };
+ acl::Tensor tensor(ctx, desc, &err);
+
+ auto desc_from_tensor = tensor.get_descriptor();
+
+ ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), *desc_from_tensor.get()));
+ ARM_COMPUTE_ASSERT(desc == desc_from_tensor);
+
+ // Test the C interface with a "prepopulated" descriptor
+ // Note: When the C interface is used, there is a possibility of memory leaks
+ // if members are not correctly deleted (e.g., shape).
+ // Since that is considered the user's responsibility, it is not tested here.
+ AclTensorDescriptor prepopulated_descriptor
+ {
+ 3, nullptr, AclDataType::AclBFloat16, nullptr, 0
+ };
+
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), &prepopulated_descriptor) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), prepopulated_descriptor));
+ ARM_COMPUTE_ASSERT(desc == acl::TensorDescriptor(prepopulated_descriptor));
+ };
+};
+
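+/** Test case for get_descriptor() dealing with invalid arguments
+ *
+ * Test Steps:
+ * - Check that a null tensor returns the correct error
+ * - Create a valid tensor
+ * - Check that the C interface returns the correct error for a null descriptor argument
+ */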
+template <acl::Target Target>
+class InvalidDescriptorConversionFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ // Null tensor
+ AclTensor null_tensor = nullptr;
+ AclTensorDescriptor desc{};
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(null_tensor, &desc) == AclStatus::AclInvalidArgument);
+
+ // Create valid tensor
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // Null descriptor argument
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), nullptr) == AclStatus::AclInvalidArgument);
+ };
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/gpu/unit/Tensor.cpp b/tests/validation/gpu/unit/Tensor.cpp
index f8278afe25..b40d6264f5 100644
--- a/tests/validation/gpu/unit/Tensor.cpp
+++ b/tests/validation/gpu/unit/Tensor.cpp
@@ -33,27 +33,24 @@ TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(Tensor)
-FIXTURE_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
-FIXTURE_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::GpuOcl>, framework::DatasetMode::ALL)
-{
-}
+#define TENSOR_TEST_CASE(name, fixture) \
+ FIXTURE_TEST_CASE(name, fixture, framework::DatasetMode::ALL) \
+ { \
+ }
+
+TENSOR_TEST_CASE(CreateTensorWithInvalidContext, CreateTensorWithInvalidContextFixture)
+TENSOR_TEST_CASE(CreateTensorWithInvalidDescriptor, CreateTensorWithInvalidDescriptorFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(DestroyInvalidTensor, DestroyInvalidTensorFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(SimpleTensor, SimpleTensorFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(TensorStress, TensorStressFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(MapInvalidTensor, MapInvalidTensorFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(MapAllocatedTensor, MapAllocatedTensorFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(GetSize, TensorSizeFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(GetInvalidSize, InvalidTensorSizeFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(GetDescriptor, DescriptorConversionFixture<acl::Target::GpuOcl>)
+TENSOR_TEST_CASE(GetInvalidDescriptor, InvalidDescriptorConversionFixture<acl::Target::GpuOcl>)
+
+#undef TENSOR_TEST_CASE
TEST_SUITE_END() // Tensor
TEST_SUITE_END() // UNIT