author    giuros01 <giuseppe.rossini@arm.com>          2019-08-23 14:27:30 +0100
committer Giuseppe Rossini <giuseppe.rossini@arm.com>  2019-08-30 13:37:28 +0000
commit    351bd137e48c5276963274ac741b172483e98d21 (patch)
tree      3ede92537c406d24f948acc51c1e6c0fac011036 /utils
parent    ebe2e8ccc6f9504fdad95884a794be1e9f58803e (diff)
compmid-2573: Investigate FP16 Winograd reference implementations
Change-Id: I5a3e692c046a5ad28a676c03e3e51950c64cf503
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1845
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'utils')
 -rw-r--r--  utils/GraphUtils.cpp | 51
 -rw-r--r--  utils/GraphUtils.h   |  6
 -rw-r--r--  utils/ImageLoader.h  | 13
 -rw-r--r--  utils/Utils.h        |  2
 4 files changed, 60 insertions(+), 12 deletions(-)
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index 00165cd6c2..3646facab2 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -63,17 +63,33 @@ TFPreproccessor::TFPreproccessor(float min_range, float max_range)
}
void TFPreproccessor::preprocess(ITensor &tensor)
{
+ if(tensor.info()->data_type() == DataType::F32)
+ {
+ preprocess_typed<float>(tensor);
+ }
+ else if(tensor.info()->data_type() == DataType::F16)
+ {
+ preprocess_typed<half>(tensor);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
+
+template <typename T>
+void TFPreproccessor::preprocess_typed(ITensor &tensor)
+{
Window window;
window.use_tensor_dimensions(tensor.info()->tensor_shape());
const float range = _max_range - _min_range;
-
execute_window_loop(window, [&](const Coordinates & id)
{
- const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id));
- float res = value / 255.f; // Normalize to [0, 1]
- res = res * range + _min_range; // Map to [min_range, max_range]
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = res;
+ const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id));
+ float res = value / 255.f; // Normalize to [0, 1]
+ res = res * range + _min_range; // Map to [min_range, max_range]
+ *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res;
});
}
@@ -88,15 +104,31 @@ CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, floa
void CaffePreproccessor::preprocess(ITensor &tensor)
{
+ if(tensor.info()->data_type() == DataType::F32)
+ {
+ preprocess_typed<float>(tensor);
+ }
+ else if(tensor.info()->data_type() == DataType::F16)
+ {
+ preprocess_typed<half>(tensor);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
+
+template <typename T>
+void CaffePreproccessor::preprocess_typed(ITensor &tensor)
+{
Window window;
window.use_tensor_dimensions(tensor.info()->tensor_shape());
-
const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);
execute_window_loop(window, [&](const Coordinates & id)
{
- const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - _mean[id[channel_idx]];
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value * _scale;
+ const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]);
+ *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale);
});
}
@@ -370,6 +402,9 @@ bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
case DataType::QASYMM8:
tensor_results = access_predictions_tensor<uint8_t>(tensor);
break;
+ case DataType::F16:
+ tensor_results = access_predictions_tensor<half>(tensor);
+ break;
case DataType::F32:
tensor_results = access_predictions_tensor<float>(tensor);
break;
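
The GraphUtils.cpp change follows a dispatch-to-template pattern: the public preprocess() inspects the tensor's runtime data type and forwards to a private preprocess_typed<T>() that holds the element-wise logic once, for both float and half. What follows is a minimal standalone sketch of that pattern, not the library code: Buffer and DType are hypothetical stand-ins for ITensor, and only the float branch is implemented because standard C++ has no native half type.

#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical stand-in for ITensor: a raw buffer plus a runtime type tag.
enum class DType { F32, F16 };

struct Buffer
{
    DType              dtype;
    std::vector<float> f32; // payload used when dtype == DType::F32
};

class TFStylePreprocessor
{
public:
    TFStylePreprocessor(float min_range, float max_range)
        : _min_range(min_range), _max_range(max_range)
    {
    }

    // Runtime dispatch, mirroring TFPreproccessor::preprocess in the patch.
    void preprocess(Buffer &buf)
    {
        if(buf.dtype == DType::F32)
        {
            preprocess_typed<float>(buf.f32.data(), buf.f32.size());
        }
        else // in arm_compute, DataType::F16 would dispatch preprocess_typed<half>
        {
            throw std::runtime_error("NOT SUPPORTED!");
        }
    }

private:
    // Shared element-wise body, as in preprocess_typed<T>.
    template <typename T>
    void preprocess_typed(T *data, std::size_t n)
    {
        const float range = _max_range - _min_range;
        for(std::size_t i = 0; i < n; ++i)
        {
            float res = static_cast<float>(data[i]) / 255.f; // Normalize to [0, 1]
            res       = res * range + _min_range;            // Map to [min_range, max_range]
            data[i]   = static_cast<T>(res);
        }
    }

    float _min_range;
    float _max_range;
};

int main()
{
    Buffer buf{DType::F32, {0.f, 127.5f, 255.f}};
    TFStylePreprocessor(-1.f, 1.f).preprocess(buf); // buf.f32 becomes {-1, 0, 1}
}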
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 3417135f17..4c25dd2460 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -70,6 +70,9 @@ public:
void preprocess(ITensor &tensor) override;
private:
+ template <typename T>
+ void preprocess_typed(ITensor &tensor);
+
std::array<float, 3> _mean;
bool _bgr;
float _scale;
@@ -90,6 +93,9 @@ public:
void preprocess(ITensor &tensor) override;
private:
+ template <typename T>
+ void preprocess_typed(ITensor &tensor);
+
float _min_range;
float _max_range;
};
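
The header change declares preprocess_typed as a private member function template while its definition stays in GraphUtils.cpp; that works because both instantiations (float and half) are requested from preprocess() in the same translation unit. A small sketch of that header/source split, using hypothetical names (Scaler, scale_typed) and with both halves shown in one listing for brevity:

// scaler.h (hypothetical): public entry point plus a private member template,
// mirroring the CaffePreproccessor/TFPreproccessor declarations.
class Scaler
{
public:
    void scale(void *data, int n, bool is_double);

private:
    template <typename T>
    void scale_typed(T *data, int n); // defined in the source file, not here
};

// scaler.cpp (hypothetical): the template definition and its only call sites
// live in the same translation unit, so implicit instantiation is enough.
template <typename T>
void Scaler::scale_typed(T *data, int n)
{
    for(int i = 0; i < n; ++i)
    {
        data[i] = data[i] * T(2);
    }
}

void Scaler::scale(void *data, int n, bool is_double)
{
    if(is_double)
    {
        scale_typed<double>(static_cast<double *>(data), n);
    }
    else
    {
        scale_typed<float>(static_cast<float *>(data), n);
    }
}

int main()
{
    float v[3] = {1.f, 2.f, 3.f};
    Scaler{}.scale(v, 3, /*is_double=*/false); // v becomes {2.f, 4.f, 6.f}
}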
diff --git a/utils/ImageLoader.h b/utils/ImageLoader.h
index 24fcbe179a..5d3a84c59a 100644
--- a/utils/ImageLoader.h
+++ b/utils/ImageLoader.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -249,14 +249,14 @@ public:
*
* @note If the image is a CLImage, the function maps and unmaps the image
*
- * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened image). Data types supported: U8/F32
+ * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened image). Data types supported: U8/F16/F32
* @param[in] bgr (Optional) Fill the first plane with blue channel (default = false)
*/
template <typename T>
void fill_planar_tensor(T &tensor, bool bgr = false)
{
ARM_COMPUTE_ERROR_ON(!is_open());
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32, DataType::F16);
const DataLayout data_layout = tensor.info()->data_layout();
const TensorShape tensor_shape = tensor.info()->tensor_shape();
@@ -324,6 +324,13 @@ public:
*reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue);
break;
}
+ case DataType::F16:
+ {
+ *reinterpret_cast<half *>(out.ptr() + 0 * stride_z) = static_cast<half>(bgr ? blue : red);
+ *reinterpret_cast<half *>(out.ptr() + 1 * stride_z) = static_cast<half>(green);
+ *reinterpret_cast<half *>(out.ptr() + 2 * stride_z) = static_cast<half>(bgr ? red : blue);
+ break;
+ }
default:
{
ARM_COMPUTE_ERROR("Unsupported data type");
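
fill_planar_tensor writes each colour channel into its own plane at an offset of stride_z, and the new F16 case repeats the F32 logic with half as the destination type. Below is a standalone sketch of that planar write, assuming a hypothetical rgb_to_planar helper, float standing in for arm_compute's half, and a plane stride of width * height:

#include <cstddef>
#include <cstdint>
#include <vector>

// Convert interleaved RGB (R,G,B,R,G,B,...) into a planar H x W x 3 buffer,
// mirroring the per-channel writes in fill_planar_tensor. T would be
// arm_compute's half for an F16 tensor; float stands in here.
template <typename T>
void rgb_to_planar(const std::vector<uint8_t> &rgb, int width, int height,
                   std::vector<T> &planar, bool bgr = false)
{
    const std::size_t plane_stride = static_cast<std::size_t>(width) * height; // stride_z analogue
    planar.resize(plane_stride * 3);

    for(int y = 0; y < height; ++y)
    {
        for(int x = 0; x < width; ++x)
        {
            const std::size_t in  = 3 * (static_cast<std::size_t>(y) * width + x);
            const std::size_t out = static_cast<std::size_t>(y) * width + x;

            const uint8_t red   = rgb[in + 0];
            const uint8_t green = rgb[in + 1];
            const uint8_t blue  = rgb[in + 2];

            planar[out + 0 * plane_stride] = static_cast<T>(bgr ? blue : red);
            planar[out + 1 * plane_stride] = static_cast<T>(green);
            planar[out + 2 * plane_stride] = static_cast<T>(bgr ? red : blue);
        }
    }
}

int main()
{
    const std::vector<uint8_t> rgb = {10, 20, 30, 40, 50, 60}; // 2x1 image
    std::vector<float>         planar;
    rgb_to_planar(rgb, /*width=*/2, /*height=*/1, planar); // planar = {10,40, 20,50, 30,60}
}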
diff --git a/utils/Utils.h b/utils/Utils.h
index ec08896257..7fa74ab08b 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -401,7 +401,7 @@ public:
void fill_tensor(T &tensor)
{
ARM_COMPUTE_ERROR_ON(!is_open());
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::QASYMM8, arm_compute::DataType::S32, arm_compute::DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::QASYMM8, arm_compute::DataType::S32, arm_compute::DataType::F32, arm_compute::DataType::F16);
try
{
// Map buffer if creating a CLTensor
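
The Utils.h hunk only widens the accepted data-type list for the fill_tensor helper; the macro amounts to an "is the runtime type one of these" guard before the buffer is filled. A rough standalone analogue of that check, using a hypothetical DType enum and require_type_in helper rather than the arm_compute macro:

#include <initializer_list>
#include <stdexcept>

enum class DType { QASYMM8, S32, F16, F32, U8 }; // hypothetical subset

// Rough analogue of ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN: throw unless the
// runtime type is in the allowed list.
inline void require_type_in(DType t, std::initializer_list<DType> allowed)
{
    for(DType a : allowed)
    {
        if(a == t)
        {
            return;
        }
    }
    throw std::runtime_error("ITensor data type not supported by this object");
}

int main()
{
    require_type_in(DType::F16, {DType::QASYMM8, DType::S32, DType::F32, DType::F16}); // passes after the patch
    // require_type_in(DType::F16, {DType::QASYMM8, DType::S32, DType::F32});          // would throw before it
}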