author    Chunosov <N.Chunosov@yandex.ru>             2017-11-03 17:33:15 +0700
committer Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:35:24 +0000
commit    d621bca4e963555a99be4328c8d49d1813789649 (patch)
tree      59503f9d4cdbaafefdba5a2569bf3d88082ad09d /arm_compute
parent    5a99ddf2dcf3a5eb49ea85cb8bcc6a43f1496e5e (diff)
COMPMID-661: directconv-uint8 (#20)
Change-Id: I84f7a1ce3658be0d3c91e65096467258af48f0b6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94341
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CL/CLKernelLibrary.h                           2
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h    2
-rw-r--r--  arm_compute/core/Utils.h                                       37
-rw-r--r--  arm_compute/core/utils/quantization/AsymmHelpers.h             42
-rw-r--r--  arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h     2
5 files changed, 82 insertions, 3 deletions
diff --git a/arm_compute/core/CL/CLKernelLibrary.h b/arm_compute/core/CL/CLKernelLibrary.h
index d433a740ac..f6256727f8 100644
--- a/arm_compute/core/CL/CLKernelLibrary.h
+++ b/arm_compute/core/CL/CLKernelLibrary.h
@@ -63,7 +63,7 @@ public:
*
* @return Build options set
*/
- StringSet options() const;
+ const StringSet &options() const;
private:
StringSet _build_opts; /**< Build options set */
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index d876143a32..85deeaef37 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -56,7 +56,7 @@ public:
* 5x5 convolution with stride_x = 1/2, stride_y = 1/2
*
* @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/QS8/QS16/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* The 3rd dimension must be the same as the input's volume 3rd dimension.
* Data type supported:Same as @p input.
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index a77df030e6..b2bd7bd4ab 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -346,6 +346,43 @@ inline size_t num_channels_from_format(Format format)
}
}
+/** Return the promoted data type of a given data type.
+ *
+ * @note If promoted data type is not supported an error will be thrown
+ *
+ * @param[in] dt Data type to get the promoted type of.
+ *
+ * @return Promoted data type
+ */
+inline DataType get_promoted_data_type(DataType dt)
+{
+ switch(dt)
+ {
+ case DataType::U8:
+ return DataType::U16;
+ case DataType::S8:
+ return DataType::S16;
+ case DataType::QS8:
+ return DataType::QS16;
+ case DataType::U16:
+ return DataType::U32;
+ case DataType::S16:
+ return DataType::S32;
+ case DataType::QS16:
+ return DataType::QS32;
+ case DataType::QASYMM8:
+ case DataType::F16:
+ case DataType::U32:
+ case DataType::S32:
+ case DataType::F32:
+ case DataType::QS32:
+ ARM_COMPUTE_ERROR("Unsupported data type promotions!");
+ default:
+ ARM_COMPUTE_ERROR("Undefined data type!");
+ }
+ return DataType::UNKNOWN;
+}
+
/** Separate a 2D convolution into two 1D convolutions
*
* @param[in] conv 2D convolution
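
The new get_promoted_data_type() helper maps each narrow data type to the wider type used for intermediate accumulation. Below is a minimal usage sketch; it is not part of this patch and only calls the function exactly as declared in arm_compute/core/Utils.h above (assumes the ComputeLibrary headers are on the include path):

    #include "arm_compute/core/Utils.h"
    #include <iostream>

    int main()
    {
        using arm_compute::DataType;
        using arm_compute::get_promoted_data_type;

        // S16 promotes to S32 and QS8 to QS16, per the switch in Utils.h.
        const DataType acc_s16 = get_promoted_data_type(DataType::S16);
        const DataType acc_qs8 = get_promoted_data_type(DataType::QS8);

        std::cout << "S16 -> S32:  " << (acc_s16 == DataType::S32) << "\n"
                  << "QS8 -> QS16: " << (acc_qs8 == DataType::QS16) << std::endl;
        return 0;
    }
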
diff --git a/arm_compute/core/utils/quantization/AsymmHelpers.h b/arm_compute/core/utils/quantization/AsymmHelpers.h
new file mode 100644
index 0000000000..d2cd76e256
--- /dev/null
+++ b/arm_compute/core/utils/quantization/AsymmHelpers.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_QUANTIZATION_ASYMM_HELPERS_H__
+#define __ARM_COMPUTE_QUANTIZATION_ASYMM_HELPERS_H__
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+namespace quantization
+{
+/** Calculate quantized representation of multiplier with value less than one.
+ *
+ * @param[in] multiplier Real multiplier.
+ * @param[out] quant_multiplier Integer multiplier.
+ * @param[out] right_shift Right bit shift.
+ */
+arm_compute::Error calculate_quantized_multiplier_less_than_one(double multiplier, int *quant_multiplier, int *right_shift);
+} // namespace quantization
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_QUANTIZATION_ASYMM_HELPERS_H__ */
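
The calculate_quantized_multiplier_less_than_one() helper declared above converts a real multiplier in [0, 1) into an integer multiplier plus a right shift, in the usual gemmlowp fashion: decompose multiplier = q * 2^(-shift) with q in [0.5, 1) and store q as a Q0.31 fixed-point value. The following is a minimal standalone sketch of that scheme; the helper name, the void return type and the rounding details are assumptions for illustration, not the library's actual implementation (which lives in the corresponding .cpp and returns arm_compute::Error):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Hypothetical standalone helper: quantize a multiplier in [0, 1) as a
    // Q0.31 fixed-point value plus a right bit shift (gemmlowp-style sketch).
    void quantize_multiplier_less_than_one(double multiplier, int32_t *quant_multiplier, int *right_shift)
    {
        assert(multiplier >= 0.0 && multiplier < 1.0);
        if(multiplier == 0.0)
        {
            *quant_multiplier = 0;
            *right_shift      = 0;
            return;
        }
        // Decompose multiplier = q * 2^exponent with q in [0.5, 1).
        int          exponent = 0;
        const double q        = std::frexp(multiplier, &exponent);
        *right_shift          = -exponent;
        // Round q to Q0.31; int64_t avoids overflow before the range check.
        auto q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
        if(q_fixed == (1ll << 31))
        {
            // Rounding pushed q up to 1.0: halve it and shift one bit less.
            q_fixed /= 2;
            --*right_shift;
        }
        *quant_multiplier = static_cast<int32_t>(q_fixed);
    }
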
diff --git a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
index 4c85277c05..c2a55e4bfb 100644
--- a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
@@ -45,7 +45,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QS16/F16/F32.
+ * Data types supported: QASYMM8/QS8/QS16/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.