Diffstat (limited to 'src/cpu/kernels/CpuSoftmaxKernel.cpp')
-rw-r--r--  src/cpu/kernels/CpuSoftmaxKernel.cpp  139
1 file changed, 83 insertions, 56 deletions
diff --git a/src/cpu/kernels/CpuSoftmaxKernel.cpp b/src/cpu/kernels/CpuSoftmaxKernel.cpp
index 6766b10120..93cce785bd 100644
--- a/src/cpu/kernels/CpuSoftmaxKernel.cpp
+++ b/src/cpu/kernels/CpuSoftmaxKernel.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "src/cpu/kernels/CpuSoftmaxKernel.h"
+
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
@@ -29,10 +30,12 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
-#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+
+#include "src/core/common/Registrars.h"
#include "src/cpu/kernels/softmax/list.h"
+
namespace arm_compute
{
namespace cpu
@@ -44,57 +47,53 @@ namespace
/* Softmax Logits 1D Max - identifying the max value of 1D Logits */
static const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> available_kernels_max_logits =
{
-#if defined(ARM_COMPUTE_ENABLE_SVE)
{
"sve_fp32_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32) && data.isa.sve; },
- REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_logits)
+ REGISTER_FP32_SVE(sve_fp32_logits)
},
{
"sve_fp16_logits_1d_max",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve; },
- REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_logits)
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve && data.isa.fp16; },
+ REGISTER_FP16_SVE(sve_fp16_logits)
},
{
"sve_qu8_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8) && data.isa.sve; },
- REGISTER_QASYMM8_SVE(arm_compute::cpu::sve_qasymm8_logits)
+ REGISTER_QASYMM8_SVE(sve_qasymm8_logits)
},
{
"sve_qs8_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve; },
- REGISTER_QASYMM8_SIGNED_SVE(arm_compute::cpu::sve_qasymm8_signed_logits)
+ REGISTER_QASYMM8_SIGNED_SVE(sve_qasymm8_signed_logits)
},
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
-#if defined(ARM_COMPUTE_ENABLE_NEON)
{
"neon_fp32_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
- REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_logits)
+ REGISTER_FP32_NEON(neon_fp32_logits)
},
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_fp16_logits_1d_max",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16); },
- REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_logits)
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.fp16; },
+ REGISTER_FP16_NEON(neon_fp16_logits)
},
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
{
"neon_qu8_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
- REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_logits)
+ REGISTER_QASYMM8_NEON(neon_qasymm8_logits)
},
{
"neon_qs8_logits_1d_max",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
- REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_singed_logits)
+ REGISTER_QASYMM8_SIGNED_NEON(neon_qasymm8_singed_logits)
},
-#endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
};
+
Status validate_arguments_logits_1d_max(const ITensorInfo &input, const ITensorInfo &output)
{
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+
// Validate in case of configured output
if(output.total_size() != 0)
{
@@ -102,6 +101,7 @@ Status validate_arguments_logits_1d_max(const ITensorInfo &input, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&input, &output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output.tensor_shape(), TensorShape(input.tensor_shape()).set(0, 1));
}
+
return Status{};
}
} //namespace
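
This hunk drops the compile-time #if guards around the kernel table: the REGISTER_* macros from Registrars.h already expand to nullptr when the corresponding backend is compiled out, so guarding the entries a second time was redundant, and the strengthened uk->ukernel == nullptr check in configure() below covers the disabled case at runtime. For readers unfamiliar with the pattern, here is a minimal standalone sketch of this kind of first-match micro-kernel lookup; the type and function names are illustrative stand-ins, not the library's actual definitions:

#include <functional>
#include <vector>

// Illustrative stand-ins for the library's selector types.
struct IsaInfo      { bool sve = false; bool sve2 = false; bool fp16 = false; };
struct SelectorData { int dt; IsaInfo isa; };   // dt: toy stand-in for DataType
using KernelFn = void (*)();

struct KernelEntry
{
    const char                                *name;
    std::function<bool(const SelectorData &)>  is_selected;
    KernelFn                                   ukernel;    // nullptr if backend compiled out
};

const KernelEntry *get_implementation(const std::vector<KernelEntry> &table, const SelectorData &data)
{
    for(const auto &entry : table)
    {
        // First entry whose predicate passes wins, so table order is priority order.
        if(entry.is_selected(data))
        {
            return &entry;
        }
    }
    return nullptr;
}

static void dummy_sve()  {}
static void dummy_neon() {}

int main()
{
    const std::vector<KernelEntry> table =
    {
        { "sve_impl",  [](const SelectorData &d) { return d.isa.sve; }, dummy_sve  },
        { "neon_impl", [](const SelectorData &)  { return true; },      dummy_neon },
    };
    SelectorData data{}; // a NEON-only core: isa.sve stays false...
    const auto *uk = get_implementation(table, data);
    return (uk != nullptr && uk->ukernel != nullptr) ? 0 : 1; // ...so it falls through to neon_impl
}

Because the lookup returns the first matching entry, listing the more specialised SVE entries ahead of the NEON fallbacks, as the table above does, encodes the selection priority without any preprocessor guards.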
@@ -109,37 +109,48 @@ const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> &CpuLogits1DMa
{
return available_kernels_max_logits;
}
+
void CpuLogits1DMaxKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_1d_max(*src, *dst));
+
// Softmax across the x dimension
const TensorShape output_shape = TensorShape(src->tensor_shape()).set(0, 1);
// Output auto initialization if not yet initialized
auto_init_if_empty(*dst, output_shape, 1, src->data_type(), src->quantization_info());
+
const auto *uk = get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
- ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
+ ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
+
_run_method = uk->ukernel;
_name = std::string("CpuLogits1DMaxKernel").append("/").append(uk->name);
- Window win = calculate_max_window(*src, Steps());
+
+ Window win = calculate_max_window(*src, Steps());
ICpuKernel::configure(win);
}
+
Status CpuLogits1DMaxKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_1d_max(*src, *dst));
+
return Status{};
}
+
void CpuLogits1DMaxKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
ARM_COMPUTE_ERROR_ON(_run_method == nullptr);
+
const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
auto dst = tensors.get_tensor(TensorType::ACL_DST);
+
_run_method(src, dst, window);
}
+
const char *CpuLogits1DMaxKernel::name() const
{
return _name.c_str();
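
For context on why a dedicated max kernel exists at all: CpuLogits1DMaxKernel is the first pass of the standard numerically stable two-pass softmax, reducing each row along x to its maximum (hence the output shape with dimension 0 set to 1) so the second pass can subtract it before exponentiating. With beta the scaling factor later passed to the softmax kernel's configure(), the computation is:

\[
  m = \max_j x_j, \qquad
  \mathrm{softmax}(x)_i = \frac{e^{\beta (x_i - m)}}{\sum_j e^{\beta (x_j - m)}}, \qquad
  \mathrm{logsoftmax}(x)_i = \beta (x_i - m) - \log \sum_j e^{\beta (x_j - m)}
\]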
@@ -149,46 +160,38 @@ const char *CpuLogits1DMaxKernel::name() const
template <bool IS_LOG>
static const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> available_kernels_logits =
{
-#if defined(ARM_COMPUTE_ENABLE_SVE)
+ {
+ "sve2_qu8_softmax_logits_1d",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8) && data.isa.sve2; },
+ REGISTER_QASYMM8_SVE2(sve2_qasymm8_softmax)
+ },
+ {
+ "sve2_qs8_softmax_logits_1d",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve2; },
+ REGISTER_QASYMM8_SIGNED_SVE2(sve2_qasymm8_signed_softmax)
+ },
{
"sve_fp32_softmax_logits_1d",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32) && data.isa.sve; },
- REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_softmax)
+ REGISTER_FP32_SVE(sve_fp32_softmax)
},
{
"sve_fp16_softmax_logits_1d",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve; },
- REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_softmax)
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve && data.isa.fp16; },
+ REGISTER_FP16_SVE(sve_fp16_softmax)
},
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
-#if defined(ARM_COMPUTE_ENABLE_NEON)
+
{
"neon_fp32_softmax_logits_1d",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
- REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_softmax)
+ REGISTER_FP32_NEON(neon_fp32_softmax)
},
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_fp16_softmax_logits_1d",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16); },
- REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_softmax)
- },
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
-#endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
- {
- "sve2_qu8_softmax_logits_1d",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8) && data.isa.sve2; },
- REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_softmax)
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.fp16; },
+ REGISTER_FP16_NEON(neon_fp16_softmax)
},
{
- "sve2_qs8_softmax_logits_1d",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve2; },
- REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_softmax)
- },
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
-#if defined(ARM_COMPUTE_ENABLE_NEON)
- {
"neon_qu8_softmax_logits_1d",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_softmax)
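
Besides removing the guards, this hunk reorders the now-flat table so the SVE2 quantized entries come first: with first-match selection, position is priority, so the most specialised implementations must precede the SVE and NEON fallbacks. The table itself is a C++14 variable template, giving each IS_LOG instantiation its own kernel list. A minimal sketch of that construct, with illustrative names rather than the library's real entry type:

#include <string>
#include <vector>

// Illustrative entry; the real table stores selector lambdas and function
// pointers as in the max-logits table above.
struct Entry
{
    std::string name;
};

// One table object per instantiation, mirroring available_kernels_logits<IS_LOG>.
template <bool IS_LOG>
const std::vector<Entry> table =
{
    { IS_LOG ? "neon_fp32_log_softmax" : "neon_fp32_softmax" },
};

int main()
{
    // table<true> and table<false> are distinct objects with distinct contents.
    return (table<true>[0].name != table<false>[0].name) ? 0 : 1;
}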
@@ -198,7 +201,6 @@ static const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogit
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_softmax)
},
-#endif //defined(ARM_COMPUTE_ENABLE_NEON)
};
namespace
{
@@ -209,11 +211,14 @@ Status validate_arguments_logits_softmax(const ITensorInfo &src, const ITensorIn
// Check input
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&src);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+
const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src.data_type());
+
// Check max
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &max);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(TensorShape(src.tensor_shape()).set(0, 1), max.tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&src, &max);
+
// Check output if configured
if(dst.total_size() != 0)
{
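
The dimension check on max above mirrors the max kernel's configure(): for a src of shape (W, H, ...), the reduced max tensor must have shape (1, H, ...), built with the same TensorShape(...).set(0, 1) idiom. A toy illustration of that shape relationship, using plain std::array in place of TensorShape:

#include <array>
#include <cassert>
#include <cstddef>

int main()
{
    // Toy shapes in the library's (x, y, z) order; dimension 0 is the
    // softmax axis that the max pass reduces to length 1.
    std::array<std::size_t, 3> src_shape{128, 24, 4};
    std::array<std::size_t, 3> max_shape = src_shape;
    max_shape[0] = 1; // mirrors TensorShape(src.tensor_shape()).set(0, 1)
    assert(max_shape == (std::array<std::size_t, 3>{1, 24, 4}));
    return 0;
}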
@@ -222,6 +227,7 @@ Status validate_arguments_logits_softmax(const ITensorInfo &src, const ITensorIn
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() != output_quantization);
}
+
// Check tmp if configured
if(tmp.total_size() != 0)
{
@@ -231,69 +237,90 @@ Status validate_arguments_logits_softmax(const ITensorInfo &src, const ITensorIn
// on the maximum number of threads that will run in parallel.
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &tmp);
}
+
return Status{};
}
} // namespace
-template <bool IS_LOG>
+
+template <bool IS_LOG>
const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> &CpuLogits1DSoftmaxKernel<IS_LOG>::get_available_kernels()
{
return available_kernels_logits<IS_LOG>;
}
+
template <bool IS_LOG>
void CpuLogits1DSoftmaxKernel<IS_LOG>::configure(const ITensorInfo *src, const ITensorInfo *max, ITensorInfo *dst, const float beta, ITensorInfo *tmp)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, max, dst, tmp);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_softmax(*src, *max, *dst, beta, *tmp, IS_LOG));
+
// Configure kernel window
const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src->data_type());
+
// Output auto initialization if not yet initialized
const QuantizationInfo output_quantization = is_quantized_asymmetric ? arm_compute::get_softmax_output_quantization_info(src->data_type(), IS_LOG) : dst->quantization_info();
auto_init_if_empty(*dst, TensorInfo(*src).set_quantization_info(output_quantization).reset_padding());
+
// Tmp auto initialization if not yet initialized
const DataType tmp_data_type = is_quantized_asymmetric ? DataType::F32 : src->data_type();
auto_init_if_empty(*tmp, TensorInfo(*src).set_data_type(tmp_data_type).reset_padding());
+
const auto *uk = CpuLogits1DSoftmaxKernel<IS_LOG>::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
- ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
+ ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
+
std::string kernel_name = IS_LOG ? std::string("CpuLogits1DLogSoftmaxKernel") : std::string("CpuLogits1DSoftmaxKernel");
- _beta = beta;
- _run_method = uk->ukernel;
- _name = kernel_name.append("/").append(uk->name);
+
+ _beta = beta;
+ _run_method = uk->ukernel;
+ _name = kernel_name.append("/").append(uk->name);
+
// Configure kernel window
Window win = calculate_max_window(*max, Steps());
- ICPPKernel::configure(win);
+
+ ICpuKernel<CpuLogits1DSoftmaxKernel<IS_LOG>>::configure(win);
}
+
template <bool IS_LOG>
Status CpuLogits1DSoftmaxKernel<IS_LOG>::validate(const ITensorInfo *src, const ITensorInfo *max,
const ITensorInfo *dst, const float beta, const ITensorInfo *tmp)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, max, dst, tmp);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_softmax(*src, *max, *dst, beta, *tmp, IS_LOG));
+
return Status{};
}
+
template <bool IS_LOG>
void CpuLogits1DSoftmaxKernel<IS_LOG>::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel<CpuLogits1DSoftmaxKernel<IS_LOG>>::window(), window);
ARM_COMPUTE_ERROR_ON(_run_method == nullptr);
- const auto src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
- auto max = tensors.get_tensor(TensorType::ACL_SRC_1);
- auto dst = tensors.get_tensor(TensorType::ACL_DST_0);
- auto tmp = tensors.get_tensor(TensorType::ACL_DST_1);
+
+ const auto src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+ auto max = tensors.get_tensor(TensorType::ACL_SRC_1);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST_0);
+ auto tmp = tensors.get_tensor(TensorType::ACL_DST_1);
+
const unsigned int num_elems_processed_per_iteration = src->info()->valid_region().shape.x();
const unsigned int tmp_size_for_thread = tmp->info()->element_size() * num_elems_processed_per_iteration;
+
ARM_COMPUTE_ERROR_ON(tmp->info()->total_size() < (info.num_threads * tmp_size_for_thread));
+
void *tmp_for_thread = tmp->buffer() + (info.thread_id * tmp_size_for_thread);
_run_method(src, max, tmp_for_thread, dst, _beta, IS_LOG, window);
}
+
template <bool IS_LOG>
const char *CpuLogits1DSoftmaxKernel<IS_LOG>::name() const
{
return _name.c_str();
}
+
template class CpuLogits1DSoftmaxKernel<true>;
template class CpuLogits1DSoftmaxKernel<false>;
+
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
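
One detail of run_op worth spelling out: a single tmp tensor is carved into one scratch row per thread, each element_size() times the row length wide, at an offset of thread_id times that stride; this is why validate_arguments_logits_softmax only requires tmp to match src's shape, sized for the maximum thread count. A standalone sketch of the same partitioning, with toy sizes in place of the library's tensor accessors:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main()
{
    const std::size_t element_size = 4;   // e.g. an F32 tmp row for a quantized input
    const std::size_t row_length   = 128; // stand-in for valid_region().shape.x()
    const std::size_t num_threads  = 8;

    // One contiguous buffer, carved into per-thread scratch rows.
    const std::size_t tmp_size_for_thread = element_size * row_length;
    std::vector<std::uint8_t> tmp(num_threads * tmp_size_for_thread);

    for(std::size_t thread_id = 0; thread_id < num_threads; ++thread_id)
    {
        // Mirrors: void *tmp_for_thread = tmp->buffer() + (info.thread_id * tmp_size_for_thread);
        std::uint8_t *tmp_for_thread = tmp.data() + thread_id * tmp_size_for_thread;
        assert(tmp_for_thread + tmp_size_for_thread <= tmp.data() + tmp.size());
        (void)tmp_for_thread;
    }
    return 0;
}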