author    Georgios Pinitas <georgios.pinitas@arm.com>    2017-10-02 18:51:47 +0100
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit    e2c82fee3b6d38f6e79412c78176792b817defd0 (patch)
tree      aa6821e33cfe8001c33086191c81c18d66ac7837 /src/graph/nodes/SoftmaxLayer.cpp
parent    48a60f9f7b0b7b5cf38253b7a2ac576aac43ef78 (diff)
download  ComputeLibrary-e2c82fee3b6d38f6e79412c78176792b817defd0.tar.gz
COMPMID-550: Adds support for branches.
Change-Id: I778007c9221ce3156400284c4039b90245eb2b7f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90043
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph/nodes/SoftmaxLayer.cpp')
-rw-r--r--  src/graph/nodes/SoftmaxLayer.cpp | 32
1 file changed, 18 insertions(+), 14 deletions(-)
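
The hunks below rework how the SoftmaxLayer node selects its backend: a function template specialized on TargetHint instantiates either the OpenCL or the NEON softmax function. The following is a minimal, self-contained C++14 sketch of that dispatch pattern for orientation only; Backend, FakeTensor, FakeSoftmax and make_softmax are simplified stand-ins, not Compute Library types.

    // Sketch of compile-time backend dispatch via template specialization.
    #include <iostream>
    #include <memory>

    enum class Backend { OPENCL, NEON };

    struct FakeTensor { const char *name; };

    struct IFunction
    {
        virtual ~IFunction() = default;
        virtual void run() = 0;
    };

    struct FakeSoftmax : IFunction
    {
        void configure(FakeTensor *in, FakeTensor *out)
        {
            // A real function would bind the tensors here.
            (void)in;
            (void)out;
        }
        void run() override { std::cout << "softmax\n"; }
    };

    // Primary template is declared but not defined: using an unsupported
    // Backend value fails at link time.
    template <Backend hint>
    std::unique_ptr<IFunction> make_softmax(FakeTensor *input, FakeTensor *output);

    template <>
    std::unique_ptr<IFunction> make_softmax<Backend::OPENCL>(FakeTensor *input, FakeTensor *output)
    {
        auto fn = std::make_unique<FakeSoftmax>(); // stand-in for an OpenCL softmax
        fn->configure(input, output);
        return fn;
    }

    template <>
    std::unique_ptr<IFunction> make_softmax<Backend::NEON>(FakeTensor *input, FakeTensor *output)
    {
        auto fn = std::make_unique<FakeSoftmax>(); // stand-in for a NEON softmax
        fn->configure(input, output);
        return fn;
    }

    int main()
    {
        FakeTensor in{"in"}, out{"out"};
        auto fn = make_softmax<Backend::NEON>(&in, &out);
        fn->run();
    }
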
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index 86282448f7..e3345f1400 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -36,7 +36,7 @@ using namespace arm_compute::graph;
namespace
{
template <typename SoftmaxType, typename TensorType, TargetHint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *output)
{
auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
softmax->configure(
@@ -47,40 +47,44 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
}
template <TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output);
+std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *output);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *output)
{
- return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output);
+ return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, output);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *output)
{
- return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, TargetHint::NEON>(input, output);
+ return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::ITensor, TargetHint::NEON>(input, output);
}
} // namespace
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<TargetHint::OPENCL>(input, output);
- ARM_COMPUTE_LOG("Instantiating CLSoftmaxLayer");
+ func = instantiate<TargetHint::OPENCL>(in, out);
}
else
{
- func = instantiate<TargetHint::NEON>(input, output);
- ARM_COMPUTE_LOG("Instantiating NESoftmaxLayer");
+ func = instantiate<TargetHint::NEON>(in, out);
}
- ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
+ ARM_COMPUTE_LOG(" Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
<< std::endl);
return func;
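
The final hunk changes instantiate_node to accept ITensorObject pointers and unwrap them via input->tensor() / output->tensor() after validating both the wrapper and the wrapped tensor. A minimal sketch of that unwrap-and-validate step, using hypothetical TensorObject/Tensor stand-ins rather than the real graph classes:

    #include <cassert>
    #include <memory>

    struct Tensor { /* backing memory, shape, data type, ... */ };

    struct TensorObject
    {
        // Returns the wrapped tensor; may be nullptr if not yet allocated.
        Tensor *tensor() { return _tensor.get(); }
        std::unique_ptr<Tensor> _tensor = std::make_unique<Tensor>();
    };

    void configure_node(TensorObject *input, TensorObject *output)
    {
        // Mirrors the ARM_COMPUTE_ERROR_ON checks added in the hunk above:
        // both the wrapper and the wrapped tensor must be valid before use.
        assert(input != nullptr && input->tensor() != nullptr);
        assert(output != nullptr && output->tensor() != nullptr);

        Tensor *in  = input->tensor();
        Tensor *out = output->tensor();
        (void)in;
        (void)out; // the raw tensors would be handed to the backend function here
    }

    int main()
    {
        TensorObject in, out;
        configure_node(&in, &out);
    }
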