Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r--  src/armnn/optimizations/AddBroadcastReshapeLayer.hpp   4
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp          18
-rw-r--r--  src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp   2
-rw-r--r--  src/armnn/optimizations/FuseBatchNorm.hpp              4
4 files changed, 14 insertions, 14 deletions
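
This is the optimizations/ slice of what appears to be a tree-wide rename dropping the "Cpu" prefix from the tensor-handle types: the header backendsCommon/CpuTensorHandle.hpp becomes backendsCommon/TensorHandle.hpp, and ScopedCpuTensorHandle / ConstCpuTensorHandle become ScopedTensorHandle / ConstTensorHandle. Within this directory the change is purely mechanical; constructors and ownership semantics are untouched. A minimal before/after sketch of the migration a downstream pass would make (the ConstTensor variable is illustrative):

    #include <backendsCommon/TensorHandle.hpp>  // was: <backendsCommon/CpuTensorHandle.hpp>

    // Before: auto handle = std::make_unique<ScopedCpuTensorHandle>(someConstTensor);
    // After (same constructor, same copy-on-construction behaviour):
    auto handle = std::make_unique<ScopedTensorHandle>(someConstTensor);
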
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index aa00b9913c..4cfe2e4898 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -8,7 +8,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
@@ -70,7 +70,7 @@ public:
{
ConstantLayer& constantLayer = static_cast<ConstantLayer&>(parentLayer);
- constantLayer.m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(
+ constantLayer.m_LayerOutput = std::make_unique<ScopedTensorHandle>(
ConstTensor(reshapeInfo, constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
}
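
Isolated from the pass, the updated hunk re-wraps the constant layer's payload under the broadcast-compatible shape, then advertises that shape on the output slot. A minimal sketch, with constantLayer and reshapeInfo in scope as in the hunk above (ScopedTensorHandle copies the tensor contents on construction, so replacing the old handle is safe):

    // Alias the existing constant data under the new, reshaped TensorInfo...
    ConstTensor reshaped(reshapeInfo, constantLayer.m_LayerOutput->GetConstTensor<void>());
    // ...copy it into a freshly owned handle, releasing the old one afterwards...
    constantLayer.m_LayerOutput = std::make_unique<ScopedTensorHandle>(reshaped);
    // ...and keep the graph's view of the output consistent with the stored tensor.
    constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
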
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index df5a5b4f67..66b3d2685a 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
@@ -23,7 +23,7 @@ namespace optimizations
struct BFloat16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -37,14 +37,14 @@ struct BFloat16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -58,14 +58,14 @@ struct Float16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToBFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -79,14 +79,14 @@ struct Float32ToBFloat16
TensorInfo newInfo(info.GetShape(), DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -100,7 +100,7 @@ struct Float32ToFloat16
TensorInfo newInfo(info.GetShape(), DataType::Float16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
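
The hunks above elide each functor's conversion body. For reference, here is how the Float16ToFloat32 case plausibly reads in full after the rename (a sketch: the FloatingPointConverter helper and the Half type come from armnnUtils/armnn headers already included by this file and are not touched by the patch):

    struct Float16ToFloat32
    {
        static void Func(std::shared_ptr<ConstTensorHandle>& handle)
        {
            const TensorInfo& info = handle->GetTensorInfo();
            if (info.GetDataType() == DataType::Float16)
            {
                // Widen the raw FP16 payload into a temporary FP32 buffer...
                std::vector<float> newValues(info.GetNumElements());
                armnnUtils::FloatingPointConverter::ConvertFloat16To32(
                    handle->GetConstTensor<Half>(), info.GetNumElements(), newValues.data());
                // ...then republish it through a newly owned handle.
                TensorInfo newInfo(info.GetShape(), DataType::Float32);
                ConstTensor newInput(newInfo, newValues);
                handle.reset(new ScopedTensorHandle(newInput));
            }
        }
    };

Each functor is plugged into the ConvertConstants optimization template together with a layer predicate (e.g. ConvertConstants<Float16ToFloat32, IsFloat16Layer>), which is why only Func's signature needed updating here.
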
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
index a0856a485b..6c80e740be 100644
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
@@ -35,7 +35,7 @@ inline LayerT* ConvertWeight(Layer* l)
TensorInfo newInfo(info);
newInfo.SetDataType(DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
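
For intuition about what this pass stores in m_Weight: BFloat16 is the top half of an IEEE-754 float32 (sign, 8 exponent bits, 7 mantissa bits), so a standalone narrowing helper can be written as below. This is a hypothetical illustration, not ArmNN's converter; NaN handling is omitted for brevity.

    #include <cstdint>
    #include <cstring>

    // Narrow a float32 to the bf16 bit pattern with round-to-nearest-even.
    uint16_t Float32ToBf16(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        // Add half of the discarded range, plus the tie-breaking bit, then truncate.
        bits += 0x7FFFu + ((bits >> 16) & 1u);
        return static_cast<uint16_t>(bits >> 16);
    }
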
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index 9d25379930..3fb4b34d28 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -162,8 +162,8 @@ public:
auto& newConv2dLayer = *graph.InsertNewLayer<ConvLayer>(base.GetInputSlot(0),
convDescriptor,
name.c_str());
- newConv2dLayer.m_Weight = std::make_unique<ScopedCpuTensorHandle>(fusedWeightsTensor);
- newConv2dLayer.m_Bias = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(fusedBiasTensor));
+ newConv2dLayer.m_Weight = std::make_unique<ScopedTensorHandle>(fusedWeightsTensor);
+ newConv2dLayer.m_Bias = std::make_unique<ScopedTensorHandle>(ConstTensor(fusedBiasTensor));
// Reconnects with original parent.
newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);
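
For context on what ends up in the two renamed handles: FuseBatchNorm folds a following batch-norm layer into the convolution by rescaling its weights and bias per output channel, using the standard identities w' = w * gamma / sqrt(var + eps) and b' = (b - mean) * gamma / sqrt(var + eps) + beta. A minimal sketch of that arithmetic with hypothetical flat buffers (not the file's exact loop, which also deals with weight data layout):

    #include <cmath>

    // Fold BatchNorm statistics (mean, var) and parameters (gamma, beta, eps)
    // into convolution weights/bias, one output channel at a time.
    for (unsigned int c = 0; c < numOutputChannels; ++c)
    {
        const float mult = gamma[c] / std::sqrt(var[c] + eps);
        for (unsigned int i = 0; i < weightsPerChannel; ++i)
        {
            fusedWeights[c * weightsPerChannel + i] = mult * weights[c * weightsPerChannel + i];
        }
        fusedBias[c] = mult * (bias[c] - mean[c]) + beta[c];
    }
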