about summary refs log tree commit diff
path: root/src/armnn/optimizations
diff options: context | space | mode
Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r-- src/armnn/optimizations/ConvertConstants.hpp | 8
-rw-r--r-- src/armnn/optimizations/FuseBatchNorm.hpp    | 2
2 files changed, 5 insertions, 5 deletions
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index 66b3d2685a..65318af285 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -35,7 +35,7 @@ struct BFloat16ToFloat32
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float32);
+ TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -56,7 +56,7 @@ struct Float16ToFloat32
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float32);
+ TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -77,7 +77,7 @@ struct Float32ToBFloat16
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::BFloat16);
+ TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -98,7 +98,7 @@ struct Float32ToFloat16
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float16);
+ TensorInfo newInfo(info.GetShape(), DataType::Float16, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index fe8238bf14..66f722a8ef 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -146,7 +146,7 @@ public:
sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];
}
}
- ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType), fusedBiasVector);
+ ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType, 0.0f, 0, true), fusedBiasVector);
// Insert the new convolution layer that has batch norm parameters fused into
const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") + base.GetName();