 src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp    | 6 +++---
 src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index 8114adadc2..e72a6c3226 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -182,7 +182,7 @@ public:
void configure(size_t B_pretranspose_size, unsigned int alignment)
{
- _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+ _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment);
_B_pretranspose_size = B_pretranspose_size;
}
@@ -525,7 +525,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensor *a, c
else
{
_pretranspose = new Tensor();
- static_cast<Tensor *>(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+ static_cast<Tensor *>(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment);
}
}
@@ -587,7 +587,7 @@ template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment)
{
ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
- _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment) }, 1, DataType::S8), alignment);
memory_group.manage(&_workspace);
_workspace.allocator()->allocate();
}
diff --git a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
index 4a5623394f..e4526c5bd3 100644
--- a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
+++ b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
@@ -62,7 +62,7 @@ void CpuPoolingAssemblyDispatch::configure(const ITensorInfo *src, ITensorInfo *
// Allocate workspace based on kernel's memory requirements
constexpr size_t alignment = 4096;
const size_t workspace_size = pooling_wrapper->get_working_size(num_threads);
- _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment) }, 1, DataType::S8), alignment);
_memory_group.manage(&_workspace);
_workspace.allocator()->allocate();
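
Note on the pattern this change touches: the assembly kernels report their working-buffer requirement as a raw byte count, and the runtime wraps that count in a 1D S8 tensor whose shape is padded by the requested alignment before allocating it through the memory group. This commit only drops the stale "FIXME: remove alignment after COMPMID-1088" comments; the padding itself stays. A minimal sketch of the allocation pattern, assuming the Arm Compute Library runtime headers are available; the helper name allocate_aligned_workspace is illustrative and not part of this change:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/MemoryGroup.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Illustrative helper mirroring the diff: wrap a raw workspace byte count
    // in a 1D S8 tensor (1 byte per element), over-allocate by `alignment`
    // bytes so the buffer start can be aligned, then let the memory group
    // manage it and allocate the backing memory.
    void allocate_aligned_workspace(Tensor &workspace, MemoryGroup &memory_group,
                                    size_t workspace_size, size_t alignment)
    {
        workspace.allocator()->init(
            TensorInfo(TensorShape{ workspace_size + alignment }, 1, DataType::S8),
            alignment);
        memory_group.manage(&workspace);
        workspace.allocator()->allocate();
    }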