author     Davide Grohmann <davide.grohmann@arm.com>      2022-06-15 11:20:41 +0200
committer  Kristofer Jonsson <kristofer.jonsson@arm.com>  2022-08-25 09:13:36 +0000
commit     f4379e99431e5b027b0d5942ccff56af6bfe82b1 (patch)
tree       793dbd611483e6850dfc29db3048f92975800bf8
parent     05dd24dd63fcd0a2f8d6a0db1a7bf740ee968a00 (diff)
download   ethos-u-core-platform-f4379e99431e5b027b0d5942ccff56af6bfe82b1.tar.gz
Add negative testing to message_handler
Also restructure the scatter file so it is no longer constrained by the artificial 512k size limit of the APP_IMAGE region. Add the missing sections in DDR to both the scatter file and the linker script.

Change-Id: I3d9bc8aeae1b1c11ab994276be64a2850cc23f8e
-rw-r--r--  applications/message_handler/lib/include/message_handler.hpp     2
-rw-r--r--  applications/message_handler/lib/message_handler.cpp             1
-rw-r--r--  applications/message_handler/main.cpp                            2
-rw-r--r--  applications/message_handler/test/main.cpp                     319
-rw-r--r--  applications/message_handler/test/message_client.cpp             6
-rw-r--r--  targets/corstone-300/platform.ld                                  4
-rw-r--r--  targets/corstone-300/platform.scatter                            41
7 files changed, 278 insertions, 97 deletions
diff --git a/applications/message_handler/lib/include/message_handler.hpp b/applications/message_handler/lib/include/message_handler.hpp
index 3c227be..98875f4 100644
--- a/applications/message_handler/lib/include/message_handler.hpp
+++ b/applications/message_handler/lib/include/message_handler.hpp
@@ -42,7 +42,7 @@
namespace MessageHandler {
-template <typename T, size_t capacity = 10>
+template <typename T, size_t capacity = 5>
class Queue {
public:
using Predicate = std::function<bool(const T &data)>;
diff --git a/applications/message_handler/lib/message_handler.cpp b/applications/message_handler/lib/message_handler.cpp
index 7b6377f..66623f9 100644
--- a/applications/message_handler/lib/message_handler.cpp
+++ b/applications/message_handler/lib/message_handler.cpp
@@ -292,6 +292,7 @@ void IncomingMessageHandler::sendNetworkInfoRsp(uint64_t userArg, ethosu_core_ne
if (!failed) {
failed = parser.parseModel(buffer,
+ size,
rsp.desc,
InferenceProcess::makeArray(rsp.ifm_size, rsp.ifm_count, ETHOSU_CORE_BUFFER_MAX),
InferenceProcess::makeArray(rsp.ofm_size, rsp.ofm_count, ETHOSU_CORE_BUFFER_MAX));
diff --git a/applications/message_handler/main.cpp b/applications/message_handler/main.cpp
index caa778b..4bd721e 100644
--- a/applications/message_handler/main.cpp
+++ b/applications/message_handler/main.cpp
@@ -93,7 +93,7 @@ struct TaskParams {
TaskParams() :
messageNotify(xSemaphoreCreateBinary()),
inferenceInputQueue(std::make_shared<Queue<ethosu_core_inference_req>>()),
- inferenceOutputQueue(xQueueCreate(10, sizeof(ethosu_core_inference_rsp))),
+ inferenceOutputQueue(xQueueCreate(5, sizeof(ethosu_core_inference_rsp))),
networks(std::make_shared<WithIndexedNetworks>()) {}
SemaphoreHandle_t messageNotify;
diff --git a/applications/message_handler/test/main.cpp b/applications/message_handler/test/main.cpp
index 1c5e108..6a4d26d 100644
--- a/applications/message_handler/test/main.cpp
+++ b/applications/message_handler/test/main.cpp
@@ -58,17 +58,10 @@ using namespace MessageHandler;
} \
} while (0)
-// Nr. of tasks to process inferences with, reserves driver & runs inference (Normally 1 per NPU, but not a must)
-#if defined(ETHOSU) && defined(ETHOSU_NPU_COUNT) && ETHOSU_NPU_COUNT > 0
-constexpr size_t NUM_PARALLEL_TASKS = ETHOSU_NPU_COUNT;
-#else
-constexpr size_t NUM_PARALLEL_TASKS = 1;
-#endif
-
// TensorArena static initialisation
constexpr size_t arenaSize = TENSOR_ARENA_SIZE;
-__attribute__((section(".bss.tensor_arena"), aligned(16))) uint8_t tensorArena[NUM_PARALLEL_TASKS][arenaSize];
+__attribute__((section(".bss.tensor_arena"), aligned(16))) uint8_t tensorArena[arenaSize];
// Message queue from remote host
__attribute__((section("ethosu_core_in_queue"))) MessageQueue::Queue<1000> inputMessageQueue;
@@ -89,7 +82,7 @@ struct TaskParams {
TaskParams() :
messageNotify(xSemaphoreCreateBinary()),
inferenceInputQueue(std::make_shared<Queue<ethosu_core_inference_req>>()),
- inferenceOutputQueue(xQueueCreate(10, sizeof(ethosu_core_inference_rsp))),
+ inferenceOutputQueue(xQueueCreate(5, sizeof(ethosu_core_inference_rsp))),
networks(std::make_shared<WithIndexedNetworks>()) {}
SemaphoreHandle_t messageNotify;
@@ -101,21 +94,16 @@ struct TaskParams {
std::shared_ptr<Networks> networks;
};
-struct InferenceTaskParams {
- TaskParams *taskParams;
- uint8_t *arena;
-};
-
void inferenceTask(void *pvParameters) {
printf("Starting inference task\n");
- InferenceTaskParams *params = reinterpret_cast<InferenceTaskParams *>(pvParameters);
+ TaskParams *params = reinterpret_cast<TaskParams *>(pvParameters);
- InferenceHandler process(params->arena,
+ InferenceHandler process(tensorArena,
arenaSize,
- params->taskParams->inferenceInputQueue,
- params->taskParams->inferenceOutputQueue,
- params->taskParams->messageNotify,
- params->taskParams->networks);
+ params->inferenceInputQueue,
+ params->inferenceOutputQueue,
+ params->messageNotify,
+ params->networks);
process.run();
}
@@ -134,6 +122,97 @@ void messageTask(void *pvParameters) {
process.run();
}
+ethosu_core_network_info_req networkInfoIndexedRequest(uint64_t user_arg, uint32_t index) {
+ ethosu_core_network_info_req req = {user_arg, // user_arg
+ { // network
+ ETHOSU_CORE_NETWORK_INDEX, // type
+ {{
+ index, // index
+ 0 // ignored padding of union
+ }}}};
+ return req;
+}
+
+ethosu_core_network_info_req networkInfoBufferRequest(uint64_t user_arg, unsigned char *ptr, uint32_t ptr_size) {
+ ethosu_core_network_info_req req = {user_arg, // user_arg
+ { // network
+ ETHOSU_CORE_NETWORK_BUFFER, // type
+ {{
+ reinterpret_cast<uint32_t>(ptr), // ptr
+ ptr_size // size
+ }}}};
+ return req;
+}
+
+ethosu_core_network_info_rsp networkInfoResponse(uint64_t user_arg) {
+ ethosu_core_network_info_rsp rsp = {
+ user_arg, // user_arg
+ "Vela Optimised", // description
+ 1, // ifm_count
+ {/* not comparable */}, // ifm_sizes
+ 1, // ofm_count
+ {/* not comparable */}, // ofm_sizes
+ ETHOSU_CORE_STATUS_OK // status
+ };
+ return rsp;
+}
+
+ethosu_core_inference_req
+inferenceIndexedRequest(uint64_t user_arg, uint32_t index, uint8_t *data, uint32_t data_size) {
+ ethosu_core_inference_req req = {
+ user_arg, // user_arg
+ 1, // ifm_count
+ { // ifm:
+ {
+ reinterpret_cast<uint32_t>(&inputData[0]), // ptr
+ sizeof(inputData) // size
+ }},
+ 1, // ofm_count
+ { // ofm
+ {
+ reinterpret_cast<uint32_t>(data), // ptr
+ data_size // size
+ }},
+ { // network
+ ETHOSU_CORE_NETWORK_INDEX, // type
+ {{
+ index, // index
+ 0 // ignored padding of union
+ }}},
+ {0, 0, 0, 0, 0, 0, 0, 0}, // pmu_event_config
+ 0 // pmu_cycle_counter_enable
+ };
+ return req;
+}
+
+ethosu_core_inference_req
+inferenceBufferRequest(uint64_t user_arg, unsigned char *ptr, uint32_t ptr_size, uint8_t *data, uint32_t data_size) {
+ ethosu_core_inference_req req = {
+ user_arg, // user_arg
+ 1, // ifm_count
+ { // ifm:
+ {
+ reinterpret_cast<uint32_t>(&inputData[0]), // ptr
+ sizeof(inputData) // size
+ }},
+ 1, // ofm_count
+ { // ofm
+ {
+ reinterpret_cast<uint32_t>(data), // ptr
+ data_size // size
+ }},
+ { // network
+ ETHOSU_CORE_NETWORK_BUFFER, // type
+ {{
+ reinterpret_cast<uint32_t>(ptr), // ptr
+ ptr_size // size
+ }}},
+ {0, 0, 0, 0, 0, 0, 0, 0}, // pmu_event_config
+ 0 // pmu_cycle_counter_enable
+ };
+ return req;
+}
+
void testPing(MessageClient client) {
TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_PING));
TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_PONG));
@@ -210,25 +289,12 @@ void testCapabilities(MessageClient client) {
#endif
}
-void testNetworkInfo(MessageClient client) {
+void testNetworkInfoIndex(MessageClient client) {
const uint64_t fake_user_arg = 42;
- ethosu_core_network_info_req req = {fake_user_arg, // user_arg
- { // network
- ETHOSU_CORE_NETWORK_INDEX, // type
- {{
- 0, // index
- 0 // ignored padding of union
- }}}};
+ const uint32_t network_index = 0;
+ ethosu_core_network_info_req req = networkInfoIndexedRequest(fake_user_arg, network_index);
ethosu_core_network_info_rsp rsp;
- ethosu_core_network_info_rsp expected_rsp = {
- req.user_arg, // user_arg
- "Vela Optimised", // description
- 1, // ifm_count
- {/* not comparable */}, // ifm_sizes
- 1, // ofm_count
- {/* not comparable */}, // ofm_sizes
- 0 // status
- };
+ ethosu_core_network_info_rsp expected_rsp = networkInfoResponse(fake_user_arg);
TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_REQ, req));
TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_RSP, rsp));
@@ -240,32 +306,90 @@ void testNetworkInfo(MessageClient client) {
TEST_ASSERT(expected_rsp.status == rsp.status);
}
-void testInferenceRun(MessageClient client) {
+void testNetworkInfoNonExistantIndex(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ const uint32_t network_index = 1;
+ ethosu_core_network_info_req req = networkInfoIndexedRequest(fake_user_arg, network_index);
+ ethosu_core_network_info_rsp rsp;
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_RSP, rsp));
+
+ TEST_ASSERT(fake_user_arg == rsp.user_arg);
+ TEST_ASSERT(ETHOSU_CORE_STATUS_ERROR == rsp.status);
+}
+
+void testNetworkInfoBuffer(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ uint32_t size = sizeof(Model0::networkModelData);
+ unsigned char *ptr = Model0::networkModelData;
+ ethosu_core_network_info_req req = networkInfoBufferRequest(fake_user_arg, ptr, size);
+ ethosu_core_network_info_rsp rsp;
+ ethosu_core_network_info_rsp expected_rsp = networkInfoResponse(fake_user_arg);
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_RSP, rsp));
+
+ TEST_ASSERT(expected_rsp.user_arg == rsp.user_arg);
+ TEST_ASSERT(std::strncmp(expected_rsp.desc, rsp.desc, sizeof(rsp.desc)) == 0);
+ TEST_ASSERT(expected_rsp.ifm_count == rsp.ifm_count);
+ TEST_ASSERT(expected_rsp.ofm_count == rsp.ofm_count);
+ TEST_ASSERT(expected_rsp.status == rsp.status);
+}
+
+void testNetworkInfoUnparsableBuffer(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ uint32_t size = sizeof(Model0::networkModelData) / 4;
+ unsigned char *ptr = Model0::networkModelData + size;
+ ethosu_core_network_info_req req = networkInfoBufferRequest(fake_user_arg, ptr, size);
+ ethosu_core_network_info_rsp rsp;
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_NETWORK_INFO_RSP, rsp));
+
+ TEST_ASSERT(42 == rsp.user_arg);
+ TEST_ASSERT(ETHOSU_CORE_STATUS_ERROR == rsp.status);
+}
+
+void testInferenceRunIndex(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ const uint32_t network_index = 0;
uint8_t data[sizeof(expectedOutputData)];
- const uint64_t fake_user_arg = 42;
- ethosu_core_inference_req req = {
- fake_user_arg, // user_arg
- 1, // ifm_count
- { // ifm:
- {
- reinterpret_cast<uint32_t>(&inputData[0]), // ptr
- sizeof(inputData) // size
- }},
- 1, // ofm_count
- { // ofm
- {
- reinterpret_cast<uint32_t>(&data[0]), // ptr
- sizeof(data) // size
- }},
- { // network
- ETHOSU_CORE_NETWORK_INDEX, // type
- {{
- 0, // index
- 0 // ignored padding of union
- }}},
- {0, 0, 0, 0, 0, 0, 0, 0}, // pmu_event_config
- 0 // pmu_cycle_counter_enable
- };
+ ethosu_core_inference_req req = inferenceIndexedRequest(fake_user_arg, network_index, data, sizeof(data));
+ ethosu_core_inference_rsp rsp;
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_INFERENCE_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp));
+
+ TEST_ASSERT(req.user_arg == rsp.user_arg);
+ TEST_ASSERT(rsp.ofm_count == 1);
+ TEST_ASSERT(std::memcmp(expectedOutputData, data, sizeof(expectedOutputData)) == 0);
+ TEST_ASSERT(rsp.status == ETHOSU_CORE_STATUS_OK);
+ TEST_ASSERT(rsp.pmu_cycle_counter_enable == req.pmu_cycle_counter_enable);
+ TEST_ASSERT(std::memcmp(rsp.pmu_event_config, req.pmu_event_config, sizeof(req.pmu_event_config)) == 0);
+}
+
+void testInferenceRunNonExistingIndex(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ const uint32_t network_index = 1;
+ uint8_t data[sizeof(expectedOutputData)];
+ ethosu_core_inference_req req = inferenceIndexedRequest(fake_user_arg, network_index, data, sizeof(data));
+ ethosu_core_inference_rsp rsp;
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_INFERENCE_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp));
+
+ TEST_ASSERT(req.user_arg == rsp.user_arg);
+ TEST_ASSERT(rsp.status == ETHOSU_CORE_STATUS_ERROR);
+}
+
+void testInferenceRunBuffer(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ uint32_t network_size = sizeof(Model0::networkModelData);
+ unsigned char *network_ptr = Model0::networkModelData;
+ uint8_t data[sizeof(expectedOutputData)];
+ ethosu_core_inference_req req =
+ inferenceBufferRequest(fake_user_arg, network_ptr, network_size, data, sizeof(data));
ethosu_core_inference_rsp rsp;
TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_INFERENCE_REQ, req));
@@ -279,18 +403,67 @@ void testInferenceRun(MessageClient client) {
TEST_ASSERT(std::memcmp(rsp.pmu_event_config, req.pmu_event_config, sizeof(req.pmu_event_config)) == 0);
}
+void testInferenceRunUnparsableBuffer(MessageClient client) {
+ const uint64_t fake_user_arg = 42;
+ uint32_t network_size = sizeof(Model0::networkModelData) / 4;
+ unsigned char *network_ptr = Model0::networkModelData + network_size;
+ uint8_t data[sizeof(expectedOutputData)];
+ ethosu_core_inference_req req =
+ inferenceBufferRequest(fake_user_arg, network_ptr, network_size, data, sizeof(data));
+ ethosu_core_inference_rsp rsp;
+
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_INFERENCE_REQ, req));
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp));
+
+ TEST_ASSERT(req.user_arg == rsp.user_arg);
+ TEST_ASSERT(rsp.status == ETHOSU_CORE_STATUS_ERROR);
+}
+
+void testSequentiallyQueuedInferenceRuns(MessageClient client) {
+ int runs = 5;
+ uint8_t data[runs][sizeof(expectedOutputData)];
+ const uint64_t fake_user_arg = 42;
+ const uint32_t network_index = 0;
+ ethosu_core_inference_req req;
+ ethosu_core_inference_rsp rsp[runs];
+
+ for (int i = 0; i < runs; i++) {
+ vTaskDelay(150);
+
+ req = inferenceIndexedRequest(fake_user_arg + i, network_index, data[i], sizeof(expectedOutputData));
+ TEST_ASSERT(client.sendInputMessage(ETHOSU_CORE_MSG_INFERENCE_REQ, req));
+ }
+
+ for (int i = 0; i < runs; i++) {
+ TEST_ASSERT(client.waitAndReadOutputMessage(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp[i]));
+ TEST_ASSERT(uint64_t(fake_user_arg + i) == rsp[i].user_arg);
+ TEST_ASSERT(rsp[i].ofm_count == 1);
+ TEST_ASSERT(std::memcmp(expectedOutputData, data[i], sizeof(expectedOutputData)) == 0);
+ TEST_ASSERT(rsp[i].status == ETHOSU_CORE_STATUS_OK);
+ TEST_ASSERT(rsp[i].pmu_cycle_counter_enable == req.pmu_cycle_counter_enable);
+ TEST_ASSERT(std::memcmp(rsp[i].pmu_event_config, req.pmu_event_config, sizeof(req.pmu_event_config)) == 0);
+ }
+}
+
void clientTask(void *) {
printf("Starting client task\n");
MessageClient client(*inputMessageQueue.toQueue(), *outputMessageQueue.toQueue(), mailbox);
- vTaskDelay(10);
+ vTaskDelay(50);
testPing(client);
testVersion(client);
testCapabilities(client);
- testNetworkInfo(client);
- testInferenceRun(client);
+ testNetworkInfoIndex(client);
+ testNetworkInfoNonExistantIndex(client);
+ testNetworkInfoBuffer(client);
+ testNetworkInfoUnparsableBuffer(client);
+ testInferenceRunIndex(client);
+ testInferenceRunNonExistingIndex(client);
+ testInferenceRunBuffer(client);
+ testInferenceRunUnparsableBuffer(client);
+ testSequentiallyQueuedInferenceRuns(client);
exit(0);
}
@@ -300,7 +473,6 @@ void clientTask(void *) {
* scheduler is started.
*/
TaskParams taskParams;
-InferenceTaskParams infParams[NUM_PARALLEL_TASKS];
} // namespace
@@ -320,19 +492,14 @@ int main() {
return ret;
}
- // One inference task for each NPU
- for (size_t n = 0; n < NUM_PARALLEL_TASKS; n++) {
- infParams[n].taskParams = &taskParams;
- infParams[n].arena = reinterpret_cast<uint8_t *>(&tensorArena[n]);
- ret = xTaskCreate(inferenceTask, "inferenceTask", 8 * 1024, &infParams[n], 3, nullptr);
- if (ret != pdPASS) {
- printf("Failed to create 'inferenceTask%d'\n", n);
- return ret;
- }
+ ret = xTaskCreate(inferenceTask, "inferenceTask", 8 * 1024, &taskParams, 3, nullptr);
+ if (ret != pdPASS) {
+ printf("Failed to create 'inferenceTask'\n");
+ return ret;
}
// Task for handling incoming /outgoing messages from the remote host
- ret = xTaskCreate(clientTask, "clientTask", 512, nullptr, 2, nullptr);
+ ret = xTaskCreate(clientTask, "clientTask", 1024, nullptr, 2, nullptr);
if (ret != pdPASS) {
printf("Failed to create 'messageTask'\n");
return ret;
diff --git a/applications/message_handler/test/message_client.cpp b/applications/message_handler/test/message_client.cpp
index 4209564..39d1392 100644
--- a/applications/message_handler/test/message_client.cpp
+++ b/applications/message_handler/test/message_client.cpp
@@ -34,7 +34,7 @@ MessageClient::MessageClient(EthosU::ethosu_core_queue &_inputMessageQueue,
bool MessageClient::sendInputMessage(const uint32_t type, const void *src, uint32_t length) {
if (!input.write(type, src, length)) {
- printf("ERROR: Msg: Failed to write ping request. No mailbox message sent\n");
+ printf("ERROR: Msg: Failed to write message request. No mailbox message sent\n");
return false;
}
@@ -44,7 +44,7 @@ bool MessageClient::sendInputMessage(const uint32_t type, const void *src, uint3
}
bool MessageClient::waitAndReadOutputMessage(const uint32_t expected_type, uint8_t *dst, uint32_t length) {
- constexpr TickType_t delay = pdMS_TO_TICKS(2);
+ constexpr TickType_t delay = pdMS_TO_TICKS(5);
constexpr TickType_t deadline = pdMS_TO_TICKS(/* 1 minute */ 60 * 1000 * 1000);
struct ethosu_core_msg msg;
@@ -68,7 +68,7 @@ bool MessageClient::waitAndReadOutputMessage(const uint32_t expected_type, uint8
}
if (msg.type != expected_type) {
- printf("ERROR: Wrong message type\n");
+ printf("ERROR: Wrong message type. Got %" PRIu32 " expected %" PRIu32 "\n", msg.type, expected_type);
return false;
}
diff --git a/targets/corstone-300/platform.ld b/targets/corstone-300/platform.ld
index 12cc6ee..d22b786 100644
--- a/targets/corstone-300/platform.ld
+++ b/targets/corstone-300/platform.ld
@@ -278,6 +278,10 @@ SECTIONS
*(network_model_sec)
#endif
* (expected_output_data_sec)
+ * (sec_command_stream, sec_weight_data, sec_input_data)
+
+ * (ethosu_core_in_queue)
+ * (ethosu_core_out_queue)
. = ALIGN(4);
} > DDR :rom_dram
diff --git a/targets/corstone-300/platform.scatter b/targets/corstone-300/platform.scatter
index 55e21b7..d683100 100644
--- a/targets/corstone-300/platform.scatter
+++ b/targets/corstone-300/platform.scatter
@@ -144,6 +144,25 @@ APP_IMAGE LR_START LR_SIZE
.ANY (+RO)
}
+ ; DTCM 512kB
+ ; Only accessible from the Cortex-M
+ DTCM DTCM_START (DTCM_SIZE - STACK_SIZE - HEAP_SIZE - __STACKSEAL_SIZE)
+ {
+ .ANY1 (+RW +ZI)
+ }
+
+ ARM_LIB_HEAP (STACK_HEAP - STACK_SIZE - __STACKSEAL_SIZE - HEAP_SIZE) EMPTY ALIGN 8 HEAP_SIZE {}
+ ARM_LIB_STACK (STACK_HEAP - STACK_SIZE - __STACKSEAL_SIZE) EMPTY ALIGN 8 STACK_SIZE {}
+
+#if defined(USE_TRUSTZONE) && defined(TRUSTZONE_SECURE)
+ STACKSEAL +0 EMPTY __STACKSEAL_SIZE {
+ ; Reserve empty region for stack seal immediately after stack
+ }
+#endif
+}
+
+LOAD_REGION_BRAM BRAM_START BRAM_SIZE
+{
#if defined(USE_TRUSTZONE) && defined(TRUSTZONE_SECURE)
; MPS3 BRAM
; Shared between Cortex-M and the NPU
@@ -163,14 +182,10 @@ APP_IMAGE LR_START LR_SIZE
* (.sram.data)
}
#endif
+}
- ; DTCM 512kB
- ; Only accessible from the Cortex-M
- DTCM DTCM_START (DTCM_SIZE - STACK_SIZE - HEAP_SIZE - __STACKSEAL_SIZE)
- {
- .ANY1 (+RW +ZI)
- }
-
+LOAD_REGION_SRAM SRAM_START SRAM_SIZE
+{
; 2MB SSE-300 SRAM (3 cycles read latency) from M55/U55
SRAM SRAM_START SRAM_SIZE
{
@@ -187,15 +202,6 @@ APP_IMAGE LR_START LR_SIZE
; Place scratch buffer in SRAM
* (.bss.ethosu_scratch)
}
-
- ARM_LIB_HEAP (STACK_HEAP - STACK_SIZE - __STACKSEAL_SIZE - HEAP_SIZE) EMPTY ALIGN 8 HEAP_SIZE {}
- ARM_LIB_STACK (STACK_HEAP - STACK_SIZE - __STACKSEAL_SIZE) EMPTY ALIGN 8 STACK_SIZE {}
-
-#if defined(USE_TRUSTZONE) && defined(TRUSTZONE_SECURE)
- STACKSEAL +0 EMPTY __STACKSEAL_SIZE {
- ; Reserve empty region for stack seal immediately after stack
- }
-#endif
}
LOAD_REGION_1 DDR_START DDR_SIZE
@@ -215,6 +221,9 @@ LOAD_REGION_1 DDR_START DDR_SIZE
* (expected_output_data_sec)
* (output_data_sec)
* (sec_command_stream, sec_weight_data, sec_input_data)
+
+ * (ethosu_core_in_queue)
+ * (ethosu_core_out_queue)
}
#if (ETHOSU_ARENA == 1)