diff --git a/modules/hal_ethos_u/Kconfig b/modules/hal_ethos_u/Kconfig index 2c442de51a61a..4ca5136cf023a 100644 --- a/modules/hal_ethos_u/Kconfig +++ b/modules/hal_ethos_u/Kconfig @@ -26,6 +26,16 @@ config ARM_ETHOS_U65_256 bool "using Ethos-U65 with 256 macs" config ARM_ETHOS_U65_512 bool "using Ethos-U65 with 512 macs" +config ARM_ETHOS_U85_128 + bool "using Ethos-U85 with 128 macs" +config ARM_ETHOS_U85_256 + bool "using Ethos-U85 with 256 macs" +config ARM_ETHOS_U85_512 + bool "using Ethos-U85 with 512 macs" +config ARM_ETHOS_U85_1024 + bool "using Ethos-U85 with 1024 macs" +config ARM_ETHOS_U85_2048 + bool "using Ethos-U85 with 2048 macs" endchoice endmenu @@ -37,6 +47,11 @@ config ARM_ETHOS_U_NPU_NAME default "ethos-u65-128" if ARM_ETHOS_U65_128 default "ethos-u65-256" if ARM_ETHOS_U65_256 default "ethos-u65-512" if ARM_ETHOS_U65_512 + default "ethos-u85-128" if ARM_ETHOS_U85_128 + default "ethos-u85-256" if ARM_ETHOS_U85_256 + default "ethos-u85-512" if ARM_ETHOS_U85_512 + default "ethos-u85-1024" if ARM_ETHOS_U85_1024 + default "ethos-u85-2048" if ARM_ETHOS_U85_2048 help Name of the used Arm NPU diff --git a/samples/modules/tflite-micro/hello_world/README.rst b/samples/modules/tflite-micro/hello_world/README.rst index 7a1d89cf83aad..12ac4f93a8313 100644 --- a/samples/modules/tflite-micro/hello_world/README.rst +++ b/samples/modules/tflite-micro/hello_world/README.rst @@ -65,7 +65,7 @@ the :envvar:`PATH` variable, then building and testing can be done with followin commands. 
``` -$ west build -p auto -b mps3/corstone300/an547 samples/modules/tflite-micro/hello_world/ -T sample.tensorflow.helloworld.cmsis_nn +$ west build -p auto -b mps3/corstone300/fvp samples/modules/tflite-micro/hello_world/ -T sample.tensorflow.helloworld.cmsis_nn $ FVP_Corstone_SSE-300_Ethos-U55 build/zephyr/zephyr.elf ``` diff --git a/samples/modules/tflite-micro/hello_world/sample.yaml b/samples/modules/tflite-micro/hello_world/sample.yaml index b0f087f1c44de..e5521bfed2dad 100644 --- a/samples/modules/tflite-micro/hello_world/sample.yaml +++ b/samples/modules/tflite-micro/hello_world/sample.yaml @@ -24,6 +24,6 @@ tests: sample.tensorflow.helloworld.cmsis_nn: tags: tensorflow platform_allow: - - mps3/corstone300/an547 + - mps3/corstone300/fvp extra_configs: - CONFIG_TENSORFLOW_LITE_MICRO_CMSIS_NN_KERNELS=y diff --git a/samples/modules/tflite-micro/tflm_ethosu/README.rst b/samples/modules/tflite-micro/tflm_ethosu/README.rst index 0e8d7279dfd1b..bf27c6279960e 100644 --- a/samples/modules/tflite-micro/tflm_ethosu/README.rst +++ b/samples/modules/tflite-micro/tflm_ethosu/README.rst @@ -43,5 +43,5 @@ commands. .. 
code-block:: bash - $ west build -b mps3/corstone300/an547 zephyr/samples/modules/tflite-micro/tflm_ethosu + $ west build -b mps3/corstone300/fvp zephyr/samples/modules/tflite-micro/tflm_ethosu $ FVP_Corstone_SSE-300_Ethos-U55 build/zephyr/zephyr.elf diff --git a/samples/modules/tflite-micro/tflm_ethosu/sample.yaml b/samples/modules/tflite-micro/tflm_ethosu/sample.yaml index 37d97a87d346d..86a7542fd6b80 100644 --- a/samples/modules/tflite-micro/tflm_ethosu/sample.yaml +++ b/samples/modules/tflite-micro/tflm_ethosu/sample.yaml @@ -10,4 +10,4 @@ tests: filter: dt_compat_enabled("arm,ethos-u") build_only: true integration_platforms: - - mps3/corstone300/an547 + - mps3/corstone300/fvp diff --git a/samples/modules/tflite-micro/tflm_ethosu/src/inference_process.cpp b/samples/modules/tflite-micro/tflm_ethosu/src/inference_process.cpp index 5807588302e10..352c8308f738a 100644 --- a/samples/modules/tflite-micro/tflm_ethosu/src/inference_process.cpp +++ b/samples/modules/tflite-micro/tflm_ethosu/src/inference_process.cpp @@ -28,7 +28,7 @@ bool copyOutput(const TfLiteTensor &src, InferenceProcess::DataPtr &dst) } if (src.bytes > dst.size) { - printk("Tensor size mismatch (bytes): actual=%d, expected%d.\n", src.bytes, + printf("Tensor size mismatch (bytes): actual=%d, expected=%d.\n", src.bytes, dst.size); return true; } @@ -112,7 +112,7 @@ bool InferenceProcess::runJob(InferenceJob &job) /* Get model handle and verify that the version is correct */ const tflite::Model *model = ::tflite::GetModel(job.networkModel.data); if (model->version() != TFLITE_SCHEMA_VERSION) { - printk("Model schema version unsupported: version=%" PRIu32 ", supported=%d.\n", + printf("Model schema version unsupported: version=%" PRIu32 ", supported=%d.\n", model->version(), TFLITE_SCHEMA_VERSION); return true; } @@ -126,12 +126,12 @@ bool InferenceProcess::runJob(InferenceJob &job) /* Allocate tensors */ TfLiteStatus allocate_status = interpreter.AllocateTensors(); if (allocate_status != kTfLiteOk) { - 
printk("Failed to allocate tensors for inference. job=%p\n", &job); + printf("Failed to allocate tensors for inference. job=%p\n", &job); return true; } if (job.input.size() != interpreter.inputs_size()) { - printk("Number of job and network inputs do not match. input=%zu, network=%zu\n", + printf("Number of job and network inputs do not match. input=%zu, network=%zu\n", job.input.size(), interpreter.inputs_size()); return true; } @@ -142,7 +142,7 @@ bool InferenceProcess::runJob(InferenceJob &job) const TfLiteTensor *tensor = interpreter.input(i); if (input.size != tensor->bytes) { - printk("Input tensor size mismatch. index=%zu, input=%zu, network=%u\n", i, + printf("Input tensor size mismatch. index=%zu, input=%zu, network=%u\n", i, input.size, tensor->bytes); return true; } @@ -154,14 +154,14 @@ bool InferenceProcess::runJob(InferenceJob &job) /* Run the inference */ TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - printk("Invoke failed for inference. job=%s\n", job.name.c_str()); + printf("Invoke failed for inference. job=%s\n", job.name.c_str()); return true; } /* Copy output data */ if (job.output.size() > 0) { if (interpreter.outputs_size() != job.output.size()) { - printk("Number of job and network outputs do not match. job=%zu, network=%u\n", + printf("Number of job and network outputs do not match. job=%zu, network=%u\n", job.output.size(), interpreter.outputs_size()); return true; } @@ -175,7 +175,7 @@ bool InferenceProcess::runJob(InferenceJob &job) if (job.expectedOutput.size() > 0) { if (job.expectedOutput.size() != interpreter.outputs_size()) { - printk("Number of job and network expected outputs do not match. job=%zu, network=%zu\n", + printf("Number of job and network expected outputs do not match. 
job=%zu, network=%zu\n", job.expectedOutput.size(), interpreter.outputs_size()); return true; } @@ -185,7 +185,7 @@ bool InferenceProcess::runJob(InferenceJob &job) const TfLiteTensor *output = interpreter.output(i); if (expected.size != output->bytes) { - printk("Expected output tensor size mismatch. index=%u, expected=%zu, network=%zu\n", + printf("Expected output tensor size mismatch. index=%u, expected=%zu, network=%zu\n", i, expected.size, output->bytes); return true; } @@ -193,7 +193,7 @@ bool InferenceProcess::runJob(InferenceJob &job) for (unsigned int j = 0; j < output->bytes; ++j) { if (output->data.uint8[j] != static_cast<const uint8_t *>(expected.data)[j]) { - printk("Expected output tensor data mismatch. index=%u, offset=%u, expected=%02x, network=%02x\n", + printf("Expected output tensor data mismatch. index=%u, offset=%u, expected=%02x, network=%02x\n", i, j, static_cast<const uint8_t *>(expected.data)[j], output->data.uint8[j]); return true; diff --git a/samples/modules/tflite-micro/tflm_ethosu/src/main.cpp b/samples/modules/tflite-micro/tflm_ethosu/src/main.cpp index 5671b0a071854..f951fb7b91fd5 100644 --- a/samples/modules/tflite-micro/tflm_ethosu/src/main.cpp +++ b/samples/modules/tflite-micro/tflm_ethosu/src/main.cpp @@ -108,7 +108,7 @@ void *allocateHeap(const size_t size) uint8_t *buf = static_cast<uint8_t *>(k_malloc(size)); if ((buf == nullptr) || (heap == nullptr)) { - printk("Heap allocation failed. heap=%p, buf=%p, size=%zu\n", heap, buf, size); + printf("Heap allocation failed. heap=%p, buf=%p, size=%zu\n", heap, buf, size); exit(1); } @@ -133,17 +133,17 @@ void inferenceProcessTask(void *_name, void *heap, void *_params) xInferenceJob *job = static_cast<xInferenceJob *>(k_queue_get(params->queueHandle, Z_FOREVER)); - printk("%s: Received inference job. job=%p\n", name->c_str(), job); + printf("%s: Received inference job. job=%p\n", name->c_str(), job); /* Run inference */ job->status = inferenceProcess.runJob(*job); - printk("%s: Sending inference response. 
job=%p\n", name->c_str(), job); + printf("%s: Sending inference response. job=%p\n", name->c_str(), job); /* Return inference message */ int ret = k_queue_alloc_append(job->responseQueue, job); if (0 != ret) { - printk("%s: Failed to send message\n", name->c_str()); + printf("%s: Failed to send message\n", name->c_str()); exit(1); } } @@ -177,13 +177,13 @@ void inferenceSenderTask(void *_name, void *heap, void *_queue) { DataPtr(expectedOutputData, sizeof(expectedOutputData)) }, &senderQueue); - printk("%s: Sending inference. job=%p, name=%s\n", name->c_str(), &job, + printf("%s: Sending inference. job=%p, name=%s\n", name->c_str(), &job, job.name.c_str()); /* Queue job */ ret = k_queue_alloc_append(inferenceQueue, &job); if (0 != ret) { - printk("%s: Failed to send message\n", name->c_str()); + printf("%s: Failed to send message\n", name->c_str()); exit(1); } } @@ -193,7 +193,7 @@ void inferenceSenderTask(void *_name, void *heap, void *_queue) xInferenceJob *job = static_cast<xInferenceJob *>(k_queue_get(&senderQueue, Z_FOREVER)); - printk("%s: Received job response. job=%p, status=%u\n", name->c_str(), job, + printf("%s: Received job response. 
job=%p, status=%u\n", name->c_str(), job, job->status); totalCompletedJobs++; @@ -229,7 +229,7 @@ int main() const size_t stackSize = 2048; k_thread_stack_t *stack = static_cast<k_thread_stack_t *>(k_malloc(stackSize)); if (stack == nullptr) { - printk("Failed to allocate stack to 'inferenceSenderTask%i'\n", n); + printf("Failed to allocate stack to 'inferenceSenderTask%i'\n", n); exit(1); } @@ -239,7 +239,7 @@ int main() thread.id = k_thread_create(&thread.thread, stack, stackSize, inferenceSenderTask, name, heapPtr, &inferenceQueue, 3, 0, K_FOREVER); if (thread.id == 0) { - printk("Failed to create 'inferenceSenderTask%i'\n", n); + printf("Failed to create 'inferenceSenderTask%i'\n", n); exit(1); } @@ -252,7 +252,7 @@ int main() const size_t stackSize = 8192; k_thread_stack_t *stack = static_cast<k_thread_stack_t *>(k_malloc(stackSize)); if (stack == nullptr) { - printk("Failed to allocate stack to 'inferenceSenderTask%i'\n", n); + printf("Failed to allocate stack to 'inferenceSenderTask%i'\n", n); exit(1); } @@ -265,7 +265,7 @@ int main() thread.id = k_thread_create(&thread.thread, stack, stackSize, inferenceProcessTask, name, heapPtr, &taskParam, 2, 0, K_FOREVER); if (thread.id == 0) { - printk("Failed to create 'inferenceProcessTask%i'\n", n); + printf("Failed to create 'inferenceProcessTask%i'\n", n); exit(1); } @@ -283,7 +283,7 @@ int main() /* Safety belt */ k_thread_suspend(k_current_get()); - printk("Zephyr application failed to initialise \n"); + printf("Zephyr application failed to initialise \n"); return 1; } diff --git a/west.yml b/west.yml index e3c82b4482152..8eaeb0d9792f0 100644 --- a/west.yml +++ b/west.yml @@ -163,7 +163,7 @@ manifest: groups: - hal - name: hal_ethos_u - revision: 8e2cf756b474eff9a32a9bdf1775d9620f1eadcf + revision: 50ddffca1cc700112f25ad9bc077915a0355ee5d path: modules/hal/ethos_u groups: - hal