@@ -27,6 +27,7 @@
 #include <string.h>
 #include <stdbool.h>
 #include <irq_offload.h>
+#include <sys/check.h>
 
 static struct k_spinlock lock;
 
@@ -882,3 +883,90 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
 	k_sem_give(&offload_sem);
 }
 #endif
+
+#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
+#ifdef CONFIG_STACK_GROWS_UP
+#error "Unsupported configuration for stack analysis"
+#endif
+
+int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
+				    size_t *unused_ptr)
+{
+	const u8_t *start = (u8_t *)thread->stack_info.start;
+	size_t size = thread->stack_info.size;
+	size_t unused = 0;
+	const u8_t *checked_stack = start;
+	/* Take the address of any local variable as a shallow bound for the
+	 * stack pointer. Addresses above it are guaranteed to be
+	 * accessible.
+	 */
+	const u8_t *stack_pointer = (const u8_t *)&start;
+
+	/* If we are currently running on the stack being analyzed, some
+	 * memory management hardware will generate an exception if we
+	 * read unused stack memory.
+	 *
+	 * This never happens when invoked from user mode, as user mode
+	 * will always run this function on the privilege elevation stack.
+	 */
+	if ((stack_pointer > start) && (stack_pointer <= (start + size)) &&
+	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
+		/* TODO: We could add an arch_ API call to temporarily
+		 * disable stack checking in the CPU, but this would
+		 * need to be properly managed with respect to context
+		 * switches and interrupts.
+		 */
+		return -ENOTSUP;
+	}
+
+	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
+		/* The first 4 bytes of the stack buffer are reserved for
+		 * the sentinel value; they won't be 0xAAAAAAAA for thread
+		 * stacks.
+		 *
+		 * FIXME: thread->stack_info.start ought to reflect
+		 * this!
+		 */
+		checked_stack += 4;
+		size -= 4;
+	}
+
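+	/* CONFIG_INIT_STACKS fills the stack buffer with 0xAA at thread
+	 * creation. The stack grows down, so unused memory sits at the
+	 * lowest addresses: count 0xAA bytes from the bottom up until
+	 * the first byte the thread has written.
+	 */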
+	for (size_t i = 0; i < size; i++) {
+		if (checked_stack[i] == 0xaaU) {
+			unused++;
+		} else {
+			break;
+		}
+	}
+
+	*unused_ptr = unused;
+
+	return 0;
+}
+
+#ifdef CONFIG_USERSPACE
+int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
+				    size_t *unused_ptr)
+{
+	size_t unused;
+	int ret;
+
+	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
+	CHECKIF(ret != 0) {
+		return ret;
+	}
+
+	ret = z_impl_k_thread_stack_space_get(thread, &unused);
+	CHECKIF(ret != 0) {
+		return ret;
+	}
+
+	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
+	CHECKIF(ret != 0) {
+		return ret;
+	}
+
+	return 0;
+}
+#include <syscalls/k_thread_stack_space_get_mrsh.c>
+#endif /* CONFIG_USERSPACE */
+#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
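
A minimal usage sketch of the new API (not part of the commit): with
CONFIG_INIT_STACKS and CONFIG_THREAD_STACK_INFO enabled, a thread can query
a stack's high-water mark through the public k_thread_stack_space_get()
syscall. The helper report_stack_usage() below is hypothetical.

    #include <zephyr.h>
    #include <sys/printk.h>

    /* Hypothetical helper: report how much of a thread's stack still
     * holds the 0xAA fill pattern, i.e. has never been written.
     */
    static void report_stack_usage(const struct k_thread *thread)
    {
            size_t unused;
            int ret = k_thread_stack_space_get(thread, &unused);

            if (ret == 0) {
                    printk("unused stack: %u of %u bytes\n",
                           (unsigned int)unused,
                           (unsigned int)thread->stack_info.size);
            } else {
                    /* -ENOTSUP: inspecting the currently running stack is
                     * refused when CONFIG_NO_UNUSED_STACK_INSPECTION is set.
                     */
                    printk("stack space query failed: %d\n", ret);
            }
    }

Calling report_stack_usage(k_current_get()) works from both supervisor and
user threads; user-mode callers go through the z_vrfy_ handler above, which
validates the thread object and copies the result back to user memory.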