diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c
index a8075259cab3a..9abdd4f89496a 100644
--- a/arch/x86/core/x86_mmu.c
+++ b/arch/x86/core/x86_mmu.c
@@ -937,11 +937,11 @@ static void apply_mem_partition(struct x86_page_tables *ptables,
         }
 
         __ASSERT(partition->start >= DT_PHYS_RAM_ADDR,
-                 "region at %08lx[%u] extends below system ram start 0x%08x",
+                 "region at %08lx[%zu] extends below system ram start 0x%08x",
                  partition->start, partition->size, DT_PHYS_RAM_ADDR);
         __ASSERT(((partition->start + partition->size) <=
                   (DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024U))),
-                 "region at %08lx[%u] end at %08lx extends beyond system ram end 0x%08x",
+                 "region at %08lx[%zu] end at %08lx extends beyond system ram end 0x%08x",
                  partition->start, partition->size,
                  partition->start + partition->size,
                  (DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024U)));
diff --git a/include/app_memory/app_memdomain.h b/include/app_memory/app_memdomain.h
index c429c285e4cb4..40227d931ae42 100644
--- a/include/app_memory/app_memdomain.h
+++ b/include/app_memory/app_memdomain.h
@@ -110,8 +110,8 @@ struct z_app_region {
         extern char Z_APP_START(name)[]; \
         extern char Z_APP_SIZE(name)[]; \
         struct k_mem_partition name = { \
-                .start = (u32_t) &Z_APP_START(name), \
-                .size = (u32_t) &Z_APP_SIZE(name), \
+                .start = (uintptr_t) &Z_APP_START(name), \
+                .size = (size_t) &Z_APP_SIZE(name), \
                 .attr = K_MEM_PARTITION_P_RW_U_RW \
         }; \
         extern char Z_APP_BSS_START(name)[]; \
diff --git a/include/kernel.h b/include/kernel.h
index e510e39d5d1a7..237d1856402cb 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -168,11 +168,11 @@ enum k_objects {
 /* Table generated by gperf, these objects are retrieved via
  * z_object_find() */
 struct _k_object {
-        char *name;
+        void *name;
         u8_t perms[CONFIG_MAX_THREAD_BYTES];
         u8_t type;
         u8_t flags;
-        u32_t data;
+        uintptr_t data;
 } __packed __aligned(4);
 
 struct _k_object_assignment {
@@ -516,7 +516,7 @@ struct _thread_stack_info {
          * the size of the actual area, starting from the start member,
          * that should be writable by the thread
          */
-        u32_t size;
+        size_t size;
 };
 
 typedef struct _thread_stack_info _thread_stack_info_t;
@@ -4973,7 +4973,7 @@ extern void z_timer_expiration_handler(struct _timeout *t);
 #define K_THREAD_STACK_LEN(size) ARCH_THREAD_STACK_LEN(size)
 #define K_THREAD_STACK_MEMBER(sym, size) ARCH_THREAD_STACK_MEMBER(sym, size)
 #define K_THREAD_STACK_SIZEOF(sym) ARCH_THREAD_STACK_SIZEOF(sym)
-#define K_THREAD_STACK_RESERVED ARCH_THREAD_STACK_RESERVED
+#define K_THREAD_STACK_RESERVED ((size_t)ARCH_THREAD_STACK_RESERVED)
 static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
 {
         return ARCH_THREAD_STACK_BUFFER(sym);
@@ -5082,7 +5082,7 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
  * the stack object, and does not account for additional space used due to
  * enforce alignment.
  */
-#define K_THREAD_STACK_RESERVED 0
+#define K_THREAD_STACK_RESERVED ((size_t)0U)
 
 /**
  * @brief Get a pointer to the physical stack buffer
@@ -5128,7 +5128,7 @@ struct k_mem_partition {
         /** start address of memory partition */
         uintptr_t start;
         /** size of memory partition */
-        u32_t size;
+        size_t size;
 #if defined(CONFIG_MEMORY_PROTECTION)
         /** attribute of memory partition */
         k_mem_partition_attr_t attr;
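
A note on the struct _k_object and k_mem_partition changes above: the data field holds either a small integer (a thread index) or the address of a backing object such as a k_mutex, so it must be pointer-sized once 64-bit targets are in play. The sketch below only illustrates the round-trip that motivates uintptr_t here; the struct and values are hypothetical, not the Zephyr definitions.

    #include <stdint.h>

    struct example_obj {
            void *name;     /* keyed by the object's own address */
            uintptr_t data; /* small index or a pointer value */
    };

    int main(void)
    {
            int backing = 42;          /* stands in for a k_mutex */
            struct example_obj obj;

            /* A u32_t here would truncate the address on LP64 targets. */
            obj.data = (uintptr_t)&backing;

            return (int *)obj.data == &backing ? 0 : 1;
    }
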
diff --git a/include/syscall_handler.h b/include/syscall_handler.h
index cec1cd463a0c7..f2e17906b700a 100644
--- a/include/syscall_handler.h
+++ b/include/syscall_handler.h
@@ -316,8 +316,8 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen);
 #define Z_SYSCALL_MEMORY(ptr, size, write) \
         Z_SYSCALL_VERIFY_MSG(arch_buffer_validate((void *)ptr, size, write) \
                              == 0, \
-                             "Memory region %p (size %u) %s access denied", \
-                             (void *)(ptr), (u32_t)(size), \
+                             "Memory region %p (size %zu) %s access denied", \
+                             (void *)(ptr), (size_t)(size), \
                              write ? "write" : "read")
 
 /**
@@ -354,12 +354,12 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen);
 #define Z_SYSCALL_MEMORY_ARRAY(ptr, nmemb, size, write) \
         ({ \
-                u32_t product; \
-                Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow((u32_t)(nmemb), \
-                                                       (u32_t)(size), \
-                                                       &product), \
-                                     "%ux%u array is too large", \
-                                     (u32_t)(nmemb), (u32_t)(size)) || \
+                size_t product; \
+                Z_SYSCALL_VERIFY_MSG(!size_mul_overflow((size_t)(nmemb), \
+                                                        (size_t)(size), \
+                                                        &product), \
+                                     "%zux%zu array is too large", \
+                                     (size_t)(nmemb), (size_t)(size)) || \
                 Z_SYSCALL_MEMORY(ptr, product, write); \
         })
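
The reworked Z_SYSCALL_MEMORY_ARRAY macro above relies on a size_t-based overflow helper named size_mul_overflow. A minimal sketch of such a helper, assuming the GCC/Clang overflow builtins are available (the in-tree Zephyr implementation may be structured differently):

    #include <stdbool.h>
    #include <stddef.h>

    /* Compute a * b into *res; return true if the multiplication wrapped. */
    static inline bool size_mul_overflow(size_t a, size_t b, size_t *res)
    {
            return __builtin_mul_overflow(a, b, res);
    }

Used as in the macro: reject the request when size_mul_overflow(nmemb, size, &product) reports overflow, otherwise validate the product-sized buffer.
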
diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c
index 4eb24d7719cb5..8534ae71ca37f 100644
--- a/kernel/mem_domain.c
+++ b/kernel/mem_domain.c
@@ -101,7 +101,7 @@ void k_mem_domain_init(struct k_mem_domain *domain, u8_t num_parts,
                 __ASSERT(parts[i] != NULL, "");
                 __ASSERT((parts[i]->start + parts[i]->size) > parts[i]->start,
-                         "invalid partition %p size %d",
+                         "invalid partition %p size %zu",
                          parts[i], parts[i]->size);
 
 #if defined(CONFIG_EXECUTE_XOR_WRITE) || \
@@ -151,7 +151,7 @@ void k_mem_domain_add_partition(struct k_mem_domain *domain,
         __ASSERT(domain != NULL, "");
         __ASSERT(part != NULL, "");
         __ASSERT((part->start + part->size) > part->start,
-                 "invalid partition %p size %d", part, part->size);
+                 "invalid partition %p size %zu", part, part->size);
 
 #if defined(CONFIG_EXECUTE_XOR_WRITE) || \
         defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
diff --git a/kernel/thread.c b/kernel/thread.c
index 920d3e5b69ca5..ef50f137bcb41 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -599,7 +599,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
                                void *p1, void *p2, void *p3,
                                int prio, u32_t options, s32_t delay)
 {
-        u32_t total_size;
+        size_t total_size;
         struct _k_object *stack_object;
 
         /* The thread and stack objects *must* be in an uninitialized state */
@@ -613,17 +613,17 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
         /* Verify that the stack size passed in is OK by computing the total
          * size and comparing it with the size value in the object metadata
          */
-        Z_OOPS(Z_SYSCALL_VERIFY_MSG(!u32_add_overflow(K_THREAD_STACK_RESERVED,
-                                                      stack_size, &total_size),
-                                    "stack size overflow (%u+%u)",
-                                    (unsigned int) stack_size,
+        Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
+                                                       stack_size, &total_size),
+                                    "stack size overflow (%zu+%zu)",
+                                    stack_size,
                                     K_THREAD_STACK_RESERVED));
 
         /* Testing less-than-or-equal since additional room may have been
          * allocated for alignment constraints
          */
         Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
-                                    "stack size %u is too big, max is %u",
+                                    "stack size %zu is too big, max is %lu",
                                     total_size, stack_object->data));
 
         /* User threads may only create other user threads and they can't
diff --git a/kernel/userspace.c b/kernel/userspace.c
index 36e52af6b0bdf..31dc2d223bddd 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -186,7 +186,7 @@ static struct dyn_obj *dyn_object_find(void *obj)
  *
  * @return true if successful, false if failed
  **/
-static bool thread_idx_alloc(u32_t *tidx)
+static bool thread_idx_alloc(uintptr_t *tidx)
 {
         int i;
         int idx;
@@ -225,7 +225,7 @@ static bool thread_idx_alloc(u32_t *tidx)
  *
  * @param tidx The thread index to be freed
  **/
-static void thread_idx_free(u32_t tidx)
+static void thread_idx_free(uintptr_t tidx)
 {
         /* To prevent leaked permission when index is recycled */
         z_object_wordlist_foreach(clear_perms_cb, (void *)tidx);
@@ -236,7 +236,7 @@ static void thread_idx_free(u32_t tidx)
 void *z_impl_k_object_alloc(enum k_objects otype)
 {
         struct dyn_obj *dyn_obj;
-        u32_t tidx;
+        uintptr_t tidx;
 
         /* Stacks are not supported, we don't yet have mem pool APIs
          * to request memory that is aligned
@@ -353,7 +353,7 @@ static int thread_index_get(struct k_thread *t)
         return ko->data;
 }
 
-static void unref_check(struct _k_object *ko, int index)
+static void unref_check(struct _k_object *ko, uintptr_t index)
 {
         k_spinlock_key_t key = k_spin_lock(&obj_lock);
 
@@ -445,14 +445,14 @@ void z_thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
 
 static void clear_perms_cb(struct _k_object *ko, void *ctx_ptr)
 {
-        int id = (int)ctx_ptr;
+        uintptr_t id = (uintptr_t)ctx_ptr;
 
         unref_check(ko, id);
 }
 
 void z_thread_perms_all_clear(struct k_thread *thread)
 {
-        int index = thread_index_get(thread);
+        uintptr_t index = thread_index_get(thread);
 
         if (index != -1) {
                 z_object_wordlist_foreach(clear_perms_cb, (void *)index);
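
The kernel/userspace.c changes above pass a thread index through the void *ctx_ptr argument of z_object_wordlist_foreach(), and widening the index to uintptr_t is what makes that round-trip safe on 64-bit targets. A standalone illustration of the idiom (simplified callback, not the Zephyr signature):

    #include <stdint.h>
    #include <stdio.h>

    static void clear_perms_cb(void *ctx_ptr)
    {
            /* Recover the index without truncation. */
            uintptr_t id = (uintptr_t)ctx_ptr;

            printf("clearing permissions for thread index %lu\n",
                   (unsigned long)id);
    }

    int main(void)
    {
            uintptr_t index = 3;

            /* The index travels inside the pointer value itself. */
            clear_perms_cb((void *)index);
            return 0;
    }
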
diff --git a/lib/os/mutex.c b/lib/os/mutex.c
index 4f9811c4aa60a..84f207074ffa6 100644
--- a/lib/os/mutex.c
+++ b/lib/os/mutex.c
@@ -21,7 +21,7 @@ static struct k_mutex *get_k_mutex(struct sys_mutex *mutex)
         return (struct k_mutex *)obj->data;
 }
 
-static bool check_sys_mutex_addr(u32_t addr)
+static bool check_sys_mutex_addr(struct sys_mutex *addr)
 {
         /* sys_mutex memory is never touched, just used to lookup the
          * underlying k_mutex, but we don't want threads using mutexes
@@ -44,7 +44,7 @@ int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
 static inline int z_vrfy_z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
                                                  s32_t timeout)
 {
-        if (check_sys_mutex_addr((u32_t) mutex)) {
+        if (check_sys_mutex_addr(mutex)) {
                 return -EACCES;
         }
 
@@ -70,7 +70,7 @@ int z_impl_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
 
 static inline int z_vrfy_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
 {
-        if (check_sys_mutex_addr((u32_t) mutex)) {
+        if (check_sys_mutex_addr(mutex)) {
                 return -EACCES;
         }
diff --git a/samples/userspace/shared_mem/src/main.c b/samples/userspace/shared_mem/src/main.c
index 4b96b128cf07e..43ff7c1c34149 100644
--- a/samples/userspace/shared_mem/src/main.c
+++ b/samples/userspace/shared_mem/src/main.c
@@ -116,7 +116,7 @@ void main(void)
                         K_FOREVER);
         k_thread_access_grant(tENC, &allforone);
         /* use K_FOREVER followed by k_thread_start*/
-        printk("ENC Thread Created %08X\n", (unsigned int) tENC);
+        printk("ENC Thread Created %p\n", tENC);
         k_mem_domain_init(&dom1, 3, dom1_parts);
         printk("Partitions added to dom1\n");
         k_mem_domain_add_thread(&dom1, tENC);
@@ -128,7 +128,7 @@ void main(void)
                         -1, K_USER, K_FOREVER);
         k_thread_access_grant(tPT, &allforone);
-        printk("PT Thread Created %08X\n", (unsigned int) tPT);
+        printk("PT Thread Created %p\n", tPT);
         k_mem_domain_init(&dom0, 2, dom0_parts);
         k_mem_domain_add_thread(&dom0, tPT);
         printk("dom0 Created\n");
@@ -138,7 +138,7 @@ void main(void)
                         -1, K_USER, K_FOREVER);
         k_thread_access_grant(tCT, &allforone);
-        printk("CT Thread Created %08X\n", (unsigned int) tCT);
+        printk("CT Thread Created %p\n", tCT);
         k_mem_domain_init(&dom2, 2, dom2_parts);
         k_mem_domain_add_thread(&dom2, tCT);
         printk("dom2 Created\n");
diff --git a/scripts/elf_helper.py b/scripts/elf_helper.py
index ad01d13687dd3..44b53162e2ea3 100644
--- a/scripts/elf_helper.py
+++ b/scripts/elf_helper.py
@@ -71,10 +71,10 @@ def __init__(self, type_obj, addr):
             self.data = thread_counter
             thread_counter = thread_counter + 1
         elif self.type_obj.name == "sys_mutex":
-            self.data = "(u32_t)(&kernel_mutexes[%d])" % sys_mutex_counter
+            self.data = "(uintptr_t)(&kernel_mutexes[%d])" % sys_mutex_counter
             sys_mutex_counter += 1
         elif self.type_obj.name == "k_futex":
-            self.data = "(u32_t)(&futex_data[%d])" % futex_counter
+            self.data = "(uintptr_t)(&futex_data[%d])" % futex_counter
             futex_counter += 1
         else:
             self.data = 0
@@ -353,6 +353,19 @@ def analyze_typedef(die):
         type_env[die.offset] = type_env[type_offset]
 
 
+def unpack_pointer(elf, data, offset):
+    endian_code = "<" if elf.little_endian else ">"
+    if elf.elfclass == 32:
+        size_code = "I"
+        size = 4
+    else:
+        size_code = "Q"
+        size = 8
+
+    return struct.unpack(endian_code + size_code,
+                         data[offset:offset + size])[0]
+
+
 def addr_deref(elf, addr):
     for section in elf.iter_sections():
         start = section['sh_addr']
@@ -361,14 +374,15 @@ def addr_deref(elf, addr):
         if start <= addr < end:
             data = section.data()
             offset = addr - start
-            return struct.unpack("I",
-                                 data[offset:offset + 4])[0]
+            return unpack_pointer(elf, data, offset)
 
     return 0
 
 
 def device_get_api_addr(elf, addr):
-    return addr_deref(elf, addr + 4)
+    # Read device->driver API
+    offset = 4 if elf.elfclass == 32 else 8
+    return addr_deref(elf, addr + offset)
 
 
 def get_filename_lineno(die):
diff --git a/scripts/gen_kobject_list.py b/scripts/gen_kobject_list.py
index 1e2cdd2be250f..e891472130e11 100755
--- a/scripts/gen_kobject_list.py
+++ b/scripts/gen_kobject_list.py
@@ -200,7 +200,17 @@ def write_gperf_table(fp, eh, objs, static_begin, static_end):
         initialized = static_begin <= obj_addr < static_end
         is_driver = obj_type.startswith("K_OBJ_DRIVER_")
 
-        byte_str = struct.pack("I", obj_addr)
+        if "CONFIG_64BIT" in syms:
+            format_code = "Q"
+        else:
+            format_code = "I"
+
+        if eh.little_endian:
+            endian = "<"
+        else:
+            endian = ">"
+
+        byte_str = struct.pack(endian + format_code, obj_addr)
         fp.write("\"")
         for byte in byte_str:
             val = "\\x%02x" % byte
diff --git a/scripts/process_gperf.py b/scripts/process_gperf.py
index 921f0ac9bf2d9..ac46fc7726c6e 100755
--- a/scripts/process_gperf.py
+++ b/scripts/process_gperf.py
@@ -49,8 +49,8 @@ def reformat_str(match_obj):
     # Nip quotes
     addr_str = addr_str[1:-1]
 
-    addr_vals = [0, 0, 0, 0]
-    ctr = 3
+    addr_vals = [0, 0, 0, 0, 0, 0, 0, 0]
+    ctr = 7
     i = 0
 
     while True:
@@ -74,7 +74,7 @@ def reformat_str(match_obj):
 
         ctr -= 1
 
-    return "(char *)0x%02x%02x%02x%02x" % tuple(addr_vals)
+    return "(char *)0x%02x%02x%02x%02x%02x%02x%02x%02x" % tuple(addr_vals)
 
 
 def process_line(line, fp):
@@ -97,9 +97,9 @@ def process_line(line, fp):
         warn("gperf %s is not tested, versions %s through %s supported" %
              (v, v_lo, v_hi))
 
-    # Replace length lookups with constant len of 4 since we're always
+    # Replace length lookups with constant len since we're always
     # looking at pointers
-    line = re.sub(r'lengthtable\[key\]', r'4', line)
+    line = re.sub(r'lengthtable\[key\]', r'sizeof(void *)', line)
 
     # Empty wordlist entries to have NULLs instead of ""
     line = re.sub(r'[{]["]["][}]', r'{}', line)
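
The two script changes above widen the generated gperf keys: gen_kobject_list.py now packs each kernel object address as a native-width, native-endian byte string, and process_gperf.py treats every key as sizeof(void *) bytes instead of a hard-coded 4. The key really is just the raw bytes of the pointer, as in this small illustration (assumes a little-endian host, as on x86):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int some_object;
            void *addr = &some_object;
            unsigned char key[sizeof(void *)];

            /* These are the bytes the generated gperf table hashes. */
            memcpy(key, &addr, sizeof(addr));

            for (size_t i = 0; i < sizeof(key); i++) {
                    printf("\\x%02x", key[i]);
            }
            printf("\n");
            return 0;
    }
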
diff --git a/tests/kernel/mem_protect/mem_protect/src/kobject.c b/tests/kernel/mem_protect/mem_protect/src/kobject.c
index e34e15dd27879..4db34bb9f97a0 100644
--- a/tests/kernel/mem_protect/mem_protect/src/kobject.c
+++ b/tests/kernel/mem_protect/mem_protect/src/kobject.c
@@ -129,7 +129,7 @@ void test_thread_without_kobject_permission(void *p1, void *p2, void *p3)
 void kobject_user_test4(void *p1, void *p2, void *p3)
 {
         /* should cause a fault */
-        if ((u32_t)p1 == 1U) {
+        if ((uintptr_t)p1 == 1U) {
                 valid_fault = false;
         } else {
                 valid_fault = true;
diff --git a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
index 8b93c3dbf349f..ee44d6bfef62e 100644
--- a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
+++ b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
@@ -91,7 +91,7 @@ static inline void set_valid_fault_value(int test_case_number)
 /* Userspace function */
 void mem_domain_for_user(void *tc_number, void *p2, void *p3)
 {
-        set_valid_fault_value((u32_t)tc_number);
+        set_valid_fault_value((int)tc_number);
 
         mem_domain_buf[0] = 10U;
         if (valid_fault == false) {
@@ -104,7 +104,7 @@ void mem_domain_for_user(void *tc_number, void *p2, void *p3)
 
 void mem_domain_test_1(void *tc_number, void *p2, void *p3)
 {
-        if ((u32_t)tc_number == 1U) {
+        if ((uintptr_t)tc_number == 1U) {
                 mem_domain_buf[0] = 10U;
                 k_mem_domain_remove_thread(k_current_get());
                 k_mem_domain_add_thread(&mem_domain_mem_domain,
@@ -125,7 +125,7 @@ void mem_domain_test_1(void *tc_number, void *p2, void *p3)
  */
 void test_mem_domain_valid_access(void *p1, void *p2, void *p3)
 {
-        u32_t tc_number = 1U;
+        uintptr_t tc_number = 1U;
 
         mem_domain_init();
 
@@ -151,7 +151,7 @@ void test_mem_domain_valid_access(void *p1, void *p2, void *p3)
  */
 void test_mem_domain_invalid_access(void *p1, void *p2, void *p3)
 {
-        u32_t tc_number = 2U;
+        uintptr_t tc_number = 2U;
 
         k_thread_create(&mem_domain_2_tid,
                         mem_domain_2_stack,
@@ -325,16 +325,17 @@ struct k_mem_domain mem_domain_tc3_mem_domain;
 
 void mem_domain_for_user_tc3(void *max_partitions, void *p2, void *p3)
 {
-        u32_t index;
+        uintptr_t index;
 
         valid_fault = true;
         USERSPACE_BARRIER;
 
         /* fault should be hit on the first index itself. */
         for (index = 0U;
-             (index < (u32_t)max_partitions) && (index < 8);
+             (index < (uintptr_t)max_partitions) && (index < 8);
              index++) {
-                *(u32_t *)mem_domain_tc3_partition_array[index]->start = 10U;
+                *(uintptr_t *)mem_domain_tc3_partition_array[index]->start =
+                        10U;
         }
 
         zassert_unreachable(ERROR_STR);
@@ -387,7 +388,7 @@ void test_mem_domain_add_partitions_invalid(void *p1, void *p2, void *p3)
                                k_current_get());
 
         k_thread_user_mode_enter(mem_domain_for_user_tc3,
-                                 (void *)(u32_t)max_partitions,
+                                 (void *)(uintptr_t)max_partitions,
                                  NULL,
                                  NULL);
 
@@ -396,13 +397,14 @@ void test_mem_domain_add_partitions_invalid(void *p1, void *p2, void *p3)
 /****************************************************************************/
 void mem_domain_for_user_tc4(void *max_partitions, void *p2, void *p3)
 {
-        u32_t index;
+        uintptr_t index;
 
         valid_fault = false;
         USERSPACE_BARRIER;
 
-        for (index = 0U; (index < (u32_t)p2) && (index < 8); index++) {
-                *(u32_t *)mem_domain_tc3_partition_array[index]->start = 10U;
+        for (index = 0U; (index < (uintptr_t)p2) && (index < 8); index++) {
+                *(uintptr_t *)mem_domain_tc3_partition_array[index]->start =
+                        10U;
         }
 
         ztest_test_pass();
@@ -439,7 +441,7 @@ void test_mem_domain_add_partitions_simple(void *p1, void *p2, void *p3)
                                k_current_get());
 
         k_thread_user_mode_enter(mem_domain_for_user_tc4,
-                                 (void *)(uintptr_t)max_partitions,
+                                 (void *)(uintptr_t)max_partitions,
                                  NULL,
                                  NULL);
 
@@ -521,7 +523,7 @@ void test_mem_domain_remove_partitions(void *p1, void *p2, void *p3)
                         MEM_DOMAIN_STACK_SIZE,
                         mem_domain_test_6_1,
                         NULL, NULL, NULL,
-                        10, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
+                        -1, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
 
         k_sem_take(&sync_sem, K_MSEC(100));
 
@@ -534,7 +536,7 @@ void test_mem_domain_remove_partitions(void *p1, void *p2, void *p3)
                         MEM_DOMAIN_STACK_SIZE,
                         mem_domain_test_6_2,
                         NULL, NULL, NULL,
-                        10, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
+                        -1, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
 
         k_sem_take(&sync_sem, SYNC_SEM_TIMEOUT);
diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c
index 7d7ba606112e6..7f4e5cec0dae6 100644
--- a/tests/kernel/mem_protect/userspace/src/main.c
+++ b/tests/kernel/mem_protect/userspace/src/main.c
@@ -114,11 +114,18 @@ static void write_control(void)
         expect_fault = true;
         expected_reason = K_ERR_CPU_EXCEPTION;
         BARRIER();
+#ifdef CONFIG_X86_64
+        __asm__ volatile (
+                "movq $0xFFFFFFFF, %rax;\n\t"
+                "movq %rax, %cr0;\n\t"
+                );
+#else
         __asm__ volatile (
                 "mov %cr0, %eax;\n\t"
                 "and $0xfffeffff, %eax;\n\t"
                 "mov %eax, %cr0;\n\t"
                 );
+#endif
         zassert_unreachable("Write to control register did not fault");
 #elif defined(CONFIG_ARM)
         unsigned int msr_value;
@@ -162,11 +169,19 @@ static void disable_mmu_mpu(void)
         expect_fault = true;
         expected_reason = K_ERR_CPU_EXCEPTION;
         BARRIER();
+#ifdef CONFIG_X86_64
+        __asm__ volatile (
+                "movq %cr0, %rax;\n\t"
+                "andq $0x7ffeffff, %rax;\n\t"
+                "movq %rax, %cr0;\n\t"
+                );
+#else
         __asm__ volatile (
                 "mov %cr0, %eax;\n\t"
                 "and $0x7ffeffff, %eax;\n\t"
                 "mov %eax, %cr0;\n\t"
                 );
+#endif
 #elif defined(CONFIG_ARM)
         expect_fault = true;
         expected_reason = K_ERR_CPU_EXCEPTION;
@@ -292,14 +307,8 @@ static void write_kernel_data(void)
 /*
  * volatile to avoid compiler mischief.
  */
-K_APP_DMEM(part0) volatile int *priv_stack_ptr;
-#if defined(CONFIG_X86)
-/*
- * We can't inline this in the code or make it static
- * or local without triggering a warning on -Warray-bounds.
- */
-K_APP_DMEM(part0) size_t size = MMU_PAGE_SIZE;
-#elif defined(CONFIG_ARC)
+K_APP_DMEM(part0) volatile char *priv_stack_ptr;
+#if defined(CONFIG_ARC)
 K_APP_DMEM(part0) s32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
                                 STACK_GUARD_SIZE);
 #endif
@@ -312,13 +321,12 @@ K_APP_DMEM(part0) s32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
 static void read_priv_stack(void)
 {
         /* Try to read from privileged stack. */
-#if defined(CONFIG_X86) || defined(CONFIG_ARC)
+#if defined(CONFIG_ARC)
         int s[1];
 
         s[0] = 0;
-        priv_stack_ptr = &s[0];
-        priv_stack_ptr = (int *)((unsigned char *)priv_stack_ptr - size);
-#elif defined(CONFIG_ARM)
+        priv_stack_ptr = (char *)&s[0] - size;
+#elif defined(CONFIG_ARM) || defined(CONFIG_X86)
         /* priv_stack_ptr set by test_main() */
 #else
 #error "Not implemented for this architecture"
@@ -326,7 +334,7 @@ static void read_priv_stack(void)
         expect_fault = true;
         expected_reason = K_ERR_CPU_EXCEPTION;
         BARRIER();
-        printk("%d\n", *priv_stack_ptr);
+        printk("%c\n", *priv_stack_ptr);
         zassert_unreachable("Read from privileged stack did not fault");
 }
 
@@ -338,13 +346,12 @@ static void read_priv_stack(void)
 static void write_priv_stack(void)
 {
         /* Try to write to privileged stack. */
-#if defined(CONFIG_X86) || defined(CONFIG_ARC)
+#if defined(CONFIG_ARC)
         int s[1];
 
         s[0] = 0;
-        priv_stack_ptr = &s[0];
-        priv_stack_ptr = (int *)((unsigned char *)priv_stack_ptr - size);
-#elif defined(CONFIG_ARM)
+        priv_stack_ptr = (char *)&s[0] - size;
+#elif defined(CONFIG_ARM) || defined(CONFIG_X86)
         /* priv_stack_ptr set by test_main() */
 #else
 #error "Not implemented for this architecture"
@@ -654,8 +661,8 @@ static void access_other_memdomain(void)
 
 #if defined(CONFIG_ARM)
 extern u8_t *z_priv_stack_find(void *obj);
-extern k_thread_stack_t ztest_thread_stack[];
 #endif
+extern k_thread_stack_t ztest_thread_stack[];
 
 struct k_mem_domain add_thread_drop_dom;
 struct k_mem_domain add_part_drop_dom;
@@ -676,6 +683,8 @@ static void user_half(void *arg1, void *arg2, void *arg3)
         if (!expect_fault) {
                 ztest_test_pass();
         } else {
+                printk("Expecting a fatal error %d but succeeded instead\n",
+                       expected_reason);
                 ztest_test_fail();
         }
 }
@@ -784,6 +793,8 @@ static void spawn_user(void)
         k_sem_take(&uthread_end_sem, K_FOREVER);
 
         if (expect_fault) {
+                printk("Expecting a fatal error %d but succeeded instead\n",
+                       expected_reason);
                 ztest_test_fail();
         }
 }
@@ -892,18 +903,19 @@ struct foo {
 
 struct foo stest_member_stack;
 
-void z_impl_stack_info_get(u32_t *start_addr, u32_t *size)
+void z_impl_stack_info_get(char **start_addr, size_t *size)
 {
-        *start_addr = k_current_get()->stack_info.start;
+        *start_addr = (char *)k_current_get()->stack_info.start;
         *size = k_current_get()->stack_info.size;
 }
 
-static inline void z_vrfy_stack_info_get(u32_t *start_addr, u32_t *size)
+static inline void z_vrfy_stack_info_get(char **start_addr,
+                                         size_t *size)
 {
-        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(start_addr, sizeof(u32_t)));
-        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(size, sizeof(u32_t)));
+        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(start_addr, sizeof(uintptr_t)));
+        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(size, sizeof(size_t)));
 
-        z_impl_stack_info_get((u32_t *)start_addr, (u32_t *)size);
+        z_impl_stack_info_get(start_addr, size);
 }
 
 #include
@@ -927,10 +939,8 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
 
         expect_fault = false;
 
-        /* Dump interesting information */
-
-        stack_info_get((u32_t *)&stack_start, (u32_t *)&stack_size);
+        stack_info_get(&stack_start, &stack_size);
         printk(" - Thread reports buffer %p size %zu\n", stack_start,
                stack_size);
 
@@ -987,9 +997,11 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
                  * and not the buffer.
                  */
                 zassert_true(check_perms(obj_start - 1, 1, 0),
-                             "user mode access to memory before start of stack object");
+                             "user mode access to memory %p before start of stack object",
+                             obj_start - 1);
                 zassert_true(check_perms(obj_end, 1, 0),
-                             "user mode access past end of stack object");
+                             "user mode access to memory %p past end of stack object",
+                             obj_end);
         }
 
@@ -1001,7 +1013,8 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
 
         if (arch_is_user_context()) {
                 zassert_true(stack_size <= obj_size - K_THREAD_STACK_RESERVED,
-                             "bad stack size in thread struct");
+                             "bad stack size %zu in thread struct",
+                             stack_size);
         }
 
@@ -1048,7 +1061,7 @@ void scenario_entry(void *stack_obj, size_t obj_size)
 
 void test_stack_buffer(void)
 {
-        printk("Reserved space: %u\n", K_THREAD_STACK_RESERVED);
+        printk("Reserved space: %zu\n", K_THREAD_STACK_RESERVED);
         printk("Provided stack size: %u\n", STEST_STACKSIZE);
 
         scenario_entry(stest_stack, sizeof(stest_stack));
@@ -1149,8 +1162,13 @@ void test_main(void)
         k_mem_domain_add_thread(&dom0, k_current_get());
 
 #if defined(CONFIG_ARM)
-        priv_stack_ptr = (int *)z_priv_stack_find(ztest_thread_stack);
+        priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
+#elif defined(CONFIG_X86)
+        struct z_x86_thread_stack_header *hdr;
+        hdr = ((struct z_x86_thread_stack_header *)ztest_thread_stack);
+        priv_stack_ptr = (((char *)&hdr->privilege_stack) +
+                          (sizeof(hdr->privilege_stack) - 1));
 #endif
         k_thread_access_grant(k_current_get(),
                               &kthread_thread, &kthread_stack,
diff --git a/tests/kernel/mem_protect/userspace/src/test_syscall.h b/tests/kernel/mem_protect/userspace/src/test_syscall.h
index 8e7debac21e1a..3fccea83b14a7 100644
--- a/tests/kernel/mem_protect/userspace/src/test_syscall.h
+++ b/tests/kernel/mem_protect/userspace/src/test_syscall.h
@@ -7,7 +7,7 @@
 #ifndef USERSPACE_TEST_SYSCALL_H
 #define USERSPACE_TEST_SYSCALL_H
 
-__syscall void stack_info_get(u32_t *start_addr, u32_t *size);
+__syscall void stack_info_get(char **start_addr, size_t *size);
 
 __syscall int check_perms(void *addr, size_t size, int write);
 __syscall void missing_syscall(void);
diff --git a/tests/kernel/msgq/msgq_api/src/main.c b/tests/kernel/msgq/msgq_api/src/main.c
index c005094a42fa9..8e98d34549076 100644
--- a/tests/kernel/msgq/msgq_api/src/main.c
+++ b/tests/kernel/msgq/msgq_api/src/main.c
@@ -43,7 +43,12 @@ dummy_test(test_msgq_user_attrs_get);
 dummy_test(test_msgq_user_purge_when_put);
 #endif /* CONFIG_USERSPACE */
 
-K_MEM_POOL_DEFINE(test_pool, 128, 128, 2, 4);
+#ifdef CONFIG_64BIT
+#define MAX_SZ 256
+#else
+#define MAX_SZ 128
+#endif
+K_MEM_POOL_DEFINE(test_pool, 128, MAX_SZ, 2, 4);
 
 extern struct k_msgq kmsgq;
 extern struct k_msgq msgq;
diff --git a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
index 04798a81f9535..dbc3ba4e34bbb 100644
--- a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
+++ b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
@@ -32,7 +32,12 @@ K_SEM_DEFINE(end_sema, 0, 1);
  * to allocate the pipe object, one for its buffer. Both should be auto-
  * released when the thread exits
  */
-K_MEM_POOL_DEFINE(test_pool, 128, 128, 4, 4);
+#ifdef CONFIG_64BIT
+#define SZ 256
+#else
+#define SZ 128
+#endif
+K_MEM_POOL_DEFINE(test_pool, SZ, SZ, 4, 4);
 
 static void tpipe_put(struct k_pipe *ppipe, int timeout)
 {
@@ -337,9 +342,9 @@ void test_pipe_alloc(void)
         zassert_false(k_pipe_alloc_init(&pipe_test_alloc, 0), NULL);
         k_pipe_cleanup(&pipe_test_alloc);
 
-        ret = k_pipe_alloc_init(&pipe_test_alloc, PIPE_LEN * 8);
+        ret = k_pipe_alloc_init(&pipe_test_alloc, 1024);
         zassert_true(ret == -ENOMEM,
-                     "resource pool is smaller then requested buffer");
+                     "resource pool max block size is not smaller than requested buffer");
 }
 
 /**
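
The test_pool adjustments in the test suites above and below follow from data structures that are mostly pointers roughly doubling in size under LP64, so a single allocation no longer fits the old maximum block size. A rough illustration with a hypothetical struct (not a Zephyr type):

    #include <stdio.h>

    struct mostly_pointers {
            void *next;
            void *prev;
            void *owner;
            unsigned int flags;
    };

    int main(void)
    {
            /* Typically 16 bytes on ILP32 and 32 bytes on LP64 (after padding),
             * which is why the pools' maximum block size grows under CONFIG_64BIT.
             */
            printf("sizeof(struct mostly_pointers) = %zu\n",
                   sizeof(struct mostly_pointers));
            return 0;
    }
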
diff --git a/tests/kernel/poll/src/main.c b/tests/kernel/poll/src/main.c
index fc94f30f8a771..8da1d01306721 100644
--- a/tests/kernel/poll/src/main.c
+++ b/tests/kernel/poll/src/main.c
@@ -13,7 +13,13 @@ extern void test_poll_multi(void);
 extern void test_poll_threadstate(void);
 extern void test_poll_grant_access(void);
 
-K_MEM_POOL_DEFINE(test_pool, 128, 128, 4, 4);
+#ifdef CONFIG_64BIT
+#define MAX_SZ 256
+#else
+#define MAX_SZ 128
+#endif
+
+K_MEM_POOL_DEFINE(test_pool, 128, MAX_SZ, 4, 4);
 
 /*test case main entry*/
 void test_main(void)
diff --git a/tests/kernel/queue/src/main.c b/tests/kernel/queue/src/main.c
index 29604c78019a0..d967b13273248 100644
--- a/tests/kernel/queue/src/main.c
+++ b/tests/kernel/queue/src/main.c
@@ -28,7 +28,13 @@ static void test_queue_alloc_append_user(void)
         ztest_test_skip();
 }
 #endif
-K_MEM_POOL_DEFINE(test_pool, 16, 96, 4, 4);
+
+#ifdef CONFIG_64BIT
+#define MAX_SZ 128
+#else
+#define MAX_SZ 96
+#endif
+K_MEM_POOL_DEFINE(test_pool, 16, MAX_SZ, 4, 4);
 
 /*test case main entry*/
 void test_main(void)