diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 3f5ef8543b49ee..fa00c3722e6be3 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization requires significant time, and it slows down performance. ARCMWDT works with tls pointer in different way then GCC. Optimized access to - TLS pointer via _current variable does not provide significant advantages + TLS pointer via arch_current_thread() does not provide significant advantages in case of MetaWare. config GEN_ISR_TABLES diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c index 6f9da3cd1e0e95..a6c8410e63357c 100644 --- a/arch/arc/core/fault.c +++ b/arch/arc/core/fault.c @@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp) { #if defined(CONFIG_MULTITHREADING) uint32_t guard_end, guard_start; - const struct k_thread *thread = _current; + const struct k_thread *thread = arch_current_thread(); if (!thread) { /* TODO: Under what circumstances could we get here ? */ diff --git a/arch/arc/core/irq_offload.c b/arch/arc/core/irq_offload.c index d1a3f900ca3f0b..f24a3e7dd8a5b2 100644 --- a/arch/arc/core/irq_offload.c +++ b/arch/arc/core/irq_offload.c @@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) __asm__ volatile("sync"); - /* If _current was aborted in the offload routine, we shouldn't be here */ - __ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0); + /* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */ + __ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0); } /* need to be executed on every core in the system */ diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c index 4b1d836103eedb..cb5352bc47547d 100644 --- a/arch/arc/core/thread.c +++ b/arch/arc/core/thread.c @@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, #ifdef CONFIG_MULTITHREADING void *z_arch_get_next_switch_handle(struct k_thread **old_thread) { - *old_thread = _current; + *old_thread = arch_current_thread(); return z_get_next_switch_handle(NULL); } @@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread) FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { - setup_stack_vars(_current); + setup_stack_vars(arch_current_thread()); /* possible optimizaiton: no need to load mem domain anymore */ /* need to lock cpu here ? 
*/ - configure_mpu_thread(_current); + configure_mpu_thread(arch_current_thread()); z_arc_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)_current->stack_info.start, - (_current->stack_info.size - - _current->stack_info.delta), _current); + (uint32_t)arch_current_thread()->stack_info.start, + (arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta), arch_current_thread()); CODE_UNREACHABLE; } #endif @@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout) id = _current_cpu->id; #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK) - __ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), ""); + __ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), ""); #endif k_spin_unlock(&lock, key); @@ -355,7 +355,7 @@ void arc_vpx_unlock(void) key = k_spin_lock(&lock); #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK) - __ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), ""); + __ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), ""); #endif id = _current_cpu->id; k_spin_unlock(&lock, key); diff --git a/arch/arc/core/tls.c b/arch/arc/core/tls.c index 3cf7d45cab9135..9585b228926c2f 100644 --- a/arch/arc/core/tls.c +++ b/arch/arc/core/tls.c @@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) void *_Preserve_flags _mwget_tls(void) { - return (void *)(_current->tls); + return (void *)(arch_current_thread()->tls); } #else diff --git a/arch/arm/core/cortex_a_r/fault.c b/arch/arm/core/cortex_a_r/fault.c index daf1d2345ca06f..5e3d38a66b45d6 100644 --- a/arch/arm/core/cortex_a_r/fault.c +++ b/arch/arm/core/cortex_a_r/fault.c @@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void) * context because it is about to be overwritten. */ if (((_current_cpu->nested == 2) - && (_current->base.user_options & K_FP_REGS)) + && (arch_current_thread()->base.user_options & K_FP_REGS)) || ((_current_cpu->nested > 2) && (spill_esf->undefined & FPEXC_EN))) { /* @@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void) * means that a thread that uses the VFP does not have to, * but should, set K_FP_REGS on thread creation. */ - _current->base.user_options |= K_FP_REGS; + arch_current_thread()->base.user_options |= K_FP_REGS; } return false; diff --git a/arch/arm/core/cortex_a_r/swap.c b/arch/arm/core/cortex_a_r/swap.c index 2f7faba741a93d..cf123e8ed932a8 100644 --- a/arch/arm/core/cortex_a_r/swap.c +++ b/arch/arm/core/cortex_a_r/swap.c @@ -17,8 +17,8 @@ int arch_swap(unsigned int key) { /* store off key and return value */ - _current->arch.basepri = key; - _current->arch.swap_return_value = -EAGAIN; + arch_current_thread()->arch.basepri = key; + arch_current_thread()->arch.swap_return_value = -EAGAIN; z_arm_cortex_r_svc(); irq_unlock(key); @@ -26,5 +26,5 @@ int arch_swap(unsigned int key) /* Context switch is performed here. Returning implies the * thread has been context-switched-in again. 
*/ - return _current->arch.swap_return_value; + return arch_current_thread()->arch.swap_return_value; } diff --git a/arch/arm/core/cortex_a_r/swap_helper.S b/arch/arm/core/cortex_a_r/swap_helper.S index a41e1ab5942fe1..36dd9a96548066 100644 --- a/arch/arm/core/cortex_a_r/swap_helper.S +++ b/arch/arm/core/cortex_a_r/swap_helper.S @@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap) #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] - tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ + tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */ beq out_fp_inactive mov ip, #FPEXC_EN @@ -152,7 +152,7 @@ out_fp_inactive: #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] - tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ + tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */ beq in_fp_inactive mov r3, #FPEXC_EN diff --git a/arch/arm/core/cortex_a_r/thread.c b/arch/arm/core/cortex_a_r/thread.c index b3bd91ce5c11c8..43be2d5069022a 100644 --- a/arch/arm/core/cortex_a_r/thread.c +++ b/arch/arm/core/cortex_a_r/thread.c @@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, { /* Set up privileged stack before entering user mode */ - _current->arch.priv_stack_start = - (uint32_t)z_priv_stack_find(_current->stack_obj); + arch_current_thread()->arch.priv_stack_start = + (uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no @@ -208,13 +208,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * which accounted for memory borrowed from the thread stack. */ #if FP_GUARD_EXTRA_SIZE > 0 - if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { - _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; - _current->stack_info.size += FP_GUARD_EXTRA_SIZE; + if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { + arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE; + arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ - _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; - _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's @@ -222,23 +222,23 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) - _current->arch.priv_stack_start += - ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + arch_current_thread()->arch.priv_stack_start += + ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? 
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else - _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ #if defined(CONFIG_CPU_AARCH32_CORTEX_R) - _current->arch.priv_stack_end = - _current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE; + arch_current_thread()->arch.priv_stack_end = + arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE; #endif z_arm_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)_current->stack_info.start, - _current->stack_info.size - - _current->stack_info.delta); + (uint32_t)arch_current_thread()->stack_info.start, + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); CODE_UNREACHABLE; } @@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode); uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) - const struct k_thread *thread = _current; + const struct k_thread *thread = arch_current_thread(); if (thread == NULL) { return 0; @@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = - ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is @@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { - if (thread != _current) { + if (thread != arch_current_thread()) { return -EINVAL; } diff --git a/arch/arm/core/cortex_m/swap.c b/arch/arm/core/cortex_m/swap.c index 9a597ef219d62e..72eade765596f7 100644 --- a/arch/arm/core/cortex_m/swap.c +++ b/arch/arm/core/cortex_m/swap.c @@ -33,8 +33,8 @@ int arch_swap(unsigned int key) { /* store off key and return value */ - _current->arch.basepri = key; - _current->arch.swap_return_value = -EAGAIN; + arch_current_thread()->arch.basepri = key; + arch_current_thread()->arch.swap_return_value = -EAGAIN; /* set pending bit to make sure we will take a PendSV exception */ SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; @@ -45,5 +45,5 @@ int arch_swap(unsigned int key) /* Context switch is performed here. Returning implies the * thread has been context-switched-in again. 
*/ - return _current->arch.swap_return_value; + return arch_current_thread()->arch.swap_return_value; } diff --git a/arch/arm/core/cortex_m/swap_helper.S b/arch/arm/core/cortex_m/swap_helper.S index c6207084b5ea64..23a49cb87ef541 100644 --- a/arch/arm/core/cortex_m/swap_helper.S +++ b/arch/arm/core/cortex_m/swap_helper.S @@ -288,7 +288,7 @@ in_fp_endif: #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) /* Re-program dynamic memory map */ push {r2,lr} - mov r0, r2 /* _current thread */ + mov r0, r2 /* arch_current_thread() thread */ bl z_arm_configure_dynamic_mpu_regions pop {r2,lr} #endif diff --git a/arch/arm/core/cortex_m/thread.c b/arch/arm/core/cortex_m/thread.c index 6cd7144e79d17b..4013b636811572 100644 --- a/arch/arm/core/cortex_m/thread.c +++ b/arch/arm/core/cortex_m/thread.c @@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, { /* Set up privileged stack before entering user mode */ - _current->arch.priv_stack_start = - (uint32_t)z_priv_stack_find(_current->stack_obj); + arch_current_thread()->arch.priv_stack_start = + (uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no @@ -241,13 +241,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * which accounted for memory borrowed from the thread stack. */ #if FP_GUARD_EXTRA_SIZE > 0 - if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { - _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; - _current->stack_info.size += FP_GUARD_EXTRA_SIZE; + if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { + arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE; + arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ - _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; - _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's @@ -255,18 +255,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) - _current->arch.priv_stack_start += - ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + arch_current_thread()->arch.priv_stack_start += + ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? 
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else - _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; + arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ z_arm_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)_current->stack_info.start, - _current->stack_info.size - - _current->stack_info.delta); + (uint32_t)arch_current_thread()->stack_info.start, + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); CODE_UNREACHABLE; } @@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread) uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) - const struct k_thread *thread = _current; + const struct k_thread *thread = arch_current_thread(); if (thread == NULL) { return 0; @@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = - ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is @@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { - if (thread != _current) { + if (thread != arch_current_thread()) { return -EINVAL; } @@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, { z_arm_prepare_switch_to_main(); - _current = main_thread; + arch_current_thread_set(main_thread); #if defined(CONFIG_THREAD_LOCAL_STORAGE) /* On Cortex-M, TLS uses a global variable as pointer to diff --git a/arch/arm/core/cortex_m/thread_abort.c b/arch/arm/core/cortex_m/thread_abort.c index 99af8671107050..235adeab0b9eb1 100644 --- a/arch/arm/core/cortex_m/thread_abort.c +++ b/arch/arm/core/cortex_m/thread_abort.c @@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); - if (_current == thread) { + if (arch_current_thread() == thread) { if (arch_is_in_isr()) { /* ARM is unlike most arches in that this is true * even for non-peripheral interrupts, even though diff --git a/arch/arm64/core/cortex_r/arm_mpu.c b/arch/arm64/core/cortex_r/arm_mpu.c index 2bd6d265c9a9e6..5bc9ecbf3e5aca 100644 --- a/arch/arm64/core/cortex_r/arm_mpu.c +++ b/arch/arm64/core/cortex_r/arm_mpu.c @@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread) */ thread->arch.region_num = (uint8_t)region_num; - if (thread == _current) { + if (thread == arch_current_thread()) { ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num); } @@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP - if (ret == 0 && thread != _current) { + if (ret == 0 && thread != arch_current_thread()) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } @@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread) ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP - if (ret == 0 && thread != _current) { + if (ret == 0 && thread != 
arch_current_thread()) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } diff --git a/arch/arm64/core/fatal.c b/arch/arm64/core/fatal.c index 7955b6f7d6d16b..0e793ea18bbe35 100644 --- a/arch/arm64/core/fatal.c +++ b/arch/arm64/core/fatal.c @@ -306,8 +306,9 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u } } #ifdef CONFIG_USERSPACE - else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) { - sp_limit = (uint64_t)_current->stack_info.start; + else if ((arch_current_thread()->base.user_options & K_USER) != 0 && + GET_ESR_EC(esr) == 0x24) { + sp_limit = (uint64_t)arch_current_thread()->stack_info.start; guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE; sp = esf->sp; if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) { @@ -434,7 +435,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf) * User mode is only allowed to induce oopses and stack check * failures via software-triggered system fatal exceptions. */ - if (((_current->base.user_options & K_USER) != 0) && + if (((arch_current_thread()->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { reason = K_ERR_KERNEL_OOPS; } diff --git a/arch/arm64/core/fpu.c b/arch/arm64/core/fpu.c index a585165b943397..00abd59632a616 100644 --- a/arch/arm64/core/fpu.c +++ b/arch/arm64/core/fpu.c @@ -36,7 +36,7 @@ static void DBG(char *msg, struct k_thread *th) strcpy(buf, "CPU# exc# "); buf[3] = '0' + _current_cpu->id; buf[8] = '0' + arch_exception_depth(); - strcat(buf, _current->name); + strcat(buf, arch_current_thread()->name); strcat(buf, ": "); strcat(buf, msg); strcat(buf, " "); @@ -125,7 +125,7 @@ static void flush_owned_fpu(struct k_thread *thread) * replace it, and this avoids a deadlock where * two CPUs want to pull each other's FPU context. */ - if (thread == _current) { + if (thread == arch_current_thread()) { arch_flush_local_fpu(); while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) { barrier_dsync_fence_full(); @@ -260,15 +260,15 @@ void z_arm64_fpu_trap(struct arch_esf *esf) * Make sure the FPU context we need isn't live on another CPU. * The current CPU's FPU context is NULL at this point. 
*/ - flush_owned_fpu(_current); + flush_owned_fpu(arch_current_thread()); #endif /* become new owner */ - atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current); + atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread()); /* restore our content */ - z_arm64_fpu_restore(&_current->arch.saved_fp_context); - DBG("restore", _current); + z_arm64_fpu_restore(&arch_current_thread()->arch.saved_fp_context); + DBG("restore", arch_current_thread()); } /* @@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level) if (arch_exception_depth() == exc_update_level) { /* We're about to execute non-exception code */ - if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) { + if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == arch_current_thread()) { /* turn on FPU access */ write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP); } else { diff --git a/arch/arm64/core/mmu.c b/arch/arm64/core/mmu.c index a914916d605e7b..ef199b2e7ab4da 100644 --- a/arch/arm64/core/mmu.c +++ b/arch/arm64/core/mmu.c @@ -1309,7 +1309,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) } thread->arch.ptables = domain_ptables; - if (thread == _current) { + if (thread == arch_current_thread()) { z_arm64_swap_ptables(thread); } else { #ifdef CONFIG_SMP diff --git a/arch/arm64/core/smp.c b/arch/arm64/core/smp.c index fd9d457ea7df55..e1c3f64dbb95f1 100644 --- a/arch/arm64/core/smp.c +++ b/arch/arm64/core/smp.c @@ -240,7 +240,7 @@ void mem_cfg_ipi_handler(const void *unused) * This is a no-op if the page table is already the right one. * Lock irq to prevent the interrupt during mem region switch. */ - z_arm64_swap_mem_domains(_current); + z_arm64_swap_mem_domains(arch_current_thread()); arch_irq_unlock(key); } diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c index 18f49945eda495..f51e203555039e 100644 --- a/arch/arm64/core/thread.c +++ b/arch/arm64/core/thread.c @@ -159,15 +159,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, uint64_t tmpreg; /* Map the thread stack */ - z_arm64_thread_mem_domains_init(_current); + z_arm64_thread_mem_domains_init(arch_current_thread()); /* Top of the user stack area */ - stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start + - _current->stack_info.size - - _current->stack_info.delta); + stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); /* Top of the privileged non-user-accessible part of the stack */ - stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED); + stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED); register void *x0 __asm__("x0") = user_entry; register void *x1 __asm__("x1") = p1; diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c index d4ec5e50b5ea68..18d83cf78d6b6d 100644 --- a/arch/posix/core/swap.c +++ b/arch/posix/core/swap.c @@ -23,7 +23,7 @@ int arch_swap(unsigned int key) { /* - * struct k_thread * _current is the currently running thread + * struct k_thread * arch_current_thread() is the currently running thread * struct k_thread * _kernel.ready_q.cache contains the next thread to * run (cannot be NULL) * @@ -34,8 +34,8 @@ int arch_swap(unsigned int key) #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_out(); #endif - _current->callee_saved.key = key; - _current->callee_saved.retval = -EAGAIN; + arch_current_thread()->callee_saved.key = key; + arch_current_thread()->callee_saved.retval = -EAGAIN; /* retval 
may be modified with a call to * arch_thread_return_value_set() @@ -47,10 +47,10 @@ int arch_swap(unsigned int key) posix_thread_status_t *this_thread_ptr = (posix_thread_status_t *) - _current->callee_saved.thread_status; + arch_current_thread()->callee_saved.thread_status; - _current = _kernel.ready_q.cache; + arch_current_thread_set(_kernel.ready_q.cache); #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); #endif @@ -66,9 +66,9 @@ int arch_swap(unsigned int key) /* When we continue, _kernel->current points back to this thread */ - irq_unlock(_current->callee_saved.key); + irq_unlock(arch_current_thread()->callee_saved.key); - return _current->callee_saved.retval; + return arch_current_thread()->callee_saved.retval; } @@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, z_thread_mark_switched_out(); #endif - _current = _kernel.ready_q.cache; + arch_current_thread_set(_kernel.ready_q.cache); #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); diff --git a/arch/posix/core/thread.c b/arch/posix/core/thread.c index 4e443e2283b4f1..4965f08f6aac90 100644 --- a/arch/posix/core/thread.c +++ b/arch/posix/core/thread.c @@ -112,7 +112,7 @@ void z_impl_k_thread_abort(k_tid_t thread) key = irq_lock(); - if (_current == thread) { + if (arch_current_thread() == thread) { if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */ tstatus->aborted = 1; } else { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index f62bb34d147742..399fc8a2ecf94d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -35,7 +35,7 @@ config RISCV_CURRENT_VIA_GP select ARCH_HAS_CUSTOM_CURRENT_IMPL help Store the current thread's pointer into the global pointer (GP) register. - When is enabled, calls to `_current` & `k_sched_current_thread_query()` will + When enabled, calls to `arch_current_thread()` & `k_sched_current_thread_query()` will be reduced to a single register read.
config RISCV_ALWAYS_SWITCH_THROUGH_ECALL diff --git a/arch/riscv/core/fatal.c b/arch/riscv/core/fatal.c index 879ffab9a80953..8e0e55c8e8047d 100644 --- a/arch/riscv/core/fatal.c +++ b/arch/riscv/core/fatal.c @@ -158,23 +158,23 @@ static bool bad_stack_pointer(struct arch_esf *esf) uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf); #ifdef CONFIG_USERSPACE - if (_current->arch.priv_stack_start != 0 && - sp >= _current->arch.priv_stack_start && - sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) { + if (arch_current_thread()->arch.priv_stack_start != 0 && + sp >= arch_current_thread()->arch.priv_stack_start && + sp < arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) { return true; } - if (z_stack_is_user_capable(_current->stack_obj) && - sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED && - sp < _current->stack_info.start - K_THREAD_STACK_RESERVED + if (z_stack_is_user_capable(arch_current_thread()->stack_obj) && + sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED && + sp < arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED + Z_RISCV_STACK_GUARD_SIZE) { return true; } #endif /* CONFIG_USERSPACE */ #if CONFIG_MULTITHREADING - if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED && - sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED + if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED && + sp < arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED + Z_RISCV_STACK_GUARD_SIZE) { return true; } @@ -191,10 +191,10 @@ static bool bad_stack_pointer(struct arch_esf *esf) #ifdef CONFIG_USERSPACE if ((esf->mstatus & MSTATUS_MPP) == 0 && - (esf->sp < _current->stack_info.start || - esf->sp > _current->stack_info.start + - _current->stack_info.size - - _current->stack_info.delta)) { + (esf->sp < arch_current_thread()->stack_info.start || + esf->sp > arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta)) { /* user stack pointer moved outside of its allowed stack */ return true; } @@ -246,9 +246,9 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) void z_impl_user_fault(unsigned int reason) { - struct arch_esf *oops_esf = _current->syscall_frame; + struct arch_esf *oops_esf = arch_current_thread()->syscall_frame; - if (((_current->base.user_options & K_USER) != 0) && + if (((arch_current_thread()->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { reason = K_ERR_KERNEL_OOPS; } diff --git a/arch/riscv/core/fpu.c b/arch/riscv/core/fpu.c index 318e97e0002a9f..bd648585c436df 100644 --- a/arch/riscv/core/fpu.c +++ b/arch/riscv/core/fpu.c @@ -36,8 +36,8 @@ static void DBG(char *msg, struct k_thread *th) strcpy(buf, "CPU# exc# "); buf[3] = '0' + _current_cpu->id; - buf[8] = '0' + _current->arch.exception_depth; - strcat(buf, _current->name); + buf[8] = '0' + arch_current_thread()->arch.exception_depth; + strcat(buf, arch_current_thread()->name); strcat(buf, ": "); strcat(buf, msg); strcat(buf, " "); @@ -82,12 +82,12 @@ static void z_riscv_fpu_load(void) "must be called with FPU access disabled"); /* become new owner */ - atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current); + atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread()); /* restore our content */ csr_set(mstatus, MSTATUS_FS_INIT); - z_riscv_fpu_restore(&_current->arch.saved_fp_context); - DBG("restore", _current); + z_riscv_fpu_restore(&arch_current_thread()->arch.saved_fp_context); + 
DBG("restore", arch_current_thread()); } /* @@ -168,7 +168,7 @@ static void flush_owned_fpu(struct k_thread *thread) * replace it, and this avoids a deadlock where * two CPUs want to pull each other's FPU context. */ - if (thread == _current) { + if (thread == arch_current_thread()) { z_riscv_fpu_disable(); arch_flush_local_fpu(); do { @@ -213,7 +213,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf) /* save current owner's content if any */ arch_flush_local_fpu(); - if (_current->arch.exception_depth > 0) { + if (arch_current_thread()->arch.exception_depth > 0) { /* * We were already in exception when the FPU access trapped. * We give it access and prevent any further IRQ recursion @@ -233,7 +233,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf) * Make sure the FPU context we need isn't live on another CPU. * The current CPU's FPU context is NULL at this point. */ - flush_owned_fpu(_current); + flush_owned_fpu(arch_current_thread()); #endif /* make it accessible and clean to the returning context */ @@ -256,13 +256,13 @@ static bool fpu_access_allowed(unsigned int exc_update_level) __ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0, "must be called with IRQs disabled"); - if (_current->arch.exception_depth == exc_update_level) { + if (arch_current_thread()->arch.exception_depth == exc_update_level) { /* We're about to execute non-exception code */ - if (_current_cpu->arch.fpu_owner == _current) { + if (_current_cpu->arch.fpu_owner == arch_current_thread()) { /* everything is already in place */ return true; } - if (_current->arch.fpu_recently_used) { + if (arch_current_thread()->arch.fpu_recently_used) { /* * Before this thread was context-switched out, * it made active use of the FPU, but someone else @@ -273,7 +273,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level) z_riscv_fpu_disable(); arch_flush_local_fpu(); #ifdef CONFIG_SMP - flush_owned_fpu(_current); + flush_owned_fpu(arch_current_thread()); #endif z_riscv_fpu_load(); _current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN; diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S index 1def6cfa62d141..ae8f63357a5446 100644 --- a/arch/riscv/core/isr.S +++ b/arch/riscv/core/isr.S @@ -297,7 +297,7 @@ is_fp: /* Process the FP trap and quickly return from exception */ mv a0, sp tail z_riscv_fpu_trap 2: -no_fp: /* increment _current->arch.exception_depth */ +no_fp: /* increment arch_current_thread()->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, 1 @@ -724,7 +724,7 @@ no_reschedule: mv a0, sp call z_riscv_fpu_exit_exc - /* decrement _current->arch.exception_depth */ + /* decrement arch_current_thread()->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, -1 diff --git a/arch/riscv/core/pmp.c b/arch/riscv/core/pmp.c index e29c8abd76d61e..fbbf7c55137ce9 100644 --- a/arch/riscv/core/pmp.c +++ b/arch/riscv/core/pmp.c @@ -752,8 +752,8 @@ int arch_buffer_validate(const void *addr, size_t size, int write) int ret = -1; /* Check if this is on the stack */ - if (IS_WITHIN(start, size, - _current->stack_info.start, _current->stack_info.size)) { + if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start, + arch_current_thread()->stack_info.size)) { return 0; } @@ -768,7 +768,7 @@ int arch_buffer_validate(const void *addr, size_t size, int write) } /* Look for a matching partition in our memory domain */ - struct k_mem_domain *domain = _current->mem_domain_info.mem_domain; + struct k_mem_domain *domain 
= arch_current_thread()->mem_domain_info.mem_domain; int p_idx, remaining_partitions; k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); diff --git a/arch/riscv/core/stacktrace.c b/arch/riscv/core/stacktrace.c index 361e152f00ca3b..0dfe0a19638825 100644 --- a/arch/riscv/core/stacktrace.c +++ b/arch/riscv/core/stacktrace.c @@ -108,7 +108,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k /* Unwind the provided exception stack frame */ fp = esf->s0; ra = esf->mepc; - } else if ((csf == NULL) || (csf == &_current->callee_saved)) { + } else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) { /* Unwind current thread (default case when nothing is provided ) */ fp = (uintptr_t)__builtin_frame_address(0); ra = (uintptr_t)walk_stackframe; @@ -181,7 +181,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k /* Unwind the provided exception stack frame */ sp = z_riscv_get_sp_before_exc(esf); ra = esf->mepc; - } else if ((csf == NULL) || (csf == &_current->callee_saved)) { + } else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) { /* Unwind current thread (default case when nothing is provided ) */ sp = current_stack_pointer; ra = (uintptr_t)walk_stackframe; @@ -215,8 +215,10 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie, const struct k_thread *thread, const struct arch_esf *esf) { if (thread == NULL) { - /* In case `thread` is NULL, default that to `_current` and try to unwind */ - thread = _current; + /* In case `thread` is NULL, default that to `arch_current_thread()` + * and try to unwind + */ + thread = arch_current_thread(); } walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound, @@ -280,7 +282,8 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf int i = 0; LOG_ERR("call trace:"); - walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf); + walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound, + csf); LOG_ERR(""); } #endif /* CONFIG_EXCEPTION_STACK_TRACE */ diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c index b4999bda09ac3f..5c471034d25753 100644 --- a/arch/riscv/core/thread.c +++ b/arch/riscv/core/thread.c @@ -132,28 +132,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, /* Set up privileged stack */ #ifdef CONFIG_GEN_PRIV_STACKS - _current->arch.priv_stack_start = - (unsigned long)z_priv_stack_find(_current->stack_obj); + arch_current_thread()->arch.priv_stack_start = + (unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj); /* remove the stack guard from the main stack */ - _current->stack_info.start -= K_THREAD_STACK_RESERVED; - _current->stack_info.size += K_THREAD_STACK_RESERVED; + arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED; + arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED; #else - _current->arch.priv_stack_start = (unsigned long)_current->stack_obj; + arch_current_thread()->arch.priv_stack_start = + (unsigned long)arch_current_thread()->stack_obj; #endif /* CONFIG_GEN_PRIV_STACKS */ - top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start + + top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start + K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE); #ifdef CONFIG_INIT_STACKS /* Initialize the privileged stack */ - (void)memset((void *)_current->arch.priv_stack_start, 0xaa, + 
(void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa, Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE)); #endif /* CONFIG_INIT_STACKS */ top_of_user_stack = Z_STACK_PTR_ALIGN( - _current->stack_info.start + - _current->stack_info.size - - _current->stack_info.delta); + arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); status = csr_read(mstatus); @@ -169,12 +170,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, #ifdef CONFIG_PMP_STACK_GUARD /* reconfigure as the kernel mode stack will be different */ - z_riscv_pmp_stackguard_prepare(_current); + z_riscv_pmp_stackguard_prepare(arch_current_thread()); #endif /* Set up Physical Memory Protection */ - z_riscv_pmp_usermode_prepare(_current); - z_riscv_pmp_usermode_enable(_current); + z_riscv_pmp_usermode_prepare(arch_current_thread()); + z_riscv_pmp_usermode_enable(arch_current_thread()); /* preserve stack pointer for next exception entry */ arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack; diff --git a/arch/sparc/core/thread.c b/arch/sparc/core/thread.c index e56d9f827c9de3..8bdc4cd5500f6c 100644 --- a/arch/sparc/core/thread.c +++ b/arch/sparc/core/thread.c @@ -61,7 +61,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void *z_arch_get_next_switch_handle(struct k_thread **old_thread) { - *old_thread = _current; + *old_thread = arch_current_thread(); return z_get_next_switch_handle(*old_thread); } diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index d43499a08d2ac1..f3103861f366ec 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -49,7 +49,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) { uintptr_t start, end; - if (_current == NULL || arch_is_in_isr()) { + if (arch_current_thread() == NULL || arch_is_in_isr()) { /* We were servicing an interrupt or in early boot environment * and are supposed to be on the interrupt stack */ int cpu_id; @@ -64,7 +64,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) end = start + CONFIG_ISR_STACK_SIZE; #ifdef CONFIG_USERSPACE } else if ((cs & 0x3U) == 0U && - (_current->base.user_options & K_USER) != 0) { + (arch_current_thread()->base.user_options & K_USER) != 0) { /* The low two bits of the CS register is the privilege * level. It will be 0 in supervisor mode and 3 in user mode * corresponding to ring 0 / ring 3. 
@@ -72,14 +72,14 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) * If we get here, we must have been doing a syscall, check * privilege elevation stack bounds */ - start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; - end = _current->stack_info.start; + start = arch_current_thread()->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; + end = arch_current_thread()->stack_info.start; #endif /* CONFIG_USERSPACE */ } else { /* Normal thread operation, check its stack buffer */ - start = _current->stack_info.start; - end = Z_STACK_PTR_ALIGN(_current->stack_info.start + - _current->stack_info.size); + start = arch_current_thread()->stack_info.start; + end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size); } return (addr <= start) || (addr + size > end); @@ -97,7 +97,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) __pinned_func bool z_x86_check_guard_page(uintptr_t addr) { - struct k_thread *thread = _current; + struct k_thread *thread = arch_current_thread(); uintptr_t start, end; /* Front guard size - before thread stack area */ @@ -233,7 +233,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf) * switch when we took the exception via z_x86_trampoline_to_kernel */ if ((esf->cs & 0x3) != 0) { - return _current->arch.ptables; + return arch_current_thread()->arch.ptables; } #else ARG_UNUSED(esf); diff --git a/arch/x86/core/ia32/float.c b/arch/x86/core/ia32/float.c index c89bf7accd5a1e..e4102d803324f3 100644 --- a/arch/x86/core/ia32/float.c +++ b/arch/x86/core/ia32/float.c @@ -207,7 +207,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options) /* Associate the new FP context with the specified thread */ - if (thread == _current) { + if (thread == arch_current_thread()) { /* * When enabling FP support for the current thread, just claim * ownership of the FPU and leave CR0[TS] unset. @@ -222,7 +222,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options) * of the FPU to them (unless we need it ourselves). 
*/ - if ((_current->base.user_options & _FP_USER_MASK) == 0) { + if ((arch_current_thread()->base.user_options & _FP_USER_MASK) == 0) { /* * We are not FP-capable, so mark FPU as owned by the * thread we've just enabled FP support for, then @@ -278,7 +278,7 @@ int z_float_disable(struct k_thread *thread) thread->base.user_options &= ~_FP_USER_MASK; - if (thread == _current) { + if (thread == arch_current_thread()) { z_FpAccessDisable(); _kernel.current_fp = (struct k_thread *)0; } else { @@ -314,7 +314,7 @@ void _FpNotAvailableExcHandler(struct arch_esf *pEsf) /* Enable highest level of FP capability configured into the kernel */ - k_float_enable(_current, _FP_USER_MASK); + k_float_enable(arch_current_thread(), _FP_USER_MASK); } _EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler, IV_DEVICE_NOT_AVAILABLE, 0); diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c index 436bc18edb73d8..fd38d22cb90b08 100644 --- a/arch/x86/core/userspace.c +++ b/arch/x86/core/userspace.c @@ -132,9 +132,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, /* Transition will reset stack pointer to initial, discarding * any old context since this is a one-way operation */ - stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start + - _current->stack_info.size - - _current->stack_info.delta); + stack_end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); #ifdef CONFIG_X86_64 /* x86_64 SysV ABI requires 16 byte stack alignment, which @@ -156,15 +156,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * Note that this also needs to page in the reserved * portion of the stack (which is usually the page just * before the beginning of stack in - * _current->stack_info.start. + * arch_current_thread()->stack_info.start. */ uintptr_t stack_start; size_t stack_size; uintptr_t stack_aligned_start; size_t stack_aligned_size; - stack_start = POINTER_TO_UINT(_current->stack_obj); - stack_size = K_THREAD_STACK_LEN(_current->stack_info.size); + stack_start = POINTER_TO_UINT(arch_current_thread()->stack_obj); + stack_size = K_THREAD_STACK_LEN(arch_current_thread()->stack_info.size); #if defined(CONFIG_X86_STACK_PROTECTION) /* With hardware stack protection, the first page of stack @@ -182,7 +182,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, #endif z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end, - _current->stack_info.start); + arch_current_thread()->stack_info.start); CODE_UNREACHABLE; } diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c index e4188b8670f845..fdda995b3b07fd 100644 --- a/arch/x86/core/x86_mmu.c +++ b/arch/x86/core/x86_mmu.c @@ -421,7 +421,7 @@ void z_x86_tlb_ipi(const void *arg) /* We might have been moved to another memory domain, so always invoke * z_x86_thread_page_tables_get() instead of using current CR3 value. 
*/ - ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current)); + ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(arch_current_thread())); #endif /* * In the future, we can consider making this smarter, such as @@ -1440,7 +1440,7 @@ static inline void bcb_fence(void) __pinned_func int arch_buffer_validate(const void *addr, size_t size, int write) { - pentry_t *ptables = z_x86_thread_page_tables_get(_current); + pentry_t *ptables = z_x86_thread_page_tables_get(arch_current_thread()); uint8_t *virt; size_t aligned_size; int ret = 0; @@ -1958,7 +1958,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) * IPI takes care of this if the thread is currently running on some * other CPU. */ - if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) { + if (thread == arch_current_thread() && thread->arch.ptables != z_x86_cr3_get()) { z_x86_cr3_set(thread->arch.ptables); } #endif /* CONFIG_X86_KPTI */ @@ -1980,8 +1980,9 @@ void z_x86_current_stack_perms(void) /* Clear any previous context in the stack buffer to prevent * unintentional data leakage. */ - (void)memset((void *)_current->stack_info.start, 0xAA, - _current->stack_info.size - _current->stack_info.delta); + (void)memset((void *)arch_current_thread()->stack_info.start, 0xAA, + arch_current_thread()->stack_info.size - + arch_current_thread()->stack_info.delta); /* Only now is it safe to grant access to the stack buffer since any * previous context has been erased. @@ -1991,13 +1992,13 @@ void z_x86_current_stack_perms(void) * This will grant stack and memory domain access if it wasn't set * already (in which case this returns very quickly). */ - z_x86_swap_update_common_page_table(_current); + z_x86_swap_update_common_page_table(arch_current_thread()); #else /* Memory domain access is already programmed into the page tables. * Need to enable access to this new user thread's stack buffer in * its domain-specific page tables. 
*/ - set_stack_perms(_current, z_x86_thread_page_tables_get(_current)); + set_stack_perms(arch_current_thread(), z_x86_thread_page_tables_get(arch_current_thread())); #endif } #endif /* CONFIG_USERSPACE */ diff --git a/arch/xtensa/core/fatal.c b/arch/xtensa/core/fatal.c index 41a7a8d14097bb..5721f130446a16 100644 --- a/arch/xtensa/core/fatal.c +++ b/arch/xtensa/core/fatal.c @@ -140,7 +140,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf) #ifdef CONFIG_USERSPACE void z_impl_xtensa_user_fault(unsigned int reason) { - if ((_current->base.user_options & K_USER) != 0) { + if ((arch_current_thread()->base.user_options & K_USER) != 0) { if ((reason != K_ERR_KERNEL_OOPS) && (reason != K_ERR_STACK_CHK_FAIL)) { reason = K_ERR_KERNEL_OOPS; diff --git a/arch/xtensa/core/ptables.c b/arch/xtensa/core/ptables.c index c02ecc64b0dbef..b6c8e8fb7fd326 100644 --- a/arch/xtensa/core/ptables.c +++ b/arch/xtensa/core/ptables.c @@ -1086,7 +1086,7 @@ static int mem_buffer_validate(const void *addr, size_t size, int write, int rin int ret = 0; uint8_t *virt; size_t aligned_size; - const struct k_thread *thread = _current; + const struct k_thread *thread = arch_current_thread(); uint32_t *ptables = thread_page_tables_get(thread); /* addr/size arbitrary, fix this up into an aligned region */ diff --git a/arch/xtensa/core/thread.c b/arch/xtensa/core/thread.c index f9b8179173d4e7..5bc736a352f2f3 100644 --- a/arch/xtensa/core/thread.c +++ b/arch/xtensa/core/thread.c @@ -156,7 +156,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options) FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { - struct k_thread *current = _current; + struct k_thread *current = arch_current_thread(); size_t stack_end; /* Transition will reset stack pointer to initial, discarding diff --git a/arch/xtensa/core/vector_handlers.c b/arch/xtensa/core/vector_handlers.c index fa58b9c2133adc..f721e480a2c437 100644 --- a/arch/xtensa/core/vector_handlers.c +++ b/arch/xtensa/core/vector_handlers.c @@ -34,7 +34,7 @@ extern char xtensa_arch_kernel_oops_epc[]; bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps) { uintptr_t start, end; - struct k_thread *thread = _current; + struct k_thread *thread = arch_current_thread(); bool was_in_isr, invalid; /* Without userspace, there is no privileged stack so the thread stack diff --git a/boards/native/native_posix/irq_handler.c b/boards/native/native_posix/irq_handler.c index 56ce6931c260bc..69a1f131dcbf2b 100644 --- a/boards/native/native_posix/irq_handler.c +++ b/boards/native/native_posix/irq_handler.c @@ -105,7 +105,7 @@ void posix_irq_handler(void) */ if (may_swap && (hw_irq_ctrl_get_cur_prio() == 256) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { (void)z_swap_irqlock(irq_lock); } diff --git a/boards/native/native_sim/irq_handler.c b/boards/native/native_sim/irq_handler.c index 38462b4b14a428..c9a18f018639bc 100644 --- a/boards/native/native_sim/irq_handler.c +++ b/boards/native/native_sim/irq_handler.c @@ -113,7 +113,7 @@ void posix_irq_handler(void) */ if (may_swap && (hw_irq_ctrl_get_cur_prio() == 256) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { (void)z_swap_irqlock(irq_lock); } diff --git a/boards/native/nrf_bsim/irq_handler.c b/boards/native/nrf_bsim/irq_handler.c index 
2d6ad4f66b7c43..c794395fb9a73d 100644 --- a/boards/native/nrf_bsim/irq_handler.c +++ b/boards/native/nrf_bsim/irq_handler.c @@ -135,7 +135,7 @@ void posix_irq_handler(void) if (may_swap && (hw_irq_ctrl_get_cur_prio(cpu_n) == 256) && (CPU_will_be_awaken_from_WFE == false) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { z_swap_irqlock(irq_lock); } diff --git a/doc/kernel/services/smp/smp.rst b/doc/kernel/services/smp/smp.rst index 615676494f822c..e570958fce23e2 100644 --- a/doc/kernel/services/smp/smp.rst +++ b/doc/kernel/services/smp/smp.rst @@ -276,7 +276,7 @@ Per-CPU data ============ Many elements of the core kernel data need to be implemented for each -CPU in SMP mode. For example, the ``_current`` thread pointer obviously +CPU in SMP mode. For example, the ``arch_current_thread()`` thread pointer obviously needs to reflect what is running locally, there are many threads running concurrently. Likewise a kernel-provided interrupt stack needs to be created and assigned for each physical CPU, as does the diff --git a/doc/releases/migration-guide-4.1.rst b/doc/releases/migration-guide-4.1.rst index 055066c5d4b35b..e7420af494173d 100644 --- a/doc/releases/migration-guide-4.1.rst +++ b/doc/releases/migration-guide-4.1.rst @@ -168,6 +168,10 @@ Modem Architectures ************* +* Common + + * ``_current`` is deprecated, use :c:func:`arch_current_thread` instead. + * native/POSIX * :kconfig:option:`CONFIG_NATIVE_APPLICATION` has been deprecated. Out-of-tree boards using this diff --git a/drivers/wifi/eswifi/eswifi.h b/drivers/wifi/eswifi/eswifi.h index 54bf00f0f78960..0bf4fddb2066a4 100644 --- a/drivers/wifi/eswifi/eswifi.h +++ b/drivers/wifi/eswifi/eswifi.h @@ -92,9 +92,9 @@ static inline int eswifi_request(struct eswifi_dev *eswifi, char *cmd, static inline void eswifi_lock(struct eswifi_dev *eswifi) { /* Nested locking */ - if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)_current) { + if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)arch_current_thread()) { k_mutex_lock(&eswifi->mutex, K_FOREVER); - atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)_current); + atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)arch_current_thread()); eswifi->mutex_depth = 1; } else { eswifi->mutex_depth++; } diff --git a/include/zephyr/arch/arch_interface.h b/include/zephyr/arch/arch_interface.h index 6721cec1171714..0f081d06adafa9 100644 --- a/include/zephyr/arch/arch_interface.h +++ b/include/zephyr/arch/arch_interface.h @@ -1289,7 +1289,7 @@ typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr); * ============ ======= ============================================ * thread esf * ============ ======= ============================================ - * thread NULL Stack trace from thread (can be _current) + * thread NULL Stack trace from thread (can be arch_current_thread()) * thread esf Stack trace starting on esf * ============ ======= ============================================ */ diff --git a/include/zephyr/arch/common/arch_inlines.h b/include/zephyr/arch/common/arch_inlines.h index 0490dba71aab53..8c0ba3343ad9f6 100644 --- a/include/zephyr/arch/common/arch_inlines.h +++ b/include/zephyr/arch/common/arch_inlines.h @@ -19,7 +19,7 @@ static ALWAYS_INLINE struct k_thread *arch_current_thread(void) { #ifdef CONFIG_SMP - /* In SMP, _current is a field read from _current_cpu, which + /* In SMP, arch_current_thread() is a field read from _current_cpu, which
* can race with preemption before it is read. We must lock * local interrupts when reading it. */ diff --git a/include/zephyr/arch/x86/ia32/arch.h b/include/zephyr/arch/x86/ia32/arch.h index b82e0db0f1733e..e2f961c817e25a 100644 --- a/include/zephyr/arch/x86/ia32/arch.h +++ b/include/zephyr/arch/x86/ia32/arch.h @@ -305,7 +305,7 @@ static inline void arch_isr_direct_footer(int swap) * 3) Next thread to run in the ready queue is not this thread */ if (swap != 0 && _kernel.cpus[0].nested == 0 && - _kernel.ready_q.cache != _current) { + _kernel.ready_q.cache != arch_current_thread()) { unsigned int flags; /* Fetch EFLAGS argument to z_swap() */ diff --git a/include/zephyr/internal/syscall_handler.h b/include/zephyr/internal/syscall_handler.h index b48070fad6b46f..a1264d6c287334 100644 --- a/include/zephyr/internal/syscall_handler.h +++ b/include/zephyr/internal/syscall_handler.h @@ -62,7 +62,7 @@ static inline bool k_is_in_user_syscall(void) * calls from supervisor mode bypass everything directly to * the implementation function. */ - return !k_is_in_isr() && (_current->syscall_frame != NULL); + return !k_is_in_isr() && (arch_current_thread()->syscall_frame != NULL); } /** @@ -350,7 +350,7 @@ int k_usermode_string_copy(char *dst, const char *src, size_t maxlen); #define K_OOPS(expr) \ do { \ if (expr) { \ - arch_syscall_oops(_current->syscall_frame); \ + arch_syscall_oops(arch_current_thread()->syscall_frame); \ } \ } while (false) diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 2467598175fc57..175e0a6c6dd542 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -171,7 +171,7 @@ struct _cpu { #endif #ifdef CONFIG_SMP - /* True when _current is allowed to context switch */ + /* True when arch_current_thread() is allowed to context switch */ uint8_t swap_ok; #endif @@ -260,12 +260,12 @@ bool z_smp_cpu_mobile(void); #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \ arch_curr_cpu(); }) -#define _current arch_current_thread() #else #define _current_cpu (&_kernel.cpus[0]) -#define _current _kernel.cpus[0].current -#endif +#endif /* CONFIG_SMP */ + +#define _current arch_current_thread() __DEPRECATED_MACRO /* kernel wait queue record */ #ifdef CONFIG_WAITQ_SCALABLE diff --git a/kernel/Kconfig b/kernel/Kconfig index 0553161eca1ee7..62713a5c9bb65b 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -211,7 +211,7 @@ config THREAD_ABORT_NEED_CLEANUP bool help This option enables the bits to clean up the current thread if - k_thread_abort(_current) is called, as the cleanup cannot be + k_thread_abort(arch_current_thread()) is called, as the cleanup cannot be running in the current thread stack. 
config THREAD_CUSTOM_DATA diff --git a/kernel/errno.c b/kernel/errno.c index bbbd6f87bfd039..2535e00e336cb0 100644 --- a/kernel/errno.c +++ b/kernel/errno.c @@ -36,7 +36,7 @@ int *z_impl_z_errno(void) /* Initialized to the lowest address in the stack so the thread can * directly read/write it */ - return &_current->userspace_local_data->errno_var; + return &arch_current_thread()->userspace_local_data->errno_var; } static inline int *z_vrfy_z_errno(void) @@ -48,7 +48,7 @@ static inline int *z_vrfy_z_errno(void) #else int *z_impl_z_errno(void) { - return &_current->errno_var; + return &arch_current_thread()->errno_var; } #endif /* CONFIG_USERSPACE */ diff --git a/kernel/fatal.c b/kernel/fatal.c index 3cf3114364da87..a5682e7cd7f417 100644 --- a/kernel/fatal.c +++ b/kernel/fatal.c @@ -90,7 +90,7 @@ void z_fatal_error(unsigned int reason, const struct arch_esf *esf) */ unsigned int key = arch_irq_lock(); struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ? - _current : NULL; + arch_current_thread() : NULL; /* twister looks for the "ZEPHYR FATAL ERROR" string, don't * change it without also updating twister diff --git a/kernel/idle.c b/kernel/idle.c index 62ff84e4c88dc9..4d095c8f27b3ab 100644 --- a/kernel/idle.c +++ b/kernel/idle.c @@ -24,7 +24,7 @@ void idle(void *unused1, void *unused2, void *unused3) ARG_UNUSED(unused2); ARG_UNUSED(unused3); - __ASSERT_NO_MSG(_current->base.prio >= 0); + __ASSERT_NO_MSG(arch_current_thread()->base.prio >= 0); while (true) { /* SMP systems without a working IPI can't actual @@ -85,7 +85,7 @@ void idle(void *unused1, void *unused2, void *unused3) * explicitly yield in the idle thread otherwise * nothing else will run once it starts. */ - if (_kernel.ready_q.cache != _current) { + if (_kernel.ready_q.cache != arch_current_thread()) { z_swap_unlocked(); } # endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */ diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h index 94f90ce94624c3..cb13aacf6007a6 100644 --- a/kernel/include/kernel_internal.h +++ b/kernel/include/kernel_internal.h @@ -286,7 +286,7 @@ int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats); * where these steps require that the thread is no longer running. * If the target thread is not the current running thread, the cleanup * steps will be performed immediately. However, if the target thread is - * the current running thread (e.g. k_thread_abort(_current)), it defers + * the current running thread (e.g. k_thread_abort(arch_current_thread())), it defers * the cleanup steps to later when the work will be finished in another * context. 
* diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index a2fd3287fd6a6d..ff529d06fca036 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -143,9 +143,9 @@ static inline bool _is_valid_prio(int prio, void *entry_point) static inline void z_sched_lock(void) { __ASSERT(!arch_is_in_isr(), ""); - __ASSERT(_current->base.sched_locked != 1U, ""); + __ASSERT(arch_current_thread()->base.sched_locked != 1U, ""); - --_current->base.sched_locked; + --arch_current_thread()->base.sched_locked; compiler_barrier(); } diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index 03c9967b2b609c..66d7b431d3876d 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -97,12 +97,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, */ # ifndef CONFIG_ARM64 __ASSERT(arch_irq_unlocked(key) || - _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD), + arch_current_thread()->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD), "Context switching while holding lock!"); # endif /* CONFIG_ARM64 */ #endif /* CONFIG_SPIN_VALIDATE */ - old_thread = _current; + old_thread = arch_current_thread(); z_check_stack_sentinel(); @@ -147,7 +147,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, arch_cohere_stacks(old_thread, NULL, new_thread); #ifdef CONFIG_SMP - /* Now add _current back to the run queue, once we are + /* Now add arch_current_thread() back to the run queue, once we are * guaranteed to reach the context switch in finite * time. See z_sched_switch_spin(). */ @@ -175,7 +175,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, irq_unlock(key); } - return _current->swap_retval; + return arch_current_thread()->swap_retval; } static inline int z_swap_irqlock(unsigned int key) diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h index e2505f65b3b96b..512636c49c588b 100644 --- a/kernel/include/kthread.h +++ b/kernel/include/kthread.h @@ -197,17 +197,17 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread, return true; } - __ASSERT(_current != NULL, ""); + __ASSERT(arch_current_thread() != NULL, ""); /* Or if we're pended/suspended/dummy (duh) */ - if (z_is_thread_prevented_from_running(_current)) { + if (z_is_thread_prevented_from_running(arch_current_thread())) { return true; } /* Otherwise we have to be running a preemptible thread or * switching to a metairq */ - if (thread_is_preemptible(_current) || thread_is_metairq(thread)) { + if (thread_is_preemptible(arch_current_thread()) || thread_is_metairq(thread)) { return true; } diff --git a/kernel/ipi.c b/kernel/ipi.c index ee01c4594251ca..59c2eba669867e 100644 --- a/kernel/ipi.c +++ b/kernel/ipi.c @@ -101,7 +101,7 @@ void z_sched_ipi(void) #endif /* CONFIG_TRACE_SCHED_IPI */ #ifdef CONFIG_TIMESLICING - if (thread_is_sliceable(_current)) { + if (thread_is_sliceable(arch_current_thread())) { z_time_slice(); } #endif /* CONFIG_TIMESLICING */ diff --git a/kernel/mailbox.c b/kernel/mailbox.c index d7da8e3c8e49ae..17ebfb2ea03512 100644 --- a/kernel/mailbox.c +++ b/kernel/mailbox.c @@ -216,7 +216,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_spinlock_key_t key; /* save sender id so it can be used during message matching */ - tx_msg->rx_source_thread = _current; + tx_msg->rx_source_thread = arch_current_thread(); /* finish readying sending thread (actual or dummy) for send */ sending_thread = tx_msg->_syncing_thread; @@ -296,7 +296,7 @@ int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) { 
/* configure things for a synchronous send, then send the message */ - tx_msg->_syncing_thread = _current; + tx_msg->_syncing_thread = arch_current_thread(); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout); @@ -321,7 +321,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, */ mbox_async_alloc(&async); - async->thread.prio = _current->base.prio; + async->thread.prio = arch_current_thread()->base.prio; async->tx_msg = *tx_msg; async->tx_msg._syncing_thread = (struct k_thread *)&async->thread; @@ -388,7 +388,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, int result; /* save receiver id so it can be used during message matching */ - rx_msg->tx_target_thread = _current; + rx_msg->tx_target_thread = arch_current_thread(); /* search mailbox's tx queue for a compatible sender */ key = k_spin_lock(&mbox->lock); @@ -425,7 +425,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout); /* wait until a matching sender appears or a timeout occurs */ - _current->base.swap_data = rx_msg; + arch_current_thread()->base.swap_data = rx_msg; result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout); /* consume message data immediately, if needed */ diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c index 16b337acf011df..1fc8a36a94de29 100644 --- a/kernel/mem_domain.c +++ b/kernel/mem_domain.c @@ -299,7 +299,7 @@ void z_mem_domain_init_thread(struct k_thread *thread) k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); /* New threads inherit memory domain configuration from parent */ - ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread); + ret = add_thread_locked(arch_current_thread()->mem_domain_info.mem_domain, thread); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c index e64359174c5ab3..45ba08e27cbe2e 100644 --- a/kernel/mem_slab.c +++ b/kernel/mem_slab.c @@ -248,7 +248,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) /* wait for a free block or timeout */ result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout); if (result == 0) { - *mem = _current->base.swap_data; + *mem = arch_current_thread()->base.swap_data; } SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result); diff --git a/kernel/mempool.c b/kernel/mempool.c index d8926c63ed9407..7e9a7677f71ab8 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -165,7 +165,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size) if (k_is_in_isr()) { heap = _SYSTEM_HEAP; } else { - heap = _current->resource_pool; + heap = arch_current_thread()->resource_pool; } if (heap != NULL) { diff --git a/kernel/mmu.c b/kernel/mmu.c index 617b02997dd9cd..fc55096d44b53f 100644 --- a/kernel/mmu.c +++ b/kernel/mmu.c @@ -1674,7 +1674,7 @@ static bool do_page_fault(void *addr, bool pin) #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */ key = k_spin_lock(&z_mm_lock); - faulting_thread = _current; + faulting_thread = arch_current_thread(); status = arch_page_location_get(addr, &page_in_location); if (status == ARCH_PAGE_LOCATION_BAD) { diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 9adfd9ae34f29c..190b8c4e78e323 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -169,7 +169,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, put, msgq, timeout); /* wait for put message success, failure, or timeout */ - 
_current->base.swap_data = (void *) data; + arch_current_thread()->base.swap_data = (void *) data; result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result); @@ -267,7 +267,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout); /* wait for get message success or timeout */ - _current->base.swap_data = data; + arch_current_thread()->base.swap_data = data; result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result); diff --git a/kernel/mutex.c b/kernel/mutex.c index ce76e5a2af545a..2fbede19e2caf3 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -114,17 +114,17 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) key = k_spin_lock(&lock); - if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) { + if (likely((mutex->lock_count == 0U) || (mutex->owner == arch_current_thread()))) { mutex->owner_orig_prio = (mutex->lock_count == 0U) ? - _current->base.prio : + arch_current_thread()->base.prio : mutex->owner_orig_prio; mutex->lock_count++; - mutex->owner = _current; + mutex->owner = arch_current_thread(); LOG_DBG("%p took mutex %p, count: %d, orig prio: %d", - _current, mutex, mutex->lock_count, + arch_current_thread(), mutex, mutex->lock_count, mutex->owner_orig_prio); k_spin_unlock(&lock, key); @@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout); - new_prio = new_prio_for_inheritance(_current->base.prio, + new_prio = new_prio_for_inheritance(arch_current_thread()->base.prio, mutex->owner->base.prio); LOG_DBG("adjusting prio up on mutex %p", mutex); @@ -157,7 +157,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex); - LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex, + LOG_DBG("%p got mutex %p (y/n): %c", arch_current_thread(), mutex, got_mutex ? 'y' : 'n'); if (got_mutex == 0) { @@ -167,7 +167,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) /* timed out */ - LOG_DBG("%p timeout on mutex %p", _current, mutex); + LOG_DBG("%p timeout on mutex %p", arch_current_thread(), mutex); key = k_spin_lock(&lock); @@ -224,7 +224,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex) /* * The current thread does not own the mutex. */ - CHECKIF(mutex->owner != _current) { + CHECKIF(mutex->owner != arch_current_thread()) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM); return -EPERM; diff --git a/kernel/pipes.c b/kernel/pipes.c index a81393c508d1c5..a9eef5a4f368cb 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c @@ -443,11 +443,11 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data, * invoked from within an ISR as that is not safe to do. */ - src_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc; + src_desc = k_is_in_isr() ? 
&isr_desc : &arch_current_thread()->pipe_desc; src_desc->buffer = (unsigned char *)data; src_desc->bytes_to_xfer = bytes_to_write; - src_desc->thread = _current; + src_desc->thread = arch_current_thread(); sys_dlist_append(&src_list, &src_desc->node); *bytes_written = pipe_write(pipe, &src_list, @@ -488,7 +488,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout); - _current->base.swap_data = src_desc; + arch_current_thread()->base.swap_data = src_desc; z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL); @@ -581,11 +581,11 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe, * invoked from within an ISR as that is not safe to do. */ - dest_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc; + dest_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc; dest_desc->buffer = data; dest_desc->bytes_to_xfer = bytes_to_read; - dest_desc->thread = _current; + dest_desc->thread = arch_current_thread(); src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list); while (src_desc != NULL) { @@ -674,7 +674,7 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout); - _current->base.swap_data = dest_desc; + arch_current_thread()->base.swap_data = dest_desc; z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL); diff --git a/kernel/poll.c b/kernel/poll.c index 502e97537b9b60..05e9fe10c3e066 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -290,7 +290,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, { int events_registered; k_spinlock_key_t key; - struct z_poller *poller = &_current->poller; + struct z_poller *poller = &arch_current_thread()->poller; poller->is_polling = true; poller->mode = MODE_POLL; diff --git a/kernel/queue.c b/kernel/queue.c index 4b00deeb1e757d..09b224c9c94494 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -346,9 +346,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout) int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, - (ret != 0) ? NULL : _current->base.swap_data); + (ret != 0) ? NULL : arch_current_thread()->base.swap_data); - return (ret != 0) ? NULL : _current->base.swap_data; + return (ret != 0) ? 
NULL : arch_current_thread()->base.swap_data; } bool k_queue_remove(struct k_queue *queue, void *data) diff --git a/kernel/sched.c b/kernel/sched.c index 8faeea27cec17c..d88db029b7455c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -31,7 +31,7 @@ extern struct k_thread *pending_current; struct k_spinlock _sched_spinlock; /* Storage to "complete" the context switch from an invalid/incomplete thread - * context (ex: exiting an ISR that aborted _current) + * context (ex: exiting an ISR that aborted arch_current_thread()) */ __incoherent struct k_thread _thread_dummy; @@ -135,12 +135,12 @@ static ALWAYS_INLINE struct k_thread *runq_best(void) return _priq_run_best(curr_cpu_runq()); } -/* _current is never in the run queue until context switch on +/* arch_current_thread() is never in the run queue until context switch on * SMP configurations, see z_requeue_current() */ static inline bool should_queue_thread(struct k_thread *thread) { - return !IS_ENABLED(CONFIG_SMP) || (thread != _current); + return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread()); } static ALWAYS_INLINE void queue_thread(struct k_thread *thread) @@ -150,7 +150,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread) runq_add(thread); } #ifdef CONFIG_SMP - if (thread == _current) { + if (thread == arch_current_thread()) { /* add current to end of queue means "yield" */ _current_cpu->swap_ok = true; } @@ -202,8 +202,8 @@ static inline void clear_halting(struct k_thread *thread) static ALWAYS_INLINE struct k_thread *next_up(void) { #ifdef CONFIG_SMP - if (is_halting(_current)) { - halt_thread(_current, is_aborting(_current) ? + if (is_halting(arch_current_thread())) { + halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED); } #endif /* CONFIG_SMP */ @@ -242,42 +242,42 @@ static ALWAYS_INLINE struct k_thread *next_up(void) #else /* Under SMP, the "cache" mechanism for selecting the next * thread doesn't work, so we have more work to do to test - * _current against the best choice from the queue. Here, the + * arch_current_thread() against the best choice from the queue. Here, the * thread selected above represents "the best thread that is * not current". * - * Subtle note on "queued": in SMP mode, _current does not + * Subtle note on "queued": in SMP mode, arch_current_thread() does not * live in the queue, so this isn't exactly the same thing as - * "ready", it means "is _current already added back to the + * "ready", it means "is arch_current_thread() already added back to the * queue such that we don't want to re-add it". 
*/ - bool queued = z_is_thread_queued(_current); - bool active = !z_is_thread_prevented_from_running(_current); + bool queued = z_is_thread_queued(arch_current_thread()); + bool active = !z_is_thread_prevented_from_running(arch_current_thread()); if (thread == NULL) { thread = _current_cpu->idle_thread; } if (active) { - int32_t cmp = z_sched_prio_cmp(_current, thread); + int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread); /* Ties only switch if state says we yielded */ if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) { - thread = _current; + thread = arch_current_thread(); } if (!should_preempt(thread, _current_cpu->swap_ok)) { - thread = _current; + thread = arch_current_thread(); } } - /* Put _current back into the queue */ - if ((thread != _current) && active && - !z_is_idle_thread_object(_current) && !queued) { - queue_thread(_current); + /* Put arch_current_thread() back into the queue */ + if ((thread != arch_current_thread()) && active && + !z_is_idle_thread_object(arch_current_thread()) && !queued) { + queue_thread(arch_current_thread()); } - /* Take the new _current out of the queue */ + /* Take the new arch_current_thread() out of the queue */ if (z_is_thread_queued(thread)) { dequeue_thread(thread); } @@ -293,7 +293,7 @@ void move_thread_to_end_of_prio_q(struct k_thread *thread) dequeue_thread(thread); } queue_thread(thread); - update_cache(thread == _current); + update_cache(thread == arch_current_thread()); } /* Track cooperative threads preempted by metairqs so we can return to @@ -304,10 +304,10 @@ static void update_metairq_preempt(struct k_thread *thread) { #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \ (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES) - if (thread_is_metairq(thread) && !thread_is_metairq(_current) && - !thread_is_preemptible(_current)) { + if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) && + !thread_is_preemptible(arch_current_thread())) { /* Record new preemption */ - _current_cpu->metairq_preempted = _current; + _current_cpu->metairq_preempted = arch_current_thread(); } else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) { /* Returning from existing preemption */ _current_cpu->metairq_preempted = NULL; @@ -327,14 +327,14 @@ static ALWAYS_INLINE void update_cache(int preempt_ok) if (should_preempt(thread, preempt_ok)) { #ifdef CONFIG_TIMESLICING - if (thread != _current) { + if (thread != arch_current_thread()) { z_reset_time_slice(thread); } #endif /* CONFIG_TIMESLICING */ update_metairq_preempt(thread); _kernel.ready_q.cache = thread; } else { - _kernel.ready_q.cache = _current; + _kernel.ready_q.cache = arch_current_thread(); } #else @@ -427,9 +427,9 @@ void z_sched_start(struct k_thread *thread) */ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key) { - if (is_halting(_current)) { - halt_thread(_current, - is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED); + if (is_halting(arch_current_thread())) { + halt_thread(arch_current_thread(), + is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED); } k_spin_unlock(&_sched_spinlock, key); while (is_halting(thread)) { @@ -443,7 +443,7 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key) /* Shared handler for k_thread_{suspend,abort}(). Called with the * scheduler lock held and the key passed (which it may * release/reacquire!) 
which will be released before a possible return - * (aborting _current will not return, obviously), which may be after + * (aborting arch_current_thread() will not return, obviously), which may be after * a context switch. */ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key, @@ -476,14 +476,14 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key, if (arch_is_in_isr()) { thread_halt_spin(thread, key); } else { - add_to_waitq_locked(_current, wq); + add_to_waitq_locked(arch_current_thread(), wq); z_swap(&_sched_spinlock, key); } } else { halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED); - if ((thread == _current) && !arch_is_in_isr()) { + if ((thread == arch_current_thread()) && !arch_is_in_isr()) { z_swap(&_sched_spinlock, key); - __ASSERT(!terminate, "aborted _current back from dead"); + __ASSERT(!terminate, "aborted arch_current_thread() back from dead"); } else { k_spin_unlock(&_sched_spinlock, key); } @@ -559,7 +559,7 @@ static void unready_thread(struct k_thread *thread) if (z_is_thread_queued(thread)) { dequeue_thread(thread); } - update_cache(thread == _current); + update_cache(thread == arch_current_thread()); } /* _sched_spinlock must be held */ @@ -596,7 +596,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q, void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, k_timeout_t timeout) { - __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread)); + __ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread)); K_SPINLOCK(&_sched_spinlock) { pend_locked(thread, wait_q, timeout); } @@ -657,7 +657,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, _wait_q_t *wait_q, k_timeout_t timeout) { #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) - pending_current = _current; + pending_current = arch_current_thread(); #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */ __ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock); @@ -670,7 +670,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, * held. */ (void) k_spin_lock(&_sched_spinlock); - pend_locked(_current, wait_q, timeout); + pend_locked(arch_current_thread(), wait_q, timeout); k_spin_release(lock); return z_swap(&_sched_spinlock, key); } @@ -768,7 +768,7 @@ static inline bool need_swap(void) /* Check if the next ready thread is the same as the current thread */ new_thread = _kernel.ready_q.cache; - return new_thread != _current; + return new_thread != arch_current_thread(); #endif /* CONFIG_SMP */ } @@ -804,15 +804,15 @@ void k_sched_lock(void) void k_sched_unlock(void) { K_SPINLOCK(&_sched_spinlock) { - __ASSERT(_current->base.sched_locked != 0U, ""); + __ASSERT(arch_current_thread()->base.sched_locked != 0U, ""); __ASSERT(!arch_is_in_isr(), ""); - ++_current->base.sched_locked; + ++arch_current_thread()->base.sched_locked; update_cache(0); } LOG_DBG("scheduler unlocked (%p:%d)", - _current, _current->base.sched_locked); + arch_current_thread(), arch_current_thread()->base.sched_locked); SYS_PORT_TRACING_FUNC(k_thread, sched_unlock); @@ -824,10 +824,10 @@ struct k_thread *z_swap_next_thread(void) #ifdef CONFIG_SMP struct k_thread *ret = next_up(); - if (ret == _current) { + if (ret == arch_current_thread()) { /* When not swapping, have to signal IPIs here. In * the context switch case it must happen later, after - * _current gets requeued. + * arch_current_thread() gets requeued. 
*/ signal_pending_ipi(); } @@ -868,7 +868,7 @@ static inline void set_current(struct k_thread *new_thread) * function. * * @warning - * The _current value may have changed after this call and not refer + * The arch_current_thread() value may have changed after this call and not refer * to the interrupted thread anymore. It might be necessary to make a local * copy before calling this function. * @@ -884,7 +884,7 @@ void *z_get_next_switch_handle(void *interrupted) void *ret = NULL; K_SPINLOCK(&_sched_spinlock) { - struct k_thread *old_thread = _current, *new_thread; + struct k_thread *old_thread = arch_current_thread(), *new_thread; if (IS_ENABLED(CONFIG_SMP)) { old_thread->switch_handle = NULL; @@ -910,7 +910,7 @@ void *z_get_next_switch_handle(void *interrupted) #endif /* CONFIG_TIMESLICING */ #ifdef CONFIG_SPIN_VALIDATE - /* Changed _current! Update the spinlock + /* Changed arch_current_thread()! Update the spinlock * bookkeeping so the validation doesn't get * confused when the "wrong" thread tries to * release the lock. @@ -945,9 +945,9 @@ void *z_get_next_switch_handle(void *interrupted) return ret; #else z_sched_usage_switch(_kernel.ready_q.cache); - _current->switch_handle = interrupted; + arch_current_thread()->switch_handle = interrupted; set_current(_kernel.ready_q.cache); - return _current->switch_handle; + return arch_current_thread()->switch_handle; #endif /* CONFIG_SMP */ } #endif /* CONFIG_USE_SWITCH */ @@ -993,7 +993,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio) bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio); if ((need_sched) && (IS_ENABLED(CONFIG_SMP) || - (_current->base.sched_locked == 0U))) { + (arch_current_thread()->base.sched_locked == 0U))) { z_reschedule_unlocked(); } } @@ -1059,7 +1059,7 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) bool k_can_yield(void) { return !(k_is_pre_kernel() || k_is_in_isr() || - z_is_idle_thread_object(_current)); + z_is_idle_thread_object(arch_current_thread())); } void z_impl_k_yield(void) @@ -1071,10 +1071,10 @@ void z_impl_k_yield(void) k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); if (!IS_ENABLED(CONFIG_SMP) || - z_is_thread_queued(_current)) { - dequeue_thread(_current); + z_is_thread_queued(arch_current_thread())) { + dequeue_thread(arch_current_thread()); } - queue_thread(_current); + queue_thread(arch_current_thread()); update_cache(1); z_swap(&_sched_spinlock, key); } @@ -1093,7 +1093,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks) __ASSERT(!arch_is_in_isr(), ""); - LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); + LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks); /* wait of 0 ms is treated as a 'yield' */ if (ticks == 0) { @@ -1111,15 +1111,15 @@ static int32_t z_tick_sleep(k_ticks_t ticks) k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) - pending_current = _current; + pending_current = arch_current_thread(); #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */ - unready_thread(_current); - z_add_thread_timeout(_current, timeout); - z_mark_thread_as_suspended(_current); + unready_thread(arch_current_thread()); + z_add_thread_timeout(arch_current_thread(), timeout); + z_mark_thread_as_suspended(arch_current_thread()); (void)z_swap(&_sched_spinlock, key); - __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), ""); + __ASSERT(!z_is_thread_state_set(arch_current_thread(), _THREAD_SUSPENDED), ""); ticks = 
(k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32(); if (ticks > 0) { @@ -1140,7 +1140,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout) /* in case of K_FOREVER, we suspend */ if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER); return (int32_t) K_TICKS_FOREVER; @@ -1285,13 +1285,13 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state) (void)z_abort_thread_timeout(thread); unpend_all(&thread->join_queue); - /* Edge case: aborting _current from within an + /* Edge case: aborting arch_current_thread() from within an * ISR that preempted it requires clearing the - * _current pointer so the upcoming context + * arch_current_thread() pointer so the upcoming context * switch doesn't clobber the now-freed * memory */ - if (thread == _current && arch_is_in_isr()) { + if (thread == arch_current_thread() && arch_is_in_isr()) { dummify = true; } } @@ -1334,10 +1334,10 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state) k_thread_abort_cleanup(thread); #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */ - /* Do this "set _current to dummy" step last so that - * subsystems above can rely on _current being + /* Do this "set arch_current_thread() to dummy" step last so that + * subsystems above can rely on arch_current_thread() being * unchanged. Disabled for posix as that arch - * continues to use the _current pointer in its swap + * continues to use the arch_current_thread() pointer in its swap * code. Note that we must leave a non-null switch * handle for any threads spinning in join() (this can * never be used, as our thread is flagged dead, but @@ -1345,7 +1345,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state) */ if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) { #ifdef CONFIG_USE_SWITCH - _current->switch_handle = _current; + arch_current_thread()->switch_handle = arch_current_thread(); #endif z_dummy_thread_init(&_thread_dummy); @@ -1403,13 +1403,13 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) ret = 0; } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { ret = -EBUSY; - } else if ((thread == _current) || - (thread->base.pended_on == &_current->join_queue)) { + } else if ((thread == arch_current_thread()) || + (thread->base.pended_on == &arch_current_thread()->join_queue)) { ret = -EDEADLK; } else { __ASSERT(!arch_is_in_isr(), "cannot join in ISR"); - add_to_waitq_locked(_current, &thread->join_queue); - add_thread_timeout(_current, timeout); + add_to_waitq_locked(arch_current_thread(), &thread->join_queue); + add_thread_timeout(arch_current_thread(), timeout); SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); ret = z_swap(&_sched_spinlock, key); @@ -1508,7 +1508,7 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, int ret = z_pend_curr(lock, key, wait_q, timeout); if (data != NULL) { - *data = _current->base.swap_data; + *data = arch_current_thread()->base.swap_data; } return ret; } diff --git a/kernel/smp.c b/kernel/smp.c index a56595252789a9..b0eefb35e41449 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -58,23 +58,23 @@ unsigned int z_smp_global_lock(void) { unsigned int key = arch_irq_lock(); - if (!_current->base.global_lock_count) { + if (!arch_current_thread()->base.global_lock_count) { while (!atomic_cas(&global_lock, 0, 1)) { arch_spin_relax(); } } - _current->base.global_lock_count++; + arch_current_thread()->base.global_lock_count++; 
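	/*
	 * Editor's illustration, not part of the patch: the counter read through
	 * arch_current_thread() is what makes the SMP global lock recursive per
	 * thread. Assumed kernel-internal usage, with the functions defined in
	 * this file:
	 *
	 *     unsigned int k1 = z_smp_global_lock();  // count 0 -> 1, takes lock
	 *     unsigned int k2 = z_smp_global_lock();  // count 1 -> 2, no spinning
	 *     z_smp_global_unlock(k2);                // count 2 -> 1, lock held
	 *     z_smp_global_unlock(k1);                // count 1 -> 0, lock released
	 */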
return key; } void z_smp_global_unlock(unsigned int key) { - if (_current->base.global_lock_count != 0U) { - _current->base.global_lock_count--; + if (arch_current_thread()->base.global_lock_count != 0U) { + arch_current_thread()->base.global_lock_count--; - if (!_current->base.global_lock_count) { + if (!arch_current_thread()->base.global_lock_count) { (void)atomic_clear(&global_lock); } } diff --git a/kernel/spinlock_validate.c b/kernel/spinlock_validate.c index cb7ff5a3e7ff7a..c2a97356d9cec0 100644 --- a/kernel/spinlock_validate.c +++ b/kernel/spinlock_validate.c @@ -24,11 +24,11 @@ bool z_spin_unlock_valid(struct k_spinlock *l) l->thread_cpu = 0; - if (arch_is_in_isr() && _current->base.thread_state & _THREAD_DUMMY) { - /* Edge case where an ISR aborted _current */ + if (arch_is_in_isr() && arch_current_thread()->base.thread_state & _THREAD_DUMMY) { + /* Edge case where an ISR aborted arch_current_thread() */ return true; } - if (tcpu != (_current_cpu->id | (uintptr_t)_current)) { + if (tcpu != (_current_cpu->id | (uintptr_t)arch_current_thread())) { return false; } return true; @@ -36,7 +36,7 @@ bool z_spin_unlock_valid(struct k_spinlock *l) void z_spin_lock_set_owner(struct k_spinlock *l) { - l->thread_cpu = _current_cpu->id | (uintptr_t)_current; + l->thread_cpu = _current_cpu->id | (uintptr_t)arch_current_thread(); } #ifdef CONFIG_KERNEL_COHERENCE diff --git a/kernel/stack.c b/kernel/stack.c index 5add38b9c2318e..b3ea624b1625e7 100644 --- a/kernel/stack.c +++ b/kernel/stack.c @@ -182,7 +182,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, return -EAGAIN; } - *data = (stack_data_t)_current->base.swap_data; + *data = (stack_data_t)arch_current_thread()->base.swap_data; SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0); diff --git a/kernel/thread.c b/kernel/thread.c index 8dabaa8db2cfe4..60bc39e40dc4da 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -82,7 +82,7 @@ EXPORT_SYMBOL(k_is_in_isr); #ifdef CONFIG_THREAD_CUSTOM_DATA void z_impl_k_thread_custom_data_set(void *value) { - _current->custom_data = value; + arch_current_thread()->custom_data = value; } #ifdef CONFIG_USERSPACE @@ -95,7 +95,7 @@ static inline void z_vrfy_k_thread_custom_data_set(void *data) void *z_impl_k_thread_custom_data_get(void) { - return _current->custom_data; + return arch_current_thread()->custom_data; } #ifdef CONFIG_USERSPACE @@ -110,7 +110,7 @@ static inline void *z_vrfy_k_thread_custom_data_get(void) int z_impl_k_is_preempt_thread(void) { - return !arch_is_in_isr() && thread_is_preemptible(_current); + return !arch_is_in_isr() && thread_is_preemptible(arch_current_thread()); } #ifdef CONFIG_USERSPACE @@ -139,7 +139,7 @@ int z_impl_k_thread_name_set(k_tid_t thread, const char *str) { #ifdef CONFIG_THREAD_NAME if (thread == NULL) { - thread = _current; + thread = arch_current_thread(); } strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1); @@ -331,11 +331,11 @@ void z_check_stack_sentinel(void) { uint32_t *stack; - if ((_current->base.thread_state & _THREAD_DUMMY) != 0) { + if ((arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) { return; } - stack = (uint32_t *)_current->stack_info.start; + stack = (uint32_t *)arch_current_thread()->stack_info.start; if (*stack != STACK_SENTINEL) { /* Restore it so further checks don't trigger this same error */ *stack = STACK_SENTINEL; @@ -627,8 +627,8 @@ char *z_setup_new_thread(struct k_thread *new_thread, } #endif /* CONFIG_SCHED_CPU_MASK */ #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN - /* _current may be 
null if the dummy thread is not used */ - if (!_current) { + /* arch_current_thread() may be null if the dummy thread is not used */ + if (!arch_current_thread()) { new_thread->resource_pool = NULL; return stack_ptr; } @@ -637,13 +637,13 @@ char *z_setup_new_thread(struct k_thread *new_thread, z_mem_domain_init_thread(new_thread); if ((options & K_INHERIT_PERMS) != 0U) { - k_thread_perms_inherit(_current, new_thread); + k_thread_perms_inherit(arch_current_thread(), new_thread); } #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_SCHED_DEADLINE new_thread->base.prio_deadline = 0; #endif /* CONFIG_SCHED_DEADLINE */ - new_thread->resource_pool = _current->resource_pool; + new_thread->resource_pool = arch_current_thread()->resource_pool; #ifdef CONFIG_SMP z_waitq_init(&new_thread->halt_queue); @@ -738,7 +738,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread, */ K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL))); K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio, - _current->base.prio))); + arch_current_thread()->base.prio))); z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3, prio, options, NULL); @@ -783,25 +783,25 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, { SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter); - _current->base.user_options |= K_USER; - z_thread_essential_clear(_current); + arch_current_thread()->base.user_options |= K_USER; + z_thread_essential_clear(arch_current_thread()); #ifdef CONFIG_THREAD_MONITOR - _current->entry.pEntry = entry; - _current->entry.parameter1 = p1; - _current->entry.parameter2 = p2; - _current->entry.parameter3 = p3; + arch_current_thread()->entry.pEntry = entry; + arch_current_thread()->entry.parameter1 = p1; + arch_current_thread()->entry.parameter2 = p2; + arch_current_thread()->entry.parameter3 = p3; #endif /* CONFIG_THREAD_MONITOR */ #ifdef CONFIG_USERSPACE - __ASSERT(z_stack_is_user_capable(_current->stack_obj), + __ASSERT(z_stack_is_user_capable(arch_current_thread()->stack_obj), "dropping to user mode with kernel-only stack object"); #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA - memset(_current->userspace_local_data, 0, + memset(arch_current_thread()->userspace_local_data, 0, sizeof(struct _thread_userspace_local_data)); #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */ #ifdef CONFIG_THREAD_LOCAL_STORAGE - arch_tls_stack_setup(_current, - (char *)(_current->stack_info.start + - _current->stack_info.size)); + arch_tls_stack_setup(arch_current_thread(), + (char *)(arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size)); #endif /* CONFIG_THREAD_LOCAL_STORAGE */ arch_user_mode_enter(entry, p1, p2, p3); #else @@ -929,7 +929,7 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks( void z_thread_mark_switched_in(void) { #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH) - z_sched_usage_start(_current); + z_sched_usage_start(arch_current_thread()); #endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */ #ifdef CONFIG_TRACING @@ -946,8 +946,8 @@ void z_thread_mark_switched_out(void) #ifdef CONFIG_TRACING #ifdef CONFIG_THREAD_LOCAL_STORAGE /* Dummy thread won't have TLS set up to run arbitrary code */ - if (!_current || - (_current->base.thread_state & _THREAD_DUMMY) != 0) + if (!arch_current_thread() || + (arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) return; #endif /* CONFIG_THREAD_LOCAL_STORAGE */ SYS_PORT_TRACING_FUNC(k_thread, switched_out); @@ -1097,7 +1097,7 @@ void k_thread_abort_cleanup(struct k_thread *thread) 
thread_to_cleanup = NULL; } - if (thread == _current) { + if (thread == arch_current_thread()) { /* Need to defer for current running thread as the cleanup * might result in exception. Actual cleanup will be done * at the next time k_thread_abort() is called, or at thread diff --git a/kernel/timeslicing.c b/kernel/timeslicing.c index be91d9606f51e2..0410d42b91fe3f 100644 --- a/kernel/timeslicing.c +++ b/kernel/timeslicing.c @@ -15,7 +15,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS]; #ifdef CONFIG_SWAP_NONATOMIC /* If z_swap() isn't atomic, then it's possible for a timer interrupt - * to try to timeslice away _current after it has already pended + * to try to timeslice away arch_current_thread() after it has already pended * itself but before the corresponding context switch. Treat that as * a noop condition in z_time_slice(). */ @@ -82,7 +82,7 @@ void k_sched_time_slice_set(int32_t slice, int prio) K_SPINLOCK(&_sched_spinlock) { slice_ticks = k_ms_to_ticks_ceil32(slice); slice_max_prio = prio; - z_reset_time_slice(_current); + z_reset_time_slice(arch_current_thread()); } } @@ -103,7 +103,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks void z_time_slice(void) { k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); - struct k_thread *curr = _current; + struct k_thread *curr = arch_current_thread(); #ifdef CONFIG_SWAP_NONATOMIC if (pending_current == curr) { diff --git a/kernel/userspace.c b/kernel/userspace.c index 7a66513c03e5a9..5aeafe221c72a0 100644 --- a/kernel/userspace.c +++ b/kernel/userspace.c @@ -437,7 +437,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size) /* The allocating thread implicitly gets permission on kernel objects * that it allocates */ - k_thread_perms_set(zo, _current); + k_thread_perms_set(zo, arch_current_thread()); /* Activates reference counting logic for automatic disposal when * all permissions have been revoked @@ -654,7 +654,7 @@ static int thread_perms_test(struct k_object *ko) return 1; } - index = thread_index_get(_current); + index = thread_index_get(arch_current_thread()); if (index != -1) { return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index); } @@ -663,9 +663,9 @@ static int thread_perms_test(struct k_object *ko) static void dump_permission_error(struct k_object *ko) { - int index = thread_index_get(_current); + int index = thread_index_get(arch_current_thread()); LOG_ERR("thread %p (%d) does not have permission on %s %p", - _current, index, + arch_current_thread(), index, otype_to_str(ko->type), ko->name); LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap"); } @@ -718,7 +718,7 @@ void k_object_access_revoke(const void *object, struct k_thread *thread) void z_impl_k_object_release(const void *object) { - k_object_access_revoke(object, _current); + k_object_access_revoke(object, arch_current_thread()); } void k_object_access_all_grant(const void *object) @@ -794,7 +794,7 @@ void k_object_recycle(const void *obj) if (ko != NULL) { (void)memset(ko->perms, 0, sizeof(ko->perms)); - k_thread_perms_set(ko, _current); + k_thread_perms_set(ko, arch_current_thread()); ko->flags |= K_OBJ_FLAG_INITIALIZED; } } diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c index ab6e4f0623c7f2..38e778713bafc2 100644 --- a/kernel/userspace_handler.c +++ b/kernel/userspace_handler.c @@ -72,7 +72,7 @@ static inline void z_vrfy_k_object_release(const void *object) ko = validate_any_object(object); K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied", object)); - 
k_thread_perms_clear(ko, _current); + k_thread_perms_clear(ko, arch_current_thread()); } #include diff --git a/kernel/work.c b/kernel/work.c index 0871fad0eb92ba..5e46576770724a 100644 --- a/kernel/work.c +++ b/kernel/work.c @@ -262,7 +262,7 @@ static inline int queue_submit_locked(struct k_work_q *queue, } int ret; - bool chained = (_current == &queue->thread) && !k_is_in_isr(); + bool chained = (arch_current_thread() == &queue->thread) && !k_is_in_isr(); bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT); bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT); diff --git a/lib/libc/armstdc/src/libc-hooks.c b/lib/libc/armstdc/src/libc-hooks.c index afce534eddfd61..f9fe9d1c4200cf 100644 --- a/lib/libc/armstdc/src/libc-hooks.c +++ b/lib/libc/armstdc/src/libc-hooks.c @@ -23,7 +23,7 @@ void __stdout_hook_install(int (*hook)(int)) volatile int *__aeabi_errno_addr(void) { - return &_current->errno_var; + return &arch_current_thread()->errno_var; } int fputc(int c, FILE *f) diff --git a/lib/os/p4wq.c b/lib/os/p4wq.c index d95de01d2f7676..339de493944355 100644 --- a/lib/os/p4wq.c +++ b/lib/os/p4wq.c @@ -87,10 +87,10 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) = CONTAINER_OF(r, struct k_p4wq_work, rbnode); rb_remove(&queue->queue, r); - w->thread = _current; + w->thread = arch_current_thread(); sys_dlist_append(&queue->active, &w->dlnode); - set_prio(_current, w); - thread_clear_requeued(_current); + set_prio(arch_current_thread(), w); + thread_clear_requeued(arch_current_thread()); k_spin_unlock(&queue->lock, k); @@ -101,7 +101,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) /* Remove from the active list only if it * wasn't resubmitted already */ - if (!thread_was_requeued(_current)) { + if (!thread_was_requeued(arch_current_thread())) { sys_dlist_remove(&w->dlnode); w->thread = NULL; k_sem_give(&w->done_sem); @@ -228,9 +228,9 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item) item->deadline += k_cycle_get_32(); /* Resubmission from within handler? 
Remove from active list */ - if (item->thread == _current) { + if (item->thread == arch_current_thread()) { sys_dlist_remove(&item->dlnode); - thread_set_requeued(_current); + thread_set_requeued(arch_current_thread()); item->thread = NULL; } else { k_sem_init(&item->done_sem, 0, 1); diff --git a/scripts/build/gen_syscalls.py b/scripts/build/gen_syscalls.py index 13dc1448a1f290..607445ce129cdf 100755 --- a/scripts/build/gen_syscalls.py +++ b/scripts/build/gen_syscalls.py @@ -345,7 +345,7 @@ def marshall_defs(func_name, func_type, args): else: mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)\n" mrsh += "{\n" - mrsh += "\t" + "_current->syscall_frame = ssf;\n" + mrsh += "\t" + "arch_current_thread()->syscall_frame = ssf;\n" for unused_arg in range(nmrsh, 6): mrsh += "\t(void) arg%d;\t/* unused */\n" % unused_arg @@ -371,7 +371,7 @@ def marshall_defs(func_name, func_type, args): if func_type == "void": mrsh += "\t" + "%s;\n" % vrfy_call - mrsh += "\t" + "_current->syscall_frame = NULL;\n" + mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" mrsh += "\t" + "return 0;\n" else: mrsh += "\t" + "%s ret = %s;\n" % (func_type, vrfy_call) @@ -380,10 +380,10 @@ def marshall_defs(func_name, func_type, args): ptr = "((uint64_t *)%s)" % mrsh_rval(nmrsh - 1, nmrsh) mrsh += "\t" + "K_OOPS(K_SYSCALL_MEMORY_WRITE(%s, 8));\n" % ptr mrsh += "\t" + "*%s = ret;\n" % ptr - mrsh += "\t" + "_current->syscall_frame = NULL;\n" + mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" mrsh += "\t" + "return 0;\n" else: - mrsh += "\t" + "_current->syscall_frame = NULL;\n" + mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" mrsh += "\t" + "return (uintptr_t) ret;\n" mrsh += "}\n" diff --git a/soc/espressif/esp32/soc.c b/soc/espressif/esp32/soc.c index 3a1fa2041d1ca5..f6f21eddffb1bb 100644 --- a/soc/espressif/esp32/soc.c +++ b/soc/espressif/esp32/soc.c @@ -117,7 +117,7 @@ void IRAM_ATTR __esp_platform_start(void) : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid _current before + * initialization code wants a valid arch_current_thread() before * z_prep_c() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32/soc_appcpu.c b/soc/espressif/esp32/soc_appcpu.c index 45023b0d2747a2..74d807606248f8 100644 --- a/soc/espressif/esp32/soc_appcpu.c +++ b/soc/espressif/esp32/soc_appcpu.c @@ -66,7 +66,7 @@ void __app_cpu_start(void) : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid _current before + * initialization code wants a valid arch_current_thread() before * z_prep_c() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32s2/soc.c b/soc/espressif/esp32s2/soc.c index 02a6d1b4dc6fbb..858618817f2bda 100644 --- a/soc/espressif/esp32s2/soc.c +++ b/soc/espressif/esp32s2/soc.c @@ -62,7 +62,7 @@ void __attribute__((section(".iram1"))) __esp_platform_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid _current before + * initialization code wants a valid arch_current_thread() before * arch_kernel_init() is invoked. 
*/ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32s3/soc.c b/soc/espressif/esp32s3/soc.c index 80a02696fe0496..2449fd90887147 100644 --- a/soc/espressif/esp32s3/soc.c +++ b/soc/espressif/esp32s3/soc.c @@ -97,7 +97,7 @@ void IRAM_ATTR __esp_platform_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid _current before + * initialization code wants a valid arch_current_thread() before * arch_kernel_init() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32s3/soc_appcpu.c b/soc/espressif/esp32s3/soc_appcpu.c index a03304c87519ee..1ec3bbf94aab7d 100644 --- a/soc/espressif/esp32s3/soc_appcpu.c +++ b/soc/espressif/esp32s3/soc_appcpu.c @@ -65,7 +65,7 @@ void IRAM_ATTR __appcpu_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid _current before + * initialization code wants a valid arch_current_thread() before * arch_kernel_init() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1])); diff --git a/subsys/net/lib/sockets/sockets.c b/subsys/net/lib/sockets/sockets.c index 441ed438472b44..a3bc0b71135e44 100644 --- a/subsys/net/lib/sockets/sockets.c +++ b/subsys/net/lib/sockets/sockets.c @@ -68,7 +68,7 @@ static inline void *get_sock_vtable(int sock, if (ctx == NULL) { NET_DBG("Invalid access on sock %d by thread %p (%s)", sock, - _current, k_thread_name_get(_current)); + arch_current_thread(), k_thread_name_get(arch_current_thread())); } return ctx; diff --git a/subsys/portability/cmsis_rtos_v2/kernel.c b/subsys/portability/cmsis_rtos_v2/kernel.c index 519be96023ddb7..1fbd606dbbf065 100644 --- a/subsys/portability/cmsis_rtos_v2/kernel.c +++ b/subsys/portability/cmsis_rtos_v2/kernel.c @@ -39,7 +39,7 @@ osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size) */ int32_t osKernelLock(void) { - int temp = _current->base.sched_locked; + int temp = arch_current_thread()->base.sched_locked; if (k_is_in_isr()) { return osErrorISR; @@ -55,7 +55,7 @@ int32_t osKernelLock(void) */ int32_t osKernelUnlock(void) { - int temp = _current->base.sched_locked; + int temp = arch_current_thread()->base.sched_locked; if (k_is_in_isr()) { return osErrorISR; @@ -71,7 +71,7 @@ int32_t osKernelUnlock(void) */ int32_t osKernelRestoreLock(int32_t lock) { - _current->base.sched_locked = lock; + arch_current_thread()->base.sched_locked = lock; if (k_is_in_isr()) { return osErrorISR; diff --git a/subsys/profiling/perf/backends/perf_riscv.c b/subsys/profiling/perf/backends/perf_riscv.c index 2259e1c60db3a3..0d04676a11b179 100644 --- a/subsys/profiling/perf/backends/perf_riscv.c +++ b/subsys/profiling/perf/backends/perf_riscv.c @@ -76,10 +76,10 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) * function prologue or epilogue. 
*/ buf[idx++] = (uintptr_t)esf->ra; - if (valid_stack((uintptr_t)new_fp, _current)) { + if (valid_stack((uintptr_t)new_fp, arch_current_thread())) { fp = new_fp; } - while (valid_stack((uintptr_t)fp, _current)) { + while (valid_stack((uintptr_t)fp, arch_current_thread())) { if (idx >= size) { return 0; } diff --git a/subsys/profiling/perf/backends/perf_x86.c b/subsys/profiling/perf/backends/perf_x86.c index e62ad64b74a0e3..1321631763428b 100644 --- a/subsys/profiling/perf/backends/perf_x86.c +++ b/subsys/profiling/perf/backends/perf_x86.c @@ -67,7 +67,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) */ buf[idx++] = (uintptr_t)isf->eip; - while (valid_stack((uintptr_t)fp, _current)) { + while (valid_stack((uintptr_t)fp, arch_current_thread())) { if (idx >= size) { return 0; } diff --git a/subsys/profiling/perf/backends/perf_x86_64.c b/subsys/profiling/perf/backends/perf_x86_64.c index 84e45024c3cb4c..f5e13b53597f42 100644 --- a/subsys/profiling/perf/backends/perf_x86_64.c +++ b/subsys/profiling/perf/backends/perf_x86_64.c @@ -35,13 +35,13 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) /* * In x86_64 (arch/x86/core/intel64/locore.S) %rip and %rbp - * are always saved in _current->callee_saved before calling + * are always saved in arch_current_thread()->callee_saved before calling * handler function if interrupt is not nested * * %rip points the location where interrupt was occurred */ - buf[idx++] = (uintptr_t)_current->callee_saved.rip; - void **fp = (void **)_current->callee_saved.rbp; + buf[idx++] = (uintptr_t)arch_current_thread()->callee_saved.rip; + void **fp = (void **)arch_current_thread()->callee_saved.rbp; /* * %rbp is frame pointer. @@ -53,7 +53,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) * %rbp (next) <- %rbp (curr) * .... */ - while (valid_stack((uintptr_t)fp, _current)) { + while (valid_stack((uintptr_t)fp, arch_current_thread())) { if (idx >= size) { return 0; } diff --git a/subsys/shell/modules/kernel_service/thread/unwind.c b/subsys/shell/modules/kernel_service/thread/unwind.c index 903f05822b14a9..e41df7d3b00b01 100644 --- a/subsys/shell/modules/kernel_service/thread/unwind.c +++ b/subsys/shell/modules/kernel_service/thread/unwind.c @@ -30,7 +30,7 @@ static int cmd_kernel_thread_unwind(const struct shell *sh, size_t argc, char ** int err = 0; if (argc == 1) { - thread = _current; + thread = arch_current_thread(); } else { thread = UINT_TO_POINTER(shell_strtoull(argv[1], 16, &err)); if (err != 0) { diff --git a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c index 3891c9b1d41c2d..8ab6e6c4691968 100644 --- a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c +++ b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c @@ -177,7 +177,7 @@ ZTEST(arm_interrupt, test_arm_esf_collection) * crashy thread we create below runs to completion before we get * to the end of this function */ - k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY)); + k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY)); TC_PRINT("Testing ESF Reporting\n"); k_thread_create(&esf_collection_thread, esf_collection_stack, @@ -366,9 +366,9 @@ ZTEST(arm_interrupt, test_arm_interrupt) uint32_t fp_extra_size = (__get_CONTROL() & CONTROL_FPCA_Msk) ? 
FPU_STACK_EXTRA_SIZE : 0; - __set_PSP(_current->stack_info.start + 0x10 + fp_extra_size); + __set_PSP(arch_current_thread()->stack_info.start + 0x10 + fp_extra_size); #else - __set_PSP(_current->stack_info.start + 0x10); + __set_PSP(arch_current_thread()->stack_info.start + 0x10); #endif __enable_irq(); diff --git a/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c b/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c index e1573bd1e39a69..506c6ed934a196 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c @@ -42,20 +42,20 @@ void z_impl_test_arm_user_syscall(void) * - PSPLIM register guards the privileged stack * - MSPLIM register still guards the interrupt stack */ - zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, "mode variable not set to PRIV mode in system call\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV\n"); zassert_true( - ((__get_PSP() >= _current->arch.priv_stack_start) && - (__get_PSP() < (_current->arch.priv_stack_start + + ((__get_PSP() >= arch_current_thread()->arch.priv_stack_start) && + (__get_PSP() < (arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE))), "Process SP outside thread privileged stack limits\n"); #if defined(CONFIG_BUILTIN_STACK_GUARD) - zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start, + zassert_true(__get_PSPLIM() == arch_current_thread()->arch.priv_stack_start, "PSPLIM not guarding the thread's privileged stack\n"); zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks, "MSPLIM not guarding the interrupt stack\n"); @@ -82,16 +82,16 @@ void arm_isr_handler(const void *args) * - MSPLIM register still guards the interrupt stack */ - zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) != 0, + zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) != 0, "mode variable not set to nPRIV mode for user thread\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV in ISR\n"); zassert_true( - ((__get_PSP() >= _current->stack_info.start) && - (__get_PSP() < (_current->stack_info.start + - _current->stack_info.size))), + ((__get_PSP() >= arch_current_thread()->stack_info.start) && + (__get_PSP() < (arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size))), "Process SP outside thread stack limits\n"); static int first_call = 1; @@ -101,7 +101,7 @@ void arm_isr_handler(const void *args) /* Trigger thread yield() manually */ (void)irq_lock(); - z_move_thread_to_end_of_prio_q(_current); + z_move_thread_to_end_of_prio_q(arch_current_thread()); SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; irq_unlock(0); @@ -169,20 +169,20 @@ ZTEST(arm_thread_swap, test_arm_syscalls) * - PSPLIM register guards the default stack * - MSPLIM register guards the interrupt stack */ - zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, "mode variable not set to PRIV mode for supervisor thread\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV\n"); zassert_true( - ((__get_PSP() >= _current->stack_info.start) && - (__get_PSP() < (_current->stack_info.start + - _current->stack_info.size))), + ((__get_PSP() >= arch_current_thread()->stack_info.start) && + (__get_PSP() < (arch_current_thread()->stack_info.start + + arch_current_thread()->stack_info.size))), "Process SP outside thread stack 
limits\n"); #if defined(CONFIG_BUILTIN_STACK_GUARD) - zassert_true(__get_PSPLIM() == _current->stack_info.start, + zassert_true(__get_PSPLIM() == arch_current_thread()->stack_info.start, "PSPLIM not guarding the default stack\n"); zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks, "MSPLIM not guarding the interrupt stack\n"); diff --git a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c index f299ce4c4a0d5d..433a6bb9d9a169 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c @@ -278,16 +278,16 @@ static void alt_thread_entry(void *p1, void *p2, void *p3) /* Verify that the _current_ (alt) thread is * initialized with EXC_RETURN.Ftype set */ - zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Alt thread FPCA flag not clear at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) /* Alt thread is created with K_FP_REGS set, so we * expect lazy stacking and long guard to be enabled. */ - zassert_true((_current->arch.mode & + zassert_true((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0, "Alt thread MPU GUAR DFLOAT flag not set at initialization\n"); - zassert_true((_current->base.user_options & K_FP_REGS) != 0, + zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0, "Alt thread K_FP_REGS not set at initialization\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0, "Lazy FP Stacking not set at initialization\n"); @@ -330,7 +330,7 @@ static void alt_thread_entry(void *p1, void *p2, void *p3) p_ztest_thread->arch.swap_return_value = SWAP_RETVAL; #endif - z_move_thread_to_end_of_prio_q(_current); + z_move_thread_to_end_of_prio_q(arch_current_thread()); /* Modify the callee-saved registers by zero-ing them. * The main test thread will, later, assert that they @@ -448,20 +448,20 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ load_callee_saved_regs(&ztest_thread_callee_saved_regs_init); - k_thread_priority_set(_current, K_PRIO_COOP(PRIORITY)); + k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY)); /* Export current thread's callee-saved registers pointer * and arch.basepri variable pointer, into global pointer * variables, so they can be easily accessible by other * (alternative) test thread. */ - p_ztest_thread = _current; + p_ztest_thread = arch_current_thread(); /* Confirm initial conditions before starting the test. */ test_flag = switch_flag; zassert_true(test_flag == false, "Switch flag not initialized properly\n"); - zassert_true(_current->arch.basepri == 0, + zassert_true(arch_current_thread()->arch.basepri == 0, "Thread BASEPRI flag not clear at thread start\n"); /* Verify, also, that the interrupts are unlocked. 
*/ #if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI) @@ -481,16 +481,16 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) "Main test thread does not start in privilege mode\n"); /* Assert that the mode status variable indicates privilege mode */ - zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, "Thread nPRIV flag not clear for supervisor thread: 0x%0x\n", - _current->arch.mode); + arch_current_thread()->arch.mode); #endif /* CONFIG_USERSPACE */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) /* The main test thread is not (yet) actively using the FP registers */ - zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Thread Ftype flag not set at initialization 0x%0x\n", - _current->arch.mode); + arch_current_thread()->arch.mode); /* Verify that the main test thread is initialized with FPCA cleared. */ zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0, @@ -503,7 +503,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* Clear the thread's floating-point callee-saved registers' container. * The container will, later, be populated by the swap mechanism. */ - memset(&_current->arch.preempt_float, 0, + memset(&arch_current_thread()->arch.preempt_float, 0, sizeof(struct _preempt_float)); /* Randomize the FP callee-saved registers at test initialization */ @@ -517,13 +517,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* The main test thread is using the FP registers, but the .mode * flag is not updated until the next context switch. */ - zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Thread Ftype flag not set at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) - zassert_true((_current->arch.mode & + zassert_true((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0, "Thread MPU GUAR DFLOAT flag not clear at initialization\n"); - zassert_true((_current->base.user_options & K_FP_REGS) == 0, + zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0, "Thread K_FP_REGS not clear at initialization\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0, "Lazy FP Stacking not clear at initialization\n"); @@ -552,13 +552,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) * explicitly required by the test. */ (void)irq_lock(); - z_move_thread_to_end_of_prio_q(_current); + z_move_thread_to_end_of_prio_q(arch_current_thread()); /* Clear the thread's callee-saved registers' container. * The container will, later, be populated by the swap * mechanism. */ - memset(&_current->callee_saved, 0, sizeof(_callee_saved_t)); + memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t)); /* Verify context-switch has not occurred yet. */ test_flag = switch_flag; @@ -672,7 +672,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ verify_callee_saved( &ztest_thread_callee_saved_regs_container, - &_current->callee_saved); + &arch_current_thread()->callee_saved); /* Verify context-switch did occur. */ test_flag = switch_flag; @@ -688,7 +688,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) * the alternative thread modified it, since the thread * is now switched back in. 
*/ - zassert_true(_current->arch.basepri == 0, + zassert_true(arch_current_thread()->arch.basepri == 0, "arch.basepri value not in accordance with the update\n"); #if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI) @@ -709,12 +709,12 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) #if !defined(CONFIG_NO_OPTIMIZATIONS) /* The thread is now swapped-back in. */ - zassert_equal(_current->arch.swap_return_value, SWAP_RETVAL, + zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL, "Swap value not set as expected: 0x%x (0x%x)\n", - _current->arch.swap_return_value, SWAP_RETVAL); - zassert_equal(_current->arch.swap_return_value, ztest_swap_return_val, + arch_current_thread()->arch.swap_return_value, SWAP_RETVAL); + zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val, "Swap value not returned as expected 0x%x (0x%x)\n", - _current->arch.swap_return_value, ztest_swap_return_val); + arch_current_thread()->arch.swap_return_value, ztest_swap_return_val); #endif #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) @@ -732,7 +732,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ verify_fp_callee_saved( &ztest_thread_fp_callee_saved_regs, - &_current->arch.preempt_float); + &arch_current_thread()->arch.preempt_float); /* Verify that the main test thread restored the FPSCR bit-0. */ zassert_true((__get_FPSCR() & 0x1) == 0x1, @@ -741,13 +741,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* The main test thread is using the FP registers, and the .mode * flag and MPU GUARD flag are now updated. */ - zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, + zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, "Thread Ftype flag not cleared after main returned back\n"); #if defined(CONFIG_MPU_STACK_GUARD) - zassert_true((_current->arch.mode & + zassert_true((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0, "Thread MPU GUARD FLOAT flag not set\n"); - zassert_true((_current->base.user_options & K_FP_REGS) != 0, + zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0, "Thread K_FPREGS not set after main returned back\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0, "Lazy FP Stacking not set after main returned back\n"); diff --git a/tests/benchmarks/footprints/src/system_thread.c b/tests/benchmarks/footprints/src/system_thread.c index c88be8c631cb02..c44ed9146be58e 100644 --- a/tests/benchmarks/footprints/src/system_thread.c +++ b/tests/benchmarks/footprints/src/system_thread.c @@ -28,12 +28,12 @@ void test_thread_entry(void *p, void *p1, void *p2) void thread_swap(void *p1, void *p2, void *p3) { - k_thread_abort(_current); + k_thread_abort(arch_current_thread()); } void thread_suspend(void *p1, void *p2, void *p3) { - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); } void thread_yield0(void *p1, void *p2, void *p3) diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index 699c7bdc642bb0..f7b1886aee14e5 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -135,7 +135,7 @@ static void isr_handler(const void *data) break; } - if (_current->base.prio < 0) { + if (arch_current_thread()->base.prio < 0) { isr_info.value = K_COOP_THREAD; break; } @@ -643,9 +643,9 @@ ZTEST(context, test_ctx_thread) TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n"); zassert_false(k_is_in_isr(), "Should not be in ISR context"); - zassert_false(_current->base.prio < 0, + 
zassert_false(arch_current_thread()->base.prio < 0, "Current thread should have preemptible priority: %d", - _current->base.prio); + arch_current_thread()->base.prio); } @@ -683,7 +683,7 @@ static void _test_kernel_thread(k_tid_t _thread_id) zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true"); - zassert_false((_current->base.prio >= 0), + zassert_false((arch_current_thread()->base.prio >= 0), "thread is not a cooperative thread"); } diff --git a/tests/kernel/fatal/exception/src/main.c b/tests/kernel/fatal/exception/src/main.c index 6eb97068b6ce11..3d289af407bbd5 100644 --- a/tests/kernel/fatal/exception/src/main.c +++ b/tests/kernel/fatal/exception/src/main.c @@ -314,7 +314,7 @@ ZTEST(fatal_exception, test_fatal) * priority -1. To run the test smoothly make both main and ztest * threads run at same priority level. */ - k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY)); + k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY)); #ifndef CONFIG_ARCH_POSIX TC_PRINT("test alt thread 1: generic CPU exception\n"); diff --git a/tests/kernel/fatal/message_capture/src/main.c b/tests/kernel/fatal/message_capture/src/main.c index 514f3ea6e55c91..c23b042c1a3c92 100644 --- a/tests/kernel/fatal/message_capture/src/main.c +++ b/tests/kernel/fatal/message_capture/src/main.c @@ -86,7 +86,7 @@ int main(void) * panic and not an oops). Set the thread non-essential as a * workaround. */ - z_thread_essential_clear(_current); + z_thread_essential_clear(arch_current_thread()); test_message_capture(); return 0; diff --git a/tests/kernel/ipi_cascade/src/main.c b/tests/kernel/ipi_cascade/src/main.c index 1f96e677ce681c..95e77607cb420a 100644 --- a/tests/kernel/ipi_cascade/src/main.c +++ b/tests/kernel/ipi_cascade/src/main.c @@ -116,7 +116,7 @@ void thread3_entry(void *p1, void *p2, void *p3) /* 9.1 - T3 should be executing on the same CPU that T1 was. */ - cpu_t3 = _current->base.cpu; + cpu_t3 = arch_current_thread()->base.cpu; zassert_true(cpu_t3 == cpu_t1, "T3 not executing on T1's original CPU"); @@ -136,7 +136,7 @@ void thread4_entry(void *p1, void *p2, void *p3) * It is expected to execute on the same CPU that T2 did. */ - cpu_t4 = _current->base.cpu; + cpu_t4 = arch_current_thread()->base.cpu; zassert_true(cpu_t4 == cpu_t2, "T4 on unexpected CPU"); @@ -165,7 +165,7 @@ void thread2_entry(void *p1, void *p2, void *p3) arch_irq_unlock(key); } - cpu_t2 = _current->base.cpu; + cpu_t2 = arch_current_thread()->base.cpu; zassert_false(cpu_t2 == cpu_t1, "T2 and T1 unexpectedly on the same CPU"); @@ -205,7 +205,7 @@ ZTEST(ipi_cascade, test_ipi_cascade) /* 3. T3 and T4 are blocked. 
Pin T3 to this CPU */ - cpu_t1 = _current->base.cpu; + cpu_t1 = arch_current_thread()->base.cpu; status = k_thread_cpu_pin(&thread3, cpu_t1); zassert_true(status == 0, "Failed to pin T3 to %d : %d\n", cpu_t1, status); @@ -249,7 +249,7 @@ ZTEST(ipi_cascade, test_ipi_cascade) zassert_false(timer_expired, "Test terminated by timer"); - zassert_true(cpu_t1 != _current->base.cpu, + zassert_true(cpu_t1 != arch_current_thread()->base.cpu, "Main thread (T1) did not change CPUs\n"); show_executing_threads("Final"); diff --git a/tests/kernel/mem_protect/mem_protect/src/inherit.c b/tests/kernel/mem_protect/mem_protect/src/inherit.c index 021fdf884fdd7b..c0d614fae09d17 100644 --- a/tests/kernel/mem_protect/mem_protect/src/inherit.c +++ b/tests/kernel/mem_protect/mem_protect/src/inherit.c @@ -125,7 +125,7 @@ ZTEST(mem_protect, test_permission_inheritance) struct k_heap *z_impl_ret_resource_pool_ptr(void) { - return _current->resource_pool; + return arch_current_thread()->resource_pool; } static inline struct k_heap *z_vrfy_ret_resource_pool_ptr(void) diff --git a/tests/kernel/mem_protect/obj_validation/src/main.c b/tests/kernel/mem_protect/obj_validation/src/main.c index df7ddbbc8456ed..a6c03dce1b1e0c 100644 --- a/tests/kernel/mem_protect/obj_validation/src/main.c +++ b/tests/kernel/mem_protect/obj_validation/src/main.c @@ -132,7 +132,7 @@ ZTEST(object_validation, test_generic_object) ZTEST(object_validation, test_kobj_assign_perms_on_alloc_obj) { static struct k_sem *test_dyn_sem; - struct k_thread *thread = _current; + struct k_thread *thread = arch_current_thread(); uintptr_t start_addr, end_addr; size_t size_heap = K_HEAP_MEM_POOL_SIZE; @@ -173,7 +173,7 @@ ZTEST(object_validation, test_no_ref_dyn_kobj_release_mem) zassert_not_null(test_dyn_mutex, "Can not allocate dynamic kernel object"); - struct k_thread *thread = _current; + struct k_thread *thread = arch_current_thread(); /* revoke access from the current thread */ k_object_access_revoke(test_dyn_mutex, thread); diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c index 76d5581b90678d..08b3932c7adfaf 100644 --- a/tests/kernel/mem_protect/userspace/src/main.c +++ b/tests/kernel/mem_protect/userspace/src/main.c @@ -312,7 +312,7 @@ ZTEST_USER(userspace, test_read_kernram) set_fault(K_ERR_CPU_EXCEPTION); - p = _current->init_data; + p = arch_current_thread()->init_data; printk("%p\n", p); zassert_unreachable("Read from kernel RAM did not fault"); } @@ -327,7 +327,7 @@ ZTEST_USER(userspace, test_write_kernram) /* Try to write to kernel RAM. 
*/ set_fault(K_ERR_CPU_EXCEPTION); - _current->init_data = NULL; + arch_current_thread()->init_data = NULL; zassert_unreachable("Write to kernel RAM did not fault"); } @@ -1038,11 +1038,11 @@ ZTEST(userspace, test_tls_leakage) * supervisor mode to be leaked */ - memset(_current->userspace_local_data, 0xff, + memset(arch_current_thread()->userspace_local_data, 0xff, sizeof(struct _thread_userspace_local_data)); k_thread_user_mode_enter(tls_leakage_user_part, - _current->userspace_local_data, NULL, NULL); + arch_current_thread()->userspace_local_data, NULL, NULL); #else ztest_test_skip(); #endif diff --git a/tests/kernel/smp/src/main.c b/tests/kernel/smp/src/main.c index de17be4f1db043..1b747c7208e08d 100644 --- a/tests/kernel/smp/src/main.c +++ b/tests/kernel/smp/src/main.c @@ -318,8 +318,9 @@ ZTEST(smp, test_coop_switch_in_abort) unsigned int num_threads = arch_num_cpus(); unsigned int i; - zassert_true(_current->base.prio < 0, "test case relies on ztest thread be cooperative"); - zassert_true(_current->base.prio > SPAWN_AB_PRIO, + zassert_true(arch_current_thread()->base.prio < 0, + "test case relies on ztest thread be cooperative"); + zassert_true(arch_current_thread()->base.prio > SPAWN_AB_PRIO, "spawn test need to have higher priority than ztest thread"); /* Spawn N number of cooperative threads, where N = number of CPUs */ @@ -869,15 +870,15 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - zassert_equal(_current->base.global_lock_count, 0, + zassert_equal(arch_current_thread()->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - _current->base.global_lock_count); + arch_current_thread()->base.global_lock_count); k_mutex_lock((struct k_mutex *)p1, K_FOREVER); - zassert_equal(_current->base.global_lock_count, 0, + zassert_equal(arch_current_thread()->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - _current->base.global_lock_count); + arch_current_thread()->base.global_lock_count); k_mutex_unlock((struct k_mutex *)p1); @@ -885,9 +886,9 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3) * context switch but global_lock_cnt has not been decrease * because no irq_lock() was called. 
*/ - zassert_equal(_current->base.global_lock_count, 0, + zassert_equal(arch_current_thread()->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - _current->base.global_lock_count); + arch_current_thread()->base.global_lock_count); } /** diff --git a/tests/kernel/threads/thread_apis/src/main.c b/tests/kernel/threads/thread_apis/src/main.c index 162f85e5f5f6d5..1b351068abedfc 100644 --- a/tests/kernel/threads/thread_apis/src/main.c +++ b/tests/kernel/threads/thread_apis/src/main.c @@ -232,7 +232,7 @@ static void umode_entry(void *thread_id, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - if (!z_is_thread_essential(_current) && + if (!z_is_thread_essential(arch_current_thread()) && (k_current_get() == (k_tid_t)thread_id)) { ztest_test_pass(); } else { @@ -249,9 +249,9 @@ static void umode_entry(void *thread_id, void *p2, void *p3) */ static void enter_user_mode_entry(void *p1, void *p2, void *p3) { - z_thread_essential_set(_current); + z_thread_essential_set(arch_current_thread()); - zassert_true(z_is_thread_essential(_current), "Thread isn't set" + zassert_true(z_is_thread_essential(arch_current_thread()), "Thread isn't set" " as essential\n"); k_thread_user_mode_enter(umode_entry, diff --git a/tests/kernel/threads/thread_apis/src/test_essential_thread.c b/tests/kernel/threads/thread_apis/src/test_essential_thread.c index 082765bd148160..fc101e7caf794f 100644 --- a/tests/kernel/threads/thread_apis/src/test_essential_thread.c +++ b/tests/kernel/threads/thread_apis/src/test_essential_thread.c @@ -27,16 +27,16 @@ static void thread_entry(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - z_thread_essential_set(_current); + z_thread_essential_set(arch_current_thread()); - if (z_is_thread_essential(_current)) { + if (z_is_thread_essential(arch_current_thread())) { k_busy_wait(100); } else { zassert_unreachable("The thread is not set as essential"); } - z_thread_essential_clear(_current); - zassert_false(z_is_thread_essential(_current), + z_thread_essential_clear(arch_current_thread()); + zassert_false(z_is_thread_essential(arch_current_thread()), "Essential flag of the thread is not cleared"); k_sem_give(&sync_sem); @@ -68,7 +68,7 @@ void k_sys_fatal_error_handler(unsigned int reason, fatal_error_signaled = true; - z_thread_essential_clear(_current); + z_thread_essential_clear(arch_current_thread()); } static void abort_thread_entry(void *p1, void *p2, void *p3) @@ -77,9 +77,9 @@ static void abort_thread_entry(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - z_thread_essential_set(_current); + z_thread_essential_set(arch_current_thread()); - if (z_is_thread_essential(_current)) { + if (z_is_thread_essential(arch_current_thread())) { k_busy_wait(100); } else { zassert_unreachable("The thread is not set as essential"); diff --git a/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c b/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c index ff0ceeac7c844f..08fd395ed2e614 100644 --- a/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c +++ b/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c @@ -72,7 +72,7 @@ ZTEST(usage_api, test_all_stats_usage) k_thread_runtime_stats_t stats4; k_thread_runtime_stats_t stats5; - priority = k_thread_priority_get(_current); + priority = k_thread_priority_get(arch_current_thread()); tid = k_thread_create(&helper_thread, helper_stack, K_THREAD_STACK_SIZEOF(helper_stack), helper1, NULL, NULL, NULL, @@ -196,7 +196,7 @@ ZTEST(usage_api, 
test_thread_stats_enable_disable) k_thread_runtime_stats_t helper_stats3; int priority; - priority = k_thread_priority_get(_current); + priority = k_thread_priority_get(arch_current_thread()); tid = k_thread_create(&helper_thread, helper_stack, K_THREAD_STACK_SIZEOF(helper_stack), helper1, NULL, NULL, NULL, @@ -209,7 +209,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable) k_sleep(K_TICKS(5)); - k_thread_runtime_stats_get(_current, &stats1); + k_thread_runtime_stats_get(arch_current_thread(), &stats1); k_thread_runtime_stats_get(tid, &helper_stats1); k_thread_runtime_stats_disable(tid); @@ -225,7 +225,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable) k_sleep(K_TICKS(2)); k_thread_runtime_stats_enable(tid); - k_thread_runtime_stats_get(_current, &stats2); + k_thread_runtime_stats_get(arch_current_thread(), &stats2); k_thread_runtime_stats_get(tid, &helper_stats2); /* Sleep for two ticks to let the helper thread execute again. */ @@ -280,12 +280,12 @@ ZTEST(usage_api, test_sys_stats_enable_disable) k_sys_runtime_stats_disable(); - k_thread_runtime_stats_get(_current, &thread_stats1); + k_thread_runtime_stats_get(arch_current_thread(), &thread_stats1); k_thread_runtime_stats_all_get(&sys_stats1); busy_loop(2); - k_thread_runtime_stats_get(_current, &thread_stats2); + k_thread_runtime_stats_get(arch_current_thread(), &thread_stats2); k_thread_runtime_stats_all_get(&sys_stats2); /* @@ -297,7 +297,7 @@ ZTEST(usage_api, test_sys_stats_enable_disable) busy_loop(2); - k_thread_runtime_stats_get(_current, &thread_stats3); + k_thread_runtime_stats_get(arch_current_thread(), &thread_stats3); k_thread_runtime_stats_all_get(&sys_stats3); /* @@ -398,7 +398,7 @@ ZTEST(usage_api, test_thread_stats_usage) k_thread_runtime_stats_t stats2; k_thread_runtime_stats_t stats3; - priority = k_thread_priority_get(_current); + priority = k_thread_priority_get(arch_current_thread()); /* * Verify that k_thread_runtime_stats_get() returns the expected @@ -408,7 +408,7 @@ ZTEST(usage_api, test_thread_stats_usage) status = k_thread_runtime_stats_get(NULL, &stats1); zassert_true(status == -EINVAL); - status = k_thread_runtime_stats_get(_current, NULL); + status = k_thread_runtime_stats_get(arch_current_thread(), NULL); zassert_true(status == -EINVAL); /* Align to the next tick */ @@ -422,7 +422,7 @@ ZTEST(usage_api, test_thread_stats_usage) helper1, NULL, NULL, NULL, priority + 2, 0, K_TICKS(1)); - main_thread = _current; + main_thread = arch_current_thread(); k_timer_init(&timer, resume_main, NULL); k_timer_start(&timer, K_TICKS(1), K_TICKS(10)); @@ -440,7 +440,7 @@ ZTEST(usage_api, test_thread_stats_usage) * the helper threads runtime stats. */ - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); /* * T = 1. @@ -449,14 +449,14 @@ ZTEST(usage_api, test_thread_stats_usage) */ k_thread_runtime_stats_get(tid, &stats1); - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); /* * T = 11. * Timer woke the main thread. Suspend main thread again. */ - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); /* * T = 21. @@ -465,7 +465,7 @@ ZTEST(usage_api, test_thread_stats_usage) */ k_thread_runtime_stats_get(tid, &stats2); - k_thread_suspend(_current); + k_thread_suspend(arch_current_thread()); /* * T = 31. 
diff --git a/tests/subsys/pm/power_mgmt/src/main.c b/tests/subsys/pm/power_mgmt/src/main.c index 3c75e44587fc77..da47ecee19d115 100644 --- a/tests/subsys/pm/power_mgmt/src/main.c +++ b/tests/subsys/pm/power_mgmt/src/main.c @@ -254,7 +254,7 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) "There is no power state defined"); /* make sure this is idle thread */ - zassert_true(z_is_idle_thread_object(_current)); + zassert_true(z_is_idle_thread_object(arch_current_thread())); zassert_true(ticks == _kernel.idle); zassert_false(k_can_yield()); idle_entered = true; @@ -276,7 +276,7 @@ static void notify_pm_state_entry(enum pm_state state) /* enter suspend */ zassert_true(notify_app_entry == true, "Notification to enter suspend was not sent to the App"); - zassert_true(z_is_idle_thread_object(_current)); + zassert_true(z_is_idle_thread_object(arch_current_thread())); zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE); pm_device_state_get(device_dummy, &device_power_state); @@ -301,7 +301,7 @@ static void notify_pm_state_exit(enum pm_state state) /* leave suspend */ zassert_true(notify_app_exit == true, "Notification to leave suspend was not sent to the App"); - zassert_true(z_is_idle_thread_object(_current)); + zassert_true(z_is_idle_thread_object(arch_current_thread())); zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE); /* at this point, devices are active again*/ diff --git a/tests/ztest/error_hook/src/main.c b/tests/ztest/error_hook/src/main.c index e4b05fc3eb0d19..b775c38c04b273 100644 --- a/tests/ztest/error_hook/src/main.c +++ b/tests/ztest/error_hook/src/main.c @@ -71,10 +71,10 @@ __no_optimization static void trigger_fault_access(void) #elif defined(CONFIG_CPU_CORTEX_M) || defined(CONFIG_CPU_AARCH32_CORTEX_R) || \ defined(CONFIG_CPU_AARCH64_CORTEX_R) /* As this test case only runs when User Mode is enabled, - * accessing _current always triggers a memory access fault, + * accessing arch_current_thread() always triggers a memory access fault, * and is guaranteed not to trigger SecureFault exceptions. */ - void *a = (void *)_current; + void *a = (void *)arch_current_thread(); #else /* For most arch which support userspace, dereferencing NULL * pointer will be caught by exception. @@ -338,7 +338,7 @@ ZTEST(error_hook_tests, test_catch_assert_in_isr) static void trigger_z_oops(void) { /* Set up a dummy syscall frame, pointing to a valid area in memory. */ - _current->syscall_frame = _image_ram_start; + arch_current_thread()->syscall_frame = _image_ram_start; K_OOPS(true); }
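
The conversion applied throughout these hunks is a direct substitution of the `_current` symbol with the `arch_current_thread()` accessor; every call site keeps the same member accesses (`->base.prio`, `->stack_info.start`, `->arch.mode`, and so on). As a minimal sketch only — not part of this patch, and with the header choice assumed rather than taken from it — new code written against the accessor would look roughly like this:

	/*
	 * Sketch (assumptions: header set and function name are illustrative,
	 * not taken from this patch). arch_current_thread() is declared by the
	 * kernel's internal headers; <zephyr/kernel.h> alone may not suffice.
	 */
	#include <zephyr/kernel.h>

	static bool running_thread_is_cooperative(void)
	{
		/* arch_current_thread() yields the struct k_thread * running on
		 * this CPU; member access is unchanged from the old _current form,
		 * e.g. a negative base.prio still marks a cooperative thread.
		 */
		return arch_current_thread()->base.prio < 0;
	}

The hunks above call the same accessor from ISR handlers as well (for example arm_isr_handler in the arm_thread_swap test), so the pattern in the sketch applies unchanged in thread and interrupt context.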