Skip to content

Commit fc1b5de

Browse files
committed
kernel: move thread definitions to thread.h
Move thread definitions to their own header to avoid redeclaration and redefinition of types, which are not allowed in some standards. Fixes #29937 Signed-off-by: Anas Nashif <[email protected]>
1 parent ca0e5df commit fc1b5de

File tree

4 files changed

+323
-314
lines changed

4 files changed

+323
-314
lines changed

include/app_memory/mem_domain.h

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,12 @@
1212
#include <stddef.h>
1313
#include <sys/dlist.h>
1414
#include <toolchain.h>
15+
#include <kernel/thread.h>
1516

1617
#ifdef __cplusplus
1718
extern "C" {
1819
#endif
1920

20-
/* Forward declaration */
21-
struct k_thread;
22-
typedef struct k_thread *k_tid_t;
23-
2421
/**
2522
* @defgroup mem_domain_apis Memory domain APIs
2623
* @ingroup kernel_apis

include/kernel.h

Lines changed: 4 additions & 310 deletions
Original file line numberDiff line numberDiff line change
@@ -114,322 +114,16 @@ struct k_mem_domain;
114114
struct k_mem_partition;
115115
struct k_futex;
116116

117-
/**
118-
* @typedef k_thread_entry_t
119-
* @brief Thread entry point function type.
120-
*
121-
* A thread's entry point function is invoked when the thread starts executing.
122-
* Up to 3 argument values can be passed to the function.
123-
*
124-
* The thread terminates execution permanently if the entry point function
125-
* returns. The thread is responsible for releasing any shared resources
126-
* it may own (such as mutexes and dynamically allocated memory), prior to
127-
* returning.
128-
*
129-
* @param p1 First argument.
130-
* @param p2 Second argument.
131-
* @param p3 Third argument.
132-
*
133-
* @return N/A
134-
*/
135-
136-
#ifdef CONFIG_THREAD_MONITOR
137-
struct __thread_entry {
138-
k_thread_entry_t pEntry;
139-
void *parameter1;
140-
void *parameter2;
141-
void *parameter3;
142-
};
143-
#endif
144-
145-
/* can be used for creating 'dummy' threads, e.g. for pending on objects */
146-
struct _thread_base {
147-
148-
/* this thread's entry in a ready/wait queue */
149-
union {
150-
sys_dnode_t qnode_dlist;
151-
struct rbnode qnode_rb;
152-
};
153-
154-
/* wait queue on which the thread is pended (needed only for
155-
* trees, not dumb lists)
156-
*/
157-
_wait_q_t *pended_on;
158-
159-
/* user facing 'thread options'; values defined in include/kernel.h */
160-
uint8_t user_options;
161-
162-
/* thread state */
163-
uint8_t thread_state;
164-
165-
/*
166-
* scheduler lock count and thread priority
167-
*
168-
* These two fields control the preemptibility of a thread.
169-
*
170-
* When the scheduler is locked, sched_locked is decremented, which
171-
* means that the scheduler is locked for values from 0xff to 0x01. A
172-
* thread is coop if its prio is negative, thus 0x80 to 0xff when
173-
* looked at the value as unsigned.
174-
*
175-
* By putting them end-to-end, this means that a thread is
176-
* non-preemptible if the bundled value is greater than or equal to
177-
* 0x0080.
178-
*/
179-
union {
180-
struct {
181-
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
182-
uint8_t sched_locked;
183-
int8_t prio;
184-
#else /* LITTLE and PDP */
185-
int8_t prio;
186-
uint8_t sched_locked;
187-
#endif
188-
};
189-
uint16_t preempt;
190-
};
191-
192-
#ifdef CONFIG_SCHED_DEADLINE
193-
int prio_deadline;
194-
#endif
195-
196-
uint32_t order_key;
197-
198-
#ifdef CONFIG_SMP
199-
/* True for the per-CPU idle threads */
200-
uint8_t is_idle;
201-
202-
/* CPU index on which thread was last run */
203-
uint8_t cpu;
204-
205-
/* Recursive count of irq_lock() calls */
206-
uint8_t global_lock_count;
207-
208-
#endif
209-
210-
#ifdef CONFIG_SCHED_CPU_MASK
211-
/* "May run on" bits for each CPU */
212-
uint8_t cpu_mask;
213-
#endif
214-
215-
/* data returned by APIs */
216-
void *swap_data;
217-
218-
#ifdef CONFIG_SYS_CLOCK_EXISTS
219-
/* this thread's entry in a timeout queue */
220-
struct _timeout timeout;
221-
#endif
222-
223-
_wait_q_t join_waiters;
224-
#if __ASSERT_ON
225-
/* For detecting calls to k_thread_create() on threads that are
226-
* already active
227-
*/
228-
atomic_t cookie;
229-
#endif
230-
};
231-
232-
typedef struct _thread_base _thread_base_t;
233-
234-
#if defined(CONFIG_THREAD_STACK_INFO)
235-
/* Contains the stack information of a thread */
236-
struct _thread_stack_info {
237-
/* Stack start - Represents the start address of the thread-writable
238-
* stack area.
239-
*/
240-
uintptr_t start;
241-
242-
/* Thread writable stack buffer size. Represents the size of the actual
243-
* buffer, starting from the 'start' member, that should be writable by
244-
* the thread. This comprises of the thread stack area, any area reserved
245-
* for local thread data storage, as well as any area left-out due to
246-
* random adjustments applied to the initial thread stack pointer during
247-
* thread initialization.
248-
*/
249-
size_t size;
250-
251-
/* Adjustment value to the size member, removing any storage
252-
* used for TLS or random stack base offsets. (start + size - delta)
253-
* is the initial stack pointer for a thread. May be 0.
254-
*/
255-
size_t delta;
256-
};
257-
258-
typedef struct _thread_stack_info _thread_stack_info_t;
259-
#endif /* CONFIG_THREAD_STACK_INFO */
260-
261-
#if defined(CONFIG_USERSPACE)
262-
struct _mem_domain_info {
263-
/** memory domain queue node */
264-
sys_dnode_t mem_domain_q_node;
265-
/** memory domain of the thread */
266-
struct k_mem_domain *mem_domain;
267-
};
268-
269-
#endif /* CONFIG_USERSPACE */
270-
271-
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
272-
struct _thread_userspace_local_data {
273-
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
274-
int errno_var;
275-
#endif
276-
};
277-
#endif
278-
279-
/* private, used by k_poll and k_work_poll */
280-
struct k_work_poll;
281-
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
282-
struct z_poller {
283-
bool is_polling;
284-
uint8_t mode;
285-
};
286-
287-
#ifdef CONFIG_THREAD_RUNTIME_STATS
288-
struct k_thread_runtime_stats {
289-
/* Thread execution cycles */
290-
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
291-
timing_t execution_cycles;
292-
#else
293-
uint64_t execution_cycles;
294-
#endif
295-
};
296-
297-
typedef struct k_thread_runtime_stats k_thread_runtime_stats_t;
298-
299-
struct _thread_runtime_stats {
300-
/* Timestamp when last switched in */
301-
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
302-
timing_t last_switched_in;
303-
#else
304-
uint32_t last_switched_in;
305-
#endif
306-
307-
k_thread_runtime_stats_t stats;
308-
};
309-
#endif
310-
311-
/**
312-
* @ingroup thread_apis
313-
* Thread Structure
314-
*/
315-
struct k_thread {
316-
317-
struct _thread_base base;
318-
319-
/** defined by the architecture, but all archs need these */
320-
struct _callee_saved callee_saved;
321-
322-
/** static thread init data */
323-
void *init_data;
324-
325-
/**
326-
* abort function
327-
*
328-
* This function pointer, if non-NULL, will be run once after the
329-
* thread has completely exited. It may run in the context of:
330-
* - the idle thread if the thread self-exited
331-
* - another thread calling k_thread_abort()
332-
* - a fatal exception handler on a special stack
333-
*
334-
* It will never run in the context of the thread itself.
335-
*
336-
* A pointer to the thread object that was aborted is provided. At the
337-
* time this runs, this thread object has completely exited. It may
338-
* be re-used with k_thread_create() or return it to a heap or slab
339-
* pool.
340-
*
341-
* This function does not run with any kind of lock active and
342-
* there is the possibility of races leading to undefined behavior
343-
* if other threads are attempting to free or recycle this object
344-
* concurrently.
345-
*/
346-
void (*fn_abort)(struct k_thread *aborted);
347-
348-
#if defined(CONFIG_POLL)
349-
struct z_poller poller;
350-
#endif
351-
352-
#if defined(CONFIG_THREAD_MONITOR)
353-
/** thread entry and parameters description */
354-
struct __thread_entry entry;
355-
356-
/** next item in list of all threads */
357-
struct k_thread *next_thread;
358-
#endif
359-
360-
#if defined(CONFIG_THREAD_NAME)
361-
/** Thread name */
362-
char name[CONFIG_THREAD_MAX_NAME_LEN];
363-
#endif
364-
365-
#ifdef CONFIG_THREAD_CUSTOM_DATA
366-
/** crude thread-local storage */
367-
void *custom_data;
368-
#endif
369-
370-
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
371-
struct _thread_userspace_local_data *userspace_local_data;
372-
#endif
373-
374-
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
375-
#ifndef CONFIG_USERSPACE
376-
/** per-thread errno variable */
377-
int errno_var;
378-
#endif
379-
#endif
380-
381-
#if defined(CONFIG_THREAD_STACK_INFO)
382-
/** Stack Info */
383-
struct _thread_stack_info stack_info;
384-
#endif /* CONFIG_THREAD_STACK_INFO */
385-
386-
#if defined(CONFIG_USERSPACE)
387-
/** memory domain info of the thread */
388-
struct _mem_domain_info mem_domain_info;
389-
/** Base address of thread stack */
390-
k_thread_stack_t *stack_obj;
391-
/** current syscall frame pointer */
392-
void *syscall_frame;
393-
#endif /* CONFIG_USERSPACE */
394-
395-
396-
#if defined(CONFIG_USE_SWITCH)
397-
/* When using __switch() a few previously arch-specific items
398-
* become part of the core OS
399-
*/
400-
401-
/** z_swap() return value */
402-
int swap_retval;
403-
404-
/** Context handle returned via arch_switch() */
405-
void *switch_handle;
406-
#endif
407-
/** resource pool */
408-
struct k_heap *resource_pool;
409-
410-
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
411-
/* Pointer to arch-specific TLS area */
412-
uintptr_t tls;
413-
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
414-
415-
#ifdef CONFIG_THREAD_RUNTIME_STATS
416-
/** Runtime statistics */
417-
struct _thread_runtime_stats rt_stats;
418-
#endif
419-
420-
/** arch-specifics: must always be at the end */
421-
struct _thread_arch arch;
422-
};
423-
424-
typedef struct k_thread _thread_t;
425-
typedef struct k_thread *k_tid_t;
426-
427117
enum execution_context_types {
428118
K_ISR = 0,
429119
K_COOP_THREAD,
430120
K_PREEMPT_THREAD,
431121
};
432122

123+
/* private, used by k_poll and k_work_poll */
124+
struct k_work_poll;
125+
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
126+
433127
/**
434128
* @addtogroup thread_apis
435129
* @{

0 commit comments

Comments
 (0)