Skip to content

Commit 93ccf67

Browse files
LarsMorstadTI, vaishnavachath
authored and committed
hal: drivers: ports EventP, MessageQueueP and TaskP DPL features to Zephyr
Ports parts of simplelink's Driver Porting Layer to TI F3 devices on Zephyr. Taking inspiration from already existing DPL features, we allocate kernel objects using slab memory. This means the total number of k_event, k_msgq and k_thread objects is limited by a compile-time configuration. Signed-off-by: Lars Thalian Morstad <[email protected]>
1 parent b6f1c85 commit 93ccf67

File tree

9 files changed

+1468
-0
lines changed

9 files changed

+1468
-0
lines changed

simplelink_lpf3/CMakeLists.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,5 +27,10 @@ if(CONFIG_HAS_CC23X0_SDK)
2727
# DPL
2828
kernel/zephyr/dpl/ClockP_zephyr.c
2929
kernel/zephyr/dpl/HwiP_zephyr.c
30+
kernel/zephyr/dpl/TaskP_zephyr.c
31+
kernel/zephyr/dpl/MessageQueueP_zephyr.c
3032
)
33+
if(CONFIG_EVENTS)
34+
zephyr_library_sources(kernel/zephyr/dpl/EventP_zephyr.c)
35+
endif()
3136
endif()
Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
/*
2+
* Copyright (c) 2024, Texas Instruments Incorporated
3+
*
4+
* SPDX-License-Identifier: Apache-2.0
5+
*/
6+
7+
#include <zephyr/kernel.h>
8+
#include <zephyr/kernel_structs.h>
9+
10+
#include <zephyr/sys/__assert.h>
11+
12+
#include <inc/hw_types.h>
13+
#include <inc/hw_ints.h>
14+
15+
#include <driverlib/interrupt.h>
16+
17+
#include <kernel/zephyr/dpl/dpl.h>
18+
#include <ti/drivers/dpl/EventP.h>
19+
#include <ti/drivers/dpl/ClockP.h>
20+
#include <ti/drivers/dpl/HwiP.h>
21+
22+
#ifdef CONFIG_DYNAMIC_DPL_OBJECTS
/* We can't easily dynamically allocate kernel objects */
/*
 * Backing pool for EventP_create(): a fixed-size slab of struct k_event
 * objects.  The total number of dynamically created events is bounded at
 * compile time by DPL_MAX_EVENTS; exceeding it trips the __ASSERT in
 * dpl_event_pool_alloc().
 */
#define DPL_MAX_EVENTS 5
/* MEM_ALIGN presumably comes from dpl.h — confirm it satisfies
 * the alignment requirement of struct k_event. */
K_MEM_SLAB_DEFINE(event_slab, sizeof(struct k_event), DPL_MAX_EVENTS,\
	MEM_ALIGN);
29+
static struct k_event *dpl_event_pool_alloc()
30+
{
31+
struct k_event *event_ptr = NULL;
32+
33+
if (k_mem_slab_alloc(&event_slab, (void **)&event_ptr, K_NO_WAIT) < 0) {
34+
35+
__ASSERT(0, "Increase size of DPL event pool");
36+
}
37+
return event_ptr;
38+
}
39+
40+
static void dpl_event_pool_free(struct k_event *event)
41+
{
42+
k_mem_slab_free(&event_slab, (void *)event);
43+
44+
return;
45+
}
46+
47+
/*
48+
* ======== EventP_create ========
49+
*/
50+
EventP_Handle EventP_create(void)
51+
{
52+
struct k_event *event = dpl_event_pool_alloc();
53+
k_event_init(event);
54+
return (EventP_Handle) event;
55+
}
56+
57+
/*
58+
* ======== EventP_delete ========
59+
*/
60+
void EventP_delete(EventP_Handle handle)
61+
{
62+
if (handle != NULL)
63+
{
64+
dpl_event_pool_free((struct k_event *) handle);
65+
}
66+
}
67+
68+
#endif /* CONFIG_DYNAMIC_DPL_OBJECTS */
69+
70+
/*
71+
* ======== EventP_construct ========
72+
*/
73+
EventP_Handle EventP_construct(EventP_Struct *obj)
74+
{
75+
struct k_event *event;
76+
event = (struct k_event*)obj;
77+
78+
if (event) {
79+
k_event_init(event);
80+
}
81+
82+
return (EventP_Handle)event;
83+
}
84+
85+
/*
86+
* ======== EventP_destruct ========
87+
*/
88+
void EventP_destruct(EventP_Struct *obj)
89+
{
90+
struct k_event *event;
91+
92+
event = (struct k_event *)obj->data;
93+
94+
k_event_clear(event, 0xFFFFFFFF);
95+
}
96+
97+
/*
98+
* ======== EventP_pend ========
99+
*/
100+
uint32_t EventP_pend(EventP_Handle event, uint32_t eventMask, bool waitForAll, uint32_t timeout)
101+
{
102+
uint32_t eventBits, tickPeriod;
103+
k_timeout_t eventTimeout;
104+
uint64_t timeUS;
105+
106+
107+
if (timeout == EventP_WAIT_FOREVER)
108+
{
109+
eventTimeout = K_FOREVER;
110+
}
111+
else if (timeout == EventP_NO_WAIT)
112+
{
113+
eventTimeout = K_NO_WAIT;
114+
}
115+
else
116+
{
117+
/* if necessary, convert ClockP ticks to Zephyr ticks */
118+
/* Should really be ClockP_getSystemTickPeriod() but this causes issues with ielftool post build step */
119+
tickPeriod = CLOCKP_TICK_PERIOD;
120+
eventTimeout = K_TICKS(timeout);
121+
}
122+
123+
if(waitForAll)
124+
{
125+
/* Wait for all event bits */
126+
eventBits = k_event_wait_all((struct k_event *) event, eventMask, false, eventTimeout);
127+
}
128+
else
129+
{
130+
/* Wait for any event bits */
131+
eventBits = k_event_wait((struct k_event *) event, eventMask, false, eventTimeout);
132+
}
133+
134+
/* Clear the events that caused the return */
135+
k_event_clear((struct k_event *) event, eventBits);
136+
137+
/* Check if wait returned because of timeout */
138+
if (((eventBits == 0)) || ((eventBits != eventMask) && (waitForAll == true)))
139+
{
140+
return 0;
141+
}
142+
else
143+
{
144+
return eventBits;
145+
}
146+
}
147+
148+
/*
149+
* ======== EventP_post ========
150+
*/
151+
void EventP_post(EventP_Handle event, uint32_t eventMask)
152+
{
153+
/* Unpend all tasks waiting for these events, and merge these events with
154+
the ones already tracked by the object
155+
*/
156+
k_event_set((struct k_event *) event, eventMask);
157+
}
158+
159+
/*
160+
* ======== EventP_clear ========
161+
*/
162+
void EventP_clear(EventP_Handle event, uint32_t eventMask)
163+
{
164+
k_event_clear((struct k_event *) event, eventMask);
165+
}
166+
167+
/*
168+
* ======== EventP_get ========
169+
*/
170+
uint32_t EventP_get(EventP_Handle event)
171+
{
172+
uint32_t events = ((struct k_event *)event)->events;
173+
return events;
174+
}

0 commit comments

Comments
 (0)