#include <zephyr/net/mdio.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/mdio.h>
+#include <zephyr/random/random.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(phy_tja1103, CONFIG_PHY_LOG_LEVEL);
@@ -65,15 +66,12 @@ struct phy_tja1103_data {
	const struct device *dev;
	struct phy_link_state state;
	struct k_sem sem;
-	struct k_sem offload_sem;
	phy_callback_t cb;
	struct gpio_callback phy_tja1103_int_callback;
	void *cb_data;
-
-	K_KERNEL_STACK_MEMBER(irq_thread_stack, CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE);
-	struct k_thread irq_thread;
-
-	struct k_work_delayable monitor_work;
+	struct k_work_delayable phy_work;
+	uint64_t start_time;
+	uint32_t wait_time;
};

static inline int phy_tja1103_c22_read(const struct device *dev, uint16_t reg, uint16_t *val)
@@ -153,140 +151,118 @@ static int phy_tja1103_id(const struct device *dev, uint32_t *phy_id)
	return 0;
}

-static int update_link_state(const struct device *dev)
+static int phy_tja1103_get_link_state(const struct device *dev, struct phy_link_state *state)
{
	struct phy_tja1103_data *const data = dev->data;
-	bool link_up;
	uint16_t val;
+	int rc = 0;
+
+	k_sem_take(&data->sem, K_FOREVER);

	if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_STATUS, &val) < 0) {
		return -EIO;
	}

-	link_up = (val & TJA1103_PHY_STATUS_LINK_STAT) != 0;
+	k_sem_give(&data->sem);

-	/* Let workqueue re-schedule and re-check if the
-	 * link status is unchanged this time
-	 */
-	if (data->state.is_up == link_up) {
-		return -EAGAIN;
-	}
+	/* TJA1103 Only supports 100BASE Full-duplex */
+	state->speed = LINK_FULL_100BASE;
+	state->is_up = (val & TJA1103_PHY_STATUS_LINK_STAT) != 0;

-	data->state.is_up = link_up;
+	LOG_DBG("TJA1103 Link state %i", state->is_up);

-	return 0;
+	return rc;
}

-static int phy_tja1103_get_link_state(const struct device *dev, struct phy_link_state *state)
+#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
+static void phy_tja1103_ack_irq(const struct device *dev)
{
-	struct phy_tja1103_data *const data = dev->data;
-	const struct phy_tja1103_config *const cfg = dev->config;
-	int rc = 0;
-
-	k_sem_take(&data->sem, K_FOREVER);
+	uint16_t irq;

-	/* If Interrupt is configured then the workqueue will not
-	 * update the link state periodically so do it explicitly
-	 */
-	if (cfg->gpio_interrupt.port != NULL) {
-		rc = update_link_state(dev);
+	if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_FUNC_IRQ_MSTATUS,
+				 &irq) < 0) {
+		return;
	}

-	memcpy(state, &data->state, sizeof(struct phy_link_state));
-
-	k_sem_give(&data->sem);
-
-	return rc;
+	/* Handling Link related Functional IRQs */
+	if (irq & (TJA1103_PHY_FUNC_IRQ_LINK_EVENT | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL)) {
+		/* Ack the asserted link related interrupts */
+		phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_FUNC_IRQ_ACK,
+				      irq);
+	}
}

-static void invoke_link_cb(const struct device *dev)
+static void phy_tja1103_handle_irq(const struct device *port, struct gpio_callback *cb,
+				   uint32_t pins)
{
-	struct phy_tja1103_data *const data = dev->data;
-	struct phy_link_state state;
-
-	if (data->cb == NULL) {
-		return;
-	}
+	ARG_UNUSED(pins);
+	ARG_UNUSED(port);

-	/* Send callback only on link state change */
-	if (phy_tja1103_get_link_state(dev, &state) != 0) {
-		return;
-	}
+	struct phy_tja1103_data *const data =
+		CONTAINER_OF(cb, struct phy_tja1103_data, phy_tja1103_int_callback);

-	data->cb(dev, &state, data->cb_data);
+	/* Trigger workqueue before leaving the ISR */
+	k_work_reschedule(&data->phy_work, K_NO_WAIT);
}
+#endif

-static void monitor_work_handler(struct k_work *work)
+static void phy_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct phy_tja1103_data *const data =
-		CONTAINER_OF(dwork, struct phy_tja1103_data, monitor_work);
+		CONTAINER_OF(dwork, struct phy_tja1103_data, phy_work);
	const struct device *dev = data->dev;
+	struct phy_link_state state = {};
	int rc;
+	const struct phy_tja1103_config *const cfg = dev->config;

-	k_sem_take(&data->sem, K_FOREVER);
-
-	rc = update_link_state(dev);
-
-	k_sem_give(&data->sem);
+	rc = phy_tja1103_get_link_state(dev, &state);

-	/* If link state has changed and a callback is set, invoke callback */
-	if (rc == 0) {
-		invoke_link_cb(dev);
+	/* Update link state and trigger callback if changed */
+	if (rc == 0 && (state.speed != data->state.speed || state.is_up != data->state.is_up)) {
+		memcpy(&data->state, &state, sizeof(struct phy_link_state));
+		if (data->cb) {
+			data->cb(dev, &data->state, data->cb_data);
+		}
	}

-	/* Submit delayed work */
-	k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD));
-}
+	if (cfg->master_slave == 3 && !data->state.is_up &&
+	    k_uptime_delta(&data->start_time) > data->wait_time) {
+		uint16_t val;

-static void phy_tja1103_irq_offload_thread(void *p1, void *p2, void *p3)
-{
-	ARG_UNUSED(p2);
-	ARG_UNUSED(p3);
+		data->start_time = k_uptime_get();
+		data->wait_time = 1000 + (sys_rand32_get() % 2001);

-	const struct device *dev = p1;
-	struct phy_tja1103_data *const data = dev->data;
-	uint16_t irq;
+		phy_tja1103_c45_read(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, &val);

-	for (;;) {
-		/* await trigger from ISR */
-		k_sem_take(&data->offload_sem, K_FOREVER);
+		val ^= MDIO_PMA_PMD_BT1_CTRL_CFG_MST;

-		if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1,
-					 TJA1103_PHY_FUNC_IRQ_MSTATUS, &irq) < 0) {
-			return;
-		}
+		phy_tja1103_c45_write(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, val);
+	}

-		/* Handling Link related Functional IRQs */
-		if (irq & (TJA1103_PHY_FUNC_IRQ_LINK_EVENT | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL)) {
-			/* Send callback to MAC on link status changed */
-			invoke_link_cb(dev);
+#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
+	if (cfg->gpio_interrupt.port) {
+		phy_tja1103_ack_irq(dev);

-			/* Ack the assered link related interrupts */
-			phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
-					      TJA1103_PHY_FUNC_IRQ_ACK, irq);
+		if (cfg->master_slave == 3 && !data->state.is_up) {
+			k_work_reschedule(&data->phy_work, K_MSEC(data->wait_time + 10));
		}
-	}
-}
-
-static void phy_tja1103_handle_irq(const struct device *port, struct gpio_callback *cb,
-				   uint32_t pins)
-{
-	ARG_UNUSED(pins);
-	ARG_UNUSED(port);

-	struct phy_tja1103_data *const data =
-		CONTAINER_OF(cb, struct phy_tja1103_data, phy_tja1103_int_callback);
+		return;
+	}
+#endif

-	/* Trigger BH before leaving the ISR */
-	k_sem_give(&data->offload_sem);
+	/* Submit delayed work */
+	k_work_reschedule(&data->phy_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD));
}

static void phy_tja1103_cfg_irq_poll(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
-	const struct phy_tja1103_config *const cfg = dev->config;
+
+#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
	int ret;
+	const struct phy_tja1103_config *const cfg = dev->config;

	if (cfg->gpio_interrupt.port != NULL) {
		if (!gpio_is_ready_dt(&cfg->gpio_interrupt)) {
@@ -311,9 +287,8 @@ static void phy_tja1103_cfg_irq_poll(const struct device *dev)
			return;
		}

-		ret = phy_tja1103_c45_write(
-			dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_FUNC_IRQ_EN,
-			(TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL_EN));
+		ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_FUNC_IRQ_EN,
+					    (TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN));
		if (ret < 0) {
			return;
		}
@@ -323,19 +298,15 @@ static void phy_tja1103_cfg_irq_poll(const struct device *dev)
			LOG_ERR("Failed to enable INT, %d", ret);
			return;
		}
+	}
+#endif

-		/* PHY initialized, IRQ configured, now initialize the BH handler */
-		k_thread_create(&data->irq_thread, data->irq_thread_stack,
-				CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE,
-				phy_tja1103_irq_offload_thread, (void *)dev, NULL, NULL,
-				CONFIG_PHY_TJA1103_IRQ_THREAD_PRIO, K_ESSENTIAL, K_NO_WAIT);
-		k_thread_name_set(&data->irq_thread, "phy_tja1103_irq_offload");
+	data->start_time = k_uptime_get();
+	data->wait_time = 1000 + (sys_rand32_get() % 4001);

-	} else {
-		k_work_init_delayable(&data->monitor_work, monitor_work_handler);
+	k_work_init_delayable(&data->phy_work, phy_work_handler);

-		monitor_work_handler(&data->monitor_work.work);
-	}
+	phy_work_handler(&data->phy_work.work);
}

static int phy_tja1103_init(const struct device *dev)
@@ -354,7 +325,7 @@ static int phy_tja1103_init(const struct device *dev)
	ret = WAIT_FOR(!phy_tja1103_id(dev, &phy_id) && phy_id == TJA1103_ID,
		       TJA1103_AWAIT_RETRY_COUNT * TJA1103_AWAIT_DELAY_POLL_US,
		       k_sleep(K_USEC(TJA1103_AWAIT_DELAY_POLL_US)));
-	if (ret < 0) {
+	if (ret == 0) {
		LOG_ERR("Unable to obtain PHY ID for device 0x%x", cfg->phy_addr);
		return -ENODEV;
	}
@@ -411,17 +382,19 @@ static int phy_tja1103_init(const struct device *dev)

static int phy_tja1103_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data)
{
+	int rc = 0;
	struct phy_tja1103_data *const data = dev->data;

	data->cb = cb;
	data->cb_data = user_data;

-	/* Invoke the callback to notify the caller of the current
-	 * link status.
-	 */
-	invoke_link_cb(dev);
+	rc = phy_tja1103_get_link_state(dev, &data->state);

-	return 0;
+	if (rc == 0) {
+		data->cb(dev, &data->state, data->cb_data);
+	}
+
+	return rc;
}

static DEVICE_API(ethphy, phy_tja1103_api) = {
@@ -440,7 +413,6 @@ static DEVICE_API(ethphy, phy_tja1103_api) = {
	}; \
	static struct phy_tja1103_data phy_tja1103_data_##n = { \
		.sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.sem, 1, 1), \
-		.offload_sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.offload_sem, 0, 1), \
	}; \
	DEVICE_DT_INST_DEFINE(n, &phy_tja1103_init, NULL, &phy_tja1103_data_##n, \
			      &phy_tja1103_config_##n, POST_KERNEL, CONFIG_PHY_INIT_PRIORITY, \