@@ -65,11 +65,16 @@ struct mspi_dw_data {
 	bool standard_spi;
 	bool suspended;
 
+#if defined(CONFIG_MULTITHREADING)
 	struct k_sem finished;
 	/* For synchronization of API calls made from different contexts. */
 	struct k_sem ctx_lock;
 	/* For locking of controller configuration. */
 	struct k_sem cfg_lock;
+#else
+	volatile bool finished;
+	bool cfg_lock;
+#endif
 	struct mspi_xfer xfer;
 
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
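The hunk above keeps the same member names in both configurations, so most call sites stay unchanged and only the wait/signal points need conditional compilation. A minimal sketch of that storage duality (illustrative only, not the driver's code):

#include <zephyr/kernel.h>

struct xfer_sync {
#if defined(CONFIG_MULTITHREADING)
	struct k_sem finished;      /* ISR gives, calling thread takes */
#else
	volatile bool finished;     /* ISR sets, caller polls and clears */
#endif
};
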
@@ -364,7 +369,11 @@ static void handle_fifos(const struct device *dev)
 	if (finished) {
 		set_imr(dev, 0);
 
+#if defined(CONFIG_MULTITHREADING)
 		k_sem_give(&dev_data->finished);
+#else
+		dev_data->finished = true;
+#endif
 	}
 }
 
@@ -816,8 +825,17 @@ static int api_dev_config(const struct device *dev,
 	int rc;
 
 	if (dev_id != dev_data->dev_id) {
+#if defined(CONFIG_MULTITHREADING)
 		rc = k_sem_take(&dev_data->cfg_lock,
 				K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE));
+#else
+		if (dev_data->cfg_lock) {
+			rc = -1;
+		} else {
+			dev_data->cfg_lock = true;
+			rc = 0;
+		}
+#endif
 		if (rc < 0) {
 			LOG_ERR("Failed to switch controller to device");
 			return -EBUSY;
@@ -831,15 +849,23 @@ static int api_dev_config(const struct device *dev,
 		return 0;
 	}
 
+#if defined(CONFIG_MULTITHREADING)
 	(void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
+#endif
 
 	rc = _api_dev_config(dev, param_mask, cfg);
 
+#if defined(CONFIG_MULTITHREADING)
 	k_sem_give(&dev_data->ctx_lock);
+#endif
 
 	if (rc < 0) {
 		dev_data->dev_id = NULL;
+#if defined(CONFIG_MULTITHREADING)
 		k_sem_give(&dev_data->cfg_lock);
+#else
+		dev_data->cfg_lock = false;
+#endif
 	}
 
 	return rc;
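In the single-threaded build the configuration lock above degenerates to a plain ownership flag: "taking" it is a test-and-set of a bool, "giving" it back clears the bool. An illustrative helper pair (hypothetical names, not part of the driver) that captures the same logic:

static inline int cfg_lock_claim(bool *cfg_lock)
{
	if (*cfg_lock) {
		return -1;              /* already owned for another device */
	}
	*cfg_lock = true;
	return 0;
}

static inline void cfg_lock_release(bool *cfg_lock)
{
	*cfg_lock = false;
}

Without threads there is nothing to preempt the caller between the test and the set, so this is sufficient as long as the flag is never manipulated from an ISR.
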
@@ -851,12 +877,17 @@ static int api_get_channel_status(const struct device *dev, uint8_t ch)
 
 	struct mspi_dw_data *dev_data = dev->data;
 
+#if defined(CONFIG_MULTITHREADING)
 	(void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
+#endif
 
 	dev_data->dev_id = NULL;
+#if defined(CONFIG_MULTITHREADING)
 	k_sem_give(&dev_data->cfg_lock);
-
 	k_sem_give(&dev_data->ctx_lock);
+#else
+	dev_data->cfg_lock = false;
+#endif
 
 	return 0;
 }
@@ -1119,7 +1150,17 @@ static int start_next_packet(const struct device *dev, k_timeout_t timeout)
 	/* Write SER to start transfer */
 	write_ser(dev, BIT(dev_data->dev_id->dev_idx));
 
+#if defined(CONFIG_MULTITHREADING)
 	rc = k_sem_take(&dev_data->finished, timeout);
+#else
+	if (!WAIT_FOR(dev_data->finished,
+		      dev_data->xfer.timeout * USEC_PER_MSEC,
+		      NULL)) {
+		rc = -ETIMEDOUT;
+	}
+
+	dev_data->finished = false;
+#endif
 	if (read_risr(dev) & RISR_RXOIR_BIT) {
 		LOG_ERR("RX FIFO overflow occurred");
 		rc = -EIO;
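The hunk above selects between two completion strategies: with multithreading the caller blocks on a semaphore that the ISR gives, and without it the caller busy-waits on the volatile flag using WAIT_FOR() from <zephyr/sys/util.h>, with the per-transfer timeout converted from milliseconds to microseconds. A self-contained sketch of the same idea (hypothetical helper name, not the driver's function):

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>

#if defined(CONFIG_MULTITHREADING)
static int wait_for_packet(struct k_sem *finished, k_timeout_t timeout)
{
	/* Block the calling thread until the ISR gives the semaphore. */
	return k_sem_take(finished, timeout);
}
#else
static int wait_for_packet(volatile bool *finished, uint32_t timeout_ms)
{
	int rc = 0;

	/* Spin until the ISR sets the flag or the timeout expires. */
	if (!WAIT_FOR(*finished, timeout_ms * USEC_PER_MSEC, NULL)) {
		rc = -ETIMEDOUT;
	}

	*finished = false;      /* re-arm the flag for the next packet */
	return rc;
}
#endif
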
@@ -1232,15 +1273,19 @@ static int api_transceive(const struct device *dev,
 		return rc;
 	}
 
+#if defined(CONFIG_MULTITHREADING)
 	(void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
+#endif
 
 	if (dev_data->suspended) {
 		rc = -EFAULT;
 	} else {
 		rc = _api_transceive(dev, req);
 	}
 
+#if defined(CONFIG_MULTITHREADING)
 	k_sem_give(&dev_data->ctx_lock);
+#endif
 
 	rc2 = pm_device_runtime_put(dev);
 	if (rc2 < 0) {
@@ -1391,15 +1436,19 @@ static int api_xip_config(const struct device *dev,
 		return rc;
 	}
 
+#if defined(CONFIG_MULTITHREADING)
 	(void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
+#endif
 
 	if (dev_data->suspended) {
 		rc = -EFAULT;
 	} else {
 		rc = _api_xip_config(dev, dev_id, cfg);
 	}
 
+#if defined(CONFIG_MULTITHREADING)
 	k_sem_give(&dev_data->ctx_lock);
+#endif
 
 	rc2 = pm_device_runtime_put(dev);
 	if (rc2 < 0) {
@@ -1450,8 +1499,12 @@ static int dev_pm_action_cb(const struct device *dev,
 			return rc;
 		}
 #endif
+#if defined(CONFIG_MULTITHREADING)
 		if (xip_enabled ||
 		    k_sem_take(&dev_data->ctx_lock, K_NO_WAIT) != 0) {
+#else
+		if (xip_enabled) {
+#endif
 			LOG_ERR("Controller in use, cannot be suspended");
 			return -EBUSY;
 		}
@@ -1460,7 +1513,9 @@ static int dev_pm_action_cb(const struct device *dev,
 
 		vendor_specific_suspend(dev);
 
+#if defined(CONFIG_MULTITHREADING)
 		k_sem_give(&dev_data->ctx_lock);
+#endif
 
 		return 0;
 	}
@@ -1470,7 +1525,6 @@ static int dev_pm_action_cb(const struct device *dev,
 
 static int dev_init(const struct device *dev)
 {
-	struct mspi_dw_data *dev_data = dev->data;
 	const struct mspi_dw_config *dev_config = dev->config;
 	const struct gpio_dt_spec *ce_gpio;
 	int rc;
@@ -1481,9 +1535,13 @@ static int dev_init(const struct device *dev)
 
 	dev_config->irq_config();
 
+#if defined(CONFIG_MULTITHREADING)
+	struct mspi_dw_data *dev_data = dev->data;
+
 	k_sem_init(&dev_data->finished, 0, 1);
 	k_sem_init(&dev_data->cfg_lock, 1, 1);
 	k_sem_init(&dev_data->ctx_lock, 1, 1);
+#endif
 
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
 	dev_data->dev = dev;
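The initialization split above follows from the storage choice: the semaphores need explicit k_sem_init() calls, while the bool flags in the single-threaded build start out zeroed as static data and need no setup, which is why the #else branch is empty. A compact sketch of that init path (hypothetical helper, assuming the field names from the first hunk):

static inline void sync_objects_init(struct mspi_dw_data *data)
{
#if defined(CONFIG_MULTITHREADING)
	k_sem_init(&data->finished, 0, 1);   /* signalled by the ISR */
	k_sem_init(&data->cfg_lock, 1, 1);   /* one device owner at a time */
	k_sem_init(&data->ctx_lock, 1, 1);   /* serializes API calls */
#endif
	/* Nothing to do otherwise: the bool flags start out false. */
}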