@@ -83,14 +83,14 @@ ble_ll_sched_is_overlap(struct ble_ll_sched_item *s1,
     int rc;
 
     rc = 1;
-    if ((int32_t)(s1->start_time - s2->start_time) < 0) {
+    if (CPUTIME_LT(s1->start_time, s2->start_time)) {
         /* Make sure this event does not overlap current event */
-        if ((int32_t)(s1->end_time - s2->start_time) <= 0) {
+        if (CPUTIME_LEQ(s1->end_time, s2->start_time)) {
             rc = 0;
         }
     } else {
         /* Check for overlap */
-        if ((int32_t)(s1->start_time - s2->end_time) >= 0) {
+        if (CPUTIME_GEQ(s1->start_time, s2->end_time)) {
             rc = 0;
         }
     }
@@ -111,7 +111,7 @@ ble_ll_sched_overlaps_current(struct ble_ll_sched_item *sch)
     rc = 0;
     if (ble_ll_state_get() == BLE_LL_STATE_CONNECTION) {
         ce_end_time = ble_ll_conn_get_ce_end_time();
-        if ((int32_t)(ce_end_time - sch->start_time) > 0) {
+        if (CPUTIME_GT(ce_end_time, sch->start_time)) {
             rc = 1;
         }
     }
@@ -178,7 +178,7 @@ ble_ll_sched_conn_reschedule(struct ble_ll_conn_sm *connsm)
     sch->end_time = connsm->ce_end_time;
 
     /* Better be past current time or we just leave */
-    if ((int32_t)(sch->start_time - os_cputime_get32()) < 0) {
+    if (CPUTIME_LT(sch->start_time, os_cputime_get32())) {
         return -1;
     }
 
@@ -216,7 +216,7 @@ ble_ll_sched_conn_reschedule(struct ble_ll_conn_sm *connsm)
                 end_overlap = entry;
             }
         } else {
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 rc = 0;
                 TAILQ_INSERT_BEFORE(entry, sch, link);
                 break;
@@ -468,7 +468,7 @@ ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
             sch->end_time = earliest_end;
 
             /* We can insert if before entry in list */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 if ((earliest_start - initial_start) <= itvl_t) {
                     rc = 0;
                     TAILQ_INSERT_BEFORE(entry, sch, link);
@@ -655,7 +655,7 @@ ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
             sch->end_time = earliest_end;
 
             /* We can insert if before entry in list */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 if ((earliest_start - initial_start) <= itvl_t) {
                     rc = 0;
                     TAILQ_INSERT_BEFORE(entry, sch, link);
@@ -770,7 +770,7 @@ ble_ll_sched_slave_new(struct ble_ll_conn_sm *connsm)
         while (1) {
             next_sch = entry->link.tqe_next;
             /* Insert if event ends before next starts */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 rc = 0;
                 TAILQ_INSERT_BEFORE(entry, sch, link);
                 break;
@@ -1047,7 +1047,7 @@ ble_ll_sched_adv_new(struct ble_ll_sched_item *sch, ble_ll_sched_adv_new_cb cb,
         os_cputime_timer_stop(&g_ble_ll_sched_timer);
         TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
             /* We can insert if before entry in list */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 TAILQ_INSERT_BEFORE(entry, sch, link);
                 break;
             }
@@ -1111,7 +1111,7 @@ ble_ll_sched_periodic_adv(struct ble_ll_sched_item *sch, uint32_t *start,
         os_cputime_timer_stop(&g_ble_ll_sched_timer);
         TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
             /* We can insert if before entry in list */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 TAILQ_INSERT_BEFORE(entry, sch, link);
                 break;
             }
@@ -1200,7 +1200,7 @@ ble_ll_sched_adv_reschedule(struct ble_ll_sched_item *sch, uint32_t *start,
                 end_overlap = entry;
             }
         } else {
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 before = entry;
                 break;
             }
@@ -1233,7 +1233,7 @@ ble_ll_sched_adv_reschedule(struct ble_ll_sched_item *sch, uint32_t *start,
         sch->end_time = sch->start_time + duration;
         while (1) {
             next_sch = entry->link.tqe_next;
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 rand_ticks = entry->start_time - sch->end_time;
                 before = entry;
                 TAILQ_INSERT_BEFORE(before, sch, link);
@@ -1580,7 +1580,7 @@ ble_ll_sched_scan_req_over_aux_ptr(uint32_t chan, uint8_t phy_mode)
     while (sch) {
         /* Let's check if there is no scheduled item which want to start within
          * given usecs.*/
-        if ((int32_t)(sch->start_time - now + os_cputime_usecs_to_ticks(usec_dur)) > 0) {
+        if (CPUTIME_GT(sch->start_time, now + os_cputime_usecs_to_ticks(usec_dur))) {
             /* We are fine. Have time for scan req */
             return 0;
         }
@@ -1670,7 +1670,7 @@ ble_ll_sched_aux_scan(struct ble_mbuf_hdr *ble_hdr,
         os_cputime_timer_stop(&g_ble_ll_sched_timer);
         TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
             /* We can insert if before entry in list */
-            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
+            if (CPUTIME_LEQ(sch->end_time, entry->start_time)) {
                 rc = 0;
                 TAILQ_INSERT_BEFORE(entry, sch, link);
                 sch->enqueued = 1;
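For reference, the CPUTIME_* comparison helpers introduced throughout the hunks above can be inferred from the expressions they replace: each wraps the signed-difference idiom used on 32-bit cputime tick counters, so comparisons stay correct across timer wraparound. A minimal sketch of plausible definitions, assuming uint32_t tick values (the parameter names here are assumptions, not taken from this change):

#include <stdint.h>

/* Assumed definitions, reconstructed from the replaced expressions above:
 * cast the unsigned tick difference to int32_t so the comparison remains
 * valid even after the 32-bit cputime counter wraps around. */
#define CPUTIME_LT(__t1, __t2)  ((int32_t)((__t1) - (__t2)) < 0)
#define CPUTIME_GT(__t1, __t2)  ((int32_t)((__t1) - (__t2)) > 0)
#define CPUTIME_GEQ(__t1, __t2) ((int32_t)((__t1) - (__t2)) >= 0)
#define CPUTIME_LEQ(__t1, __t2) ((int32_t)((__t1) - (__t2)) <= 0)

Under these definitions, CPUTIME_LEQ(sch->end_time, entry->start_time) expands to the same (int32_t)(sch->end_time - entry->start_time) <= 0 test it replaces, so most hunks are a pure readability refactor. The one exception is the hunk at line 1583: there the os_cputime_usecs_to_ticks(usec_dur) term moves to the other side of the comparison and effectively changes sign, so that check is not a one-for-one rewrite of the original expression.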