@@ -38,13 +38,13 @@
 #define PV_PREV_CHECK_MASK	0xff
 
 /*
- * Queue node uses: vcpu_running & vcpu_halted.
- * Queue head uses: vcpu_running & vcpu_hashed.
+ * Queue node uses: VCPU_RUNNING & VCPU_HALTED.
+ * Queue head uses: VCPU_RUNNING & VCPU_HASHED.
  */
 enum vcpu_state {
-	vcpu_running = 0,
-	vcpu_halted,		/* Used only in pv_wait_node */
-	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
+	VCPU_RUNNING = 0,
+	VCPU_HALTED,		/* Used only in pv_wait_node */
+	VCPU_HASHED,		/* = pv_hash'ed + VCPU_HALTED */
 };
 
 struct pv_node {
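For orientation, here is a small standalone sketch of how the renamed states are used by the hunks that follow: a queue node moves between VCPU_RUNNING and VCPU_HALTED around pv_wait(), and either the node itself or pv_kick_node() advances it to VCPU_HASHED once the lock has been hashed. The checker function is hypothetical, exists only for illustration, and is not part of the kernel sources.

/* Illustration only -- not kernel code.  The enum mirrors the definition above. */
enum vcpu_state {
	VCPU_RUNNING = 0,
	VCPU_HALTED,		/* queue node, set before pv_wait() in pv_wait_node() */
	VCPU_HASHED,		/* lock hashed; set by pv_kick_node() or the queue head */
};

/* Transitions actually performed by the code touched in this patch. */
static inline int vcpu_state_transition_seen(enum vcpu_state from, enum vcpu_state to)
{
	switch (from) {
	case VCPU_RUNNING:	/* smp_store_mb() in pv_wait_node(), WRITE_ONCE() in the head */
		return to == VCPU_HALTED || to == VCPU_HASHED;
	case VCPU_HALTED:	/* cmpxchg() by the node, try_cmpxchg_relaxed() by the holder */
		return to == VCPU_RUNNING || to == VCPU_HASHED;
	case VCPU_HASHED:	/* head resets to VCPU_RUNNING in pv_wait_head_or_lock() */
		return to == VCPU_RUNNING;
	}
	return 0;
}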
@@ -266,7 +266,7 @@ pv_wait_early(struct pv_node *prev, int loop)
 	if ((loop & PV_PREV_CHECK_MASK) != 0)
 		return false;
 
-	return READ_ONCE(prev->state) != vcpu_running;
+	return READ_ONCE(prev->state) != VCPU_RUNNING;
 }
 
 /*
@@ -279,7 +279,7 @@ static void pv_init_node(struct mcs_spinlock *node)
 	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
 
 	pn->cpu = smp_processor_id();
-	pn->state = vcpu_running;
+	pn->state = VCPU_RUNNING;
 }
 
 /*
@@ -308,26 +308,26 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 		/*
 		 * Order pn->state vs pn->locked thusly:
 		 *
-		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
+		 * [S] pn->state = VCPU_HALTED	  [S] next->locked = 1
 		 *     MB			      MB
-		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
+		 * [L] pn->locked		[RmW] pn->state = VCPU_HASHED
 		 *
 		 * Matches the cmpxchg() from pv_kick_node().
 		 */
-		smp_store_mb(pn->state, vcpu_halted);
+		smp_store_mb(pn->state, VCPU_HALTED);
 
 		if (!READ_ONCE(node->locked)) {
 			lockevent_inc(pv_wait_node);
 			lockevent_cond_inc(pv_wait_early, wait_early);
-			pv_wait(&pn->state, vcpu_halted);
+			pv_wait(&pn->state, VCPU_HALTED);
 		}
 
 		/*
-		 * If pv_kick_node() changed us to vcpu_hashed, retain that
+		 * If pv_kick_node() changed us to VCPU_HASHED, retain that
 		 * value so that pv_wait_head_or_lock() knows to not also try
 		 * to hash this lock.
 		 */
-		cmpxchg(&pn->state, vcpu_halted, vcpu_running);
+		cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING);
 
 		/*
 		 * If the locked flag is still not set after wakeup, it is a
@@ -357,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-	u8 old = vcpu_halted;
+	u8 old = VCPU_HALTED;
 	/*
 	 * If the vCPU is indeed halted, advance its state to match that of
 	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
@@ -374,7 +374,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * subsequent writes.
 	 */
 	smp_mb__before_atomic();
-	if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
+	if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED))
 		return;
 
 	/*
@@ -407,7 +407,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * If pv_kick_node() already advanced our state, we don't need to
 	 * insert ourselves into the hash table anymore.
 	 */
-	if (READ_ONCE(pn->state) == vcpu_hashed)
+	if (READ_ONCE(pn->state) == VCPU_HASHED)
 		lp = (struct qspinlock **)1;
 
 	/*
@@ -420,7 +420,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		 * Set correct vCPU state to be used by queue node wait-early
 		 * mechanism.
 		 */
-		WRITE_ONCE(pn->state, vcpu_running);
+		WRITE_ONCE(pn->state, VCPU_RUNNING);
 
 		/*
 		 * Set the pending bit in the active lock spinning loop to
@@ -460,7 +460,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 				goto gotlock;
 			}
 		}
-		WRITE_ONCE(pn->state, vcpu_hashed);
+		WRITE_ONCE(pn->state, VCPU_HASHED);
 		lockevent_inc(pv_wait_head);
 		lockevent_cond_inc(pv_wait_again, waitcnt);
 		pv_wait(&lock->locked, _Q_SLOW_VAL);
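Taken together, pv_wait_node() and pv_kick_node() above implement the store/barrier/load handshake spelled out in the ordering comment: the waiter publishes VCPU_HALTED before checking ->locked, while the lock holder sets ->locked before the state read-modify-write, so at least one side observes the other and a wakeup cannot be missed. Below is a minimal user-space model of that handshake, assuming C11 atomics and pthreads in place of the kernel primitives; sequentially consistent accesses stand in for smp_store_mb() and smp_mb__before_atomic(), and a busy-wait stands in for the hypercall-backed pv_wait()/pv_kick(). All names are illustrative, not kernel code.

/* Standalone model -- not kernel code.  Build (assumption): cc -std=c11 -pthread handshake.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum vcpu_state { VCPU_RUNNING, VCPU_HALTED, VCPU_HASHED };

static _Atomic int state  = VCPU_RUNNING;	/* models pn->state  */
static _Atomic int locked = 0;			/* models pn->locked */

/* Waiter, mirroring pv_wait_node(): publish VCPU_HALTED, then check ->locked. */
static void *waiter(void *arg)
{
	atomic_store(&state, VCPU_HALTED);	/* [S] pn->state = VCPU_HALTED (+ MB) */
	if (!atomic_load(&locked))		/* [L] pn->locked                     */
		while (atomic_load(&state) == VCPU_HALTED)
			;			/* busy-wait stands in for pv_wait()  */

	/* Keep VCPU_HASHED if the kicker set it; otherwise return to running. */
	int old = VCPU_HALTED;
	atomic_compare_exchange_strong(&state, &old, VCPU_RUNNING);
	return arg;
}

/* Lock holder, mirroring pv_kick_node(): set ->locked, then RmW the state. */
static void *kicker(void *arg)
{
	atomic_store(&locked, 1);		/* [S] next->locked = 1 (+ MB)        */
	int old = VCPU_HALTED;
	/* [RmW] pn->state = VCPU_HASHED; fails only if the waiter never halted. */
	atomic_compare_exchange_strong(&state, &old, VCPU_HASHED);
	return arg;
}

int main(void)
{
	pthread_t w, k;

	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&k, NULL, kicker, NULL);
	pthread_join(w, NULL);
	pthread_join(k, NULL);
	printf("state=%d locked=%d\n", atomic_load(&state), atomic_load(&locked));
	return 0;
}

Because every access in the model is sequentially consistent, at least one side always observes the other's store: either the waiter sees locked == 1 and skips the wait, or the kicker's compare-and-swap finds VCPU_HALTED and ends the spin. That is the guarantee the kernel code obtains from smp_store_mb() on the waiter side and smp_mb__before_atomic() before try_cmpxchg_relaxed() on the kicker side.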