@@ -313,25 +313,23 @@ static inline void ocf_alock_unlock_entry_rd(struct ocf_alock *alock,
313313 env_atomic_dec (access );
314314}
315315
316- static inline bool ocf_alock_trylock_entry_wr2wr (struct ocf_alock * alock ,
316+ static inline void ocf_alock_lock_entry_wr2wr (struct ocf_alock * alock ,
317317 ocf_cache_line_t entry )
318318{
319319 env_atomic * access = & alock -> access [entry ];
320320 int v = env_atomic_read (access );
321321
322322 ENV_BUG_ON (v != OCF_CACHE_LINE_ACCESS_WR );
323- return true;
324323}
325324
326- static inline bool ocf_alock_trylock_entry_wr2rd (struct ocf_alock * alock ,
325+ static inline void ocf_alock_lock_entry_wr2rd (struct ocf_alock * alock ,
327326 ocf_cache_line_t entry )
328327{
329328 env_atomic * access = & alock -> access [entry ];
330329 int v = env_atomic_read (access );
331330
332331 ENV_BUG_ON (v != OCF_CACHE_LINE_ACCESS_WR );
333332 env_atomic_set (access , OCF_CACHE_LINE_ACCESS_ONE_RD );
334- return true;
335333}
336334
337335static inline bool ocf_alock_trylock_entry_rd2wr (struct ocf_alock * alock ,
@@ -491,68 +489,95 @@ bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
491489}
492490
493491/*
494- * Unlocks the given read lock. If any waiters are registered for the same
495- * cacheline, one is awakened and the lock is either upgraded to a write lock
496- * or kept as a readlock. If there are no waiters, it's just unlocked.
492+ * Unlocks the given read lock.
493+ *
494+ * The lock can be upgraded to write lock and passed to the first waiter if the
495+ * current owner is the last one holding the read lock.
496+ *
497+ * If there are requests waiting for read access to the cache line, the lock
498+ * will be granted to all such waiters that are not preceded by a write request.
497499 */
498500static inline void ocf_alock_unlock_one_rd_common (struct ocf_alock * alock ,
499501 const ocf_cache_line_t entry )
500502{
501- bool locked = false;
502- bool exchanged = false;
503-
503+ bool locked = false, no_waiters = true;
504+ int rw ;
504505 uint32_t idx = _WAITERS_LIST_ITEM (entry );
505506 struct ocf_alock_waiters_list * lst = & alock -> waiters_lsts [idx ];
506- struct ocf_alock_waiter * waiter ;
507-
508- struct list_head * iter , * next ;
507+ struct ocf_alock_waiter * waiter , * waiter_tmp ;
509508
510509 /*
511510 * Lock exchange scenario
512511 * 1. RD -> IDLE
513- * 2. RD -> RD
514- * 3. RD -> WR
512+ * 2. RD -> WR
513+ * 3. RD -> RD
514+ * 4. RD -> Multiple RD
515515 */
516516
517- /* Check is requested page is on the list */
518- list_for_each_safe (iter , next , & lst -> head ) {
519- waiter = list_entry (iter , struct ocf_alock_waiter , item );
517+ /* Check if any request is waiting for the cache line */
518+ list_for_each_entry_safe (waiter , waiter_tmp , & lst -> head , item ) {
519+ if (entry == waiter -> entry ) {
520+ no_waiters = false;
521+ break ;
522+ }
523+ }
524+
525+ /* RD -> IDLE */
526+ if (no_waiters ) {
527+ ocf_alock_unlock_entry_rd (alock , entry );
528+ return ;
529+ }
530+
531+ ENV_BUG_ON (waiter -> rw != OCF_READ && waiter -> rw != OCF_WRITE );
532+
533+ /* RD -> WR/RD */
534+ if (waiter -> rw == OCF_WRITE )
535+ locked = ocf_alock_trylock_entry_rd2wr (alock , entry );
536+ else if (waiter -> rw == OCF_READ )
537+ locked = ocf_alock_trylock_entry_rd2rd (alock , entry );
520538
539+ if (unlikely (!locked )) {
540+ ocf_alock_unlock_entry_rd (alock , entry );
541+ return ;
542+ }
543+
544+ rw = waiter -> rw ;
545+
546+ list_del (& waiter -> item );
547+
548+ ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
549+ ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
550+
551+ env_allocator_del (alock -> allocator , waiter );
552+
553+ /* If we upgraded to write lock, the read requests won't be able to
554+ * lock the cache line anyway
555+ */
556+ if (rw == OCF_WRITE )
557+ return ;
558+
559+ waiter = waiter_tmp ;
560+
561+ /* RD -> Multiple RD */
562+ list_for_each_entry_safe_from (waiter , waiter_tmp , & lst -> head , item ) {
521563 if (entry != waiter -> entry )
522564 continue ;
523565
524566 ENV_BUG_ON (waiter -> rw != OCF_READ && waiter -> rw != OCF_WRITE );
525567
526- if (!exchanged ) {
527- if (waiter -> rw == OCF_WRITE )
528- locked = ocf_alock_trylock_entry_rd2wr (alock , entry );
529- else if (waiter -> rw == OCF_READ )
530- locked = ocf_alock_trylock_entry_rd2rd (alock , entry );
531- } else {
532- if (waiter -> rw == OCF_WRITE )
533- locked = ocf_alock_trylock_entry_wr (alock , entry );
534- else if (waiter -> rw == OCF_READ )
535- locked = ocf_alock_trylock_entry_rd (alock , entry );
536- }
568+ if (waiter -> rw == OCF_WRITE )
569+ return ;
537570
538- if ( locked ) {
539- exchanged = true;
540- list_del ( iter );
571+ locked = ocf_alock_trylock_entry_rd ( alock , entry );
572+ /* There is no limit for number of readers */
573+ ENV_BUG_ON (! locked );
541574
542- ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
543- ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
575+ list_del (& waiter -> item );
544576
545- env_allocator_del (alock -> allocator , waiter );
546- } else {
547- break ;
548- }
549- }
577+ ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
578+ ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
550579
551- if (!exchanged ) {
552- /* No exchange, no waiters on the list, unlock and return
553- * WR -> IDLE
554- */
555- ocf_alock_unlock_entry_rd (alock , entry );
580+ env_allocator_del (alock -> allocator , waiter );
556581 }
557582}
558583
@@ -576,68 +601,90 @@ void ocf_alock_unlock_one_rd(struct ocf_alock *alock,
576601}
577602
578603/*
579- * Unlocks the given write lock. If any waiters are registered for the same
580- * cacheline, one is awakened and the lock is either downgraded to a readlock
581- * or kept as a writelock. If there are no waiters, it's just unlocked.
604+ * Unlocks the given write lock.
605+ *
606+ * The lock can be passed to the first waiter.
607+ *
608+ * If there are requests waiting for read access to the cache line, the lock
609+ * will be downgraded and granted to all such waiters that are not preceded by
610+ * a write request.
582611 */
583612static inline void ocf_alock_unlock_one_wr_common (struct ocf_alock * alock ,
584613 const ocf_cache_line_t entry )
585614{
586- bool locked = false;
587- bool exchanged = false;
588-
615+ bool locked = false, no_waiters = true;
616+ int rw ;
589617 uint32_t idx = _WAITERS_LIST_ITEM (entry );
590618 struct ocf_alock_waiters_list * lst = & alock -> waiters_lsts [idx ];
591- struct ocf_alock_waiter * waiter ;
592-
593- struct list_head * iter , * next ;
619+ struct ocf_alock_waiter * waiter , * waiter_tmp ;
594620
595621 /*
596622 * Lock exchange scenario
597623 * 1. WR -> IDLE
598- * 2. WR -> RD
599- * 3. WR -> WR
624+ * 2. WR -> WR
625+ * 3. WR -> RD
626+ * 4. WR -> Multiple RD
600627 */
601628
602- /* Check is requested page is on the list */
603- list_for_each_safe (iter , next , & lst -> head ) {
604- waiter = list_entry (iter , struct ocf_alock_waiter , item );
629+ /* Check if any request is waiting for the cache line */
630+ list_for_each_entry_safe (waiter , waiter_tmp , & lst -> head , item ) {
631+ if (entry == waiter -> entry ) {
632+ no_waiters = false;
633+ break ;
634+ }
635+ }
636+
637+ /* WR -> IDLE */
638+ if (no_waiters ) {
639+ ocf_alock_unlock_entry_wr (alock , entry );
640+ return ;
641+ }
642+
643+ ENV_BUG_ON (waiter -> rw != OCF_READ && waiter -> rw != OCF_WRITE );
605644
645+ /* WR -> WR/RD */
646+ if (waiter -> rw == OCF_WRITE )
647+ ocf_alock_lock_entry_wr2wr (alock , entry );
648+ else if (waiter -> rw == OCF_READ )
649+ ocf_alock_lock_entry_wr2rd (alock , entry );
650+
651+ rw = waiter -> rw ;
652+
653+ list_del (& waiter -> item );
654+
655+ ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
656+ ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
657+
658+ env_allocator_del (alock -> allocator , waiter );
659+
660+ /* If we passed the write lock, the read requests won't be able to lock
661+ * the cache line anyway
662+ */
663+ if (rw == OCF_WRITE )
664+ return ;
665+
666+ waiter = waiter_tmp ;
667+
668+ /* WR -> Multiple RD */
669+ list_for_each_entry_safe_from (waiter , waiter_tmp , & lst -> head , item ) {
606670 if (entry != waiter -> entry )
607671 continue ;
608672
609673 ENV_BUG_ON (waiter -> rw != OCF_READ && waiter -> rw != OCF_WRITE );
610674
611- if (!exchanged ) {
612- if (waiter -> rw == OCF_WRITE )
613- locked = ocf_alock_trylock_entry_wr2wr (alock , entry );
614- else if (waiter -> rw == OCF_READ )
615- locked = ocf_alock_trylock_entry_wr2rd (alock , entry );
616- } else {
617- if (waiter -> rw == OCF_WRITE )
618- locked = ocf_alock_trylock_entry_wr (alock , entry );
619- else if (waiter -> rw == OCF_READ )
620- locked = ocf_alock_trylock_entry_rd (alock , entry );
621- }
675+ if (waiter -> rw == OCF_WRITE )
676+ return ;
622677
623- if ( locked ) {
624- exchanged = true;
625- list_del ( iter );
678+ locked = ocf_alock_trylock_entry_rd ( alock , entry );
679+ /* There is no limit for number of readers */
680+ ENV_BUG_ON (! locked );
626681
627- ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
628- ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
682+ list_del (& waiter -> item );
629683
630- env_allocator_del (alock -> allocator , waiter );
631- } else {
632- break ;
633- }
634- }
684+ ocf_alock_mark_index_locked (alock , waiter -> req , waiter -> idx , true);
685+ ocf_alock_entry_locked (alock , waiter -> req , waiter -> cmpl );
635686
636- if (!exchanged ) {
637- /* No exchange, no waiters on the list, unlock and return
638- * WR -> IDLE
639- */
640- ocf_alock_unlock_entry_wr (alock , entry );
687+ env_allocator_del (alock -> allocator , waiter );
641688 }
642689}
643690
0 commit comments