@@ -607,14 +607,19 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
		if (gh && (ret & LM_OUT_CANCELED))
			gfs2_holder_wake(gh);
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
-			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
-				list_move_tail(&gh->gh_list, &gl->gl_holders);
+				list_del_init(&gh->gh_list);
+				trace_gfs2_glock_queue(gh, 0);
+				gl->gl_target = gl->gl_state;
				gh = find_first_waiter(gl);
-				gl->gl_target = gh->gh_state;
-				if (do_promote(gl))
-					goto out;
-				goto retry;
+				if (gh) {
+					gl->gl_target = gh->gh_state;
+					if (do_promote(gl))
+						goto out;
+					do_xmote(gl, gh, gl->gl_target);
+					return;
+				}
+				goto out;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
@@ -627,7 +632,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
		switch (state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
-retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
@@ -661,7 +665,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
		do_promote(gl);
	}
out:
-	clear_bit(GLF_LOCK, &gl->gl_flags);
+	if (!test_bit(GLF_CANCELING, &gl->gl_flags))
+		clear_bit(GLF_LOCK, &gl->gl_flags);
}

static bool is_system_glock(struct gfs2_glock *gl)
@@ -807,6 +812,7 @@ __acquires(&gl->gl_lockref.lock)
	}

	if (ls->ls_ops->lm_lock) {
+		set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
		spin_unlock(&gl->gl_lockref.lock);
		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
		spin_lock(&gl->gl_lockref.lock);
@@ -825,6 +831,7 @@ __acquires(&gl->gl_lockref.lock)
			/* The operation will be completed asynchronously. */
			return;
		}
+		clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
	}

	/* Complete the operation now. */
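Note (not part of the patch): the two hunks above bracket the asynchronous lm_lock() call with GLF_PENDING_REPLY. The bit is set before the request is handed to the lock manager and cleared again on the synchronous completion path here, and in gfs2_glock_complete() further down. The following standalone userspace sketch only illustrates that bracketing pattern; all names are made up and plain pthreads stand in for the glock machinery.

/*
 * Illustrative sketch, not GFS2 code: a "pending reply" flag is set before
 * handing a request to an asynchronous backend and cleared either on the
 * synchronous error path or by the completion callback, so a dump of the
 * flags shows whether a reply is still outstanding.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lock_req {
	pthread_mutex_t lock;
	bool pending_reply;	/* analogous to GLF_PENDING_REPLY */
	int reply;
};

/* Submit side: mark the request pending before dropping the lock. */
static void submit(struct lock_req *rq)
{
	pthread_mutex_lock(&rq->lock);
	rq->pending_reply = true;
	pthread_mutex_unlock(&rq->lock);
	/* ... hand the request to the asynchronous backend here ... */
}

/* Completion side: clear the flag under the same lock as the reply. */
static void complete(struct lock_req *rq, int reply)
{
	pthread_mutex_lock(&rq->lock);
	rq->pending_reply = false;
	rq->reply = reply;
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct lock_req rq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	submit(&rq);
	printf("pending: %d\n", rq.pending_reply);	/* prints 1 */
	complete(&rq, 0);
	printf("pending: %d\n", rq.pending_reply);	/* prints 0 */
	return 0;
}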
@@ -843,12 +850,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
-	struct gfs2_holder *gh = NULL;
+	struct gfs2_holder *gh;

	if (test_bit(GLF_LOCK, &gl->gl_flags))
		return;
	set_bit(GLF_LOCK, &gl->gl_flags);

+	/* While a demote is in progress, the GLF_LOCK flag must be set. */
	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
@@ -860,18 +868,22 @@ __acquires(&gl->gl_lockref.lock)
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
+		do_xmote(gl, NULL, gl->gl_target);
+		return;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		if (do_promote(gl))
			goto out_unlock;
		gh = find_first_waiter(gl);
+		if (!gh)
+			goto out_unlock;
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
+		do_xmote(gl, gh, gl->gl_target);
+		return;
	}
-	do_xmote(gl, gh, gl->gl_target);
-	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -898,12 +910,8 @@ void glock_set_object(struct gfs2_glock *gl, void *object)
	prev_object = gl->gl_object;
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
-	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
-		pr_warn("glock=%u/%llx\n",
-			gl->gl_name.ln_type,
-			(unsigned long long)gl->gl_name.ln_number);
+	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
-	}
}

/**
@@ -919,12 +927,8 @@ void glock_clear_object(struct gfs2_glock *gl, void *object)
	prev_object = gl->gl_object;
	gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
-	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
-		pr_warn("glock=%u/%llx\n",
-			gl->gl_name.ln_type,
-			(unsigned long long)gl->gl_name.ln_number);
+	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
		gfs2_dump_glock(NULL, gl, true);
-	}
}

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
@@ -959,6 +963,25 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
	gfs2_holder_uninit(&gh);
}

+static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip && !igrab(&ip->i_inode))
+		ip = NULL;
+	spin_unlock(&gl->gl_lockref.lock);
+	if (ip) {
+		wait_on_inode(&ip->i_inode);
+		if (is_bad_inode(&ip->i_inode)) {
+			iput(&ip->i_inode);
+			ip = NULL;
+		}
+	}
+	return ip;
+}
+
static void gfs2_try_evict(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
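Note (not part of the patch): the new gfs2_grab_existing_inode() helper factors out a grab-under-lock, validate-outside-the-lock pattern, and the next hunk then uses it twice in gfs2_try_evict(). As a rough illustration of that pattern only (hypothetical names, userspace refcounting instead of igrab()/iput(), no inode semantics), a minimal sketch:

/*
 * Illustrative sketch, not GFS2 code: take a reference to an object while
 * holding the lock that protects the pointer, then do the slow validation
 * only after dropping that lock, releasing the reference if it fails.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;
	int bad;		/* stands in for is_bad_inode() */
};

struct owner {
	pthread_mutex_t lock;
	struct object *obj;	/* protected by lock, may become NULL */
};

static void put_object(struct object *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);
}

/* Loosely analogous to gfs2_grab_existing_inode(). */
static struct object *grab_existing_object(struct owner *o)
{
	struct object *obj;

	pthread_mutex_lock(&o->lock);
	obj = o->obj;
	if (obj)
		atomic_fetch_add(&obj->refcount, 1);
	pthread_mutex_unlock(&o->lock);
	if (obj && obj->bad) {
		put_object(obj);
		obj = NULL;
	}
	return obj;
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER, .obj = obj };
	struct object *ref;

	atomic_init(&obj->refcount, 1);
	ref = grab_existing_object(&o);
	printf("grabbed: %s\n", ref ? "yes" : "no");
	if (ref)
		put_object(ref);
	put_object(obj);
	return 0;
}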
@@ -976,32 +999,15 @@ static void gfs2_try_evict(struct gfs2_glock *gl)
	 * happened below. (Verification is triggered by the call to
	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
	 */
-	spin_lock(&gl->gl_lockref.lock);
-	ip = gl->gl_object;
-	if (ip && !igrab(&ip->i_inode))
-		ip = NULL;
-	spin_unlock(&gl->gl_lockref.lock);
-	if (ip) {
-		wait_on_inode(&ip->i_inode);
-		if (is_bad_inode(&ip->i_inode)) {
-			iput(&ip->i_inode);
-			ip = NULL;
-		}
-	}
+	ip = gfs2_grab_existing_inode(gl);
	if (ip) {
-		set_bit(GIF_DEFER_DELETE, &ip->i_flags);
+		set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
		d_prune_aliases(&ip->i_inode);
		iput(&ip->i_inode);
+		clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);

		/* If the inode was evicted, gl->gl_object will now be NULL. */
-		spin_lock(&gl->gl_lockref.lock);
-		ip = gl->gl_object;
-		if (ip) {
-			clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
-			if (!igrab(&ip->i_inode))
-				ip = NULL;
-		}
-		spin_unlock(&gl->gl_lockref.lock);
+		ip = gfs2_grab_existing_inode(gl);
		if (ip) {
			gfs2_glock_poke(ip->i_gl);
			iput(&ip->i_inode);
@@ -1462,9 +1468,7 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
	if (!(gh->gh_flags & GL_NOPID))
		return true;
-	if (gh->gh_state == LM_ST_UNLOCKED)
-		return true;
-	return false;
+	return !test_bit(HIF_HOLDER, &gh->gh_iflags);
}

/**
@@ -1483,7 +1487,6 @@ __acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

@@ -1519,21 +1522,11 @@ __acquires(&gl->gl_lockref.lock)
			gfs2_holder_wake(gh);
			return;
		}
-		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
-			continue;
	}
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
-	if (likely(insert_pt == NULL)) {
-		list_add_tail(&gh->gh_list, &gl->gl_holders);
-		return;
-	}
-	list_add_tail(&gh->gh_list, insert_pt);
-	spin_unlock(&gl->gl_lockref.lock);
-	if (sdp->sd_lockstruct.ls_ops->lm_cancel)
-		sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-	spin_lock(&gl->gl_lockref.lock);
+	list_add_tail(&gh->gh_list, &gl->gl_holders);
	return;

trap_recursive:
@@ -1673,11 +1666,19 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
	}

	if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
-	    !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+	    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
+	    test_bit(GLF_LOCK, &gl->gl_flags) &&
+	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+	    !test_bit(GLF_CANCELING, &gl->gl_flags)) {
+		set_bit(GLF_CANCELING, &gl->gl_flags);
		spin_unlock(&gl->gl_lockref.lock);
		gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
		wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
		spin_lock(&gl->gl_lockref.lock);
+		clear_bit(GLF_CANCELING, &gl->gl_flags);
+		clear_bit(GLF_LOCK, &gl->gl_flags);
+		if (!gfs2_holder_queued(gh))
+			goto out;
	}

	/*
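Note (not part of the patch): in the hunk above, gfs2_glock_dq() takes ownership of the in-flight request by setting GLF_CANCELING before calling lm_cancel(), the earlier finish_xmote() change leaves GLF_LOCK alone while that bit is set, and the dequeuing thread clears both bits itself after the wait. A minimal userspace sketch of that hand-off, with invented names and a direct call standing in for the asynchronous reply:

/*
 * Illustrative sketch, not GFS2 code: while a cancel is in flight, a
 * CANCELING bit tells the completion path not to drop the BUSY bit; the
 * canceling thread clears both bits itself once its wait is over.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lk {
	pthread_mutex_t lock;
	bool busy;		/* analogous to GLF_LOCK */
	bool canceling;		/* analogous to GLF_CANCELING */
};

/* Completion path: leave BUSY alone if a cancel owns the state machine. */
static void on_complete(struct lk *lk)
{
	pthread_mutex_lock(&lk->lock);
	if (!lk->canceling)
		lk->busy = false;
	pthread_mutex_unlock(&lk->lock);
}

/* Cancel path: take ownership, then clear both bits after the wait. */
static void cancel(struct lk *lk)
{
	pthread_mutex_lock(&lk->lock);
	lk->canceling = true;
	pthread_mutex_unlock(&lk->lock);

	/* ... ask the backend to cancel and wait for its answer ... */
	on_complete(lk);	/* stands in for the asynchronous reply */

	pthread_mutex_lock(&lk->lock);
	lk->canceling = false;
	lk->busy = false;
	pthread_mutex_unlock(&lk->lock);
}

int main(void)
{
	struct lk lk = { .lock = PTHREAD_MUTEX_INITIALIZER, .busy = true };

	cancel(&lk);
	printf("busy=%d canceling=%d\n", lk.busy, lk.canceling);
	return 0;
}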
@@ -1923,6 +1924,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
+	clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
@@ -2323,6 +2325,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
+	if (test_bit(GLF_PENDING_REPLY, gflags))
+		*p++ = 'R';
	if (test_bit(GLF_HAVE_REPLY, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
@@ -2347,6 +2351,10 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
		*p++ = 'e';
	if (test_bit(GLF_VERIFY_DELETE, gflags))
		*p++ = 'E';
+	if (test_bit(GLF_DEFER_DELETE, gflags))
+		*p++ = 's';
+	if (test_bit(GLF_CANCELING, gflags))
+		*p++ = 'C';
	*p = 0;
	return buf;
}
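Note (not part of the patch): the last two hunks register the new flags in the debugfs formatter, 'R' for GLF_PENDING_REPLY, 's' for GLF_DEFER_DELETE and 'C' for GLF_CANCELING, using the same one-character-per-bit style as the existing gflags2str() entries. A trivial standalone sketch of that formatting style, with made-up flag values rather than the kernel's:

/*
 * Illustrative sketch, not GFS2 code: each set bit appends one character,
 * mirroring the gflags2str() style shown in the hunks above.
 */
#include <stdio.h>

enum {
	FLAG_PENDING_REPLY	= 1 << 0,
	FLAG_DEFER_DELETE	= 1 << 1,
	FLAG_CANCELING		= 1 << 2,
};

static const char *flags2str(char *buf, unsigned long flags)
{
	char *p = buf;

	if (flags & FLAG_PENDING_REPLY)
		*p++ = 'R';
	if (flags & FLAG_DEFER_DELETE)
		*p++ = 's';
	if (flags & FLAG_CANCELING)
		*p++ = 'C';
	*p = 0;
	return buf;
}

int main(void)
{
	char buf[8];

	printf("%s\n", flags2str(buf, FLAG_PENDING_REPLY | FLAG_CANCELING));
	return 0;	/* prints "RC" */
}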