@@ -274,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * Enqueue the glock on the work queue. Passes one glock reference on to the
  * work queue.
  */
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 		/*
 		 * We are holding the lockref spinlock, and the work was still
@@ -287,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
 	}
 }
 
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-	spin_lock(&gl->gl_lockref.lock);
-	__gfs2_glock_queue_work(gl, delay);
-	spin_unlock(&gl->gl_lockref.lock);
-}
-
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -337,7 +331,8 @@ void gfs2_glock_put_async(struct gfs2_glock *gl)
 	if (lockref_put_or_lock(&gl->gl_lockref))
 		return;
 
-	__gfs2_glock_queue_work(gl, 0);
+	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -831,7 +826,7 @@ __acquires(&gl->gl_lockref.lock)
 		 */
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-		__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+		gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 		return;
 	} else {
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -861,7 +856,7 @@ __acquires(&gl->gl_lockref.lock)
 
 	/* Complete the operation now. */
 	finish_xmote(gl, target);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -909,7 +904,7 @@ __acquires(&gl->gl_lockref.lock)
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_atomic();
 	gl->gl_lockref.count++;
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	return;
 
 out_unlock:
@@ -1141,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
 		drop_refs--;
 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 			delay = 0;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 
 	/*
 	 * Drop the remaining glock references manually here. (Mind that
-	 * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+	 * gfs2_glock_queue_work depends on the lockref spinlock being held
 	 * here as well.)
 	 */
 	gl->gl_lockref.count -= drop_refs;
@@ -1651,7 +1646,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		gl->gl_lockref.count++;
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1717,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 		    gl->gl_name.ln_type == LM_TYPE_INODE)
 			delay = gl->gl_hold_time;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 }
 
@@ -1941,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 			delay = gl->gl_hold_time;
 	}
 	handle_callback(gl, state, delay, true);
-	__gfs2_glock_queue_work(gl, delay);
+	gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2001,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2070,7 +2065,7 @@ __acquires(&lru_lock)
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
@@ -2194,7 +2189,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 	spin_lock(&gl->gl_lockref.lock);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2213,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
 		gl->gl_lockref.count++;
 		if (gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	spin_unlock(&gl->gl_lockref.lock);
 }
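
Note (not part of the patch): with the locking wrapper removed, the renamed gfs2_glock_queue_work() is called with gl->gl_lockref.lock already held, and it passes one glock reference on to the work queue. A minimal sketch of the resulting caller pattern, assuming the glock definitions in fs/gfs2/glock.c; example_requeue_glock() is a hypothetical caller shown only for illustration:

	/* Hypothetical caller, illustrating the calling convention after this change. */
	static void example_requeue_glock(struct gfs2_glock *gl)
	{
		spin_lock(&gl->gl_lockref.lock);
		gl->gl_lockref.count++;		/* reference handed off to the work queue */
		gfs2_glock_queue_work(gl, 0);	/* lockref spinlock is held, as required */
		spin_unlock(&gl->gl_lockref.lock);
	}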