Skip to content

Commit 1e86044

Browse files
author
Andreas Gruenbacher
committed
gfs2: Remove and replace gfs2_glock_queue_work
There are no more callers of gfs2_glock_queue_work() left, so remove that helper. With that, we can now rename __gfs2_glock_queue_work() back to gfs2_glock_queue_work() to get rid of some unnecessary clutter. Signed-off-by: Andreas Gruenbacher <[email protected]>
1 parent 9947a06 commit 1e86044

File tree

1 file changed

+15
-20
lines changed

1 file changed

+15
-20
lines changed

fs/gfs2/glock.c

Lines changed: 15 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
274274
* Enqueue the glock on the work queue. Passes one glock reference on to the
275275
* work queue.
276276
*/
277-
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
277+
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
278278
if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
279279
/*
280280
* We are holding the lockref spinlock, and the work was still
@@ -287,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
287287
}
288288
}
289289

290-
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
291-
spin_lock(&gl->gl_lockref.lock);
292-
__gfs2_glock_queue_work(gl, delay);
293-
spin_unlock(&gl->gl_lockref.lock);
294-
}
295-
296290
static void __gfs2_glock_put(struct gfs2_glock *gl)
297291
{
298292
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -337,7 +331,8 @@ void gfs2_glock_put_async(struct gfs2_glock *gl)
337331
if (lockref_put_or_lock(&gl->gl_lockref))
338332
return;
339333

340-
__gfs2_glock_queue_work(gl, 0);
334+
GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
335+
gfs2_glock_queue_work(gl, 0);
341336
spin_unlock(&gl->gl_lockref.lock);
342337
}
343338

@@ -831,7 +826,7 @@ __acquires(&gl->gl_lockref.lock)
831826
*/
832827
clear_bit(GLF_LOCK, &gl->gl_flags);
833828
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
834-
__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
829+
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
835830
return;
836831
} else {
837832
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -861,7 +856,7 @@ __acquires(&gl->gl_lockref.lock)
861856

862857
/* Complete the operation now. */
863858
finish_xmote(gl, target);
864-
__gfs2_glock_queue_work(gl, 0);
859+
gfs2_glock_queue_work(gl, 0);
865860
}
866861

867862
/**
@@ -909,7 +904,7 @@ __acquires(&gl->gl_lockref.lock)
909904
clear_bit(GLF_LOCK, &gl->gl_flags);
910905
smp_mb__after_atomic();
911906
gl->gl_lockref.count++;
912-
__gfs2_glock_queue_work(gl, 0);
907+
gfs2_glock_queue_work(gl, 0);
913908
return;
914909

915910
out_unlock:
@@ -1141,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
11411136
drop_refs--;
11421137
if (gl->gl_name.ln_type != LM_TYPE_INODE)
11431138
delay = 0;
1144-
__gfs2_glock_queue_work(gl, delay);
1139+
gfs2_glock_queue_work(gl, delay);
11451140
}
11461141

11471142
/*
11481143
* Drop the remaining glock references manually here. (Mind that
1149-
* __gfs2_glock_queue_work depends on the lockref spinlock being held
1144+
* gfs2_glock_queue_work depends on the lockref spinlock being held
11501145
* here as well.)
11511146
*/
11521147
gl->gl_lockref.count -= drop_refs;
@@ -1651,7 +1646,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
16511646
test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
16521647
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
16531648
gl->gl_lockref.count++;
1654-
__gfs2_glock_queue_work(gl, 0);
1649+
gfs2_glock_queue_work(gl, 0);
16551650
}
16561651
run_queue(gl, 1);
16571652
spin_unlock(&gl->gl_lockref.lock);
@@ -1717,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
17171712
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
17181713
gl->gl_name.ln_type == LM_TYPE_INODE)
17191714
delay = gl->gl_hold_time;
1720-
__gfs2_glock_queue_work(gl, delay);
1715+
gfs2_glock_queue_work(gl, delay);
17211716
}
17221717
}
17231718

@@ -1941,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
19411936
delay = gl->gl_hold_time;
19421937
}
19431938
handle_callback(gl, state, delay, true);
1944-
__gfs2_glock_queue_work(gl, delay);
1939+
gfs2_glock_queue_work(gl, delay);
19451940
spin_unlock(&gl->gl_lockref.lock);
19461941
}
19471942

@@ -2001,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
20011996

20021997
gl->gl_lockref.count++;
20031998
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
2004-
__gfs2_glock_queue_work(gl, 0);
1999+
gfs2_glock_queue_work(gl, 0);
20052000
spin_unlock(&gl->gl_lockref.lock);
20062001
}
20072002

@@ -2070,7 +2065,7 @@ __acquires(&lru_lock)
20702065
gl->gl_lockref.count++;
20712066
if (demote_ok(gl))
20722067
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
2073-
__gfs2_glock_queue_work(gl, 0);
2068+
gfs2_glock_queue_work(gl, 0);
20742069
spin_unlock(&gl->gl_lockref.lock);
20752070
cond_resched_lock(&lru_lock);
20762071
}
@@ -2194,7 +2189,7 @@ static void thaw_glock(struct gfs2_glock *gl)
21942189

21952190
spin_lock(&gl->gl_lockref.lock);
21962191
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
2197-
__gfs2_glock_queue_work(gl, 0);
2192+
gfs2_glock_queue_work(gl, 0);
21982193
spin_unlock(&gl->gl_lockref.lock);
21992194
}
22002195

@@ -2213,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
22132208
gl->gl_lockref.count++;
22142209
if (gl->gl_state != LM_ST_UNLOCKED)
22152210
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
2216-
__gfs2_glock_queue_work(gl, 0);
2211+
gfs2_glock_queue_work(gl, 0);
22172212
}
22182213
spin_unlock(&gl->gl_lockref.lock);
22192214
}

0 commit comments

Comments
 (0)