Commit 7a1ad9d

Author: Andreas Gruenbacher

gfs2: Fix lru_count accounting

Currently, gfs2_scan_glock_lru() decrements lru_count when a glock is moved onto the dispose list. When such a glock is then stolen from the dispose list while gfs2_dispose_glock_lru() doesn't hold the lru_lock, lru_count will be decremented again, so the counter will eventually go negative.

This bug has existed in one form or another since at least commit 97cc102 ("GFS2: Kill two daemons with one patch").

Fix this by only decrementing lru_count when we actually remove a glock and schedule it to be unlocked and dropped. We also don't need to remove and then re-add glocks when we can just as well move them back onto the lru_list when necessary.

In addition, return the number of glocks freed as we should, not the number of glocks moved onto the dispose list.

Signed-off-by: Andreas Gruenbacher <[email protected]>

1 parent: acf1f42
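
To make the race concrete, here is a minimal userspace sketch of the accounting bug, not the kernel code: lru_count, scan() and steal() are hypothetical stand-ins for the global counter, the old gfs2_scan_glock_lru(), and a path that steals the glock off the dispose list while lru_lock is dropped. Locking is omitted because a single thread can replay the bad interleaving:

#include <assert.h>
#include <stdio.h>

static int lru_count = 1;   /* models the global lru_count; one node on the LRU */
static int on_dispose;      /* models "node is linked on the dispose list" */

/* Models the old gfs2_scan_glock_lru(): it decremented lru_count
 * already when *moving* a glock onto the dispose list. */
static void scan(void)
{
	on_dispose = 1;
	lru_count--;            /* first decrement */
}

/* Models a racing path that steals the node off the dispose list in
 * the window where gfs2_dispose_glock_lru() has dropped lru_lock.
 * It sees the node still linked on a list and "fixes up" the counter. */
static void steal(void)
{
	if (on_dispose) {
		on_dispose = 0;
		lru_count--;    /* second decrement for the same node */
	}
}

int main(void)
{
	scan();
	steal();
	printf("lru_count = %d\n", lru_count);  /* prints -1 */
	assert(lru_count >= 0);                 /* fails: counter went negative */
	return 0;
}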

1 file changed: 13 additions, 14 deletions

fs/gfs2/glock.c

Lines changed: 13 additions & 14 deletions
@@ -2009,36 +2009,38 @@ static bool can_free_glock(struct gfs2_glock *gl)
  * private)
  */
 
-static void gfs2_dispose_glock_lru(struct list_head *list)
+static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
 __releases(&lru_lock)
 __acquires(&lru_lock)
 {
 	struct gfs2_glock *gl;
+	unsigned long freed = 0;
 
 	list_sort(NULL, list, glock_cmp);
 
 	while(!list_empty(list)) {
 		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
-		list_del_init(&gl->gl_lru);
-		clear_bit(GLF_LRU, &gl->gl_flags);
 		if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
-			list_add(&gl->gl_lru, &lru_list);
-			set_bit(GLF_LRU, &gl->gl_flags);
-			atomic_inc(&lru_count);
+			list_move(&gl->gl_lru, &lru_list);
 			continue;
 		}
 		if (!can_free_glock(gl)) {
 			spin_unlock(&gl->gl_lockref.lock);
 			goto add_back_to_lru;
 		}
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+		freed++;
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		__gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
+	return freed;
 }
 
 /**
@@ -2050,24 +2052,21 @@ __acquires(&lru_lock)
  * gfs2_dispose_glock_lru() above.
  */
 
-static long gfs2_scan_glock_lru(int nr)
+static unsigned long gfs2_scan_glock_lru(unsigned long nr)
 {
 	struct gfs2_glock *gl, *next;
 	LIST_HEAD(dispose);
-	long freed = 0;
+	unsigned long freed = 0;
 
 	spin_lock(&lru_lock);
 	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
-		if (nr-- <= 0)
+		if (!nr--)
 			break;
-		if (can_free_glock(gl)) {
+		if (can_free_glock(gl))
 			list_move(&gl->gl_lru, &dispose);
-			atomic_dec(&lru_count);
-			freed++;
-		}
 	}
 	if (!list_empty(&dispose))
-		gfs2_dispose_glock_lru(&dispose);
+		freed = gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
 
 	return freed;
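
For contrast, here is a sketch of the invariant the fix establishes, again a toy model with made-up names rather than kernel code: the counter is only decremented by whichever path actually unlinks the node, so any interleaving of the disposer and a racing removal decrements exactly once. (In the kernel, the number of glocks actually freed is likewise counted at that same point and returned by gfs2_dispose_glock_lru().)

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int lru_count = 1;    /* one node on the LRU */
static bool linked = true;   /* node is on lru_list or the dispose list */

/* Models the new gfs2_scan_glock_lru(): just a list_move() to the
 * dispose list; the counter is no longer touched here. */
static void scan(void)
{
}

/* Models any path that actually unlinks the node (the disposer's
 * list_del_init() or a racing removal, both under lru_lock): only
 * the first caller finds the node linked and decrements. */
static void unlink_node(void)
{
	if (!linked)
		return;
	linked = false;
	lru_count--;             /* exactly one decrement per node */
}

int main(void)
{
	scan();
	unlink_node();           /* whichever path wins the race */
	unlink_node();           /* the loser is a no-op */
	printf("lru_count = %d\n", lru_count);  /* prints 0 */
	assert(lru_count == 0);
	return 0;
}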
