Skip to content

Commit 767fd5a

Browse files
author
Andreas Gruenbacher
committed
gfs2: Revise glock reference counting model
In the current glock reference counting model, a bias of one is added to a glock's refcount when it is locked (gl->gl_state != LM_ST_UNLOCKED). A glock is removed from the lru_list when it is enqueued, and added back when it is dequeued. This isn't a very appropriate model because most glocks are held for long periods of time (for example, the inode "owns" references to its inode and iopen glocks as long as the inode is cached even when the glock state changes to LM_ST_UNLOCKED), and they can only be freed when they are no longer referenced, anyway.

Fix this by getting rid of the refcount bias for locked glocks. That way, we can use lockref_put_or_lock() to efficiently drop all but the last glock reference, and put the glock onto the lru_list when the last reference is dropped.

When find_insert_glock() returns a reference to a cached glock, it removes the glock from the lru_list.

Dumping the "glocks" and "glstats" debugfs files also takes glock references, but instead of removing the glocks from the lru_list in that case as well, we leave them on the list. This ensures that dumping those files won't perturb the order of the glocks on the lru_list.

In addition, when the last reference to an *unlocked* glock is dropped, we immediately free it; this preserves the preexisting behavior. If it later turns out that caching unlocked glocks is useful in some situations, we can change the caching strategy.

It is currently unclear if a glock that has no active references can have the GLF_LFLUSH flag set. To make sure that such a glock won't accidentally be evicted due to memory pressure, we add a GLF_LFLUSH check to gfs2_dispose_glock_lru().

Signed-off-by: Andreas Gruenbacher <[email protected]>
1 parent 30e388d commit 767fd5a

File tree

3 files changed

+30
-28
lines changed

3 files changed

+30
-28
lines changed

fs/gfs2/glock.c

Lines changed: 30 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,7 @@ static int demote_ok(const struct gfs2_glock *gl)
237237
}
238238

239239

240-
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
240+
static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
241241
{
242242
if (!(gl->gl_ops->go_flags & GLOF_LRU))
243243
return;
@@ -305,6 +305,20 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
305305
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
306306
}
307307

308+
static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
309+
{
310+
if (lockref_put_or_lock(&gl->gl_lockref))
311+
return true;
312+
GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
313+
if (gl->gl_state != LM_ST_UNLOCKED) {
314+
gl->gl_lockref.count--;
315+
gfs2_glock_add_to_lru(gl);
316+
spin_unlock(&gl->gl_lockref.lock);
317+
return true;
318+
}
319+
return false;
320+
}
321+
308322
/**
309323
* gfs2_glock_put() - Decrement reference count on glock
310324
* @gl: The glock to put
@@ -313,7 +327,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
313327

314328
void gfs2_glock_put(struct gfs2_glock *gl)
315329
{
316-
if (lockref_put_or_lock(&gl->gl_lockref))
330+
if (__gfs2_glock_put_or_lock(gl))
317331
return;
318332

319333
__gfs2_glock_put(gl);
@@ -328,10 +342,9 @@ void gfs2_glock_put(struct gfs2_glock *gl)
328342
*/
329343
void gfs2_glock_put_async(struct gfs2_glock *gl)
330344
{
331-
if (lockref_put_or_lock(&gl->gl_lockref))
345+
if (__gfs2_glock_put_or_lock(gl))
332346
return;
333347

334-
GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
335348
gfs2_glock_queue_work(gl, 0);
336349
spin_unlock(&gl->gl_lockref.lock);
337350
}
@@ -570,18 +583,6 @@ static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
570583

571584
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
572585
{
573-
int held1, held2;
574-
575-
held1 = (gl->gl_state != LM_ST_UNLOCKED);
576-
held2 = (new_state != LM_ST_UNLOCKED);
577-
578-
if (held1 != held2) {
579-
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
580-
if (held2)
581-
gl->gl_lockref.count++;
582-
else
583-
gl->gl_lockref.count--;
584-
}
585586
if (new_state != gl->gl_target)
586587
/* shorten our minimum hold time */
587588
gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
@@ -1139,10 +1140,14 @@ static void glock_work_func(struct work_struct *work)
11391140
}
11401141

11411142
/* Drop the remaining glock references manually. */
1143+
GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
11421144
gl->gl_lockref.count -= drop_refs;
11431145
if (!gl->gl_lockref.count) {
1144-
__gfs2_glock_put(gl);
1145-
return;
1146+
if (gl->gl_state == LM_ST_UNLOCKED) {
1147+
__gfs2_glock_put(gl);
1148+
return;
1149+
}
1150+
gfs2_glock_add_to_lru(gl);
11461151
}
11471152
spin_unlock(&gl->gl_lockref.lock);
11481153
}
@@ -1178,6 +1183,8 @@ static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
11781183
out:
11791184
rcu_read_unlock();
11801185
finish_wait(wq, &wait.wait);
1186+
if (gl)
1187+
gfs2_glock_remove_from_lru(gl);
11811188
return gl;
11821189
}
11831190

@@ -1626,9 +1633,6 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
16261633
return error;
16271634
}
16281635

1629-
if (test_bit(GLF_LRU, &gl->gl_flags))
1630-
gfs2_glock_remove_from_lru(gl);
1631-
16321636
gh->gh_error = 0;
16331637
spin_lock(&gl->gl_lockref.lock);
16341638
add_to_queue(gh);
@@ -1693,9 +1697,6 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
16931697
fast_path = 1;
16941698
}
16951699

1696-
if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
1697-
gfs2_glock_add_to_lru(gl);
1698-
16991700
if (unlikely(!fast_path)) {
17001701
gl->gl_lockref.count++;
17011702
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -2008,10 +2009,12 @@ static int glock_cmp(void *priv, const struct list_head *a,
20082009

20092010
static bool can_free_glock(struct gfs2_glock *gl)
20102011
{
2011-
bool held = gl->gl_state != LM_ST_UNLOCKED;
2012+
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
20122013

20132014
return !test_bit(GLF_LOCK, &gl->gl_flags) &&
2014-
gl->gl_lockref.count == held;
2015+
!gl->gl_lockref.count &&
2016+
(!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
2017+
test_bit(SDF_KILL, &sdp->sd_flags));
20152018
}
20162019

20172020
/**
@@ -2177,6 +2180,7 @@ static void thaw_glock(struct gfs2_glock *gl)
21772180
if (!lockref_get_not_dead(&gl->gl_lockref))
21782181
return;
21792182

2183+
gfs2_glock_remove_from_lru(gl);
21802184
spin_lock(&gl->gl_lockref.lock);
21812185
set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
21822186
gfs2_glock_queue_work(gl, 0);

fs/gfs2/glock.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,6 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
250250
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
251251
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
252252
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
253-
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
254253
void gfs2_glock_free(struct gfs2_glock *gl);
255254
void gfs2_glock_free_later(struct gfs2_glock *gl);
256255

fs/gfs2/super.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1524,7 +1524,6 @@ static void gfs2_evict_inode(struct inode *inode)
15241524
if (ip->i_gl) {
15251525
glock_clear_object(ip->i_gl, ip);
15261526
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1527-
gfs2_glock_add_to_lru(ip->i_gl);
15281527
gfs2_glock_put_eventually(ip->i_gl);
15291528
rcu_assign_pointer(ip->i_gl, NULL);
15301529
}

0 commit comments

Comments (0)