GFS2: speed up delete/unlink performance for large files

This patch improves the performance of delete/unlink operations
on large files in a GFS2 file system by adding a layer of
metadata read-ahead for indirect blocks. Mileage will vary, but
on my system the time to delete an 8.6GB file dropped from
22 seconds to about 4.5 seconds.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
commit bd5437a7d4307a35f2c7cc19cad706ec0e5d61f0 (parent f75bbfb)
Bob Peterson committed with swhiteho on Sep 15, 2011

23 additions and 3 deletions:
  fs/gfs2/bmap.c | +23 −3
@@ -831,7 +831,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 {
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
-	__be64 *top, *bottom;
+	__be64 *top, *bottom, *t2;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);
@@ -859,7 +859,27 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
	if (error)
		goto out;

-	if (height < ip->i_height - 1)
+	if (height < ip->i_height - 1) {
+		struct buffer_head *rabh;
+
+		for (t2 = top; t2 < bottom; t2++, first = 0) {
+			if (!*t2)
+				continue;
+
+			bn = be64_to_cpu(*t2);
+			rabh = gfs2_getbuf(ip->i_gl, bn, CREATE);
+			if (trylock_buffer(rabh)) {
+				if (buffer_uptodate(rabh)) {
+					unlock_buffer(rabh);
+					brelse(rabh);
+					continue;
+				}
+				rabh->b_end_io = end_buffer_read_sync;
+				submit_bh(READA | REQ_META, rabh);
+				continue;
+			}
+			brelse(rabh);
+		}
		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;
@@ -871,7 +891,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			if (error)
				break;
		}
-
+	}
 out:
	brelse(bh);
	return error;
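
The new loop makes a non-blocking pass over the pointer block and issues
asynchronous reads for every indirect block it is about to visit; the
recursive calls that follow then read those same blocks synchronously, so
they are typically already in the page cache or in flight by the time the
recursion reaches them. Below is a minimal sketch of that opportunistic
read-ahead idiom, pulled out of the loop for clarity and assuming it lives
inside fs/gfs2 (so the gfs2 headers are in scope); gfs2_start_ra() is a
hypothetical helper name, not part of the patch, while the buffer-head
calls are the ones the patch itself uses.

	/*
	 * Sketch only: a hypothetical helper wrapping the read-ahead
	 * idiom from the hunk above.
	 */
	static void gfs2_start_ra(struct gfs2_inode *ip, u64 bn)
	{
		struct buffer_head *rabh;

		/* Find or create the buffer head for this indirect block. */
		rabh = gfs2_getbuf(ip->i_gl, bn, CREATE);

		/* Never block: if someone else holds the lock, skip the hint. */
		if (trylock_buffer(rabh)) {
			if (buffer_uptodate(rabh)) {
				/* Already cached; nothing to read. */
				unlock_buffer(rabh);
			} else {
				/* Asynchronous read-ahead; end_buffer_read_sync
				 * unlocks the buffer and drops our reference
				 * when the I/O completes. */
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(READA | REQ_META, rabh);
				return;
			}
		}
		brelse(rabh);
	}

Because trylock_buffer() never sleeps and the I/O is submitted as READA, a
skipped or failed read-ahead only costs a missed optimization; correctness
still comes from the synchronous reads performed by the recursion itself.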
