Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.
Download ZIP
Browse files

LU-723 ldiskfs: remove ext3 RHEL5 kernel series

Remove the old ext3 RHEL5 kernel patch series.  This has been
deprecated since Lustre 1.8.6 in favour of the ext4 RHEL5 series.

Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
Change-Id: I1e09ff432f2e970446c3b43fb92f0c1a988159ae
Reviewed-on: http://review.whamcloud.com/1603
Tested-by: Hudson
Reviewed-by: Johann Lombardi <johann@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
  • Loading branch information...
commit a16cc282ea41d887be28e0fb32dad4753e013a32 1 parent fd21a6c
Andreas Dilger authored and morrone committed
Showing with 7 additions and 14,763 deletions.
  1. +0 −1  build/autoconf/lustre-build-linux.m4
  2. +7 −18 ldiskfs/configure.ac
  3. +0 −99 ldiskfs/kernel_patches/patches/alloc-policy-2.6-rhlel5.diff
  4. +0 −35 ldiskfs/kernel_patches/patches/export-ext3-2.6-rhel4.patch
  5. +0 −56 ldiskfs/kernel_patches/patches/ext3-big-endian-check-2.6-rhel5.patch
  6. +0 −169 ldiskfs/kernel_patches/patches/ext3-block-bitmap-validation-2.6-rhel5.patch
  7. +0 −145 ldiskfs/kernel_patches/patches/ext3-corrupted-orphans-2.6.patch
  8. +0 −153 ldiskfs/kernel_patches/patches/ext3-disable-mb-cache.patch
  9. +0 −33 ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6-rhel5.patch
  10. +0 −278 ldiskfs/kernel_patches/patches/ext3-dynlocks-common.patch
  11. +0 −27 ldiskfs/kernel_patches/patches/ext3-ea-expand-lose-block.patch
  12. +0 −135 ldiskfs/kernel_patches/patches/ext3-export-64bit-name-hash.patch
  13. +0 −2,903 ldiskfs/kernel_patches/patches/ext3-extents-2.6.18-vanilla.patch
  14. +0 −96 ldiskfs/kernel_patches/patches/ext3-fiemap-2.6-rhel5.patch
  15. +0 −57 ldiskfs/kernel_patches/patches/ext3-force_over_8tb-rhel5.patch
  16. +0 −49 ldiskfs/kernel_patches/patches/ext3-get-raid-stripe-from-sb.patch
  17. +0 −87 ldiskfs/kernel_patches/patches/ext3-hash-indexed-dir-dotdot-update.patch
  18. +0 −53 ldiskfs/kernel_patches/patches/ext3-include-fixes-2.6-rhel4.patch
  19. +0 −466 ldiskfs/kernel_patches/patches/ext3-inode-version-2.6.18-vanilla.patch
  20. +0 −81 ldiskfs/kernel_patches/patches/ext3-journal-chksum-2.6.18-vanilla.patch
  21. +0 −232 ldiskfs/kernel_patches/patches/ext3-kill-dx_root.patch
  22. +0 −43 ldiskfs/kernel_patches/patches/ext3-lookup-dotdot-2.6.9.patch
  23. +0 −86 ldiskfs/kernel_patches/patches/ext3-map_inode_page-2.6.18.patch
  24. +0 −179 ldiskfs/kernel_patches/patches/ext3-max-dir-size.patch
  25. +0 −150 ldiskfs/kernel_patches/patches/ext3-mballoc-pa_free-mismatch.patch
  26. +0 −608 ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.18.patch
  27. +0 −4,777 ldiskfs/kernel_patches/patches/ext3-mballoc3-core.patch
  28. +0 −593 ldiskfs/kernel_patches/patches/ext3-mmp-2.6.18-vanilla.patch
  29. +0 −405 ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.18-vanilla.patch
  30. +0 −158 ldiskfs/kernel_patches/patches/ext3-nlinks-2.6-rhel5.patch
  31. +0 −65 ldiskfs/kernel_patches/patches/ext3-osd-iam-exports.patch
  32. +0 −228 ldiskfs/kernel_patches/patches/ext3-osd-iop-common.patch
  33. +0 −68 ldiskfs/kernel_patches/patches/ext3-pdir-fix.patch
  34. +0 −16 ldiskfs/kernel_patches/patches/ext3-print-inum-in-htree-warning.patch
  35. +0 −29 ldiskfs/kernel_patches/patches/ext3-remove-cond_resched-calls-2.6.12.patch
  36. +0 −70 ldiskfs/kernel_patches/patches/ext3-statfs-2.6-rhel5.patch
  37. +0 −699 ldiskfs/kernel_patches/patches/ext3-uninit-2.6.18.patch
  38. +0 −19 ldiskfs/kernel_patches/patches/ext3-version-2.6-rhel5.patch
  39. +0 −205 ldiskfs/kernel_patches/patches/ext3-wantedi-2.6-rhel4.patch
  40. +0 −32 ldiskfs/kernel_patches/patches/ext3-xattr-no-update-ctime-2.6.22-vanilla.patch
  41. +0 −539 ldiskfs/kernel_patches/patches/ext3_data_in_dirent.patch
  42. +0 −62 ldiskfs/kernel_patches/patches/ext3_fix_i_flags.patch
  43. +0 −519 ldiskfs/kernel_patches/patches/ext4-convert-group-lock-rhel5.patch
  44. +0 −40 ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel5.series
View
1  build/autoconf/lustre-build-linux.m4
@@ -116,7 +116,6 @@ LB_LINUX_TRY_COMPILE([
#error "not redhat kernel"
#endif
],[
- RHEL_KENEL="yes"
RHEL_KERNEL="yes"
AC_MSG_RESULT([yes])
],[
View
25 ldiskfs/configure.ac
@@ -116,27 +116,16 @@ if test x$enable_dist != xyes; then
# don't need to do this if only configuring for make dist
AC_MSG_CHECKING([which ldiskfs series to use])
case $LINUXRELEASE in
-2.6.5*) LDISKFS_SERIES="2.6-suse.series" ;;
-2.6.9*) LDISKFS_SERIES="2.6-rhel4.series" ;;
-2.6.10-ac*) LDISKFS_SERIES="2.6-fc3.series" ;;
-2.6.10*) LDISKFS_SERIES="2.6-rhel4.series" ;;
-2.6.12*) LDISKFS_SERIES="2.6.12-vanilla.series" ;;
-2.6.15*) LDISKFS_SERIES="2.6-fc5.series";;
-2.6.16*) LDISKFS_SERIES="2.6-sles10.series";;
2.6.18*)
- if test x$RHEL_KENEL = xyes; then
- if test x$enable_ext4 = xyes; then
- LDISKFS_SERIES="2.6-rhel5-ext4.series"
- else
- LDISKFS_SERIES="2.6-rhel5.series"
- fi
- else
- LDISKFS_SERIES="2.6.18-vanilla.series"
+ if test x$RHEL_KERNEL = xyes; then
+ LDISKFS_SERIES="2.6-rhel5-ext4.series"
+ fi
+ ;;
+2.6.32*)
+ if test x$RHEL_KERNEL = xyes; then
+ LDISKFS_SERIES="2.6-rhel6.series"
fi
;;
-2.6.22*) LDISKFS_SERIES="2.6.22-vanilla.series";;
-2.6.27*) LDISKFS_SERIES="2.6-sles11.series";;
-2.6.32*) LDISKFS_SERIES="2.6-rhel6.series";;
*) AC_MSG_WARN([Unknown kernel version $LINUXRELEASE, fix ldiskfs/configure.ac])
esac
AC_MSG_RESULT([$LDISKFS_SERIES])
View
99 ldiskfs/kernel_patches/patches/alloc-policy-2.6-rhlel5.diff
@@ -1,99 +0,0 @@
-diff -Nrpu /tmp/linux-stage/fs/ext3/ialloc.c linux-stage/fs/ext3/ialloc.c
---- /tmp/linux-stage/fs/ext3/ialloc.c 2009-05-09 06:44:02.000000000 +0400
-+++ linux-stage/fs/ext3/ialloc.c 2009-05-09 06:45:33.000000000 +0400
-@@ -822,6 +822,36 @@ fail_drop:
- return ERR_PTR(err);
- }
-
-+unsigned long ext3_find_reverse(struct super_block *sb)
-+{
-+ struct ext3_group_desc *desc;
-+ struct buffer_head *bitmap_bh = NULL;
-+ int group;
-+ unsigned long ino, offset;
-+
-+ for (offset = (EXT3_INODES_PER_GROUP(sb) >> 1); offset >= 0;
-+ offset >>= 1) {
-+ for (group = EXT3_SB(sb)->s_groups_count - 1; group >= 0;
-+ --group) {
-+ desc = ext3_get_group_desc(sb, group, NULL);
-+ if (desc->bg_free_inodes_count == 0)
-+ continue;
-+
-+ bitmap_bh = read_inode_bitmap(sb, group);
-+ if (!bitmap_bh)
-+ continue;
-+
-+ ino = ext3_find_next_zero_bit((unsigned long *)
-+ bitmap_bh->b_data,
-+ EXT3_INODES_PER_GROUP(sb), offset);
-+ if (ino < EXT3_INODES_PER_GROUP(sb))
-+ return(group * EXT3_INODES_PER_GROUP(sb) +
-+ ino + 1);
-+ }
-+ }
-+ return 0;
-+}
-+
- /* Verify that we are loading a valid orphan from disk */
- struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
- {
-diff -Nrpu /tmp/linux-stage/fs/ext3/namei.c linux-stage/fs/ext3/namei.c
---- /tmp/linux-stage/fs/ext3/namei.c 2009-05-09 06:44:02.000000000 +0400
-+++ linux-stage/fs/ext3/namei.c 2009-05-09 06:45:33.000000000 +0400
-@@ -145,14 +145,25 @@ struct dx_map_entry
- u32 offs;
- };
-
-+/*
-+ * dentry_param used by ext3_new_inode_wantedi()
-+ */
- #define LVFS_DENTRY_PARAM_MAGIC 20070216UL
- struct lvfs_dentry_params
- {
-- unsigned long p_inum;
-- void *p_ptr;
-- u32 magic;
-+ unsigned long ldp_inum;
-+ long ldp_flags;
-+ u32 ldp_magic;
- };
-
-+/* Only use the least 3 bits of ldp_flags for goal policy */
-+typedef enum {
-+ DP_GOAL_POLICY = 0,
-+ DP_LASTGROUP_REVERSE = 1,
-+} dp_policy_t;
-+
-+#define LDP_FLAGS_RANGE 0x07
-+
- #ifdef CONFIG_EXT3_INDEX
- static inline unsigned dx_get_block (struct dx_entry *entry);
- static void dx_set_block (struct dx_entry *entry, unsigned value);
-@@ -1718,8 +1727,13 @@ static struct inode * ext3_new_inode_wan
- if (dentry->d_fsdata != NULL) {
- struct lvfs_dentry_params *param = dentry->d_fsdata;
-
-- if (param->magic == LVFS_DENTRY_PARAM_MAGIC)
-- inum = param->p_inum;
-+ if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC) {
-+ if ((dp_policy_t)(param->ldp_flags & LDP_FLAGS_RANGE) ==
-+ DP_LASTGROUP_REVERSE)
-+ inum = ext3_find_reverse(dir->i_sb);
-+ else /* DP_GOAL_POLICY */
-+ inum = param->ldp_inum;
-+ }
- }
- return ext3_new_inode(handle, dir, mode, inum);
- }
-diff -Nrpu /tmp/linux-stage/include/linux/ext3_fs.h linux-stage/include/linux/ext3_fs.h
---- /tmp/linux-stage/include/linux/ext3_fs.h 2009-05-09 06:44:02.000000000 +0400
-+++ linux-stage/include/linux/ext3_fs.h 2009-05-09 06:45:33.000000000 +0400
-@@ -973,6 +973,7 @@ extern int ext3fs_dirhash(const char *na
- /* ialloc.c */
- extern struct inode * ext3_new_inode (handle_t *, struct inode *, int,
- unsigned long);
-+extern unsigned long ext3_find_reverse(struct super_block *);
- extern void ext3_free_inode (handle_t *, struct inode *);
- extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
- extern unsigned long ext3_count_free_inodes (struct super_block *);
View
35 ldiskfs/kernel_patches/patches/export-ext3-2.6-rhel4.patch
@@ -1,35 +0,0 @@
-Index: linux-2.6.9-42.0.10.EL_lustre.1.4.10/fs/ext3/super.c
-===================================================================
---- linux-2.6.9-42.0.10.EL_lustre.1.4.10.orig/fs/ext3/super.c 2007-05-16 08:46:24.000000000 +0200
-+++ linux-2.6.9-42.0.10.EL_lustre.1.4.10/fs/ext3/super.c 2007-05-16 08:48:58.000000000 +0200
-@@ -123,6 +123,8 @@ void ext3_journal_abort_handle(const cha
- journal_abort_handle(handle);
- }
-
-+EXPORT_SYMBOL(ext3_journal_abort_handle);
-+
- /* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
-@@ -2064,6 +2066,8 @@ int ext3_force_commit(struct super_block
- return ret;
- }
-
-+EXPORT_SYMBOL(ext3_force_commit);
-+
- /*
- * Ext3 always journals updates to the superblock itself, so we don't
- * have to propagate any other updates to the superblock on disk at this
-@@ -2586,6 +2590,12 @@ int ext3_map_inode_page(struct inode *in
- unsigned long *blocks, int *created, int create);
- EXPORT_SYMBOL(ext3_map_inode_page);
-
-+EXPORT_SYMBOL(ext3_xattr_get);
-+EXPORT_SYMBOL(ext3_xattr_set_handle);
-+EXPORT_SYMBOL(ext3_bread);
-+EXPORT_SYMBOL(ext3_journal_start_sb);
-+EXPORT_SYMBOL(__ext3_journal_stop);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
- MODULE_LICENSE("GPL");
View
56 ldiskfs/kernel_patches/patches/ext3-big-endian-check-2.6-rhel5.patch
@@ -1,56 +0,0 @@
-Index: linux-2.6.18-92.1.6/fs/ext3/super.c
-===================================================================
---- linux-2.6.18-92.1.6.orig/fs/ext3/super.c
-+++ linux-2.6.18-92.1.6/fs/ext3/super.c
-@@ -71,6 +71,8 @@ static void ext3_unlockfs(struct super_b
- static void ext3_write_super (struct super_block * sb);
- static void ext3_write_super_lockfs(struct super_block *sb);
-
-+static int bigendian_extents;
-+
- /*
- * Wrappers for journal_start/end.
- *
-@@ -706,7 +708,7 @@ enum {
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_grpquota,
-- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_extents, Opt_noextents, Opt_bigendian_extents, Opt_extdebug,
- Opt_mballoc, Opt_nomballoc, Opt_stripe, Opt_maxdirsize
- };
-
-@@ -766,6 +768,7 @@ static match_table_t tokens = {
- {Opt_barrier, "barrier=%u"},
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
-+ {Opt_bigendian_extents, "bigendian_extents"},
- {Opt_extdebug, "extdebug"},
- {Opt_mballoc, "mballoc"},
- {Opt_nomballoc, "nomballoc"},
-@@ -1129,6 +1132,9 @@ clear_qf_name:
- case Opt_noextents:
- clear_opt (sbi->s_mount_opt, EXTENTS);
- break;
-+ case Opt_bigendian_extents:
-+ bigendian_extents = 1;
-+ break;
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-@@ -2224,6 +2230,15 @@ static int ext3_fill_super (struct super
- goto failed_mount;
- }
-
-+#ifdef __BIG_ENDIAN
-+ if (bigendian_extents == 0) {
-+ printk(KERN_ERR "EXT3-fs: extents feature is not guaranteed to "
-+ "work on big-endian systems. Use \"bigendian_extents\" "
-+ "mount option to override.\n");
-+ goto failed_mount;
-+ }
-+#endif
-+
- bgl_lock_init(&sbi->s_blockgroup_lock);
-
- sbi->s_last_alloc_group = -1;
View
169 ldiskfs/kernel_patches/patches/ext3-block-bitmap-validation-2.6-rhel5.patch
@@ -1,169 +0,0 @@
- fs/ext3/balloc.c | 99 ++++++++++++++++++++++++++++++++++++++++++++----------
- 1 files changed, 81 insertions(+), 18 deletions(-)
-diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
-index ff3428e..a9140ea 100644
-Index: linux-stage/fs/ext3/balloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/balloc.c
-+++ linux-stage/fs/ext3/balloc.c
-@@ -143,9 +143,96 @@ unsigned ext3_init_block_bitmap(struct s
- return free_blocks - sbi->s_itb_per_group - 2;
- }
-
--/*
-- * Read the bitmap for a given block_group, reading into the specified
-- * slot in the superblock's bitmap cache.
-+/**
-+* bh_uptodate_or_lock: Test whether the buffer is uptodate
-+* @bh: struct buffer_head
-+*
-+* Return true if the buffer is up-to-date and false,
-+* with the buffer locked, if not.
-+*/
-+int bh_uptodate_or_lock(struct buffer_head *bh)
-+{
-+ if (!buffer_uptodate(bh)) {
-+ lock_buffer(bh);
-+ if (!buffer_uptodate(bh))
-+ return 0;
-+ unlock_buffer(bh);
-+ }
-+ return 1;
-+}
-+
-+/**
-+* bh_submit_read: Submit a locked buffer for reading
-+* @bh: struct buffer_head
-+*
-+* Returns a negative error
-+*/
-+int bh_submit_read(struct buffer_head *bh)
-+{
-+ if (!buffer_locked(bh))
-+ lock_buffer(bh);
-+ if (buffer_uptodate(bh))
-+ return 0;
-+ get_bh(bh);
-+ bh->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh);
-+ wait_on_buffer(bh);
-+ if (buffer_uptodate(bh))
-+ return 0;
-+ return -EIO;
-+}
-+
-+static int ext3_valid_block_bitmap(struct super_block *sb,
-+ struct ext3_group_desc *desc,
-+ unsigned int block_group,
-+ struct buffer_head *bh)
-+{
-+ ext3_grpblk_t offset;
-+ ext3_grpblk_t next_zero_bit;
-+ ext3_fsblk_t bitmap_blk;
-+ ext3_fsblk_t group_first_block;
-+
-+ group_first_block = ext3_group_first_block_no(sb, block_group);
-+
-+ /* check whether block bitmap block number is set */
-+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-+ offset = bitmap_blk - group_first_block;
-+ if (!ext3_test_bit(offset, bh->b_data))
-+ /* bad block bitmap */
-+ goto err_out;
-+
-+ /* check whether the inode bitmap block number is set */
-+ bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
-+ offset = bitmap_blk - group_first_block;
-+ if (!ext3_test_bit(offset, bh->b_data))
-+ /* bad block bitmap */
-+ goto err_out;
-+
-+ /* check whether the inode table block number is set */
-+ bitmap_blk = le32_to_cpu(desc->bg_inode_table);
-+ offset = bitmap_blk - group_first_block;
-+ next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
-+ offset + EXT3_SB(sb)->s_itb_per_group,
-+ offset);
-+ if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
-+ /* good bitmap for inode tables */
-+ return 1;
-+
-+err_out:
-+ ext3_error(sb, __FUNCTION__,
-+ "Invalid block bitmap - "
-+ "block_group = %d, block = %lu",
-+ (int)block_group, bitmap_blk);
-+ return 0;
-+}
-+
-+/**
-+ * read_block_bitmap()
-+ * @sb: super block
-+ * @block_group: given block group
-+ *
-+ * Read the bitmap for a given block_group,and validate the
-+ * bits for block/inode/inode tables are set in the bitmaps.
- *
- * Return buffer_head on success or NULL in case of failure.
- */
-@@ -154,29 +241,42 @@ read_block_bitmap(struct super_block *sb
- {
- struct ext3_group_desc * desc;
- struct buffer_head * bh = NULL;
-+ ext3_fsblk_t bitmap_blk;
-
- desc = ext3_get_group_desc (sb, block_group, NULL);
- if (!desc)
-- goto error_out;
-+ return NULL;
-+ bitmap_blk = desc->bg_block_bitmap;
-+ bh = sb_getblk(sb, bitmap_blk);
-+ if (unlikely(!bh)) {
-+ ext3_error(sb, __FUNCTION__,
-+ "Can not read block bitmap - "
-+ "block group = %d, block_bitmap = %lu",
-+ (int)block_group, bitmap_blk);
-+ return NULL;
-+ }
-+ if (bh_uptodate_or_lock(bh))
-+ return bh;
-+
- if (desc->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
-- bh = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-- if (!buffer_uptodate(bh)) {
-- lock_buffer(bh);
-- if (!buffer_uptodate(bh)) {
-- ext3_init_block_bitmap(sb, bh,block_group,desc);
-- set_buffer_uptodate(bh);
-- }
-- unlock_buffer(bh);
-- }
-- } else {
-- bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ ext3_init_block_bitmap(sb, bh, block_group, desc);
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+ return bh;
-+ }
-+ if (bh_submit_read(bh) < 0) {
-+ brelse(bh);
-+ ext3_error(sb, __FUNCTION__,
-+ "Cannot read block bitmap - "
-+ "block group = %d block_bitmap = %lu",
-+ (int)block_group, bitmap_blk);
-+ return NULL;
-+ }
-+ if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) {
-+ brelse(bh);
-+ return NULL;
- }
-- if (!bh)
-- ext3_error (sb, "read_block_bitmap",
-- "Cannot read block bitmap - "
-- "block_group = %d, block_bitmap = %u",
-- block_group, le32_to_cpu(desc->bg_block_bitmap));
--error_out:
-+
- return bh;
- }
- /*
View
145 ldiskfs/kernel_patches/patches/ext3-corrupted-orphans-2.6.patch
@@ -1,145 +0,0 @@
---- linux-2.6.18-128.7.1.orig/include/linux/ext3_fs.h 2006-09-19 23:42:06.000000000 -0400
-+++ linux-2.6.18-128.7.1/include/linux/ext3_fs.h 2009-10-12 19:37:54.000000000 -0400
-@@ -809,6 +809,7 @@ extern void ext3_discard_reservation (st
- extern void ext3_dirty_inode(struct inode *);
- extern int ext3_change_inode_journal_flag(struct inode *, int);
- extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
-+extern int ext3_can_truncate(struct inode *inode);
- extern void ext3_truncate (struct inode *);
- extern void ext3_set_inode_flags(struct inode *);
- extern void ext3_set_aops(struct inode *inode);
---- linux-2.6.18-128.7.1.orig/fs/ext3/inode.c 2009-09-15 10:38:31.000000000 -0400
-+++ linux-2.6.18-128.7.1/fs/ext3/inode.c 2009-10-12 18:49:01.000000000 -0400
-@@ -2194,6 +2194,19 @@ static void ext3_free_branches(handle_t
- }
- }
-
-+int ext3_can_truncate(struct inode *inode)
-+{
-+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-+ return 0;
-+ if (S_ISREG(inode->i_mode))
-+ return 1;
-+ if (S_ISDIR(inode->i_mode))
-+ return 1;
-+ if (S_ISLNK(inode->i_mode))
-+ return !ext3_inode_is_fast_symlink(inode);
-+ return 0;
-+}
-+
- /*
- * ext3_truncate()
- *
-@@ -2238,12 +2251,7 @@ void ext3_truncate(struct inode *inode)
- unsigned blocksize = inode->i_sb->s_blocksize;
- struct page *page;
-
-- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-- S_ISLNK(inode->i_mode)))
-- goto out_notrans;
-- if (ext3_inode_is_fast_symlink(inode))
-- goto out_notrans;
-- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-+ if (!ext3_can_truncate(inode))
- goto out_notrans;
-
- /*
---- linux-2.6.18-128.7.1.orig/fs/ext3/ialloc.c 2009-09-15 10:24:17.000000000 -0400
-+++ linux-2.6.18-128.7.1/fs/ext3/ialloc.c 2009-10-14 15:36:27.000000000 -0400
-@@ -645,54 +645,71 @@ struct inode *ext3_orphan_get(struct sup
- unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
- unsigned long block_group;
- int bit;
-- struct buffer_head *bitmap_bh = NULL;
-+ struct buffer_head *bitmap_bh;
- struct inode *inode = NULL;
-
- /* Error cases - e2fsck has already cleaned up for us */
- if (ino > max_ino) {
-- ext3_warning(sb, __FUNCTION__,
-+ ext3_warning(sb, __func__,
- "bad orphan ino %lu! e2fsck was run?", ino);
-- goto out;
-+ goto error;
- }
-
- block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
- bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
- bitmap_bh = read_inode_bitmap(sb, block_group);
- if (!bitmap_bh) {
-- ext3_warning(sb, __FUNCTION__,
-+ ext3_warning(sb, __func__,
- "inode bitmap error for orphan %lu", ino);
-- goto out;
-+ goto error;
- }
-
- /* Having the inode bit set should be a 100% indicator that this
- * is a valid orphan (no e2fsck run on fs). Orphans also include
- * inodes that were being truncated, so we can't check i_nlink==0.
- */
-- if (!ext3_test_bit(bit, bitmap_bh->b_data) ||
-- !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
-- NEXT_ORPHAN(inode) > max_ino) {
-- ext3_warning(sb, __FUNCTION__,
-- "bad orphan inode %lu! e2fsck was run?", ino);
-- printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
-- bit, (unsigned long long)bitmap_bh->b_blocknr,
-- ext3_test_bit(bit, bitmap_bh->b_data));
-- printk(KERN_NOTICE "inode=%p\n", inode);
-- if (inode) {
-- printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
-- is_bad_inode(inode));
-- printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
-- NEXT_ORPHAN(inode));
-- printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-- }
-+ if (!ext3_test_bit(bit, bitmap_bh->b_data))
-+ goto bad_orphan;
-+
-+ inode = iget(sb, ino);
-+ if ((inode == NULL) || is_bad_inode(inode))
-+ goto bad_orphan;
-+
-+ /*
-+ * If the orphan has i_nlinks > 0 then it should be able to be
-+ * truncated, otherwise it won't be removed from the orphan list
-+ * during processing and an infinite loop will result.
-+ */
-+ if (inode->i_nlink && !ext3_can_truncate(inode))
-+ goto bad_orphan;
-+
-+ if (NEXT_ORPHAN(inode) > max_ino)
-+ goto bad_orphan;
-+ brelse(bitmap_bh);
-+ return inode;
-+
-+bad_orphan:
-+ ext3_warning(sb, __func__,
-+ "bad orphan inode %lu! e2fsck was run?", ino);
-+ printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
-+ bit, (unsigned long long)bitmap_bh->b_blocknr,
-+ ext3_test_bit(bit, bitmap_bh->b_data));
-+ printk(KERN_NOTICE "inode=%p\n", inode);
-+ if (inode) {
-+ printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
-+ is_bad_inode(inode));
-+ printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
-+ NEXT_ORPHAN(inode));
-+ printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-+ printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
- /* Avoid freeing blocks if we got a bad deleted inode */
-- if (inode && inode->i_nlink == 0)
-+ if (inode->i_nlink == 0)
- inode->i_blocks = 0;
- iput(inode);
-- inode = NULL;
- }
--out:
- brelse(bitmap_bh);
-- return inode;
-+error:
-+ return NULL;
- }
-
- unsigned long ext3_count_free_inodes (struct super_block * sb)
View
153 ldiskfs/kernel_patches/patches/ext3-disable-mb-cache.patch
@@ -1,153 +0,0 @@
-Index: linux-stage/fs/ext3/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.c
-+++ linux-stage/fs/ext3/xattr.c
-@@ -93,7 +93,8 @@
- # define ea_bdebug(f...)
- #endif
-
--static void ext3_xattr_cache_insert(struct buffer_head *);
-+static void ext3_xattr_cache_insert(struct super_block *,
-+ struct buffer_head *);
- static struct buffer_head *ext3_xattr_cache_find(struct inode *,
- struct ext3_xattr_header *,
- struct mb_cache_entry **);
-@@ -238,7 +239,7 @@ bad_block: ext3_error(inode->i_sb, __FUN
- error = -EIO;
- goto cleanup;
- }
-- ext3_xattr_cache_insert(bh);
-+ ext3_xattr_cache_insert(inode->i_sb, bh);
- entry = BFIRST(bh);
- error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
- if (error == -EIO)
-@@ -380,7 +381,7 @@ ext3_xattr_block_list(struct inode *inod
- error = -EIO;
- goto cleanup;
- }
-- ext3_xattr_cache_insert(bh);
-+ ext3_xattr_cache_insert(inode->i_sb, bh);
- error = ext3_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
-
- cleanup:
-@@ -479,7 +480,9 @@ ext3_xattr_release_block(handle_t *handl
- {
- struct mb_cache_entry *ce = NULL;
-
-- ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
-+ if (!test_opt(inode->i_sb, NO_MBCACHE))
-+ ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev,
-+ bh->b_blocknr);
- if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
- ea_bdebug(bh, "refcount now=0; freeing");
- if (ce)
-@@ -699,8 +702,10 @@ ext3_xattr_block_set(handle_t *handle, s
- if (i->value && i->value_len > sb->s_blocksize)
- return -ENOSPC;
- if (s->base) {
-- ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
-- bs->bh->b_blocknr);
-+ if (!test_opt(inode->i_sb, NO_MBCACHE))
-+ ce = mb_cache_entry_get(ext3_xattr_cache,
-+ bs->bh->b_bdev,
-+ bs->bh->b_blocknr);
- if (header(s->base)->h_refcount == cpu_to_le32(1)) {
- if (ce) {
- mb_cache_entry_free(ce);
-@@ -716,7 +721,7 @@ ext3_xattr_block_set(handle_t *handle, s
- if (!IS_LAST_ENTRY(s->first))
- ext3_xattr_rehash(header(s->base),
- s->here);
-- ext3_xattr_cache_insert(bs->bh);
-+ ext3_xattr_cache_insert(sb, bs->bh);
- }
- unlock_buffer(bs->bh);
- if (error == -EIO)
-@@ -797,7 +802,8 @@ inserted:
- if (error)
- goto cleanup_dquot;
- }
-- mb_cache_entry_release(ce);
-+ if (ce)
-+ mb_cache_entry_release(ce);
- ce = NULL;
- } else if (bs->bh && s->base == bs->bh->b_data) {
- /* We were modifying this block in-place. */
-@@ -832,7 +838,7 @@ getblk_failed:
- memcpy(new_bh->b_data, s->base, new_bh->b_size);
- set_buffer_uptodate(new_bh);
- unlock_buffer(new_bh);
-- ext3_xattr_cache_insert(new_bh);
-+ ext3_xattr_cache_insert(sb, new_bh);
- error = ext3_journal_dirty_metadata(handle, new_bh);
- if (error)
- goto cleanup;
-@@ -1387,12 +1393,15 @@ ext3_xattr_put_super(struct super_block
- * Returns 0, or a negative error number on failure.
- */
- static void
--ext3_xattr_cache_insert(struct buffer_head *bh)
-+ext3_xattr_cache_insert(struct super_block *sb, struct buffer_head *bh)
- {
- __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
- struct mb_cache_entry *ce;
- int error;
-
-+ if (test_opt(sb, NO_MBCACHE))
-+ return;
-+
- ce = mb_cache_entry_alloc(ext3_xattr_cache);
- if (!ce) {
- ea_bdebug(bh, "out of memory");
-@@ -1466,6 +1475,8 @@ ext3_xattr_cache_find(struct inode *inod
- __u32 hash = le32_to_cpu(header->h_hash);
- struct mb_cache_entry *ce;
-
-+ if (test_opt(inode->i_sb, NO_MBCACHE))
-+ return NULL;
- if (!header->h_hash)
- return NULL; /* never share */
- ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c
-+++ linux-stage/fs/ext3/super.c
-@@ -722,6 +722,7 @@ enum {
- Opt_grpquota,
- Opt_extents, Opt_noextents, Opt_bigendian_extents, Opt_extdebug,
- Opt_mballoc, Opt_nomballoc, Opt_stripe, Opt_maxdirsize, Opt_force_over_8tb,
-+ Opt_no_mbcache,
- };
-
- static match_table_t tokens = {
-@@ -788,6 +789,7 @@ static match_table_t tokens = {
- {Opt_force_over_8tb, "force_over_8tb"},
- {Opt_resize, "resize"},
- {Opt_maxdirsize, "maxdirsize=%u"},
-+ {Opt_no_mbcache, "no_mbcache"},
- {Opt_err, NULL}
- };
-
-@@ -1175,6 +1177,9 @@ clear_qf_name:
- case Opt_force_over_8tb:
- force_over_8tb = 1;
- break;
-+ case Opt_no_mbcache:
-+ set_opt(sbi->s_mount_opt, NO_MBCACHE);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h
-+++ linux-stage/include/linux/ext3_fs.h
-@@ -483,6 +483,8 @@ do { \
- #define EXT3_MOUNT_JOURNAL_ASYNC_COMMIT 0x20000000 /* Journal Async Commit */
- #endif
-
-+#define EXT3_MOUNT_NO_MBCACHE 0x40000000 /* Disable mbcache */
-+
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
- #define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
View
33 ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6-rhel5.patch
@@ -1,33 +0,0 @@
-diff -rupN linux-2.6.18-128.1.6_1/fs/ext3/Makefile linux-2.6.18-128.1.6_2/fs/ext3/Makefile
---- linux-2.6.18-128.1.6_1/fs/ext3/Makefile 2009-08-13 19:19:54.000000000 +0530
-+++ linux-2.6.18-128.1.6_2/fs/ext3/Makefile 2009-08-13 19:20:30.000000000 +0530
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o mballoc.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+ mballoc.o dynlocks.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-diff -rupN linux-2.6.18-128.1.6_1/fs/ext3/super.c linux-2.6.18-128.1.6_2/fs/ext3/super.c
---- linux-2.6.18-128.1.6_1/fs/ext3/super.c 2009-08-13 19:19:54.000000000 +0530
-+++ linux-2.6.18-128.1.6_2/fs/ext3/super.c 2009-08-13 19:23:23.000000000 +0530
-@@ -3529,6 +3530,7 @@ static int __init init_ext3_fs(void)
- err = init_inodecache();
- if (err)
- goto out1;
-+ dynlock_cache_init();
- err = register_filesystem(&ext3_fs_type);
- if (err)
- goto out;
-@@ -3546,6 +3548,7 @@ out1:
- static void __exit exit_ext3_fs(void)
- {
- unregister_filesystem(&ext3_fs_type);
-+ dynlock_cache_exit();
- destroy_inodecache();
- exit_ext3_xattr();
- exit_ext3_proc();
-
View
278 ldiskfs/kernel_patches/patches/ext3-dynlocks-common.patch
@@ -1,278 +0,0 @@
-diff -rupN linux-2.6.18-128.1.6_1/fs/ext3/dynlocks.c linux-2.6.18-128.1.6_2/fs/ext3/dynlocks.c
---- linux-2.6.18-128.1.6_1/fs/ext3/dynlocks.c 1970-01-01 05:30:00.000000000 +0530
-+++ linux-2.6.18-128.1.6_2/fs/ext3/dynlocks.c 2009-08-13 20:42:59.000000000 +0530
-@@ -0,0 +1,236 @@
-+/*
-+ * Dynamic Locks
-+ *
-+ * struct dynlock is lockspace
-+ * one may request lock (exclusive or shared) for some value
-+ * in that lockspace
-+ *
-+ */
-+
-+#include <linux/dynlocks.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+
-+#define DYNLOCK_HANDLE_MAGIC 0xd19a10c
-+#define DYNLOCK_HANDLE_DEAD 0xd1956ee
-+#define DYNLOCK_LIST_MAGIC 0x11ee91e6
-+
-+static kmem_cache_t * dynlock_cachep = NULL;
-+
-+struct dynlock_handle {
-+ unsigned dh_magic;
-+ struct list_head dh_list;
-+ unsigned long dh_value; /* lock value */
-+ int dh_refcount; /* number of users */
-+ int dh_readers;
-+ int dh_writers;
-+ int dh_pid; /* holder of the lock */
-+ wait_queue_head_t dh_wait;
-+};
-+
-+int __init dynlock_cache_init(void)
-+{
-+ int rc = 0;
-+
-+ printk(KERN_INFO "init dynlocks cache\n");
-+ dynlock_cachep = kmem_cache_create("dynlock_cache",
-+ sizeof(struct dynlock_handle),
-+ 0,
-+ SLAB_HWCACHE_ALIGN,
-+ NULL, NULL);
-+ if (dynlock_cachep == NULL) {
-+ printk(KERN_ERR "Not able to create dynlock cache");
-+ rc = -ENOMEM;
-+ }
-+ return rc;
-+}
-+
-+void __exit dynlock_cache_exit(void)
-+{
-+ printk(KERN_INFO "exit dynlocks cache\n");
-+ kmem_cache_destroy(dynlock_cachep);
-+}
-+
-+/*
-+ * dynlock_init
-+ *
-+ * initialize lockspace
-+ *
-+ */
-+void dynlock_init(struct dynlock *dl)
-+{
-+ spin_lock_init(&dl->dl_list_lock);
-+ INIT_LIST_HEAD(&dl->dl_list);
-+ dl->dl_magic = DYNLOCK_LIST_MAGIC;
-+}
-+EXPORT_SYMBOL(dynlock_init);
-+
-+/*
-+ * dynlock_lock
-+ *
-+ * acquires lock (exclusive or shared) in specified lockspace
-+ * each lock in lockspace is allocated separately, so user have
-+ * to specify GFP flags.
-+ * routine returns pointer to lock. this pointer is intended to
-+ * be passed to dynlock_unlock
-+ *
-+ */
-+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
-+ enum dynlock_type lt, gfp_t gfp)
-+{
-+ struct dynlock_handle *nhl = NULL;
-+ struct dynlock_handle *hl;
-+
-+ BUG_ON(dl == NULL);
-+ BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
-+
-+repeat:
-+ /* find requested lock in lockspace */
-+ spin_lock(&dl->dl_list_lock);
-+ BUG_ON(dl->dl_list.next == NULL);
-+ BUG_ON(dl->dl_list.prev == NULL);
-+ list_for_each_entry(hl, &dl->dl_list, dh_list) {
-+ BUG_ON(hl->dh_list.next == NULL);
-+ BUG_ON(hl->dh_list.prev == NULL);
-+ BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
-+ if (hl->dh_value == value) {
-+ /* lock is found */
-+ if (nhl) {
-+ /* someone else just allocated
-+ * lock we didn't find and just created
-+ * so, we drop our lock
-+ */
-+ kmem_cache_free(dynlock_cachep, nhl);
-+ nhl = NULL;
-+ }
-+ hl->dh_refcount++;
-+ goto found;
-+ }
-+ }
-+ /* lock not found */
-+ if (nhl) {
-+ /* we already have allocated lock. use it */
-+ hl = nhl;
-+ nhl = NULL;
-+ list_add(&hl->dh_list, &dl->dl_list);
-+ goto found;
-+ }
-+ spin_unlock(&dl->dl_list_lock);
-+
-+ /* lock not found and we haven't allocated lock yet. allocate it */
-+ nhl = kmem_cache_alloc(dynlock_cachep, gfp);
-+ if (nhl == NULL)
-+ return NULL;
-+ nhl->dh_refcount = 1;
-+ nhl->dh_value = value;
-+ nhl->dh_readers = 0;
-+ nhl->dh_writers = 0;
-+ nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
-+ init_waitqueue_head(&nhl->dh_wait);
-+
-+ /* while lock is being allocated, someone else may allocate it
-+ * and put onto to list. check this situation
-+ */
-+ goto repeat;
-+
-+found:
-+ if (lt == DLT_WRITE) {
-+ /* exclusive lock: user don't want to share lock at all
-+ * NOTE: one process may take the same lock several times
-+ * this functionaly is useful for rename operations */
-+ while ((hl->dh_writers && hl->dh_pid != current->pid) ||
-+ hl->dh_readers) {
-+ spin_unlock(&dl->dl_list_lock);
-+ wait_event(hl->dh_wait,
-+ hl->dh_writers == 0 && hl->dh_readers == 0);
-+ spin_lock(&dl->dl_list_lock);
-+ }
-+ hl->dh_writers++;
-+ } else {
-+ /* shared lock: user do not want to share lock with writer */
-+ while (hl->dh_writers) {
-+ spin_unlock(&dl->dl_list_lock);
-+ wait_event(hl->dh_wait, hl->dh_writers == 0);
-+ spin_lock(&dl->dl_list_lock);
-+ }
-+ hl->dh_readers++;
-+ }
-+ hl->dh_pid = current->pid;
-+ spin_unlock(&dl->dl_list_lock);
-+
-+ return hl;
-+}
-+EXPORT_SYMBOL(dynlock_lock);
-+
-+
-+/*
-+ * dynlock_unlock
-+ *
-+ * user have to specify lockspace (dl) and pointer to lock structure
-+ * returned by dynlock_lock()
-+ *
-+ */
-+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
-+{
-+ int wakeup = 0;
-+
-+ BUG_ON(dl == NULL);
-+ BUG_ON(hl == NULL);
-+ BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
-+
-+ if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
-+ printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
-+
-+ BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
-+ BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
-+
-+ spin_lock(&dl->dl_list_lock);
-+ if (hl->dh_writers) {
-+ BUG_ON(hl->dh_readers != 0);
-+ hl->dh_writers--;
-+ if (hl->dh_writers == 0)
-+ wakeup = 1;
-+ } else if (hl->dh_readers) {
-+ hl->dh_readers--;
-+ if (hl->dh_readers == 0)
-+ wakeup = 1;
-+ } else {
-+ BUG();
-+ }
-+ if (wakeup) {
-+ hl->dh_pid = 0;
-+ wake_up(&hl->dh_wait);
-+ }
-+ if (--(hl->dh_refcount) == 0) {
-+ hl->dh_magic = DYNLOCK_HANDLE_DEAD;
-+ list_del(&hl->dh_list);
-+ kmem_cache_free(dynlock_cachep, hl);
-+ }
-+ spin_unlock(&dl->dl_list_lock);
-+}
-+EXPORT_SYMBOL(dynlock_unlock);
-+
-+int dynlock_is_locked(struct dynlock *dl, unsigned long value)
-+{
-+ struct dynlock_handle *hl;
-+ int result = 0;
-+
-+ /* find requested lock in lockspace */
-+ spin_lock(&dl->dl_list_lock);
-+ BUG_ON(dl->dl_list.next == NULL);
-+ BUG_ON(dl->dl_list.prev == NULL);
-+ list_for_each_entry(hl, &dl->dl_list, dh_list) {
-+ BUG_ON(hl->dh_list.next == NULL);
-+ BUG_ON(hl->dh_list.prev == NULL);
-+ BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
-+ if (hl->dh_value == value && hl->dh_pid == current->pid) {
-+ /* lock is found */
-+ result = 1;
-+ break;
-+ }
-+ }
-+ spin_unlock(&dl->dl_list_lock);
-+ return result;
-+}
-+EXPORT_SYMBOL(dynlock_is_locked);
-diff -rupN linux-2.6.18-128.1.6_1/include/linux/dynlocks.h linux-2.6.18-128.1.6_2/include/linux/dynlocks.h
---- linux-2.6.18-128.1.6_1/include/linux/dynlocks.h 1970-01-01 05:30:00.000000000 +0530
-+++ linux-2.6.18-128.1.6_2/include/linux/dynlocks.h 2009-08-13 20:43:18.000000000 +0530
-@@ -0,0 +1,34 @@
-+#ifndef _LINUX_DYNLOCKS_H
-+#define _LINUX_DYNLOCKS_H
-+
-+#include <linux/list.h>
-+#include <linux/wait.h>
-+
-+struct dynlock_handle;
-+
-+/*
-+ * lock's namespace:
-+ * - list of locks
-+ * - lock to protect this list
-+ */
-+struct dynlock {
-+ unsigned dl_magic;
-+ struct list_head dl_list;
-+ spinlock_t dl_list_lock;
-+};
-+
-+enum dynlock_type {
-+ DLT_WRITE,
-+ DLT_READ
-+};
-+
-+int dynlock_cache_init(void);
-+void dynlock_cache_exit(void);
-+void dynlock_init(struct dynlock *dl);
-+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
-+ enum dynlock_type lt, gfp_t gfp);
-+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
-+int dynlock_is_locked(struct dynlock *dl, unsigned long value);
-+
-+#endif
-+
View
27 ldiskfs/kernel_patches/patches/ext3-ea-expand-lose-block.patch
@@ -1,27 +0,0 @@
-Date: Mon, 12 May 2008 11:24:40 +0800
-From: Tiger Yang <tiger.yang@oracle.com>
-Subject: [PATCH] ext3/4: fix uninitialized bs in ext3/4_xattr_set_handle()
-To: linux-ext4@vger.kernel.org
-Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
-
-This fix the uninitialized bs when we try to replace a xattr entry in ibody
-with the new value which require more than free space.
-
-Signed-off-by: Tiger Yang <tiger.yang@oracle.com>
-
-
-diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
---- a/fs/ext3/xattr.c
-+++ b/fs/ext3/xattr.c
-@@ -1000,6 +1000,11 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
- i.value = NULL;
- error = ext3_xattr_block_set(handle, inode, &i, &bs);
- } else if (error == -ENOSPC) {
-+ if (EXT3_I(inode)->i_file_acl && !bs.s.base) {
-+ error = ext3_xattr_block_find(inode, &i, &bs);
-+ if (error)
-+ goto cleanup;
-+ }
- error = ext3_xattr_block_set(handle, inode, &i, &bs);
- if (error)
- goto cleanup;
View
135 ldiskfs/kernel_patches/patches/ext3-export-64bit-name-hash.patch
@@ -1,135 +0,0 @@
-Index: linux-stage/fs/ext3/dir.c
-===================================================================
---- linux-stage.orig/fs/ext3/dir.c 2011-04-19 01:39:47.000000000 +0800
-+++ linux-stage/fs/ext3/dir.c 2011-04-19 01:44:19.000000000 +0800
-@@ -237,22 +237,50 @@
- }
-
- #ifdef CONFIG_EXT3_INDEX
-+static inline int is_32bit_api(void)
-+{
-+#ifdef HAVE_IS_COMPAT_TASK
-+ return is_compat_task();
-+#else
-+ return (BITS_PER_LONG == 32);
-+#endif
-+}
-+
- /*
- * These functions convert from the major/minor hash to an f_pos
- * value.
-- *
-- * Currently we only use major hash numer. This is unfortunate, but
-- * on 32-bit machines, the same VFS interface is used for lseek and
-- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
-- * lseek/telldir/seekdir will blow out spectacularly, and from within
-- * the ext2 low-level routine, we don't know if we're being called by
-- * a 64-bit version of the system call or the 32-bit version of the
-- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
-- * cookie. Sigh.
-+ *
-+ * Up layer (OSD) should specify O_32BITHASH or O_64BITHASH explicitly.
-+ * On the other hand, we allow ldiskfs to be mounted directly on both 32-bit
-+ * and 64-bit nodes, under such case, neither O_32BITHASH nor O_64BITHASH is
-+ * specified.
- */
--#define hash2pos(major, minor) (major >> 1)
--#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
--#define pos2min_hash(pos) (0)
-+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
-+{
-+ if ((filp->f_flags & O_32BITHASH) ||
-+ (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
-+ return (major >> 1);
-+ else
-+ return (((__u64)(major >> 1) << 32) | (__u64)minor);
-+}
-+
-+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
-+{
-+ if ((filp->f_flags & O_32BITHASH) ||
-+ (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
-+ return ((pos << 1) & 0xffffffff);
-+ else
-+ return (((pos >> 32) << 1) & 0xffffffff);
-+}
-+
-+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
-+{
-+ if ((filp->f_flags & O_32BITHASH) ||
-+ (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
-+ return (0);
-+ else
-+ return (pos & 0xffffffff);
-+}
-
- /*
- * This structure holds the nodes of the red-black tree used to store
-@@ -314,7 +342,7 @@
- }
-
-
--static struct dir_private_info *create_dir_info(loff_t pos)
-+static struct dir_private_info *create_dir_info(struct file* filp, loff_t pos)
- {
- struct dir_private_info *p;
-
-@@ -325,8 +353,8 @@
- p->curr_node = NULL;
- p->extra_fname = NULL;
- p->last_pos = 0;
-- p->curr_hash = pos2maj_hash(pos);
-- p->curr_minor_hash = pos2min_hash(pos);
-+ p->curr_hash = pos2maj_hash(filp, pos);
-+ p->curr_minor_hash = pos2min_hash(filp, pos);
- p->next_hash = 0;
- return p;
- }
-@@ -422,7 +450,7 @@
- printk("call_filldir: called with null fname?!?\n");
- return 0;
- }
-- curr_pos = hash2pos(fname->hash, fname->minor_hash);
-+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
- while (fname) {
- error = filldir(dirent, fname->name,
- fname->name_len, curr_pos,
-@@ -447,7 +475,7 @@
- int ret;
-
- if (!info) {
-- info = create_dir_info(filp->f_pos);
-+ info = create_dir_info(filp, filp->f_pos);
- if (!info)
- return -ENOMEM;
- filp->private_data = info;
-@@ -461,8 +489,8 @@
- free_rb_tree_fname(&info->root);
- info->curr_node = NULL;
- info->extra_fname = NULL;
-- info->curr_hash = pos2maj_hash(filp->f_pos);
-- info->curr_minor_hash = pos2min_hash(filp->f_pos);
-+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
-+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
- }
-
- /*
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2011-04-19 01:39:47.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs.h 2011-04-19 01:45:21.000000000 +0800
-@@ -54,6 +54,14 @@
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#ifndef O_32BITHASH
-+# define O_32BITHASH 0x10000000
-+#endif
-+
-+#ifndef O_64BITHASH
-+# define O_64BITHASH 0x20000000
-+#endif
-+
- #define EXT3_MULTIBLOCK_ALLOCATOR 1
-
- #define EXT3_MB_HINT_MERGE 1 /* prefer goal again. length */
View
2,903 ldiskfs/kernel_patches/patches/ext3-extents-2.6.18-vanilla.patch
@@ -1,2903 +0,0 @@
-Index: linux-2.6.18.8/fs/ext3/dir.c
-===================================================================
---- linux-2.6.18.8.orig/fs/ext3/dir.c 2007-02-24 00:52:30.000000000 +0100
-+++ linux-2.6.18.8/fs/ext3/dir.c 2007-07-17 09:18:14.000000000 +0200
-@@ -131,8 +131,7 @@ static int ext3_readdir(struct file * fi
- struct buffer_head *bh = NULL;
-
- map_bh.b_state = 0;
-- err = ext3_get_blocks_handle(NULL, inode, blk, 1,
-- &map_bh, 0, 0);
-+ err = ext3_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
- if (err > 0) {
- page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
- &filp->f_ra,
-Index: linux-2.6.18.8/fs/ext3/extents.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.18.8/fs/ext3/extents.c 2007-07-17 11:08:59.000000000 +0200
-@@ -0,0 +1,2276 @@
-+/*
-+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * Architecture independence:
-+ * Copyright (c) 2005, Bull S.A.
-+ * Written by Pierre Peiffer <pierre.peiffer@bull.net>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3*_error() should be used in some situations
-+ * - analyze all BUG()/BUG_ON(), use -EIO where appropriate
-+ * - smart tree reduction
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path)
-+{
-+ if (path->p_bh) {
-+ /* path points to block */
-+ return ext3_journal_get_write_access(handle, path->p_bh);
-+ }
-+ /* path points to leaf/index in inode body */
-+ /* we use in-core data, no need to protect them */
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_mark_inode_dirty(handle, inode);
-+ }
-+ return err;
-+}
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return le32_to_cpu(ex->ee_start)
-+ + (block - le32_to_cpu(ex->ee_block));
-+
-+ /* it looks index is empty
-+ * try to find starting from index itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
-+static int
-+ext3_ext_new_block(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ int goal, newblock;
-+
-+ goal = ext3_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline int ext3_ext_space_block(struct inode *inode)
-+{
-+ int size;
-+
-+ size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
-+ / sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ if (size > 6)
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct inode *inode)
-+{
-+ int size;
-+
-+ size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
-+ / sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ if (size > 5)
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct inode *inode)
-+{
-+ int size;
-+
-+ size = sizeof(EXT3_I(inode)->i_data);
-+ size -= sizeof(struct ext3_extent_header);
-+ size /= sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ if (size > 3)
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct inode *inode)
-+{
-+ int size;
-+
-+ size = sizeof(EXT3_I(inode)->i_data);
-+ size -= sizeof(struct ext3_extent_header);
-+ size /= sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ if (size > 4)
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static inline int
-+ext3_ext_max_entries(struct inode *inode, int depth)
-+{
-+ int max;
-+
-+ if (depth == ext_depth(inode)) {
-+ if (depth == 0)
-+ max = ext3_ext_space_root(inode);
-+ else
-+ max = ext3_ext_space_root_idx(inode);
-+ } else {
-+ if (depth == 0)
-+ max = ext3_ext_space_block(inode);
-+ else
-+ max = ext3_ext_space_block_idx(inode);
-+ }
-+
-+ return max;
-+}
-+
-+static int __ext3_ext_check_header(const char *function, int line, struct inode *inode,
-+ struct ext3_extent_header *eh,
-+ int depth)
-+{
-+ const char *error_msg = NULL;
-+ int max = 0;
-+
-+ if (unlikely(eh->eh_magic != cpu_to_le16(EXT3_EXT_MAGIC))) {
-+ error_msg = "invalid magic";
-+ goto corrupted;
-+ }
-+ if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
-+ error_msg = "unexpected eh_depth";
-+ goto corrupted;
-+ }
-+ if (unlikely(eh->eh_max == 0)) {
-+ error_msg = "invalid eh_max";
-+ goto corrupted;
-+ }
-+ max = ext3_ext_max_entries(inode, depth);
-+#ifdef AGRESSIVE_TEST
-+ if (eh->eh_max > 3) {
-+ /* inode probably got extent without defining AGRESSIVE_TEST */
-+ max = eh->eh_max;
-+ }
-+#endif
-+ if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
-+ error_msg = "too large eh_max";
-+ goto corrupted;
-+ }
-+ if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
-+ error_msg = "invalid eh_entries";
-+ goto corrupted;
-+ }
-+ if (unlikely((eh->eh_entries == 0) && (eh->eh_depth != 0))) {
-+ error_msg = "invalid index, eh_entries=0 && eh_depth != 0";
-+ goto corrupted;
-+ }
-+ return 0;
-+
-+corrupted:
-+ ext3_error(inode->i_sb, function,
-+ ":%d: bad header in inode #%lu: %s - magic %x, "
-+ "entries %u, max %u(%u), depth %u(%u)", line,
-+ inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
-+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
-+ max, le16_to_cpu(eh->eh_depth), depth);
-+
-+ return -EIO;
-+}
-+
-+#define ext3_ext_check_header(inode,eh,depth) \
-+ __ext3_ext_check_header(__FUNCTION__,__LINE__,inode,eh,depth)
-+
-+#ifdef EXT_DEBUG
-+static void ext3_ext_show_path(struct inode *inode, struct ext3_ext_path *path)
-+{
-+ int k, l = path->p_depth;
-+
-+ ext_debug(inode, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(inode, " %d->%d", le32_to_cpu(path->p_idx->ei_block),
-+ le32_to_cpu(path->p_idx->ei_leaf));
-+ } else if (path->p_ext) {
-+ ext_debug(inode, " %d:%d:%d",
-+ le32_to_cpu(path->p_ext->ee_block),
-+ le16_to_cpu(path->p_ext->ee_len),
-+ le32_to_cpu(path->p_ext->ee_start));
-+ } else
-+ ext_debug(inode, " []");
-+ }
-+ ext_debug(inode, "\n");
-+}
-+
-+static void ext3_ext_show_leaf(struct inode *inode, struct ext3_ext_path *path)
-+{
-+ int depth = ext_depth(inode);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
-+ ext_debug(inode, "%d:%d:%d ", le32_to_cpu(ex->ee_block),
-+ le16_to_cpu(ex->ee_len),
-+ le32_to_cpu(ex->ee_start));
-+ }
-+ ext_debug(inode, "\n");
-+}
-+#else
-+#define ext3_ext_show_path(inode,path)
-+#define ext3_ext_show_leaf(inode,path)
-+#endif
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++)
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ * the header must be checked before calling this
-+ */
-+static void
-+ext3_ext_binsearch_idx(struct inode *inode, struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *r, *l, *m;
-+
-+ ext_debug(inode, "binsearch for %d(idx): ", block);
-+
-+ l = EXT_FIRST_INDEX(eh) + 1;
-+ r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
-+ while (l <= r) {
-+ m = l + (r - l) / 2;
-+ if (block < le32_to_cpu(m->ei_block))
-+ r = m - 1;
-+ else
-+ l = m + 1;
-+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
-+ m, m->ei_block, r, r->ei_block);
-+ }
-+
-+ path->p_idx = l - 1;
-+ ext_debug(inode, " -> %d->%d ", le32_to_cpu(path->p_idx->ei_block),
-+ le32_to_cpu(path->p_idx->ei_leaf));
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix, *ix;
-+ int k;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
-+ if (k != 0 &&
-+ le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ le32_to_cpu(ix->ei_block),
-+ le32_to_cpu(ix[-1].ei_block));
-+ }
-+ BUG_ON(k && le32_to_cpu(ix->ei_block)
-+ <= le32_to_cpu(ix[-1].ei_block));
-+ if (block < le32_to_cpu(ix->ei_block))
-+ break;
-+ chix = ix;
-+ }
-+ BUG_ON(chix != path->p_idx);
-+ }
-+#endif
-+
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ * the header must be checked before calling this
-+ */
-+static void
-+ext3_ext_binsearch(struct inode *inode, struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *r, *l, *m;
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is empty yet:
-+ * we get such a leaf in split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(inode, "binsearch for %d: ", block);
-+
-+ l = EXT_FIRST_EXTENT(eh) + 1;
-+ r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
-+
-+ while (l <= r) {
-+ m = l + (r - l) / 2;
-+ if (block < le32_to_cpu(m->ee_block))
-+ r = m - 1;
-+ else
-+ l = m + 1;
-+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
-+ m, m->ee_block, r, r->ee_block);
-+ }
-+
-+ path->p_ext = l - 1;
-+ ext_debug(inode, " -> %d:%d:%d ",
-+ le32_to_cpu(path->p_ext->ee_block),
-+ le32_to_cpu(path->p_ext->ee_start),
-+ le16_to_cpu(path->p_ext->ee_len));
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex, *ex;
-+ int k;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
-+ BUG_ON(k && le32_to_cpu(ex->ee_block)
-+ <= le32_to_cpu(ex[-1].ee_block));
-+ if (block < le32_to_cpu(ex->ee_block))
-+ break;
-+ chex = ex;
-+ }
-+ BUG_ON(chex != path->p_ext);
-+ }
-+#endif
-+
-+}
-+
-+int ext3_ext_tree_init(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ eh = ext_inode_hdr(inode);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
-+ eh->eh_max = cpu_to_le16(ext3_ext_space_root(inode));
-+ ext3_mark_inode_dirty(handle, inode);
-+ ext3_ext_invalidate_cache(inode);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct inode *inode, int block, struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ short int depth, i, ppos = 0, alloc = 0;
-+
-+ eh = ext_inode_hdr(inode);
-+ i = depth = ext_depth(inode);
-+ if (ext3_ext_check_header(inode, eh, depth))
-+ return ERR_PTR(-EIO);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ alloc = 1;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(inode, "depth %d: num %d, max %d\n",
-+ ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
-+
-+ ext3_ext_binsearch_idx(inode, path + ppos, block);
-+ path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf);
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = ext_block_hdr(bh);
-+ ppos++;
-+ BUG_ON(ppos > depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(inode, eh, i))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(inode, path + ppos, block);
-+
-+ ext3_ext_show_path(inode, path);
-+
-+ return path;
-+
-+err:
-+ ext3_ext_drop_refs(path);
-+ if (alloc)
-+ kfree(path);
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at cupr
-+ * it check where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, inode, curp)))
-+ return err;
-+
-+ BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(inode, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(inode, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = cpu_to_le32(logical);
-+ ix->ei_leaf = cpu_to_le32(ptr);
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
-+
-+ BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
-+ > le16_to_cpu(curp->p_hdr->eh_max));
-+ BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, inode, curp);
-+ ext3_std_error(inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts new subtree into the path, using free index entry
-+ * at depth 'at:
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes decision where to split
-+ * - moves remaining extens and index entries (right to the split point)
-+ * into the newly allocated blocks
-+ * - initialize subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = ext_depth(inode);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock;
-+ __le32 border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: now desicion is simplest: at current extent */
-+
-+ /* if current leaf will be splitted, then we should use
-+ * border from split point */
-+ BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(inode, "leaf will be splitted."
-+ " next leaf starts at %d\n",
-+ le32_to_cpu(border));
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(inode, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ le32_to_cpu(border));
-+ }
-+
-+ /*
-+ * if error occurs, then we break processing
-+ * and turn filesystem read-only. so, index won't
-+ * be inserted and tree will be in consistent
-+ * state. next mount will repair buffers too
-+ */
-+
-+ /*
-+ * get array to track all allocated blocks
-+ * we need this to handle errors and free blocks
-+ * upon them
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ BUG_ON(newblock == 0);
-+ bh = sb_getblk(inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = ext_block_hdr(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
-+ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move remain of path[depth] to the new leaf */
-+ BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(inode, "move %d:%d:%d in new leaf %lu\n",
-+ le32_to_cpu(path[depth].p_ext->ee_block),
-+ le32_to_cpu(path[depth].p_ext->ee_start),
-+ le16_to_cpu(path[depth].p_ext->ee_len),
-+ newblock);
-+ /*memmove(ex++, path[depth].p_ext++,
-+ sizeof(struct ext3_extent));
-+ neh->eh_entries++;*/
-+ path[depth].p_ext++;
-+ m++;
-+ }
-+ if (m) {
-+ memmove(ex, path[depth].p_ext-m, sizeof(struct ext3_extent)*m);
-+ neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
-+ }
-+
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, inode, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries =
-+ cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-+ if ((err = ext3_ext_dirty(handle, inode, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ BUG_ON(k < 0);
-+ if (k)
-+ ext_debug(inode, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = ext_block_hdr(bh);
-+ neh->eh_entries = cpu_to_le16(1);
-+ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
-+ neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
-+ neh->eh_depth = cpu_to_le16(depth - i);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = cpu_to_le32(oldblock);
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(inode, "int.index at %d (block %lu): %lu -> %lu\n", i,
-+ newblock, (unsigned long) le32_to_cpu(border),
-+ oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(inode, "%d: move %d:%d in new index %lu\n", i,
-+ le32_to_cpu(path[i].p_idx->ei_block),
-+ le32_to_cpu(path[i].p_idx->ei_leaf),
-+ newblock);
-+ /*memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ BUG_ON(neh->eh_entries > neh->eh_max);*/
-+ path[i].p_idx++;
-+ m++;
-+ }
-+ if (m) {
-+ memmove(++fidx, path[i].p_idx - m,
-+ sizeof(struct ext3_extent_idx) * m);
-+ neh->eh_entries =
-+ cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, inode, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
-+ err = ext3_ext_dirty(handle, inode, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_insert_index(handle, inode, path + at,
-+ le32_to_cpu(border), newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initialize new top-level, creating index that points to the
-+ * just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, sizeof(EXT3_I(inode)->i_data));
-+
-+ /* set size of new block */
-+ neh = ext_block_hdr(bh);
-+ /* old root could have indexes or leaves
-+ * so calculate e_max right way */
-+ if (ext_depth(inode))
-+ neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
-+ else
-+ neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
-+ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, inode, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
-+ curp->p_hdr->eh_max = cpu_to_le16(ext3_ext_space_root_idx(inode));
-+ curp->p_hdr->eh_entries = cpu_to_le16(1);
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = cpu_to_le32(newblock);
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = ext_inode_hdr(inode);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
-+ le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf));
-+
-+ neh->eh_depth = cpu_to_le16(path->p_depth + 1);
-+ err = ext3_ext_dirty(handle, inode, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds empty index and adds new leaf. if no free index found
-+ * then it requests in-depth growing
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = ext_depth(inode);
-+
-+ /* walk up to the tree and look for free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use already allocated block for index block
-+ * so, subsequent data blocks should be contigoues */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found index with free entry, then use that
-+ * entry: create all needed subtree and add new leaf */
-+ err = ext3_ext_split(handle, inode, path, newext, i);
-+ if (err)
-+ goto out;
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(inode,
-+ le32_to_cpu(newext->ee_block),
-+ path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, inode, path, newext);
-+ if (err)
-+ goto out;
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(inode,
-+ le32_to_cpu(newext->ee_block),
-+ path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ goto out;
-+ }
-+
-+ /*
-+ * only first (depth 0 -> 1) produces free space
-+ * in all other cases we have to split growed tree
-+ */
-+ depth = ext_depth(inode);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need split */
-+ goto repeat;
-+ }
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * search the closest allocated block to the left for *logical
-+ * and returns it at @logical + it's physical address at @phys
-+ * if *logical is the smallest allocated block, the function
-+ * returns 0 at @phys
-+ * return value contains 0 (success) or error code
-+ */
-+int
-+ext3_ext_search_left(struct inode *inode, struct ext3_ext_path *path,
-+ unsigned long *logical, unsigned long *phys)
-+{
-+ struct ext3_extent_idx *ix;
-+ struct ext3_extent *ex;
-+ int depth;
-+
-+ BUG_ON(path == NULL);
-+ depth = path->p_depth;
-+ *phys = 0;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return 0;
-+
-+ /* usually extent in the path covers blocks smaller
-+ * then *logical, but it can be that extent is the
-+ * first one in the file */
-+
-+ ex = path[depth].p_ext;
-+ if (*logical < le32_to_cpu(ex->ee_block)) {
-+ BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
-+ while (--depth >= 0) {
-+ ix = path[depth].p_idx;
-+ BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
-+ }
-+ return 0;
-+ }
-+
-+ BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
-+
-+ *logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1;
-+ *phys = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - 1;
-+ return 0;
-+}
-+EXPORT_SYMBOL(ext3_ext_search_left);
-+
-+/*
-+ * search the closest allocated block to the right for *logical
-+ * and returns it at @logical + it's physical address at @phys
-+ * if *logical is the smallest allocated block, the function
-+ * returns 0 at @phys
-+ * return value contains 0 (success) or error code
-+ */
-+int
-+ext3_ext_search_right(struct inode *inode, struct ext3_ext_path *path,
-+ unsigned long *logical, unsigned long *phys)
-+{
-+ struct buffer_head *bh = NULL;
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent_idx *ix = NULL;
-+ struct ext3_extent *ex;
-+ unsigned long block;
-+ int depth;
-+
-+ BUG_ON(path == NULL);
-+ depth = path->p_depth;
-+ *phys = 0;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return 0;
-+
-+ /* usually extent in the path covers blocks smaller
-+ * then *logical, but it can be that extent is the
-+ * first one in the file */
-+
-+ ex = path[depth].p_ext;
-+ if (*logical < le32_to_cpu(ex->ee_block)) {
-+ BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
-+ while (--depth >= 0) {
-+ ix = path[depth].p_idx;
-+ BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
-+ }
-+ *logical = le32_to_cpu(ex->ee_block);
-+ *phys = le32_to_cpu(ex->ee_start);
-+ return 0;
-+ }
-+
-+ BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
-+
-+ if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
-+ /* next allocated block in this leaf */
-+ ex++;
-+ *logical = le32_to_cpu(ex->ee_block);
-+ *phys = le32_to_cpu(ex->ee_start);
-+ return 0;
-+ }
-+
-+ /* go up and search for index to the right */
-+ while (--depth >= 0) {
-+ ix = path[depth].p_idx;
-+ if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
-+ break;
-+ }
-+
-+ if (depth < 0) {
-+ /* we've gone up to the root and
-+ * found no index to the right */
-+ return 0;
-+ }
-+
-+ /* we've found index to the right, let's
-+ * follow it and find the closest allocated
-+ * block to the right */
-+ ix++;
-+ block = le32_to_cpu(ix->ei_leaf);
-+ while (++depth < path->p_depth) {
-+ bh = sb_bread(inode->i_sb, block);
-+ if (bh == NULL)
-+ return -EIO;
-+ eh = ext_block_hdr(bh);
-+ if (ext3_ext_check_header(inode, eh, path->p_depth - depth)) {
-+ brelse(bh);
-+ return -EIO;
-+ }
-+ ix = EXT_FIRST_INDEX(eh);
-+ block = le32_to_cpu(ix->ei_leaf);
-+ brelse(bh);
-+ }
-+
-+ bh = sb_bread(inode->i_sb, block);
-+ if (bh == NULL)
-+ return -EIO;
-+ eh = ext_block_hdr(bh);
-+ if (ext3_ext_check_header(inode, eh, 0)) {
-+ brelse(bh);
-+ return -EIO;
-+ }
-+ ex = EXT_FIRST_EXTENT(eh);
-+ *logical = le32_to_cpu(ex->ee_block);
-+ *phys = le32_to_cpu(ex->ee_start);
-+ brelse(bh);
-+ return 0;
-+
-+}
-+EXPORT_SYMBOL(ext3_ext_search_right);
-+
-+
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it consider block number from index entry as
-+ * allocated block. thus, index entries have to be consistent
-+ * with leafs
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ BUG_ON(path == NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return le32_to_cpu(path[depth].p_ext[1].ee_block);
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return le32_to_cpu(path[depth].p_idx[1].ei_block);
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_UNSET_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct inode *inode,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ BUG_ON(path == NULL);
-+ depth = path->p_depth;
-+
-+ /* zero-tree has no leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_UNSET_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return le32_to_cpu(path[depth].p_idx[1].ei_block);
-+ depth--;
-+ }
-+
-+ return EXT_UNSET_BLOCK;
-+}
-+
-+/*
-+ * if leaf gets modified and modified extent is first in the leaf
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = ext_depth(inode);
-+ struct ext3_extent *ex;
-+ __le32 border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ BUG_ON(ex == NULL);
-+ BUG_ON(eh == NULL);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct tree if first leaf got modified only */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if border is smaller then current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, inode, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, inode, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, inode, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, inode, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct inode *inode, struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: 48bit support */
-+ if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
-+ le32_to_cpu(ex2->ee_block))
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (le16_to_cpu(ex1->ee_len) >= 4)
-+ return 0;
-+#endif
-+
-+ if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len) ==
-+ le32_to_cpu(ex2->ee_start))
-+ return 1;
-+ return 0;
-+}
-+
-+/*
-+ * this routine tries to merge requsted extent into the existing
-+ * extent or inserts requested extent as new one into the tree,
-+ * creating new leaf in no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct inode *inode,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ BUG_ON(newext->ee_len == 0);
-+ depth = ext_depth(inode);
-+ ex = path[depth].p_ext;
-+ BUG_ON(path[depth].p_hdr == NULL);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(inode, ex, newext)) {
-+ ext_debug(inode, "append %d block to %d:%d (from %d)\n",
-+ le16_to_cpu(newext->ee_len),
-+ le32_to_cpu(ex->ee_block),
-+ le16_to_cpu(ex->ee_len),
-+ le32_to_cpu(ex->ee_start));
-+ if ((err = ext3_ext_get_access(handle, inode, path + depth)))
-+ return err;
-+ ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
-+ + le16_to_cpu(newext->ee_len));
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = ext_depth(inode);
-+ eh = path[depth].p_hdr;
-+ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(inode, path);
-+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
-+ && next != EXT_UNSET_BLOCK) {
-+ ext_debug(inode, "next leaf block - %d\n", next);
-+ BUG_ON(npath != NULL);
-+ npath = ext3_ext_find_extent(inode, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ BUG_ON(npath->p_depth != path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
-+ ext_debug(inode, "next leaf isnt full(%d)\n",
-+ le16_to_cpu(eh->eh_entries));
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
-+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
-+ }
-+
-+ /*
-+ * there is no free space in found leaf
-+ * we're gonna add new leaf in the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, inode, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = ext_depth(inode);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, inode, path + depth)))