From 038b92d58c79afbf348dfdef908fff049793eb91 Mon Sep 17 00:00:00 2001
From: johann <johann>
Date: Wed, 18 Jul 2007 20:56:05 +0000
Subject: [PATCH] Branch b1_6 b=11039 i=adilger i=scjody

Remove 2.4 and 2.6.12 kernel patches.
---
 .../patches/ext3-extents-2.6.12.patch         | 2940 ---------------
 .../ext3-external-journal-2.6.12.patch        |  148 -
 .../patches/ext3-mballoc2-2.6.12.patch        | 3102 ----------------
 .../ext3-mballoc2-2.6.18-vanilla.patch        | 3140 -----------------
 .../ext3-sector_t-overflow-2.6.12.patch       |   64 -
 .../kernel_patches/patches/iopen-2.6.12.patch |  471 ---
 .../series/ldiskfs-2.6.12-vanilla.series      |   18 -
 7 files changed, 9883 deletions(-)
 delete mode 100644 ldiskfs/kernel_patches/patches/ext3-extents-2.6.12.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/ext3-external-journal-2.6.12.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.12.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.18-vanilla.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/ext3-sector_t-overflow-2.6.12.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/iopen-2.6.12.patch
 delete mode 100644 ldiskfs/kernel_patches/series/ldiskfs-2.6.12-vanilla.series

diff --git a/ldiskfs/kernel_patches/patches/ext3-extents-2.6.12.patch b/ldiskfs/kernel_patches/patches/ext3-extents-2.6.12.patch
deleted file mode 100644
index f421f88e95..0000000000
--- a/ldiskfs/kernel_patches/patches/ext3-extents-2.6.12.patch
+++ /dev/null
@@ -1,2940 +0,0 @@
-Index: linux-2.6.12-rc6/fs/ext3/extents.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/extents.c	2005-06-14 16:31:25.756503133 +0200
-+++ linux-2.6.12-rc6/fs/ext3/extents.c	2005-06-14 16:31:25.836581257 +0200
-@@ -0,0 +1,2359 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ *   - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ *   - ext3_ext_calc_credits() could take 'mergable' into account
-+ *   - ext3*_error() should be used in some situations
-+ *   - find_goal() [to be tested and improved]
-+ *   - smart tree reduction
-+ *   - arch-independence
-+ *     common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+	if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+		printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+		       (unsigned)eh->eh_magic);
-+		return -EIO;
-+	}
-+	if (eh->eh_max == 0) {
-+		printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+		       (unsigned)eh->eh_max);
-+		return -EIO;
-+	}
-+	if (eh->eh_entries > eh->eh_max) {
-+		printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+		       (unsigned)eh->eh_entries);
-+		return -EIO;
-+	}
-+	return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+	int err;
-+
-+	if (handle->h_buffer_credits > needed)
-+		return handle;
-+	if (!ext3_journal_extend(handle, needed))
-+		return handle;
-+	err = ext3_journal_restart(handle, needed);
-+	
-+	return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+	if (tree->ops->get_write_access)
-+		return tree->ops->get_write_access(h,tree->buffer);
-+	else
-+		return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+	if (tree->ops->mark_buffer_dirty)
-+		return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+	else
-+		return 0;
-+}
-+
-+/*
-+ * could return:
-+ *  - EROFS
-+ *  - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+			       struct ext3_extents_tree *tree,
-+			       struct ext3_ext_path *path)
-+{
-+	int err;
-+
-+	if (path->p_bh) {
-+		/* path points to block */
-+		err = ext3_journal_get_write_access(handle, path->p_bh);
-+	} else {
-+		/* path points to leaf/index in inode body */
-+		err = ext3_ext_get_access_for_root(handle, tree);
-+	}
-+	return err;
-+}
-+
-+/*
-+ * could return:
-+ *  - EROFS
-+ *  - ENOMEM
-+ *  - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+			  struct ext3_ext_path *path)
-+{
-+	int err;
-+	if (path->p_bh) {
-+		/* path points to block */
-+		err =ext3_journal_dirty_metadata(handle, path->p_bh);
-+	} else {
-+		/* path points to leaf/index in inode body */
-+		err = ext3_ext_mark_root_dirty(handle, tree);
-+	}
-+	return err;
-+}
-+
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+		   struct ext3_ext_path *path, struct ext3_extent *ex,
-+		   int *err)
-+{
-+	int goal, depth, newblock;
-+	struct inode *inode;
-+
-+	EXT_ASSERT(tree);
-+	if (tree->ops->new_block)
-+		return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+	inode = tree->inode;
-+	depth = EXT_DEPTH(tree);
-+	if (path && depth > 0) {
-+		goal = path[depth-1].p_block;
-+	} else {
-+		struct ext3_inode_info *ei = EXT3_I(inode);
-+		unsigned long bg_start;
-+		unsigned long colour;
-+
-+		bg_start = (ei->i_block_group *
-+			    EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+			le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+		colour = (current->pid % 16) *
-+			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+		goal = bg_start + colour;
-+	}
-+
-+	newblock = ext3_new_block(handle, inode, goal, err);
-+	return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+	struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+	neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+			     (EXT_HDR_GEN(neh) + 1);
-+}
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+	int size;
-+
-+	size = (tree->inode->i_sb->s_blocksize -
-+		sizeof(struct ext3_extent_header)) /
-+				sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+	size = 6;
-+#endif
-+	return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+	int size;
-+
-+	size = (tree->inode->i_sb->s_blocksize -
-+		sizeof(struct ext3_extent_header)) /
-+				sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+	size = 5;
-+#endif
-+	return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+	int size;
-+
-+	size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+			sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+	size = 3;
-+#endif
-+	return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+	int size;
-+
-+	size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+			sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+	size = 4;
-+#endif
-+	return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+			       struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+	int k, l = path->p_depth;
-+
-+	ext_debug(tree, "path:");
-+	for (k = 0; k <= l; k++, path++) {
-+		if (path->p_idx) {
-+			ext_debug(tree, "  %d->%d", path->p_idx->ei_block,
-+				  path->p_idx->ei_leaf);
-+		} else if (path->p_ext) {
-+			ext_debug(tree, "  %d:%d:%d",
-+				  path->p_ext->ee_block,
-+				  path->p_ext->ee_len,
-+				  path->p_ext->ee_start);
-+		} else
-+			ext_debug(tree, "  []");
-+	}
-+	ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+			       struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+	int depth = EXT_DEPTH(tree);
-+	struct ext3_extent_header *eh;
-+	struct ext3_extent *ex;
-+	int i;
-+
-+	if (!path)
-+		return;
-+
-+	eh = path[depth].p_hdr;
-+	ex = EXT_FIRST_EXTENT(eh);
-+
-+	for (i = 0; i < eh->eh_entries; i++, ex++) {
-+		ext_debug(tree, "%d:%d:%d ",
-+			  ex->ee_block, ex->ee_len, ex->ee_start);
-+	}
-+	ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+	int depth = path->p_depth;
-+	int i;
-+
-+	for (i = 0; i <= depth; i++, path++) {
-+		if (path->p_bh) {
-+			brelse(path->p_bh);
-+			path->p_bh = NULL;
-+		}
-+	}
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+		       struct ext3_ext_path *path, int block)
-+{
-+	struct ext3_extent_header *eh = path->p_hdr;
-+	struct ext3_extent_idx *ix;
-+	int l = 0, k, r;
-+
-+	EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+	EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+	EXT_ASSERT(eh->eh_entries > 0);
-+
-+	ext_debug(tree, "binsearch for %d(idx):  ", block);
-+
-+	path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+	r = k = eh->eh_entries;
-+	while (k > 1) {
-+		k = (r - l) / 2;
-+		if (block < ix[l + k].ei_block)
-+			r -= k;
-+		else
-+			l += k;
-+		ext_debug(tree, "%d:%d:%d ", k, l, r);
-+	}
-+
-+	ix += l;
-+	path->p_idx = ix;
-+	ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+	while (l++ < r) {
-+		if (block < ix->ei_block) 
-+			break;
-+		path->p_idx = ix++;
-+	}
-+	ext_debug(tree, "  -> %d->%d\n", path->p_idx->ei_block,
-+		  path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH 
-+	{
-+		struct ext3_extent_idx *chix;
-+
-+		chix = ix = EXT_FIRST_INDEX(eh);
-+		for (k = 0; k < eh->eh_entries; k++, ix++) {
-+			if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+				printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+				       ix, EXT_FIRST_INDEX(eh));
-+				printk("%u <= %u\n",
-+				       ix->ei_block,ix[-1].ei_block);
-+			}
-+			EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+			if (block < ix->ei_block) 
-+				break;
-+			chix = ix;
-+		}
-+		EXT_ASSERT(chix == path->p_idx);
-+	}
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+		   struct ext3_ext_path *path, int block)
-+{
-+	struct ext3_extent_header *eh = path->p_hdr;
-+	struct ext3_extent *ex;
-+	int l = 0, k, r;
-+
-+	EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+	EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+	if (eh->eh_entries == 0) {
-+		/*
-+		 * this leaf is empty yet:
-+		 *  we get such a leaf in split/add case
-+		 */
-+		return;
-+	}
-+	
-+	ext_debug(tree, "binsearch for %d:  ", block);
-+
-+	path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+	r = k = eh->eh_entries;
-+	while (k > 1) {
-+		k = (r - l) / 2;
-+		if (block < ex[l + k].ee_block)
-+			r -= k;
-+		else
-+			l += k;
-+		ext_debug(tree, "%d:%d:%d ", k, l, r);
-+	}
-+
-+	ex += l;
-+	path->p_ext = ex;
-+	ext_debug(tree, "  -> %d:%d:%d ", path->p_ext->ee_block,
-+		  path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+	while (l++ < r) {
-+		if (block < ex->ee_block) 
-+			break;
-+		path->p_ext = ex++;
-+	}
-+	ext_debug(tree, "  -> %d:%d:%d\n", path->p_ext->ee_block,
-+		  path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH 
-+	{
-+		struct ext3_extent *chex;
-+
-+		chex = ex = EXT_FIRST_EXTENT(eh);
-+		for (k = 0; k < eh->eh_entries; k++, ex++) {
-+			EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+			if (block < ex->ee_block) 
-+				break;
-+			chex = ex;
-+		}
-+		EXT_ASSERT(chex == path->p_ext);
-+	}
-+#endif
-+}
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+	struct ext3_extent_header *eh;
-+
-+	BUG_ON(tree->buffer_len == 0);
-+	ext3_ext_get_access_for_root(handle, tree);
-+	eh = EXT_ROOT_HDR(tree);
-+	eh->eh_depth = 0;
-+	eh->eh_entries = 0;
-+	eh->eh_magic = EXT3_EXT_MAGIC;
-+	eh->eh_max = ext3_ext_space_root(tree);
-+	ext3_ext_mark_root_dirty(handle, tree);
-+	ext3_ext_invalidate_cache(tree);
-+	return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+		     struct ext3_ext_path *path)
-+{
-+	struct ext3_extent_header *eh;
-+	struct buffer_head *bh;
-+	int depth, i, ppos = 0;
-+
-+	EXT_ASSERT(tree);
-+	EXT_ASSERT(tree->inode);
-+	EXT_ASSERT(tree->root);
-+
-+	eh = EXT_ROOT_HDR(tree);
-+	EXT_ASSERT(eh);
-+	if (ext3_ext_check_header(eh)) {
-+		/* don't free previously allocated path
-+		 * -- caller should take care */
-+		path = NULL;
-+		goto err;
-+	}
-+
-+	i = depth = EXT_DEPTH(tree);
-+	EXT_ASSERT(eh->eh_max);
-+	EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+	
-+	/* account possible depth increase */
-+	if (!path) {
-+		path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+			       GFP_NOFS);
-+		if (!path)
-+			return ERR_PTR(-ENOMEM);
-+	}
-+	memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+	path[0].p_hdr = eh;
-+
-+	/* walk through the tree */
-+	while (i) {
-+		ext_debug(tree, "depth %d: num %d, max %d\n",
-+			  ppos, eh->eh_entries, eh->eh_max);
-+		ext3_ext_binsearch_idx(tree, path + ppos, block);
-+		path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+		path[ppos].p_depth = i;
-+		path[ppos].p_ext = NULL;
-+
-+		bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+		if (!bh)
-+			goto err;
-+
-+		eh = EXT_BLOCK_HDR(bh);
-+		ppos++;
-+		EXT_ASSERT(ppos <= depth);
-+		path[ppos].p_bh = bh;
-+		path[ppos].p_hdr = eh;
-+		i--;
-+
-+		if (ext3_ext_check_header(eh))
-+			goto err;
-+	}
-+
-+	path[ppos].p_depth = i;
-+	path[ppos].p_hdr = eh;
-+	path[ppos].p_ext = NULL;
-+	path[ppos].p_idx = NULL;
-+
-+	if (ext3_ext_check_header(eh))
-+		goto err;
-+
-+	/* find extent */
-+	ext3_ext_binsearch(tree, path + ppos, block);
-+
-+	ext3_ext_show_path(tree, path);
-+
-+	return path;
-+
-+err:
-+	printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+	if (path) {
-+		ext3_ext_drop_refs(path);
-+		kfree(path);
-+	}
-+	return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+				 struct ext3_extents_tree *tree,
-+				 struct ext3_ext_path *curp,
-+				 int logical, int ptr)
-+{
-+	struct ext3_extent_idx *ix;
-+	int len, err;
-+
-+	if ((err = ext3_ext_get_access(handle, tree, curp)))
-+		return err;
-+
-+	EXT_ASSERT(logical != curp->p_idx->ei_block);
-+	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+	if (logical > curp->p_idx->ei_block) {
-+		/* insert after */
-+		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+			len = (len - 1) * sizeof(struct ext3_extent_idx);
-+			len = len < 0 ? 0 : len;
-+			ext_debug(tree, "insert new index %d after: %d. "
-+				  "move %d from 0x%p to 0x%p\n",
-+				  logical, ptr, len,
-+				  (curp->p_idx + 1), (curp->p_idx + 2));
-+			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+		}
-+		ix = curp->p_idx + 1;
-+	} else {
-+		/* insert before */
-+		len = len * sizeof(struct ext3_extent_idx);
-+		len = len < 0 ? 0 : len;
-+		ext_debug(tree, "insert new index %d before: %d. "
-+			  "move %d from 0x%p to 0x%p\n",
-+			  logical, ptr, len,
-+			  curp->p_idx, (curp->p_idx + 1));
-+		memmove(curp->p_idx + 1, curp->p_idx, len);
-+		ix = curp->p_idx;
-+	}
-+
-+	ix->ei_block = logical;
-+	ix->ei_leaf = ptr;
-+	ix->ei_leaf_hi = ix->ei_unused = 0;
-+	curp->p_hdr->eh_entries++;
-+
-+	EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+	EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+	err = ext3_ext_dirty(handle, tree, curp);
-+	ext3_std_error(tree->inode->i_sb, err);
-+
-+	return err;
-+}
-+
-+/*
-+ * routine inserts new subtree into the path, using free index entry
-+ * at depth 'at':
-+ *  - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ *  - makes decision where to split
-+ *  - moves remaining extents and index entries (right to the split point)
-+ *    into the newly allocated blocks
-+ *  - initialize subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+			  struct ext3_ext_path *path,
-+			  struct ext3_extent *newext, int at)
-+{
-+	struct buffer_head *bh = NULL;
-+	int depth = EXT_DEPTH(tree);
-+	struct ext3_extent_header *neh;
-+	struct ext3_extent_idx *fidx;
-+	struct ext3_extent *ex;
-+	int i = at, k, m, a;
-+	unsigned long newblock, oldblock, border;
-+	int *ablocks = NULL; /* array of allocated blocks */
-+	int err = 0;
-+
-+	/* make decision: where to split? */
-+	/* FIXME: now decision is simplest: at current extent */
-+
-+	/* if current leaf will be split, then we should use
-+	 * border from split point */
-+	EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+		border = path[depth].p_ext[1].ee_block;
-+		ext_debug(tree, "leaf will be splitted."
-+			  " next leaf starts at %d\n",
-+			  (int)border);
-+	} else {
-+		border = newext->ee_block;
-+		ext_debug(tree, "leaf will be added."
-+			  " next leaf starts at %d\n",
-+			  (int)border);
-+	}
-+
-+	/* 
-+	 * if error occurs, then we break processing
-+	 * and turn filesystem read-only. so, index won't
-+	 * be inserted and tree will be in consistent
-+	 * state. next mount will repair buffers too
-+	 */
-+
-+	/*
-+	 * get array to track all allocated blocks
-+	 * we need this to handle errors and free blocks
-+	 * upon them
-+	 */
-+	ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+	if (!ablocks)
-+		return -ENOMEM;
-+	memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+	/* allocate all needed blocks */
-+	ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+	for (a = 0; a < depth - at; a++) {
-+		newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+		if (newblock == 0)
-+			goto cleanup;
-+		ablocks[a] = newblock;
-+	}
-+
-+	/* initialize new leaf */
-+	newblock = ablocks[--a];
-+	EXT_ASSERT(newblock);
-+	bh = sb_getblk(tree->inode->i_sb, newblock);
-+	if (!bh) {
-+		err = -EIO;
-+		goto cleanup;
-+	}
-+	lock_buffer(bh);
-+
-+	if ((err = ext3_journal_get_create_access(handle, bh)))
-+		goto cleanup;
-+
-+	neh = EXT_BLOCK_HDR(bh);
-+	neh->eh_entries = 0;
-+	neh->eh_max = ext3_ext_space_block(tree);
-+	neh->eh_magic = EXT3_EXT_MAGIC;
-+	neh->eh_depth = 0;
-+	ex = EXT_FIRST_EXTENT(neh);
-+
-+	/* move remainder of path[depth] to the new leaf */
-+	EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+	/* start copy from next extent */
-+	/* TODO: we could do it by single memmove */
-+	m = 0;
-+	path[depth].p_ext++;
-+	while (path[depth].p_ext <=
-+			EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+		ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+			  path[depth].p_ext->ee_block,
-+			  path[depth].p_ext->ee_start,
-+			  path[depth].p_ext->ee_len,
-+			  newblock);
-+		memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+		neh->eh_entries++;
-+		m++;
-+	}
-+	set_buffer_uptodate(bh);
-+	unlock_buffer(bh);
-+
-+	if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+		goto cleanup;	
-+	brelse(bh);
-+	bh = NULL;
-+
-+	/* correct old leaf */
-+	if (m) {
-+		if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+			goto cleanup;
-+		path[depth].p_hdr->eh_entries -= m;
-+		if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+			goto cleanup;
-+		
-+	}
-+
-+	/* create intermediate indexes */
-+	k = depth - at - 1;
-+	EXT_ASSERT(k >= 0);
-+	if (k)
-+		ext_debug(tree,	"create %d intermediate indices\n", k);
-+	/* insert new index into current index block */
-+	/* current depth stored in i var */
-+	i = depth - 1;
-+	while (k--) {
-+		oldblock = newblock;
-+		newblock = ablocks[--a];
-+		bh = sb_getblk(tree->inode->i_sb, newblock);
-+		if (!bh) {
-+			err = -EIO;
-+			goto cleanup;
-+		}
-+		lock_buffer(bh);
-+
-+		if ((err = ext3_journal_get_create_access(handle, bh)))
-+			goto cleanup;
-+
-+		neh = EXT_BLOCK_HDR(bh);
-+		neh->eh_entries = 1;
-+		neh->eh_magic = EXT3_EXT_MAGIC;
-+		neh->eh_max = ext3_ext_space_block_idx(tree);
-+		neh->eh_depth = depth - i; 
-+		fidx = EXT_FIRST_INDEX(neh);
-+		fidx->ei_block = border;
-+		fidx->ei_leaf = oldblock;
-+		fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+		ext_debug(tree,	"int.index at %d (block %lu): %lu -> %lu\n",
-+			  i, newblock, border, oldblock);
-+		/* copy indexes */
-+		m = 0;
-+		path[i].p_idx++;
-+
-+		ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+			  EXT_MAX_INDEX(path[i].p_hdr));
-+		EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+			   EXT_LAST_INDEX(path[i].p_hdr));
-+		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+			ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+				  i, path[i].p_idx->ei_block,
-+				  path[i].p_idx->ei_leaf, newblock);
-+			memmove(++fidx, path[i].p_idx++,
-+				sizeof(struct ext3_extent_idx));
-+			neh->eh_entries++;
-+			EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+			m++;
-+		}
-+		set_buffer_uptodate(bh);
-+		unlock_buffer(bh);
-+
-+		if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+			goto cleanup;
-+		brelse(bh);
-+		bh = NULL;
-+
-+		/* correct old index */
-+		if (m) {
-+			err = ext3_ext_get_access(handle, tree, path + i);
-+			if (err)
-+				goto cleanup;
-+			path[i].p_hdr->eh_entries -= m;
-+			err = ext3_ext_dirty(handle, tree, path + i);
-+			if (err)
-+				goto cleanup;
-+		}
-+
-+		i--;
-+	}
-+
-+	/* insert new index */
-+	if (!err)
-+		err = ext3_ext_insert_index(handle, tree, path + at,
-+					    border, newblock);
-+
-+cleanup:
-+	if (bh) {
-+		if (buffer_locked(bh))
-+			unlock_buffer(bh);
-+		brelse(bh);
-+	}
-+
-+	if (err) {
-+		/* free all allocated blocks in error case */
-+		for (i = 0; i < depth; i++) {
-+			if (!ablocks[i])
-+				continue;
-+			ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+		}
-+	}
-+	kfree(ablocks);
-+
-+	return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ *  - allocates new block
-+ *  - moves top-level data (index block or leaf) into the new block
-+ *  - initialize new top-level, creating index that points to the
-+ *    just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+				 struct ext3_extents_tree *tree,
-+				 struct ext3_ext_path *path,
-+				 struct ext3_extent *newext)
-+{
-+	struct ext3_ext_path *curp = path;
-+	struct ext3_extent_header *neh;
-+	struct ext3_extent_idx *fidx;
-+	struct buffer_head *bh;
-+	unsigned long newblock;
-+	int err = 0;
-+
-+	newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+	if (newblock == 0)
-+		return err;
-+
-+	bh = sb_getblk(tree->inode->i_sb, newblock);
-+	if (!bh) {
-+		err = -EIO;
-+		ext3_std_error(tree->inode->i_sb, err);
-+		return err;
-+	}
-+	lock_buffer(bh);
-+
-+	if ((err = ext3_journal_get_create_access(handle, bh))) {
-+		unlock_buffer(bh);
-+		goto out;	
-+	}
-+
-+	/* move top-level index/leaf into new block */
-+	memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+	/* set size of new block */
-+	neh = EXT_BLOCK_HDR(bh);
-+	/* old root could have indexes or leaves
-+	 * so calculate eh_max the right way */
-+	if (EXT_DEPTH(tree))
-+		neh->eh_max = ext3_ext_space_block_idx(tree);
-+	else
-+		neh->eh_max = ext3_ext_space_block(tree);
-+	neh->eh_magic = EXT3_EXT_MAGIC;
-+	set_buffer_uptodate(bh);
-+	unlock_buffer(bh);
-+
-+	if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+		goto out;
-+
-+	/* create index in new top-level index: num,max,pointer */
-+	if ((err = ext3_ext_get_access(handle, tree, curp)))
-+		goto out;
-+
-+	curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+	curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+	curp->p_hdr->eh_entries = 1;
-+	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+	/* FIXME: it works, but actually path[0] can be index */
-+	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+	curp->p_idx->ei_leaf = newblock;
-+	curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+	neh = EXT_ROOT_HDR(tree);
-+	fidx = EXT_FIRST_INDEX(neh);
-+	ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+		  neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf); 
-+
-+	neh->eh_depth = path->p_depth + 1;
-+	err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+	brelse(bh);
-+
-+	return err;
-+}
-+
-+/*
-+ * routine finds empty index and adds new leaf. if no free index found
-+ * then it requests in-depth growing
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+				    struct ext3_extents_tree *tree,
-+				    struct ext3_ext_path *path,
-+				    struct ext3_extent *newext)
-+{
-+	struct ext3_ext_path *curp;
-+	int depth, i, err = 0;
-+
-+repeat:
-+	i = depth = EXT_DEPTH(tree);
-+	
-+	/* walk up to the tree and look for free index entry */
-+	curp = path + depth;
-+	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+		i--;
-+		curp--;
-+	}
-+
-+	/* we use already allocated block for index block
-+	 * so, subsequent data blocks should be contiguous */
-+	if (EXT_HAS_FREE_INDEX(curp)) {
-+		/* if we found index with free entry, then use that
-+		 * entry: create all needed subtree and add new leaf */
-+		err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+		/* refill path */
-+		ext3_ext_drop_refs(path);
-+		path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+		if (IS_ERR(path))
-+			err = PTR_ERR(path);
-+	} else {
-+		/* tree is full, time to grow in depth */
-+		err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+		/* refill path */
-+		ext3_ext_drop_refs(path);
-+		path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+		if (IS_ERR(path))
-+			err = PTR_ERR(path);
-+
-+		/*
-+		 * only first (depth 0 -> 1) produces free space
-+		 * in all other cases we have to split the grown tree
-+		 */
-+		depth = EXT_DEPTH(tree);
-+		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+			/* now we need split */
-+			goto repeat;
-+		}
-+	}
-+
-+	if (err)
-+		return err;
-+
-+	return 0;
-+}
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers block number from index entry as
-+ * allocated block. thus, index entries have to be consistent
-+ * with leafs
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+	int depth;
-+
-+	EXT_ASSERT(path != NULL);
-+	depth = path->p_depth;
-+
-+	if (depth == 0 && path->p_ext == NULL)
-+		return EXT_MAX_BLOCK;
-+
-+	/* FIXME: what if index isn't full ?! */
-+	while (depth >= 0) {
-+		if (depth == path->p_depth) {
-+			/* leaf */
-+			if (path[depth].p_ext !=
-+			    EXT_LAST_EXTENT(path[depth].p_hdr))
-+				return path[depth].p_ext[1].ee_block;
-+		} else {
-+			/* index */
-+			if (path[depth].p_idx !=
-+			    EXT_LAST_INDEX(path[depth].p_hdr))
-+				return path[depth].p_idx[1].ei_block;
-+		}
-+		depth--;        
-+	}
-+
-+	return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+					 struct ext3_ext_path *path)
-+{
-+	int depth;
-+
-+	EXT_ASSERT(path != NULL);
-+	depth = path->p_depth;
-+
-+	/* zero-tree has no leaf blocks at all */
-+	if (depth == 0)
-+		return EXT_MAX_BLOCK;
-+
-+	/* go to index block */
-+	depth--;
-+	
-+	while (depth >= 0) {
-+		if (path[depth].p_idx !=
-+		    EXT_LAST_INDEX(path[depth].p_hdr))
-+			return path[depth].p_idx[1].ei_block;
-+		depth--;        
-+	}
-+
-+	return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if leaf gets modified and modified extent is first in the leaf
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+			     struct ext3_ext_path *path)
-+{
-+	struct ext3_extent_header *eh;
-+	int depth = EXT_DEPTH(tree);	
-+	struct ext3_extent *ex;
-+	unsigned long border;
-+	int k, err = 0;
-+	
-+	eh = path[depth].p_hdr;
-+	ex = path[depth].p_ext;
-+	EXT_ASSERT(ex);
-+	EXT_ASSERT(eh);
-+	
-+	if (depth == 0) {
-+		/* there is no tree at all */
-+		return 0;
-+	}
-+	
-+	if (ex != EXT_FIRST_EXTENT(eh)) {
-+		/* we correct the tree only if the first extent in the leaf was modified */
-+		return 0;
-+	}
-+	
-+	/*
-+	 * TODO: we need correction if border is smaller than current one
-+	 */
-+	k = depth - 1;
-+	border = path[depth].p_ext->ee_block;
-+	if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+		return err;
-+	path[k].p_idx->ei_block = border;
-+	if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+		return err;
-+
-+	while (k--) {
-+		/* change all left-side indexes */
-+		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+			break;
-+		if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+			break;
-+		path[k].p_idx->ei_block = border;
-+		if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+			break;
-+	}
-+
-+	return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+			   struct ext3_extent *ex1,
-+			   struct ext3_extent *ex2)
-+{
-+	if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+		return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+	if (ex1->ee_len >= 4)
-+		return 0;
-+#endif
-+
-+	if (!tree->ops->mergable)
-+		return 1;
-+
-+	return tree->ops->mergable(ex1, ex2);
-+}
-+
-+/*
-+ * this routine tries to merge requested extent into the existing
-+ * extent or inserts requested extent as new one into the tree,
-+ * creating new leaf in no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+			   struct ext3_ext_path *path,
-+			   struct ext3_extent *newext)
-+{
-+	struct ext3_extent_header * eh;
-+	struct ext3_extent *ex, *fex;
-+	struct ext3_extent *nearex; /* nearest extent */
-+	struct ext3_ext_path *npath = NULL;
-+	int depth, len, err, next;
-+
-+	EXT_ASSERT(newext->ee_len > 0);
-+	depth = EXT_DEPTH(tree);
-+	ex = path[depth].p_ext;
-+	EXT_ASSERT(path[depth].p_hdr);
-+
-+	/* try to insert block into found extent and return */
-+	if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+		ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+			  newext->ee_len, ex->ee_block, ex->ee_len,
-+			  ex->ee_start);
-+		if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+			return err;
-+		ex->ee_len += newext->ee_len;
-+		eh = path[depth].p_hdr;
-+		nearex = ex;
-+		goto merge;
-+	}
-+
-+repeat:
-+	depth = EXT_DEPTH(tree);
-+	eh = path[depth].p_hdr;
-+	if (eh->eh_entries < eh->eh_max)
-+		goto has_space;
-+
-+	/* probably next leaf has space for us? */
-+	fex = EXT_LAST_EXTENT(eh);
-+	next = ext3_ext_next_leaf_block(tree, path);
-+	if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+		ext_debug(tree, "next leaf block - %d\n", next);
-+		EXT_ASSERT(!npath);
-+		npath = ext3_ext_find_extent(tree, next, NULL);
-+		if (IS_ERR(npath))
-+			return PTR_ERR(npath);
-+		EXT_ASSERT(npath->p_depth == path->p_depth);
-+		eh = npath[depth].p_hdr;
-+		if (eh->eh_entries < eh->eh_max) {
-+			ext_debug(tree,	"next leaf isnt full(%d)\n",
-+				  eh->eh_entries);
-+			path = npath;
-+			goto repeat;
-+		}
-+		ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+			  eh->eh_entries, eh->eh_max);
-+	}
-+
-+	/*
-+	 * there is no free space in found leaf
-+	 * we're gonna add new leaf in the tree
-+	 */
-+	err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+	if (err)
-+		goto cleanup;
-+	depth = EXT_DEPTH(tree);
-+	eh = path[depth].p_hdr;
-+
-+has_space:
-+	nearex = path[depth].p_ext;
-+
-+	if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+		goto cleanup;
-+
-+	if (!nearex) {
-+		/* there is no extent in this leaf, create first one */
-+		ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+			  newext->ee_block, newext->ee_start,
-+			  newext->ee_len);
-+		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+	} else if (newext->ee_block > nearex->ee_block) {
-+		EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+		if (nearex != EXT_LAST_EXTENT(eh)) {
-+			len = EXT_MAX_EXTENT(eh) - nearex;
-+			len = (len - 1) * sizeof(struct ext3_extent);
-+			len = len < 0 ? 0 : len;
-+			ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+				  "move %d from 0x%p to 0x%p\n",
-+				  newext->ee_block, newext->ee_start,
-+				  newext->ee_len,
-+				  nearex, len, nearex + 1, nearex + 2);
-+			memmove(nearex + 2, nearex + 1, len);
-+		}
-+		path[depth].p_ext = nearex + 1;
-+	} else {
-+		EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+		len = len < 0 ? 0 : len;
-+		ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+			  "move %d from 0x%p to 0x%p\n",
-+			  newext->ee_block, newext->ee_start, newext->ee_len,
-+			  nearex, len, nearex + 1, nearex + 2);
-+		memmove(nearex + 1, nearex, len);
-+		path[depth].p_ext = nearex;
-+	}
-+
-+	eh->eh_entries++;
-+	nearex = path[depth].p_ext;
-+	nearex->ee_block = newext->ee_block;
-+	nearex->ee_start = newext->ee_start;
-+	nearex->ee_len = newext->ee_len;
-+	/* FIXME: support for large fs */
-+	nearex->ee_start_hi = 0;
-+
-+merge:
-+	/* try to merge extents to the right */
-+	while (nearex < EXT_LAST_EXTENT(eh)) {
-+		if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+			break;
-+		/* merge with next extent! */
-+		nearex->ee_len += nearex[1].ee_len;
-+		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+				sizeof(struct ext3_extent);
-+			memmove(nearex + 1, nearex + 2, len);
-+		}
-+		eh->eh_entries--;
-+		EXT_ASSERT(eh->eh_entries > 0);
-+	}
-+
-+	/* try to merge extents to the left */
-+
-+	/* time to correct all indexes above */
-+	err = ext3_ext_correct_indexes(handle, tree, path);
-+	if (err)
-+		goto cleanup;
-+
-+	err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+	if (npath) {
-+		ext3_ext_drop_refs(npath);
-+		kfree(npath);
-+	}
-+	ext3_ext_tree_changed(tree);
-+	ext3_ext_invalidate_cache(tree);
-+	return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+			unsigned long num, ext_prepare_callback func)
-+{
-+	struct ext3_ext_path *path = NULL;
-+	struct ext3_ext_cache cbex;
-+	struct ext3_extent *ex;
-+	unsigned long next, start = 0, end = 0;
-+	unsigned long last = block + num;
-+	int depth, exists, err = 0;
-+
-+	EXT_ASSERT(tree);
-+	EXT_ASSERT(func);
-+	EXT_ASSERT(tree->inode);
-+	EXT_ASSERT(tree->root);
-+
-+	while (block < last && block != EXT_MAX_BLOCK) {
-+		num = last - block;
-+		/* find extent for this block */
-+		path = ext3_ext_find_extent(tree, block, path);
-+		if (IS_ERR(path)) {
-+			err = PTR_ERR(path);
-+			path = NULL;
-+			break;
-+		}
-+
-+		depth = EXT_DEPTH(tree);
-+		EXT_ASSERT(path[depth].p_hdr);
-+		ex = path[depth].p_ext;
-+		next = ext3_ext_next_allocated_block(path);
-+
-+		exists = 0;
-+		if (!ex) {
-+			/* there is no extent yet, so try to allocate
-+			 * all requested space */
-+			start = block;
-+			end = block + num;
-+		} else if (ex->ee_block > block) {
-+			/* need to allocate space before found extent */
-+			start = block;
-+			end = ex->ee_block;
-+			if (block + num < end)
-+				end = block + num;
-+		} else if (block >= ex->ee_block + ex->ee_len) {
-+			/* need to allocate space after found extent */
-+			start = block;
-+			end = block + num;
-+			if (end >= next)
-+				end = next;
-+		} else if (block >= ex->ee_block) {
-+			/* 
-+			 * some part of requested space is covered
-+			 * by found extent
-+			 */
-+			start = block;
-+			end = ex->ee_block + ex->ee_len;
-+			if (block + num < end)
-+				end = block + num;
-+			exists = 1;
-+		} else {
-+			BUG();
-+		}
-+		EXT_ASSERT(end > start);
-+
-+		if (!exists) {
-+			cbex.ec_block = start;
-+			cbex.ec_len = end - start;
-+			cbex.ec_start = 0;
-+			cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+		} else {
-+			cbex.ec_block = ex->ee_block;
-+			cbex.ec_len = ex->ee_len;
-+			cbex.ec_start = ex->ee_start;
-+			cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+		}
-+
-+		EXT_ASSERT(cbex.ec_len > 0);
-+		EXT_ASSERT(path[depth].p_hdr);
-+		err = func(tree, path, &cbex);
-+		ext3_ext_drop_refs(path);
-+
-+		if (err < 0)
-+			break;
-+		if (err == EXT_REPEAT)
-+			continue;
-+		else if (err == EXT_BREAK) {
-+			err = 0;
-+			break;
-+		}
-+
-+		if (EXT_DEPTH(tree) != depth) {
-+			/* depth was changed. we have to realloc path */
-+			kfree(path);
-+			path = NULL;
-+		}
-+
-+		block = cbex.ec_block + cbex.ec_len;
-+	}
-+
-+	if (path) {
-+		ext3_ext_drop_refs(path);
-+		kfree(path);
-+	}
-+
-+	return err;
-+}
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+		      __u32 len, __u32 start, int type)
-+{
-+	EXT_ASSERT(len > 0);
-+	if (tree->cex) {
-+		tree->cex->ec_type = type;
-+		tree->cex->ec_block = block;
-+		tree->cex->ec_len = len;
-+		tree->cex->ec_start = start;
-+	}
-+}
-+
-+/*
-+ * this routine calculates boundaries of the gap the requested block fits into
-+ * and cache this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+			  struct ext3_ext_path *path,
-+			  unsigned long block)
-+{
-+	int depth = EXT_DEPTH(tree);
-+	unsigned long lblock, len;
-+	struct ext3_extent *ex;
-+
-+	if (!tree->cex)
-+		return;
-+
-+	ex = path[depth].p_ext;
-+	if (ex == NULL) {
-+		/* there is no extent yet, so gap is [0;-] */
-+		lblock = 0;
-+		len = EXT_MAX_BLOCK;
-+		ext_debug(tree, "cache gap(whole file):");
-+	} else if (block < ex->ee_block) {
-+		lblock = block;
-+		len = ex->ee_block - block;
-+		ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+			  (unsigned long) block,
-+			  (unsigned long) ex->ee_block,
-+			  (unsigned long) ex->ee_len);
-+	} else if (block >= ex->ee_block + ex->ee_len) {
-+		lblock = ex->ee_block + ex->ee_len;
-+		len = ext3_ext_next_allocated_block(path);
-+		ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+			  (unsigned long) ex->ee_block,
-+			  (unsigned long) ex->ee_len,
-+			  (unsigned long) block);
-+		EXT_ASSERT(len > lblock);
-+		len = len - lblock;
-+	} else {
-+		lblock = len = 0;
-+		BUG();
-+	}
-+
-+	ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+	ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+		  struct ext3_extent *ex)
-+{
-+	struct ext3_ext_cache *cex = tree->cex;
-+
-+	/* is there cache storage at all? */
-+	if (!cex)
-+		return EXT3_EXT_CACHE_NO;
-+
-+	/* has cache valid data? */
-+	if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+		return EXT3_EXT_CACHE_NO;
-+
-+	EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+		   cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+		ex->ee_block = cex->ec_block;
-+		ex->ee_start = cex->ec_start;
-+		ex->ee_start_hi = 0;
-+		ex->ee_len = cex->ec_len;
-+		ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+			  (unsigned long) block,
-+			  (unsigned long) ex->ee_block,
-+			  (unsigned long) ex->ee_len,
-+			  (unsigned long) ex->ee_start);
-+		return cex->ec_type;
-+	}
-+
-+	/* not in cache */
-+	return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * routine removes index from the index block
-+ * it's used in truncate case only. thus all requests are for
-+ * last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+		    struct ext3_ext_path *path)
-+{
-+	struct buffer_head *bh;
-+	int err;
-+	
-+	/* free index block */
-+	path--;
-+	EXT_ASSERT(path->p_hdr->eh_entries);
-+	if ((err = ext3_ext_get_access(handle, tree, path)))
-+		return err;
-+	path->p_hdr->eh_entries--;
-+	if ((err = ext3_ext_dirty(handle, tree, path)))
-+		return err;
-+	ext_debug(tree, "index is empty, remove it, free block %d\n",
-+		  path->p_idx->ei_leaf);
-+	bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+	ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+	ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+	return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+				     struct ext3_ext_path *path)
-+{
-+	int depth = EXT_DEPTH(tree);
-+	int needed;
-+
-+	if (path) {
-+		/* probably there is space in leaf? */
-+		if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+			return 1;
-+	}
-+	
-+	/*
-+	 * the worst case we're expecting is creation of the
-+	 * new root (growing in depth) with index splitting
-+	 * for splitting we have to consider depth + 1 because
-+	 * previous growing could increase it
-+	 */
-+	depth = depth + 1;
-+
-+	/* 
-+	 * growing in depth:
-+	 * block allocation + new root + old root
-+	 */
-+	needed = EXT3_ALLOC_NEEDED + 2;
-+
-+	/* index split. we may need:
-+	 *   allocate intermediate indexes and new leaf
-+	 *   change two blocks at each level, but root
-+	 *   modify root block (inode)
-+	 */
-+	needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+	return needed;
-+}
-+
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+		      struct ext3_ext_path *path, unsigned long start,
-+		      unsigned long end)
-+{
-+	struct ext3_extent *ex, tex;
-+	struct ext3_ext_path *npath;
-+	int depth, creds, err;
-+
-+	depth = EXT_DEPTH(tree);
-+	ex = path[depth].p_ext;
-+	EXT_ASSERT(ex);
-+	EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+	EXT_ASSERT(ex->ee_block < start);
-+
-+	/* calculate tail extent */
-+	tex.ee_block = end + 1;
-+	EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+	tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+	creds = ext3_ext_calc_credits_for_insert(tree, path);
-+	handle = ext3_ext_journal_restart(handle, creds);
-+	if (IS_ERR(handle))
-+		return PTR_ERR(handle);
-+	
-+	/* calculate head extent. use primary extent */
-+	err = ext3_ext_get_access(handle, tree, path + depth);
-+	if (err)
-+		return err;
-+	ex->ee_len = start - ex->ee_block;
-+	err = ext3_ext_dirty(handle, tree, path + depth);
-+	if (err)
-+		return err;
-+
-+	/* FIXME: some callback to free underlying resource
-+	 * and correct ee_start? */
-+	ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+		  ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+	npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+	if (IS_ERR(npath))
-+		return PTR_ERR(npath);
-+	depth = EXT_DEPTH(tree);
-+	EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+	EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+	err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+	ext3_ext_drop_refs(npath);
-+	kfree(npath);
-+
-+	return err;
-+}
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+		 struct ext3_ext_path *path, unsigned long start,
-+		 unsigned long end)
-+{
-+	struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+	int err = 0, correct_index = 0;
-+	int depth = EXT_DEPTH(tree), credits;
-+	struct ext3_extent_header *eh;
-+	unsigned a, b, block, num;
-+
-+	ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+	if (!path[depth].p_hdr)
-+		path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+	eh = path[depth].p_hdr;
-+	EXT_ASSERT(eh);
-+	EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+	EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+	
-+	/* find where to start removing */
-+	le = ex = EXT_LAST_EXTENT(eh);
-+	while (ex != EXT_FIRST_EXTENT(eh)) {
-+		if (ex->ee_block <= end)
-+			break;
-+		ex--;
-+	}
-+
-+	if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+		/* removal of internal part of the extent requested
-+		 * tail and head must be placed in different extent
-+		 * so, we have to insert one more extent */
-+		path[depth].p_ext = ex;
-+		return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+	}
-+	
-+	lu = ex;
-+	while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+		ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+		path[depth].p_ext = ex;
-+	
-+		a = ex->ee_block > start ? ex->ee_block : start;
-+		b = ex->ee_block + ex->ee_len - 1 < end ?
-+			ex->ee_block + ex->ee_len - 1 : end;
-+		
-+		ext_debug(tree, "  border %u:%u\n", a, b);
-+
-+		if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+			block = 0;
-+			num = 0;
-+			BUG();
-+		} else if (a != ex->ee_block) {
-+			/* remove tail of the extent */
-+			block = ex->ee_block;
-+			num = a - block;
-+		} else if (b != ex->ee_block + ex->ee_len - 1) {
-+			/* remove head of the extent */
-+			block = a;
-+			num = b - a;
-+		} else {
-+			/* remove whole extent: excellent! */
-+			block = ex->ee_block; 
-+			num = 0;
-+			EXT_ASSERT(a == ex->ee_block &&
-+				   b == ex->ee_block + ex->ee_len - 1);
-+		}
-+
-+		if (ex == EXT_FIRST_EXTENT(eh))
-+			correct_index = 1;
-+
-+		credits = 1;
-+		if (correct_index)
-+			credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+		if (tree->ops->remove_extent_credits)
-+			credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
-+		
-+		handle = ext3_ext_journal_restart(handle, credits);
-+		if (IS_ERR(handle)) {
-+			err = PTR_ERR(handle);
-+			goto out;
-+		}
-+
-+		err = ext3_ext_get_access(handle, tree, path + depth);
-+		if (err)
-+			goto out;
-+
-+		if (tree->ops->remove_extent)
-+			err = tree->ops->remove_extent(tree, ex, a, b);
-+		if (err)
-+			goto out;
-+
-+		if (num == 0) {
-+			/* this extent is removed entirely; mark slot unused */
-+			ex->ee_start = ex->ee_start_hi = 0;
-+			eh->eh_entries--;
-+			fu = ex;
-+		}
-+
-+		ex->ee_block = block;
-+		ex->ee_len = num;
-+
-+		err = ext3_ext_dirty(handle, tree, path + depth);
-+		if (err)
-+			goto out;
-+
-+		ext_debug(tree, "new extent: %u:%u:%u\n",
-+			  ex->ee_block, ex->ee_len, ex->ee_start);
-+		ex--;
-+	}
-+
-+	if (fu) {
-+		/* reuse unused slots */
-+		while (lu < le) {
-+			if (lu->ee_start) {
-+				*fu = *lu;
-+				lu->ee_start = lu->ee_start_hi = 0;
-+				fu++;
-+			}
-+			lu++;
-+		}
-+	}
-+
-+	if (correct_index && eh->eh_entries)
-+		err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+	/* if this leaf is free, then we should
-+	 * remove it from index block above */
-+	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+		err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+	return err;
-+}
-+
-+
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+	struct ext3_extent_idx *ix;
-+	
-+	ix = EXT_LAST_INDEX(hdr);
-+	while (ix != EXT_FIRST_INDEX(hdr)) {
-+		if (ix->ei_block <= block)
-+			break;
-+		ix--;
-+	}
-+	return ix;
-+}
-+
-+/*
-+ * returns 1 if current index has to be freed (even partially)
-+ */
-+static int inline
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+	EXT_ASSERT(path->p_idx);
-+
-+	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+		return 0;
-+
-+	/*
-+	 * if truncate on deeper level happened, it wasn't partial
-+	 * so we have to consider current index for truncation
-+	 */
-+	if (path->p_hdr->eh_entries == path->p_block)
-+		return 0;
-+	return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+			  unsigned long start, unsigned long end)
-+{
-+	struct inode *inode = tree->inode;
-+	struct super_block *sb = inode->i_sb;
-+	int depth = EXT_DEPTH(tree);
-+	struct ext3_ext_path *path;
-+	handle_t *handle;
-+	int i = 0, err = 0;
-+
-+	ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+	/* probably first extent we're gonna free will be last in block */
-+	handle = ext3_journal_start(inode, depth + 1);
-+	if (IS_ERR(handle))
-+		return PTR_ERR(handle);
-+
-+	ext3_ext_invalidate_cache(tree);
-+
-+	/*
-+	 * we start scanning from right side freeing all the blocks
-+	 * after i_size and walking into the deep
-+	 */
-+	path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+	if (IS_ERR(path)) {
-+		ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+		ext3_journal_stop(handle);
-+		return -ENOMEM;
-+	}
-+	memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+	path[i].p_hdr = EXT_ROOT_HDR(tree);
-+	
-+	while (i >= 0 && err == 0) {
-+		if (i == depth) {
-+			/* this is leaf block */
-+			err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+			/* root level has p_bh == NULL, brelse() eats this */
-+			brelse(path[i].p_bh);
-+			i--;
-+			continue;
-+		}
-+		
-+		/* this is index block */
-+		if (!path[i].p_hdr) {
-+			ext_debug(tree, "initialize header\n");
-+			path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+		}
-+
-+		EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+		EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+		
-+		if (!path[i].p_idx) {
-+			/* this level hasn't been touched yet */
-+			path[i].p_idx =
-+				ext3_ext_last_covered(path[i].p_hdr, end);
-+			path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+			ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+				  path[i].p_hdr, path[i].p_hdr->eh_entries);
-+		} else {
-+			/* we've already been here, look at the next index */
-+			path[i].p_idx--;
-+		}
-+
-+		ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+			  i, EXT_FIRST_INDEX(path[i].p_hdr),
-+			  path[i].p_idx);
-+		if (ext3_ext_more_to_rm(path + i)) {
-+			/* go to the next level */
-+			ext_debug(tree, "move to level %d (block %d)\n",
-+				  i + 1, path[i].p_idx->ei_leaf);
-+			memset(path + i + 1, 0, sizeof(*path));
-+			path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+			if (!path[i+1].p_bh) {
-+				/* should we reset i_size? */
-+				err = -EIO;
-+				break;
-+			}
-+			/* put actual number of indexes to know whether this
-+			 * number got changed at the next iteration */
-+			path[i].p_block = path[i].p_hdr->eh_entries;
-+			i++;
-+		} else {
-+			/* we finish processing this index, go up */
-+			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+				/* index is empty, remove it
-+				 * handle must be already prepared by the
-+				 * truncatei_leaf() */
-+				err = ext3_ext_rm_idx(handle, tree, path + i);
-+			}
-+			/* root level has p_bh == NULL, brelse() eats this */
-+			brelse(path[i].p_bh);
-+			i--;
-+			ext_debug(tree, "return to level %d\n", i);
-+		}
-+	}
-+
-+	/* TODO: flexible tree reduction should be here */
-+	if (path->p_hdr->eh_entries == 0) {
-+		/*
-+		 * truncate to zero freed all the tree
-+		 * so, we need to correct eh_depth
-+		 */
-+		err = ext3_ext_get_access(handle, tree, path);
-+		if (err == 0) {
-+			EXT_ROOT_HDR(tree)->eh_depth = 0;
-+			EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+			err = ext3_ext_dirty(handle, tree, path);
-+		}
-+	}
-+	ext3_ext_tree_changed(tree);
-+
-+	kfree(path);
-+	ext3_journal_stop(handle);
-+
-+	return err;
-+}
-+
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+	int lcap, icap, rcap, leafs, idxs, num;
-+
-+	rcap = ext3_ext_space_root(tree);
-+	if (blocks <= rcap) {
-+		/* all extents fit to the root */
-+		return 0;
-+	}
-+
-+	rcap = ext3_ext_space_root_idx(tree);
-+	lcap = ext3_ext_space_block(tree);
-+	icap = ext3_ext_space_block_idx(tree);
-+
-+	num = leafs = (blocks + lcap - 1) / lcap;
-+	if (leafs <= rcap) {
-+		/* all pointers to leafs fit to the root */
-+		return leafs;
-+	}
-+
-+	/* ok. we need separate index block(s) to link all leaf blocks */
-+	idxs = (leafs + icap - 1) / icap;
-+	do {
-+		num += idxs;
-+		idxs = (idxs + icap - 1) / icap;
-+	} while (idxs > rcap);
-+
-+	return num;
-+}
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+	/*
-+	 * possible initialization would be here
-+	 */
-+
-+	if (test_opt(sb, EXTENTS)) {
-+		printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+		printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+		printk(", check binsearch");
-+#endif
-+		printk("\n");
-+	}
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+	/* we use in-core data, not bh */
-+	return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+	struct inode *inode = buffer;
-+	return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+			     struct ext3_extent *ex2)
-+{
-+	/* FIXME: support for large fs */
-+	if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+		return 1;
-+	return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+			   struct ext3_extent *ex,
-+			   unsigned long from, unsigned long to)
-+{
-+	int needed;
-+	
-+	/* at present, extent can't cross block group */;
-+	needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+	needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+	return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+		   struct ext3_extent *ex,
-+		   unsigned long from, unsigned long to)
-+{
-+	int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+	handle_t *handle = ext3_journal_start(tree->inode, needed);
-+	struct buffer_head *bh;
-+	int i;
-+
-+	if (IS_ERR(handle))
-+		return PTR_ERR(handle);
-+	if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+		/* tail removal */
-+		unsigned long num, start;
-+		num = ex->ee_block + ex->ee_len - from;
-+		start = ex->ee_start + ex->ee_len - num;
-+		ext_debug(tree, "free last %lu blocks starting %lu\n",
-+			  num, start);
-+		for (i = 0; i < num; i++) {
-+			bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+			ext3_forget(handle, 0, tree->inode, bh, start + i);
-+		}
-+		ext3_free_blocks(handle, tree->inode, start, num);
-+	} else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+		printk("strange request: removal %lu-%lu from %u:%u\n",
-+		       from, to, ex->ee_block, ex->ee_len);
-+	} else {
-+		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+		       from, to, ex->ee_block, ex->ee_len);
-+	}
-+	ext3_journal_stop(handle);
-+	return 0;
-+}
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+			      struct ext3_ext_path *path, unsigned long block)
-+{
-+	struct ext3_inode_info *ei = EXT3_I(inode);
-+	unsigned long bg_start;
-+	unsigned long colour;
-+	int depth;
-+	
-+	if (path) {
-+		struct ext3_extent *ex;
-+		depth = path->p_depth;
-+		
-+		/* try to predict block placement */
-+		if ((ex = path[depth].p_ext))
-+			return ex->ee_start + (block - ex->ee_block);
-+
-+		/* it looks like the index is empty;
-+		 * try to find starting from the index itself */
-+		if (path[depth].p_bh)
-+			return path[depth].p_bh->b_blocknr;
-+	}
-+
-+	/* OK. use inode's group */
-+	bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+		le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+	colour = (current->pid % 16) *
-+			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+	return bg_start + colour + block;
-+}
-+
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+			     struct ext3_ext_path *path,
-+			     struct ext3_extent *ex, int *err)
-+{
-+	struct inode *inode = tree->inode;
-+	int newblock, goal;
-+	
-+	EXT_ASSERT(path);
-+	EXT_ASSERT(ex);
-+	EXT_ASSERT(ex->ee_start);
-+	EXT_ASSERT(ex->ee_len);
-+	
-+	/* reuse block from the extent to order data/metadata */
-+	newblock = ex->ee_start++;
-+	ex->ee_len--;
-+	if (ex->ee_len == 0) {
-+		ex->ee_len = 1;
-+		/* allocate new block for the extent */
-+		goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+		ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+		ex->ee_start_hi = 0;
-+		if (ex->ee_start == 0) {
-+			/* error occurred: restore old extent */
-+			ex->ee_start = newblock;
-+			return 0;
-+		}
-+	}
-+	return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+	.get_write_access	= ext3_get_inode_write_access,
-+	.mark_buffer_dirty	= ext3_mark_buffer_dirty,
-+	.mergable		= ext3_ext_mergable,
-+	.new_block		= ext3_new_block_cb,
-+	.remove_extent		= ext3_remove_blocks,
-+	.remove_extent_credits	= ext3_remove_blocks_credits,
-+};
-+
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+			 struct inode *inode)
-+{
-+	tree->inode = inode;
-+	tree->root = (void *) EXT3_I(inode)->i_data;
-+	tree->buffer = (void *) inode;
-+	tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+	tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+	tree->ops = &ext3_blockmap_helpers;
-+}
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+		       long iblock, struct buffer_head *bh_result,
-+		       int create, int extend_disksize)
-+{
-+	struct ext3_ext_path *path = NULL;
-+	struct ext3_extent newex;
-+	struct ext3_extent *ex;
-+	int goal, newblock, err = 0, depth;
-+	struct ext3_extents_tree tree;
-+
-+	clear_buffer_new(bh_result);
-+	ext3_init_tree_desc(&tree, inode);
-+	ext_debug(&tree, "block %d requested for inode %u\n",
-+		  (int) iblock, (unsigned) inode->i_ino);
-+	down(&EXT3_I(inode)->truncate_sem);
-+
-+	/* check in cache */
-+	if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+		if (goal == EXT3_EXT_CACHE_GAP) {
-+			if (!create) {
-+				/* block isn't allocated yet and
-+				 * user don't want to allocate it */
-+				goto out2;
-+			}
-+			/* we should allocate requested block */
-+		} else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+			/* block is already allocated */
-+			newblock = iblock - newex.ee_block + newex.ee_start;
-+			goto out;
-+		} else {
-+			EXT_ASSERT(0);
-+		}
-+	}
-+
-+	/* find extent for this block */
-+	path = ext3_ext_find_extent(&tree, iblock, NULL);
-+	if (IS_ERR(path)) {
-+		err = PTR_ERR(path);
-+		path = NULL;
-+		goto out2;
-+	}
-+
-+	depth = EXT_DEPTH(&tree);
-+
-+	/*
-+	 * consistent leaf must not be empty
-+	 * this situations is possible, though, _during_ tree modification
-+	 * this is why assert can't be put in ext3_ext_find_extent()
-+	 */
-+	EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+	if ((ex = path[depth].p_ext)) {
-+		/* if found exent covers block, simple return it */
-+		if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+			newblock = iblock - ex->ee_block + ex->ee_start;
-+			ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+				  (int) iblock, ex->ee_block, ex->ee_len,
-+				  newblock);
-+			ext3_ext_put_in_cache(&tree, ex->ee_block,
-+					      ex->ee_len, ex->ee_start,
-+					      EXT3_EXT_CACHE_EXTENT);
-+			goto out;
-+		}
-+	}
-+
-+	/*
-+	 * requested block isn't allocated yet
-+	 * we couldn't try to create block if create flag is zero 
-+	 */
-+	if (!create) {
-+		/* put just found gap into cache to speedup subsequest reqs */
-+		ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+		goto out2;
-+	}
-+
-+	/* allocate new block */
-+	goal = ext3_ext_find_goal(inode, path, iblock);
-+	newblock = ext3_new_block(handle, inode, goal, &err);
-+	if (!newblock)
-+		goto out2;
-+	ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+		  goal, newblock);
-+
-+	/* try to insert new extent into found leaf and return */
-+	newex.ee_block = iblock;
-+	newex.ee_start = newblock;
-+	newex.ee_start_hi = 0;
-+	newex.ee_len = 1;
-+	err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+	if (err)
-+		goto out2;
-+	
-+	if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+		EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+	/* previous routine could use block we allocated */
-+	newblock = newex.ee_start;
-+	set_buffer_new(bh_result);
-+
-+	ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+			      newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+	ext3_ext_show_leaf(&tree, path);
-+	map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+	if (path) {
-+		ext3_ext_drop_refs(path);
-+		kfree(path);
-+	}
-+	up(&EXT3_I(inode)->truncate_sem);
-+
-+	return err;	
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+	struct address_space *mapping = inode->i_mapping;
-+	struct super_block *sb = inode->i_sb;
-+	struct ext3_extents_tree tree;
-+	unsigned long last_block;
-+	handle_t *handle;
-+	int err = 0;
-+
-+	ext3_init_tree_desc(&tree, inode);
-+
-+	/*
-+	 * probably first extent we're gonna free will be last in block
-+	 */
-+	err = ext3_writepage_trans_blocks(inode) + 3;
-+	handle = ext3_journal_start(inode, err);
-+	if (IS_ERR(handle)) {
-+		if (page) {
-+			clear_highpage(page);
-+			flush_dcache_page(page);
-+			unlock_page(page);
-+			page_cache_release(page);
-+		}
-+		return;
-+	}
-+
-+	if (page)
-+		ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+	down(&EXT3_I(inode)->truncate_sem);
-+	ext3_ext_invalidate_cache(&tree);
-+
-+	/* 
-+	 * TODO: optimization is possible here
-+	 * probably we need not scaning at all,
-+	 * because page truncation is enough
-+	 */
-+	if (ext3_orphan_add(handle, inode))
-+		goto out_stop;
-+
-+	/* we have to know where to truncate from in crash case */
-+	EXT3_I(inode)->i_disksize = inode->i_size;
-+	ext3_mark_inode_dirty(handle, inode);
-+
-+	last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+			EXT3_BLOCK_SIZE_BITS(sb);
-+	err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+	
-+	/* In a multi-transaction truncate, we only make the final
-+	 * transaction synchronous */
-+	if (IS_SYNC(inode))
-+		handle->h_sync = 1;
-+
-+out_stop:
-+	/*
-+	 * If this was a simple ftruncate(), and the file will remain alive
-+	 * then we need to clear up the orphan record which we created above.
-+	 * However, if this was a real unlink then we were called by
-+	 * ext3_delete_inode(), and we allow that function to clean up the
-+	 * orphan info for us.
-+	 */
-+	if (inode->i_nlink)
-+		ext3_orphan_del(handle, inode);
-+
-+	up(&EXT3_I(inode)->truncate_sem);
-+	ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculate max number of blocks we could modify
-+ * in order to allocate new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+	struct ext3_extents_tree tree;
-+	int needed;
-+	
-+	ext3_init_tree_desc(&tree, inode);
-+	
-+	needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+	/* caller want to allocate num blocks */
-+	needed *= num;
-+	
-+#ifdef CONFIG_QUOTA
-+	/* 
-+	 * FIXME: real calculation should be here
-+	 * it depends on blockmap format of qouta file
-+	 */
-+	needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+	return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+	struct ext3_extents_tree tree;
-+
-+	ext3_init_tree_desc(&tree, inode);
-+	ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+	struct ext3_extents_tree tree;
-+
-+	ext3_init_tree_desc(&tree, inode);
-+	return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+	
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+			 struct ext3_ext_path *path,
-+			 struct ext3_ext_cache *newex)
-+{
-+	struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+	if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+		return EXT_CONTINUE;
-+
-+	if (buf->err < 0)
-+		return EXT_BREAK;
-+	if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+		return EXT_BREAK;
-+
-+	if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+		buf->err++;
-+		buf->cur += sizeof(*newex);
-+	} else {
-+		buf->err = -EFAULT;
-+		return EXT_BREAK;
-+	}
-+	return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+			  struct ext3_ext_path *path,
-+			  struct ext3_ext_cache *ex)
-+{
-+	struct ext3_extent_tree_stats *buf =
-+		(struct ext3_extent_tree_stats *) tree->private;
-+	int depth;
-+
-+	if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+		return EXT_CONTINUE;
-+
-+	depth = EXT_DEPTH(tree);
-+	buf->extents_num++;
-+	if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+		buf->leaf_num++;
-+	return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+		   unsigned long arg)
-+{
-+	int err = 0;
-+
-+	if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+		return -EINVAL;
-+
-+	if (cmd == EXT3_IOC_GET_EXTENTS) {
-+		struct ext3_extent_buf buf;
-+		struct ext3_extents_tree tree;
-+
-+		if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+			return -EFAULT;
-+
-+		ext3_init_tree_desc(&tree, inode);
-+		buf.cur = buf.buffer;
-+		buf.err = 0;
-+		tree.private = &buf;
-+		down(&EXT3_I(inode)->truncate_sem);
-+		err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+					  ext3_ext_store_extent_cb);
-+		up(&EXT3_I(inode)->truncate_sem);
-+		if (err == 0)
-+			err = buf.err;
-+	} else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+		struct ext3_extent_tree_stats buf;
-+		struct ext3_extents_tree tree;
-+
-+		ext3_init_tree_desc(&tree, inode);
-+		down(&EXT3_I(inode)->truncate_sem);
-+		buf.depth = EXT_DEPTH(&tree);
-+		buf.extents_num = 0;
-+		buf.leaf_num = 0;
-+		tree.private = &buf;
-+		err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+					  ext3_ext_collect_stats_cb);
-+		up(&EXT3_I(inode)->truncate_sem);
-+		if (!err)
-+			err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+	} else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+		struct ext3_extents_tree tree;
-+		ext3_init_tree_desc(&tree, inode);
-+		down(&EXT3_I(inode)->truncate_sem);
-+		err = EXT_DEPTH(&tree);
-+		up(&EXT3_I(inode)->truncate_sem);
-+	}
-+
-+	return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-2.6.12-rc6/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/ialloc.c	2005-06-14 16:31:08.634433030 +0200
-+++ linux-2.6.12-rc6/fs/ext3/ialloc.c	2005-06-14 16:31:25.846346882 +0200
-@@ -598,7 +598,7 @@
- 	ei->i_dir_start_lookup = 0;
- 	ei->i_disksize = 0;
- 
--	ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+	ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- 	if (S_ISLNK(mode))
- 		ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- 	/* dirsync only applies to directories */
-@@ -639,6 +639,18 @@
- 		DQUOT_FREE_INODE(inode);
- 		goto fail2;
-   	}
-+	if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+		EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+		ext3_extents_initialize_blockmap(handle, inode);
-+		if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+			err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+			if (err) goto fail;
-+			EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+			BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+			err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+		}
-+	}
-+
- 	err = ext3_mark_inode_dirty(handle, inode);
- 	if (err) {
- 		ext3_std_error(sb, err);
-Index: linux-2.6.12-rc6/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/inode.c	2005-06-14 16:31:09.701815830 +0200
-+++ linux-2.6.12-rc6/fs/ext3/inode.c	2005-06-14 16:31:25.861971882 +0200
-@@ -40,7 +40,7 @@
- #include "iopen.h"
- #include "acl.h"
- 
--static int ext3_writepage_trans_blocks(struct inode *inode);
-+int ext3_writepage_trans_blocks(struct inode *inode);
- 
- /*
-  * Test whether an inode is a fast symlink.
-@@ -784,6 +784,17 @@
- 	return err;
- }
- 
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+		    struct buffer_head *bh, int create, int extend_disksize)
-+{
-+	if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+		return ext3_ext_get_block(handle, inode, block, bh, create,
-+					  extend_disksize);
-+	return ext3_get_block_handle(handle, inode, block, bh, create,
-+				     extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- 			struct buffer_head *bh_result, int create)
- {
-@@ -794,8 +805,8 @@
- 		handle = ext3_journal_current_handle();
- 		J_ASSERT(handle != 0);
- 	}
--	ret = ext3_get_block_handle(handle, inode, iblock,
--				bh_result, create, 1);
-+	ret = ext3_get_block_wrap(handle, inode, iblock,
-+				  bh_result, create, 1);
- 	return ret;
- }
- 
-@@ -839,7 +850,7 @@
- 
- get_block:
- 	if (ret == 0)
--		ret = ext3_get_block_handle(handle, inode, iblock,
-+		ret = ext3_get_block_wrap(handle, inode, iblock,
- 					bh_result, create, 0);
- 	bh_result->b_size = (1 << inode->i_blkbits);
- 	return ret;
-@@ -859,7 +870,7 @@
- 	dummy.b_state = 0;
- 	dummy.b_blocknr = -1000;
- 	buffer_trace_init(&dummy.b_history);
--	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+	*errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- 	if (!*errp && buffer_mapped(&dummy)) {
- 		struct buffer_head *bh;
- 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1593,7 +1604,7 @@
-  * This required during truncate. We need to physically zero the tail end
-  * of that block so it doesn't yield old data if the file is later grown.
-  */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- 		struct address_space *mapping, loff_t from)
- {
- 	unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2104,6 +2115,9 @@
- 			return;
- 	}
- 
-+	if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+		return ext3_ext_truncate(inode, page);
-+
- 	handle = start_transaction(inode);
- 	if (IS_ERR(handle)) {
- 		if (page) {
-@@ -2850,12 +2864,15 @@
-  * block and work out the exact number of indirects which are touched.  Pah.
-  */
- 
--static int ext3_writepage_trans_blocks(struct inode *inode)
-+int ext3_writepage_trans_blocks(struct inode *inode)
- {
- 	int bpp = ext3_journal_blocks_per_page(inode);
- 	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- 	int ret;
- 
-+	if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+		return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- 	if (ext3_should_journal_data(inode))
- 		ret = 3 * (bpp + indirects) + 2;
- 	else
-Index: linux-2.6.12-rc6/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/Makefile	2005-06-14 16:31:09.179354899 +0200
-+++ linux-2.6.12-rc6/fs/ext3/Makefile	2005-06-14 16:31:25.872714069 +0200
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
- 
- ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
--	   ioctl.o namei.o super.o symlink.o hash.o resize.o
-+	   ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+	   extents.o
- 
- ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-2.6.12-rc6/fs/ext3/super.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/super.c	2005-06-14 16:31:09.950839264 +0200
-+++ linux-2.6.12-rc6/fs/ext3/super.c	2005-06-14 16:31:25.886385944 +0200
-@@ -387,6 +387,7 @@
- 	struct ext3_super_block *es = sbi->s_es;
- 	int i;
- 
-+	ext3_ext_release(sb);
- 	ext3_xattr_put_super(sb);
- 	journal_destroy(sbi->s_journal);
- 	if (!(sb->s_flags & MS_RDONLY)) {
-@@ -451,6 +452,8 @@
- #endif
- 	ei->i_block_alloc_info = NULL;
- 	ei->vfs_inode.i_version = 1;
-+	
-+	memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- 	return &ei->vfs_inode;
- }
- 
-@@ -593,6 +596,7 @@
- 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- 	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+	Opt_extents, Opt_noextents, Opt_extdebug,
- };
- 
- static match_table_t tokens = {
-@@ -644,6 +647,9 @@
- 	{Opt_iopen, "iopen"},
- 	{Opt_noiopen, "noiopen"},
- 	{Opt_iopen_nopriv, "iopen_nopriv"},
-+	{Opt_extents, "extents"},
-+	{Opt_noextents, "noextents"},
-+	{Opt_extdebug, "extdebug"},
- 	{Opt_barrier, "barrier=%u"},
- 	{Opt_err, NULL},
- 	{Opt_resize, "resize"},
-@@ -953,6 +958,15 @@
- 		case Opt_nobh:
- 			set_opt(sbi->s_mount_opt, NOBH);
- 			break;
-+		case Opt_extents:
-+			set_opt (sbi->s_mount_opt, EXTENTS);
-+			break;
-+		case Opt_noextents:
-+			clear_opt (sbi->s_mount_opt, EXTENTS);
-+			break;
-+		case Opt_extdebug:
-+			set_opt (sbi->s_mount_opt, EXTDEBUG);
-+			break;
- 		default:
- 			printk (KERN_ERR
- 				"EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1668,6 +1681,7 @@
- 	percpu_counter_mod(&sbi->s_dirs_counter,
- 		ext3_count_dirs(sb));
- 
-+	ext3_ext_init(sb);
- 	lock_kernel();
- 	return 0;
- 
-Index: linux-2.6.12-rc6/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/ioctl.c	2005-06-14 16:31:08.646151780 +0200
-+++ linux-2.6.12-rc6/fs/ext3/ioctl.c	2005-06-14 16:31:25.897128131 +0200
-@@ -124,6 +124,10 @@
- 			err = ext3_change_inode_journal_flag(inode, jflag);
- 		return err;
- 	}
-+	case EXT3_IOC_GET_EXTENTS:
-+	case EXT3_IOC_GET_TREE_STATS:
-+	case EXT3_IOC_GET_TREE_DEPTH:
-+		return ext3_ext_ioctl(inode, filp, cmd, arg);
- 	case EXT3_IOC_GETVERSION:
- 	case EXT3_IOC_GETVERSION_OLD:
- 		return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-2.6.12-rc6/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs.h	2005-06-14 16:31:10.185214261 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs.h	2005-06-14 16:31:52.859041864 +0200
-@@ -186,8 +186,9 @@
- #define EXT3_NOTAIL_FL			0x00008000 /* don't merge file tail */
- #define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL			0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
- 
--#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE		0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
- 
-@@ -237,6 +238,9 @@
- #endif
- #define EXT3_IOC_GETRSVSZ		_IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ		_IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS		_IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH		_IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS		_IOR('f', 9, long)
- 
- /*
-  * Structure of an inode on the disk
-@@ -360,6 +364,8 @@
- #define EXT3_MOUNT_NOBH			0x40000 /* No bufferheads */
- #define EXT3_MOUNT_IOPEN		0x80000	/* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV		0x100000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS		0x200000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG		0x400000/* Extents debug */
- 
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -548,11 +554,13 @@
- #define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG		0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS		0x0040 /* extents support */
- 
- #define EXT3_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP	(EXT3_FEATURE_INCOMPAT_FILETYPE| \
- 					 EXT3_FEATURE_INCOMPAT_RECOVER| \
--					 EXT3_FEATURE_INCOMPAT_META_BG)
-+					 EXT3_FEATURE_INCOMPAT_META_BG| \
-+					 EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP	(EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- 					 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- 					 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -759,6 +767,9 @@
- 
- 
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *,
-+				    struct address_space *, loff_t);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -828,6 +837,16 @@
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
- 
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+			      struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+			  unsigned int cmd, unsigned long arg);
- 
- #endif	/* __KERNEL__ */
- 
-Index: linux-2.6.12-rc6/include/linux/ext3_extents.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_extents.h	2005-06-14 16:31:25.780917195 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_extents.h	2005-06-14 16:31:25.932284381 +0200
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined capacity of index/leaf blocks
-+ * become very little, so index split, in-depth growing and
-+ * other hard changes happens much more often
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH defined, then results of binary search
-+ * will be checked by linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined you can use 'extdebug' mount option
-+ * to get lots of info what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...)			\
-+do {							\
-+	if (test_opt((tree)->inode->i_sb, EXTDEBUG))	\
-+		printk(fmt, ##a);			\
-+} while (0);
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected
-+ * these number will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED	3	/* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ *  - tree depth (0 mean there is no tree yet. all extents in the inode)
-+ *  - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is extent on-disk structure
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+	__u32	ee_block;	/* first logical block extent covers */
-+	__u16	ee_len;		/* number of blocks covered by extent */
-+	__u16	ee_start_hi;	/* high 16 bits of physical block */
-+	__u32	ee_start;	/* low 32 bigs of physical block */
-+};
-+
-+/*
-+ * this is index on-disk structure
-+ * it's used at all the levels, but the bottom
-+ */
-+struct ext3_extent_idx {
-+	__u32	ei_block;	/* index covers logical blocks from 'block' */
-+	__u32	ei_leaf;	/* pointer to the physical block of the next *
-+				 * level. leaf or next index could bet here */
-+	__u16	ei_leaf_hi;	/* high 16 bits of physical block */
-+	__u16	ei_unused;
-+};
-+
-+/*
-+ * each block (leaves and indexes), even inode-stored has header
-+ */
-+struct ext3_extent_header {	
-+	__u16	eh_magic;	/* probably will support different formats */	
-+	__u16	eh_entries;	/* number of valid entries */
-+	__u16	eh_max;		/* capacity of store in entries */
-+	__u16	eh_depth;	/* has tree real underlaying blocks? */
-+	__u32	eh_generation;	/* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC		0xf30a
-+
-+/*
-+ * array of ext3_ext_path contains path to some extent
-+ * creation/lookup routines use it for traversal/splitting/etc
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+	__u32				p_block;
-+	__u16				p_depth;
-+	struct ext3_extent		*p_ext;
-+	struct ext3_extent_idx		*p_idx;
-+	struct ext3_extent_header	*p_hdr;
-+	struct buffer_head		*p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+	__u32	ec_start;
-+	__u32	ec_block;
-+	__u32	ec_len;
-+	__u32	ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO	0
-+#define EXT3_EXT_CACHE_GAP	1
-+#define EXT3_EXT_CACHE_EXTENT	2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+	struct inode *inode;	/* inode which tree belongs to */
-+	void *root;		/* ptr to data top of tree resides at */
-+	void *buffer;		/* will be passed as arg to ^^ routines	*/
-+	int buffer_len;
-+	void *private;
-+	struct ext3_ext_cache *cex;/* last found extent */
-+	struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+	int (*get_write_access)(handle_t *h, void *buffer);
-+	int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+	int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+	int (*remove_extent_credits)(struct ext3_extents_tree *,
-+				     struct ext3_extent *, unsigned long,
-+				     unsigned long);
-+	int (*remove_extent)(struct ext3_extents_tree *,
-+			     struct ext3_extent *, unsigned long,
-+			     unsigned long);
-+	int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+			 struct ext3_ext_path *, struct ext3_extent *,
-+			 int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+				    struct ext3_ext_path *,
-+				    struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE	0
-+#define EXT_BREAK	1
-+#define EXT_REPEAT	2
-+
-+
-+#define EXT_MAX_BLOCK	0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+	((struct ext3_extent *) (((char *) (__hdr__)) +		\
-+				 sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+	((struct ext3_extent_idx *) (((char *) (__hdr__)) +	\
-+				     sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+	((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+	(EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+	(EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+	(EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+	(EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__)	((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__)	((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN	0x7	/* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) 	((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__)	((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__)	(EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path)					\
-+{									\
-+	int depth = EXT_DEPTH(tree);					\
-+	BUG_ON((unsigned long) (path) < __PAGE_OFFSET);			\
-+	BUG_ON((unsigned long) (path)[depth].p_idx <			\
-+			__PAGE_OFFSET && (path)[depth].p_idx != NULL);	\
-+	BUG_ON((unsigned long) (path)[depth].p_ext <			\
-+			__PAGE_OFFSET && (path)[depth].p_ext != NULL);	\
-+	BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET);	\
-+	BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET	\
-+			&& depth != 0);					\
-+	BUG_ON((path)[0].p_depth != depth);				\
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+	unsigned long start;
-+	int buflen;
-+	void *buffer;
-+	void *cur;
-+	int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+	int depth;
-+	int extents_num;
-+	int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+	if (tree->cex)
-+		tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-2.6.12-rc6/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs_i.h	2005-06-06 17:22:29.000000000 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs_i.h	2005-06-14 16:31:25.941073443 +0200
-@@ -133,6 +133,8 @@
- 	 */
- 	struct semaphore truncate_sem;
- 	struct inode vfs_inode;
-+
-+	__u32 i_cached_extent[4];
- };
- 
- #endif	/* _LINUX_EXT3_FS_I */
diff --git a/ldiskfs/kernel_patches/patches/ext3-external-journal-2.6.12.patch b/ldiskfs/kernel_patches/patches/ext3-external-journal-2.6.12.patch
deleted file mode 100644
index bcfdae293f..0000000000
--- a/ldiskfs/kernel_patches/patches/ext3-external-journal-2.6.12.patch
+++ /dev/null
@@ -1,148 +0,0 @@
-Signed-off-by: Johann Lombardi <johann.lombardi@bull.net>
-
---- linux-2.6.12.orig/fs/ext3/super.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12/fs/ext3/super.c	2005-11-07 13:37:30.000000000 +0100
-@@ -39,7 +39,8 @@
- #include "xattr.h"
- #include "acl.h"
- 
--static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
-+static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
-+			     unsigned long journal_devnum);
- static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
- 			       int);
- static void ext3_commit_super (struct super_block * sb,
-@@ -586,7 +587,7 @@ enum {
- 	Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
- 	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
- 	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
--	Opt_commit, Opt_journal_update, Opt_journal_inum,
-+	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
- 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
-@@ -624,6 +625,7 @@ static match_table_t tokens = {
- 	{Opt_commit, "commit=%u"},
- 	{Opt_journal_update, "journal=update"},
- 	{Opt_journal_inum, "journal=%u"},
-+	{Opt_journal_dev, "journal_dev=%u"},
- 	{Opt_abort, "abort"},
- 	{Opt_data_journal, "data=journal"},
- 	{Opt_data_ordered, "data=ordered"},
-@@ -663,8 +665,9 @@ static unsigned long get_sb_block(void *
- 	return sb_block;
- }
- 
--static int parse_options (char * options, struct super_block *sb,
--			  unsigned long * inum, unsigned long *n_blocks_count, int is_remount)
-+static int parse_options (char *options, struct super_block *sb,
-+			  unsigned long *inum, unsigned long *journal_devnum, 
-+			  unsigned long *n_blocks_count, int is_remount)
- {
- 	struct ext3_sb_info *sbi = EXT3_SB(sb);
- 	char * p;
-@@ -805,6 +808,16 @@ static int parse_options (char * options
- 				return 0;
- 			*inum = option;
- 			break;
-+		case Opt_journal_dev:
-+			if (is_remount) {
-+				printk(KERN_ERR "EXT3-fs: cannot specify "
-+				       "journal on remount\n");
-+				return 0;
-+			}
-+			if (match_int(&args[0], &option))
-+				return 0;
-+			*journal_devnum = option;
-+			break;
- 		case Opt_noload:
- 			set_opt (sbi->s_mount_opt, NOLOAD);
- 			break;
-@@ -1250,6 +1263,7 @@ static int ext3_fill_super (struct super
- 	unsigned long logic_sb_block;
- 	unsigned long offset = 0;
- 	unsigned long journal_inum = 0;
-+	unsigned long journal_devnum = 0;
- 	unsigned long def_mount_opts;
- 	struct inode *root;
- 	int blocksize;
-@@ -1330,7 +1344,8 @@ static int ext3_fill_super (struct super
- 
- 	set_opt(sbi->s_mount_opt, RESERVATION);
- 
--	if (!parse_options ((char *) data, sb, &journal_inum, NULL, 0))
-+	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum, 
-+			    NULL, 0))
- 		goto failed_mount;
- 
- 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-@@ -1541,7 +1556,7 @@ static int ext3_fill_super (struct super
- 	 */
- 	if (!test_opt(sb, NOLOAD) &&
- 	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
--		if (ext3_load_journal(sb, es))
-+		if (ext3_load_journal(sb, es, journal_devnum))
- 			goto failed_mount2;
- 	} else if (journal_inum) {
- 		if (ext3_create_journal(sb, es, journal_inum))
-@@ -1821,15 +1836,24 @@ out_bdev:
- 	return NULL;
- }
- 
--static int ext3_load_journal(struct super_block * sb,
--			     struct ext3_super_block * es)
-+static int ext3_load_journal(struct super_block *sb,
-+			     struct ext3_super_block *es,
-+			     unsigned long journal_devnum)
- {
- 	journal_t *journal;
- 	int journal_inum = le32_to_cpu(es->s_journal_inum);
--	dev_t journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-+	dev_t journal_dev;
- 	int err = 0;
- 	int really_read_only;
- 
-+	if (journal_devnum &&
-+	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-+		printk(KERN_INFO "EXT3-fs: external journal device major/minor "
-+			"numbers have changed\n");
-+		journal_dev = new_decode_dev(journal_devnum);
-+	} else
-+		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-+
- 	really_read_only = bdev_read_only(sb->s_bdev);
- 
- 	/*
-@@ -1888,6 +1912,16 @@ static int ext3_load_journal(struct supe
- 
- 	EXT3_SB(sb)->s_journal = journal;
- 	ext3_clear_journal_err(sb, es);
-+
-+	if (journal_devnum &&
-+	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-+		es->s_journal_dev = cpu_to_le32(journal_devnum);
-+		sb->s_dirt = 1;
-+
-+		/* Make sure we flush the recovery flag to disk. */
-+		ext3_commit_super(sb, es, 1);
-+	}
-+
- 	return 0;
- }
- 
-@@ -2093,13 +2127,13 @@ static int ext3_remount (struct super_bl
- {
- 	struct ext3_super_block * es;
- 	struct ext3_sb_info *sbi = EXT3_SB(sb);
--	unsigned long tmp;
-+	unsigned long tmp1, tmp2;
- 	unsigned long n_blocks_count = 0;
- 
- 	/*
- 	 * Allow the "check" option to be passed as a remount option.
- 	 */
--	if (!parse_options(data, sb, &tmp, &n_blocks_count, 1))
-+	if (!parse_options(data, sb, &tmp1, &tmp2, &n_blocks_count, 1))
- 		return -EINVAL;
- 
- 	if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
diff --git a/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.12.patch b/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.12.patch
deleted file mode 100644
index ad077145da..0000000000
--- a/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.12.patch
+++ /dev/null
@@ -1,3102 +0,0 @@
-Index: linux-2.6.12.6-bull/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12.6-bull.orig/include/linux/ext3_fs.h	2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/include/linux/ext3_fs.h	2006-04-29 20:39:10.000000000 +0400
-@@ -57,6 +57,14 @@ struct statfs;
- #define ext3_debug(f, a...)	do {} while (0)
- #endif
- 
-+#define EXT3_MULTIBLOCK_ALLOCATOR	1
-+
-+#define EXT3_MB_HINT_MERGE		1
-+#define EXT3_MB_HINT_RESERVED		2
-+#define EXT3_MB_HINT_METADATA		4
-+#define EXT3_MB_HINT_FIRST		8
-+#define EXT3_MB_HINT_BEST		16
-+
- /*
-  * Special inodes numbers
-  */
-@@ -366,6 +374,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV		0x100000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS		0x200000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG		0x400000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC		0x800000/* Buddy allocation support */
- 
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -387,6 +396,14 @@ struct ext3_inode {
- #define ext3_find_first_zero_bit	ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit		ext2_find_next_zero_bit
- 
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif	/* __LITTLE_ENDIAN */
-+#endif	/* !ext2_find_next_le_bit */
-+
- /*
-  * Maximal mount counts between two filesystem checks
-  */
-@@ -727,7 +736,8 @@ extern int ext3_bg_has_super(struct supe
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
- extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
--			      unsigned long);
-+			      unsigned long, int);
-+extern int ext3_new_block_old(handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
- 				 unsigned long, unsigned long, int *);
- extern unsigned long ext3_count_free_blocks (struct super_block *);
-@@ -848,6 +857,17 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- 			  unsigned int cmd, unsigned long arg);
- 
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *, int);
-+extern int ext3_mb_release(struct super_block *);
-+extern int ext3_mb_new_blocks(handle_t *, struct inode *, unsigned long, int *, int, int *);
-+extern int ext3_mb_reserve_blocks(struct super_block *, int);
-+extern void ext3_mb_release_blocks(struct super_block *, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif	/* __KERNEL__ */
- 
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-2.6.12.6-bull/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.12.6-bull.orig/include/linux/ext3_fs_sb.h	2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/include/linux/ext3_fs_sb.h	2006-04-29 20:39:10.000000000 +0400
-@@ -21,8 +21,14 @@
- #include <linux/wait.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
- 
- /*
-  * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@ struct ext3_sb_info {
- 	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
- 	int s_jquota_fmt;			/* Format of quota to use */
- #endif
-+
-+	/* for buddy allocator */
-+	struct ext3_group_info ***s_group_info;
-+	struct inode *s_buddy_cache;
-+	long s_blocks_reserved;
-+	spinlock_t s_reserve_lock;
-+	struct list_head s_active_transaction;
-+	struct list_head s_closed_transaction;
-+	struct list_head s_committed_transaction;
-+	spinlock_t s_md_lock;
-+	tid_t s_last_transaction;
-+	int s_mb_factor;
-+	unsigned short *s_mb_offsets, *s_mb_maxs;
-+	unsigned long s_stripe;
-+
-+	/* history to debug policy */
-+	struct ext3_mb_history *s_mb_history;
-+	int s_mb_history_cur;
-+	int s_mb_history_max;
-+	struct proc_dir_entry *s_mb_proc;
-+	spinlock_t s_mb_history_lock;
-+
-+	/* stats for buddy allocator */
-+	atomic_t s_bal_reqs;	/* number of reqs with len > 1 */
-+	atomic_t s_bal_success;	/* we found long enough chunks */
-+	atomic_t s_bal_allocated;	/* in blocks */
-+	atomic_t s_bal_ex_scanned;	/* total extents scanned */
-+	atomic_t s_bal_goals;	/* goal hits */
-+	atomic_t s_bal_breaks;	/* too long searches */
-+	atomic_t s_bal_2orders;	/* 2^order hits */
-+	spinlock_t s_bal_lock;
-+	unsigned long s_mb_buddies_generated;
-+	unsigned long long s_mb_generation_time;
- };
-+
-+#define EXT3_GROUP_INFO(sb, group)					   \
-+	EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+				 [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
- 
- #endif	/* _LINUX_EXT3_FS_SB */
-Index: linux-2.6.12.6-bull/fs/ext3/super.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/super.c	2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/super.c	2006-04-29 20:39:10.000000000 +0400
-@@ -387,6 +387,7 @@ static void ext3_put_super (struct super
- 	struct ext3_super_block *es = sbi->s_es;
- 	int i;
- 
-+	ext3_mb_release(sb);
- 	ext3_ext_release(sb);
- 	ext3_xattr_put_super(sb);
- 	journal_destroy(sbi->s_journal);
-@@ -597,6 +598,7 @@ enum {
- 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- 	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- 	Opt_extents, Opt_noextents, Opt_extdebug,
-+	Opt_mballoc, Opt_nomballoc, Opt_stripe,
- };
- 
- static match_table_t tokens = {
-@@ -650,6 +651,9 @@ static match_table_t tokens = {
- 	{Opt_extents, "extents"},
- 	{Opt_noextents, "noextents"},
- 	{Opt_extdebug, "extdebug"},
-+	{Opt_mballoc, "mballoc"},
-+	{Opt_nomballoc, "nomballoc"},
-+	{Opt_stripe, "stripe=%u"},
- 	{Opt_barrier, "barrier=%u"},
- 	{Opt_err, NULL},
- 	{Opt_resize, "resize"},
-@@ -965,6 +967,19 @@ clear_qf_name:
- 		case Opt_extdebug:
- 			set_opt (sbi->s_mount_opt, EXTDEBUG);
- 			break;
-+		case Opt_mballoc:
-+			set_opt(sbi->s_mount_opt, MBALLOC);
-+			break;
-+		case Opt_nomballoc:
-+			clear_opt(sbi->s_mount_opt, MBALLOC);
-+			break;
-+		case Opt_stripe:
-+			if (match_int(&args[0], &option))
-+				return 0;
-+			if (option < 0)
-+				return 0;
-+			sbi->s_stripe = option;
-+			break;
- 		default:
- 			printk (KERN_ERR
- 				"EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1670,6 +1675,7 @@ static int ext3_fill_super (struct super
- 		ext3_count_dirs(sb));
- 
- 	ext3_ext_init(sb);
-+	ext3_mb_init(sb, needs_recovery);
- 	lock_kernel();
- 	return 0;
- 
-@@ -2549,7 +2555,13 @@ static struct file_system_type ext3_fs_t
- 
- static int __init init_ext3_fs(void)
- {
--	int err = init_ext3_xattr();
-+	int err;
-+
-+	err = init_ext3_proc();
-+	if (err)
-+		return err;
-+
-+	err = init_ext3_xattr();
- 	if (err)
- 		return err;
- 	err = init_inodecache();
-@@ -2571,6 +2583,7 @@ static void __exit exit_ext3_fs(void)
- 	unregister_filesystem(&ext3_fs_type);
- 	destroy_inodecache();
- 	exit_ext3_xattr();
-+	exit_ext3_proc();
- }
- 
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-2.6.12.6-bull/fs/ext3/extents.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/extents.c	2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/extents.c	2006-04-29 20:39:10.000000000 +0400
-@@ -777,7 +777,7 @@ cleanup:
- 		for (i = 0; i < depth; i++) {
- 			if (!ablocks[i])
- 				continue;
--			ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+			ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- 		}
- 	}
- 	kfree(ablocks);
-@@ -1434,7 +1434,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- 		  path->p_idx->ei_leaf);
- 	bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- 	ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
--	ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+	ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- 	return err;
- }
- 
-@@ -1919,10 +1919,12 @@ ext3_remove_blocks(struct ext3_extents_t
- 	int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- 	handle_t *handle = ext3_journal_start(tree->inode, needed);
- 	struct buffer_head *bh;
--	int i;
-+	int i, metadata = 0;
- 
- 	if (IS_ERR(handle))
- 		return PTR_ERR(handle);
-+	if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+		metadata = 1;
- 	if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- 		/* tail removal */
- 		unsigned long num, start;
-@@ -1934,7 +1936,7 @@ ext3_remove_blocks(struct ext3_extents_t
- 			bh = sb_find_get_block(tree->inode->i_sb, start + i);
- 			ext3_forget(handle, 0, tree->inode, bh, start + i);
- 		}
--		ext3_free_blocks(handle, tree->inode, start, num);
-+		ext3_free_blocks(handle, tree->inode, start, num, metadata);
- 	} else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- 		printk("strange request: removal %lu-%lu from %u:%u\n",
- 		       from, to, ex->ee_block, ex->ee_len);
-Index: linux-2.6.12.6-bull/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/inode.c	2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/inode.c	2006-04-29 20:39:10.000000000 +0400
-@@ -564,7 +564,7 @@ static int ext3_alloc_branch(handle_t *h
- 		ext3_journal_forget(handle, branch[i].bh);
- 	}
- 	for (i = 0; i < keys; i++)
--		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
-+		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1, 1);
- 	return err;
- }
- 
-@@ -1850,7 +1850,7 @@ ext3_clear_blocks(handle_t *handle, stru
- 		}
- 	}
- 
--	ext3_free_blocks(handle, inode, block_to_free, count);
-+	ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
- 
- /**
-@@ -2023,7 +2023,7 @@ static void ext3_free_branches(handle_t 
- 				ext3_journal_test_restart(handle, inode);
- 			}
- 
--			ext3_free_blocks(handle, inode, nr, 1);
-+			ext3_free_blocks(handle, inode, nr, 1, 1);
- 
- 			if (parent_bh) {
- 				/*
-Index: linux-2.6.12.6-bull/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/balloc.c	2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/balloc.c	2006-04-29 20:39:10.000000000 +0400
-@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
-  *
-  * Return buffer_head on success or NULL in case of failure.
-  */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- 	struct ext3_group_desc * desc;
-@@ -490,24 +490,6 @@ error_return:
- 	return;
- }
- 
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
--			unsigned long block, unsigned long count)
--{
--	struct super_block * sb;
--	int dquot_freed_blocks;
--
--	sb = inode->i_sb;
--	if (!sb) {
--		printk ("ext3_free_blocks: nonexistent device");
--		return;
--	}
--	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
--	if (dquot_freed_blocks)
--		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
--	return;
--}
--
- /*
-  * For ext3 allocations, we must not reuse any blocks which are
-  * allocated in the bitmap buffer's "last committed data" copy.  This
-@@ -1162,7 +1144,7 @@ int ext3_should_retry_alloc(struct super
-  * bitmap, and then for any free bit if that fails.
-  * This function also updates quota and i_blocks field.
-  */
--int ext3_new_block(handle_t *handle, struct inode *inode,
-+int ext3_new_block_old(handle_t *handle, struct inode *inode,
- 			unsigned long goal, int *errp)
- {
- 	struct buffer_head *bitmap_bh = NULL;
-Index: linux-2.6.12.6-bull/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/xattr.c	2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/xattr.c	2006-04-29 20:39:10.000000000 +0400
-@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl
- 		ea_bdebug(bh, "refcount now=0; freeing");
- 		if (ce)
- 			mb_cache_entry_free(ce);
--		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-+		ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
- 		get_bh(bh);
- 		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
- 	} else {
-@@ -804,7 +804,7 @@ inserted:
- 			new_bh = sb_getblk(sb, block);
- 			if (!new_bh) {
- getblk_failed:
--				ext3_free_blocks(handle, inode, block, 1);
-+				ext3_free_blocks(handle, inode, block, 1, 1);
- 				error = -EIO;
- 				goto cleanup;
- 			}
-Index: linux-2.6.12.6-bull/fs/ext3/mballoc.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/mballoc.c	2006-04-22 17:31:47.543334750 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/mballoc.c	2006-04-30 01:24:11.000000000 +0400
-@@ -0,0 +1,2725 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ *   - track min/max extents in each group for better group selection
-+ *   - mb_mark_used() may allocate chunk right after splitting buddy
-+ *   - special flag to advice allocator to look for requested + N blocks
-+ *     this may improve interaction between extents and mballoc
-+ *   - tree of groups sorted by number of free blocks
-+ *   - percpu reservation code (hotpath)
-+ *   - error handling
-+ */
-+
-+/*
-+ * with AGRESSIVE_CHECK allocator runs consistency checks over
-+ * structures. these checks slow things down a lot
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...)	printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * How long mballoc can look for a best extent (in found extents)
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * How long mballoc must look for a best extent
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' allocator will collect stats that will be
-+ * shown at umount. The collecting costs though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * for which requests use 2^N search using buddies
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS	30
-+
-+struct ext3_free_metadata {
-+	unsigned short group;
-+	unsigned short num;
-+	unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+	struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+	unsigned long	bb_state;
-+	unsigned long	bb_tid;
-+	struct ext3_free_metadata *bb_md_cur;
-+	unsigned short	bb_first_free;
-+	unsigned short	bb_free;
-+	unsigned short	bb_fragments;
-+	unsigned short	bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT	0
-+#define EXT3_GROUP_INFO_LOCKED_BIT	1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp)	\
-+	(test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+	__u16 fe_start;
-+	__u16 fe_len;
-+	__u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+	struct super_block *ac_sb;
-+
-+	/* search goals */
-+	struct ext3_free_extent ac_g_ex;
-+
-+	/* the best found extent */
-+	struct ext3_free_extent ac_b_ex;
-+
-+	/* number of iterations done. we have to track to limit searching */
-+	unsigned long ac_ex_scanned;
-+	__u16 ac_groups_scanned;
-+	__u16 ac_found;
-+	__u16 ac_tail;
-+	__u16 ac_buddy;
-+	__u8 ac_status;
-+	__u8 ac_flags;		/* allocation hints */
-+	__u8 ac_criteria;
-+	__u8 ac_repeats;
-+	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
-+				 * N > 0, the field stores N, otherwise 0 */
-+
-+	struct page *ac_buddy_page;
-+	struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE	1
-+#define AC_STATUS_FOUND		2
-+#define AC_STATUS_BREAK		3
-+
-+struct ext3_mb_history {
-+	struct ext3_free_extent goal;	/* goal allocation */
-+	struct ext3_free_extent result;	/* result allocation */
-+	unsigned pid;
-+	unsigned ino;
-+	__u16 found;	/* how many extents have been found */
-+	__u16 groups;	/* how many groups have been scanned */
-+	__u16 tail;	/* what tail broke some buddy */
-+	__u16 buddy;	/* buddy the tail ^^^ broke */
-+	__u8 cr;	/* which phase the result extent was found at */
-+	__u8 merged;
-+};
-+
-+struct ext3_buddy {
-+	struct page *bd_buddy_page;
-+	void *bd_buddy;
-+	struct page *bd_bitmap_page;
-+	void *bd_bitmap;
-+	struct ext3_group_info *bd_info;
-+	struct super_block *bd_sb;
-+	__u16 bd_blkbits;
-+	__u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b)	((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b)	((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+				struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr)		\
-+{							\
-+	bit += ((unsigned long) addr & 7UL) << 3;	\
-+	addr = (void *) ((unsigned long) addr & ~7UL);	\
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr)		\
-+{							\
-+	bit += ((unsigned long) addr & 3UL) << 3;	\
-+	addr = (void *) ((unsigned long) addr & ~3UL);	\
-+}
-+#else
-+#error "how many bits you are?!"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+	int fix;
-+#if BITS_PER_LONG == 64
-+	fix = ((unsigned long) addr & 7UL) << 3;
-+	addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+	fix = ((unsigned long) addr & 3UL) << 3;
-+	addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "how many bits you are?!"
-+#endif
-+	max += fix;
-+	start += fix;
-+	return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+	char *bb;
-+
-+	J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+	J_ASSERT(max != NULL);
-+
-+	if (order > e3b->bd_blkbits + 1) {
-+		*max = 0;
-+		return NULL;
-+	}
-+
-+	/* at order 0 we see each particular block */
-+	*max = 1 << (e3b->bd_blkbits + 3);
-+	if (order == 0)
-+		return EXT3_MB_BITMAP(e3b);
-+
-+	bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+	*max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+	return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+	int order = e3b->bd_blkbits + 1;
-+	int max, max2, i, j, k, count;
-+	int fragments = 0, fstart;
-+	void *buddy, *buddy2;
-+
-+	if (!test_opt(e3b->bd_sb, MBALLOC))
-+		return;
-+
-+	{
-+		static int mb_check_counter = 0;
-+		if (mb_check_counter++ % 300 != 0)
-+			return;
-+	}
-+
-+	while (order > 1) {
-+		buddy = mb_find_buddy(e3b, order, &max);
-+		J_ASSERT(buddy);
-+		buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+		J_ASSERT(buddy2);
-+		J_ASSERT(buddy != buddy2);
-+		J_ASSERT(max * 2 == max2);
-+
-+		count = 0;
-+		for (i = 0; i < max; i++) {
-+
-+			if (mb_test_bit(i, buddy)) {
-+				/* only single bit in buddy2 may be 1 */
-+				if (!mb_test_bit(i << 1, buddy2))
-+					J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+				else if (!mb_test_bit((i << 1) + 1, buddy2))
-+					J_ASSERT(mb_test_bit(i << 1, buddy2));
-+				continue;
-+			}
-+
-+			/* both bits in buddy2 must be 0 */
-+			J_ASSERT(mb_test_bit(i << 1, buddy2));
-+			J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+			for (j = 0; j < (1 << order); j++) {
-+				k = (i * (1 << order)) + j;
-+				J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+			}
-+			count++;
-+		}
-+		J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+		order--;
-+	}
-+
-+	fstart = -1;
-+	buddy = mb_find_buddy(e3b, 0, &max);
-+	for (i = 0; i < max; i++) {
-+		if (!mb_test_bit(i, buddy)) {
-+			J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+			if (fstart == -1) {
-+				fragments++;
-+				fstart = i;
-+			}
-+			continue;
-+		}
-+		fstart = -1;
-+		/* check used bits only */
-+		for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+			buddy2 = mb_find_buddy(e3b, j, &max2);
-+			k = i >> j;
-+			J_ASSERT(k < max2);
-+			J_ASSERT(mb_test_bit(k, buddy2));
-+		}
-+	}
-+	J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+	J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static inline int fmsb(unsigned short word)
-+{
-+	int order;
-+
-+	if (word > 255) {
-+		order = 7;
-+		word >>= 8;
-+	} else {
-+		order = -1;
-+	}
-+
-+	do {
-+		order++;
-+		word >>= 1;
-+	} while (word != 0);
-+
-+	return order;
-+}
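-+
-+/* e.g. fmsb(80) == 6, since 2^6 = 64 <= 80 < 128 (illustrative example) */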
-+
-+static inline void
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+				int len, struct ext3_group_info *grp)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	unsigned short min, max, chunk, border;
-+
-+	mb_debug("mark %u/%u free\n", first, len);
-+	J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+	border = 2 << sb->s_blocksize_bits;
-+
-+	while (len > 0) {
-+		/* find how many blocks can be covered since this position */
-+		max = ffs(first | border) - 1;
-+
-+		/* find how many blocks of power 2 we need to mark */
-+		min = fmsb(len);
-+
-+		mb_debug("  %u/%u -> max %u, min %u\n",
-+			first & ((2 << sb->s_blocksize_bits) - 1),
-+			len, max, min);
-+
-+		if (max < min)
-+			min = max;
-+		chunk = 1 << min;
-+
-+		/* mark multiblock chunks only */
-+		grp->bb_counters[min]++;
-+		if (min > 0) {
-+			mb_debug("    set %u at %u \n", first >> min,
-+				sbi->s_mb_offsets[min]);
-+			mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+		}
-+
-+		len -= chunk;
-+		first += chunk;
-+	}
-+}
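-+
-+/*
-+ * Worked example for the loop above (illustrative): freeing first=12,
-+ * len=7 is split into buddy-aligned power-of-two chunks 4@12 (order 2),
-+ * 2@16 (order 1) and 1@18 (order 0); only the order-2 and order-1 chunks
-+ * clear bits in the buddy arrays, the single block just bumps
-+ * bb_counters[0].
-+ */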
-+
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+			int group)
-+{
-+	struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+	unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+	unsigned short i = 0, first, len;
-+	unsigned free = 0, fragments = 0;
-+	unsigned long long period = get_cycles();
-+
-+	i = mb_find_next_zero_bit(bitmap, max, 0);
-+	grp->bb_first_free = i;
-+	while (i < max) {
-+		fragments++;
-+		first = i;
-+		i = ext2_find_next_le_bit(bitmap, max, i);
-+		len = i - first;
-+		free += len;
-+		if (len > 1)
-+			ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+		else
-+			grp->bb_counters[0]++;
-+		if (i < max)
-+			i = mb_find_next_zero_bit(bitmap, max, i);
-+	}
-+	grp->bb_fragments = fragments;
-+
-+	/* bb_state shouldn't be modified because all
-+	 * others wait for init completion on the page lock */
-+	clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+	if (free != grp->bb_free) {
-+		printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+			group, free, grp->bb_free);
-+		grp->bb_free = free;
-+	}
-+
-+	period = get_cycles() - period;
-+	spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+	EXT3_SB(sb)->s_mb_buddies_generated++;
-+	EXT3_SB(sb)->s_mb_generation_time += period;
-+	spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+	int blocksize, blocks_per_page, groups_per_page;
-+	int err = 0, i, first_group, first_block;
-+	struct super_block *sb;
-+	struct buffer_head *bhs;
-+	struct buffer_head **bh;
-+	struct inode *inode;
-+	char *data, *bitmap;
-+
-+	mb_debug("init page %lu\n", page->index);
-+
-+	inode = page->mapping->host;
-+	sb = inode->i_sb;
-+	blocksize = 1 << inode->i_blkbits;
-+	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+	groups_per_page = blocks_per_page >> 1;
-+	if (groups_per_page == 0)
-+		groups_per_page = 1;
-+
-+	/* allocate buffer_heads to read bitmaps */
-+	if (groups_per_page > 1) {
-+		err = -ENOMEM;
-+		i = sizeof(struct buffer_head *) * groups_per_page;
-+		bh = kmalloc(i, GFP_NOFS);
-+		if (bh == NULL)
-+			goto out;
-+		memset(bh, 0, i);
-+	} else
-+		bh = &bhs;
-+
-+	first_group = page->index * blocks_per_page / 2;
-+
-+	/* read all groups the page covers into the cache */
-+	for (i = 0; i < groups_per_page; i++) {
-+		struct ext3_group_desc * desc;
-+
-+		if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+			break;
-+
-+		err = -EIO;
-+		desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+		if (desc == NULL)
-+			goto out;
-+
-+		err = -ENOMEM;
-+		bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+		if (bh[i] == NULL)
-+			goto out;
-+
-+		if (buffer_uptodate(bh[i]))
-+			continue;
-+
-+		lock_buffer(bh[i]);
-+		if (buffer_uptodate(bh[i])) {
-+			unlock_buffer(bh[i]);
-+			continue;
-+		}
-+
-+		get_bh(bh[i]);
-+		bh[i]->b_end_io = end_buffer_read_sync;
-+		submit_bh(READ, bh[i]);
-+		mb_debug("read bitmap for group %u\n", first_group + i);
-+	}
-+
-+	/* wait for I/O completion */
-+	for (i = 0; i < groups_per_page && bh[i]; i++)
-+		wait_on_buffer(bh[i]);
-+
-+	err = -EIO;
-+	for (i = 0; i < groups_per_page && bh[i]; i++)
-+		if (!buffer_uptodate(bh[i]))
-+			goto out;
-+
-+	first_block = page->index * blocks_per_page;
-+	for (i = 0; i < blocks_per_page; i++) {
-+		int group;
-+
-+		group = (first_block + i) >> 1;
-+		if (group >= EXT3_SB(sb)->s_groups_count)
-+			break;
-+
-+		data = page_address(page) + (i * blocksize);
-+		bitmap = bh[group - first_group]->b_data;
-+
-+		if ((first_block + i) & 1) {
-+			/* this is block of buddy */
-+			mb_debug("put buddy for group %u in page %lu/%x\n",
-+				group, page->index, i * blocksize);
-+			memset(data, 0xff, blocksize);
-+			EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+			memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+			ext3_mb_generate_buddy(sb, data, bitmap, group);
-+		} else {
-+			/* this is block of bitmap */
-+			mb_debug("put bitmap for group %u in page %lu/%x\n",
-+				group, page->index, i * blocksize);
-+			memcpy(data, bitmap, blocksize);
-+		}
-+	}
-+	SetPageUptodate(page);
-+
-+out:
-+	if (bh) {
-+		for (i = 0; i < groups_per_page && bh[i]; i++)
-+			brelse(bh[i]);
-+		if (bh != &bhs)
-+			kfree(bh);
-+	}
-+	return err;
-+}
-+
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+		struct ext3_buddy *e3b)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct inode *inode = sbi->s_buddy_cache;
-+	int blocks_per_page, block, pnum, poff;
-+	struct page *page;
-+
-+	mb_debug("load group %u\n", group);
-+
-+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+	e3b->bd_blkbits = sb->s_blocksize_bits;
-+	e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+	e3b->bd_sb = sb;
-+	e3b->bd_group = group;
-+	e3b->bd_buddy_page = NULL;
-+	e3b->bd_bitmap_page = NULL;
-+
-+	block = group * 2;
-+	pnum = block / blocks_per_page;
-+	poff = block % blocks_per_page;
-+
-+	/* we could use find_or_create_page(), but it locks the page,
-+	 * which we'd like to avoid in the fast path ... */
-+	page = find_get_page(inode->i_mapping, pnum);
-+	if (page == NULL || !PageUptodate(page)) {
-+		if (page)
-+			page_cache_release(page);
-+		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+		if (page) {
-+			BUG_ON(page->mapping != inode->i_mapping);
-+			if (!PageUptodate(page))
-+				ext3_mb_init_cache(page);
-+			unlock_page(page);
-+		}
-+	}
-+	if (page == NULL || !PageUptodate(page))
-+		goto err;
-+	e3b->bd_bitmap_page = page;
-+	e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+	mark_page_accessed(page);
-+
-+	block++;
-+	pnum = block / blocks_per_page;
-+	poff = block % blocks_per_page;
-+
-+	page = find_get_page(inode->i_mapping, pnum);
-+	if (page == NULL || !PageUptodate(page)) {
-+		if (page)
-+			page_cache_release(page);
-+		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+		if (page) {
-+			BUG_ON(page->mapping != inode->i_mapping);
-+			if (!PageUptodate(page))
-+				ext3_mb_init_cache(page);
-+			unlock_page(page);
-+		}
-+	}
-+	if (page == NULL || !PageUptodate(page))
-+		goto err;
-+	e3b->bd_buddy_page = page;
-+	e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+	mark_page_accessed(page);
-+
-+	J_ASSERT(e3b->bd_bitmap_page != NULL);
-+	J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+	return 0;
-+
-+err:
-+	if (e3b->bd_bitmap_page)
-+		page_cache_release(e3b->bd_bitmap_page);
-+	if (e3b->bd_buddy_page)
-+		page_cache_release(e3b->bd_buddy_page);
-+	e3b->bd_buddy = NULL;
-+	e3b->bd_bitmap = NULL;
-+	return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+	if (e3b->bd_bitmap_page)
-+		page_cache_release(e3b->bd_bitmap_page);
-+	if (e3b->bd_buddy_page)
-+		page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+	bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+		      &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+	bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+			&EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+	int order = 1;
-+	void *bb;
-+
-+	J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+	J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+	bb = EXT3_MB_BUDDY(e3b);
-+	while (order <= e3b->bd_blkbits + 1) {
-+		block = block >> 1;
-+		if (!mb_test_bit(block, bb)) {
-+			/* this block is part of buddy of order 'order' */
-+			return order;
-+		}
-+		bb += 1 << (e3b->bd_blkbits - order);
-+		order++;
-+	}
-+	return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+	__u32 *addr;
-+
-+	len = cur + len;
-+	while (cur < len) {
-+		if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: clear whole word at once */
-+			addr = bm + (cur >> 3);
-+			*addr = 0;
-+			cur += 32;
-+			continue;
-+		}
-+		mb_clear_bit_atomic(cur, bm);
-+		cur++;
-+	}
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+	__u32 *addr;
-+
-+	len = cur + len;
-+	while (cur < len) {
-+		if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: set whole word at once */
-+			addr = bm + (cur >> 3);
-+			*addr = 0xffffffff;
-+			cur += 32;
-+			continue;
-+		}
-+		mb_set_bit_atomic(cur, bm);
-+		cur++;
-+	}
-+}
-+
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+	int block = 0, max = 0, order;
-+	void *buddy, *buddy2;
-+
-+	mb_check_buddy(e3b);
-+
-+	e3b->bd_info->bb_free += count;
-+	if (first < e3b->bd_info->bb_first_free)
-+		e3b->bd_info->bb_first_free = first;
-+
-+	/* let's maintain fragments counter */
-+	if (first != 0)
-+		block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+	if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+		max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+	if (block && max)
-+		e3b->bd_info->bb_fragments--;
-+	else if (!block && !max)
-+		e3b->bd_info->bb_fragments++;
-+
-+	/* let's maintain buddy itself */
-+	while (count-- > 0) {
-+		block = first++;
-+		order = 0;
-+
-+		J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+		mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+		e3b->bd_info->bb_counters[order]++;
-+
-+		/* start of the buddy */
-+		buddy = mb_find_buddy(e3b, order, &max);
-+
-+		do {
-+			block &= ~1UL;
-+			if (mb_test_bit(block, buddy) ||
-+					mb_test_bit(block + 1, buddy))
-+				break;
-+
-+			/* both the buddies are free, try to coalesce them */
-+			buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+			if (!buddy2)
-+				break;
-+
-+			if (order > 0) {
-+				/* at order 0 the buddy is the block bitmap
-+				 * itself; keep the freed blocks cleared there */
-+				mb_set_bit(block, buddy);
-+				mb_set_bit(block + 1, buddy);
-+			}
-+			e3b->bd_info->bb_counters[order]--;
-+			e3b->bd_info->bb_counters[order]--;
-+
-+			block = block >> 1;
-+			order++;
-+			e3b->bd_info->bb_counters[order]++;
-+
-+			mb_clear_bit(block, buddy2);
-+			buddy = buddy2;
-+		} while (1);
-+	}
-+	mb_check_buddy(e3b);
-+
-+	return 0;
-+}
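-+
-+/*
-+ * Coalescing example for the loop above (illustrative): freeing block 7
-+ * when block 6 is already free merges the pair into one free order-1
-+ * buddy at index 3; if that buddy's partner (blocks 4-5) is free as well,
-+ * the merge continues into a free order-2 buddy at index 1, and so on.
-+ */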
-+
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+				int needed, struct ext3_free_extent *ex)
-+{
-+	int next = block, max, ord;
-+	void *buddy;
-+
-+	J_ASSERT(ex != NULL);
-+
-+	buddy = mb_find_buddy(e3b, order, &max);
-+	J_ASSERT(buddy);
-+	J_ASSERT(block < max);
-+	if (mb_test_bit(block, buddy)) {
-+		ex->fe_len = 0;
-+		ex->fe_start = 0;
-+		ex->fe_group = 0;
-+		return 0;
-+	}
-+
-+	if (likely(order == 0)) {
-+		/* find actual order */
-+		order = mb_find_order_for_block(e3b, block);
-+		block = block >> order;
-+	}
-+
-+	ex->fe_len = 1 << order;
-+	ex->fe_start = block << order;
-+	ex->fe_group = e3b->bd_group;
-+
-+	/* calc difference from given start */
-+	next = next - ex->fe_start;
-+	ex->fe_len -= next;
-+	ex->fe_start += next;
-+
-+	while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+		if (block + 1 >= max)
-+			break;
-+
-+		next = (block + 1) * (1 << order);
-+		if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+			break;
-+
-+		ord = mb_find_order_for_block(e3b, next);
-+
-+		order = ord;
-+		block = next >> order;
-+		ex->fe_len += 1 << order;
-+	}
-+
-+	J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+	return ex->fe_len;
-+}
-+
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+	int ord, mlen = 0, max = 0, cur;
-+	int start = ex->fe_start;
-+	int len = ex->fe_len;
-+	unsigned ret = 0;
-+	int len0 = len;
-+	void *buddy;
-+
-+	mb_check_buddy(e3b);
-+
-+	e3b->bd_info->bb_free -= len;
-+	if (e3b->bd_info->bb_first_free == start)
-+		e3b->bd_info->bb_first_free += len;
-+
-+	/* let's maintain fragments counter */
-+	if (start != 0)
-+		mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+	if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+		max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+	if (mlen && max)
-+		e3b->bd_info->bb_fragments++;
-+	else if (!mlen && !max)
-+		e3b->bd_info->bb_fragments--;
-+
-+	/* let's maintain buddy itself */
-+	while (len) {
-+		ord = mb_find_order_for_block(e3b, start);
-+
-+		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+			/* the whole chunk may be allocated at once! */
-+			mlen = 1 << ord;
-+			buddy = mb_find_buddy(e3b, ord, &max);
-+			J_ASSERT((start >> ord) < max);
-+			mb_set_bit(start >> ord, buddy);
-+			e3b->bd_info->bb_counters[ord]--;
-+			start += mlen;
-+			len -= mlen;
-+			J_ASSERT(len >= 0);
-+			continue;
-+		}
-+
-+		/* store for history */
-+		if (ret == 0)
-+			ret = len | (ord << 16);
-+
-+		/* we have to split large buddy */
-+		J_ASSERT(ord > 0);
-+		buddy = mb_find_buddy(e3b, ord, &max);
-+		mb_set_bit(start >> ord, buddy);
-+		e3b->bd_info->bb_counters[ord]--;
-+
-+		ord--;
-+		cur = (start >> ord) & ~1U;
-+		buddy = mb_find_buddy(e3b, ord, &max);
-+		mb_clear_bit(cur, buddy);
-+		mb_clear_bit(cur + 1, buddy);
-+		e3b->bd_info->bb_counters[ord]++;
-+		e3b->bd_info->bb_counters[ord]++;
-+	}
-+
-+	/* now drop all the bits in bitmap */
-+	mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+	mb_check_buddy(e3b);
-+
-+	return ret;
-+}
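-+
-+/*
-+ * Note on the return value above (illustrative): when a larger buddy has
-+ * to be split, ret is set once to len | (ord << 16), i.e. the remaining
-+ * request length and the order of the first buddy broken; the caller
-+ * unpacks these into ac_tail and ac_buddy for the allocation history.
-+ */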
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	unsigned long ret;
-+
-+	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+	ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+	ac->ac_status = AC_STATUS_FOUND;
-+	ac->ac_tail = ret & 0xffff;
-+	ac->ac_buddy = ret >> 16;
-+
-+	/* hold in-core structures until allocated
-+	 * blocks are marked non-free in on-disk bitmap */
-+	ac->ac_buddy_page = e3b->bd_buddy_page;
-+	page_cache_get(e3b->bd_buddy_page);
-+	ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+	page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent gets marked used and a flag is set in the context to stop
-+ * scanning. Otherwise, the extent is compared with the previously found
-+ * extent, and if the new one is better, it is stored in the context.
-+ * Later, the best found extent will be used if mballoc can't find a
-+ * good enough extent.
-+ *
-+ * FIXME: real allocation policy is to be designed yet!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+					struct ext3_free_extent *ex,
-+					struct ext3_buddy *e3b)
-+{
-+	struct ext3_free_extent *bex = &ac->ac_b_ex;
-+	struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+	J_ASSERT(ex->fe_len > 0);
-+	J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+	J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+	ac->ac_found++;
-+
-+	/*
-+	 * The special case - take what you catch first
-+	 */
-+	if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+		*bex = *ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+		return;
-+	}
-+
-+	/*
-+	 * Let's check whether the chunk is good enough
-+	 */
-+	if (ex->fe_len == gex->fe_len) {
-+		*bex = *ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+		return;
-+	}
-+
-+	/*
-+	 * If this is the first found extent, just store it in the context
-+	 */
-+	if (bex->fe_len == 0) {
-+		*bex = *ex;
-+		return;
-+	}
-+
-+	/*
-+	 * If new found extent is better, store it in the context
-+	 */
-+	if (bex->fe_len < gex->fe_len) {
-+		/* if the request isn't satisfied, any found extent
-+		 * larger than the previous best one is better */
-+		if (ex->fe_len > bex->fe_len)
-+			*bex = *ex;
-+	} else if (ex->fe_len > gex->fe_len) {
-+		/* if the request is satisfied, then we try to find
-+		 * an extent that still satisfies the request, but is
-+		 * smaller than the previous one */
-+		*bex = *ex;
-+	}
-+
-+	/*
-+	 * Let's scan at least a few extents and not pick the first one
-+	 */
-+	if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+		ac->ac_status = AC_STATUS_BREAK;
-+
-+	/*
-+	 * We don't want to scan for a whole year
-+	 */
-+	if (ac->ac_found > ext3_mb_max_to_scan)
-+		ac->ac_status = AC_STATUS_BREAK;
-+}
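-+
-+/*
-+ * Worked example of the policy above (illustrative): with a goal of 16
-+ * blocks, finding extents of 8, 24 and 18 in that order leaves 18 as the
-+ * best extent: 8 is kept only until something larger shows up, 24
-+ * satisfies the goal, and 18 still satisfies it while wasting less.
-+ */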
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct ext3_free_extent ex = ac->ac_b_ex;
-+	int group = ex.fe_group, max, err;
-+
-+	J_ASSERT(ex.fe_len > 0);
-+	err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+	if (err)
-+		return err;
-+
-+	ext3_lock_group(ac->ac_sb, group);
-+	max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+	if (max > 0) {
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	}
-+
-+	ext3_unlock_group(ac->ac_sb, group);
-+
-+	ext3_mb_release_desc(e3b);
-+
-+	return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+				struct ext3_buddy *e3b)
-+{
-+	int group = ac->ac_g_ex.fe_group, max, err;
-+	struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+	struct ext3_super_block *es = sbi->s_es;
-+	struct ext3_free_extent ex;
-+
-+	err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+	if (err)
-+		return err;
-+
-+	ext3_lock_group(ac->ac_sb, group);
-+	max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+			     ac->ac_g_ex.fe_len, &ex);
-+
-+	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+		unsigned long start;
-+		start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+			ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+		if (start % sbi->s_stripe == 0) {
-+			ac->ac_found++;
-+			ac->ac_b_ex = ex;
-+			ext3_mb_use_best_found(ac, e3b);
-+		}
-+	} else if (max >= ac->ac_g_ex.fe_len) {
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+		J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+		ac->ac_found++;
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	} else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+		/* Sometimes the caller may want to merge even a small
-+		 * number of blocks into an existing extent */
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+		J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+		ac->ac_found++;
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	}
-+	ext3_unlock_group(ac->ac_sb, group);
-+
-+	ext3_mb_release_desc(e3b);
-+
-+	return 0;
-+}
-+
-+/*
-+ * The routine scans buddy structures (not the bitmap!) from the given
-+ * order up to the max order, trying to find a big enough chunk to
-+ * satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	struct ext3_group_info *grp = e3b->bd_info;
-+	void *buddy;
-+	int i, k, max;
-+
-+	J_ASSERT(ac->ac_2order > 0);
-+	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+		if (grp->bb_counters[i] == 0)
-+			continue;
-+
-+		buddy = mb_find_buddy(e3b, i, &max);
-+		if (buddy == NULL) {
-+			printk(KERN_ALERT "looking for wrong order?\n");
-+			break;
-+		}
-+
-+		k = mb_find_next_zero_bit(buddy, max, 0);
-+		J_ASSERT(k < max);
-+
-+		ac->ac_found++;
-+
-+		ac->ac_b_ex.fe_len = 1 << i;
-+		ac->ac_b_ex.fe_start = k << i;
-+		ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+		ext3_mb_use_best_found(ac, e3b);
-+		J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+		if (unlikely(ext3_mb_stats))
-+			atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+		break;
-+	}
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * To optimize scanning, the caller must pass the number of free
-+ * blocks in the group, so the routine knows the upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	void *bitmap = EXT3_MB_BITMAP(e3b);
-+	struct ext3_free_extent ex;
-+	int i, free;
-+
-+	free = e3b->bd_info->bb_free;
-+	J_ASSERT(free > 0);
-+
-+	i = e3b->bd_info->bb_first_free;
-+
-+	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+		i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+		if (i >= sb->s_blocksize * 8) {
-+			J_ASSERT(free == 0);
-+			break;
-+		}
-+
-+		mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(free >= ex.fe_len);
-+
-+		ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+		i += ex.fe_len;
-+		free -= ex.fe_len;
-+	}
-+}
-+
-+/*
-+ * This is a special case for storage like RAID5:
-+ * we try to find stripe-aligned chunks for stripe-sized requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+				 struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	void *bitmap = EXT3_MB_BITMAP(e3b);
-+	struct ext3_free_extent ex;
-+	unsigned long i, max;
-+
-+	J_ASSERT(sbi->s_stripe != 0);
-+
-+	/* find first stripe-aligned block */
-+	i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb) +
-+		le32_to_cpu(sbi->s_es->s_first_data_block);
-+	i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+	i = (i - le32_to_cpu(sbi->s_es->s_first_data_block)) %
-+		EXT3_BLOCKS_PER_GROUP(sb);
-+
-+	while (i < sb->s_blocksize * 8) {
-+		if (!mb_test_bit(i, bitmap)) {
-+			max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+			if (max >= sbi->s_stripe) {
-+				ac->ac_found++;
-+				ac->ac_b_ex = ex;
-+				ext3_mb_use_best_found(ac, e3b);
-+				break;
-+			}
-+		}
-+		i += sbi->s_stripe;
-+	}
-+}
-+
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+				int group, int cr)
-+{
-+	struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+	unsigned free, fragments, i, bits;
-+
-+	J_ASSERT(cr >= 0 && cr < 4);
-+	J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+	free = grp->bb_free;
-+	fragments = grp->bb_fragments;
-+	if (free == 0)
-+		return 0;
-+	if (fragments == 0)
-+		return 0;
-+
-+	switch (cr) {
-+		case 0:
-+			J_ASSERT(ac->ac_2order != 0);
-+			bits = ac->ac_sb->s_blocksize_bits + 1;
-+			for (i = ac->ac_2order; i <= bits; i++)
-+				if (grp->bb_counters[i] > 0)
-+					return 1;
-+			break;
-+		case 1:
-+			if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+				return 1;
-+			break;
-+		case 2:
-+			if (free >= ac->ac_g_ex.fe_len)
-+				return 1;
-+			break;
-+		case 3:
-+			return 1;
-+		default:
-+			BUG();
-+	}
-+
-+	return 0;
-+}
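-+
-+/*
-+ * In short (illustrative summary of the checks above): cr 0 accepts a
-+ * group only if the buddy counters prove a free chunk of at least
-+ * 2^ac_2order blocks exists, cr 1 requires the average fragment to be
-+ * large enough, cr 2 only requires enough free blocks in total, and
-+ * cr 3 takes any group with free space.
-+ */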
-+
-+int ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+		       unsigned long goal, int *len, int flags, int *errp)
-+{
-+	struct buffer_head *bitmap_bh = NULL;
-+	struct ext3_allocation_context ac;
-+	int i, group, block, cr, err = 0;
-+	struct ext3_group_desc *gdp;
-+	struct ext3_super_block *es;
-+	struct buffer_head *gdp_bh;
-+	struct ext3_sb_info *sbi;
-+	struct super_block *sb;
-+	struct ext3_buddy e3b;
-+
-+	J_ASSERT(len != NULL);
-+	J_ASSERT(*len > 0);
-+
-+	sb = inode->i_sb;
-+	if (!sb) {
-+		printk("ext3_mb_new_blocks: nonexistent device");
-+		return 0;
-+	}
-+
-+	if (!test_opt(sb, MBALLOC)) {
-+		static int ext3_mballoc_warning = 0;
-+		if (ext3_mballoc_warning == 0) {
-+			printk(KERN_ERR "EXT3-fs: multiblock request with "
-+				"mballoc disabled!\n");
-+			ext3_mballoc_warning++;
-+		}
-+		*len = 1;
-+		err = ext3_new_block_old(handle, inode, goal, errp);
-+		return err;
-+	}
-+
-+	ext3_mb_poll_new_transaction(sb, handle);
-+
-+	sbi = EXT3_SB(sb);
-+	es = EXT3_SB(sb)->s_es;
-+
-+	/*
-+	 * We can't allocate > group size
-+	 */
-+	if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+		*len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+	if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* someone asks for non-reserved blocks */
-+		BUG_ON(*len > 1);
-+		err = ext3_mb_reserve_blocks(sb, 1);
-+		if (err) {
-+			*errp = err;
-+			return 0;
-+		}
-+	}
-+
-+	ac.ac_buddy_page = NULL;
-+	ac.ac_bitmap_page = NULL;
-+
-+	/*
-+	 * Check quota for allocation of these blocks.
-+	 */
-+	while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+		*len -= 1;
-+	if (*len == 0) {
-+		*errp = -EDQUOT;
-+		block = 0;
-+		goto out;
-+	}
-+
-+	/* start searching from the goal */
-+	if (goal < le32_to_cpu(es->s_first_data_block) ||
-+	    goal >= le32_to_cpu(es->s_blocks_count))
-+		goal = le32_to_cpu(es->s_first_data_block);
-+	group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+			EXT3_BLOCKS_PER_GROUP(sb);
-+	block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+			EXT3_BLOCKS_PER_GROUP(sb));
-+
-+	/* set up allocation goals */
-+	ac.ac_b_ex.fe_group = 0;
-+	ac.ac_b_ex.fe_start = 0;
-+	ac.ac_b_ex.fe_len = 0;
-+	ac.ac_status = AC_STATUS_CONTINUE;
-+	ac.ac_groups_scanned = 0;
-+	ac.ac_ex_scanned = 0;
-+	ac.ac_found = 0;
-+	ac.ac_sb = inode->i_sb;
-+	ac.ac_g_ex.fe_group = group;
-+	ac.ac_g_ex.fe_start = block;
-+	ac.ac_g_ex.fe_len = *len;
-+	ac.ac_flags = flags;
-+	ac.ac_2order = 0;
-+	ac.ac_criteria = 0;
-+
-+	if (*len == 1 && sbi->s_stripe) {
-+		/* looks like metadata; let's use a dirty hack for raid5:
-+		 * put all metadata in the first groups in the hope of hitting
-+		 * cached sectors and thus avoiding read-modify-write cycles
-+		 * in raid5 */
-+		ac.ac_g_ex.fe_group = group = 0;
-+	}
-+
-+	/* probably, the request is for 2^8+ blocks (1/2/3/... MB) */
-+	i = ffs(*len);
-+	if (i >= ext3_mb_order2_reqs) {
-+		i--;
-+		if ((*len & (~(1 << i))) == 0)
-+			ac.ac_2order = i;
-+	}
-+
-+	/* first, try the goal */
-+	err = ext3_mb_find_by_goal(&ac, &e3b);
-+	if (err)
-+		goto out_err;
-+	if (ac.ac_status == AC_STATUS_FOUND)
-+		goto found;
-+
-+	/* Let's just scan groups to find more or less suitable blocks */
-+	cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+	for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+		ac.ac_criteria = cr;
-+		for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+			if (group == EXT3_SB(sb)->s_groups_count)
-+				group = 0;
-+
-+			if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+				/* we need full data about the group
-+				 * to make a good selection */
-+				err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+				if (err)
-+					goto out_err;
-+				ext3_mb_release_desc(&e3b);
-+			}
-+
-+			/* check whether the group is good for our criteria */
-+			if (!ext3_mb_good_group(&ac, group, cr))
-+				continue;
-+
-+			err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+			if (err)
-+				goto out_err;
-+
-+			ext3_lock_group(sb, group);
-+			if (!ext3_mb_good_group(&ac, group, cr)) {
-+				/* someone else allocated from this group meanwhile */
-+				ext3_unlock_group(sb, group);
-+				ext3_mb_release_desc(&e3b);
-+				continue;
-+			}
-+
-+			ac.ac_groups_scanned++;
-+			if (cr == 0)
-+				ext3_mb_simple_scan_group(&ac, &e3b);
-+			else if (cr == 1 && *len == sbi->s_stripe)
-+				ext3_mb_scan_aligned(&ac, &e3b);
-+			else
-+				ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+			ext3_unlock_group(sb, group);
-+
-+			ext3_mb_release_desc(&e3b);
-+
-+			if (ac.ac_status != AC_STATUS_CONTINUE)
-+				break;
-+		}
-+	}
-+
-+	if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+	    !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+		/*
-+		 * We've been searching too long. Let's try to allocate
-+		 * the best chunk we've found so far
-+		 */
-+
-+		/*if (ac.ac_found > ext3_mb_max_to_scan)
-+			printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+				"%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+				ac.ac_g_ex.fe_len);*/
-+		ext3_mb_try_best_found(&ac, &e3b);
-+		if (ac.ac_status != AC_STATUS_FOUND) {
-+			/*
-+			 * Someone luckier has already allocated it.
-+			 * The only thing we can do is just take the first
-+			 * found block(s)
-+			printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+			 */
-+			ac.ac_b_ex.fe_group = 0;
-+			ac.ac_b_ex.fe_start = 0;
-+			ac.ac_b_ex.fe_len = 0;
-+			ac.ac_status = AC_STATUS_CONTINUE;
-+			ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+			cr = 3;
-+			goto repeat;
-+		}
-+	}
-+
-+	if (ac.ac_status != AC_STATUS_FOUND) {
-+		/*
-+		 * We are definitely out of luck
-+		 */
-+		DQUOT_FREE_BLOCK(inode, *len);
-+		*errp = -ENOSPC;
-+		block = 0;
-+#if 1
-+		printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+			ac.ac_status, ac.ac_flags);
-+		printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+			ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+			ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+		printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+			sbi->s_blocks_reserved, ac.ac_found);
-+		printk("EXT3-fs: groups: ");
-+		for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+			printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+		printk("\n");
-+#endif
-+		goto out;
-+	}
-+
-+found:
-+	J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+	/* good news - free block(s) have been found. now it's time
-+	 * to mark block(s) in good old journaled bitmap */
-+	block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+			+ ac.ac_b_ex.fe_start
-+			+ le32_to_cpu(es->s_first_data_block);
-+
-+	/* we made a decision, now mark the found blocks in the good old
-+	 * bitmap to be journaled */
-+
-+	ext3_debug("using block group %u\n", ac.ac_b_ex.fe_group);
-+
-+	bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+	if (!bitmap_bh) {
-+		*errp = -EIO;
-+		goto out_err;
-+	}
-+
-+	err = ext3_journal_get_write_access(handle, bitmap_bh);
-+	if (err) {
-+		*errp = err;
-+		goto out_err;
-+	}
-+
-+	gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+	if (!gdp) {
-+		*errp = -EIO;
-+		goto out_err;
-+	}
-+
-+	err = ext3_journal_get_write_access(handle, gdp_bh);
-+	if (err)
-+		goto out_err;
-+
-+	block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+			+ ac.ac_b_ex.fe_start
-+			+ le32_to_cpu(es->s_first_data_block);
-+
-+	if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+	    block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+	    in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group))
-+		ext3_error(sb, "ext3_new_block",
-+			    "Allocating block in system zone - "
-+			    "block = %u", block);
-+#ifdef AGGRESSIVE_CHECK
-+	for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+		J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+	mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+	spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+	gdp->bg_free_blocks_count =
-+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+					- ac.ac_b_ex.fe_len);
-+	spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+	percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+	if (err)
-+		goto out_err;
-+	err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+	if (err)
-+		goto out_err;
-+
-+	sb->s_dirt = 1;
-+	*errp = 0;
-+	brelse(bitmap_bh);
-+
-+	/* drop the non-allocated but quota-charged blocks */
-+	J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+	DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+	*len = ac.ac_b_ex.fe_len;
-+	J_ASSERT(*len > 0);
-+	J_ASSERT(block != 0);
-+	goto out;
-+
-+out_err:
-+	/* if we've already allocated something, roll it back */
-+	if (ac.ac_status == AC_STATUS_FOUND) {
-+		/* FIXME: free blocks here */
-+	}
-+
-+	DQUOT_FREE_BLOCK(inode, *len);
-+	brelse(bitmap_bh);
-+	*errp = err;
-+	block = 0;
-+out:
-+	if (ac.ac_buddy_page)
-+		page_cache_release(ac.ac_buddy_page);
-+	if (ac.ac_bitmap_page)
-+		page_cache_release(ac.ac_bitmap_page);
-+
-+	if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* the block wasn't reserved before and we reserved it
-+		 * at the beginning of allocation. it doesn't matter
-+		 * whether we allocated anything or failed: it's time
-+		 * to release the reservation. NOTE: because we expect
-+		 * multiblock requests only from the delayed allocation
-+		 * path, this is always a single block */
-+		ext3_mb_release_blocks(sb, 1);
-+	}
-+
-+	if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+		atomic_inc(&sbi->s_bal_reqs);
-+		atomic_add(*len, &sbi->s_bal_allocated);
-+		if (*len >= ac.ac_g_ex.fe_len)
-+			atomic_inc(&sbi->s_bal_success);
-+		atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+		if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+				ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+			atomic_inc(&sbi->s_bal_goals);
-+		if (ac.ac_found > ext3_mb_max_to_scan)
-+			atomic_inc(&sbi->s_bal_breaks);
-+	}
-+
-+	ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+	return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+	struct ext3_mb_history *history;
-+	struct super_block *sb;
-+	int start;
-+	int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+					struct ext3_mb_history *hs,
-+					int first)
-+{
-+	if (hs == s->history + s->max)
-+		hs = s->history;
-+	if (!first && hs == s->history + s->start)
-+		return NULL;
-+	while (hs->goal.fe_len == 0) {
-+		hs++;
-+		if (hs == s->history + s->max)
-+			hs = s->history;
-+		if (hs == s->history + s->start)
-+			return NULL;
-+	}
-+	return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+	struct ext3_mb_proc_session *s = seq->private;
-+	struct ext3_mb_history *hs;
-+	int l = *pos;
-+
-+	if (l == 0)
-+		return SEQ_START_TOKEN;
-+	hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+	if (!hs)
-+		return NULL;
-+	while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+	return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+	struct ext3_mb_proc_session *s = seq->private;
-+	struct ext3_mb_history *hs = v;
-+
-+	++*pos;
-+	if (v == SEQ_START_TOKEN)
-+		return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+	else
-+		return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+	struct ext3_mb_history *hs = v;
-+	char buf[20], buf2[20];
-+
-+	if (v == SEQ_START_TOKEN) {
-+		seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+			 "pid", "inode", "goal", "result", "found", "grps", "cr",
-+			 "merge", "tail", "broken");
-+		return 0;
-+	}
-+
-+	sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+		hs->goal.fe_start, hs->goal.fe_len);
-+	sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+		hs->result.fe_start, hs->result.fe_len);
-+	seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+			hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+			hs->cr, hs->merged ? "M" : "", hs->tail,
-+			hs->buddy ? 1 << hs->buddy : 0);
-+	return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+	.start  = ext3_mb_seq_history_start,
-+	.next   = ext3_mb_seq_history_next,
-+	.stop   = ext3_mb_seq_history_stop,
-+	.show   = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+	struct super_block *sb = PDE(inode)->data;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_mb_proc_session *s;
-+	int rc, size;
-+
-+	s = kmalloc(sizeof(*s), GFP_KERNEL);
-+	if (s == NULL)
-+		return -ENOMEM;
-+	size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+	s->history = kmalloc(size, GFP_KERNEL);
-+	if (s->history == NULL) {
-+		kfree(s);
-+		return -ENOMEM;
-+	}
-+
-+	spin_lock(&sbi->s_mb_history_lock);
-+	memcpy(s->history, sbi->s_mb_history, size);
-+	s->max = sbi->s_mb_history_max;
-+	s->start = sbi->s_mb_history_cur % s->max;
-+	spin_unlock(&sbi->s_mb_history_lock);
-+
-+	rc = seq_open(file, &ext3_mb_seq_history_ops);
-+	if (rc == 0) {
-+		struct seq_file *m = (struct seq_file *)file->private_data;
-+		m->private = s;
-+	} else {
-+		kfree(s->history);
-+		kfree(s);
-+	}
-+	return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+	struct seq_file *seq = (struct seq_file *)file->private_data;
-+	struct ext3_mb_proc_session *s = seq->private;
-+	kfree(s->history);
-+	kfree(s);
-+	return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+	.owner		= THIS_MODULE,
-+	.open		= ext3_mb_seq_history_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+	struct super_block *sb = seq->private;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	long group;
-+
-+	if (*pos < 0 || *pos >= sbi->s_groups_count)
-+		return NULL;
-+
-+	group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+	struct super_block *sb = seq->private;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	long group;
-+
-+	++*pos;
-+	if (*pos < 0 || *pos >= sbi->s_groups_count)
-+		return NULL;
-+	group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+	struct super_block *sb = seq->private;
-+	long group = (long) v, i;
-+	struct sg {
-+		struct ext3_group_info info;
-+		unsigned short counters[16];
-+	} sg;
-+
-+	group--;
-+	if (group == 0)
-+		seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+			 "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+			 "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+			 "2^11", "2^12", "2^13");
-+
-+	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+		sizeof(struct ext3_group_info);
-+	ext3_lock_group(sb, group);
-+	memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+	ext3_unlock_group(sb, group);
-+
-+	if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+		return 0;
-+
-+	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+			sg.info.bb_fragments, sg.info.bb_first_free);
-+	for (i = 0; i <= 13; i++)
-+		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+				sg.info.bb_counters[i] : 0);
-+	seq_printf(seq, " ]\n");
-+
-+	return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+	.start  = ext3_mb_seq_groups_start,
-+	.next   = ext3_mb_seq_groups_next,
-+	.stop   = ext3_mb_seq_groups_stop,
-+	.show   = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+	struct super_block *sb = PDE(inode)->data;
-+	int rc;
-+
-+	rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+	if (rc == 0) {
-+		struct seq_file *m = (struct seq_file *)file->private_data;
-+		m->private = sb;
-+	}
-+	return rc;
-+
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+	.owner		= THIS_MODULE,
-+	.open		= ext3_mb_seq_groups_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	char name[64];
-+
-+	snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+	remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+	remove_proc_entry("mb_history", sbi->s_mb_proc);
-+	remove_proc_entry(name, proc_root_ext3);
-+
-+	if (sbi->s_mb_history)
-+		kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	char name[64];
-+	int i;
-+
-+	snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+	sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+	if (sbi->s_mb_proc != NULL) {
-+		struct proc_dir_entry *p;
-+		p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+		if (p) {
-+			p->proc_fops = &ext3_mb_seq_history_fops;
-+			p->data = sb;
-+		}
-+		p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+		if (p) {
-+			p->proc_fops = &ext3_mb_seq_groups_fops;
-+			p->data = sb;
-+		}
-+	}
-+
-+	sbi->s_mb_history_max = 1000;
-+	sbi->s_mb_history_cur = 0;
-+	spin_lock_init(&sbi->s_mb_history_lock);
-+	i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+	sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_history != NULL)
-+		memset(sbi->s_mb_history, 0, i);
-+	/* if we can't allocate the history, then we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+			struct ext3_allocation_context *ac)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_mb_history h;
-+
-+	if (unlikely(sbi->s_mb_history == NULL))
-+		return;
-+
-+	h.pid = current->pid;
-+	h.ino = ino;
-+	h.goal = ac->ac_g_ex;
-+	h.result = ac->ac_b_ex;
-+	h.found = ac->ac_found;
-+	h.cr = ac->ac_criteria;
-+	h.groups = ac->ac_groups_scanned;
-+	h.tail = ac->ac_tail;
-+	h.buddy = ac->ac_buddy;
-+	h.merged = 0;
-+	if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+			ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+		h.merged = 1;
-+
-+	spin_lock(&sbi->s_mb_history_lock);
-+	memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+	if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+		sbi->s_mb_history_cur = 0;
-+	spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int i, j, len, metalen;
-+	int num_meta_group_infos =
-+		(sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+			EXT3_DESC_PER_BLOCK_BITS(sb);
-+	struct ext3_group_info **meta_group_info;
-+
-+	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+	 * So a two level scheme suffices for now. */
-+	sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+				    num_meta_group_infos, GFP_KERNEL);
-+	if (sbi->s_group_info == NULL) {
-+		printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+		return -ENOMEM;
-+	}
-+	sbi->s_buddy_cache = new_inode(sb);
-+	if (sbi->s_buddy_cache == NULL) {
-+		printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+		goto err_freesgi;
-+	}
-+
-+	metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+	for (i = 0; i < num_meta_group_infos; i++) {
-+		if ((i + 1) == num_meta_group_infos)
-+			metalen = sizeof(*meta_group_info) *
-+				(sbi->s_groups_count -
-+					(i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+		meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+		if (meta_group_info == NULL) {
-+			printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+			       "buddy group\n");
-+			goto err_freemeta;
-+		}
-+		sbi->s_group_info[i] = meta_group_info;
-+	}
-+
-+	/*
-+	 * calculate the needed size. if you change the bb_counters size,
-+	 * don't forget about ext3_mb_generate_buddy()
-+	 */
-+	len = sizeof(struct ext3_group_info);
-+	len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+	for (i = 0; i < sbi->s_groups_count; i++) {
-+		struct ext3_group_desc * desc;
-+
-+		meta_group_info =
-+			sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+		j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+		meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+		if (meta_group_info[j] == NULL) {
-+			printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+			i--;
-+			goto err_freebuddy;
-+		}
-+		desc = ext3_get_group_desc(sb, i, NULL);
-+		if (desc == NULL) {
-+			printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+			goto err_freebuddy;
-+		}
-+		memset(meta_group_info[j], 0, len);
-+		set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+			&meta_group_info[j]->bb_state);
-+		meta_group_info[j]->bb_free =
-+			le16_to_cpu(desc->bg_free_blocks_count);
-+	}
-+
-+	return 0;
-+
-+err_freebuddy:
-+	while (i >= 0) {
-+		kfree(EXT3_GROUP_INFO(sb, i));
-+		i--;
-+	}
-+	i = num_meta_group_infos;
-+err_freemeta:
-+	while (--i >= 0)
-+		kfree(sbi->s_group_info[i]);
-+	iput(sbi->s_buddy_cache);
-+err_freesgi:
-+	kfree(sbi->s_group_info);
-+	return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct inode *root = sb->s_root->d_inode;
-+	unsigned i, offset, max;
-+	struct dentry *dentry;
-+
-+	if (!test_opt(sb, MBALLOC))
-+		return 0;
-+
-+	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_offsets == NULL) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		return -ENOMEM;
-+	}
-+	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_maxs == NULL) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		kfree(sbi->s_mb_offsets);
-+		return -ENOMEM;
-+	}
-+
-+	 /* order 0 is regular bitmap */
-+	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+	sbi->s_mb_offsets[0] = 0;
-+
-+	i = 1;
-+	offset = 0;
-+	max = sb->s_blocksize << 2;
-+	do {
-+		sbi->s_mb_offsets[i] = offset;
-+		sbi->s_mb_maxs[i] = max;
-+		offset += 1 << (sb->s_blocksize_bits - i);
-+		max = max >> 1;
-+		i++;
-+	} while (i <= sb->s_blocksize_bits + 1);
-+
-+	/* init file for buddy data */
-+	if ((i = ext3_mb_init_backend(sb))) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		kfree(sbi->s_mb_offsets);
-+		kfree(sbi->s_mb_maxs);
-+		return i;
-+	}
-+
-+	spin_lock_init(&sbi->s_reserve_lock);
-+	spin_lock_init(&sbi->s_md_lock);
-+	INIT_LIST_HEAD(&sbi->s_active_transaction);
-+	INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+	INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+	spin_lock_init(&sbi->s_bal_lock);
-+
-+	/* remove old on-disk buddy file */
-+	down(&root->i_sem);
-+	dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+	if (!IS_ERR(dentry)) {
-+		if (dentry->d_inode != NULL) {
-+			i = vfs_unlink(root, dentry);
-+			if (i != 0)
-+				printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+		}
-+		dput(dentry);
-+	}
-+	up(&root->i_sem);
-+
-+	ext3_mb_history_init(sb);
-+
-+	printk("EXT3-fs: mballoc enabled\n");
-+	return 0;
-+}
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int i, num_meta_group_infos;
-+
-+	if (!test_opt(sb, MBALLOC))
-+		return 0;
-+
-+	/* release freed, non-committed blocks */
-+	spin_lock(&sbi->s_md_lock);
-+	list_splice_init(&sbi->s_closed_transaction,
-+			&sbi->s_committed_transaction);
-+	list_splice_init(&sbi->s_active_transaction,
-+			&sbi->s_committed_transaction);
-+	spin_unlock(&sbi->s_md_lock);
-+	ext3_mb_free_committed_blocks(sb);
-+
-+	if (sbi->s_group_info) {
-+		for (i = 0; i < sbi->s_groups_count; i++)
-+			kfree(EXT3_GROUP_INFO(sb, i));
-+		num_meta_group_infos = (sbi->s_groups_count +
-+			EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+			EXT3_DESC_PER_BLOCK_BITS(sb);
-+		for (i = 0; i < num_meta_group_infos; i++)
-+			kfree(sbi->s_group_info[i]);
-+		kfree(sbi->s_group_info);
-+	}
-+	if (sbi->s_mb_offsets)
-+		kfree(sbi->s_mb_offsets);
-+	if (sbi->s_mb_maxs)
-+		kfree(sbi->s_mb_maxs);
-+	if (sbi->s_buddy_cache)
-+		iput(sbi->s_buddy_cache);
-+	if (sbi->s_blocks_reserved)
-+		printk("EXT3-fs: %ld blocks still reserved at umount!\n",
-+				sbi->s_blocks_reserved);
-+	if (ext3_mb_stats) {
-+		printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+			atomic_read(&sbi->s_bal_allocated),
-+			atomic_read(&sbi->s_bal_reqs),
-+			atomic_read(&sbi->s_bal_success));
-+		printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+			"%u 2^N hits, %u breaks\n",
-+			atomic_read(&sbi->s_bal_ex_scanned),
-+			atomic_read(&sbi->s_bal_goals),
-+			atomic_read(&sbi->s_bal_2orders),
-+			atomic_read(&sbi->s_bal_breaks));
-+		printk("EXT3-fs: mballoc: %lu buddies generated, %Lu cycles spent\n",
-+			sbi->s_mb_buddies_generated,
-+			sbi->s_mb_generation_time);
-+	}
-+
-+	ext3_mb_history_release(sb);
-+
-+	return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int err, i, count = 0, count2 = 0;
-+	struct ext3_free_metadata *md;
-+	struct ext3_buddy e3b;
-+
-+	if (list_empty(&sbi->s_committed_transaction))
-+		return;
-+
-+	/* there are committed blocks still to be freed */
-+	do {
-+		/* get next array of blocks */
-+		md = NULL;
-+		spin_lock(&sbi->s_md_lock);
-+		if (!list_empty(&sbi->s_committed_transaction)) {
-+			md = list_entry(sbi->s_committed_transaction.next,
-+					struct ext3_free_metadata, list);
-+			list_del(&md->list);
-+		}
-+		spin_unlock(&sbi->s_md_lock);
-+
-+		if (md == NULL)
-+			break;
-+
-+		mb_debug("gonna free %u blocks in group %u (0x%p):",
-+				md->num, md->group, md);
-+
-+		err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+		/* we expect to find existing buddy because it's pinned */
-+		BUG_ON(err != 0);
-+
-+		/* there are blocks to put in buddy to make them really free */
-+		count += md->num;
-+		count2++;
-+		ext3_lock_group(sb, md->group);
-+		for (i = 0; i < md->num; i++) {
-+			mb_debug(" %u", md->blocks[i]);
-+			mb_free_blocks(&e3b, md->blocks[i], 1);
-+		}
-+		mb_debug("\n");
-+		ext3_unlock_group(sb, md->group);
-+
-+		/* balance refcounts from ext3_mb_free_metadata() */
-+		page_cache_release(e3b.bd_buddy_page);
-+		page_cache_release(e3b.bd_bitmap_page);
-+
-+		kfree(md);
-+		ext3_mb_release_desc(&e3b);
-+
-+	} while (md);
-+	mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+	if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+		return;
-+
-+	/* new transaction! time to close the last one and free blocks of
-+	 * the committed transaction. we know that only one transaction can
-+	 * be active at a time, so the previous transaction may still be
-+	 * getting logged, while the transaction before the previous one is
-+	 * known to be logged already. this means we may now free blocks
-+	 * freed in all transactions before the previous one. */
-+
-+	spin_lock(&sbi->s_md_lock);
-+	if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+		mb_debug("new transaction %lu, old %lu\n",
-+				(unsigned long) handle->h_transaction->t_tid,
-+				(unsigned long) sbi->s_last_transaction);
-+		list_splice_init(&sbi->s_closed_transaction,
-+					&sbi->s_committed_transaction);
-+		list_splice_init(&sbi->s_active_transaction,
-+					&sbi->s_closed_transaction);
-+		sbi->s_last_transaction = handle->h_transaction->t_tid;
-+	}
-+	spin_unlock(&sbi->s_md_lock);
-+
-+	ext3_mb_free_committed_blocks(sb);
-+}
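-+
-+/*
-+ * Illustration of the list rotation above: blocks freed while transaction
-+ * N is active sit on s_active_transaction; when N+1 starts they move to
-+ * s_closed_transaction, and when N+2 starts they move to
-+ * s_committed_transaction and are finally returned to the buddy by
-+ * ext3_mb_free_committed_blocks().
-+ */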
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+				int group, int block, int count)
-+{
-+	struct ext3_group_info *db = e3b->bd_info;
-+	struct super_block *sb = e3b->bd_sb;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_free_metadata *md;
-+	int i;
-+
-+	J_ASSERT(e3b->bd_bitmap_page != NULL);
-+	J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+	ext3_lock_group(sb, group);
-+	for (i = 0; i < count; i++) {
-+		md = db->bb_md_cur;
-+		if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+			db->bb_md_cur = NULL;
-+			md = NULL;
-+		}
-+
-+		if (md == NULL) {
-+			ext3_unlock_group(sb, group);
-+			md = kmalloc(sizeof(*md), GFP_KERNEL);
-+			if (md == NULL)
-+				return -ENOMEM;
-+			md->num = 0;
-+			md->group = group;
-+
-+			ext3_lock_group(sb, group);
-+			if (db->bb_md_cur == NULL) {
-+				spin_lock(&sbi->s_md_lock);
-+				list_add(&md->list, &sbi->s_active_transaction);
-+				spin_unlock(&sbi->s_md_lock);
-+				/* protect buddy cache from being freed,
-+				 * otherwise we'll refresh it from
-+				 * on-disk bitmap and lose not-yet-available
-+				 * blocks */
-+				page_cache_get(e3b->bd_buddy_page);
-+				page_cache_get(e3b->bd_bitmap_page);
-+				db->bb_md_cur = md;
-+				db->bb_tid = handle->h_transaction->t_tid;
-+				mb_debug("new md 0x%p for group %u\n",
-+							md, md->group);
-+			} else {
-+				kfree(md);
-+				md = db->bb_md_cur;
-+			}
-+		}
-+
-+		BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+		md->blocks[md->num] = block + i;
-+		md->num++;
-+		if (md->num == EXT3_BB_MAX_BLOCKS) {
-+			/* no more space, put full container on a sb's list */
-+			db->bb_md_cur = NULL;
-+		}
-+	}
-+	ext3_unlock_group(sb, group);
-+	return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+			unsigned long block, unsigned long count,
-+			int metadata, int *freed)
-+{
-+	struct buffer_head *bitmap_bh = NULL;
-+	struct ext3_group_desc *gdp;
-+	struct ext3_super_block *es;
-+	unsigned long bit, overflow;
-+	struct buffer_head *gd_bh;
-+	unsigned long block_group;
-+	struct ext3_sb_info *sbi;
-+	struct super_block *sb;
-+	struct ext3_buddy e3b;
-+	int err = 0, ret;
-+
-+	*freed = 0;
-+	sb = inode->i_sb;
-+	if (!sb) {
-+		printk ("ext3_free_blocks: nonexistent device");
-+		return;
-+	}
-+
-+	ext3_mb_poll_new_transaction(sb, handle);
-+
-+	sbi = EXT3_SB(sb);
-+	es = EXT3_SB(sb)->s_es;
-+	if (block < le32_to_cpu(es->s_first_data_block) ||
-+	    block + count < block ||
-+	    block + count > le32_to_cpu(es->s_blocks_count)) {
-+		ext3_error (sb, "ext3_free_blocks",
-+			    "Freeing blocks not in datazone - "
-+			    "block = %lu, count = %lu", block, count);
-+		goto error_return;
-+	}
-+
-+	ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+	overflow = 0;
-+	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+		      EXT3_BLOCKS_PER_GROUP(sb);
-+	bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+		      EXT3_BLOCKS_PER_GROUP(sb);
-+	/*
-+	 * Check to see if we are freeing blocks across a group
-+	 * boundary.
-+	 */
-+	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+		count -= overflow;
-+	}
-+	brelse(bitmap_bh);
-+	bitmap_bh = read_block_bitmap(sb, block_group);
-+	if (!bitmap_bh)
-+		goto error_return;
-+	gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+	if (!gdp)
-+		goto error_return;
-+
-+	if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+	    in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+	    in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group) ||
-+	    in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group))
-+		ext3_error (sb, "ext3_free_blocks",
-+			    "Freeing blocks in system zones - "
-+			    "Block = %lu, count = %lu",
-+			    block, count);
-+
-+	BUFFER_TRACE(bitmap_bh, "getting write access");
-+	err = ext3_journal_get_write_access(handle, bitmap_bh);
-+	if (err)
-+		goto error_return;
-+
-+	/*
-+	 * We are about to modify some metadata.  Call the journal APIs
-+	 * to unshare ->b_data if a currently-committing transaction is
-+	 * using it
-+	 */
-+	BUFFER_TRACE(gd_bh, "get_write_access");
-+	err = ext3_journal_get_write_access(handle, gd_bh);
-+	if (err)
-+		goto error_return;
-+
-+	err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+	if (err)
-+		goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+	{
-+		int i;
-+		for (i = 0; i < count; i++)
-+			J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+	}
-+#endif
-+	mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+	/* We dirtied the bitmap block */
-+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+	if (metadata) {
-+		/* blocks being freed are metadata. these blocks shouldn't
-+		 * be used until this transaction is committed */
-+		ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+	} else {
-+		ext3_lock_group(sb, block_group);
-+		mb_free_blocks(&e3b, bit, count);
-+		ext3_unlock_group(sb, block_group);
-+	}
-+
-+	spin_lock(sb_bgl_lock(sbi, block_group));
-+	gdp->bg_free_blocks_count =
-+		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+	spin_unlock(sb_bgl_lock(sbi, block_group));
-+	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+	ext3_mb_release_desc(&e3b);
-+
-+	*freed = count;
-+
-+	/* And the group descriptor block */
-+	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+	ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+	if (!err) err = ret;
-+
-+	if (overflow && !err) {
-+		block += count;
-+		count = overflow;
-+		goto do_more;
-+	}
-+	sb->s_dirt = 1;
-+error_return:
-+	brelse(bitmap_bh);
-+	ext3_std_error(sb, err);
-+	return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int free, ret = -ENOSPC;
-+
-+	BUG_ON(blocks < 0);
-+	spin_lock(&sbi->s_reserve_lock);
-+	free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+	if (blocks <= free - sbi->s_blocks_reserved) {
-+		sbi->s_blocks_reserved += blocks;
-+		ret = 0;
-+	}
-+	spin_unlock(&sbi->s_reserve_lock);
-+	return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+	BUG_ON(blocks < 0);
-+	spin_lock(&sbi->s_reserve_lock);
-+	sbi->s_blocks_reserved -= blocks;
-+	WARN_ON(sbi->s_blocks_reserved < 0);
-+	if (sbi->s_blocks_reserved < 0)
-+		sbi->s_blocks_reserved = 0;
-+	spin_unlock(&sbi->s_reserve_lock);
-+}
-+
-+int ext3_new_block(handle_t *handle, struct inode *inode,
-+		unsigned long goal, int *errp)
-+{
-+	int ret, len;
-+
-+	if (!test_opt(inode->i_sb, MBALLOC)) {
-+		ret = ext3_new_block_old(handle, inode, goal, errp);
-+		goto out;
-+	}
-+	len = 1;
-+	ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+	return ret;
-+}
-+
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+			unsigned long block, unsigned long count, int metadata)
-+{
-+	struct super_block *sb;
-+	int freed;
-+
-+	sb = inode->i_sb;
-+	if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+		ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+	else
-+		ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
-+	if (freed)
-+		DQUOT_FREE_BLOCK(inode, freed);
-+	return;
-+}
-+
-+#define EXT3_ROOT		   "ext3"
-+#define EXT3_MB_STATS_NAME	   "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME  "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME  "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ	   "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_stats);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_STATS_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Coerce to a boolean: zero -> 0, non-zero -> 1 */
-+	ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+	return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Parse the value; it must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_max_to_scan = value;
-+
-+	return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Parse the value; it must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_min_to_scan = value;
-+
-+	return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+				   int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+				    unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Parse the value; it must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_order2_reqs = value;
-+
-+	return count;
-+}
-+
-+int __init init_ext3_proc(void)
-+{
-+	struct proc_dir_entry *proc_ext3_mb_stats;
-+	struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+	struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+	struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+	proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+	if (proc_root_ext3 == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+		return -EIO;
-+	}
-+
-+	/* Initialize EXT3_MB_STATS_NAME */
-+	proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_stats == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_STATS_NAME);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_stats->data = NULL;
-+	proc_ext3_mb_stats->read_proc  = ext3_mb_stats_read;
-+	proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+	/* Initialize EXT3_MAX_TO_SCAN_NAME */
-+	proc_ext3_mb_max_to_scan = create_proc_entry(
-+			EXT3_MB_MAX_TO_SCAN_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_max_to_scan == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_MAX_TO_SCAN_NAME);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_max_to_scan->data = NULL;
-+	proc_ext3_mb_max_to_scan->read_proc  = ext3_mb_max_to_scan_read;
-+	proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+	/* Initialize EXT3_MIN_TO_SCAN_NAME */
-+	proc_ext3_mb_min_to_scan = create_proc_entry(
-+			EXT3_MB_MIN_TO_SCAN_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_min_to_scan == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_MIN_TO_SCAN_NAME);
-+		remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_min_to_scan->data = NULL;
-+	proc_ext3_mb_min_to_scan->read_proc  = ext3_mb_min_to_scan_read;
-+	proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+	/* Initialize EXT3_ORDER2_REQ */
-+	proc_ext3_mb_order2_req = create_proc_entry(
-+			EXT3_MB_ORDER2_REQ,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_order2_req == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_ORDER2_REQ);
-+		remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_order2_req->data = NULL;
-+	proc_ext3_mb_order2_req->read_proc  = ext3_mb_order2_req_read;
-+	proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+	return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+	remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+	remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-2.6.12.6-bull/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/Makefile	2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/Makefile	2006-04-29 20:39:10.000000000 +0400
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
- 
- ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- 	   ioctl.o namei.o super.o symlink.o hash.o resize.o \
--	   extents.o
-+	   extents.o mballoc.o
- 
- ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
diff --git a/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.18-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.18-vanilla.patch
deleted file mode 100644
index 702dfcc502..0000000000
--- a/ldiskfs/kernel_patches/patches/ext3-mballoc2-2.6.18-vanilla.patch
+++ /dev/null
@@ -1,3140 +0,0 @@
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs.h	2006-07-16 02:29:49.000000000 +0800
-@@ -53,6 +53,14 @@
- #define ext3_debug(f, a...)	do {} while (0)
- #endif
- 
-+#define EXT3_MULTIBLOCK_ALLOCATOR	1
-+
-+#define EXT3_MB_HINT_MERGE		1
-+#define EXT3_MB_HINT_RESERVED		2
-+#define EXT3_MB_HINT_METADATA		4
-+#define EXT3_MB_HINT_FIRST		8
-+#define EXT3_MB_HINT_BEST		16
-+
- /*
-  * Special inodes numbers
-  */
-@@ -379,6 +387,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV		0x800000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS		0x1000000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG		0x2000000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC		0x4000000/* Buddy allocation support */
- 
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -405,6 +413,14 @@
- #define ext3_find_first_zero_bit	ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit		ext2_find_next_zero_bit
- 
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif	/* __LITTLE_ENDIAN */
-+#endif	/* !ext2_find_next_le_bit */
-+
- /*
-  * Maximal mount counts between two filesystem checks
-  */
-@@ -749,12 +758,12 @@ ext3_group_first_block_no(struct super_b
- /* balloc.c */
- extern int ext3_bg_has_super(struct super_block *sb, int group);
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
--extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-+extern ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode,
- 			ext3_fsblk_t goal, int *errp);
- extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
- 			ext3_fsblk_t goal, unsigned long *count, int *errp);
- extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
--			ext3_fsblk_t block, unsigned long count);
-+			ext3_fsblk_t block, unsigned long count, int metadata);
- extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
- 				 ext3_fsblk_t block, unsigned long count,
- 				unsigned long *pdquot_freed_blocks);
-@@ -881,6 +890,21 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- 			  unsigned int cmd, unsigned long arg);
- 
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *sb, int needs_recovery);
-+extern int ext3_mb_release(struct super_block *sb);
-+extern ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+				   ext3_fsblk_t goal, int *errp);
-+extern ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+				       ext3_fsblk_t goal, int *len, int flags,
-+				       int *errp);
-+extern int ext3_mb_reserve_blocks(struct super_block *sb, int);
-+extern void ext3_mb_release_blocks(struct super_block *sb, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif	/* __KERNEL__ */
- 
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-stage/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_sb.h	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs_sb.h	2006-07-16 02:29:49.000000000 +0800
-@@ -21,8 +21,14 @@
- #include <linux/wait.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
- 
- /*
-  * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@ struct ext3_sb_info {
- 	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
- 	int s_jquota_fmt;			/* Format of quota to use */
- #endif
-+
-+	/* for buddy allocator */
-+	struct ext3_group_info ***s_group_info;
-+	struct inode *s_buddy_cache;
-+	long s_blocks_reserved;
-+	spinlock_t s_reserve_lock;
-+	struct list_head s_active_transaction;
-+	struct list_head s_closed_transaction;
-+	struct list_head s_committed_transaction;
-+	spinlock_t s_md_lock;
-+	tid_t s_last_transaction;
-+	int s_mb_factor;
-+	unsigned short *s_mb_offsets, *s_mb_maxs;
-+	unsigned long s_stripe;
-+
-+	/* history to debug policy */
-+	struct ext3_mb_history *s_mb_history;
-+	int s_mb_history_cur;
-+	int s_mb_history_max;
-+	struct proc_dir_entry *s_mb_proc;
-+	spinlock_t s_mb_history_lock;
-+
-+	/* stats for buddy allocator */
-+	atomic_t s_bal_reqs;	/* number of reqs with len > 1 */
-+	atomic_t s_bal_success;	/* we found long enough chunks */
-+	atomic_t s_bal_allocated;	/* in blocks */
-+	atomic_t s_bal_ex_scanned;	/* total extents scanned */
-+	atomic_t s_bal_goals;	/* goal hits */
-+	atomic_t s_bal_breaks;	/* too long searches */
-+	atomic_t s_bal_2orders;	/* 2^order hits */
-+	spinlock_t s_bal_lock;
-+	unsigned long s_mb_buddies_generated;
-+	unsigned long long s_mb_generation_time;
- };
-+
-+#define EXT3_GROUP_INFO(sb, group)					   \
-+	EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+				 [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
- 
- #endif	/* _LINUX_EXT3_FS_SB */
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/super.c	2006-07-16 02:29:49.000000000 +0800
-@@ -391,6 +391,7 @@ static void ext3_put_super (struct super
- 	struct ext3_super_block *es = sbi->s_es;
- 	int i;
- 
-+	ext3_mb_release(sb);
- 	ext3_ext_release(sb);
- 	ext3_xattr_put_super(sb);
- 	journal_destroy(sbi->s_journal);
-@@ -642,6 +643,7 @@ enum {
- 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- 	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- 	Opt_extents, Opt_noextents, Opt_extdebug,
-+	Opt_mballoc, Opt_nomballoc, Opt_stripe,
- 	Opt_grpquota
- };
- 
-@@ -696,6 +697,9 @@ static match_table_t tokens = {
- 	{Opt_extents, "extents"},
- 	{Opt_noextents, "noextents"},
- 	{Opt_extdebug, "extdebug"},
-+	{Opt_mballoc, "mballoc"},
-+	{Opt_nomballoc, "nomballoc"},
-+	{Opt_stripe, "stripe=%u"},
- 	{Opt_barrier, "barrier=%u"},
- 	{Opt_err, NULL},
- 	{Opt_resize, "resize"},
-@@ -1047,6 +1049,19 @@ clear_qf_name:
- 		case Opt_extdebug:
- 			set_opt (sbi->s_mount_opt, EXTDEBUG);
- 			break;
-+		case Opt_mballoc:
-+			set_opt(sbi->s_mount_opt, MBALLOC);
-+			break;
-+		case Opt_nomballoc:
-+			clear_opt(sbi->s_mount_opt, MBALLOC);
-+			break;
-+		case Opt_stripe:
-+			if (match_int(&args[0], &option))
-+				return 0;
-+			if (option < 0)
-+				return 0;
-+			sbi->s_stripe = option;
-+			break;
- 		default:
- 			printk (KERN_ERR
- 				"EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1773,6 +1778,7 @@ static int ext3_fill_super (struct super
- 		"writeback");
- 
- 	ext3_ext_init(sb);
-+	ext3_mb_init(sb, needs_recovery);
- 	lock_kernel();
- 	return 0;
- 
-@@ -2712,7 +2718,13 @@ static struct file_system_type ext3_fs_t
- 
- static int __init init_ext3_fs(void)
- {
--	int err = init_ext3_xattr();
-+	int err;
-+
-+	err = init_ext3_proc();
-+	if (err)
-+		return err;
-+
-+	err = init_ext3_xattr();
- 	if (err)
- 		return err;
- 	err = init_inodecache();
-@@ -2734,6 +2746,7 @@ static void __exit exit_ext3_fs(void)
- 	unregister_filesystem(&ext3_fs_type);
- 	destroy_inodecache();
- 	exit_ext3_xattr();
-+	exit_ext3_proc();
- }
- 
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-stage/fs/ext3/extents.c
-===================================================================
---- linux-stage.orig/fs/ext3/extents.c	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/extents.c	2006-07-16 02:29:49.000000000 +0800
-@@ -771,7 +771,7 @@ cleanup:
- 		for (i = 0; i < depth; i++) {
- 			if (!ablocks[i])
- 				continue;
--			ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+			ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- 		}
- 	}
- 	kfree(ablocks);
-@@ -1428,7 +1428,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- 		  path->p_idx->ei_leaf);
- 	bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- 	ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
--	ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+	ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- 	return err;
- }
- 
-@@ -1913,10 +1913,12 @@ ext3_remove_blocks(struct ext3_extents_t
- 	int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- 	handle_t *handle = ext3_journal_start(tree->inode, needed);
- 	struct buffer_head *bh;
--	int i;
-+	int i, metadata = 0;
- 
- 	if (IS_ERR(handle))
- 		return PTR_ERR(handle);
-+	if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+		metadata = 1;
- 	if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- 		/* tail removal */
- 		unsigned long num, start;
-@@ -1928,7 +1930,7 @@ ext3_remove_blocks(struct ext3_extents_t
- 			bh = sb_find_get_block(tree->inode->i_sb, start + i);
- 			ext3_forget(handle, 0, tree->inode, bh, start + i);
- 		}
--		ext3_free_blocks(handle, tree->inode, start, num);
-+		ext3_free_blocks(handle, tree->inode, start, num, metadata);
- 	} else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- 		printk("strange request: removal %lu-%lu from %u:%u\n",
- 		       from, to, ex->ee_block, ex->ee_len);
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/inode.c	2006-07-16 02:29:49.000000000 +0800
-@@ -562,7 +562,7 @@ static int ext3_alloc_blocks(handle_t *h
- 	return ret;
- failed_out:
- 	for (i = 0; i <index; i++)
--		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-+		ext3_free_blocks(handle, inode, new_blocks[i], 1, 1);
- 	return ret;
- }
- 
-@@ -661,9 +661,9 @@ failed:
- 		ext3_journal_forget(handle, branch[i].bh);
- 	}
- 	for (i = 0; i <indirect_blks; i++)
--		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-+		ext3_free_blocks(handle, inode, new_blocks[i], 1, 1);
- 
--	ext3_free_blocks(handle, inode, new_blocks[i], num);
-+	ext3_free_blocks(handle, inode, new_blocks[i], num, 1);
- 
- 	return err;
- }
-@@ -760,9 +760,9 @@ err_out:
- 	for (i = 1; i <= num; i++) {
- 		BUFFER_TRACE(where[i].bh, "call journal_forget");
- 		ext3_journal_forget(handle, where[i].bh);
--		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
-+		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1,1);
- 	}
--	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
-+	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 1);
- 
- 	return err;
- }
-@@ -2007,7 +2007,7 @@ static void ext3_clear_blocks(handle_t *
- 		}
- 	}
- 
--	ext3_free_blocks(handle, inode, block_to_free, count);
-+	ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
- 
- /**
-@@ -2180,7 +2180,7 @@ static void ext3_free_branches(handle_t 
- 				ext3_journal_test_restart(handle, inode);
- 			}
- 
--			ext3_free_blocks(handle, inode, nr, 1);
-+			ext3_free_blocks(handle, inode, nr, 1, 1);
- 
- 			if (parent_bh) {
- 				/*
-Index: linux-stage/fs/ext3/balloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/balloc.c	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/balloc.c	2006-07-16 02:33:13.000000000 +0800
-@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
-  *
-  * Return buffer_head on success or NULL in case of failure.
-  */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- 	struct ext3_group_desc * desc;
-@@ -490,24 +490,6 @@ error_return:
- 	return;
- }
- 
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
--			ext3_fsblk_t block, unsigned long count)
--{
--	struct super_block * sb;
--	unsigned long dquot_freed_blocks;
--
--	sb = inode->i_sb;
--	if (!sb) {
--		printk ("ext3_free_blocks: nonexistent device");
--		return;
--	}
--	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
--	if (dquot_freed_blocks)
--		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
--	return;
--}
--
- /*
-  * For ext3 allocations, we must not reuse any blocks which are
-  * allocated in the bitmap buffer's "last committed data" copy.  This
-@@ -1463,7 +1445,7 @@ out:
- 	return 0;
- }
- 
--ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode,
- 			ext3_fsblk_t goal, int *errp)
- {
- 	unsigned long count = 1;
-Index: linux-stage/fs/ext3/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.c	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/xattr.c	2006-07-16 02:29:49.000000000 +0800
-@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl
- 		ea_bdebug(bh, "refcount now=0; freeing");
- 		if (ce)
- 			mb_cache_entry_free(ce);
--		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-+		ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
- 		get_bh(bh);
- 		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
- 	} else {
-@@ -805,7 +805,7 @@ inserted:
- 			new_bh = sb_getblk(sb, block);
- 			if (!new_bh) {
- getblk_failed:
--				ext3_free_blocks(handle, inode, block, 1);
-+				ext3_free_blocks(handle, inode, block, 1, 1);
- 				error = -EIO;
- 				goto cleanup;
- 			}
-Index: linux-stage/fs/ext3/mballoc.c
-===================================================================
---- /dev/null	1970-01-01 00:00:00.000000000 +0000
-+++ linux-stage/fs/ext3/mballoc.c	2006-07-16 02:29:49.000000000 +0800
-@@ -0,0 +1,2730 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ *   - track min/max extents in each group for better group selection
-+ *   - mb_mark_used() may allocate chunk right after splitting buddy
-+ *   - special flag to advise the allocator to look for requested + N blocks
-+ *     this may improve interaction between extents and mballoc
-+ *   - tree of groups sorted by number of free blocks
-+ *   - percpu reservation code (hotpath)
-+ *   - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over
-+ * its structures. These checks slow things down a lot.
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ * with MB_DEBUG the allocator prints verbose debugging output via mb_debug()
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...)	printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores the last N allocations in memory;
-+ * you can monitor them in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * Upper limit on how long mballoc may search for the best extent (in found extents)
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * Lower limit on how long mballoc must keep searching for the best extent (also in found extents)
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' enabled, the allocator collects statistics that
-+ * are shown at umount. Collecting them has a cost, though.
-+ */
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * controls for which request sizes the 2^N search over the buddy data is used
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS	30
-+
-+struct ext3_free_metadata {
-+	unsigned short group;
-+	unsigned short num;
-+	unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+	struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+	unsigned long	bb_state;
-+	unsigned long	bb_tid;
-+	struct ext3_free_metadata *bb_md_cur;
-+	unsigned short	bb_first_free;
-+	unsigned short	bb_free;
-+	unsigned short	bb_fragments;
-+	unsigned short	bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT	0
-+#define EXT3_GROUP_INFO_LOCKED_BIT	1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp)	\
-+	(test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+	__u16 fe_start;
-+	__u16 fe_len;
-+	__u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+	struct super_block *ac_sb;
-+
-+	/* search goals */
-+	struct ext3_free_extent ac_g_ex;
-+
-+	/* the best found extent */
-+	struct ext3_free_extent ac_b_ex;
-+
-+	/* number of iterations done. we have to track to limit searching */
-+	unsigned long ac_ex_scanned;
-+	__u16 ac_groups_scanned;
-+	__u16 ac_found;
-+	__u16 ac_tail;
-+	__u16 ac_buddy;
-+	__u8 ac_status;
-+	__u8 ac_flags;		/* allocation hints */
-+	__u8 ac_criteria;
-+	__u8 ac_repeats;
-+	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
-+				 * N > 0, the field stores N, otherwise 0 */
-+
-+	struct page *ac_buddy_page;
-+	struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE	1
-+#define AC_STATUS_FOUND		2
-+#define AC_STATUS_BREAK		3
-+
-+struct ext3_mb_history {
-+	struct ext3_free_extent goal;	/* goal allocation */
-+	struct ext3_free_extent result;	/* result allocation */
-+	unsigned pid;
-+	unsigned ino;
-+	__u16 found;	/* how many extents have been found */
-+	__u16 groups;	/* how many groups have been scanned */
-+	__u16 tail;	/* what tail broke some buddy */
-+	__u16 buddy;	/* buddy the tail ^^^ broke */
-+	__u8 cr;	/* which phase the result extent was found at */
-+	__u8 merged;
-+};
-+
-+struct ext3_buddy {
-+	struct page *bd_buddy_page;
-+	void *bd_buddy;
-+	struct page *bd_bitmap_page;
-+	void *bd_bitmap;
-+	struct ext3_group_info *bd_info;
-+	struct super_block *bd_sb;
-+	__u16 bd_blkbits;
-+	__u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b)	((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b)	((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+				struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+int ext3_mb_reserve_blocks(struct super_block *, int);
-+void ext3_mb_release_blocks(struct super_block *, int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
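-+/*
-+ * The ext2_*_bit() helpers expect a long-aligned base address.  These
-+ * wrappers fold the byte misalignment of 'addr' into 'bit' and round
-+ * the address down to a word boundary before calling them.
-+ */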
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr)		\
-+{							\
-+	bit += ((unsigned long) addr & 7UL) << 3;	\
-+	addr = (void *) ((unsigned long) addr & ~7UL);	\
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr)		\
-+{							\
-+	bit += ((unsigned long) addr & 3UL) << 3;	\
-+	addr = (void *) ((unsigned long) addr & ~3UL);	\
-+}
-+#else
-+#error "unsupported BITS_PER_LONG"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+	mb_correct_addr_and_bit(bit,addr);
-+	ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+	int fix;
-+#if BITS_PER_LONG == 64
-+	fix = ((unsigned long) addr & 7UL) << 3;
-+	addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+	fix = ((unsigned long) addr & 3UL) << 3;
-+	addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "unsupported BITS_PER_LONG"
-+#endif
-+	max += fix;
-+	start += fix;
-+	return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+	char *bb;
-+
-+	J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+	J_ASSERT(max != NULL);
-+
-+	if (order > e3b->bd_blkbits + 1) {
-+		*max = 0;
-+		return NULL;
-+	}
-+
-+	/* at order 0 we see each particular block */
-+	*max = 1 << (e3b->bd_blkbits + 3);
-+	if (order == 0)
-+		return EXT3_MB_BITMAP(e3b);
-+
-+	bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+	*max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+	return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+	int order = e3b->bd_blkbits + 1;
-+	int max, max2, i, j, k, count;
-+	int fragments = 0, fstart;
-+	void *buddy, *buddy2;
-+
-+	if (!test_opt(e3b->bd_sb, MBALLOC))
-+		return;
-+
-+	{
-+		static int mb_check_counter = 0;
-+		if (mb_check_counter++ % 300 != 0)
-+			return;
-+	}
-+
-+	while (order > 1) {
-+		buddy = mb_find_buddy(e3b, order, &max);
-+		J_ASSERT(buddy);
-+		buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+		J_ASSERT(buddy2);
-+		J_ASSERT(buddy != buddy2);
-+		J_ASSERT(max * 2 == max2);
-+
-+		count = 0;
-+		for (i = 0; i < max; i++) {
-+
-+			if (mb_test_bit(i, buddy)) {
-+				/* only single bit in buddy2 may be 1 */
-+				if (!mb_test_bit(i << 1, buddy2))
-+					J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+				else if (!mb_test_bit((i << 1) + 1, buddy2))
-+					J_ASSERT(mb_test_bit(i << 1, buddy2));
-+				continue;
-+			}
-+
-+			/* both bits in buddy2 must be 0 */
-+			J_ASSERT(mb_test_bit(i << 1, buddy2));
-+			J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+			for (j = 0; j < (1 << order); j++) {
-+				k = (i * (1 << order)) + j;
-+				J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+			}
-+			count++;
-+		}
-+		J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+		order--;
-+	}
-+
-+	fstart = -1;
-+	buddy = mb_find_buddy(e3b, 0, &max);
-+	for (i = 0; i < max; i++) {
-+		if (!mb_test_bit(i, buddy)) {
-+			J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+			if (fstart == -1) {
-+				fragments++;
-+				fstart = i;
-+			}
-+			continue;
-+		}
-+		fstart = -1;
-+		/* check used bits only */
-+		for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+			buddy2 = mb_find_buddy(e3b, j, &max2);
-+			k = i >> j;
-+			J_ASSERT(k < max2);
-+			J_ASSERT(mb_test_bit(k, buddy2));
-+		}
-+	}
-+	J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+	J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static int inline fmsb(unsigned short word)
-+{
-+	int order;
-+
-+	if (word > 255) {
-+		order = 7;
-+		word >>= 8;
-+	} else {
-+		order = -1;
-+	}
-+
-+	do {
-+		order++;
-+		word >>= 1;
-+	} while (word != 0);
-+
-+	return order;
-+}
-+
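-+/*
-+ * Split a free range into maximal aligned power-of-two chunks: the
-+ * order is limited both by the alignment of 'first' and by the
-+ * remaining length.  Each chunk bumps bb_counters[order] and, for
-+ * order > 0, clears the matching bit in that order's buddy bitmap
-+ * (a cleared bit there means the whole chunk is free).
-+ */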
-+static void inline
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+				int len, struct ext3_group_info *grp)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	unsigned short min, max, chunk, border;
-+
-+	mb_debug("mark %u/%u free\n", first, len);
-+	J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+	border = 2 << sb->s_blocksize_bits;
-+
-+	while (len > 0) {
-+		/* find how many blocks can be covered since this position */
-+		max = ffs(first | border) - 1;
-+
-+		/* find how many blocks of power 2 we need to mark */
-+		min = fmsb(len);
-+
-+		mb_debug("  %u/%u -> max %u, min %u\n",
-+			first & ((2 << sb->s_blocksize_bits) - 1),
-+			len, max, min);
-+
-+		if (max < min)
-+			min = max;
-+		chunk = 1 << min;
-+
-+		/* mark multiblock chunks only */
-+		grp->bb_counters[min]++;
-+		if (min > 0) {
-+			mb_debug("    set %u at %u \n", first >> min,
-+				sbi->s_mb_offsets[min]);
-+			mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+		}
-+
-+		len -= chunk;
-+		first += chunk;
-+	}
-+}
-+
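-+/*
-+ * Walk the on-disk block bitmap, feed every run of free blocks to
-+ * ext3_mb_mark_free_simple() to build the buddy data, count the
-+ * fragments, and warn (correcting bb_free) if the bitmap disagrees
-+ * with the group descriptor.
-+ */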
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+			int group)
-+{
-+	struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+	unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+	unsigned short i = 0, first, len;
-+	unsigned free = 0, fragments = 0;
-+	unsigned long long period = get_cycles();
-+
-+	i = mb_find_next_zero_bit(bitmap, max, 0);
-+	grp->bb_first_free = i;
-+	while (i < max) {
-+		fragments++;
-+		first = i;
-+		i = ext2_find_next_le_bit(bitmap, max, i);
-+		len = i - first;
-+		free += len;
-+		if (len > 1)
-+			ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+		else
-+			grp->bb_counters[0]++;
-+		if (i < max)
-+			i = mb_find_next_zero_bit(bitmap, max, i);
-+	}
-+	grp->bb_fragments = fragments;
-+
-+	/* bb_state shouldn't be modified here because everyone
-+	 * else waits for init completion on the page lock */
-+	clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+	if (free != grp->bb_free) {
-+		printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+			group, free, grp->bb_free);
-+		grp->bb_free = free;
-+	}
-+
-+	period = get_cycles() - period;
-+	spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+	EXT3_SB(sb)->s_mb_buddies_generated++;
-+	EXT3_SB(sb)->s_mb_generation_time += period;
-+	spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
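-+/*
-+ * The buddy cache inode stores, for each group N, a copy of the block
-+ * bitmap in block 2N and the generated buddy data in block 2N+1.
-+ * This reads the bitmaps this page needs and fills in every block
-+ * (bitmap or buddy) that the page covers.
-+ */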
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+	int blocksize, blocks_per_page, groups_per_page;
-+	int err = 0, i, first_group, first_block;
-+	struct super_block *sb;
-+	struct buffer_head *bhs;
-+	struct buffer_head **bh;
-+	struct inode *inode;
-+	char *data, *bitmap;
-+
-+	mb_debug("init page %lu\n", page->index);
-+
-+	inode = page->mapping->host;
-+	sb = inode->i_sb;
-+	blocksize = 1 << inode->i_blkbits;
-+	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+	groups_per_page = blocks_per_page >> 1;
-+	if (groups_per_page == 0)
-+		groups_per_page = 1;
-+
-+	/* allocate buffer_heads to read bitmaps */
-+	if (groups_per_page > 1) {
-+		err = -ENOMEM;
-+		i = sizeof(struct buffer_head *) * groups_per_page;
-+		bh = kmalloc(i, GFP_NOFS);
-+		if (bh == NULL)
-+			goto out;
-+		memset(bh, 0, i);
-+	} else
-+		bh = &bhs;
-+
-+	first_group = page->index * blocks_per_page / 2;
-+
-+	/* read all groups the page covers into the cache */
-+	for (i = 0; i < groups_per_page; i++) {
-+		struct ext3_group_desc * desc;
-+
-+		if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+			break;
-+
-+		err = -EIO;
-+		desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+		if (desc == NULL)
-+			goto out;
-+
-+		err = -ENOMEM;
-+		bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+		if (bh[i] == NULL)
-+			goto out;
-+
-+		if (buffer_uptodate(bh[i]))
-+			continue;
-+
-+		lock_buffer(bh[i]);
-+		if (buffer_uptodate(bh[i])) {
-+			unlock_buffer(bh[i]);
-+			continue;
-+		}
-+
-+		get_bh(bh[i]);
-+		bh[i]->b_end_io = end_buffer_read_sync;
-+		submit_bh(READ, bh[i]);
-+		mb_debug("read bitmap for group %u\n", first_group + i);
-+	}
-+
-+	/* wait for I/O completion */
-+	for (i = 0; i < groups_per_page && bh[i]; i++)
-+		wait_on_buffer(bh[i]);
-+
-+	err = -EIO;
-+	for (i = 0; i < groups_per_page && bh[i]; i++)
-+		if (!buffer_uptodate(bh[i]))
-+			goto out;
-+
-+	first_block = page->index * blocks_per_page;
-+	for (i = 0; i < blocks_per_page; i++) {
-+		int group;
-+
-+		group = (first_block + i) >> 1;
-+		if (group >= EXT3_SB(sb)->s_groups_count)
-+			break;
-+
-+		data = page_address(page) + (i * blocksize);
-+		bitmap = bh[group - first_group]->b_data;
-+
-+		if ((first_block + i) & 1) {
-+			/* this is block of buddy */
-+			mb_debug("put buddy for group %u in page %lu/%x\n",
-+				group, page->index, i * blocksize);
-+			memset(data, 0xff, blocksize);
-+			EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+			memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+			ext3_mb_generate_buddy(sb, data, bitmap, group);
-+		} else {
-+			/* this is block of bitmap */
-+			mb_debug("put bitmap for group %u in page %lu/%x\n",
-+				group, page->index, i * blocksize);
-+			memcpy(data, bitmap, blocksize);
-+		}
-+	}
-+	SetPageUptodate(page);
-+
-+out:
-+	if (bh) {
-+		for (i = 0; i < groups_per_page && bh[i]; i++)
-+			brelse(bh[i]);
-+		if (bh != &bhs)
-+			kfree(bh);
-+	}
-+	return err;
-+}
-+
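-+/*
-+ * Look up (creating and initializing if necessary) the buddy-cache
-+ * pages holding this group's bitmap block (2*group) and buddy block
-+ * (2*group + 1), take a reference on each and record the mapped
-+ * addresses in the ext3_buddy descriptor.  The references are dropped
-+ * by ext3_mb_release_desc().
-+ */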
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+		struct ext3_buddy *e3b)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct inode *inode = sbi->s_buddy_cache;
-+	int blocks_per_page, block, pnum, poff;
-+	struct page *page;
-+
-+	mb_debug("load group %u\n", group);
-+
-+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+	e3b->bd_blkbits = sb->s_blocksize_bits;
-+	e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+	e3b->bd_sb = sb;
-+	e3b->bd_group = group;
-+	e3b->bd_buddy_page = NULL;
-+	e3b->bd_bitmap_page = NULL;
-+
-+	block = group * 2;
-+	pnum = block / blocks_per_page;
-+	poff = block % blocks_per_page;
-+
-+	/* we could use find_or_create_page(), but it locks the page,
-+	 * which we'd like to avoid in the fast path ... */
-+	page = find_get_page(inode->i_mapping, pnum);
-+	if (page == NULL || !PageUptodate(page)) {
-+		if (page)
-+			page_cache_release(page);
-+		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+		if (page) {
-+			BUG_ON(page->mapping != inode->i_mapping);
-+			if (!PageUptodate(page))
-+				ext3_mb_init_cache(page);
-+			unlock_page(page);
-+		}
-+	}
-+	if (page == NULL || !PageUptodate(page))
-+		goto err;
-+	e3b->bd_bitmap_page = page;
-+	e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+	mark_page_accessed(page);
-+
-+	block++;
-+	pnum = block / blocks_per_page;
-+	poff = block % blocks_per_page;
-+
-+	page = find_get_page(inode->i_mapping, pnum);
-+	if (page == NULL || !PageUptodate(page)) {
-+		if (page)
-+			page_cache_release(page);
-+		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+		if (page) {
-+			BUG_ON(page->mapping != inode->i_mapping);
-+			if (!PageUptodate(page))
-+				ext3_mb_init_cache(page);
-+			unlock_page(page);
-+		}
-+	}
-+	if (page == NULL || !PageUptodate(page))
-+		goto err;
-+	e3b->bd_buddy_page = page;
-+	e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+	mark_page_accessed(page);
-+
-+	J_ASSERT(e3b->bd_bitmap_page != NULL);
-+	J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+	return 0;
-+
-+err:
-+	if (e3b->bd_bitmap_page)
-+		page_cache_release(e3b->bd_bitmap_page);
-+	if (e3b->bd_buddy_page)
-+		page_cache_release(e3b->bd_buddy_page);
-+	e3b->bd_buddy = NULL;
-+	e3b->bd_bitmap = NULL;
-+	return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+	if (e3b->bd_bitmap_page)
-+		page_cache_release(e3b->bd_bitmap_page);
-+	if (e3b->bd_buddy_page)
-+		page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+	bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+		      &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+	bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+			&EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+	int order = 1;
-+	void *bb;
-+
-+	J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+	J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+	bb = EXT3_MB_BUDDY(e3b);
-+	while (order <= e3b->bd_blkbits + 1) {
-+		block = block >> 1;
-+		if (!mb_test_bit(block, bb)) {
-+			/* this block is part of buddy of order 'order' */
-+			return order;
-+		}
-+		bb += 1 << (e3b->bd_blkbits - order);
-+		order++;
-+	}
-+	return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+	__u32 *addr;
-+
-+	len = cur + len;
-+	while (cur < len) {
-+		if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: clear whole word at once */
-+			addr = bm + (cur >> 3);
-+			*addr = 0;
-+			cur += 32;
-+			continue;
-+		}
-+		mb_clear_bit_atomic(cur, bm);
-+		cur++;
-+	}
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+	__u32 *addr;
-+
-+	len = cur + len;
-+	while (cur < len) {
-+		if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: set whole word at once */
-+			addr = bm + (cur >> 3);
-+			*addr = 0xffffffff;
-+			cur += 32;
-+			continue;
-+		}
-+		mb_set_bit_atomic(cur, bm);
-+		cur++;
-+	}
-+}
-+
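-+/*
-+ * Return blocks to the in-memory buddy: each block is cleared in the
-+ * order-0 bitmap, then merged upwards while its buddy is free too;
-+ * both halves are marked used at the lower order and a single bit is
-+ * cleared one order higher.  bb_free, bb_first_free and the fragment
-+ * count are maintained along the way.
-+ */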
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+	int block = 0, max = 0, order;
-+	void *buddy, *buddy2;
-+
-+	mb_check_buddy(e3b);
-+
-+	e3b->bd_info->bb_free += count;
-+	if (first < e3b->bd_info->bb_first_free)
-+		e3b->bd_info->bb_first_free = first;
-+
-+	/* let's maintain fragments counter */
-+	if (first != 0)
-+		block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+	if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+		max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+	if (block && max)
-+		e3b->bd_info->bb_fragments--;
-+	else if (!block && !max)
-+		e3b->bd_info->bb_fragments++;
-+
-+	/* let's maintain buddy itself */
-+	while (count-- > 0) {
-+		block = first++;
-+		order = 0;
-+
-+		J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+		mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+		e3b->bd_info->bb_counters[order]++;
-+
-+		/* start of the buddy */
-+		buddy = mb_find_buddy(e3b, order, &max);
-+
-+		do {
-+			block &= ~1UL;
-+			if (mb_test_bit(block, buddy) ||
-+					mb_test_bit(block + 1, buddy))
-+				break;
-+
-+			/* both the buddies are free, try to coalesce them */
-+			buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+			if (!buddy2)
-+				break;
-+
-+			if (order > 0) {
-+				/* at order 0 the "buddy" is the block bitmap
-+				 * itself, where a cleared bit means free, so
-+				 * don't set bits there */
-+				mb_set_bit(block, buddy);
-+				mb_set_bit(block + 1, buddy);
-+			}
-+			e3b->bd_info->bb_counters[order]--;
-+			e3b->bd_info->bb_counters[order]--;
-+
-+			block = block >> 1;
-+			order++;
-+			e3b->bd_info->bb_counters[order]++;
-+
-+			mb_clear_bit(block, buddy2);
-+			buddy = buddy2;
-+		} while (1);
-+	}
-+	mb_check_buddy(e3b);
-+
-+	return 0;
-+}
-+
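-+/*
-+ * Starting from a free block, find the order of the buddy chunk that
-+ * contains it and extend the extent over the following free chunks
-+ * until at least 'needed' blocks are covered or a used block is hit;
-+ * returns the resulting extent length.
-+ */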
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+				int needed, struct ext3_free_extent *ex)
-+{
-+	int next = block, max, ord;
-+	void *buddy;
-+
-+	J_ASSERT(ex != NULL);
-+
-+	buddy = mb_find_buddy(e3b, order, &max);
-+	J_ASSERT(buddy);
-+	J_ASSERT(block < max);
-+	if (mb_test_bit(block, buddy)) {
-+		ex->fe_len = 0;
-+		ex->fe_start = 0;
-+		ex->fe_group = 0;
-+		return 0;
-+	}
-+
-+	if (likely(order == 0)) {
-+		/* find actual order */
-+		order = mb_find_order_for_block(e3b, block);
-+		block = block >> order;
-+	}
-+
-+	ex->fe_len = 1 << order;
-+	ex->fe_start = block << order;
-+	ex->fe_group = e3b->bd_group;
-+
-+	/* calc difference from given start */
-+	next = next - ex->fe_start;
-+	ex->fe_len -= next;
-+	ex->fe_start += next;
-+
-+	while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+		if (block + 1 >= max)
-+			break;
-+
-+		next = (block + 1) * (1 << order);
-+		if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+			break;
-+
-+		ord = mb_find_order_for_block(e3b, next);
-+
-+		order = ord;
-+		block = next >> order;
-+		ex->fe_len += 1 << order;
-+	}
-+
-+	J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+	return ex->fe_len;
-+}
-+
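-+/*
-+ * Mark the extent allocated in the buddy: aligned chunks are taken
-+ * whole at their own order, otherwise a larger buddy is split (marked
-+ * used, its two halves freed one order down) until the piece fits.
-+ * Finally all bits of the extent are set in the order-0 bitmap.  The
-+ * return value packs the length and order at the first split, which
-+ * the caller records as allocation history.
-+ */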
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+	int ord, mlen = 0, max = 0, cur;
-+	int start = ex->fe_start;
-+	int len = ex->fe_len;
-+	unsigned ret = 0;
-+	int len0 = len;
-+	void *buddy;
-+
-+	mb_check_buddy(e3b);
-+
-+	e3b->bd_info->bb_free -= len;
-+	if (e3b->bd_info->bb_first_free == start)
-+		e3b->bd_info->bb_first_free += len;
-+
-+	/* let's maintain fragments counter */
-+	if (start != 0)
-+		mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+	if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+		max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+	if (mlen && max)
-+		e3b->bd_info->bb_fragments++;
-+	else if (!mlen && !max)
-+		e3b->bd_info->bb_fragments--;
-+
-+	/* let's maintain buddy itself */
-+	while (len) {
-+		ord = mb_find_order_for_block(e3b, start);
-+
-+		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+			/* the whole chunk may be allocated at once! */
-+			mlen = 1 << ord;
-+			buddy = mb_find_buddy(e3b, ord, &max);
-+			J_ASSERT((start >> ord) < max);
-+			mb_set_bit(start >> ord, buddy);
-+			e3b->bd_info->bb_counters[ord]--;
-+			start += mlen;
-+			len -= mlen;
-+			J_ASSERT(len >= 0);
-+			continue;
-+		}
-+
-+		/* store for history */
-+		if (ret == 0)
-+			ret = len | (ord << 16);
-+
-+		/* we have to split large buddy */
-+		J_ASSERT(ord > 0);
-+		buddy = mb_find_buddy(e3b, ord, &max);
-+		mb_set_bit(start >> ord, buddy);
-+		e3b->bd_info->bb_counters[ord]--;
-+
-+		ord--;
-+		cur = (start >> ord) & ~1U;
-+		buddy = mb_find_buddy(e3b, ord, &max);
-+		mb_clear_bit(cur, buddy);
-+		mb_clear_bit(cur + 1, buddy);
-+		e3b->bd_info->bb_counters[ord]++;
-+		e3b->bd_info->bb_counters[ord]++;
-+	}
-+
-+	/* now drop all the bits in bitmap */
-+	mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+	mb_check_buddy(e3b);
-+
-+	return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	unsigned long ret;
-+
-+	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+	ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+	ac->ac_status = AC_STATUS_FOUND;
-+	ac->ac_tail = ret & 0xffff;
-+	ac->ac_buddy = ret >> 16;
-+
-+	/* hold in-core structures until allocated
-+	 * blocks are marked non-free in on-disk bitmap */
-+	ac->ac_buddy_page = e3b->bd_buddy_page;
-+	page_cache_get(e3b->bd_buddy_page);
-+	ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+	page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent is marked used and a flag is set in the context to stop
-+ * scanning. Otherwise, the extent is compared with the previously found
-+ * one and, if the new one is better, it is stored in the context.
-+ * Later, the best found extent will be used if mballoc cannot find a
-+ * good enough extent.
-+ *
-+ * FIXME: the real allocation policy is yet to be designed!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+					struct ext3_free_extent *ex,
-+					struct ext3_buddy *e3b)
-+{
-+	struct ext3_free_extent *bex = &ac->ac_b_ex;
-+	struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+	J_ASSERT(ex->fe_len > 0);
-+	J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+	J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+	ac->ac_found++;
-+
-+	/*
-+	 * The special case - take what you catch first
-+	 */
-+	if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+		*bex = *ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+		return;
-+	}
-+
-+	/*
-+	 * Let's check whether the chunk is good enough
-+	 */
-+	if (ex->fe_len == gex->fe_len) {
-+		*bex = *ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+		return;
-+	}
-+
-+	/*
-+	 * If this is first found extent, just store it in the context
-+	 */
-+	if (bex->fe_len == 0) {
-+		*bex = *ex;
-+		return;
-+	}
-+
-+	/*
-+	 * If new found extent is better, store it in the context
-+	 */
-+	if (bex->fe_len < gex->fe_len) {
-+		/* if the request isn't satisfied, any found extent
-+		 * larger than previous best one is better */
-+		if (ex->fe_len > bex->fe_len)
-+			*bex = *ex;
-+	} else if (ex->fe_len > gex->fe_len) {
-+		/* if the request is satisfied, then we try to find
-+		 * an extent that still satisfies the request, but is
-+		 * smaller than the previous one */
-+		*bex = *ex;
-+	}
-+
-+	/*
-+	 * Let's scan at least a few extents and not just pick the first one
-+	 */
-+	if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+		ac->ac_status = AC_STATUS_BREAK;
-+
-+	/*
-+	 * We don't want to scan for a whole year
-+	 */
-+	if (ac->ac_found > ext3_mb_max_to_scan)
-+		ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct ext3_free_extent ex = ac->ac_b_ex;
-+	int group = ex.fe_group, max, err;
-+
-+	J_ASSERT(ex.fe_len > 0);
-+	err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+	if (err)
-+		return err;
-+
-+	ext3_lock_group(ac->ac_sb, group);
-+	max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+	if (max > 0) {
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	}
-+
-+	ext3_unlock_group(ac->ac_sb, group);
-+
-+	ext3_mb_release_desc(e3b);
-+
-+	return 0;
-+}
-+
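-+/*
-+ * Try to allocate exactly at the goal.  When the stripe option is set
-+ * and the request equals the stripe size, the extent is only taken if
-+ * it starts on a stripe boundary; otherwise it is taken when it covers
-+ * the whole request, or even when shorter if EXT3_MB_HINT_MERGE is set.
-+ */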
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+				struct ext3_buddy *e3b)
-+{
-+	int group = ac->ac_g_ex.fe_group, max, err;
-+	struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+	struct ext3_super_block *es = sbi->s_es;
-+	struct ext3_free_extent ex;
-+
-+	err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+	if (err)
-+		return err;
-+
-+	ext3_lock_group(ac->ac_sb, group);
-+	max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+			     ac->ac_g_ex.fe_len, &ex);
-+
-+	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+		ext3_fsblk_t start;
-+		start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+			ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+		if (start % sbi->s_stripe == 0) {
-+			ac->ac_found++;
-+			ac->ac_b_ex = ex;
-+			ext3_mb_use_best_found(ac, e3b);
-+		}
-+	} else if (max >= ac->ac_g_ex.fe_len) {
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+		J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+		ac->ac_found++;
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	} else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+		/* Sometimes the caller may want to merge even a small
-+		 * number of blocks into an existing extent */
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+		J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+		ac->ac_found++;
-+		ac->ac_b_ex = ex;
-+		ext3_mb_use_best_found(ac, e3b);
-+	}
-+	ext3_unlock_group(ac->ac_sb, group);
-+
-+	ext3_mb_release_desc(e3b);
-+
-+	return 0;
-+}
-+
-+/*
-+ * The routine scans the buddy structures (not the bitmap!) from the given
-+ * order up to the maximum order and tries to find a chunk big enough to
-+ * satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	struct ext3_group_info *grp = e3b->bd_info;
-+	void *buddy;
-+	int i, k, max;
-+
-+	J_ASSERT(ac->ac_2order > 0);
-+	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+		if (grp->bb_counters[i] == 0)
-+			continue;
-+
-+		buddy = mb_find_buddy(e3b, i, &max);
-+		if (buddy == NULL) {
-+			printk(KERN_ALERT "looking for wrong order?\n");
-+			break;
-+		}
-+
-+		k = mb_find_next_zero_bit(buddy, max, 0);
-+		J_ASSERT(k < max);
-+
-+		ac->ac_found++;
-+
-+		ac->ac_b_ex.fe_len = 1 << i;
-+		ac->ac_b_ex.fe_start = k << i;
-+		ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+		ext3_mb_use_best_found(ac, e3b);
-+		J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+		if (unlikely(ext3_mb_stats))
-+			atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+		break;
-+	}
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * In order to optimize scanning, the caller must pass the number of
-+ * free blocks in the group, so the routine knows the upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+					struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	void *bitmap = EXT3_MB_BITMAP(e3b);
-+	struct ext3_free_extent ex;
-+	int i, free;
-+
-+	free = e3b->bd_info->bb_free;
-+	J_ASSERT(free > 0);
-+
-+	i = e3b->bd_info->bb_first_free;
-+
-+	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+		i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+		if (i >= sb->s_blocksize * 8) {
-+			J_ASSERT(free == 0);
-+			break;
-+		}
-+
-+		mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+		J_ASSERT(ex.fe_len > 0);
-+		J_ASSERT(free >= ex.fe_len);
-+
-+		ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+		i += ex.fe_len;
-+		free -= ex.fe_len;
-+	}
-+}
-+
-+/*
-+ * This is a special case for storage like raid5:
-+ * we try to find stripe-aligned chunks for stripe-sized requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+				 struct ext3_buddy *e3b)
-+{
-+	struct super_block *sb = ac->ac_sb;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	void *bitmap = EXT3_MB_BITMAP(e3b);
-+	struct ext3_free_extent ex;
-+	ext3_fsblk_t i, max;
-+
-+	J_ASSERT(sbi->s_stripe != 0);
-+
-+	/* find first stripe-aligned block */
-+	i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb) +
-+		le32_to_cpu(sbi->s_es->s_first_data_block);
-+	i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+	i = (i - le32_to_cpu(sbi->s_es->s_first_data_block)) %
-+		EXT3_BLOCKS_PER_GROUP(sb);
-+
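-+	/* walk the group in stripe-sized steps and take the first free,
-+	 * stripe-aligned extent of at least s_stripe blocks */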
-+	while (i < sb->s_blocksize * 8) {
-+		if (!mb_test_bit(i, bitmap)) {
-+			max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+			if (max >= sbi->s_stripe) {
-+				ac->ac_found++;
-+				ac->ac_b_ex = ex;
-+				ext3_mb_use_best_found(ac, e3b);
-+				break;
-+			}
-+		}
-+		i += sbi->s_stripe;
-+	}
-+}
-+
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+				int group, int cr)
-+{
-+	struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+	unsigned free, fragments, i, bits;
-+
-+	J_ASSERT(cr >= 0 && cr < 4);
-+	J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+	free = grp->bb_free;
-+	fragments = grp->bb_fragments;
-+	if (free == 0)
-+		return 0;
-+	if (fragments == 0)
-+		return 0;
-+
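-+	/* criteria get progressively weaker:
-+	 *  cr 0 - the group has a free buddy chunk of order ac_2order or higher
-+	 *  cr 1 - the average free fragment is large enough for the request
-+	 *  cr 2 - the total free space is large enough for the request
-+	 *  cr 3 - any group with free blocks will do */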
-+	switch (cr) {
-+		case 0:
-+			J_ASSERT(ac->ac_2order != 0);
-+			bits = ac->ac_sb->s_blocksize_bits + 1;
-+			for (i = ac->ac_2order; i <= bits; i++)
-+				if (grp->bb_counters[i] > 0)
-+					return 1;
-+			break;
-+		case 1:
-+			if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+				return 1;
-+			break;
-+		case 2:
-+			if (free >= ac->ac_g_ex.fe_len)
-+				return 1;
-+			break;
-+		case 3:
-+			return 1;
-+		default:
-+			BUG();
-+	}
-+
-+	return 0;
-+}
-+
-+ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+				ext3_fsblk_t goal, int *len,int flags,int *errp)
-+{
-+	struct buffer_head *bitmap_bh = NULL;
-+	struct ext3_allocation_context ac;
-+	int i, group, cr, err = 0;
-+	struct ext3_group_desc *gdp;
-+	struct ext3_super_block *es;
-+	struct buffer_head *gdp_bh;
-+	struct ext3_sb_info *sbi;
-+	struct super_block *sb;
-+	struct ext3_buddy e3b;
-+	ext3_fsblk_t block;
-+
-+	J_ASSERT(len != NULL);
-+	J_ASSERT(*len > 0);
-+
-+	sb = inode->i_sb;
-+	if (!sb) {
-+		printk("ext3_mb_new_blocks: nonexistent device\n");
-+		return 0;
-+	}
-+
-+	if (!test_opt(sb, MBALLOC)) {
-+		static int ext3_mballoc_warning = 0;
-+		if (ext3_mballoc_warning == 0) {
-+			printk(KERN_ERR "EXT3-fs: multiblock request with "
-+				"mballoc disabled!\n");
-+			ext3_mballoc_warning++;
-+		}
-+		*len = 1;
-+		err = ext3_new_block_old(handle, inode, goal, errp);
-+		return err;
-+	}
-+
-+	ext3_mb_poll_new_transaction(sb, handle);
-+
-+	sbi = EXT3_SB(sb);
-+	es = EXT3_SB(sb)->s_es;
-+
-+	/*
-+	 * We can't allocate > group size
-+	 */
-+	if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+		*len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+	if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* someone asks for non-reserved blocks */
-+		BUG_ON(*len > 1);
-+		err = ext3_mb_reserve_blocks(sb, 1);
-+		if (err) {
-+			*errp = err;
-+			return 0;
-+		}
-+	}
-+
-+	ac.ac_buddy_page = NULL;
-+	ac.ac_bitmap_page = NULL;
-+
-+	/*
-+	 * Check quota for allocation of these blocks.
-+	 */
-+	while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+		*len -= 1;
-+	if (*len == 0) {
-+		*errp = -EDQUOT;
-+		block = 0;
-+		goto out;
-+	}
-+
-+	/* start searching from the goal */
-+	if (goal < le32_to_cpu(es->s_first_data_block) ||
-+	    goal >= le32_to_cpu(es->s_blocks_count))
-+		goal = le32_to_cpu(es->s_first_data_block);
-+	group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+			EXT3_BLOCKS_PER_GROUP(sb);
-+	block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+			EXT3_BLOCKS_PER_GROUP(sb));
-+
-+	/* set up allocation goals */
-+	ac.ac_b_ex.fe_group = 0;
-+	ac.ac_b_ex.fe_start = 0;
-+	ac.ac_b_ex.fe_len = 0;
-+	ac.ac_status = AC_STATUS_CONTINUE;
-+	ac.ac_groups_scanned = 0;
-+	ac.ac_ex_scanned = 0;
-+	ac.ac_found = 0;
-+	ac.ac_sb = inode->i_sb;
-+	ac.ac_g_ex.fe_group = group;
-+	ac.ac_g_ex.fe_start = block;
-+	ac.ac_g_ex.fe_len = *len;
-+	ac.ac_flags = flags;
-+	ac.ac_2order = 0;
-+	ac.ac_criteria = 0;
-+
-+	if (*len == 1 && sbi->s_stripe) {
-+		/* looks like metadata; use a dirty hack for raid5:
-+		 * move all metadata into the first groups in the hope of
-+		 * hitting cached sectors and avoiding read-modify-write
-+		 * cycles in raid5 */
-+		ac.ac_g_ex.fe_group = group = 0;
-+	}
-+
-+	/* if the request is an exact power of two and large enough,
-+	 * remember its order so we can try a buddy-order allocation */
-+	i = ffs(*len);
-+	if (i >= ext3_mb_order2_reqs) {
-+		i--;
-+		if ((*len & (~(1 << i))) == 0)
-+			ac.ac_2order = i;
-+	}
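-+	/* a non-zero ac_2order lets the scan start at criterion 0, where
-+	 * ext3_mb_simple_scan_group() satisfies the request directly from
-+	 * the buddy bitmaps */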
-+
-+	/* first, try the goal */
-+	err = ext3_mb_find_by_goal(&ac, &e3b);
-+	if (err)
-+		goto out_err;
-+	if (ac.ac_status == AC_STATUS_FOUND)
-+		goto found;
-+
-+	/* Let's just scan groups to find more or less suitable blocks */
-+	cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+	for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+		ac.ac_criteria = cr;
-+		for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+			if (group == EXT3_SB(sb)->s_groups_count)
-+				group = 0;
-+
-+			if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+				/* we need full data about the group
-+				 * to make a good selection */
-+				err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+				if (err)
-+					goto out_err;
-+				ext3_mb_release_desc(&e3b);
-+			}
-+
-+			/* check whether the group is good for our criteria */
-+			if (!ext3_mb_good_group(&ac, group, cr))
-+				continue;
-+
-+			err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+			if (err)
-+				goto out_err;
-+
-+			ext3_lock_group(sb, group);
-+			if (!ext3_mb_good_group(&ac, group, cr)) {
-+				/* someone did allocation from this group */
-+				ext3_unlock_group(sb, group);
-+				ext3_mb_release_desc(&e3b);
-+				continue;
-+			}
-+
-+			ac.ac_groups_scanned++;
-+			if (cr == 0)
-+				ext3_mb_simple_scan_group(&ac, &e3b);
-+			else if (cr == 1 && *len == sbi->s_stripe)
-+				ext3_mb_scan_aligned(&ac, &e3b);
-+			else
-+				ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+			ext3_unlock_group(sb, group);
-+
-+			ext3_mb_release_desc(&e3b);
-+
-+			if (ac.ac_status != AC_STATUS_CONTINUE)
-+				break;
-+		}
-+	}
-+
-+	if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+	    !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+		/*
-+		 * We've been searching too long. Let's try to allocate
-+		 * the best chunk we've found so far
-+		 */
-+
-+		/*if (ac.ac_found > ext3_mb_max_to_scan)
-+			printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+				"%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+				ac.ac_g_ex.fe_len);*/
-+		ext3_mb_try_best_found(&ac, &e3b);
-+		if (ac.ac_status != AC_STATUS_FOUND) {
-+			/*
-+			 * Someone more lucky has already allocated it.
-+			 * The only thing we can do is just take first
-+			 * found block(s)
-+			printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+			 */
-+			ac.ac_b_ex.fe_group = 0;
-+			ac.ac_b_ex.fe_start = 0;
-+			ac.ac_b_ex.fe_len = 0;
-+			ac.ac_status = AC_STATUS_CONTINUE;
-+			ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+			cr = 3;
-+			goto repeat;
-+		}
-+	}
-+
-+	if (ac.ac_status != AC_STATUS_FOUND) {
-+		/*
-+		 * We definitely weren't lucky
-+		 */
-+		DQUOT_FREE_BLOCK(inode, *len);
-+		*errp = -ENOSPC;
-+		block = 0;
-+#if 1
-+		printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+			ac.ac_status, ac.ac_flags);
-+		printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+			ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+			ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+		printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+			sbi->s_blocks_reserved, ac.ac_found);
-+		printk("EXT3-fs: groups: ");
-+		for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+			printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+		printk("\n");
-+#endif
-+		goto out;
-+	}
-+
-+found:
-+	J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+	/* good news - free block(s) have been found. now it's time
-+	 * to mark block(s) in good old journaled bitmap */
-+	block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+			+ ac.ac_b_ex.fe_start
-+			+ le32_to_cpu(es->s_first_data_block);
-+
-+	/* we made a decision, now mark found blocks in good old
-+	 * bitmap to be journaled */
-+
-+	ext3_debug("using %d blocks from block group %d\n",
-+			ac.ac_b_ex.fe_len, ac.ac_b_ex.fe_group);
-+
-+	bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+	if (!bitmap_bh) {
-+		*errp = -EIO;
-+		goto out_err;
-+	}
-+
-+	err = ext3_journal_get_write_access(handle, bitmap_bh);
-+	if (err) {
-+		*errp = err;
-+		goto out_err;
-+	}
-+
-+	gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+	if (!gdp) {
-+		*errp = -EIO;
-+		goto out_err;
-+	}
-+
-+	err = ext3_journal_get_write_access(handle, gdp_bh);
-+	if (err)
-+		goto out_err;
-+
-+	block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+			+ ac.ac_b_ex.fe_start
-+			+ le32_to_cpu(es->s_first_data_block);
-+
-+	if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+	    block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+	    in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group))
-+		ext3_error(sb, "ext3_new_block",
-+			    "Allocating block in system zone - "
-+			    "block = "E3FSBLK, block);
-+#ifdef AGGRESSIVE_CHECK
-+	for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+		J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+	mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+	spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+	gdp->bg_free_blocks_count =
-+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+					- ac.ac_b_ex.fe_len);
-+	spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+	percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+	if (err)
-+		goto out_err;
-+	err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+	if (err)
-+		goto out_err;
-+
-+	sb->s_dirt = 1;
-+	*errp = 0;
-+	brelse(bitmap_bh);
-+
-+	/* drop non-allocated, but quota-charged blocks */
-+	J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+	DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+	*len = ac.ac_b_ex.fe_len;
-+	J_ASSERT(*len > 0);
-+	J_ASSERT(block != 0);
-+	goto out;
-+
-+out_err:
-+	/* if we've already allocated something, roll it back */
-+	if (ac.ac_status == AC_STATUS_FOUND) {
-+		/* FIXME: free blocks here */
-+	}
-+
-+	DQUOT_FREE_BLOCK(inode, *len);
-+	brelse(bitmap_bh);
-+	*errp = err;
-+	block = 0;
-+out:
-+	if (ac.ac_buddy_page)
-+		page_cache_release(ac.ac_buddy_page);
-+	if (ac.ac_bitmap_page)
-+		page_cache_release(ac.ac_bitmap_page);
-+
-+	if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* block wasn't reserved before and we reserved it
-+		 * at the beginning of allocation. it doesn't matter
-+		 * whether we allocated anything or we failed: time
-+		 * to release the reservation. NOTE: because multiblock
-+		 * requests are expected from the delayed allocation
-+		 * path only, this is always a single block */
-+		ext3_mb_release_blocks(sb, 1);
-+	}
-+
-+	if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+		atomic_inc(&sbi->s_bal_reqs);
-+		atomic_add(*len, &sbi->s_bal_allocated);
-+		if (*len >= ac.ac_g_ex.fe_len)
-+			atomic_inc(&sbi->s_bal_success);
-+		atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+		if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+				ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+			atomic_inc(&sbi->s_bal_goals);
-+		if (ac.ac_found > ext3_mb_max_to_scan)
-+			atomic_inc(&sbi->s_bal_breaks);
-+	}
-+
-+	ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+	return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+	struct ext3_mb_history *history;
-+	struct super_block *sb;
-+	int start;
-+	int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+					struct ext3_mb_history *hs,
-+					int first)
-+{
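-+	/* walk the circular history buffer, skipping unused slots, and
-+	 * stop once we wrap around to the start position again */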
-+	if (hs == s->history + s->max)
-+		hs = s->history;
-+	if (!first && hs == s->history + s->start)
-+		return NULL;
-+	while (hs->goal.fe_len == 0) {
-+		hs++;
-+		if (hs == s->history + s->max)
-+			hs = s->history;
-+		if (hs == s->history + s->start)
-+			return NULL;
-+	}
-+	return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+	struct ext3_mb_proc_session *s = seq->private;
-+	struct ext3_mb_history *hs;
-+	int l = *pos;
-+
-+	if (l == 0)
-+		return SEQ_START_TOKEN;
-+	hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+	if (!hs)
-+		return NULL;
-+	while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+	return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+	struct ext3_mb_proc_session *s = seq->private;
-+	struct ext3_mb_history *hs = v;
-+
-+	++*pos;
-+	if (v == SEQ_START_TOKEN)
-+		return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+	else
-+		return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+	struct ext3_mb_history *hs = v;
-+	char buf[20], buf2[20];
-+
-+	if (v == SEQ_START_TOKEN) {
-+		seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+			 "pid", "inode", "goal", "result", "found", "grps", "cr",
-+			 "merge", "tail", "broken");
-+		return 0;
-+	}
-+
-+	sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+		hs->goal.fe_start, hs->goal.fe_len);
-+	sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+		hs->result.fe_start, hs->result.fe_len);
-+	seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+			hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+			hs->cr, hs->merged ? "M" : "", hs->tail,
-+			hs->buddy ? 1 << hs->buddy : 0);
-+	return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+	.start  = ext3_mb_seq_history_start,
-+	.next   = ext3_mb_seq_history_next,
-+	.stop   = ext3_mb_seq_history_stop,
-+	.show   = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+	struct super_block *sb = PDE(inode)->data;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_mb_proc_session *s;
-+	int rc, size;
-+
-+	s = kmalloc(sizeof(*s), GFP_KERNEL);
-+	if (s == NULL)
-+		return -EIO;
-+	size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+	s->history = kmalloc(size, GFP_KERNEL);
-+	if (s->history == NULL) {
-+		kfree(s);
-+		return -EIO;
-+	}
-+
-+	spin_lock(&sbi->s_mb_history_lock);
-+	memcpy(s->history, sbi->s_mb_history, size);
-+	s->max = sbi->s_mb_history_max;
-+	s->start = sbi->s_mb_history_cur % s->max;
-+	spin_unlock(&sbi->s_mb_history_lock);
-+
-+	rc = seq_open(file, &ext3_mb_seq_history_ops);
-+	if (rc == 0) {
-+		struct seq_file *m = (struct seq_file *)file->private_data;
-+		m->private = s;
-+	} else {
-+		kfree(s->history);
-+		kfree(s);
-+	}
-+	return rc;
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+	struct seq_file *seq = (struct seq_file *)file->private_data;
-+	struct ext3_mb_proc_session *s = seq->private;
-+	kfree(s->history);
-+	kfree(s);
-+	return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+	.owner		= THIS_MODULE,
-+	.open		= ext3_mb_seq_history_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+	struct super_block *sb = seq->private;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	long group;
-+
-+	if (*pos < 0 || *pos >= sbi->s_groups_count)
-+		return NULL;
-+
-+	group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+	struct super_block *sb = seq->private;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	long group;
-+
-+	++*pos;
-+	if (*pos < 0 || *pos >= sbi->s_groups_count)
-+		return NULL;
-+	group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+	struct super_block *sb = seq->private;
-+	long group = (long) v, i;
-+	struct sg {
-+		struct ext3_group_info info;
-+		unsigned short counters[16];
-+	} sg;
-+
-+	group--;
-+	if (group == 0)
-+		seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+			 "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+			 "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+			 "2^11", "2^12", "2^13");
-+
-+	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+		sizeof(struct ext3_group_info);
-+	ext3_lock_group(sb, group);
-+	memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+	ext3_unlock_group(sb, group);
-+
-+	if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+		return 0;
-+
-+	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+			sg.info.bb_fragments, sg.info.bb_first_free);
-+	for (i = 0; i <= 13; i++)
-+		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+				sg.info.bb_counters[i] : 0);
-+	seq_printf(seq, " ]\n");
-+
-+	return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+	.start  = ext3_mb_seq_groups_start,
-+	.next   = ext3_mb_seq_groups_next,
-+	.stop   = ext3_mb_seq_groups_stop,
-+	.show   = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+	struct super_block *sb = PDE(inode)->data;
-+	int rc;
-+
-+	rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+	if (rc == 0) {
-+		struct seq_file *m = (struct seq_file *)file->private_data;
-+		m->private = sb;
-+	}
-+	return rc;
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+	.owner		= THIS_MODULE,
-+	.open		= ext3_mb_seq_groups_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	char name[64];
-+
-+	snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+	remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+	remove_proc_entry("mb_history", sbi->s_mb_proc);
-+	remove_proc_entry(name, proc_root_ext3);
-+
-+	if (sbi->s_mb_history)
-+		kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	char name[64];
-+	int i;
-+
-+	snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+	sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+	if (sbi->s_mb_proc != NULL) {
-+		struct proc_dir_entry *p;
-+		p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+		if (p) {
-+			p->proc_fops = &ext3_mb_seq_history_fops;
-+			p->data = sb;
-+		}
-+		p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+		if (p) {
-+			p->proc_fops = &ext3_mb_seq_groups_fops;
-+			p->data = sb;
-+		}
-+	}
-+
-+	sbi->s_mb_history_max = 1000;
-+	sbi->s_mb_history_cur = 0;
-+	spin_lock_init(&sbi->s_mb_history_lock);
-+	i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+	sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_history != NULL)
-+		memset(sbi->s_mb_history, 0, i);
-+	/* if we can't allocate history, then we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+			struct ext3_allocation_context *ac)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_mb_history h;
-+
-+	if (unlikely(sbi->s_mb_history == NULL))
-+		return;
-+
-+	h.pid = current->pid;
-+	h.ino = ino;
-+	h.goal = ac->ac_g_ex;
-+	h.result = ac->ac_b_ex;
-+	h.found = ac->ac_found;
-+	h.cr = ac->ac_criteria;
-+	h.groups = ac->ac_groups_scanned;
-+	h.tail = ac->ac_tail;
-+	h.buddy = ac->ac_buddy;
-+	h.merged = 0;
-+	if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+			ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+		h.merged = 1;
-+
-+	spin_lock(&sbi->s_mb_history_lock);
-+	memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+	if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+		sbi->s_mb_history_cur = 0;
-+	spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int i, j, len, metalen;
-+	int num_meta_group_infos =
-+		(sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+			EXT3_DESC_PER_BLOCK_BITS(sb);
-+	struct ext3_group_info **meta_group_info;
-+
-+	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+	 * So a two level scheme suffices for now. */
-+	sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+				    num_meta_group_infos, GFP_KERNEL);
-+	if (sbi->s_group_info == NULL) {
-+		printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+		return -ENOMEM;
-+	}
-+	sbi->s_buddy_cache = new_inode(sb);
-+	if (sbi->s_buddy_cache == NULL) {
-+		printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+		goto err_freesgi;
-+	}
-+
-+	metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+	for (i = 0; i < num_meta_group_infos; i++) {
-+		if ((i + 1) == num_meta_group_infos)
-+			metalen = sizeof(*meta_group_info) *
-+				(sbi->s_groups_count -
-+					(i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+		meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+		if (meta_group_info == NULL) {
-+			printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+			       "buddy group\n");
-+			goto err_freemeta;
-+		}
-+		sbi->s_group_info[i] = meta_group_info;
-+	}
-+
-+	/*
-+	 * calculate the needed size. if the bb_counters size changes,
-+	 * don't forget about ext3_mb_generate_buddy()
-+	 */
-+	len = sizeof(struct ext3_group_info);
-+	len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+	for (i = 0; i < sbi->s_groups_count; i++) {
-+		struct ext3_group_desc * desc;
-+
-+		meta_group_info =
-+			sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+		j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+		meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+		if (meta_group_info[j] == NULL) {
-+			printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+			i--;
-+			goto err_freebuddy;
-+		}
-+		desc = ext3_get_group_desc(sb, i, NULL);
-+		if (desc == NULL) {
-+			printk(KERN_ERR "EXT3-fs: can't read descriptor %u\n", i);
-+			goto err_freebuddy;
-+		}
-+		memset(meta_group_info[j], 0, len);
-+		set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+			&meta_group_info[j]->bb_state);
-+		meta_group_info[j]->bb_free =
-+			le16_to_cpu(desc->bg_free_blocks_count);
-+	}
-+
-+	return 0;
-+
-+err_freebuddy:
-+	while (i >= 0) {
-+		kfree(EXT3_GROUP_INFO(sb, i));
-+		i--;
-+	}
-+	i = num_meta_group_infos;
-+err_freemeta:
-+	while (--i >= 0)
-+		kfree(sbi->s_group_info[i]);
-+	iput(sbi->s_buddy_cache);
-+err_freesgi:
-+	kfree(sbi->s_group_info);
-+	return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct inode *root = sb->s_root->d_inode;
-+	unsigned i, offset, max;
-+	struct dentry *dentry;
-+
-+	if (!test_opt(sb, MBALLOC))
-+		return 0;
-+
-+	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_offsets == NULL) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		return -ENOMEM;
-+	}
-+	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_maxs == NULL) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		kfree(sbi->s_mb_offsets);
-+		return -ENOMEM;
-+	}
-+
-+	 /* order 0 is regular bitmap */
-+	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+	sbi->s_mb_offsets[0] = 0;
-+
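-+	/* for each higher order, record where its buddy bitmap starts
-+	 * inside the buddy block and how many bits it holds; every
-+	 * order covers half as many bits as the one below it */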
-+	i = 1;
-+	offset = 0;
-+	max = sb->s_blocksize << 2;
-+	do {
-+		sbi->s_mb_offsets[i] = offset;
-+		sbi->s_mb_maxs[i] = max;
-+		offset += 1 << (sb->s_blocksize_bits - i);
-+		max = max >> 1;
-+		i++;
-+	} while (i <= sb->s_blocksize_bits + 1);
-+
-+	/* init file for buddy data */
-+	if ((i = ext3_mb_init_backend(sb))) {
-+		clear_opt(sbi->s_mount_opt, MBALLOC);
-+		kfree(sbi->s_mb_offsets);
-+		kfree(sbi->s_mb_maxs);
-+		return i;
-+	}
-+
-+	spin_lock_init(&sbi->s_reserve_lock);
-+	spin_lock_init(&sbi->s_md_lock);
-+	INIT_LIST_HEAD(&sbi->s_active_transaction);
-+	INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+	INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+	spin_lock_init(&sbi->s_bal_lock);
-+
-+	/* remove old on-disk buddy file */
-+	mutex_lock(&root->i_mutex);
-+	dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+	if (dentry->d_inode != NULL) {
-+		i = vfs_unlink(root, dentry);
-+		if (i != 0)
-+			printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+	}
-+	dput(dentry);
-+	mutex_unlock(&root->i_mutex);
-+
-+	ext3_mb_history_init(sb);
-+
-+	printk("EXT3-fs: mballoc enabled\n");
-+	return 0;
-+}
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int i, num_meta_group_infos;
-+
-+	if (!test_opt(sb, MBALLOC))
-+		return 0;
-+
-+	/* release freed, non-committed blocks */
-+	spin_lock(&sbi->s_md_lock);
-+	list_splice_init(&sbi->s_closed_transaction,
-+			&sbi->s_committed_transaction);
-+	list_splice_init(&sbi->s_active_transaction,
-+			&sbi->s_committed_transaction);
-+	spin_unlock(&sbi->s_md_lock);
-+	ext3_mb_free_committed_blocks(sb);
-+
-+	if (sbi->s_group_info) {
-+		for (i = 0; i < sbi->s_groups_count; i++)
-+			kfree(EXT3_GROUP_INFO(sb, i));
-+		num_meta_group_infos = (sbi->s_groups_count +
-+			EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+			EXT3_DESC_PER_BLOCK_BITS(sb);
-+		for (i = 0; i < num_meta_group_infos; i++)
-+			kfree(sbi->s_group_info[i]);
-+		kfree(sbi->s_group_info);
-+	}
-+	if (sbi->s_mb_offsets)
-+		kfree(sbi->s_mb_offsets);
-+	if (sbi->s_mb_maxs)
-+		kfree(sbi->s_mb_maxs);
-+	if (sbi->s_buddy_cache)
-+		iput(sbi->s_buddy_cache);
-+	if (sbi->s_blocks_reserved)
-+		printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+				sbi->s_blocks_reserved);
-+	if (ext3_mb_stats) {
-+		printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+			atomic_read(&sbi->s_bal_allocated),
-+			atomic_read(&sbi->s_bal_reqs),
-+			atomic_read(&sbi->s_bal_success));
-+		printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+			"%u 2^N hits, %u breaks\n",
-+			atomic_read(&sbi->s_bal_ex_scanned),
-+			atomic_read(&sbi->s_bal_goals),
-+			atomic_read(&sbi->s_bal_2orders),
-+			atomic_read(&sbi->s_bal_breaks));
-+		printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
-+			sbi->s_mb_buddies_generated++,
-+			sbi->s_mb_generation_time);
-+	}
-+
-+	ext3_mb_history_release(sb);
-+
-+	return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int err, i, count = 0, count2 = 0;
-+	struct ext3_free_metadata *md;
-+	struct ext3_buddy e3b;
-+
-+	if (list_empty(&sbi->s_committed_transaction))
-+		return;
-+
-+	/* there are committed blocks still to be freed */
-+	do {
-+		/* get next array of blocks */
-+		md = NULL;
-+		spin_lock(&sbi->s_md_lock);
-+		if (!list_empty(&sbi->s_committed_transaction)) {
-+			md = list_entry(sbi->s_committed_transaction.next,
-+					struct ext3_free_metadata, list);
-+			list_del(&md->list);
-+		}
-+		spin_unlock(&sbi->s_md_lock);
-+
-+		if (md == NULL)
-+			break;
-+
-+		mb_debug("gonna free %u blocks in group %u (0x%p):",
-+				md->num, md->group, md);
-+
-+		err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+		/* we expect to find existing buddy because it's pinned */
-+		BUG_ON(err != 0);
-+
-+		/* there are blocks to put in buddy to make them really free */
-+		count += md->num;
-+		count2++;
-+		ext3_lock_group(sb, md->group);
-+		for (i = 0; i < md->num; i++) {
-+			mb_debug(" %u", md->blocks[i]);
-+			mb_free_blocks(&e3b, md->blocks[i], 1);
-+		}
-+		mb_debug("\n");
-+		ext3_unlock_group(sb, md->group);
-+
-+		/* balance refcounts from ext3_mb_free_metadata() */
-+		page_cache_release(e3b.bd_buddy_page);
-+		page_cache_release(e3b.bd_bitmap_page);
-+
-+		kfree(md);
-+		ext3_mb_release_desc(&e3b);
-+
-+	} while (md);
-+	mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+	if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+		return;
-+
-+	/* new transaction! time to close the last one and free blocks of
-+	 * the committed transaction. we know that only one transaction
-+	 * can be active at a time, so the previous transaction may still
-+	 * be getting logged while the transaction before it is known to
-+	 * be logged already. this means we may now free blocks that were
-+	 * freed in all transactions before the previous one */
-+
-+	spin_lock(&sbi->s_md_lock);
-+	if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+		mb_debug("new transaction %lu, old %lu\n",
-+				(unsigned long) handle->h_transaction->t_tid,
-+				(unsigned long) sbi->s_last_transaction);
-+		list_splice_init(&sbi->s_closed_transaction,
-+					&sbi->s_committed_transaction);
-+		list_splice_init(&sbi->s_active_transaction,
-+					&sbi->s_closed_transaction);
-+		sbi->s_last_transaction = handle->h_transaction->t_tid;
-+	}
-+	spin_unlock(&sbi->s_md_lock);
-+
-+	ext3_mb_free_committed_blocks(sb);
-+}
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+				int group, int block, int count)
-+{
-+	struct ext3_group_info *db = e3b->bd_info;
-+	struct super_block *sb = e3b->bd_sb;
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	struct ext3_free_metadata *md;
-+	int i;
-+
-+	J_ASSERT(e3b->bd_bitmap_page != NULL);
-+	J_ASSERT(e3b->bd_buddy_page != NULL);
-+
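-+	/* freed blocks are collected per group in bb_md_cur; a container
-+	 * is reused only within the same transaction (bb_tid), otherwise
-+	 * a new one is allocated and put on the active-transaction list */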
-+	ext3_lock_group(sb, group);
-+	for (i = 0; i < count; i++) {
-+		md = db->bb_md_cur;
-+		if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+			db->bb_md_cur = NULL;
-+			md = NULL;
-+		}
-+
-+		if (md == NULL) {
-+			ext3_unlock_group(sb, group);
-+			md = kmalloc(sizeof(*md), GFP_KERNEL);
-+			if (md == NULL)
-+				return -ENOMEM;
-+			md->num = 0;
-+			md->group = group;
-+
-+			ext3_lock_group(sb, group);
-+			if (db->bb_md_cur == NULL) {
-+				spin_lock(&sbi->s_md_lock);
-+				list_add(&md->list, &sbi->s_active_transaction);
-+				spin_unlock(&sbi->s_md_lock);
-+				/* protect buddy cache from being freed,
-+				 * otherwise we'll refresh it from
-+				 * on-disk bitmap and lose not-yet-available
-+				 * blocks */
-+				page_cache_get(e3b->bd_buddy_page);
-+				page_cache_get(e3b->bd_bitmap_page);
-+				db->bb_md_cur = md;
-+				db->bb_tid = handle->h_transaction->t_tid;
-+				mb_debug("new md 0x%p for group %u\n",
-+							md, md->group);
-+			} else {
-+				kfree(md);
-+				md = db->bb_md_cur;
-+			}
-+		}
-+
-+		BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+		md->blocks[md->num] = block + i;
-+		md->num++;
-+		if (md->num == EXT3_BB_MAX_BLOCKS) {
-+			/* no more space, put full container on a sb's list */
-+			db->bb_md_cur = NULL;
-+		}
-+	}
-+	ext3_unlock_group(sb, group);
-+	return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+			ext3_fsblk_t block, unsigned long count,
-+			int metadata, unsigned long *freed)
-+{
-+	struct buffer_head *bitmap_bh = NULL;
-+	struct ext3_group_desc *gdp;
-+	struct ext3_super_block *es;
-+	unsigned long bit, overflow;
-+	struct buffer_head *gd_bh;
-+	unsigned long block_group;
-+	struct ext3_sb_info *sbi;
-+	struct super_block *sb;
-+	struct ext3_buddy e3b;
-+	int err = 0, ret;
-+
-+	*freed = 0;
-+	sb = inode->i_sb;
-+	if (!sb) {
-+		printk ("ext3_free_blocks: nonexistent device");
-+		return;
-+	}
-+
-+	ext3_mb_poll_new_transaction(sb, handle);
-+
-+	sbi = EXT3_SB(sb);
-+	es = EXT3_SB(sb)->s_es;
-+	if (block < le32_to_cpu(es->s_first_data_block) ||
-+	    block + count < block ||
-+	    block + count > le32_to_cpu(es->s_blocks_count)) {
-+		ext3_error (sb, "ext3_free_blocks",
-+			    "Freeing blocks not in datazone - "
-+			    "block = %lu, count = %lu", block, count);
-+		goto error_return;
-+	}
-+
-+	ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+	overflow = 0;
-+	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+		      EXT3_BLOCKS_PER_GROUP(sb);
-+	bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+		      EXT3_BLOCKS_PER_GROUP(sb);
-+	/*
-+	 * Check to see if we are freeing blocks across a group
-+	 * boundary.
-+	 */
-+	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+		count -= overflow;
-+	}
-+	brelse(bitmap_bh);
-+	bitmap_bh = read_block_bitmap(sb, block_group);
-+	if (!bitmap_bh)
-+		goto error_return;
-+	gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+	if (!gdp)
-+		goto error_return;
-+
-+	if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+	    in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+	    in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group) ||
-+	    in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+		      EXT3_SB(sb)->s_itb_per_group))
-+		ext3_error (sb, "ext3_free_blocks",
-+			    "Freeing blocks in system zones - "
-+			    "Block = %lu, count = %lu",
-+			    block, count);
-+
-+	BUFFER_TRACE(bitmap_bh, "getting write access");
-+	err = ext3_journal_get_write_access(handle, bitmap_bh);
-+	if (err)
-+		goto error_return;
-+
-+	/*
-+	 * We are about to modify some metadata.  Call the journal APIs
-+	 * to unshare ->b_data if a currently-committing transaction is
-+	 * using it
-+	 */
-+	BUFFER_TRACE(gd_bh, "get_write_access");
-+	err = ext3_journal_get_write_access(handle, gd_bh);
-+	if (err)
-+		goto error_return;
-+
-+	err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+	if (err)
-+		goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+	{
-+		int i;
-+		for (i = 0; i < count; i++)
-+			J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+	}
-+#endif
-+	mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+	/* We dirtied the bitmap block */
-+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+	if (metadata) {
-+		/* blocks being freed are metadata. these blocks shouldn't
-+		 * be used until this transaction is committed */
-+		ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+	} else {
-+		ext3_lock_group(sb, block_group);
-+		mb_free_blocks(&e3b, bit, count);
-+		ext3_unlock_group(sb, block_group);
-+	}
-+
-+	spin_lock(sb_bgl_lock(sbi, block_group));
-+	gdp->bg_free_blocks_count =
-+		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+	spin_unlock(sb_bgl_lock(sbi, block_group));
-+	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+	ext3_mb_release_desc(&e3b);
-+
-+	*freed = count;
-+
-+	/* And the group descriptor block */
-+	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+	ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+	if (!err) err = ret;
-+
-+	if (overflow && !err) {
-+		block += count;
-+		count = overflow;
-+		goto do_more;
-+	}
-+	sb->s_dirt = 1;
-+error_return:
-+	brelse(bitmap_bh);
-+	ext3_std_error(sb, err);
-+	return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+	int free, ret = -ENOSPC;
-+
-+	BUG_ON(blocks < 0);
-+	spin_lock(&sbi->s_reserve_lock);
-+	free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+	if (blocks <= free - sbi->s_blocks_reserved) {
-+		sbi->s_blocks_reserved += blocks;
-+		ret = 0;
-+	}
-+	spin_unlock(&sbi->s_reserve_lock);
-+	return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+	struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+	BUG_ON(blocks < 0);
-+	spin_lock(&sbi->s_reserve_lock);
-+	sbi->s_blocks_reserved -= blocks;
-+	WARN_ON(sbi->s_blocks_reserved < 0);
-+	if (sbi->s_blocks_reserved < 0)
-+		sbi->s_blocks_reserved = 0;
-+	spin_unlock(&sbi->s_reserve_lock);
-+}
-+
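-+/* single-block front end: falls back to the old allocator when the
-+ * mballoc mount option is not enabled */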
-+ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+			    ext3_fsblk_t goal, int *errp)
-+{
-+	ext3_fsblk_t ret;
-+	int len;
-+
-+	if (!test_opt(inode->i_sb, MBALLOC)) {
-+		ret = ext3_new_block_old(handle, inode, goal, errp);
-+		goto out;
-+	}
-+	len = 1;
-+	ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+	return ret;
-+}
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+		      ext3_fsblk_t block, unsigned long count, int metadata)
-+{
-+	struct super_block *sb;
-+	unsigned long freed;
-+
-+	sb = inode->i_sb;
-+	if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+		ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+	else
-+		ext3_mb_free_blocks(handle, inode, block, count, metadata,
-+				    &freed);
-+	if (freed)
-+		DQUOT_FREE_BLOCK(inode, freed);
-+	return;
-+}
-+
-+#define EXT3_ROOT		   "ext3"
-+#define EXT3_MB_STATS_NAME	   "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME  "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME  "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ	   "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_stats);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_STATS_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Normalize to 0 or 1: zero -> 0, non-zero -> 1 */
-+	ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+	return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Value must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_max_to_scan = value;
-+
-+	return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+		int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+		unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Value must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_min_to_scan = value;
-+
-+	return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+				   int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	*eof = 1;
-+	if (off != 0)
-+		return 0;
-+
-+	len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+	*start = page;
-+	return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+				    unsigned long count, void *data)
-+{
-+	char str[32];
-+	long value;
-+
-+	if (count >= sizeof(str)) {
-+		printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+		       EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+		return -EOVERFLOW;
-+	}
-+
-+	if (copy_from_user(str, buffer, count))
-+		return -EFAULT;
-+
-+	/* Value must be a positive integer */
-+	value = simple_strtol(str, NULL, 0);
-+	if (value <= 0)
-+		return -ERANGE;
-+
-+	ext3_mb_order2_reqs = value;
-+
-+	return count;
-+}
-+
-+int __init init_ext3_proc(void)
-+{
-+	struct proc_dir_entry *proc_ext3_mb_stats;
-+	struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+	struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+	struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+	proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+	if (proc_root_ext3 == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+		return -EIO;
-+	}
-+
-+	/* Initialize EXT3_MB_STATS_NAME */
-+	proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_stats == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_STATS_NAME);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_stats->data = NULL;
-+	proc_ext3_mb_stats->read_proc  = ext3_mb_stats_read;
-+	proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+	/* Initialize EXT3_MAX_TO_SCAN_NAME */
-+	proc_ext3_mb_max_to_scan = create_proc_entry(
-+			EXT3_MB_MAX_TO_SCAN_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_max_to_scan == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_MAX_TO_SCAN_NAME);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_max_to_scan->data = NULL;
-+	proc_ext3_mb_max_to_scan->read_proc  = ext3_mb_max_to_scan_read;
-+	proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+	/* Initialize EXT3_MIN_TO_SCAN_NAME */
-+	proc_ext3_mb_min_to_scan = create_proc_entry(
-+			EXT3_MB_MIN_TO_SCAN_NAME,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_min_to_scan == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_MIN_TO_SCAN_NAME);
-+		remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_min_to_scan->data = NULL;
-+	proc_ext3_mb_min_to_scan->read_proc  = ext3_mb_min_to_scan_read;
-+	proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+	/* Initialize EXT3_ORDER2_REQ */
-+	proc_ext3_mb_order2_req = create_proc_entry(
-+			EXT3_MB_ORDER2_REQ,
-+			S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+	if (proc_ext3_mb_order2_req == NULL) {
-+		printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+				EXT3_MB_ORDER2_REQ);
-+		remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+		remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+		return -EIO;
-+	}
-+
-+	proc_ext3_mb_order2_req->data = NULL;
-+	proc_ext3_mb_order2_req->read_proc  = ext3_mb_order2_req_read;
-+	proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+	return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+	remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+	remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+	remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile	2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/Makefile	2006-07-16 02:29:49.000000000 +0800
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
- 
- ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- 	   ioctl.o namei.o super.o symlink.o hash.o resize.o \
--	   extents.o
-+	   extents.o mballoc.o
- 
- ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
diff --git a/ldiskfs/kernel_patches/patches/ext3-sector_t-overflow-2.6.12.patch b/ldiskfs/kernel_patches/patches/ext3-sector_t-overflow-2.6.12.patch
deleted file mode 100644
index ef0f4a41bd..0000000000
--- a/ldiskfs/kernel_patches/patches/ext3-sector_t-overflow-2.6.12.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-Subject: Avoid disk sector_t overflow for >2TB ext3 filesystem
-From: Mingming Cao <cmm@us.ibm.com>
-
-
-If an ext3 filesystem is larger than 2TB, and sector_t is a u32 (i.e.
-CONFIG_LBD is not defined in the kernel), the calculation of the disk sector
-will overflow.  Add checks at ext3_fill_super() and ext3_group_extend() to
-prevent mounting/remounting/resizing a >2TB ext3 filesystem if sector_t is
-only 4 bytes.
-
-Verified this patch on a 32-bit platform without CONFIG_LBD defined
-(sector_t is 32 bits long); mount refuses to mount a 10TB ext3.
-
-Signed-off-by: Mingming Cao<cmm@us.ibm.com>
-Acked-by: Andreas Dilger <adilger@clusterfs.com>
-Signed-off-by: Andrew Morton <akpm@osdl.org>
----
-
- fs/ext3/resize.c |   10 ++++++++++
- fs/ext3/super.c  |   10 ++++++++++
- 2 files changed, 20 insertions(+)
-
-diff -puN fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/resize.c
---- devel/fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem	2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/resize.c	2006-05-22 14:10:56.000000000 -0700
-@@ -926,6 +926,16 @@ int ext3_group_extend(struct super_block
- 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
- 		return 0;
- 
-+	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+		printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+		       "too large to resize to %lu blocks safely\n",
-+		       sb->s_id, n_blocks_count);
-+		if (sizeof(sector_t) < 8)
-+			ext3_warning(sb, __FUNCTION__,
-+				     "CONFIG_LBD not enabled\n");
-+		return -EINVAL;
-+	}
-+
- 	if (n_blocks_count < o_blocks_count) {
- 		ext3_warning(sb, __FUNCTION__,
- 			     "can't shrink FS - resize aborted");
-diff -puN fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/super.c
---- devel/fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem	2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/super.c	2006-05-22 14:11:10.000000000 -0700
-@@ -1565,6 +1565,17 @@ static int ext3_fill_super (struct super
- 		goto failed_mount;
- 	}
- 
-+	if (le32_to_cpu(es->s_blocks_count) >
-+	    (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+		printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+		       "too large to mount safely - %u blocks\n", sb->s_id,
-+		       le32_to_cpu(es->s_blocks_count));
-+		if (sizeof(sector_t) < 8)
-+			printk(KERN_WARNING
-+			       "EXT3-fs: CONFIG_LBD not enabled\n");
-+		goto failed_mount;
-+	}
-+
- 	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
- 		goto cantfind_ext3;
- 	sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
-_
diff --git a/ldiskfs/kernel_patches/patches/iopen-2.6.12.patch b/ldiskfs/kernel_patches/patches/iopen-2.6.12.patch
deleted file mode 100644
index 8d456ac251..0000000000
--- a/ldiskfs/kernel_patches/patches/iopen-2.6.12.patch
+++ /dev/null
@@ -1,471 +0,0 @@
-Index: linux-2.6.12-rc6/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/Makefile	2005-06-14 16:00:45.206720992 +0200
-+++ linux-2.6.12-rc6/fs/ext3/Makefile	2005-06-14 16:14:33.595382720 +0200
-@@ -4,7 +4,7 @@
- 
- obj-$(CONFIG_EXT3_FS) += ext3.o
- 
--ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-+ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- 	   ioctl.o namei.o super.o symlink.o hash.o resize.o
- 
- ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
-Index: linux-2.6.12-rc6/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/inode.c	2005-06-14 16:01:16.272150299 +0200
-+++ linux-2.6.12-rc6/fs/ext3/inode.c	2005-06-14 16:24:55.686195412 +0200
-@@ -37,6 +37,7 @@
- #include <linux/mpage.h>
- #include <linux/uio.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
- 
- static int ext3_writepage_trans_blocks(struct inode *inode);
-@@ -2437,6 +2438,8 @@
- 	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
- #endif
- 	ei->i_block_alloc_info = NULL;
-+ 	if (ext3_iopen_get_inode(inode))
-+ 		return;
- 
- 	if (__ext3_get_inode_loc(inode, &iloc, 0))
- 		goto bad_inode;
-Index: linux-2.6.12-rc6/fs/ext3/iopen.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/iopen.c	2005-06-14 16:14:33.530929595 +0200
-+++ linux-2.6.12-rc6/fs/ext3/iopen.c	2005-06-14 16:14:33.626632719 +0200
-@@ -0,0 +1,278 @@
-+/*
-+ * linux/fs/ext3/iopen.c
-+ *
-+ * Special support for open by inode number
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ *
-+ *
-+ * Invariants:
-+ *   - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
-+ *     for an inode at one time.
-+ *   - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
-+ *     aliases on an inode at the same time.
-+ *
-+ * If we have any connected dentry aliases for an inode, use one of those
-+ * in iopen_lookup().  Otherwise, we instantiate a single NFSD_DISCONNECTED
-+ * dentry for this inode, which thereafter will be found by the dcache
-+ * when looking up this inode number in __iopen__, so we don't return here
-+ * until it is gone.
-+ *
-+ * If we get an inode via a regular name lookup, then we "rename" the
-+ * NFSD_DISCONNECTED dentry to the proper name and parent.  This ensures
-+ * existing users of the disconnected dentry will continue to use the same
-+ * dentry as the connected users, and there will never be both kinds of
-+ * dentry aliases at one time.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/dcache.h>
-+#include <linux/security.h>
-+#include "iopen.h"
-+
-+#ifndef assert
-+#define assert(test) J_ASSERT(test)
-+#endif
-+
-+#define IOPEN_NAME_LEN	32
-+
-+/*
-+ * This implements looking up an inode by number.
-+ */
-+static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
-+				   struct nameidata *nd)
-+{
-+	struct inode *inode;
-+	unsigned long ino;
-+	struct list_head *lp;
-+	struct dentry *alternate;
-+	char buf[IOPEN_NAME_LEN];
-+
-+	if (dentry->d_name.len >= IOPEN_NAME_LEN)
-+		return ERR_PTR(-ENAMETOOLONG);
-+
-+	memcpy(buf, dentry->d_name.name, dentry->d_name.len);
-+	buf[dentry->d_name.len] = 0;
-+
-+	if (strcmp(buf, ".") == 0)
-+		ino = dir->i_ino;
-+	else if (strcmp(buf, "..") == 0)
-+		ino = EXT3_ROOT_INO;
-+	else
-+		ino = simple_strtoul(buf, 0, 0);
-+
-+	if ((ino != EXT3_ROOT_INO &&
-+	     //ino != EXT3_ACL_IDX_INO &&
-+	     //ino != EXT3_ACL_DATA_INO &&
-+	     ino < EXT3_FIRST_INO(dir->i_sb)) ||
-+	    ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
-+		return ERR_PTR(-ENOENT);
-+
-+	inode = iget(dir->i_sb, ino);
-+	if (!inode)
-+		return ERR_PTR(-EACCES);
-+	if (is_bad_inode(inode)) {
-+		iput(inode);
-+		return ERR_PTR(-ENOENT);
-+	}
-+
-+	assert(list_empty(&dentry->d_alias));		/* d_instantiate */
-+	assert(d_unhashed(dentry));			/* d_rehash */
-+
-+	/* preferably return a connected dentry */
-+	spin_lock(&dcache_lock);
-+	list_for_each(lp, &inode->i_dentry) {
-+		alternate = list_entry(lp, struct dentry, d_alias);
-+		assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
-+	}
-+
-+	if (!list_empty(&inode->i_dentry)) {
-+		alternate = list_entry(inode->i_dentry.next,
-+				       struct dentry, d_alias);
-+		dget_locked(alternate);
-+		spin_lock(&alternate->d_lock);
-+		alternate->d_flags |= DCACHE_REFERENCED;
-+		spin_unlock(&alternate->d_lock);
-+		iput(inode);
-+		spin_unlock(&dcache_lock);
-+		return alternate;
-+	}
-+	dentry->d_flags |= DCACHE_DISCONNECTED;
-+
-+	/* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+	list_add(&dentry->d_alias, &inode->i_dentry);	/* d_instantiate */
-+	dentry->d_inode = inode;
-+
-+	d_rehash_cond(dentry, 0);			/* d_rehash */
-+	spin_unlock(&dcache_lock);
-+
-+	return NULL;
-+}
-+
-+#define do_switch(x,y) do { \
-+	__typeof__ (x) __tmp = x; \
-+	x = y; y = __tmp; } while (0)
-+
-+static inline void switch_names(struct dentry *dentry, struct dentry *target)
-+{
-+	const unsigned char *old_name, *new_name;
-+
-+	memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN_MIN);
-+	old_name = target->d_name.name;
-+	new_name = dentry->d_name.name;
-+	if (old_name == target->d_iname)
-+		old_name = dentry->d_iname;
-+	if (new_name == dentry->d_iname)
-+		new_name = target->d_iname;
-+	target->d_name.name = new_name;
-+	dentry->d_name.name = old_name;
-+}
-+
-+/* This function is spliced into ext3_lookup and does the move of a
-+ * disconnected dentry (if it exists) to a connected dentry.
-+ */
-+struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
-+				    int rehash)
-+{
-+	struct dentry *tmp, *goal = NULL;
-+	struct list_head *lp;
-+
-+	/* verify this dentry is really new */
-+	assert(dentry->d_inode == NULL);
-+	assert(list_empty(&dentry->d_alias));		/* d_instantiate */
-+	if (rehash)
-+		assert(d_unhashed(dentry));		/* d_rehash */
-+	assert(list_empty(&dentry->d_subdirs));
-+
-+	spin_lock(&dcache_lock);
-+	if (!inode)
-+		goto do_rehash;
-+
-+	if (!test_opt(inode->i_sb, IOPEN))
-+		goto do_instantiate;
-+
-+	/* preferably return a connected dentry */
-+	list_for_each(lp, &inode->i_dentry) {
-+		tmp = list_entry(lp, struct dentry, d_alias);
-+		if (tmp->d_flags & DCACHE_DISCONNECTED) {
-+			assert(tmp->d_alias.next == &inode->i_dentry);
-+			assert(tmp->d_alias.prev == &inode->i_dentry);
-+			goal = tmp;
-+			dget_locked(goal);
-+			break;
-+		}
-+	}
-+
-+	if (!goal)
-+		goto do_instantiate;
-+
-+	/* Move the goal to the de hash queue */
-+	goal->d_flags &= ~DCACHE_DISCONNECTED;
-+	security_d_instantiate(goal, inode);
-+	__d_drop(dentry);
-+	d_rehash_cond(dentry, 0);
-+	__d_move(goal, dentry);
-+	spin_unlock(&dcache_lock);
-+	iput(inode);
-+
-+	return goal;
-+
-+	/* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+do_instantiate:
-+	list_add(&dentry->d_alias, &inode->i_dentry);	/* d_instantiate */
-+	dentry->d_inode = inode;
-+do_rehash:
-+	if (rehash)
-+		d_rehash_cond(dentry, 0);		/* d_rehash */
-+	spin_unlock(&dcache_lock);
-+
-+	return NULL;
-+}
-+
-+/*
-+ * These are the special structures for the iopen pseudo directory.
-+ */
-+
-+static struct inode_operations iopen_inode_operations = {
-+	lookup:		iopen_lookup,		/* BKL held */
-+};
-+
-+static struct file_operations iopen_file_operations = {
-+	read:		generic_read_dir,
-+};
-+
-+static int match_dentry(struct dentry *dentry, const char *name)
-+{
-+	int	len;
-+
-+	len = strlen(name);
-+	if (dentry->d_name.len != len)
-+		return 0;
-+	if (strncmp(dentry->d_name.name, name, len))
-+		return 0;
-+	return 1;
-+}
-+
-+/*
-+ * This function is spliced into ext3_lookup and returns 1 if the file
-+ * name is __iopen__ and the dentry has been filled in appropriately.
-+ */
-+int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
-+{
-+	struct inode *inode;
-+
-+	if (dir->i_ino != EXT3_ROOT_INO ||
-+	    !test_opt(dir->i_sb, IOPEN) ||
-+	    !match_dentry(dentry, "__iopen__"))
-+		return 0;
-+
-+	inode = iget(dir->i_sb, EXT3_BAD_INO);
-+
-+	if (!inode)
-+		return 0;
-+	d_add(dentry, inode);
-+	return 1;
-+}
-+
-+/*
-+ * This function is spliced into read_inode; it returns 1 if the inode
-+ * number is the one for /__iopen__, in which case the inode is filled
-+ * in appropriately.  Otherwise, this function returns 0.
-+ */
-+int ext3_iopen_get_inode(struct inode *inode)
-+{
-+	if (inode->i_ino != EXT3_BAD_INO)
-+		return 0;
-+
-+	inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
-+	if (test_opt(inode->i_sb, IOPEN_NOPRIV))
-+		inode->i_mode |= 0777;
-+	inode->i_uid = 0;
-+	inode->i_gid = 0;
-+	inode->i_nlink = 1;
-+	inode->i_size = 4096;
-+	inode->i_atime = CURRENT_TIME;
-+	inode->i_ctime = CURRENT_TIME;
-+	inode->i_mtime = CURRENT_TIME;
-+	EXT3_I(inode)->i_dtime = 0;
-+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size
-+					 * (for stat), not the fs block
-+					 * size */
-+	inode->i_blocks = 0;
-+	inode->i_version = 1;
-+	inode->i_generation = 0;
-+
-+	inode->i_op = &iopen_inode_operations;
-+	inode->i_fop = &iopen_file_operations;
-+	inode->i_mapping->a_ops = 0;
-+
-+	return 1;
-+}
-Index: linux-2.6.12-rc6/fs/ext3/iopen.h
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/iopen.h	2005-06-14 16:14:33.534835845 +0200
-+++ linux-2.6.12-rc6/fs/ext3/iopen.h	2005-06-14 16:14:33.633468657 +0200
-@@ -0,0 +1,15 @@
-+/*
-+ * iopen.h
-+ *
-+ * Special support for opening files by inode number.
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ */
-+
-+extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
-+extern int ext3_iopen_get_inode(struct inode *inode);
-+extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
-+					   struct inode *inode, int rehash);
-Index: linux-2.6.12-rc6/fs/ext3/namei.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/namei.c	2005-06-14 16:01:14.701837819 +0200
-+++ linux-2.6.12-rc6/fs/ext3/namei.c	2005-06-14 16:14:33.644210844 +0200
-@@ -37,6 +37,7 @@
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
- 
- /*
-@@ -985,6 +986,9 @@
- 	if (dentry->d_name.len > EXT3_NAME_LEN)
- 		return ERR_PTR(-ENAMETOOLONG);
- 
-+	if (ext3_check_for_iopen(dir, dentry))
-+		return NULL;
-+
- 	bh = ext3_find_entry(dentry, &de);
- 	inode = NULL;
- 	if (bh) {
-@@ -995,10 +999,8 @@
- 		if (!inode)
- 			return ERR_PTR(-EACCES);
- 	}
--	if (inode)
--		return d_splice_alias(inode, dentry);
--	d_add(dentry, inode);
--	return NULL;
-+
-+	return iopen_connect_dentry(dentry, inode, 1);
- }
- 
- 
-@@ -2042,10 +2044,6 @@
- 			      inode->i_nlink);
- 	inode->i_version++;
- 	inode->i_nlink = 0;
--	/* There's no need to set i_disksize: the fact that i_nlink is
--	 * zero will ensure that the right thing happens during any
--	 * recovery. */
--	inode->i_size = 0;
- 	ext3_orphan_add(handle, inode);
- 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- 	ext3_mark_inode_dirty(handle, inode);
-@@ -2168,6 +2166,23 @@
- 	return err;
- }
- 
-+/* Like ext3_add_nondir() except for the call to iopen_connect_dentry() */
-+static int ext3_add_link(handle_t *handle, struct dentry *dentry,
-+			 struct inode *inode)
-+{
-+	int err = ext3_add_entry(handle, dentry, inode);
-+	if (!err) {
-+		err = ext3_mark_inode_dirty(handle, inode);
-+		if (err == 0) {
-+			dput(iopen_connect_dentry(dentry, inode, 0));
-+			return 0;
-+		}
-+	}
-+	ext3_dec_count(handle, inode);
-+	iput(inode);
-+	return err;
-+}
-+
- static int ext3_link (struct dentry * old_dentry,
- 		struct inode * dir, struct dentry *dentry)
- {
-@@ -2191,7 +2206,8 @@
- 	ext3_inc_count(handle, inode);
- 	atomic_inc(&inode->i_count);
- 
--	err = ext3_add_nondir(handle, dentry, inode);
-+	err = ext3_add_link(handle, dentry, inode);
-+	ext3_orphan_del(handle, inode);
- 	ext3_journal_stop(handle);
- 	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
- 		goto retry;
-Index: linux-2.6.12-rc6/fs/ext3/super.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/super.c	2005-06-14 16:01:16.287775299 +0200
-+++ linux-2.6.12-rc6/fs/ext3/super.c	2005-06-14 16:14:33.656906156 +0200
-@@ -590,6 +590,7 @@
- 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
-+	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- };
- 
- static match_table_t tokens = {
-@@ -638,6 +639,9 @@
- 	{Opt_ignore, "noquota"},
- 	{Opt_ignore, "quota"},
- 	{Opt_ignore, "usrquota"},
-+	{Opt_iopen, "iopen"},
-+	{Opt_noiopen, "noiopen"},
-+	{Opt_iopen_nopriv, "iopen_nopriv"},
- 	{Opt_barrier, "barrier=%u"},
- 	{Opt_err, NULL},
- 	{Opt_resize, "resize"},
-@@ -921,6 +925,18 @@
- 			else
- 				clear_opt(sbi->s_mount_opt, BARRIER);
- 			break;
-+		case Opt_iopen:
-+			set_opt (sbi->s_mount_opt, IOPEN);
-+			clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+			break;
-+		case Opt_noiopen:
-+			clear_opt (sbi->s_mount_opt, IOPEN);
-+			clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+			break;
-+		case Opt_iopen_nopriv:
-+			set_opt (sbi->s_mount_opt, IOPEN);
-+			set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+			break;
- 		case Opt_ignore:
- 			break;
- 		case Opt_resize:
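For reference, a minimal user-space sketch of how the mount options handled in the hunk above would be exercised; the option strings are the tokens introduced here ("iopen", "noiopen", "iopen_nopriv"), while the device /dev/sda1 and mount point /mnt are hypothetical, and this is an illustration rather than part of the patch:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Pass the option through the mount(2) data argument; "iopen_nopriv"
	 * sets both the IOPEN and IOPEN_NOPRIV mount flags parsed above. */
	if (mount("/dev/sda1", "/mnt", "ext3", 0, "iopen_nopriv") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}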
-Index: linux-2.6.12-rc6/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs.h	2005-06-14 16:01:14.709650318 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs.h	2005-06-14 16:28:38.452794245 +0200
-@@ -358,6 +358,8 @@
- #define EXT3_MOUNT_RESERVATION		0x10000	/* Preallocation */
- #define EXT3_MOUNT_BARRIER		0x20000 /* Use block barriers */
- #define EXT3_MOUNT_NOBH			0x40000 /* No bufferheads */
-+#define EXT3_MOUNT_IOPEN		0x80000	/* Allow access via iopen */
-+#define EXT3_MOUNT_IOPEN_NOPRIV		0x100000/* Make iopen world-readable */
- 
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
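Taken together, the hunks above allow a file to be opened by inode number through the magic __iopen__ directory in the filesystem root. A minimal sketch, assuming the filesystem is mounted with the iopen option at a hypothetical /mnt and that inode 12345 exists (both made up for illustration); the "__iopen__" name and the root-directory check come from ext3_check_for_iopen(), and the numeric component is resolved by iopen_lookup(), registered earlier as the pseudo-directory's lookup operation:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	unsigned long ino = 12345;	/* hypothetical inode number */
	int fd;

	/* "__iopen__" is only recognized directly under the root directory
	 * and only when the filesystem is mounted with the iopen option. */
	snprintf(path, sizeof(path), "/mnt/__iopen__/%lu", ino);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);
	return 0;
}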
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6.12-vanilla.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6.12-vanilla.series
deleted file mode 100644
index 286a3a2c0d..0000000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6.12-vanilla.series
+++ /dev/null
@@ -1,18 +0,0 @@
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6.12.patch 
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.12.patch 
-ext3-mballoc2-2.6.12.patch 
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-htree-dot-2.6.patch
-ext3-external-journal-2.6.12.patch
-ext3-check-jbd-errors-2.6.12.patch
-ext3-check-jbd-errors-2.6.9.patch
-ext3-uninit-2.6.9.patch
-ext3-nanosecond-2.6-rhel4.patch
-
-- 
GitLab