[packages/kernel] - up to 3.13.2 - removed reiser4, we have had it disabled for 4 years, no point in dragging it along
baggins
baggins at pld-linux.org
Fri Feb 7 18:48:04 CET 2014
commit 6d4f4c8f10b9bb7c5a0dc02052b8b56ce84c385f
Author: Jan Rękorajski <baggins at pld-linux.org>
Date: Fri Feb 7 18:47:21 2014 +0100
- up to 3.13.2
- removed reiser4, we have had it disabled for 4 years, no point in dragging it along
kernel-reiser4.config | 5 -
kernel-reiser4.patch | 78492 ------------------------------------------------
kernel.spec | 18 +-
3 files changed, 2 insertions(+), 78513 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 7ddb7c1..5e931b4 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -20,7 +20,6 @@
%bcond_without pcmcia # don't build pcmcia
%bcond_with verbose # verbose build (V=1)
-%bcond_with reiser4 # support for reiser4 fs (experimental)
%bcond_with fbcondecor # build fbcondecor (disable FB_TILEBLITTING and affected fb modules)
%bcond_without pae # build PAE (HIGHMEM64G) support on 32bit i686 athlon pentium3 pentium4
@@ -69,7 +68,7 @@
%define rel 1
%define basever 3.13
-%define postver .1
+%define postver .2
# __alt_kernel is list of features, empty string if none set
# _alt kernel is defined as: %{nil}%{?alt_kernel:-%{?alt_kernel}} (defined in rpm.macros)
@@ -112,7 +111,7 @@ Source0: http://www.kernel.org/pub/linux/kernel/v3.x/linux-%{basever}.tar.xz
# Source0-md5: 0ecbaf65c00374eb4a826c2f9f37606f
%if "%{postver}" != ".0"
Patch0: http://www.kernel.org/pub/linux/kernel/v3.x/patch-%{version}.xz
-# Patch0-md5: 675692f24410f375055d422e7886f3d8
+# Patch0-md5: 352993d23acc5760dafa10ffc9d8881a
%endif
Source1: kernel.sysconfig
@@ -136,7 +135,6 @@ Source41: kernel-patches.config
Source43: kernel-vserver.config
Source55: kernel-imq.config
-Source56: kernel-reiser4.config
Source58: kernel-inittmpfs.config
@@ -167,9 +165,6 @@ Patch49: kernel-zph.patch
# http://www.linuximq.net
Patch50: kernel-imq.patch
-Patch51: http://downloads.sourceforge.net/project/reiser4/reiser4-for-linux-3.x/reiser4-for-3.11.1.patch.gz
-# Patch51-md5: 24a7d3711aadd26000d16eaac943a4ce
-
# http://fatooh.org/esfq-2.6/sfq-2.6.24.1.tar.bz2
Patch53: kernel-esfq.patch
@@ -316,7 +311,6 @@ Conflicts: oprofile < 0.9
Conflicts: ppp < 1:2.4.0
Conflicts: procps < 3.2.0
Conflicts: quota-tools < 3.09
-%{?with_reiserfs4:Conflicts: reiser4progs < 1.0.0}
Conflicts: reiserfsprogs < 3.6.3
Conflicts: rpm < 4.4.2-0.2
Conflicts: udev < 1:081
@@ -667,11 +661,6 @@ cd linux-%{basever}
%patch50 -p1
%endif
-# reiser4
-%if %{with reiser4}
-%patch51 -p1
-%endif
-
# esfq
%if %{with esfq}
%patch53 -p1
@@ -925,9 +914,6 @@ EOCONFIG
rescue.config \
%endif
\
-%if %{with reiser4}
- %{SOURCE56} \
-%endif
%if %{with imq}
%{SOURCE55} \
%endif
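
For reference, everything removed above hangs off one RPM build conditional:
%bcond_with reiser4 declared a feature knob that was off by default (a build
run as "rpmbuild --with reiser4" would have turned it on), and the
%if %{with reiser4} ... %endif blocks removed above gated applying Patch51 and
feeding %{SOURCE56} into the kernel config assembly. A minimal sketch of the
pattern, with a hypothetical "foo" feature and patch number standing in for
the real ones:

%bcond_with	foo		# support for foo (off by default; enable with --with foo)

%if %{with foo}
%patch99 -p1
%endif
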
diff --git a/kernel-reiser4.config b/kernel-reiser4.config
deleted file mode 100644
index f5607d6..0000000
--- a/kernel-reiser4.config
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Reiser4 support
-#
-CONFIG_REISER4_FS=m
-# CONFIG_REISER4_DEBUG is not set
diff --git a/kernel-reiser4.patch b/kernel-reiser4.patch
deleted file mode 100644
index 44e96ad..0000000
--- a/kernel-reiser4.patch
+++ /dev/null
@@ -1,78492 +0,0 @@
-diff -urN linux-2.6.35.orig/Documentation/Changes linux-2.6.35/Documentation/Changes
---- linux-2.6.35.orig/Documentation/Changes 2010-08-02 00:11:14.000000000 +0200
-+++ linux-2.6.35/Documentation/Changes 2010-08-04 15:44:57.000000000 +0200
-@@ -36,6 +36,7 @@
- o e2fsprogs 1.41.4 # e2fsck -V
- o jfsutils 1.1.3 # fsck.jfs -V
- o reiserfsprogs 3.6.3 # reiserfsck -V 2>&1|grep reiserfsprogs
-+o reiser4progs 1.0.0 # fsck.reiser4 -V
- o xfsprogs 2.6.0 # xfs_db -V
- o squashfs-tools 4.0 # mksquashfs -version
- o btrfs-progs 0.18 # btrfsck
-@@ -157,6 +158,13 @@
- versions of mkreiserfs, resize_reiserfs, debugreiserfs and
- reiserfsck. These utils work on both i386 and alpha platforms.
-
-+Reiser4progs
-+------------
-+
-+The reiser4progs package contains utilities for the reiser4 file system.
-+Detailed instructions are provided in the README file located at:
-+<ftp://ftp.namesys.com/pub/reiser4progs/README>.
-+
- Xfsprogs
- --------
-
-@@ -345,6 +353,10 @@
- -------------
- o <http://www.namesys.com/pub/reiserfsprogs/reiserfsprogs-3.6.3.tar.gz>
-
-+Reiser4progs
-+------------
-+o <ftp://ftp.namesys.com/pub/reiser4progs/>
-+
- Xfsprogs
- --------
- o <ftp://oss.sgi.com/projects/xfs/download/>
-diff -urN linux-2.6.35.orig/Documentation/filesystems/reiser4.txt linux-2.6.35/Documentation/filesystems/reiser4.txt
---- linux-2.6.35.orig/Documentation/filesystems/reiser4.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/Documentation/filesystems/reiser4.txt 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,75 @@
-+Reiser4 filesystem
-+==================
-+Reiser4 is a file system based on dancing tree algorithms, and is
-+described at http://www.namesys.com
-+
-+
-+References
-+==========
-+web page http://namesys.com/v4/v4.html
-+source code ftp://ftp.namesys.com/pub/reiser4-for-2.6/
-+userland tools ftp://ftp.namesys.com/pub/reiser4progs/
-+install page http://www.namesys.com/install_v4.html
-+
-+Compile options
-+===============
-+Enable reiser4 debug mode
-+ This checks everything imaginable while reiser4
-+ runs
-+
-+Mount options
-+=============
-+tmgr.atom_max_size=N
-+ Atoms containing more than N blocks will be forced to commit.
-+ N is decimal.
-+ Default is nr_free_pagecache_pages() / 2 at mount time.
-+
-+tmgr.atom_max_age=N
-+ Atoms older than N seconds will be forced to commit. N is decimal.
-+ Default is 600.
-+
-+tmgr.atom_max_flushers=N
-+ Limit of concurrent flushers for one atom. 0 means no limit.
-+ Default is 0.
-+
-+tree.cbk_cache.nr_slots=N
-+ Number of slots in the cbk cache.
-+
-+flush.relocate_threshold=N
-+ If flush finds more than N adjacent dirty leaf-level blocks it
-+ will force them to be relocated.
-+ Default is 64.
-+
-+flush.relocate_distance=N
-+ If flush finds can find a block allocation closer than at most
-+	If flush can find a block allocation closer than at most
-+ Default is 64.
-+
-+flush.scan_maxnodes=N
-+ The maximum number of nodes to scan left on a level during
-+ flush.
-+ Default is 10000.
-+
-+optimal_io_size=N
-+ Preferred IO size. This value is used to set st_blksize of
-+ struct stat.
-+ Default is 65536.
-+
-+bsdgroups
-+ Turn on BSD-style gid assignment.
-+
-+32bittimes
-+	By default files in reiser4 have 64 bit timestamps. Files
-+ created when filesystem is mounted with 32bittimes mount
-+ option will get 32 bit timestamps.
-+
-+mtflush
-+ Turn off concurrent flushing.
-+
-+nopseudo
-+ Disable pseudo files support. See
-+ http://namesys.com/v4/pseudo.html for more about pseudo files.
-+
-+dont_load_bitmap
-+ Don't load all bitmap blocks at mount time, it is useful for
-+ machines with tiny RAM and large disks.
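
(Usage illustration, not part of the patch: these are ordinary comma-separated
mount options, so a reiser4 mount would have looked something like
"mount -t reiser4 -o tmgr.atom_max_age=300,dont_load_bitmap /dev/sdXN /mnt",
with the device, mountpoint and option values all hypothetical.)
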
-diff -urN linux-2.6.35.orig/fs/fs-writeback.c linux-2.6.35/fs/fs-writeback.c
---- linux-2.6.35.orig/fs/fs-writeback.c 2010-08-02 00:11:14.000000000 +0200
-+++ linux-2.6.35/fs/fs-writeback.c 2010-08-04 20:33:23.000000000 +0200
-@@ -461,8 +461,10 @@
- * Return 1, if the caller writeback routine should be
- * interrupted. Otherwise return 0.
- */
--static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
-- struct writeback_control *wbc, bool only_this_sb)
-+int generic_writeback_sb_inodes(struct super_block *sb,
-+ struct bdi_writeback *wb,
-+ struct writeback_control *wbc,
-+ bool only_this_sb)
- {
- while (!list_empty(&wb->b_io)) {
- long pages_skipped;
-@@ -544,7 +546,10 @@
- requeue_io(inode);
- continue;
- }
-- ret = writeback_sb_inodes(sb, wb, wbc, false);
-+ if (sb->s_op->writeback_inodes)
-+ ret = sb->s_op->writeback_inodes(sb, wb, wbc, false);
-+ else
-+ ret = generic_writeback_sb_inodes(sb, wb, wbc, false);
- drop_super(sb);
-
- if (ret)
-@@ -553,6 +558,7 @@
- spin_unlock(&inode_lock);
- /* Leave any unwritten inodes on b_io */
- }
-+EXPORT_SYMBOL(writeback_inodes_wb);
-
- static void __writeback_inodes_sb(struct super_block *sb,
- struct bdi_writeback *wb, struct writeback_control *wbc)
-@@ -563,7 +569,7 @@
- spin_lock(&inode_lock);
- if (!wbc->for_kupdate || list_empty(&wb->b_io))
- queue_io(wb, wbc->older_than_this);
-- writeback_sb_inodes(sb, wb, wbc, true);
-+ generic_writeback_sb_inodes(sb, wb, wbc, true);
- spin_unlock(&inode_lock);
- }
-
-@@ -680,6 +686,32 @@
-
- return wrote;
- }
-+EXPORT_SYMBOL(generic_writeback_sb_inodes);
-+
-+/*
-+ * This function is for file systems which have their
-+ * own means of periodical write-out of old data.
-+ * NOTE: inode_lock should be held.
-+ *
-+ * Skip a portion of b_io inodes which belong to @sb
-+ * and go sequentially in reverse order.
-+ */
-+void writeback_skip_sb_inodes(struct super_block *sb,
-+ struct bdi_writeback *wb)
-+{
-+ while (1) {
-+ struct inode *inode;
-+
-+ if (list_empty(&wb->b_io))
-+ break;
-+ inode = list_entry(wb->b_io.prev, struct inode, i_list);
-+ if (sb != inode->i_sb)
-+ break;
-+ redirty_tail(inode);
-+ }
-+}
-+EXPORT_SYMBOL(writeback_skip_sb_inodes);
-+
-
- /*
- * Return the next wb_writeback_work struct that hasn't been processed yet.
-@@ -1159,3 +1191,12 @@
- return ret;
- }
- EXPORT_SYMBOL(sync_inode);
-+/*
-+ * Local variables:
-+ * c-indentation-style: "K&R"
-+ * mode-name: "LC"
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * fill-column: 79
-+ * End:
-+ */
-diff -urN linux-2.6.35.orig/fs/inode.c linux-2.6.35/fs/inode.c
---- linux-2.6.35.orig/fs/inode.c 2010-08-02 00:11:14.000000000 +0200
-+++ linux-2.6.35/fs/inode.c 2010-08-04 15:44:57.000000000 +0200
-@@ -84,6 +84,7 @@
- * the i_state of an inode while it is in use..
- */
- DEFINE_SPINLOCK(inode_lock);
-+EXPORT_SYMBOL_GPL(inode_lock);
-
- /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
-diff -urN linux-2.6.35.orig/fs/Kconfig linux-2.6.35/fs/Kconfig
---- linux-2.6.35.orig/fs/Kconfig 2010-08-02 00:11:14.000000000 +0200
-+++ linux-2.6.35/fs/Kconfig 2010-08-04 15:44:57.000000000 +0200
-@@ -27,6 +27,7 @@
- default y if EXT4_FS=y && EXT4_FS_XATTR
- default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
-
-+source "fs/reiser4/Kconfig"
- source "fs/reiserfs/Kconfig"
- source "fs/jfs/Kconfig"
-
-diff -urN linux-2.6.35.orig/fs/Makefile linux-2.6.35/fs/Makefile
---- linux-2.6.35.orig/fs/Makefile 2010-08-02 00:11:14.000000000 +0200
-+++ linux-2.6.35/fs/Makefile 2010-08-04 15:44:57.000000000 +0200
-@@ -65,6 +65,7 @@
- # Do not add any filesystems before this line
- obj-$(CONFIG_FSCACHE) += fscache/
- obj-$(CONFIG_REISERFS_FS) += reiserfs/
-+obj-$(CONFIG_REISER4_FS) += reiser4/
- obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
- obj-$(CONFIG_EXT2_FS) += ext2/
- # We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
-diff -urN linux-2.6.35.orig/fs/reiser4/as_ops.c linux-2.6.35/fs/reiser4/as_ops.c
---- linux-2.6.35.orig/fs/reiser4/as_ops.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/as_ops.c 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,337 @@
-+/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */
-+
-+/* Interface to VFS. Reiser4 address_space_operations are defined here. */
-+
-+#include "forward.h"
-+#include "debug.h"
-+#include "dformat.h"
-+#include "coord.h"
-+#include "plugin/item/item.h"
-+#include "plugin/file/file.h"
-+#include "plugin/security/perm.h"
-+#include "plugin/disk_format/disk_format.h"
-+#include "plugin/plugin.h"
-+#include "plugin/plugin_set.h"
-+#include "plugin/object.h"
-+#include "txnmgr.h"
-+#include "jnode.h"
-+#include "znode.h"
-+#include "block_alloc.h"
-+#include "tree.h"
-+#include "vfs_ops.h"
-+#include "inode.h"
-+#include "page_cache.h"
-+#include "ktxnmgrd.h"
-+#include "super.h"
-+#include "reiser4.h"
-+#include "entd.h"
-+
-+#include <linux/profile.h>
-+#include <linux/types.h>
-+#include <linux/mount.h>
-+#include <linux/vfs.h>
-+#include <linux/mm.h>
-+#include <linux/buffer_head.h>
-+#include <linux/dcache.h>
-+#include <linux/list.h>
-+#include <linux/pagemap.h>
-+#include <linux/slab.h>
-+#include <linux/seq_file.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/writeback.h>
-+#include <linux/backing-dev.h>
-+#include <linux/quotaops.h>
-+#include <linux/security.h>
-+
-+/* address space operations */
-+
-+/**
-+ * reiser4_set_page_dirty - set dirty bit, tag in page tree, dirty accounting
-+ * @page: page to be dirtied
-+ *
-+ * Operation of struct address_space_operations. This implementation is used by
-+ * unix and cryptcompress file plugins.
-+ *
-+ * This is called when reiser4 page gets dirtied outside of reiser4, for
-+ * example, when dirty bit is moved from pte to physical page.
-+ *
-+ * Tags page in the mapping's page tree with special tag so that it is possible
-+ * to do all the reiser4 specific work wrt dirty pages (jnode creation,
-+ * capturing by an atom) later because it can not be done in the contexts where
-+ * set_page_dirty is called.
-+ */
-+int reiser4_set_page_dirty(struct page *page)
-+{
-+ /* this page can be unformatted only */
-+ assert("vs-1734", (page->mapping &&
-+ page->mapping->host &&
-+ reiser4_get_super_fake(page->mapping->host->i_sb) !=
-+ page->mapping->host &&
-+ reiser4_get_cc_fake(page->mapping->host->i_sb) !=
-+ page->mapping->host &&
-+ reiser4_get_bitmap_fake(page->mapping->host->i_sb) !=
-+ page->mapping->host));
-+ return __set_page_dirty_nobuffers(page);
-+}
-+
-+/* ->invalidatepage method for reiser4 */
-+
-+/*
-+ * this is called for each truncated page from
-+ * truncate_inode_pages()->truncate_{complete,partial}_page().
-+ *
-+ * At the moment of call, page is under lock, and outstanding io (if any) has
-+ * completed.
-+ */
-+
-+/**
-+ * reiser4_invalidatepage
-+ * @page: page to invalidate
-+ * @offset: starting offset for partial invalidation
-+ *
-+ */
-+void reiser4_invalidatepage(struct page *page, unsigned long offset)
-+{
-+ int ret = 0;
-+ reiser4_context *ctx;
-+ struct inode *inode;
-+ jnode *node;
-+
-+ /*
-+ * This is called to truncate file's page.
-+ *
-+ * Originally, reiser4 implemented truncate in a standard way
-+ * (vmtruncate() calls ->invalidatepage() on all truncated pages
-+ * first, then file system ->truncate() call-back is invoked).
-+ *
-+	 * This led to the problem when ->invalidatepage() was called on a
-+ * page with jnode that was captured into atom in ASTAGE_PRE_COMMIT
-+ * process. That is, truncate was bypassing transactions. To avoid
-+ * this, try_capture_page_to_invalidate() call was added here.
-+ *
-+ * After many troubles with vmtruncate() based truncate (including
-+ * races with flush, tail conversion, etc.) it was re-written in the
-+ * top-to-bottom style: items are killed in reiser4_cut_tree_object()
-+ * and pages belonging to extent are invalidated in kill_hook_extent().
-+ * So probably now additional call to capture is not needed here.
-+ */
-+
-+ assert("nikita-3137", PageLocked(page));
-+ assert("nikita-3138", !PageWriteback(page));
-+ inode = page->mapping->host;
-+
-+ /*
-+ * ->invalidatepage() should only be called for the unformatted
-+ * jnodes. Destruction of all other types of jnodes is performed
-+ * separately. But, during some corner cases (like handling errors
-+ * during mount) it is simpler to let ->invalidatepage to be called on
-+ * them. Check for this, and do nothing.
-+ */
-+ if (reiser4_get_super_fake(inode->i_sb) == inode)
-+ return;
-+ if (reiser4_get_cc_fake(inode->i_sb) == inode)
-+ return;
-+ if (reiser4_get_bitmap_fake(inode->i_sb) == inode)
-+ return;
-+ assert("vs-1426", PagePrivate(page));
-+ assert("vs-1427",
-+ page->mapping == jnode_get_mapping(jnode_by_page(page)));
-+ assert("", jprivate(page) != NULL);
-+ assert("", ergo(inode_file_plugin(inode) !=
-+ file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID),
-+ offset == 0));
-+
-+ ctx = reiser4_init_context(inode->i_sb);
-+ if (IS_ERR(ctx))
-+ return;
-+
-+ node = jprivate(page);
-+ spin_lock_jnode(node);
-+ if (!(node->state & ((1 << JNODE_DIRTY) | (1 << JNODE_FLUSH_QUEUED) |
-+ (1 << JNODE_WRITEBACK) | (1 << JNODE_OVRWR)))) {
-+		/* there is no need to capture */
-+ jref(node);
-+ JF_SET(node, JNODE_HEARD_BANSHEE);
-+ page_clear_jnode(page, node);
-+ reiser4_uncapture_jnode(node);
-+ unhash_unformatted_jnode(node);
-+ jput(node);
-+ reiser4_exit_context(ctx);
-+ return;
-+ }
-+ spin_unlock_jnode(node);
-+
-+ /* capture page being truncated. */
-+ ret = try_capture_page_to_invalidate(page);
-+ if (ret != 0)
-+ warning("nikita-3141", "Cannot capture: %i", ret);
-+
-+ if (offset == 0) {
-+ /* remove jnode from transaction and detach it from page. */
-+ jref(node);
-+ JF_SET(node, JNODE_HEARD_BANSHEE);
-+ /* page cannot be detached from jnode concurrently, because it
-+ * is locked */
-+ reiser4_uncapture_page(page);
-+
-+ /* this detaches page from jnode, so that jdelete will not try
-+ * to lock page which is already locked */
-+ spin_lock_jnode(node);
-+ page_clear_jnode(page, node);
-+ spin_unlock_jnode(node);
-+ unhash_unformatted_jnode(node);
-+
-+ jput(node);
-+ }
-+
-+ reiser4_exit_context(ctx);
-+}
-+
-+/* helper function called from reiser4_releasepage(). It returns true if jnode
-+ * can be detached from its page and page released. */
-+int jnode_is_releasable(jnode * node/* node to check */)
-+{
-+ assert("nikita-2781", node != NULL);
-+ assert_spin_locked(&(node->guard));
-+ assert_spin_locked(&(node->load));
-+
-+	/* if some thread is currently using the jnode page, the latter cannot
-+	 * be detached */
-+ if (atomic_read(&node->d_count) != 0)
-+ return 0;
-+
-+ assert("vs-1214", !jnode_is_loaded(node));
-+
-+ /*
-+ * can only release page if real block number is assigned to it. Simple
-+ * check for ->atom wouldn't do, because it is possible for node to be
-+	 * clean, not in atom yet, and still having a fake block number. For
-+ * example, node just created in jinit_new().
-+ */
-+ if (reiser4_blocknr_is_fake(jnode_get_block(node)))
-+ return 0;
-+
-+ /*
-+ * pages prepared for write can not be released anyway, so avoid
-+ * detaching jnode from the page
-+ */
-+ if (JF_ISSET(node, JNODE_WRITE_PREPARED))
-+ return 0;
-+
-+ /*
-+ * dirty jnode cannot be released. It can however be submitted to disk
-+ * as part of early flushing, but only after getting flush-prepped.
-+ */
-+ if (JF_ISSET(node, JNODE_DIRTY))
-+ return 0;
-+
-+ /* overwrite set is only written by log writer. */
-+ if (JF_ISSET(node, JNODE_OVRWR))
-+ return 0;
-+
-+ /* jnode is already under writeback */
-+ if (JF_ISSET(node, JNODE_WRITEBACK))
-+ return 0;
-+
-+ /* don't flush bitmaps or journal records */
-+ if (!jnode_is_znode(node) && !jnode_is_unformatted(node))
-+ return 0;
-+
-+ return 1;
-+}
-+
-+/*
-+ * ->releasepage method for reiser4
-+ *
-+ * This is called by VM scanner when it comes across clean page. What we have
-+ * to do here is to check whether page can really be released (freed that is)
-+ * and if so, detach jnode from it and remove page from the page cache.
-+ *
-+ * Check for releasability is done by releasable() function.
-+ */
-+int reiser4_releasepage(struct page *page, gfp_t gfp UNUSED_ARG)
-+{
-+ jnode *node;
-+
-+ assert("nikita-2257", PagePrivate(page));
-+ assert("nikita-2259", PageLocked(page));
-+ assert("nikita-2892", !PageWriteback(page));
-+ assert("nikita-3019", reiser4_schedulable());
-+
-+ /* NOTE-NIKITA: this can be called in the context of reiser4 call. It
-+	   is not clear what to do in this case. A lot of deadlocks seem to be
-+ possible. */
-+
-+ node = jnode_by_page(page);
-+ assert("nikita-2258", node != NULL);
-+ assert("reiser4-4", page->mapping != NULL);
-+ assert("reiser4-5", page->mapping->host != NULL);
-+
-+ if (PageDirty(page))
-+ return 0;
-+
-+ /* extra page reference is used by reiser4 to protect
-+ * jnode<->page link from this ->releasepage(). */
-+ if (page_count(page) > 3)
-+ return 0;
-+
-+ /* releasable() needs jnode lock, because it looks at the jnode fields
-+ * and we need jload_lock here to avoid races with jload(). */
-+ spin_lock_jnode(node);
-+ spin_lock(&(node->load));
-+ if (jnode_is_releasable(node)) {
-+ struct address_space *mapping;
-+
-+ mapping = page->mapping;
-+ jref(node);
-+ /* there is no need to synchronize against
-+ * jnode_extent_write() here, because pages seen by
-+ * jnode_extent_write() are !releasable(). */
-+ page_clear_jnode(page, node);
-+ spin_unlock(&(node->load));
-+ spin_unlock_jnode(node);
-+
-+ /* we are under memory pressure so release jnode also. */
-+ jput(node);
-+
-+ return 1;
-+ } else {
-+ spin_unlock(&(node->load));
-+ spin_unlock_jnode(node);
-+ assert("nikita-3020", reiser4_schedulable());
-+ return 0;
-+ }
-+}
-+
-+int reiser4_readpage(struct file *file, struct page *page)
-+{
-+ assert("edward-1533", PageLocked(page));
-+ assert("edward-1534", !PageUptodate(page));
-+ assert("edward-1535", page->mapping && page->mapping->host);
-+
-+ return inode_file_plugin(page->mapping->host)->readpage(file, page);
-+}
-+
-+int reiser4_readpages(struct file *file, struct address_space *mapping,
-+ struct list_head *pages, unsigned nr_pages)
-+{
-+ return inode_file_plugin(mapping->host)->readpages(file, mapping,
-+ pages, nr_pages);
-+}
-+
-+int reiser4_writepages(struct address_space *mapping,
-+ struct writeback_control *wbc)
-+{
-+ return inode_file_plugin(mapping->host)->writepages(mapping, wbc);
-+}
-+
-+/* Make Linus happy.
-+ Local variables:
-+ c-indentation-style: "K&R"
-+ mode-name: "LC"
-+ c-basic-offset: 8
-+ tab-width: 8
-+ fill-column: 120
-+ End:
-+*/
-diff -urN linux-2.6.35.orig/fs/reiser4/block_alloc.c linux-2.6.35/fs/reiser4/block_alloc.c
---- linux-2.6.35.orig/fs/reiser4/block_alloc.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/block_alloc.c 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,1142 @@
-+/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
-+reiser4/README */
-+
-+#include "debug.h"
-+#include "dformat.h"
-+#include "plugin/plugin.h"
-+#include "txnmgr.h"
-+#include "znode.h"
-+#include "block_alloc.h"
-+#include "tree.h"
-+#include "super.h"
-+
-+#include <linux/types.h> /* for __u?? */
-+#include <linux/fs.h> /* for struct super_block */
-+#include <linux/spinlock.h>
-+
-+/* THE REISER4 DISK SPACE RESERVATION SCHEME. */
-+
-+/* We need to be able to reserve enough disk space to ensure that an atomic
-+ operation will have enough disk space to flush (see flush.c and
-+ http://namesys.com/v4/v4.html) and commit it once it is started.
-+
-+ In our design a call for reserving disk space may fail but not an actual
-+ block allocation.
-+
-+ All free blocks, already allocated blocks, and all kinds of reserved blocks
-+ are counted in different per-fs block counters.
-+
-+ A reiser4 super block's set of block counters currently is:
-+
-+ free -- free blocks,
-+ used -- already allocated blocks,
-+
-+ grabbed -- initially reserved for performing an fs operation, those blocks
-+ are taken from free blocks, then grabbed disk space leaks from grabbed
-+ blocks counter to other counters like "fake allocated", "flush
-+ reserved", "used", the rest of not used grabbed space is returned to
-+ free space at the end of fs operation;
-+
-+ fake allocated -- counts all nodes without real disk block numbers assigned,
-+ we have separate accounting for formatted and unformatted
-+ nodes (for easier debugging);
-+
-+ flush reserved -- disk space needed for flushing and committing an atom.
-+ Each dirty already allocated block could be written as a
-+ part of atom's overwrite set or as a part of atom's
-+		     relocate set. In both cases one additional block is needed,
-+ it is used as a wandered block if we do overwrite or as a
-+ new location for a relocated block.
-+
-+ In addition, blocks in some states are counted on per-thread and per-atom
-+ basis. A reiser4 context has a counter of blocks grabbed by this transaction
-+ and the sb's grabbed blocks counter is a sum of grabbed blocks counter values
-+ of each reiser4 context. Each reiser4 atom has a counter of "flush reserved"
-+ blocks, which are reserved for flush processing and atom commit. */
-+
-+/* AN EXAMPLE: suppose we insert new item to the reiser4 tree. We estimate
-+ number of blocks to grab for most expensive case of balancing when the leaf
-+ node we insert new item to gets split and new leaf node is allocated.
-+
-+ So, we need to grab blocks for
-+
-+ 1) one block for possible dirtying the node we insert an item to. That block
-+ would be used for node relocation at flush time or for allocating of a
-+ wandered one, it depends what will be a result (what set, relocate or
-+ overwrite the node gets assigned to) of the node processing by the flush
-+ algorithm.
-+
-+ 2) one block for either allocating a new node, or dirtying of right or left
-+ clean neighbor, only one case may happen.
-+
-+ VS-FIXME-HANS: why can only one case happen? I would expect to see dirtying
-+ of left neighbor, right neighbor, current node, and creation of new node.
-+ Have I forgotten something? email me.
-+
-+ These grabbed blocks are counted in both reiser4 context "grabbed blocks"
-+ counter and in the fs-wide one (both ctx->grabbed_blocks and
-+ sbinfo->blocks_grabbed get incremented by 2), sb's free blocks counter is
-+ decremented by 2.
-+
-+   Suppose both blocks were spent for dirtying of an already allocated clean
-+ node (one block went from "grabbed" to "flush reserved") and for new block
-+ allocating (one block went from "grabbed" to "fake allocated formatted").
-+
-+ Inserting of a child pointer to the parent node caused parent node to be
-+   split, the balancing code takes care of this by grabbing the necessary space
-+ immediately by calling reiser4_grab with BA_RESERVED flag set which means
-+ "can use the 5% reserved disk space".
-+
-+ At this moment insertion completes and grabbed blocks (if they were not used)
-+ should be returned to the free space counter.
-+
-+ However the atom life-cycle is not completed. The atom had one "flush
-+ reserved" block added by our insertion and the new fake allocated node is
-+ counted as a "fake allocated formatted" one. The atom has to be fully
-+ processed by flush before commit. Suppose that the flush moved the first,
-+ already allocated node to the atom's overwrite list, the new fake allocated
-+ node, obviously, went into the atom relocate set. The reiser4 flush
-+ allocates the new node using one unit from "fake allocated formatted"
-+ counter, the log writer uses one from "flush reserved" for wandered block
-+ allocation.
-+
-+ And, it is not the end. When the wandered block is deallocated after the
-+ atom gets fully played (see wander.c for term description), the disk space
-+ occupied for it is returned to free blocks. */
-+
-+/* BLOCK NUMBERS */
-+
-+/* Any reiser4 node has a block number assigned to it. We use these numbers for
-+ indexing in hash tables, so if a block has not yet been assigned a location
-+ on disk we need to give it a temporary fake block number.
-+
-+ Current implementation of reiser4 uses 64-bit integers for block numbers. We
-+ use highest bit in 64-bit block number to distinguish fake and real block
-+   numbers. So, only 63 bits may be used for addressing real device
-+ blocks. That "fake" block numbers space is divided into subspaces of fake
-+ block numbers for data blocks and for shadow (working) bitmap blocks.
-+
-+ Fake block numbers for data blocks are generated by a cyclic counter, which
-+ gets incremented after each real block allocation. We assume that it is
-+ impossible to overload this counter during one transaction life. */
-+
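
To make the bit layout above concrete, here is a small standalone sketch (plain
userspace C, reusing the mask values that block_alloc.h defines further down in
this patch) of how the top two bits classify a 64-bit block number:

/* sketch only -- mask values copied from block_alloc.h */
#include <stdio.h>
#include <stdint.h>

#define FAKE_BIT_MASK      0x8000000000000000ULL /* REISER4_FAKE_BLOCKNR_BIT_MASK */
#define STATUS_BIT_MASK    0xC000000000000000ULL /* REISER4_BLOCKNR_STATUS_BIT_MASK */
#define UNALLOCATED_VALUE  0xC000000000000000ULL /* REISER4_UNALLOCATED_STATUS_VALUE */
#define BITMAP_VALUE       0x8000000000000000ULL /* REISER4_BITMAP_BLOCKS_STATUS_VALUE */

int main(void)
{
	/* a fake number for an unallocated node: cyclic-counter value plus
	   the "unallocated" status stamped into the top two bits */
	uint64_t blk = 42 | UNALLOCATED_VALUE;

	/* mirrors reiser4_blocknr_is_fake(): only the highest bit matters */
	printf("fake:          %d\n", (blk & FAKE_BIT_MASK) ? 1 : 0);	/* 1 */
	/* the status values tell unallocated nodes (bits 11) apart from
	   working-bitmap shadow blocks (bits 10) */
	printf("unallocated:   %d\n",
	       (blk & STATUS_BIT_MASK) == UNALLOCATED_VALUE);	/* 1 */
	printf("bitmap shadow: %d\n",
	       (blk & STATUS_BIT_MASK) == BITMAP_VALUE);	/* 0 */
	return 0;
}

Both fake subspaces have the highest bit set; the second-highest bit is what
separates unallocated nodes from working-bitmap shadow blocks.
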
-+/* Initialize a blocknr hint. */
-+void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint)
-+{
-+ memset(hint, 0, sizeof(reiser4_blocknr_hint));
-+}
-+
-+/* Release any resources of a blocknr hint. */
-+void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint UNUSED_ARG)
-+{
-+/* No resources should be freed in current blocknr_hint implementation. */
-+}
-+
-+/* see above for explanation of fake block number. */
-+/* Audited by: green(2002.06.11) */
-+int reiser4_blocknr_is_fake(const reiser4_block_nr * da)
-+{
-+ /* The reason for not simply returning result of '&' operation is that
-+ while return value is (possibly 32bit) int, the reiser4_block_nr is
-+ at least 64 bits long, and high bit (which is the only possible
-+ non zero bit after the masking) would be stripped off */
-+ return (*da & REISER4_FAKE_BLOCKNR_BIT_MASK) ? 1 : 0;
-+}
-+
-+/* Static functions for <reiser4 super block>/<reiser4 context> block counters
-+   arithmetic. Mostly, they are isolated to avoid coding the same assertions in
-+ several places. */
-+static void sub_from_ctx_grabbed(reiser4_context * ctx, __u64 count)
-+{
-+ BUG_ON(ctx->grabbed_blocks < count);
-+ assert("zam-527", ctx->grabbed_blocks >= count);
-+ ctx->grabbed_blocks -= count;
-+}
-+
-+static void add_to_ctx_grabbed(reiser4_context * ctx, __u64 count)
-+{
-+ ctx->grabbed_blocks += count;
-+}
-+
-+static void sub_from_sb_grabbed(reiser4_super_info_data * sbinfo, __u64 count)
-+{
-+ assert("zam-525", sbinfo->blocks_grabbed >= count);
-+ sbinfo->blocks_grabbed -= count;
-+}
-+
-+/* Decrease the counter of block reserved for flush in super block. */
-+static void
-+sub_from_sb_flush_reserved(reiser4_super_info_data * sbinfo, __u64 count)
-+{
-+ assert("vpf-291", sbinfo->blocks_flush_reserved >= count);
-+ sbinfo->blocks_flush_reserved -= count;
-+}
-+
-+static void
-+sub_from_sb_fake_allocated(reiser4_super_info_data * sbinfo, __u64 count,
-+ reiser4_ba_flags_t flags)
-+{
-+ if (flags & BA_FORMATTED) {
-+ assert("zam-806", sbinfo->blocks_fake_allocated >= count);
-+ sbinfo->blocks_fake_allocated -= count;
-+ } else {
-+ assert("zam-528",
-+ sbinfo->blocks_fake_allocated_unformatted >= count);
-+ sbinfo->blocks_fake_allocated_unformatted -= count;
-+ }
-+}
-+
-+static void sub_from_sb_used(reiser4_super_info_data * sbinfo, __u64 count)
-+{
-+ assert("zam-530",
-+ sbinfo->blocks_used >= count + sbinfo->min_blocks_used);
-+ sbinfo->blocks_used -= count;
-+}
-+
-+static void
-+sub_from_cluster_reserved(reiser4_super_info_data * sbinfo, __u64 count)
-+{
-+ assert("edward-501", sbinfo->blocks_clustered >= count);
-+ sbinfo->blocks_clustered -= count;
-+}
-+
-+/* Increase the counter of block reserved for flush in atom. */
-+static void add_to_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
-+{
-+ assert("zam-772", atom != NULL);
-+ assert_spin_locked(&(atom->alock));
-+ atom->flush_reserved += count;
-+}
-+
-+/* Decrease the counter of block reserved for flush in atom. */
-+static void sub_from_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
-+{
-+ assert("zam-774", atom != NULL);
-+ assert_spin_locked(&(atom->alock));
-+ assert("nikita-2790", atom->flush_reserved >= count);
-+ atom->flush_reserved -= count;
-+}
-+
-+/* super block has 6 counters: free, used, grabbed, fake allocated
-+ (formatted and unformatted) and flush reserved. Their sum must be
-+ number of blocks on a device. This function checks this */
-+int reiser4_check_block_counters(const struct super_block *super)
-+{
-+ __u64 sum;
-+
-+ sum = reiser4_grabbed_blocks(super) + reiser4_free_blocks(super) +
-+ reiser4_data_blocks(super) + reiser4_fake_allocated(super) +
-+ reiser4_fake_allocated_unformatted(super) + reiser4_flush_reserved(super) +
-+ reiser4_clustered_blocks(super);
-+ if (reiser4_block_count(super) != sum) {
-+ printk("super block counters: "
-+ "used %llu, free %llu, "
-+		       "grabbed %llu, fake allocated (formatted %llu, unformatted %llu), "
-+ "reserved %llu, clustered %llu, sum %llu, must be (block count) %llu\n",
-+ (unsigned long long)reiser4_data_blocks(super),
-+ (unsigned long long)reiser4_free_blocks(super),
-+ (unsigned long long)reiser4_grabbed_blocks(super),
-+ (unsigned long long)reiser4_fake_allocated(super),
-+ (unsigned long long)
-+ reiser4_fake_allocated_unformatted(super),
-+ (unsigned long long)reiser4_flush_reserved(super),
-+ (unsigned long long)reiser4_clustered_blocks(super),
-+ (unsigned long long)sum,
-+ (unsigned long long)reiser4_block_count(super));
-+ return 0;
-+ }
-+ return 1;
-+}
-+
-+/* Adjust "working" free blocks counter for number of blocks we are going to
-+ allocate. Record number of grabbed blocks in fs-wide and per-thread
-+ counters. This function should be called before bitmap scanning or
-+ allocating fake block numbers
-+
-+ @super -- pointer to reiser4 super block;
-+ @count -- number of blocks we reserve;
-+
-+ @return -- 0 if success, -ENOSPC, if all
-+ free blocks are preserved or already allocated.
-+*/
-+
-+static int
-+reiser4_grab(reiser4_context * ctx, __u64 count, reiser4_ba_flags_t flags)
-+{
-+ __u64 free_blocks;
-+ int ret = 0, use_reserved = flags & BA_RESERVED;
-+ reiser4_super_info_data *sbinfo;
-+
-+ assert("vs-1276", ctx == get_current_context());
-+
-+ /* Do not grab anything on ro-mounted fs. */
-+ if (rofs_super(ctx->super)) {
-+ ctx->grab_enabled = 0;
-+ return 0;
-+ }
-+
-+ sbinfo = get_super_private(ctx->super);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ free_blocks = sbinfo->blocks_free;
-+
-+ if ((use_reserved && free_blocks < count) ||
-+ (!use_reserved && free_blocks < count + sbinfo->blocks_reserved)) {
-+ ret = RETERR(-ENOSPC);
-+ goto unlock_and_ret;
-+ }
-+
-+ add_to_ctx_grabbed(ctx, count);
-+
-+ sbinfo->blocks_grabbed += count;
-+ sbinfo->blocks_free -= count;
-+
-+#if REISER4_DEBUG
-+ if (ctx->grabbed_initially == 0)
-+ ctx->grabbed_initially = count;
-+#endif
-+
-+ assert("nikita-2986", reiser4_check_block_counters(ctx->super));
-+
-+ /* disable grab space in current context */
-+ ctx->grab_enabled = 0;
-+
-+unlock_and_ret:
-+ spin_unlock_reiser4_super(sbinfo);
-+
-+ return ret;
-+}
-+
-+int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags)
-+{
-+ int ret;
-+ reiser4_context *ctx;
-+
-+ assert("nikita-2964", ergo(flags & BA_CAN_COMMIT,
-+ lock_stack_isclean(get_current_lock_stack
-+ ())));
-+ ctx = get_current_context();
-+ if (!(flags & BA_FORCE) && !is_grab_enabled(ctx))
-+ return 0;
-+
-+ ret = reiser4_grab(ctx, count, flags);
-+ if (ret == -ENOSPC) {
-+
-+		/* Try to commit all transactions if the BA_CAN_COMMIT flag is
-+		   present */
-+ if (flags & BA_CAN_COMMIT) {
-+ txnmgr_force_commit_all(ctx->super, 0);
-+ ctx->grab_enabled = 1;
-+ ret = reiser4_grab(ctx, count, flags);
-+ }
-+ }
-+ /*
-+	 * allocation from reserved pool cannot fail. This is a severe error.
-+ */
-+ assert("nikita-3005", ergo(flags & BA_RESERVED, ret == 0));
-+ return ret;
-+}
-+
-+/*
-+ * SPACE RESERVED FOR UNLINK/TRUNCATE
-+ *
-+ * Unlink and truncate require space in transaction (to update stat data, at
-+ * least). But we don't want rm(1) to fail with "No space on device" error.
-+ *
-+ * Solution is to reserve 5% of disk space for truncates and
-+ * unlinks. Specifically, normal space grabbing requests don't grab space from
-+ * reserved area. Only requests with BA_RESERVED bit in flags are allowed to
-+ * drain it. Per super block delete mutex is used to allow only one
-+ * thread at a time to grab from reserved area.
-+ *
-+ * Grabbing from reserved area should always be performed with BA_CAN_COMMIT
-+ * flag.
-+ *
-+ */
-+
-+int reiser4_grab_reserved(struct super_block *super,
-+ __u64 count, reiser4_ba_flags_t flags)
-+{
-+ reiser4_super_info_data *sbinfo = get_super_private(super);
-+
-+ assert("nikita-3175", flags & BA_CAN_COMMIT);
-+
-+	/* Check whether the delete mutex is already taken by us; we assume that
-+ * reading of machine word is atomic. */
-+ if (sbinfo->delete_mutex_owner == current) {
-+ if (reiser4_grab_space
-+ (count, (flags | BA_RESERVED) & ~BA_CAN_COMMIT)) {
-+ warning("zam-1003",
-+ "nested call of grab_reserved fails count=(%llu)",
-+ (unsigned long long)count);
-+ reiser4_release_reserved(super);
-+ return RETERR(-ENOSPC);
-+ }
-+ return 0;
-+ }
-+
-+ if (reiser4_grab_space(count, flags)) {
-+ mutex_lock(&sbinfo->delete_mutex);
-+ assert("nikita-2929", sbinfo->delete_mutex_owner == NULL);
-+ sbinfo->delete_mutex_owner = current;
-+
-+ if (reiser4_grab_space(count, flags | BA_RESERVED)) {
-+ warning("zam-833",
-+ "reserved space is not enough (%llu)",
-+ (unsigned long long)count);
-+ reiser4_release_reserved(super);
-+ return RETERR(-ENOSPC);
-+ }
-+ }
-+ return 0;
-+}
-+
-+void reiser4_release_reserved(struct super_block *super)
-+{
-+ reiser4_super_info_data *info;
-+
-+ info = get_super_private(super);
-+ if (info->delete_mutex_owner == current) {
-+ info->delete_mutex_owner = NULL;
-+ mutex_unlock(&info->delete_mutex);
-+ }
-+}
-+
-+static reiser4_super_info_data *grabbed2fake_allocated_head(int count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sub_from_ctx_grabbed(ctx, count);
-+
-+ sbinfo = get_super_private(ctx->super);
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_grabbed(sbinfo, count);
-+ /* return sbinfo locked */
-+ return sbinfo;
-+}
-+
-+/* is called after @count fake block numbers are allocated and pointers to
-+ those blocks are inserted into tree. */
-+static void grabbed2fake_allocated_formatted(void)
-+{
-+ reiser4_super_info_data *sbinfo;
-+
-+ sbinfo = grabbed2fake_allocated_head(1);
-+ sbinfo->blocks_fake_allocated++;
-+
-+ assert("vs-922", reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/**
-+ * grabbed2fake_allocated_unformatted
-+ * @count:
-+ *
-+ */
-+static void grabbed2fake_allocated_unformatted(int count)
-+{
-+ reiser4_super_info_data *sbinfo;
-+
-+ sbinfo = grabbed2fake_allocated_head(count);
-+ sbinfo->blocks_fake_allocated_unformatted += count;
-+
-+ assert("vs-9221", reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+void grabbed2cluster_reserved(int count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sub_from_ctx_grabbed(ctx, count);
-+
-+ sbinfo = get_super_private(ctx->super);
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_grabbed(sbinfo, count);
-+ sbinfo->blocks_clustered += count;
-+
-+ assert("edward-504", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+void cluster_reserved2grabbed(int count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+
-+ sbinfo = get_super_private(ctx->super);
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_cluster_reserved(sbinfo, count);
-+ sbinfo->blocks_grabbed += count;
-+
-+ assert("edward-505", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+ add_to_ctx_grabbed(ctx, count);
-+}
-+
-+void cluster_reserved2free(int count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ cluster_reserved2grabbed(count);
-+ grabbed2free(ctx, sbinfo, count);
-+}
-+
-+static DEFINE_SPINLOCK(fake_lock);
-+static reiser4_block_nr fake_gen = 0;
-+
-+/**
-+ * assign_fake_blocknr
-+ * @blocknr:
-+ * @count:
-+ *
-+ * Obtain a fake block number for new node which will be used to refer to
-+ * this newly allocated node until real allocation is done.
-+ */
-+static void assign_fake_blocknr(reiser4_block_nr *blocknr, int count)
-+{
-+ spin_lock(&fake_lock);
-+ *blocknr = fake_gen;
-+ fake_gen += count;
-+ spin_unlock(&fake_lock);
-+
-+ BUG_ON(*blocknr & REISER4_BLOCKNR_STATUS_BIT_MASK);
-+ /**blocknr &= ~REISER4_BLOCKNR_STATUS_BIT_MASK;*/
-+ *blocknr |= REISER4_UNALLOCATED_STATUS_VALUE;
-+ assert("zam-394", zlook(current_tree, blocknr) == NULL);
-+}
-+
-+int assign_fake_blocknr_formatted(reiser4_block_nr * blocknr)
-+{
-+ assign_fake_blocknr(blocknr, 1);
-+ grabbed2fake_allocated_formatted();
-+ return 0;
-+}
-+
-+/**
-+ * fake_blocknrs_unformatted
-+ * @count: number of fake numbers to get
-+ *
-+ * Allocates @count fake block numbers which will be assigned to jnodes
-+ */
-+reiser4_block_nr fake_blocknr_unformatted(int count)
-+{
-+ reiser4_block_nr blocknr;
-+
-+ assign_fake_blocknr(&blocknr, count);
-+ grabbed2fake_allocated_unformatted(count);
-+
-+ return blocknr;
-+}
-+
-+/* adjust sb block counters, if real (on-disk) block allocation immediately
-+ follows grabbing of free disk space. */
-+static void grabbed2used(reiser4_context *ctx, reiser4_super_info_data *sbinfo,
-+ __u64 count)
-+{
-+ sub_from_ctx_grabbed(ctx, count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_grabbed(sbinfo, count);
-+ sbinfo->blocks_used += count;
-+
-+ assert("nikita-2679", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* adjust sb block counters when @count unallocated blocks get mapped to disk */
-+static void fake_allocated2used(reiser4_super_info_data *sbinfo, __u64 count,
-+ reiser4_ba_flags_t flags)
-+{
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_fake_allocated(sbinfo, count, flags);
-+ sbinfo->blocks_used += count;
-+
-+ assert("nikita-2680",
-+ reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+static void flush_reserved2used(txn_atom * atom, __u64 count)
-+{
-+ reiser4_super_info_data *sbinfo;
-+
-+ assert("zam-787", atom != NULL);
-+ assert_spin_locked(&(atom->alock));
-+
-+ sub_from_atom_flush_reserved_nolock(atom, (__u32) count);
-+
-+ sbinfo = get_current_super_private();
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_flush_reserved(sbinfo, count);
-+ sbinfo->blocks_used += count;
-+
-+ assert("zam-789",
-+ reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* update the per fs blocknr hint default value. */
-+void
-+update_blocknr_hint_default(const struct super_block *s,
-+ const reiser4_block_nr * block)
-+{
-+ reiser4_super_info_data *sbinfo = get_super_private(s);
-+
-+ assert("nikita-3342", !reiser4_blocknr_is_fake(block));
-+
-+ spin_lock_reiser4_super(sbinfo);
-+ if (*block < sbinfo->block_count) {
-+ sbinfo->blocknr_hint_default = *block;
-+ } else {
-+ warning("zam-676",
-+ "block number %llu is too large to be used in a blocknr hint\n",
-+ (unsigned long long)*block);
-+ dump_stack();
-+ DEBUGON(1);
-+ }
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* get current value of the default blocknr hint. */
-+void get_blocknr_hint_default(reiser4_block_nr * result)
-+{
-+ reiser4_super_info_data *sbinfo = get_current_super_private();
-+
-+ spin_lock_reiser4_super(sbinfo);
-+ *result = sbinfo->blocknr_hint_default;
-+ assert("zam-677", *result < sbinfo->block_count);
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* Allocate "real" disk blocks by calling a proper space allocation plugin
-+ * method. Blocks are allocated in one contiguous disk region. The plugin
-+ * independent part accounts blocks by subtracting allocated amount from grabbed
-+ * or fake block counter and adds the same amount to the counter of allocated
-+ * blocks.
-+ *
-+ * @hint -- a reiser4 blocknr hint object which contains further block
-+ * allocation hints and parameters (search start, a stage of block
-+ * which will be mapped to disk, etc.),
-+ * @blk -- an out parameter for the beginning of the allocated region,
-+ * @len -- in/out parameter, it should contain the maximum number of allocated
-+ * blocks, after block allocation completes, it contains the length of
-+ * allocated disk region.
-+ * @flags -- see reiser4_ba_flags_t description.
-+ *
-+ * @return -- 0 if success, error code otherwise.
-+ */
-+int
-+reiser4_alloc_blocks(reiser4_blocknr_hint * hint, reiser4_block_nr * blk,
-+ reiser4_block_nr * len, reiser4_ba_flags_t flags)
-+{
-+ __u64 needed = *len;
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+ int ret;
-+
-+ assert("zam-986", hint != NULL);
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ /* For write-optimized data we use default search start value, which is
-+ * close to last write location. */
-+ if (flags & BA_USE_DEFAULT_SEARCH_START)
-+ get_blocknr_hint_default(&hint->blk);
-+
-+ /* VITALY: allocator should grab this for internal/tx-lists/similar
-+ only. */
-+/* VS-FIXME-HANS: why is this comment above addressed to vitaly (from vitaly)?*/
-+ if (hint->block_stage == BLOCK_NOT_COUNTED) {
-+ ret = reiser4_grab_space_force(*len, flags);
-+ if (ret != 0)
-+ return ret;
-+ }
-+
-+ ret =
-+ sa_alloc_blocks(reiser4_get_space_allocator(ctx->super),
-+ hint, (int)needed, blk, len);
-+
-+ if (!ret) {
-+ assert("zam-680", *blk < reiser4_block_count(ctx->super));
-+ assert("zam-681",
-+ *blk + *len <= reiser4_block_count(ctx->super));
-+
-+ if (flags & BA_PERMANENT) {
-+ /* we assume that current atom exists at this moment */
-+ txn_atom *atom = get_current_atom_locked();
-+ atom->nr_blocks_allocated += *len;
-+ spin_unlock_atom(atom);
-+ }
-+
-+ switch (hint->block_stage) {
-+ case BLOCK_NOT_COUNTED:
-+ case BLOCK_GRABBED:
-+ grabbed2used(ctx, sbinfo, *len);
-+ break;
-+ case BLOCK_UNALLOCATED:
-+ fake_allocated2used(sbinfo, *len, flags);
-+ break;
-+ case BLOCK_FLUSH_RESERVED:
-+ {
-+ txn_atom *atom = get_current_atom_locked();
-+ flush_reserved2used(atom, *len);
-+ spin_unlock_atom(atom);
-+ }
-+ break;
-+ default:
-+ impossible("zam-531", "wrong block stage");
-+ }
-+ } else {
-+ assert("zam-821",
-+ ergo(hint->max_dist == 0
-+ && !hint->backward, ret != -ENOSPC));
-+ if (hint->block_stage == BLOCK_NOT_COUNTED)
-+ grabbed2free(ctx, sbinfo, needed);
-+ }
-+
-+ return ret;
-+}
-+
-+/* used -> fake_allocated -> grabbed -> free */
-+
-+/* adjust sb block counters when @count unallocated blocks get unmapped from
-+ disk */
-+static void
-+used2fake_allocated(reiser4_super_info_data * sbinfo, __u64 count,
-+ int formatted)
-+{
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ if (formatted)
-+ sbinfo->blocks_fake_allocated += count;
-+ else
-+ sbinfo->blocks_fake_allocated_unformatted += count;
-+
-+ sub_from_sb_used(sbinfo, count);
-+
-+ assert("nikita-2681",
-+ reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+static void
-+used2flush_reserved(reiser4_super_info_data * sbinfo, txn_atom * atom,
-+ __u64 count, reiser4_ba_flags_t flags UNUSED_ARG)
-+{
-+ assert("nikita-2791", atom != NULL);
-+ assert_spin_locked(&(atom->alock));
-+
-+ add_to_atom_flush_reserved_nolock(atom, (__u32) count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sbinfo->blocks_flush_reserved += count;
-+ /*add_to_sb_flush_reserved(sbinfo, count); */
-+ sub_from_sb_used(sbinfo, count);
-+
-+ assert("nikita-2681",
-+ reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* disk space, virtually used by fake block numbers is counted as "grabbed"
-+ again. */
-+static void
-+fake_allocated2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo,
-+ __u64 count, reiser4_ba_flags_t flags)
-+{
-+ add_to_ctx_grabbed(ctx, count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ assert("nikita-2682", reiser4_check_block_counters(ctx->super));
-+
-+ sbinfo->blocks_grabbed += count;
-+ sub_from_sb_fake_allocated(sbinfo, count, flags & BA_FORMATTED);
-+
-+ assert("nikita-2683", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ fake_allocated2grabbed(ctx, sbinfo, count, flags);
-+ grabbed2free(ctx, sbinfo, count);
-+}
-+
-+void grabbed2free_mark(__u64 mark)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ assert("nikita-3007", (__s64) mark >= 0);
-+ assert("nikita-3006", ctx->grabbed_blocks >= mark);
-+ grabbed2free(ctx, sbinfo, ctx->grabbed_blocks - mark);
-+}
-+
-+/**
-+ * grabbed2free - adjust grabbed and free block counters
-+ * @ctx: context to update grabbed block counter of
-+ * @sbinfo: super block to update grabbed and free block counters of
-+ * @count: number of blocks to adjust counters by
-+ *
-+ * Decreases context's and per filesystem's counters of grabbed
-+ * blocks. Increases per filesystem's counter of free blocks.
-+ */
-+void grabbed2free(reiser4_context *ctx, reiser4_super_info_data *sbinfo,
-+ __u64 count)
-+{
-+ sub_from_ctx_grabbed(ctx, count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sub_from_sb_grabbed(sbinfo, count);
-+ sbinfo->blocks_free += count;
-+ assert("nikita-2684", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ assert("vs-1095", atom);
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ sub_from_ctx_grabbed(ctx, count);
-+
-+ add_to_atom_flush_reserved_nolock(atom, count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sbinfo->blocks_flush_reserved += count;
-+ sub_from_sb_grabbed(sbinfo, count);
-+
-+ assert("vpf-292", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+void grabbed2flush_reserved(__u64 count)
-+{
-+ txn_atom *atom = get_current_atom_locked();
-+
-+ grabbed2flush_reserved_nolock(atom, count);
-+
-+ spin_unlock_atom(atom);
-+}
-+
-+void flush_reserved2grabbed(txn_atom * atom, __u64 count)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ assert("nikita-2788", atom != NULL);
-+ assert_spin_locked(&(atom->alock));
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ add_to_ctx_grabbed(ctx, count);
-+
-+ sub_from_atom_flush_reserved_nolock(atom, (__u32) count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sbinfo->blocks_grabbed += count;
-+ sub_from_sb_flush_reserved(sbinfo, count);
-+
-+ assert("vpf-292", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/**
-+ * all_grabbed2free - releases all blocks grabbed in context
-+ *
-+ * Decreases context's and super block's grabbed block counters by number of
-+ * blocks grabbed by current context and increases super block's free block
-+ * counter correspondingly.
-+ */
-+void all_grabbed2free(void)
-+{
-+ reiser4_context *ctx = get_current_context();
-+
-+ grabbed2free(ctx, get_super_private(ctx->super), ctx->grabbed_blocks);
-+}
-+
-+/* adjust sb block counters if real (on-disk) blocks do not become unallocated
-+ after freeing, @count blocks become "grabbed". */
-+static void
-+used2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo,
-+ __u64 count)
-+{
-+ add_to_ctx_grabbed(ctx, count);
-+
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sbinfo->blocks_grabbed += count;
-+ sub_from_sb_used(sbinfo, count);
-+
-+ assert("nikita-2685", reiser4_check_block_counters(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+/* this used to be done through used2grabbed and grabbed2free*/
-+static void used2free(reiser4_super_info_data * sbinfo, __u64 count)
-+{
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ sbinfo->blocks_free += count;
-+ sub_from_sb_used(sbinfo, count);
-+
-+ assert("nikita-2685",
-+ reiser4_check_block_counters(reiser4_get_current_sb()));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+}
-+
-+#if REISER4_DEBUG
-+
-+/* check "allocated" state of given block range */
-+static void
-+reiser4_check_blocks(const reiser4_block_nr * start,
-+ const reiser4_block_nr * len, int desired)
-+{
-+ sa_check_blocks(start, len, desired);
-+}
-+
-+/* check "allocated" state of given block */
-+void reiser4_check_block(const reiser4_block_nr * block, int desired)
-+{
-+ const reiser4_block_nr one = 1;
-+
-+ reiser4_check_blocks(block, &one, desired);
-+}
-+
-+#endif
-+
-+/* Blocks deallocation function may do an actual deallocation through space
-+ plugin allocation or store deleted block numbers in atom's delete_set data
-+   structure, depending on the @defer parameter. */
-+
-+/* if BA_DEFER bit is not turned on, @target_stage means the stage of blocks
-+ which will be deleted from WORKING bitmap. They might be just unmapped from
-+ disk, or freed but disk space is still grabbed by current thread, or these
-+ blocks must not be counted in any reiser4 sb block counters,
-+ see block_stage_t comment */
-+
-+/* BA_FORMATTED bit is only used when BA_DEFER is not present: it is used to
-+ distinguish blocks allocated for unformatted and formatted nodes */
-+
-+int
-+reiser4_dealloc_blocks(const reiser4_block_nr * start,
-+ const reiser4_block_nr * len,
-+ block_stage_t target_stage, reiser4_ba_flags_t flags)
-+{
-+ txn_atom *atom = NULL;
-+ int ret;
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ if (REISER4_DEBUG) {
-+ assert("zam-431", *len != 0);
-+ assert("zam-432", *start != 0);
-+ assert("zam-558", !reiser4_blocknr_is_fake(start));
-+
-+ spin_lock_reiser4_super(sbinfo);
-+ assert("zam-562", *start < sbinfo->block_count);
-+ spin_unlock_reiser4_super(sbinfo);
-+ }
-+
-+ if (flags & BA_DEFER) {
-+ blocknr_set_entry *bsep = NULL;
-+
-+ /* storing deleted block numbers in a blocknr set
-+ datastructure for further actual deletion */
-+ do {
-+ atom = get_current_atom_locked();
-+ assert("zam-430", atom != NULL);
-+
-+ ret =
-+ blocknr_set_add_extent(atom, &atom->delete_set,
-+ &bsep, start, len);
-+
-+ if (ret == -ENOMEM)
-+ return ret;
-+
-+ /* This loop might spin at most two times */
-+ } while (ret == -E_REPEAT);
-+
-+ assert("zam-477", ret == 0);
-+ assert("zam-433", atom != NULL);
-+
-+ spin_unlock_atom(atom);
-+
-+ } else {
-+ assert("zam-425", get_current_super_private() != NULL);
-+ sa_dealloc_blocks(reiser4_get_space_allocator(ctx->super),
-+ *start, *len);
-+
-+ if (flags & BA_PERMANENT) {
-+ /* These blocks were counted as allocated, we have to
-+ * revert it back if allocation is discarded. */
-+ txn_atom *atom = get_current_atom_locked();
-+ atom->nr_blocks_allocated -= *len;
-+ spin_unlock_atom(atom);
-+ }
-+
-+ switch (target_stage) {
-+ case BLOCK_NOT_COUNTED:
-+ assert("vs-960", flags & BA_FORMATTED);
-+ /* VITALY: This is what was grabbed for
-+ internal/tx-lists/similar only */
-+ used2free(sbinfo, *len);
-+ break;
-+
-+ case BLOCK_GRABBED:
-+ used2grabbed(ctx, sbinfo, *len);
-+ break;
-+
-+ case BLOCK_UNALLOCATED:
-+ used2fake_allocated(sbinfo, *len, flags & BA_FORMATTED);
-+ break;
-+
-+ case BLOCK_FLUSH_RESERVED:{
-+ txn_atom *atom;
-+
-+ atom = get_current_atom_locked();
-+ used2flush_reserved(sbinfo, atom, *len,
-+ flags & BA_FORMATTED);
-+ spin_unlock_atom(atom);
-+ break;
-+ }
-+ default:
-+ impossible("zam-532", "wrong block stage");
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* wrappers for block allocator plugin methods */
-+int reiser4_pre_commit_hook(void)
-+{
-+ assert("zam-502", get_current_super_private() != NULL);
-+ sa_pre_commit_hook();
-+ return 0;
-+}
-+
-+/* an actor which applies delete set to block allocator data */
-+static int
-+apply_dset(txn_atom * atom UNUSED_ARG, const reiser4_block_nr * a,
-+ const reiser4_block_nr * b, void *data UNUSED_ARG)
-+{
-+ reiser4_context *ctx;
-+ reiser4_super_info_data *sbinfo;
-+
-+ __u64 len = 1;
-+
-+ ctx = get_current_context();
-+ sbinfo = get_super_private(ctx->super);
-+
-+ assert("zam-877", atom->stage >= ASTAGE_PRE_COMMIT);
-+ assert("zam-552", sbinfo != NULL);
-+
-+ if (b != NULL)
-+ len = *b;
-+
-+ if (REISER4_DEBUG) {
-+ spin_lock_reiser4_super(sbinfo);
-+
-+ assert("zam-554", *a < reiser4_block_count(ctx->super));
-+ assert("zam-555", *a + len <= reiser4_block_count(ctx->super));
-+
-+ spin_unlock_reiser4_super(sbinfo);
-+ }
-+
-+ sa_dealloc_blocks(&sbinfo->space_allocator, *a, len);
-+ /* adjust sb block counters */
-+ used2free(sbinfo, len);
-+ return 0;
-+}
-+
-+void reiser4_post_commit_hook(void)
-+{
-+ txn_atom *atom;
-+
-+ atom = get_current_atom_locked();
-+ assert("zam-452", atom->stage == ASTAGE_POST_COMMIT);
-+ spin_unlock_atom(atom);
-+
-+ /* do the block deallocation which was deferred
-+ until commit is done */
-+ blocknr_set_iterator(atom, &atom->delete_set, apply_dset, NULL, 1);
-+
-+ assert("zam-504", get_current_super_private() != NULL);
-+ sa_post_commit_hook();
-+}
-+
-+void reiser4_post_write_back_hook(void)
-+{
-+ assert("zam-504", get_current_super_private() != NULL);
-+
-+ sa_post_commit_hook();
-+}
-+
-+/*
-+ Local variables:
-+ c-indentation-style: "K&R"
-+ mode-name: "LC"
-+ c-basic-offset: 8
-+ tab-width: 8
-+ fill-column: 120
-+ scroll-step: 1
-+ End:
-+*/
-diff -urN linux-2.6.35.orig/fs/reiser4/block_alloc.h linux-2.6.35/fs/reiser4/block_alloc.h
---- linux-2.6.35.orig/fs/reiser4/block_alloc.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/block_alloc.h 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,177 @@
-+/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
-+
-+#if !defined(__FS_REISER4_BLOCK_ALLOC_H__)
-+#define __FS_REISER4_BLOCK_ALLOC_H__
-+
-+#include "dformat.h"
-+#include "forward.h"
-+
-+#include <linux/types.h> /* for __u?? */
-+#include <linux/fs.h>
-+
-+/* Mask which, when applied to a given block number, shows whether that block
-+   number is a fake one */
-+#define REISER4_FAKE_BLOCKNR_BIT_MASK 0x8000000000000000ULL
-+/* Mask which isolates a type of object this fake block number was assigned
-+ to */
-+#define REISER4_BLOCKNR_STATUS_BIT_MASK 0xC000000000000000ULL
-+
-+/* The result of applying REISER4_BLOCKNR_STATUS_BIT_MASK should be compared
-+   against these two values to determine whether the object is unallocated or
-+   a bitmap shadow object (WORKING BITMAP block, see plugin/space/bitmap.c) */
-+#define REISER4_UNALLOCATED_STATUS_VALUE 0xC000000000000000ULL
-+#define REISER4_BITMAP_BLOCKS_STATUS_VALUE 0x8000000000000000ULL
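-+/* A minimal illustrative sketch of how these masks combine: a block number
-+   is fake if its top bit is set, and masking with
-+   REISER4_BLOCKNR_STATUS_BIT_MASK tells unallocated objects apart from
-+   working bitmap blocks. The helper name here is hypothetical;
-+   reiser4_blocknr_is_fake(), declared later in this header, is the real
-+   interface. */
-+static inline int blocknr_is_unallocated_sketch(const reiser4_block_nr * blk)
-+{
-+	return (*blk & REISER4_BLOCKNR_STATUS_BIT_MASK) ==
-+	    REISER4_UNALLOCATED_STATUS_VALUE;
-+}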
-+
-+/* specification how block allocation was counted in sb block counters */
-+typedef enum {
-+ BLOCK_NOT_COUNTED = 0, /* reiser4 has no info about this block yet */
-+ BLOCK_GRABBED = 1, /* free space grabbed for further allocation
-+ of this block */
-+ BLOCK_FLUSH_RESERVED = 2, /* block is reserved for flush needs. */
-+ BLOCK_UNALLOCATED = 3, /* block is used for existing in-memory object
-+ ( unallocated formatted or unformatted
-+ node) */
-+ BLOCK_ALLOCATED = 4 /* block is mapped to disk, real on-disk block
-+ number assigned */
-+} block_stage_t;
-+
-+/* a hint for block allocator */
-+struct reiser4_blocknr_hint {
-+ /* FIXME: I think we want to add a longterm lock on the bitmap block
-+ here. This is to prevent jnode_flush() calls from interleaving
-+ allocations on the same bitmap, once a hint is established. */
-+
-+ /* search start hint */
-+ reiser4_block_nr blk;
-+ /* if not zero, it is a region size we search for free blocks in */
-+ reiser4_block_nr max_dist;
-+	/* level for allocation; it may be useful to have branch-level and
-+	   higher levels write-optimized. */
-+ tree_level level;
-+ /* block allocator assumes that blocks, which will be mapped to disk,
-+ are in this specified block_stage */
-+ block_stage_t block_stage;
-+ /* If direction = 1 allocate blocks in backward direction from the end
-+ * of disk to the beginning of disk. */
-+ unsigned int backward:1;
-+
-+};
-+
-+/* These flags control block allocation/deallocation behavior */
-+enum reiser4_ba_flags {
-+	/* do allocations from the reserved (5%) area */
-+ BA_RESERVED = (1 << 0),
-+
-+ /* block allocator can do commit trying to recover free space */
-+ BA_CAN_COMMIT = (1 << 1),
-+
-+ /* if operation will be applied to formatted block */
-+ BA_FORMATTED = (1 << 2),
-+
-+ /* defer actual block freeing until transaction commit */
-+ BA_DEFER = (1 << 3),
-+
-+	/* allocate blocks for permanent fs objects (formatted or unformatted),
-+	   not wandered or log blocks */
-+ BA_PERMANENT = (1 << 4),
-+
-+	/* grab space even if it was disabled */
-+ BA_FORCE = (1 << 5),
-+
-+ /* use default start value for free blocks search. */
-+ BA_USE_DEFAULT_SEARCH_START = (1 << 6)
-+};
-+
-+typedef enum reiser4_ba_flags reiser4_ba_flags_t;
-+
-+extern void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint);
-+extern void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint);
-+extern void update_blocknr_hint_default(const struct super_block *,
-+ const reiser4_block_nr *);
-+extern void get_blocknr_hint_default(reiser4_block_nr *);
-+
-+extern reiser4_block_nr reiser4_fs_reserved_space(struct super_block *super);
-+
-+int assign_fake_blocknr_formatted(reiser4_block_nr *);
-+reiser4_block_nr fake_blocknr_unformatted(int);
-+
-+/* free -> grabbed -> fake_allocated -> used */
-+
-+int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags);
-+void all_grabbed2free(void);
-+void grabbed2free(reiser4_context * , reiser4_super_info_data * , __u64 count);
-+void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags);
-+void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count);
-+void grabbed2flush_reserved(__u64 count);
-+int reiser4_alloc_blocks(reiser4_blocknr_hint * hint,
-+ reiser4_block_nr * start,
-+ reiser4_block_nr * len, reiser4_ba_flags_t flags);
-+int reiser4_dealloc_blocks(const reiser4_block_nr *,
-+ const reiser4_block_nr *,
-+ block_stage_t, reiser4_ba_flags_t flags);
-+
-+static inline int reiser4_alloc_block(reiser4_blocknr_hint * hint,
-+ reiser4_block_nr * start,
-+ reiser4_ba_flags_t flags)
-+{
-+ reiser4_block_nr one = 1;
-+ return reiser4_alloc_blocks(hint, start, &one, flags);
-+}
-+
-+static inline int reiser4_dealloc_block(const reiser4_block_nr * block,
-+ block_stage_t stage,
-+ reiser4_ba_flags_t flags)
-+{
-+ const reiser4_block_nr one = 1;
-+ return reiser4_dealloc_blocks(block, &one, stage, flags);
-+}
-+
-+#define reiser4_grab_space_force(count, flags) \
-+ reiser4_grab_space(count, flags | BA_FORCE)
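-+/* A minimal sketch of a typical walk through the free -> grabbed -> used
-+   chain using the declarations above. The function name is hypothetical and
-+   the flag/stage choices are only one plausible combination; real callers
-+   pick them per object type. Error handling is elided. */
-+static inline int alloc_one_block_sketch(reiser4_block_nr * blk)
-+{
-+	reiser4_blocknr_hint hint;
-+	int ret;
-+
-+	ret = reiser4_grab_space(1, BA_CAN_COMMIT);	/* free -> grabbed */
-+	if (ret != 0)
-+		return ret;
-+	reiser4_blocknr_hint_init(&hint);
-+	hint.block_stage = BLOCK_GRABBED;
-+	/* grabbed -> used: map the block to a real disk location */
-+	ret = reiser4_alloc_block(&hint, blk, BA_FORMATTED | BA_PERMANENT);
-+	reiser4_blocknr_hint_done(&hint);
-+	return ret;
-+}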
-+
-+extern void grabbed2free_mark(__u64 mark);
-+extern int reiser4_grab_reserved(struct super_block *,
-+ __u64, reiser4_ba_flags_t);
-+extern void reiser4_release_reserved(struct super_block *super);
-+
-+/* grabbed -> fake_allocated */
-+
-+/* fake_allocated -> used */
-+
-+/* used -> fake_allocated -> grabbed -> free */
-+
-+extern void flush_reserved2grabbed(txn_atom * atom, __u64 count);
-+
-+extern int reiser4_blocknr_is_fake(const reiser4_block_nr * da);
-+
-+extern void grabbed2cluster_reserved(int count);
-+extern void cluster_reserved2grabbed(int count);
-+extern void cluster_reserved2free(int count);
-+
-+extern int reiser4_check_block_counters(const struct super_block *);
-+
-+#if REISER4_DEBUG
-+
-+extern void reiser4_check_block(const reiser4_block_nr *, int);
-+
-+#else
-+
-+# define reiser4_check_block(beg, val) noop
-+
-+#endif
-+
-+extern int reiser4_pre_commit_hook(void);
-+extern void reiser4_post_commit_hook(void);
-+extern void reiser4_post_write_back_hook(void);
-+
-+#endif /* __FS_REISER4_BLOCK_ALLOC_H__ */
-+
-+/* Make Linus happy.
-+ Local variables:
-+ c-indentation-style: "K&R"
-+ mode-name: "LC"
-+ c-basic-offset: 8
-+ tab-width: 8
-+ fill-column: 120
-+ End:
-+*/
-diff -urN linux-2.6.35.orig/fs/reiser4/blocknrset.c linux-2.6.35/fs/reiser4/blocknrset.c
---- linux-2.6.35.orig/fs/reiser4/blocknrset.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/blocknrset.c 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,371 @@
-+/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
-+reiser4/README */
-+
-+/* This file contains code for various block number sets used by the atom to
-+ track the deleted set and wandered block mappings. */
-+
-+#include "debug.h"
-+#include "dformat.h"
-+#include "txnmgr.h"
-+#include "context.h"
-+
-+#include <linux/slab.h>
-+
-+/* The proposed data structure for storing unordered block number sets is a
-+   list of elements, each of which contains an array of block numbers and/or
-+   an array of block number pairs. That element, called blocknr_set_entry,
-+   stores single block numbers from the beginning of its data area (the
-+   entries[] field below) and block number pairs (extents) from the end. The
-+   ->nr_singles and ->nr_pairs fields count the numbers of blocks and extents.
-+
-+   +--------------- blocknr_set_entry->data --------------+
-+   |block1|block2| ... <free space> ... |pair3|pair2|pair1|
-+   +-------------------------------------------------------+
-+
-+   When the current blocknr_set_entry is full, a new one is allocated. */
-+
-+/* Usage examples: blocknr sets are used in reiser4 for storing atom's delete
-+ * set (single blocks and block extents); in that case a blocknr pair represents an
-+ * extent; atom's wandered map is also stored as a blocknr set, blocknr pairs
-+ * there represent a (real block) -> (wandered block) mapping. */
-+
-+/* Protection: blocknr sets belong to reiser4 atom, and
-+ * their modifications are performed with the atom lock held */
-+
-+/* The total size of a blocknr_set_entry. */
-+#define BLOCKNR_SET_ENTRY_SIZE 128
-+
-+/* The number of block numbers that can fit in the blocknr data area. */
-+#define BLOCKNR_SET_ENTRIES_NUMBER \
-+ ((BLOCKNR_SET_ENTRY_SIZE - \
-+ 2 * sizeof(unsigned) - \
-+ sizeof(struct list_head)) / \
-+ sizeof(reiser4_block_nr))
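-+/* Worked example: assuming a 64-bit kernel (sizeof(unsigned) == 4,
-+   sizeof(struct list_head) == 16, sizeof(reiser4_block_nr) == 8), the
-+   128-byte entry size above gives (128 - 2*4 - 16) / 8 = 13 slots per
-+   entry, i.e. up to 13 single block numbers, or 6 pairs plus one single,
-+   per blocknr_set_entry. */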
-+
-+/* An entry of the blocknr_set */
-+struct blocknr_set_entry {
-+ unsigned nr_singles;
-+ unsigned nr_pairs;
-+ struct list_head link;
-+ reiser4_block_nr entries[BLOCKNR_SET_ENTRIES_NUMBER];
-+};
-+
-+/* A pair of blocks as recorded in the blocknr_set_entry data. */
-+struct blocknr_pair {
-+ reiser4_block_nr a;
-+ reiser4_block_nr b;
-+};
-+
-+/* Return the number of blocknr slots available in a blocknr_set_entry. */
-+/* Audited by: green(2002.06.11) */
-+static unsigned bse_avail(blocknr_set_entry * bse)
-+{
-+ unsigned used = bse->nr_singles + 2 * bse->nr_pairs;
-+
-+ assert("jmacd-5088", BLOCKNR_SET_ENTRIES_NUMBER >= used);
-+ cassert(sizeof(blocknr_set_entry) == BLOCKNR_SET_ENTRY_SIZE);
-+
-+ return BLOCKNR_SET_ENTRIES_NUMBER - used;
-+}
-+
-+/* Initialize a blocknr_set_entry. */
-+static void bse_init(blocknr_set_entry *bse)
-+{
-+ bse->nr_singles = 0;
-+ bse->nr_pairs = 0;
-+ INIT_LIST_HEAD(&bse->link);
-+}
-+
-+/* Allocate and initialize a blocknr_set_entry. */
-+/* Audited by: green(2002.06.11) */
-+static blocknr_set_entry *bse_alloc(void)
-+{
-+ blocknr_set_entry *e;
-+
-+ if ((e = (blocknr_set_entry *) kmalloc(sizeof(blocknr_set_entry),
-+ reiser4_ctx_gfp_mask_get())) == NULL)
-+ return NULL;
-+
-+ bse_init(e);
-+
-+ return e;
-+}
-+
-+/* Free a blocknr_set_entry. */
-+/* Audited by: green(2002.06.11) */
-+static void bse_free(blocknr_set_entry * bse)
-+{
-+ kfree(bse);
-+}
-+
-+/* Add a block number to a blocknr_set_entry */
-+/* Audited by: green(2002.06.11) */
-+static void
-+bse_put_single(blocknr_set_entry * bse, const reiser4_block_nr * block)
-+{
-+ assert("jmacd-5099", bse_avail(bse) >= 1);
-+
-+ bse->entries[bse->nr_singles++] = *block;
-+}
-+
-+/* Get a pair of block numbers */
-+/* Audited by: green(2002.06.11) */
-+static inline struct blocknr_pair *bse_get_pair(blocknr_set_entry * bse,
-+ unsigned pno)
-+{
-+ assert("green-1", BLOCKNR_SET_ENTRIES_NUMBER >= 2 * (pno + 1));
-+
-+ return (struct blocknr_pair *) (bse->entries +
-+ BLOCKNR_SET_ENTRIES_NUMBER -
-+ 2 * (pno + 1));
-+}
-+
-+/* Add a pair of block numbers to a blocknr_set_entry */
-+/* Audited by: green(2002.06.11) */
-+static void
-+bse_put_pair(blocknr_set_entry * bse, const reiser4_block_nr * a,
-+ const reiser4_block_nr * b)
-+{
-+ struct blocknr_pair *pair;
-+
-+ assert("jmacd-5100", bse_avail(bse) >= 2 && a != NULL && b != NULL);
-+
-+ pair = bse_get_pair(bse, bse->nr_pairs++);
-+
-+ pair->a = *a;
-+ pair->b = *b;
-+}
-+
-+/* Add either a block or pair of blocks to the block number set. The first
-+ blocknr (@a) must be non-NULL. If @b is NULL a single blocknr is added, if
-+ @b is non-NULL a pair is added. The block number set belongs to atom, and
-+ the call is made with the atom lock held. There may not be enough space in
-+ the current blocknr_set_entry. If new_bsep points to a non-NULL
-+ blocknr_set_entry then it will be added to the blocknr_set and new_bsep
-+ will be set to NULL. If new_bsep contains NULL then the atom lock will be
-+ released and a new bse will be allocated in new_bsep. E_REPEAT will be
-+ returned with the atom unlocked for the operation to be tried again. If
-+ the operation succeeds, 0 is returned. If new_bsep is non-NULL and not
-+ used during the call, it will be freed automatically. */
-+static int blocknr_set_add(txn_atom *atom, struct list_head *bset,
-+ blocknr_set_entry **new_bsep, const reiser4_block_nr *a,
-+ const reiser4_block_nr *b)
-+{
-+ blocknr_set_entry *bse;
-+ unsigned entries_needed;
-+
-+ assert("jmacd-5101", a != NULL);
-+
-+ entries_needed = (b == NULL) ? 1 : 2;
-+ if (list_empty(bset) ||
-+ bse_avail(list_entry(bset->next, blocknr_set_entry, link)) < entries_needed) {
-+ /* See if a bse was previously allocated. */
-+ if (*new_bsep == NULL) {
-+ spin_unlock_atom(atom);
-+ *new_bsep = bse_alloc();
-+ return (*new_bsep != NULL) ? -E_REPEAT :
-+ RETERR(-ENOMEM);
-+ }
-+
-+ /* Put it on the head of the list. */
-+ list_add(&((*new_bsep)->link), bset);
-+
-+ *new_bsep = NULL;
-+ }
-+
-+ /* Add the single or pair. */
-+ bse = list_entry(bset->next, blocknr_set_entry, link);
-+ if (b == NULL) {
-+ bse_put_single(bse, a);
-+ } else {
-+ bse_put_pair(bse, a, b);
-+ }
-+
-+ /* If new_bsep is non-NULL then there was an allocation race, free this
-+ copy. */
-+ if (*new_bsep != NULL) {
-+ bse_free(*new_bsep);
-+ *new_bsep = NULL;
-+ }
-+
-+ return 0;
-+}
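-+/* A minimal caller-side sketch of the retry protocol described above. On
-+   -E_REPEAT the atom lock has been dropped and a bse preallocated, so the
-+   loop spins at most twice; the re-lock via get_current_atom_locked()
-+   reflects that the atom may have fused meanwhile. The function name is
-+   hypothetical. */
-+static inline int blocknr_set_add_sketch(txn_atom * atom,
-+					 struct list_head *bset,
-+					 const reiser4_block_nr * block)
-+{
-+	blocknr_set_entry *new_bse = NULL;
-+	int ret;
-+
-+	do {
-+		/* atom must be spin-locked on entry to blocknr_set_add() */
-+		ret = blocknr_set_add(atom, bset, &new_bse, block, NULL);
-+		if (ret == -E_REPEAT)
-+			atom = get_current_atom_locked();
-+	} while (ret == -E_REPEAT);
-+	/* on success the atom lock is still held by the caller */
-+	return ret;
-+}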
-+
-+/* Add an extent to the block set. If the length is 1, it is treated as a
-+ single block (e.g., reiser4_set_add_block). */
-+/* Audited by: green(2002.06.11) */
-+/* Auditor note: Entire call chain cannot hold any spinlocks, because
-+ kmalloc might schedule. The only exception is atom spinlock, which is
-+ properly freed. */
-+int
-+blocknr_set_add_extent(txn_atom * atom,
-+ struct list_head *bset,
-+ blocknr_set_entry ** new_bsep,
-+ const reiser4_block_nr * start,
-+ const reiser4_block_nr * len)
-+{
-+ assert("jmacd-5102", start != NULL && len != NULL && *len > 0);
-+ return blocknr_set_add(atom, bset, new_bsep, start,
-+ *len == 1 ? NULL : len);
-+}
-+
-+/* Add a block pair to the block set. It adds exactly a pair, which is checked
-+ * by an assertion that both arguments are not null.*/
-+/* Audited by: green(2002.06.11) */
-+/* Auditor note: Entire call chain cannot hold any spinlocks, because
-+ kmalloc might schedule. The only exception is atom spinlock, which is
-+ properly freed. */
-+int
-+blocknr_set_add_pair(txn_atom * atom,
-+ struct list_head *bset,
-+ blocknr_set_entry ** new_bsep, const reiser4_block_nr * a,
-+ const reiser4_block_nr * b)
-+{
-+ assert("jmacd-5103", a != NULL && b != NULL);
-+ return blocknr_set_add(atom, bset, new_bsep, a, b);
-+}
-+
-+/* Initialize a blocknr_set. */
-+void blocknr_set_init(struct list_head *bset)
-+{
-+ INIT_LIST_HEAD(bset);
-+}
-+
-+/* Release the entries of a blocknr_set. */
-+void blocknr_set_destroy(struct list_head *bset)
-+{
-+ blocknr_set_entry *bse;
-+
-+ while (!list_empty(bset)) {
-+ bse = list_entry(bset->next, blocknr_set_entry, link);
-+ list_del_init(&bse->link);
-+ bse_free(bse);
-+ }
-+}
-+
-+/* Merge blocknr_set entries out of @from into @into. */
-+/* Audited by: green(2002.06.11) */
-+/* Auditor comments: This merge does not know whether the merged sets contain
-+   block pairs (as for wandered sets) or extents, so it cannot really merge
-+   overlapping ranges if there are any. So I believe it may lead to some
-+   blocks being present several times in one blocknr_set. To help debug such
-+   problems it might help to check for duplicate entries on actual processing
-+   of this set. Testing this kind of thing right here is also complicated by
-+   the fact that these sets are not sorted, and going through the whole set
-+   on each element addition would be a CPU-heavy task */
-+void blocknr_set_merge(struct list_head *from, struct list_head *into)
-+{
-+ blocknr_set_entry *bse_into = NULL;
-+
-+ /* If @from is empty, no work to perform. */
-+ if (list_empty(from))
-+ return;
-+ /* If @into is not empty, try merging partial-entries. */
-+ if (!list_empty(into)) {
-+
-+		/* Neither set is empty; pop the front two members and try to
-+		   combine them. */
-+ blocknr_set_entry *bse_from;
-+ unsigned into_avail;
-+
-+ bse_into = list_entry(into->next, blocknr_set_entry, link);
-+ list_del_init(&bse_into->link);
-+ bse_from = list_entry(from->next, blocknr_set_entry, link);
-+ list_del_init(&bse_from->link);
-+
-+ /* Combine singles. */
-+ for (into_avail = bse_avail(bse_into);
-+ into_avail != 0 && bse_from->nr_singles != 0;
-+ into_avail -= 1) {
-+ bse_put_single(bse_into,
-+ &bse_from->entries[--bse_from->
-+ nr_singles]);
-+ }
-+
-+ /* Combine pairs. */
-+ for (; into_avail > 1 && bse_from->nr_pairs != 0;
-+ into_avail -= 2) {
-+ struct blocknr_pair *pair =
-+ bse_get_pair(bse_from, --bse_from->nr_pairs);
-+ bse_put_pair(bse_into, &pair->a, &pair->b);
-+ }
-+
-+ /* If bse_from is empty, delete it now. */
-+ if (bse_avail(bse_from) == BLOCKNR_SET_ENTRIES_NUMBER) {
-+ bse_free(bse_from);
-+ } else {
-+ /* Otherwise, bse_into is full or nearly full (e.g.,
-+ it could have one slot avail and bse_from has one
-+ pair left). Push it back onto the list. bse_from
-+ becomes bse_into, which will be the new partial. */
-+ list_add(&bse_into->link, into);
-+ bse_into = bse_from;
-+ }
-+ }
-+
-+ /* Splice lists together. */
-+ list_splice_init(from, into->prev);
-+
-+ /* Add the partial entry back to the head of the list. */
-+ if (bse_into != NULL)
-+ list_add(&bse_into->link, into);
-+}
-+
-+/* Iterate over all blocknr set elements. */
-+int blocknr_set_iterator(txn_atom *atom, struct list_head *bset,
-+ blocknr_set_actor_f actor, void *data, int delete)
-+{
-+
-+ blocknr_set_entry *entry;
-+
-+ assert("zam-429", atom != NULL);
-+ assert("zam-430", atom_is_protected(atom));
-+ assert("zam-431", bset != 0);
-+ assert("zam-432", actor != NULL);
-+
-+ entry = list_entry(bset->next, blocknr_set_entry, link);
-+ while (bset != &entry->link) {
-+ blocknr_set_entry *tmp = list_entry(entry->link.next, blocknr_set_entry, link);
-+ unsigned int i;
-+ int ret;
-+
-+ for (i = 0; i < entry->nr_singles; i++) {
-+ ret = actor(atom, &entry->entries[i], NULL, data);
-+
-+ /* We can't break a loop if delete flag is set. */
-+ if (ret != 0 && !delete)
-+ return ret;
-+ }
-+
-+ for (i = 0; i < entry->nr_pairs; i++) {
-+ struct blocknr_pair *ab;
-+
-+ ab = bse_get_pair(entry, i);
-+
-+ ret = actor(atom, &ab->a, &ab->b, data);
-+
-+ if (ret != 0 && !delete)
-+ return ret;
-+ }
-+
-+ if (delete) {
-+ list_del(&entry->link);
-+ bse_free(entry);
-+ }
-+
-+ entry = tmp;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-indentation-style: "K&R"
-+ * mode-name: "LC"
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * fill-column: 79
-+ * scroll-step: 1
-+ * End:
-+ */
-diff -urN linux-2.6.35.orig/fs/reiser4/carry.c linux-2.6.35/fs/reiser4/carry.c
---- linux-2.6.35.orig/fs/reiser4/carry.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/carry.c 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,1398 @@
-+/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
-+ reiser4/README */
-+/* Functions to "carry" tree modification(s) upward. */
-+/* Tree is modified one level at a time. As we modify a level we accumulate a
-+ set of changes that need to be propagated to the next level. We manage
-+ node locking such that any searches that collide with carrying are
-+ restarted, from the root if necessary.
-+
-+ Insertion of a new item may result in items being moved among nodes and
-+ this requires the delimiting key to be updated at the least common parent
-+ of the nodes modified to preserve search tree invariants. Also, insertion
-+ may require allocation of a new node. A pointer to the new node has to be
-+ inserted into some node on the parent level, etc.
-+
-+ Tree carrying is meant to be analogous to arithmetic carrying.
-+
-+ A carry operation is always associated with some node (&carry_node).
-+
-+ Carry process starts with some initial set of operations to be performed
-+ and an initial set of already locked nodes. Operations are performed one
-+ by one. Performing each single operation has following possible effects:
-+
-+ - content of carry node associated with operation is modified
-+ - new carry nodes are locked and involved into carry process on this level
-+ - new carry operations are posted to the next level
-+
-+ After all carry operations on this level are done, process is repeated for
-+ the accumulated sequence on carry operations for the next level. This
-+ starts by trying to lock (in left to right order) all carry nodes
-+ associated with carry operations on the parent level. After this, we decide
-+ whether more nodes are required on the left of already locked set. If so,
-+ all locks taken on the parent level are released, new carry nodes are
-+ added, and locking process repeats.
-+
-+ It may happen that balancing process fails owing to unrecoverable error on
-+ some of upper levels of a tree (possible causes are io error, failure to
-+ allocate new node, etc.). In this case we should unmount the filesystem,
-+ rebooting if it is the root, and possibly advise the use of fsck.
-+
-+ USAGE:
-+
-+ int some_tree_operation( znode *node, ... )
-+ {
-+ // Allocate on a stack pool of carry objects: operations and nodes.
-+ // Most carry processes will only take objects from here, without
-+ // dynamic allocation.
-+
-+I feel uneasy about this pool. It adds to code complexity, I understand why it
-+exists, but.... -Hans
-+
-+	carry_pool *pool;
-+	carry_level *lowest_level;
-+	carry_op *op;
-+
-+	pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level));
-+	lowest_level = (carry_level *) (pool + 1);
-+	init_carry_level(lowest_level, pool);
-+
-+ // operation may be one of:
-+ // COP_INSERT --- insert new item into node
-+ // COP_CUT --- remove part of or whole node
-+ // COP_PASTE --- increase size of item
-+ // COP_DELETE --- delete pointer from parent node
-+ // COP_UPDATE --- update delimiting key in least
-+ // common ancestor of two
-+
-+	op = reiser4_post_carry(lowest_level, operation, node, 0);
-+	if (IS_ERR(op) || (op == NULL)) {
-+		handle error
-+	} else {
-+		// fill in remaining fields in @op, according to carry.h:carry_op
-+		result = reiser4_carry(lowest_level, NULL);
-+	}
-+	done_carry_pool(pool);
-+ }
-+
-+ When you are implementing node plugin method that participates in carry
-+ (shifting, insertion, deletion, etc.), do the following:
-+
-+ int foo_node_method(znode * node, ..., carry_level * todo)
-+ {
-+ carry_op *op;
-+
-+ ....
-+
-+ // note, that last argument to reiser4_post_carry() is non-null
-+ // here, because @op is to be applied to the parent of @node, rather
-+ // than to the @node itself as in the previous case.
-+
-+ op = node_post_carry(todo, operation, node, 1);
-+ // fill in remaining fields in @op, according to carry.h:carry_op
-+
-+ ....
-+
-+ }
-+
-+ BATCHING:
-+
-+ One of the main advantages of level-by-level balancing implemented here is
-+   ability to batch updates on a parent level and to perform them more
-+ efficiently as a result.
-+
-+ Description To Be Done (TBD).
-+
-+ DIFFICULTIES AND SUBTLE POINTS:
-+
-+ 1. complex plumbing is required, because:
-+
-+ a. effective allocation through pools is needed
-+
-+ b. target of operation is not exactly known when operation is
-+ posted. This is worked around through bitfields in &carry_node and
-+ logic in lock_carry_node()
-+
-+ c. of interaction with locking code: node should be added into sibling
-+ list when pointer to it is inserted into its parent, which is some time
-+ after node was created. Between these moments, node is somewhat in
-+ suspended state and is only registered in the carry lists
-+
-+ 2. whole balancing logic is implemented here, in particular, insertion
-+ logic is coded in make_space().
-+
-+ 3. special cases like insertion (reiser4_add_tree_root()) or deletion
-+ (reiser4_kill_tree_root()) of tree root and morphing of paste into insert
-+ (insert_paste()) have to be handled.
-+
-+ 4. there is non-trivial interdependency between allocation of new nodes
-+ and almost everything else. This is mainly due to the (1.c) above. I shall
-+ write about this later.
-+
-+*/
-+
-+#include "forward.h"
-+#include "debug.h"
-+#include "key.h"
-+#include "coord.h"
-+#include "plugin/item/item.h"
-+#include "plugin/item/extent.h"
-+#include "plugin/node/node.h"
-+#include "jnode.h"
-+#include "znode.h"
-+#include "tree_mod.h"
-+#include "tree_walk.h"
-+#include "block_alloc.h"
-+#include "pool.h"
-+#include "tree.h"
-+#include "carry.h"
-+#include "carry_ops.h"
-+#include "super.h"
-+#include "reiser4.h"
-+
-+#include <linux/types.h>
-+
-+/* level locking/unlocking */
-+static int lock_carry_level(carry_level * level);
-+static void unlock_carry_level(carry_level * level, int failure);
-+static void done_carry_level(carry_level * level);
-+static void unlock_carry_node(carry_level * level, carry_node * node, int fail);
-+
-+int lock_carry_node(carry_level * level, carry_node * node);
-+int lock_carry_node_tail(carry_node * node);
-+
-+/* carry processing proper */
-+static int carry_on_level(carry_level * doing, carry_level * todo);
-+
-+static carry_op *add_op(carry_level * level, pool_ordering order,
-+ carry_op * reference);
-+
-+/* handlers for carry operations. */
-+
-+static void fatal_carry_error(carry_level * doing, int ecode);
-+static int add_new_root(carry_level * level, carry_node * node, znode * fake);
-+
-+static void print_level(const char *prefix, carry_level * level);
-+
-+#if REISER4_DEBUG
-+typedef enum {
-+ CARRY_TODO,
-+ CARRY_DOING
-+} carry_queue_state;
-+static int carry_level_invariant(carry_level * level, carry_queue_state state);
-+#endif
-+
-+/* main entry point for tree balancing.
-+
-+ Tree carry performs operations from @doing and while doing so accumulates
-+ information about operations to be performed on the next level ("carried"
-+ to the parent level). Carried operations are performed, causing possibly
-+   more operations to be carried upward, etc. carry() takes care of
-+ locking and pinning znodes while operating on them.
-+
-+ For usage, see comment at the top of fs/reiser4/carry.c
-+
-+*/
-+int reiser4_carry(carry_level * doing /* set of carry operations to be
-+ * performed */ ,
-+ carry_level * done /* set of nodes, already performed
-+ * at the previous level.
-+ * NULL in most cases */)
-+{
-+ int result = 0;
-+ /* queue of new requests */
-+ carry_level *todo;
-+ ON_DEBUG(STORE_COUNTERS);
-+
-+ assert("nikita-888", doing != NULL);
-+ BUG_ON(done != NULL);
-+
-+ todo = doing + 1;
-+ init_carry_level(todo, doing->pool);
-+
-+	/* queue of requests performed on the previous level */
-+ done = todo + 1;
-+ init_carry_level(done, doing->pool);
-+
-+ /* iterate until there is nothing more to do */
-+ while (result == 0 && doing->ops_num > 0) {
-+ carry_level *tmp;
-+
-+ /* at this point @done is locked. */
-+ /* repeat lock/do/unlock while
-+
-+ (1) lock_carry_level() fails due to deadlock avoidance, or
-+
-+ (2) carry_on_level() decides that more nodes have to
-+ be involved.
-+
-+ (3) some unexpected error occurred while balancing on the
-+ upper levels. In this case all changes are rolled back.
-+
-+ */
-+ while (1) {
-+ result = lock_carry_level(doing);
-+ if (result == 0) {
-+ /* perform operations from @doing and
-+ accumulate new requests in @todo */
-+ result = carry_on_level(doing, todo);
-+ if (result == 0)
-+ break;
-+ else if (result != -E_REPEAT ||
-+ !doing->restartable) {
-+ warning("nikita-1043",
-+ "Fatal error during carry: %i",
-+ result);
-+ print_level("done", done);
-+ print_level("doing", doing);
-+ print_level("todo", todo);
-+ /* do some rough stuff like aborting
-+ all pending transcrashes and thus
-+ pushing tree back to the consistent
-+				   state. Alternatively, just panic.
-+ */
-+ fatal_carry_error(doing, result);
-+ return result;
-+ }
-+ } else if (result != -E_REPEAT) {
-+ fatal_carry_error(doing, result);
-+ return result;
-+ }
-+ unlock_carry_level(doing, 1);
-+ }
-+ /* at this point @done can be safely unlocked */
-+ done_carry_level(done);
-+
-+ /* cyclically shift queues */
-+ tmp = done;
-+ done = doing;
-+ doing = todo;
-+ todo = tmp;
-+ init_carry_level(todo, doing->pool);
-+
-+ /* give other threads chance to run */
-+ reiser4_preempt_point();
-+ }
-+ done_carry_level(done);
-+
-+ /* all counters, but x_refs should remain the same. x_refs can change
-+ owing to transaction manager */
-+ ON_DEBUG(CHECK_COUNTERS);
-+ return result;
-+}
-+
-+/* perform carry operations on given level.
-+
-+ Optimizations proposed by pooh:
-+
-+ (1) don't lock all nodes from queue at the same time. Lock nodes lazily as
-+ required;
-+
-+ (2) unlock node if there are no more operations to be performed upon it and
-+ node didn't add any operation to @todo. This can be implemented by
-+   attaching to each node two counters: a counter of operations working on
-+   this node and a counter of operations carried upward from this node.
-+
-+*/
-+static int carry_on_level(carry_level * doing /* queue of carry operations to
-+ * do on this level */ ,
-+ carry_level * todo /* queue where new carry
-+ * operations to be performed on
-+					 * the parent level are
-+ * accumulated during @doing
-+ * processing. */ )
-+{
-+ int result;
-+ int (*f) (carry_op *, carry_level *, carry_level *);
-+ carry_op *op;
-+ carry_op *tmp_op;
-+
-+ assert("nikita-1034", doing != NULL);
-+ assert("nikita-1035", todo != NULL);
-+
-+ /* @doing->nodes are locked. */
-+
-+ /* This function can be split into two phases: analysis and modification
-+
-+ Analysis calculates precisely what items should be moved between
-+ nodes. This information is gathered in some structures attached to
-+ each carry_node in a @doing queue. Analysis also determines whether
-+ new nodes are to be allocated etc.
-+
-+ After analysis is completed, actual modification is performed. Here
-+ we can take advantage of "batch modification": if there are several
-+ operations acting on the same node, modifications can be performed
-+ more efficiently when batched together.
-+
-+ Above is an optimization left for the future.
-+ */
-+ /* Important, but delayed optimization: it's possible to batch
-+ operations together and perform them more efficiently as a
-+ result. For example, deletion of several neighboring items from a
-+ node can be converted to a single ->cut() operation.
-+
-+ Before processing queue, it should be scanned and "mergeable"
-+ operations merged.
-+ */
-+ result = 0;
-+ for_all_ops(doing, op, tmp_op) {
-+ carry_opcode opcode;
-+
-+ assert("nikita-1041", op != NULL);
-+ opcode = op->op;
-+ assert("nikita-1042", op->op < COP_LAST_OP);
-+ f = op_dispatch_table[op->op].handler;
-+ result = f(op, doing, todo);
-+ /* locking can fail with -E_REPEAT. Any different error is fatal
-+ and will be handled by fatal_carry_error() sledgehammer.
-+ */
-+ if (result != 0)
-+ break;
-+ }
-+ if (result == 0) {
-+ carry_plugin_info info;
-+ carry_node *scan;
-+ carry_node *tmp_scan;
-+
-+ info.doing = doing;
-+ info.todo = todo;
-+
-+ assert("nikita-3002",
-+ carry_level_invariant(doing, CARRY_DOING));
-+ for_all_nodes(doing, scan, tmp_scan) {
-+ znode *node;
-+
-+ node = reiser4_carry_real(scan);
-+ assert("nikita-2547", node != NULL);
-+ if (node_is_empty(node)) {
-+ result =
-+ node_plugin_by_node(node)->
-+ prepare_removal(node, &info);
-+ if (result != 0)
-+ break;
-+ }
-+ }
-+ }
-+ return result;
-+}
-+
-+/* post carry operation
-+
-+ This is main function used by external carry clients: node layout plugins
-+ and tree operations to create new carry operation to be performed on some
-+ level.
-+
-+ New operation will be included in the @level queue. To actually perform it,
-+ call carry( level, ... ). This function takes write lock on @node. Carry
-+ manages all its locks by itself, don't worry about this.
-+
-+ This function adds operation and node at the end of the queue. It is up to
-+ caller to guarantee proper ordering of node queue.
-+
-+*/
-+carry_op * reiser4_post_carry(carry_level * level /* queue where new operation
-+ * is to be posted at */ ,
-+ carry_opcode op /* opcode of operation */ ,
-+ znode * node /* node on which this operation
-+ * will operate */ ,
-+ int apply_to_parent_p /* whether operation will
-+ * operate directly on @node
-+				      * or on its parent. */)
-+{
-+ carry_op *result;
-+ carry_node *child;
-+
-+ assert("nikita-1046", level != NULL);
-+ assert("nikita-1788", znode_is_write_locked(node));
-+
-+ result = add_op(level, POOLO_LAST, NULL);
-+ if (IS_ERR(result))
-+ return result;
-+ child = reiser4_add_carry(level, POOLO_LAST, NULL);
-+ if (IS_ERR(child)) {
-+ reiser4_pool_free(&level->pool->op_pool, &result->header);
-+ return (carry_op *) child;
-+ }
-+ result->node = child;
-+ result->op = op;
-+ child->parent = apply_to_parent_p;
-+ if (ZF_ISSET(node, JNODE_ORPHAN))
-+ child->left_before = 1;
-+ child->node = node;
-+ return result;
-+}
-+
-+/* initialize carry queue */
-+void init_carry_level(carry_level * level /* level to initialize */ ,
-+ carry_pool * pool /* pool @level will allocate objects
-+ * from */ )
-+{
-+ assert("nikita-1045", level != NULL);
-+ assert("nikita-967", pool != NULL);
-+
-+ memset(level, 0, sizeof *level);
-+ level->pool = pool;
-+
-+ INIT_LIST_HEAD(&level->nodes);
-+ INIT_LIST_HEAD(&level->ops);
-+}
-+
-+/* allocate carry pool and initialize pools within queue */
-+carry_pool *init_carry_pool(int size)
-+{
-+ carry_pool *pool;
-+
-+ assert("", size >= sizeof(carry_pool) + 3 * sizeof(carry_level));
-+ pool = kmalloc(size, reiser4_ctx_gfp_mask_get());
-+ if (pool == NULL)
-+ return ERR_PTR(RETERR(-ENOMEM));
-+
-+ reiser4_init_pool(&pool->op_pool, sizeof(carry_op), CARRIES_POOL_SIZE,
-+ (char *)pool->op);
-+ reiser4_init_pool(&pool->node_pool, sizeof(carry_node),
-+ NODES_LOCKED_POOL_SIZE, (char *)pool->node);
-+ return pool;
-+}
-+
-+/* finish with queue pools */
-+void done_carry_pool(carry_pool * pool/* pool to destroy */)
-+{
-+ reiser4_done_pool(&pool->op_pool);
-+ reiser4_done_pool(&pool->node_pool);
-+ kfree(pool);
-+}
-+
-+/* add new carry node to the @level.
-+
-+ Returns pointer to the new carry node allocated from pool. It's up to
-+ callers to maintain proper order in the @level. Assumption is that if carry
-+   nodes on one level are already sorted and modifications are performed from
-+ left to right, carry nodes added on the parent level will be ordered
-+ automatically. To control ordering use @order and @reference parameters.
-+
-+*/
-+carry_node *reiser4_add_carry_skip(carry_level * level /* &carry_level to add
-+ * node to */ ,
-+ pool_ordering order /* where to insert:
-+ * at the beginning of
-+ * @level,
-+ * before @reference,
-+ * after @reference,
-+ * at the end of @level
-+ */ ,
-+ carry_node * reference/* reference node for
-+ * insertion */)
-+{
-+ ON_DEBUG(carry_node * orig_ref = reference);
-+
-+ if (order == POOLO_BEFORE) {
-+ reference = find_left_carry(reference, level);
-+ if (reference == NULL)
-+ reference = list_entry(level->nodes.next, carry_node,
-+ header.level_linkage);
-+ else
-+ reference = list_entry(reference->header.level_linkage.next,
-+ carry_node, header.level_linkage);
-+ } else if (order == POOLO_AFTER) {
-+ reference = find_right_carry(reference, level);
-+ if (reference == NULL)
-+ reference = list_entry(level->nodes.prev, carry_node,
-+ header.level_linkage);
-+ else
-+ reference = list_entry(reference->header.level_linkage.prev,
-+ carry_node, header.level_linkage);
-+ }
-+ assert("nikita-2209",
-+ ergo(orig_ref != NULL,
-+ reiser4_carry_real(reference) ==
-+ reiser4_carry_real(orig_ref)));
-+ return reiser4_add_carry(level, order, reference);
-+}
-+
-+carry_node *reiser4_add_carry(carry_level * level, /* carry_level to add
-+ node to */
-+ pool_ordering order, /* where to insert:
-+ * at the beginning of
-+ * @level;
-+ * before @reference;
-+ * after @reference;
-+ * at the end of @level
-+ */
-+ carry_node * reference /* reference node for
-+ * insertion */)
-+{
-+ carry_node *result;
-+
-+ result =
-+ (carry_node *) reiser4_add_obj(&level->pool->node_pool,
-+ &level->nodes,
-+ order, &reference->header);
-+ if (!IS_ERR(result) && (result != NULL))
-+ ++level->nodes_num;
-+ return result;
-+}
-+
-+/**
-+ * add new carry operation to the @level.
-+ *
-+ * Returns pointer to the new carry operations allocated from pool. It's up to
-+ * callers to maintain proper order in the @level. To control ordering use
-+ * @order and @reference parameters.
-+ */
-+static carry_op *add_op(carry_level * level, /* &carry_level to add node to */
-+ pool_ordering order, /* where to insert:
-+ * at the beginning of @level;
-+ * before @reference;
-+ * after @reference;
-+ * at the end of @level */
-+ carry_op * reference /* reference node for insertion */)
-+{
-+ carry_op *result;
-+
-+ result =
-+ (carry_op *) reiser4_add_obj(&level->pool->op_pool, &level->ops,
-+ order, &reference->header);
-+ if (!IS_ERR(result) && (result != NULL))
-+ ++level->ops_num;
-+ return result;
-+}
-+
-+/**
-+ * Return node on the right of which @node was created.
-+ *
-+ * Each node is created on the right of some existing node (or it is new root,
-+ * which is special case not handled here).
-+ *
-+ * @node is new node created on some level, but not yet inserted into its
-+ * parent, it has corresponding bit (JNODE_ORPHAN) set in zstate.
-+ */
-+static carry_node *find_begetting_brother(carry_node * node,/* node to start
-+ search from */
-+ carry_level * kin UNUSED_ARG
-+ /* level to scan */)
-+{
-+ carry_node *scan;
-+
-+ assert("nikita-1614", node != NULL);
-+ assert("nikita-1615", kin != NULL);
-+ assert("nikita-1616", LOCK_CNT_GTZ(rw_locked_tree));
-+ assert("nikita-1619", ergo(reiser4_carry_real(node) != NULL,
-+ ZF_ISSET(reiser4_carry_real(node),
-+ JNODE_ORPHAN)));
-+ for (scan = node;;
-+ scan = list_entry(scan->header.level_linkage.prev, carry_node,
-+ header.level_linkage)) {
-+ assert("nikita-1617", &kin->nodes != &scan->header.level_linkage);
-+ if ((scan->node != node->node) &&
-+ !ZF_ISSET(scan->node, JNODE_ORPHAN)) {
-+ assert("nikita-1618", reiser4_carry_real(scan) != NULL);
-+ break;
-+ }
-+ }
-+ return scan;
-+}
-+
-+static cmp_t
-+carry_node_cmp(carry_level * level, carry_node * n1, carry_node * n2)
-+{
-+ assert("nikita-2199", n1 != NULL);
-+ assert("nikita-2200", n2 != NULL);
-+
-+ if (n1 == n2)
-+ return EQUAL_TO;
-+ while (1) {
-+ n1 = carry_node_next(n1);
-+ if (carry_node_end(level, n1))
-+ return GREATER_THAN;
-+ if (n1 == n2)
-+ return LESS_THAN;
-+ }
-+ impossible("nikita-2201", "End of level reached");
-+}
-+
-+carry_node *find_carry_node(carry_level * level, const znode * node)
-+{
-+ carry_node *scan;
-+ carry_node *tmp_scan;
-+
-+ assert("nikita-2202", level != NULL);
-+ assert("nikita-2203", node != NULL);
-+
-+ for_all_nodes(level, scan, tmp_scan) {
-+ if (reiser4_carry_real(scan) == node)
-+ return scan;
-+ }
-+ return NULL;
-+}
-+
-+znode *reiser4_carry_real(const carry_node * node)
-+{
-+ assert("nikita-3061", node != NULL);
-+
-+ return node->lock_handle.node;
-+}
-+
-+carry_node *insert_carry_node(carry_level * doing, carry_level * todo,
-+ const znode * node)
-+{
-+ carry_node *base;
-+ carry_node *scan;
-+ carry_node *tmp_scan;
-+ carry_node *proj;
-+
-+ base = find_carry_node(doing, node);
-+ assert("nikita-2204", base != NULL);
-+
-+ for_all_nodes(todo, scan, tmp_scan) {
-+ proj = find_carry_node(doing, scan->node);
-+ assert("nikita-2205", proj != NULL);
-+ if (carry_node_cmp(doing, proj, base) != LESS_THAN)
-+ break;
-+ }
-+ return scan;
-+}
-+
-+static carry_node *add_carry_atplace(carry_level * doing, carry_level * todo,
-+ znode * node)
-+{
-+ carry_node *reference;
-+
-+ assert("nikita-2994", doing != NULL);
-+ assert("nikita-2995", todo != NULL);
-+ assert("nikita-2996", node != NULL);
-+
-+ reference = insert_carry_node(doing, todo, node);
-+ assert("nikita-2997", reference != NULL);
-+
-+ return reiser4_add_carry(todo, POOLO_BEFORE, reference);
-+}
-+
-+/* like reiser4_post_carry(), but designed to be called from node plugin
-+ methods. This function is different from reiser4_post_carry() in that it
-+ finds proper place to insert node in the queue. */
-+carry_op *node_post_carry(carry_plugin_info * info /* carry parameters
-+ * passed down to node
-+ * plugin */ ,
-+ carry_opcode op /* opcode of operation */ ,
-+ znode * node /* node on which this
-+ * operation will operate */ ,
-+ int apply_to_parent_p /* whether operation will
-+ * operate directly on @node
-+				   * or on its parent. */ )
-+{
-+ carry_op *result;
-+ carry_node *child;
-+
-+ assert("nikita-2207", info != NULL);
-+ assert("nikita-2208", info->todo != NULL);
-+
-+ if (info->doing == NULL)
-+ return reiser4_post_carry(info->todo, op, node,
-+ apply_to_parent_p);
-+
-+ result = add_op(info->todo, POOLO_LAST, NULL);
-+ if (IS_ERR(result))
-+ return result;
-+ child = add_carry_atplace(info->doing, info->todo, node);
-+ if (IS_ERR(child)) {
-+ reiser4_pool_free(&info->todo->pool->op_pool, &result->header);
-+ return (carry_op *) child;
-+ }
-+ result->node = child;
-+ result->op = op;
-+ child->parent = apply_to_parent_p;
-+ if (ZF_ISSET(node, JNODE_ORPHAN))
-+ child->left_before = 1;
-+ child->node = node;
-+ return result;
-+}
-+
-+/* lock all carry nodes in @level */
-+static int lock_carry_level(carry_level * level/* level to lock */)
-+{
-+ int result;
-+ carry_node *node;
-+ carry_node *tmp_node;
-+
-+ assert("nikita-881", level != NULL);
-+ assert("nikita-2229", carry_level_invariant(level, CARRY_TODO));
-+
-+ /* lock nodes from left to right */
-+ result = 0;
-+ for_all_nodes(level, node, tmp_node) {
-+ result = lock_carry_node(level, node);
-+ if (result != 0)
-+ break;
-+ }
-+ return result;
-+}
-+
-+/* Synchronize delimiting keys between @node and its left neighbor.
-+
-+ To reduce contention on dk key and simplify carry code, we synchronize
-+ delimiting keys only when carry ultimately leaves tree level (carrying
-+ changes upward) and unlocks nodes at this level.
-+
-+ This function first finds left neighbor of @node and then updates left
-+   neighbor's right delimiting key to coincide with the least key in @node.
-+
-+*/
-+
-+ON_DEBUG(extern atomic_t delim_key_version;
-+ )
-+
-+static void sync_dkeys(znode * spot/* node to update */)
-+{
-+ reiser4_key pivot;
-+ reiser4_tree *tree;
-+
-+ assert("nikita-1610", spot != NULL);
-+ assert("nikita-1612", LOCK_CNT_NIL(rw_locked_dk));
-+
-+ tree = znode_get_tree(spot);
-+ read_lock_tree(tree);
-+ write_lock_dk(tree);
-+
-+ assert("nikita-2192", znode_is_loaded(spot));
-+
-+ /* sync left delimiting key of @spot with key in its leftmost item */
-+ if (node_is_empty(spot))
-+ pivot = *znode_get_rd_key(spot);
-+ else
-+ leftmost_key_in_node(spot, &pivot);
-+
-+ znode_set_ld_key(spot, &pivot);
-+
-+ /* there can be sequence of empty nodes pending removal on the left of
-+ @spot. Scan them and update their left and right delimiting keys to
-+ match left delimiting key of @spot. Also, update right delimiting
-+ key of first non-empty left neighbor.
-+ */
-+ while (1) {
-+ if (!ZF_ISSET(spot, JNODE_LEFT_CONNECTED))
-+ break;
-+
-+ spot = spot->left;
-+ if (spot == NULL)
-+ break;
-+
-+ znode_set_rd_key(spot, &pivot);
-+ /* don't sink into the domain of another balancing */
-+ if (!znode_is_write_locked(spot))
-+ break;
-+ if (ZF_ISSET(spot, JNODE_HEARD_BANSHEE))
-+ znode_set_ld_key(spot, &pivot);
-+ else
-+ break;
-+ }
-+
-+ write_unlock_dk(tree);
-+ read_unlock_tree(tree);
-+}
-+
-+/* unlock all carry nodes in @level */
-+static void unlock_carry_level(carry_level * level /* level to unlock */ ,
-+ int failure /* true if unlocking owing to
-+ * failure */ )
-+{
-+ carry_node *node;
-+ carry_node *tmp_node;
-+
-+ assert("nikita-889", level != NULL);
-+
-+ if (!failure) {
-+ znode *spot;
-+
-+ spot = NULL;
-+ /* update delimiting keys */
-+ for_all_nodes(level, node, tmp_node) {
-+ if (reiser4_carry_real(node) != spot) {
-+ spot = reiser4_carry_real(node);
-+ sync_dkeys(spot);
-+ }
-+ }
-+ }
-+
-+ /* nodes can be unlocked in arbitrary order. In preemptible
-+ environment it's better to unlock in reverse order of locking,
-+ though.
-+ */
-+ for_all_nodes_back(level, node, tmp_node) {
-+ /* all allocated nodes should be already linked to their
-+ parents at this moment. */
-+ assert("nikita-1631",
-+ ergo(!failure, !ZF_ISSET(reiser4_carry_real(node),
-+ JNODE_ORPHAN)));
-+ ON_DEBUG(check_dkeys(reiser4_carry_real(node)));
-+ unlock_carry_node(level, node, failure);
-+ }
-+ level->new_root = NULL;
-+}
-+
-+/* finish with @level
-+
-+ Unlock nodes and release all allocated resources */
-+static void done_carry_level(carry_level * level/* level to finish */)
-+{
-+ carry_node *node;
-+ carry_node *tmp_node;
-+ carry_op *op;
-+ carry_op *tmp_op;
-+
-+ assert("nikita-1076", level != NULL);
-+
-+ unlock_carry_level(level, 0);
-+ for_all_nodes(level, node, tmp_node) {
-+ assert("nikita-2113", list_empty_careful(&node->lock_handle.locks_link));
-+ assert("nikita-2114", list_empty_careful(&node->lock_handle.owners_link));
-+ reiser4_pool_free(&level->pool->node_pool, &node->header);
-+ }
-+ for_all_ops(level, op, tmp_op)
-+ reiser4_pool_free(&level->pool->op_pool, &op->header);
-+}
-+
-+/* helper function to complete locking of carry node
-+
-+ Finish locking of carry node. There are several ways in which new carry
-+ node can be added into carry level and locked. Normal is through
-+ lock_carry_node(), but also from find_{left|right}_neighbor(). This
-+ function factors out common final part of all locking scenarios. It
-+ supposes that @node -> lock_handle is lock handle for lock just taken and
-+ fills ->real_node from this lock handle.
-+
-+*/
-+int lock_carry_node_tail(carry_node * node/* node to complete locking of */)
-+{
-+ assert("nikita-1052", node != NULL);
-+ assert("nikita-1187", reiser4_carry_real(node) != NULL);
-+ assert("nikita-1188", !node->unlock);
-+
-+ node->unlock = 1;
-+ /* Load node content into memory and install node plugin by
-+ looking at the node header.
-+
-+ Most of the time this call is cheap because the node is
-+ already in memory.
-+
-+ Corresponding zrelse() is in unlock_carry_node()
-+ */
-+ return zload(reiser4_carry_real(node));
-+}
-+
-+/* lock carry node
-+
-+ "Resolve" node to real znode, lock it and mark as locked.
-+ This requires recursive locking of znodes.
-+
-+   When an operation is posted to the parent level, the node it will be
-+   applied to is not yet known. For example, when shifting data between two
-+   nodes, delimiting keys have to be updated in the parent or parents of the
-+   nodes involved. But their parents are not yet locked and, moreover, said
-+   nodes can be reparented by concurrent balancing.
-+
-+ To work around this, carry operation is applied to special "carry node"
-+ rather than to the znode itself. Carry node consists of some "base" or
-+ "reference" znode and flags indicating how to get to the target of carry
-+ operation (->real_node field of carry_node) from base.
-+
-+*/
-+int lock_carry_node(carry_level * level /* level @node is in */ ,
-+ carry_node * node/* node to lock */)
-+{
-+ int result;
-+ znode *reference_point;
-+ lock_handle lh;
-+ lock_handle tmp_lh;
-+ reiser4_tree *tree;
-+
-+ assert("nikita-887", level != NULL);
-+ assert("nikita-882", node != NULL);
-+
-+ result = 0;
-+ reference_point = node->node;
-+ init_lh(&lh);
-+ init_lh(&tmp_lh);
-+ if (node->left_before) {
-+ /* handling of new nodes, allocated on the previous level:
-+
-+		   some carry ops were probably posted from the new node, but
-+ this node neither has parent pointer set, nor is
-+ connected. This will be done in ->create_hook() for
-+ internal item.
-+
-+		   Nonetheless, the parent of the new node has to be locked. To do
-+ this, first go to the "left" in the carry order. This
-+ depends on the decision to always allocate new node on the
-+ right of existing one.
-+
-+ Loop handles case when multiple nodes, all orphans, were
-+ inserted.
-+
-+ Strictly speaking, taking tree lock is not necessary here,
-+ because all nodes scanned by loop in
-+ find_begetting_brother() are write-locked by this thread,
-+ and thus, their sibling linkage cannot change.
-+
-+ */
-+ tree = znode_get_tree(reference_point);
-+ read_lock_tree(tree);
-+ reference_point = find_begetting_brother(node, level)->node;
-+ read_unlock_tree(tree);
-+ assert("nikita-1186", reference_point != NULL);
-+ }
-+ if (node->parent && (result == 0)) {
-+ result =
-+ reiser4_get_parent(&tmp_lh, reference_point,
-+ ZNODE_WRITE_LOCK);
-+ if (result != 0) {
-+ ; /* nothing */
-+ } else if (znode_get_level(tmp_lh.node) == 0) {
-+ assert("nikita-1347", znode_above_root(tmp_lh.node));
-+ result = add_new_root(level, node, tmp_lh.node);
-+ if (result == 0) {
-+ reference_point = level->new_root;
-+ move_lh(&lh, &node->lock_handle);
-+ }
-+ } else if ((level->new_root != NULL)
-+ && (level->new_root !=
-+ znode_parent_nolock(reference_point))) {
-+			/* parent of node exists, but this level already
-+			   created a different new root, so */
-+ warning("nikita-1109",
-+ /* it should be "radicis", but tradition is
-+ tradition. do banshees read latin? */
-+ "hodie natus est radici frater");
-+ result = -EIO;
-+ } else {
-+ move_lh(&lh, &tmp_lh);
-+ reference_point = lh.node;
-+ }
-+ }
-+ if (node->left && (result == 0)) {
-+ assert("nikita-1183", node->parent);
-+ assert("nikita-883", reference_point != NULL);
-+ result =
-+ reiser4_get_left_neighbor(&tmp_lh, reference_point,
-+ ZNODE_WRITE_LOCK,
-+ GN_CAN_USE_UPPER_LEVELS);
-+ if (result == 0) {
-+ done_lh(&lh);
-+ move_lh(&lh, &tmp_lh);
-+ reference_point = lh.node;
-+ }
-+ }
-+ if (!node->parent && !node->left && !node->left_before) {
-+ result =
-+ longterm_lock_znode(&lh, reference_point, ZNODE_WRITE_LOCK,
-+ ZNODE_LOCK_HIPRI);
-+ }
-+ if (result == 0) {
-+ move_lh(&node->lock_handle, &lh);
-+ result = lock_carry_node_tail(node);
-+ }
-+ done_lh(&tmp_lh);
-+ done_lh(&lh);
-+ return result;
-+}
-+
-+/* release a lock on &carry_node.
-+
-+   Release, if necessary, the lock on @node. This operation is the pair of
-+ lock_carry_node() and is idempotent: you can call it more than once on the
-+ same node.
-+
-+*/
-+static void
-+unlock_carry_node(carry_level * level,
-+ carry_node * node /* node to be released */ ,
-+ int failure /* 0 if node is unlocked due
-+ * to some error */ )
-+{
-+ znode *real_node;
-+
-+ assert("nikita-884", node != NULL);
-+
-+ real_node = reiser4_carry_real(node);
-+ /* pair to zload() in lock_carry_node_tail() */
-+ zrelse(real_node);
-+ if (node->unlock && (real_node != NULL)) {
-+ assert("nikita-899", real_node == node->lock_handle.node);
-+ longterm_unlock_znode(&node->lock_handle);
-+ }
-+ if (failure) {
-+ if (node->deallocate && (real_node != NULL)) {
-+ /* free node in bitmap
-+
-+ Prepare node for removal. Last zput() will finish
-+ with it.
-+ */
-+ ZF_SET(real_node, JNODE_HEARD_BANSHEE);
-+ }
-+ if (node->free) {
-+ assert("nikita-2177",
-+ list_empty_careful(&node->lock_handle.locks_link));
-+ assert("nikita-2112",
-+ list_empty_careful(&node->lock_handle.owners_link));
-+ reiser4_pool_free(&level->pool->node_pool,
-+ &node->header);
-+ }
-+ }
-+}
-+
-+/* fatal_carry_error() - all-catching error handling function
-+
-+   It is possible that carry faces an unrecoverable error, like the inability
-+   to insert a pointer at the internal level. Our simple solution is just to
-+   panic in this situation. More sophisticated things, like an attempt to
-+   remount the file-system as read-only, can be implemented without much
-+   difficulty.
-+
-+ It is believed, that:
-+
-+ 1. instead of panicking, all current transactions can be aborted, rolling
-+ the system back to a consistent state.
-+
-+Umm, if you simply panic without doing anything more at all, then all current
-+transactions are aborted and the system is rolled back to a consistent state,
-+by virtue of the design of the transactional mechanism. Well, wait, let's be
-+precise. If an internal node is corrupted on disk due to hardware failure,
-+then there may be no consistent state that can be rolled back to, so instead
-+we should say that it will rollback the transactions, which barring other
-+factors means rolling back to a consistent state.
-+
-+# Nikita: there is a subtle difference between panic and aborting
-+# transactions: machine doesn't reboot. Processes aren't killed. Processes
-+# not using reiser4 (not that we care about such processes), or using other
-+# reiser4 mounts (about them we do care) will simply continue to run. With
-+# some luck, even application using aborted file system can survive: it will
-+# get some error, like EBADF, from each file descriptor on failed file system,
-+# but applications that do care about tolerance will cope with this (squid
-+# will).
-+
-+It would be a nice feature though to support rollback without rebooting
-+followed by remount, but this can wait for later versions.
-+
-+ 2. once isolated transactions will be implemented it will be possible to
-+ roll back offending transaction.
-+
-+2. is additional code complexity of inconsistent value (it implies that a
-+broken tree should be kept in operation), so we must think about it more
-+before deciding if it should be done. -Hans
-+
-+*/
-+static void fatal_carry_error(carry_level * doing UNUSED_ARG /* carry level
-+ * where
-+ * unrecoverable
-+ * error
-+ * occurred */ ,
-+ int ecode/* error code */)
-+{
-+ assert("nikita-1230", doing != NULL);
-+ assert("nikita-1231", ecode < 0);
-+
-+ reiser4_panic("nikita-1232", "Carry failed: %i", ecode);
-+}
-+
-+/**
-+ * Add new root to the tree
-+ *
-+ * This function itself only manages changes in carry structures and delegates
-+ * all hard work (allocation of znode for new root, changes of parent and
-+ * sibling pointers) to the reiser4_add_tree_root().
-+ *
-+ * Locking: old tree root is locked by carry at this point. Fake znode is also
-+ * locked.
-+ */
-+static int add_new_root(carry_level * level,/* carry level in context of which
-+ * operation is performed */
-+ carry_node * node, /* carry node for existing root */
-+ znode * fake /* "fake" znode already locked by
-+ * us */)
-+{
-+ int result;
-+
-+ assert("nikita-1104", level != NULL);
-+ assert("nikita-1105", node != NULL);
-+
-+ assert("nikita-1403", znode_is_write_locked(node->node));
-+ assert("nikita-1404", znode_is_write_locked(fake));
-+
-+ /* trying to create new root. */
-+ /* @node is root and it's already locked by us. This
-+ means that nobody else can be trying to add/remove
-+ tree root right now.
-+ */
-+ if (level->new_root == NULL)
-+ level->new_root = reiser4_add_tree_root(node->node, fake);
-+ if (!IS_ERR(level->new_root)) {
-+ assert("nikita-1210", znode_is_root(level->new_root));
-+ node->deallocate = 1;
-+ result =
-+ longterm_lock_znode(&node->lock_handle, level->new_root,
-+ ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI);
-+ if (result == 0)
-+ zput(level->new_root);
-+ } else {
-+ result = PTR_ERR(level->new_root);
-+ level->new_root = NULL;
-+ }
-+ return result;
-+}
-+
-+/* allocate new znode and add the operation that inserts the
-+ pointer to it into the parent node into the todo level
-+
-+ Allocate new znode, add it into carry queue and post into @todo queue
-+ request to add pointer to new node into its parent.
-+
-+   This is a carry-related routine that calls reiser4_new_node() to allocate a new
-+ node.
-+*/
-+carry_node *add_new_znode(znode * brother /* existing left neighbor of new
-+ * node */ ,
-+ carry_node * ref /* carry node after which new
-+ * carry node is to be inserted
-+ * into queue. This affects
-+ * locking. */ ,
-+ carry_level * doing /* carry queue where new node is
-+ * to be added */ ,
-+ carry_level * todo /* carry queue where COP_INSERT
-+ * operation to add pointer to
-+				     * new node will be added */ )
-+{
-+ carry_node *fresh;
-+ znode *new_znode;
-+ carry_op *add_pointer;
-+ carry_plugin_info info;
-+
-+ assert("nikita-1048", brother != NULL);
-+ assert("nikita-1049", todo != NULL);
-+
-+	/* There are a lot of possible variations here: to what parent
-+ new node will be attached and where. For simplicity, always
-+ do the following:
-+
-+ (1) new node and @brother will have the same parent.
-+
-+ (2) new node is added on the right of @brother
-+
-+ */
-+
-+ fresh = reiser4_add_carry_skip(doing,
-+ ref ? POOLO_AFTER : POOLO_LAST, ref);
-+ if (IS_ERR(fresh))
-+ return fresh;
-+
-+ fresh->deallocate = 1;
-+ fresh->free = 1;
-+
-+ new_znode = reiser4_new_node(brother, znode_get_level(brother));
-+ if (IS_ERR(new_znode))
-+ /* @fresh will be deallocated automatically by error
-+ handling code in the caller. */
-+ return (carry_node *) new_znode;
-+
-+ /* new_znode returned znode with x_count 1. Caller has to decrease
-+ it. make_space() does. */
-+
-+ ZF_SET(new_znode, JNODE_ORPHAN);
-+ fresh->node = new_znode;
-+
-+ while (ZF_ISSET(reiser4_carry_real(ref), JNODE_ORPHAN)) {
-+ ref = carry_node_prev(ref);
-+ assert("nikita-1606", !carry_node_end(doing, ref));
-+ }
-+
-+ info.todo = todo;
-+ info.doing = doing;
-+ add_pointer = node_post_carry(&info, COP_INSERT,
-+ reiser4_carry_real(ref), 1);
-+ if (IS_ERR(add_pointer)) {
-+ /* no need to deallocate @new_znode here: it will be
-+ deallocated during carry error handling. */
-+ return (carry_node *) add_pointer;
-+ }
-+
-+ add_pointer->u.insert.type = COPT_CHILD;
-+ add_pointer->u.insert.child = fresh;
-+ add_pointer->u.insert.brother = brother;
-+	/* initially the new node spans an empty key range */
-+ write_lock_dk(znode_get_tree(brother));
-+ znode_set_ld_key(new_znode,
-+ znode_set_rd_key(new_znode,
-+ znode_get_rd_key(brother)));
-+ write_unlock_dk(znode_get_tree(brother));
-+ return fresh;
-+}
-+
-+/* DEBUGGING FUNCTIONS.
-+
-+ Probably we also should leave them on even when
-+ debugging is turned off to print dumps at errors.
-+*/
-+#if REISER4_DEBUG
-+static int carry_level_invariant(carry_level * level, carry_queue_state state)
-+{
-+ carry_node *node;
-+ carry_node *tmp_node;
-+
-+ if (level == NULL)
-+ return 0;
-+
-+ if (level->track_type != 0 &&
-+ level->track_type != CARRY_TRACK_NODE &&
-+ level->track_type != CARRY_TRACK_CHANGE)
-+ return 0;
-+
-+ /* check that nodes are in ascending order */
-+ for_all_nodes(level, node, tmp_node) {
-+ znode *left;
-+ znode *right;
-+
-+ reiser4_key lkey;
-+ reiser4_key rkey;
-+
-+ if (node != carry_node_front(level)) {
-+ if (state == CARRY_TODO) {
-+ right = node->node;
-+ left = carry_node_prev(node)->node;
-+ } else {
-+ right = reiser4_carry_real(node);
-+ left = reiser4_carry_real(carry_node_prev(node));
-+ }
-+ if (right == NULL || left == NULL)
-+ continue;
-+ if (node_is_empty(right) || node_is_empty(left))
-+ continue;
-+ if (!keyle(leftmost_key_in_node(left, &lkey),
-+ leftmost_key_in_node(right, &rkey))) {
-+ warning("", "wrong key order");
-+ return 0;
-+ }
-+ }
-+ }
-+ return 1;
-+}
-+#endif
-+
-+/* get symbolic name for boolean */
-+static const char *tf(int boolean/* truth value */)
-+{
-+ return boolean ? "t" : "f";
-+}
-+
-+/* symbolic name for carry operation */
-+static const char *carry_op_name(carry_opcode op/* carry opcode */)
-+{
-+ switch (op) {
-+ case COP_INSERT:
-+ return "COP_INSERT";
-+ case COP_DELETE:
-+ return "COP_DELETE";
-+ case COP_CUT:
-+ return "COP_CUT";
-+ case COP_PASTE:
-+ return "COP_PASTE";
-+ case COP_UPDATE:
-+ return "COP_UPDATE";
-+ case COP_EXTENT:
-+ return "COP_EXTENT";
-+ case COP_INSERT_FLOW:
-+ return "COP_INSERT_FLOW";
-+ default:{
-+ /* not mt safe, but who cares? */
-+ static char buf[24];
-+
-+ /* snprintf: keeps "unknown op: " plus up to 8 hex digits in bounds */
-+ snprintf(buf, sizeof(buf), "unknown op: %x", op);
-+ return buf;
-+ }
-+ }
-+}
-+
-+/* dump information about carry node */
-+static void print_carry(const char *prefix /* prefix to print */ ,
-+ carry_node * node/* node to print */)
-+{
-+ if (node == NULL) {
-+ printk("%s: null\n", prefix);
-+ return;
-+ }
-+ printk
-+ ("%s: %p parent: %s, left: %s, unlock: %s, free: %s, dealloc: %s\n",
-+ prefix, node, tf(node->parent), tf(node->left), tf(node->unlock),
-+ tf(node->free), tf(node->deallocate));
-+}
-+
-+/* dump information about carry operation */
-+static void print_op(const char *prefix /* prefix to print */ ,
-+ carry_op * op/* operation to print */)
-+{
-+ if (op == NULL) {
-+ printk("%s: null\n", prefix);
-+ return;
-+ }
-+ printk("%s: %p carry_opcode: %s\n", prefix, op, carry_op_name(op->op));
-+ print_carry("\tnode", op->node);
-+ switch (op->op) {
-+ case COP_INSERT:
-+ case COP_PASTE:
-+ print_coord("\tcoord",
-+ op->u.insert.d ? op->u.insert.d->coord : NULL, 0);
-+ reiser4_print_key("\tkey",
-+ op->u.insert.d ? op->u.insert.d->key : NULL);
-+ print_carry("\tchild", op->u.insert.child);
-+ break;
-+ case COP_DELETE:
-+ print_carry("\tchild", op->u.delete.child);
-+ break;
-+ case COP_CUT:
-+ if (op->u.cut_or_kill.is_cut) {
-+ print_coord("\tfrom",
-+ op->u.cut_or_kill.u.kill->params.from, 0);
-+ print_coord("\tto", op->u.cut_or_kill.u.kill->params.to,
-+ 0);
-+ } else {
-+ print_coord("\tfrom",
-+ op->u.cut_or_kill.u.cut->params.from, 0);
-+ print_coord("\tto", op->u.cut_or_kill.u.cut->params.to,
-+ 0);
-+ }
-+ break;
-+ case COP_UPDATE:
-+ print_carry("\tleft", op->u.update.left);
-+ break;
-+ default:
-+ /* do nothing */
-+ break;
-+ }
-+}
-+
-+/* dump information about all nodes and operations in a @level */
-+static void print_level(const char *prefix /* prefix to print */ ,
-+ carry_level * level/* level to print */)
-+{
-+ carry_node *node;
-+ carry_node *tmp_node;
-+ carry_op *op;
-+ carry_op *tmp_op;
-+
-+ if (level == NULL) {
-+ printk("%s: null\n", prefix);
-+ return;
-+ }
-+ printk("%s: %p, restartable: %s\n",
-+ prefix, level, tf(level->restartable));
-+
-+ for_all_nodes(level, node, tmp_node)
-+ print_carry("\tcarry node", node);
-+ for_all_ops(level, op, tmp_op)
-+ print_op("\tcarry op", op);
-+}
-+
-+/* Make Linus happy.
-+ Local variables:
-+ c-indentation-style: "K&R"
-+ mode-name: "LC"
-+ c-basic-offset: 8
-+ tab-width: 8
-+ fill-column: 120
-+ scroll-step: 1
-+ End:
-+*/
-diff -urN linux-2.6.35.orig/fs/reiser4/carry.h linux-2.6.35/fs/reiser4/carry.h
---- linux-2.6.35.orig/fs/reiser4/carry.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.35/fs/reiser4/carry.h 2010-08-04 15:44:57.000000000 +0200
-@@ -0,0 +1,445 @@
-+/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
-+ reiser4/README */
-+
-+/* Functions and data types to "carry" tree modification(s) upward.
-+ See fs/reiser4/carry.c for details. */
-+
-+#if !defined(__FS_REISER4_CARRY_H__)
-+#define __FS_REISER4_CARRY_H__
-+
-+#include "forward.h"
-+#include "debug.h"
-+#include "pool.h"
-+#include "znode.h"
-+
-+#include <linux/types.h>
-+
-+/* &carry_node - "location" of carry node.
-+
-+ "location" of node that is involved or going to be involved into
-+ carry process. Node where operation will be carried to on the
-+ parent level cannot be recorded explicitly. Operation will be carried
-+ usually to the parent of some node (where changes are performed at
-+ the current level) or, to the left neighbor of its parent. But while
-+ modifications are performed at the current level, parent may
-+ change. So, we have to allow some indirection (or, positevly,
-+ flexibility) in locating carry nodes.
-+
-+*/
-+typedef struct carry_node {
-+ /* pool linkage */
-+ struct reiser4_pool_header header;
-+
-+ /* base node from which real_node is calculated. See
-+ fs/reiser4/carry.c:lock_carry_node(). */
-+ znode *node;
-+
-+ /* how to get ->real_node */
-+ /* to get ->real_node obtain parent of ->node */
-+ __u32 parent:1;
-+ /* to get ->real_node obtain left neighbor of parent of
-+ ->node */
-+ __u32 left:1;
-+ __u32 left_before:1;
-+
-+ /* locking */
-+
-+ /* this node was locked by carry process and should be
-+ unlocked when carry leaves a level */
-+ __u32 unlock:1;
-+
-+ /* disk block for this node was allocated by carry process and
-+ should be deallocated when carry leaves a level */
-+ __u32 deallocate:1;
-+ /* this carry node was allocated by carry process and should be
-+ freed when carry leaves a level */
-+ __u32 free:1;
-+
-+ /* type of lock we want to take on this node */
-+ lock_handle lock_handle;
-+} carry_node;
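
How the parent/left bits above combine is easiest to see sketched out. Per the field comments, ->left goes via the parent first and then to its left neighbor. The helpers parent_of() and left_of() below are hypothetical stand-ins; the real, locking-aware resolution is done by fs/reiser4/carry.c:lock_carry_node():

    /* sketch only: what the "real node" of a carry_node means.
     * The real code must walk under locks, which is why this
     * indirection exists at all. */
    static znode *resolve_real_node(carry_node *cn)
    {
            znode *n = cn->node;

            if (cn->parent || cn->left)
                    n = parent_of(n);  /* both flags go via the parent */
            if (cn->left)
                    n = left_of(n);    /* then its left neighbor */
            return n;
    }
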
-+
-+/* &carry_opcode - elementary operations that can be carried upward
-+
-+ Operations that carry() can handle. This list is supposed to be
-+ expanded.
-+
-+ Each carry operation (cop) is handled by appropriate function defined
-+ in fs/reiser4/carry.c. For example COP_INSERT is handled by
-+ fs/reiser4/carry.c:carry_insert() etc. These functions in turn
-+ call plugins of nodes affected by operation to modify nodes' content
-+ and to gather operations to be performed on the next level.
-+
-+*/
-+typedef enum {
-+ /* insert new item into node. */
-+ COP_INSERT,
-+ /* delete pointer from parent node */
-+ COP_DELETE,
-+ /* remove part of or whole node. */
-+ COP_CUT,
-+ /* increase size of item. */
-+ COP_PASTE,
-+ /* insert extent (that is sequence of unformatted nodes). */
-+ COP_EXTENT,
-+ /* update delimiting key in least common ancestor of two
-+ nodes. This is performed when items are moved between two
-+ nodes.
-+ */
-+ COP_UPDATE,
-+ /* insert flow */
-+ COP_INSERT_FLOW,
-+ COP_LAST_OP,
-+} carry_opcode;
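
The comment above says each opcode has a matching handler in fs/reiser4/carry.c (carry_insert() for COP_INSERT, and so on). One natural shape for that dispatch is a function-pointer table indexed by opcode; the sketch below extrapolates the handler names beyond carry_insert() and carry_extent() from the same naming pattern, so treat them as assumptions:

    typedef int (*carry_op_handler)(carry_op *op,
                                    carry_level *doing, carry_level *todo);

    /* COP_LAST_OP conveniently sizes the table */
    static carry_op_handler op_dispatch_table[COP_LAST_OP] = {
            [COP_INSERT]      = carry_insert,
            [COP_DELETE]      = carry_delete,
            [COP_CUT]         = carry_cut,
            [COP_PASTE]       = carry_paste,
            [COP_EXTENT]      = carry_extent,
            [COP_UPDATE]      = carry_update,
            [COP_INSERT_FLOW] = carry_insert_flow,
    };
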
-+
-+#define CARRY_FLOW_NEW_NODES_LIMIT 20
-+
-+/* mode (or subtype) of COP_{INSERT|PASTE} operation. Specifies how target
-+ item is determined. */
-+typedef enum {
-+ /* target item is one containing pointer to the ->child node */
-+ COPT_CHILD,
-+ /* target item is given explicitly by @coord */
-+ COPT_ITEM_DATA,
-+ /* target item is given by key */
-+ COPT_KEY,
-+ /* see insert_paste_common() for more comments on this. */
-+ COPT_PASTE_RESTARTED,
-+} cop_insert_pos_type;
-+
-+/* flags to cut and delete */
-+typedef enum {
-+ /* don't kill node even if it became completely empty as results of
-+ * cut. This is needed for eottl handling. See carry_extent() for
-+ * details. */
-+ DELETE_RETAIN_EMPTY = (1 << 0)
-+} cop_delete_flag;
-+
-+/*
-+ * carry() implements "lock handle tracking" feature.
-+ *
-+ * Callers supply carry with the node where the initial operation is to be
-+ * performed and a lock handle on that node. Trying to optimize node
-+ * utilization, carry may actually move the insertion point to a different
-+ * node. Callers expect that the lock handle will be transferred to the new
-+ * node as well.
-+ *
-+ */
-+typedef enum {
-+ /* transfer lock handle along with insertion point */
-+ CARRY_TRACK_CHANGE = 1,
-+ /* acquire new lock handle to the node where insertion point is. This
-+ * is used when carry() client doesn't initially possess lock handle
-+ * on the insertion point node, for example, by extent insertion
-+ * code. See carry_extent(). */
-+ CARRY_TRACK_NODE = 2
-+} carry_track_type;
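
A hedged sketch of what requesting tracking might look like on the caller's side. The ->track_type field is checked by carry_level_invariant() earlier in this patch; ->tracked is an assumed field name for the caller-supplied lock handle:

    /* sketch: ask carry to hand back a lock on whichever node the
     * insertion point finally lands in (CARRY_TRACK_NODE above) */
    static void request_node_tracking(carry_level *lowest, lock_handle *lh)
    {
            lowest->track_type = CARRY_TRACK_NODE;
            lowest->tracked = lh;   /* assumed field name */
    }
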
-+
-+/* data supplied to COP_{INSERT|PASTE} by callers */
-+typedef struct carry_insert_data {
-+ /* position where new item is to be inserted */
-+ coord_t *coord;
-+ /* new item description */
-+ reiser4_item_data *data;
-+ /* key of new item */
-+ const reiser4_key *key;
-+} carry_insert_data;
-+
-+/* cut and kill are similar, so carry_cut_data and carry_kill_data share the
-+ below structure of parameters */
-+struct cut_kill_params {
-+ /* coord where cut starts (inclusive) */
-+ coord_t *from;
-+ /* coord where cut stops (inclusive, this item/unit will also be
-+ * cut) */
-+ coord_t *to;
-+ /* starting key. This is necessary when item and unit pos don't
-+ * uniquely identify what portion of the tree to remove. For example,
-+ * this indicates what portion of an extent unit will be affected. */
-+ const reiser4_key *from_key;
-+ /* exclusive stop key */
-+ const reiser4_key *to_key;
-+ /* if this is not NULL, smallest actually removed key is stored
-+ * here. */
-+ reiser4_key *smallest_removed;
-+ /* kill_node_content() is called for file truncate */
-+ int truncate;
-+};
-+
-+struct carry_cut_data {
-+ struct cut_kill_params params;
-+};
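
For concreteness, a sketch of filling carry_cut_data for a plain (non-kill) cut over a key range. Coordinate setup is elided and the wrapper itself is hypothetical, but the field names are the ones defined above:

    static void setup_cut(struct carry_cut_data *cut,
                          coord_t *from, coord_t *to,
                          const reiser4_key *from_key,
                          const reiser4_key *to_key,
                          reiser4_key *smallest_removed)
    {
            cut->params.from = from;          /* first item/unit to cut */
            cut->params.to = to;              /* last one (inclusive) */
            cut->params.from_key = from_key;  /* disambiguates within a unit */
            cut->params.to_key = to_key;      /* exclusive stop key */
            cut->params.smallest_removed = smallest_removed; /* may be NULL */
            cut->params.truncate = 0;         /* not a file truncate */
    }
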
-+
-+struct carry_kill_data {
-+ struct cut_kill_params params;
-+ /* parameter to be passed to the ->kill_hook() method of item
-+ * plugin */
-+ /* void *iplug_params; */ /* FIXME: unused currently */
-+ /* if not NULL---inode whose items are being removed. This is needed
-+ * for ->kill_hook() of extent item to update VM structures when
-+ * removing pages. */
-+ struct inode *inode;
-+ /* sibling list maintenance is complicated by existence of eottl. When
-+ * eottl whose left and right neighbors are formatted leaves is
-+ * removed, one has to connect said leaves in the sibling list. This
-+ * cannot be done when extent removal is just started as locking rules
-+ * require sibling list update to happen atomically with removal of
-+ * extent item. Therefore: 1. pointers to left and right neighbors
-+ * have to be passed down to the ->kill_hook() of extent item, and
-+ * 2. said neighbors have to be locked. */
-+ lock_handle *left;
<Skipped 74614 lines>
================================================================
---- gitweb:
http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/6d4f4c8f10b9bb7c5a0dc02052b8b56ce84c385f