SOURCES: reiser4-2.6.13-mm3.patch (NEW) - updated

jpc jpc at pld-linux.org
Thu Sep 15 13:04:49 CEST 2005


Author: jpc                          Date: Thu Sep 15 11:04:49 2005 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- updated

---- Files affected:
SOURCES:
   reiser4-2.6.13-mm3.patch (NONE -> 1.1)  (NEW)

---- Diffs:

================================================================
Index: SOURCES/reiser4-2.6.13-mm3.patch
diff -u /dev/null SOURCES/reiser4-2.6.13-mm3.patch:1.1
--- /dev/null	Thu Sep 15 13:04:49 2005
+++ SOURCES/reiser4-2.6.13-mm3.patch	Thu Sep 15 13:04:44 2005
@@ -0,0 +1,95216 @@
+
+From: Hans Reiser <reiser at namesys.com>
+
+This patch adds a new operation to struct super_operations - sync_inodes -
+plus a generic implementation, and changes fs-writeback.c:sync_sb_inodes() to
+call the filesystem's sync_inodes if it is defined, or the generic
+implementation otherwise.  This new operation allows the filesystem to decide
+for itself what to flush.
+
+Reiser4 flushes dirty pages on the basis of atoms, not of inodes.
+sync_sb_inodes used to call the address space flushing method (writepages)
+for every dirty inode.  For reiser4 this meant committing atoms unnecessarily
+often, which turned into a substantial slowdown.  Having this method helped
+to fix that problem.
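+
+As a rough sketch (not part of this patch; example_sync_inodes and
+example_sops are hypothetical names), a filesystem would hook the new
+operation up like this:
+
+	static void example_sync_inodes(struct super_block *sb,
+					struct writeback_control *wbc)
+	{
+		/*
+		 * A real filesystem would flush its own unit of dirty
+		 * data here (reiser4: atoms); this sketch just falls
+		 * through to the generic per-inode walk.
+		 */
+		generic_sync_sb_inodes(sb, wbc);
+	}
+
+	static struct super_operations example_sops = {
+		.sync_inodes	= example_sync_inodes,
+	};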
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ fs/fs-writeback.c  |   14 ++++++++++++--
+ include/linux/fs.h |    3 +++
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+diff -puN fs/fs-writeback.c~reiser4-sb_sync_inodes fs/fs-writeback.c
+--- devel/fs/fs-writeback.c~reiser4-sb_sync_inodes	2005-08-30 18:46:01.000000000 -0700
++++ devel-akpm/fs/fs-writeback.c	2005-08-30 18:46:01.000000000 -0700
+@@ -300,8 +300,8 @@ __writeback_single_inode(struct inode *i
+  * on the writer throttling path, and we get decent balancing between many
+  * throttled threads: we don't want them all piling up on __wait_on_inode.
+  */
+-static void
+-sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
++void
++generic_sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
+ {
+ 	const unsigned long start = jiffies;	/* livelock avoidance */
+ 
+@@ -386,6 +386,16 @@ sync_sb_inodes(struct super_block *sb, s
+ 	}
+ 	return;		/* Leave any unwritten inodes on s_io */
+ }
++EXPORT_SYMBOL(generic_sync_sb_inodes);
++
++static void
++sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
++{
++	if (sb->s_op->sync_inodes)
++		sb->s_op->sync_inodes(sb, wbc);
++	else
++		generic_sync_sb_inodes(sb, wbc);
++}
+ 
+ /*
+  * Start writeback of dirty pagecache data against all unlocked inodes.
+diff -puN include/linux/fs.h~reiser4-sb_sync_inodes include/linux/fs.h
+--- devel/include/linux/fs.h~reiser4-sb_sync_inodes	2005-08-30 18:46:01.000000000 -0700
++++ devel-akpm/include/linux/fs.h	2005-08-30 18:46:01.000000000 -0700
+@@ -1032,6 +1032,8 @@ struct super_operations {
+ 	void (*clear_inode) (struct inode *);
+ 	void (*umount_begin) (struct super_block *);
+ 
++	void (*sync_inodes) (struct super_block *sb,
++				struct writeback_control *wbc);
+ 	int (*show_options)(struct seq_file *, struct vfsmount *);
+ 
+ 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+@@ -1381,6 +1383,7 @@ extern int invalidate_inode_pages2(struc
+ extern int invalidate_inode_pages2_range(struct address_space *mapping,
+ 					 pgoff_t start, pgoff_t end);
+ extern int write_inode_now(struct inode *, int);
++extern void generic_sync_sb_inodes(struct super_block *, struct writeback_control *);
+ extern int filemap_fdatawrite(struct address_space *);
+ extern int filemap_flush(struct address_space *);
+ extern int filemap_fdatawait(struct address_space *);
+_
+
+From: Vladimir Saveliev <vs at namesys.com>
+
+This patch makes generic_sync_sb_inodes take the inode_lock spinlock itself.
+This helps reiser4 get rid of some oddities.
+
+sync_sb_inodes is always called like:
+	spin_lock(&inode_lock);
+	sync_sb_inodes(sb, wbc);
+	spin_unlock(&inode_lock);
+This patch moves spin_lock/spin_unlock down to sync_sb_inodes.
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ fs/fs-writeback.c |   12 ++++--------
+ 1 files changed, 4 insertions(+), 8 deletions(-)
+
+diff -puN fs/fs-writeback.c~reiser4-sb_sync_inodes-cleanup fs/fs-writeback.c
+--- 25/fs/fs-writeback.c~reiser4-sb_sync_inodes-cleanup	Mon Jun 13 15:00:14 2005
++++ 25-akpm/fs/fs-writeback.c	Mon Jun 13 15:00:14 2005
+@@ -283,8 +283,6 @@ __writeback_single_inode(struct inode *i
+  * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
+  * that it can be located for waiting on in __writeback_single_inode().
+  *
+- * Called under inode_lock.
+- *
+  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
+  * This function assumes that the blockdev superblock's inodes are backed by
+  * a variety of queues, so all inodes are searched.  For other superblocks,
+@@ -305,6 +303,8 @@ generic_sync_sb_inodes(struct super_bloc
+ {
+ 	const unsigned long start = jiffies;	/* livelock avoidance */
+ 
++	spin_lock(&inode_lock);
++
+ 	if (!wbc->for_kupdate || list_empty(&sb->s_io))
+ 		list_splice_init(&sb->s_dirty, &sb->s_io);
+ 
+@@ -384,6 +384,7 @@ generic_sync_sb_inodes(struct super_bloc
+ 		if (wbc->nr_to_write <= 0)
+ 			break;
+ 	}
++	spin_unlock(&inode_lock);
+ 	return;		/* Leave any unwritten inodes on s_io */
+ }
+ EXPORT_SYMBOL(generic_sync_sb_inodes);
+@@ -436,11 +437,8 @@ restart:
+ 			 * be unmounted by the time it is released.
+ 			 */
+ 			if (down_read_trylock(&sb->s_umount)) {
+-				if (sb->s_root) {
+-					spin_lock(&inode_lock);
++				if (sb->s_root)
+ 					sync_sb_inodes(sb, wbc);
+-					spin_unlock(&inode_lock);
+-				}
+ 				up_read(&sb->s_umount);
+ 			}
+ 			spin_lock(&sb_lock);
+@@ -476,9 +474,7 @@ void sync_inodes_sb(struct super_block *
+ 			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
+ 			nr_dirty + nr_unstable;
+ 	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
+-	spin_lock(&inode_lock);
+ 	sync_sb_inodes(sb, &wbc);
+-	spin_unlock(&inode_lock);
+ }
+ 
+ /*
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+This patch creates truncate_inode_pages_range from truncate_inode_pages.
+truncate_inode_pages becomes a one-liner call to truncate_inode_pages_range.
+
+Reiser4 needs truncate_inode_pages_range because it tries to keep a
+correspondence between the existence of metadata pointing to data pages and
+the pages that metadata points to.  So, when the metadata for a certain part
+of a file is removed from the filesystem tree, only the pages of the
+corresponding range are to be truncated.
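+
+As a usage sketch (not from the patch; the helper and its arguments are
+hypothetical), dropping the pages for a whole-page byte range looks roughly
+like this - note that the BUG_ON() below requires lend to be the last byte
+of a page:
+
+	static void example_truncate_page_range(struct inode *inode,
+						pgoff_t first, pgoff_t last)
+	{
+		loff_t lstart = (loff_t)first << PAGE_CACHE_SHIFT;
+		loff_t lend = ((loff_t)(last + 1) << PAGE_CACHE_SHIFT) - 1;
+
+		/* removes pages first..last; lstart is page aligned, so
+		 * no partial page gets zeroed */
+		truncate_inode_pages_range(inode->i_mapping, lstart, lend);
+	}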
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ include/linux/mm.h |    2 ++
+ mm/truncate.c      |   44 +++++++++++++++++++++++++++++++++++++-------
+ 2 files changed, 39 insertions(+), 7 deletions(-)
+
+diff -puN include/linux/mm.h~reiser4-truncate_inode_pages_range include/linux/mm.h
+--- devel/include/linux/mm.h~reiser4-truncate_inode_pages_range	2005-09-03 16:20:02.000000000 -0700
++++ devel-akpm/include/linux/mm.h	2005-09-03 16:20:02.000000000 -0700
+@@ -871,6 +871,8 @@ extern unsigned long do_brk(unsigned lon
+ /* filemap.c */
+ extern unsigned long page_unuse(struct page *);
+ extern void truncate_inode_pages(struct address_space *, loff_t);
++extern void truncate_inode_pages_range(struct address_space *,
++				       loff_t lstart, loff_t lend);
+ 
+ /* generic vm_area_ops exported for stackable file systems */
+ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
+diff -puN mm/truncate.c~reiser4-truncate_inode_pages_range mm/truncate.c
+--- devel/mm/truncate.c~reiser4-truncate_inode_pages_range	2005-09-03 16:20:02.000000000 -0700
++++ devel-akpm/mm/truncate.c	2005-09-03 16:20:02.000000000 -0700
+@@ -91,12 +91,15 @@ invalidate_complete_page(struct address_
+ }
+ 
+ /**
+- * truncate_inode_pages - truncate *all* the pages from an offset
++ * truncate_inode_pages - truncate range of pages specified by start and
++ * end byte offsets
+  * @mapping: mapping to truncate
+  * @lstart: offset from which to truncate
++ * @lend: offset to which to truncate
+  *
+- * Truncate the page cache at a set offset, removing the pages that are beyond
+- * that offset (and zeroing out partial pages).
++ * Truncate the page cache, removing the pages that are between
++ * specified offsets (and zeroing out partial page
++ * (if lstart is not page aligned)).
+  *
+  * Truncate takes two passes - the first pass is nonblocking.  It will not
+  * block on page locks and it will not block on writeback.  The second pass
+@@ -110,12 +113,12 @@ invalidate_complete_page(struct address_
+  * We pass down the cache-hot hint to the page freeing code.  Even if the
+  * mapping is large, it is probably the case that the final pages are the most
+  * recently touched, and freeing happens in ascending file offset order.
+- *
+- * Called under (and serialised by) inode->i_sem.
+  */
+-void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
++void truncate_inode_pages_range(struct address_space *mapping,
++				loff_t lstart, loff_t lend)
+ {
+ 	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
++	pgoff_t end;
+ 	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+ 	struct pagevec pvec;
+ 	pgoff_t next;
+@@ -124,13 +127,22 @@ void truncate_inode_pages(struct address
+ 	if (mapping->nrpages == 0)
+ 		return;
+ 
++	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
++	end = (lend >> PAGE_CACHE_SHIFT);
++
+ 	pagevec_init(&pvec, 0);
+ 	next = start;
+-	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
++	while (next <= end &&
++	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+ 			pgoff_t page_index = page->index;
+ 
++			if (page_index > end) {
++				next = page_index;
++				break;
++			}
++
+ 			if (page_index > next)
+ 				next = page_index;
+ 			next++;
+@@ -166,9 +178,15 @@ void truncate_inode_pages(struct address
+ 			next = start;
+ 			continue;
+ 		}
++		if (pvec.pages[0]->index > end) {
++			pagevec_release(&pvec);
++			break;
++		}
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+ 
++			if (page->index > end)
++				break;
+ 			lock_page(page);
+ 			wait_on_page_writeback(page);
+ 			if (page->index > next)
+@@ -180,7 +198,19 @@ void truncate_inode_pages(struct address
+ 		pagevec_release(&pvec);
+ 	}
+ }
++EXPORT_SYMBOL(truncate_inode_pages_range);
+ 
++/**
++ * truncate_inode_pages - truncate *all* the pages from an offset
++ * @mapping: mapping to truncate
++ * @lstart: offset from which to truncate
++ *
++ * Called under (and serialised by) inode->i_sem.
++ */
++void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
++{
++	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
++}
+ EXPORT_SYMBOL(truncate_inode_pages);
+ 
+ /**
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+Reiser4 adds and removes pages to/from the address space itself, so it needs
+
+remove_from_page_cache
+__remove_from_page_cache
+add_to_page_cache_lru
+
+to be EXPORT_SYMBOL-ed.
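+
+A rough sketch of how a module would use these exports (not from the patch;
+the helper names and the "metadata_mapping" argument are hypothetical):
+
+	static int example_insert_page(struct page *page,
+				       struct address_space *metadata_mapping,
+				       pgoff_t index)
+	{
+		/* adds the page to the radix tree and to the LRU */
+		return add_to_page_cache_lru(page, metadata_mapping,
+					     index, GFP_KERNEL);
+	}
+
+	static void example_drop_page(struct page *page)
+	{
+		lock_page(page);
+		remove_from_page_cache(page);
+		unlock_page(page);
+		/* drop the reference the page cache was holding */
+		page_cache_release(page);
+	}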
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ mm/filemap.c |    3 +++
+ 1 files changed, 3 insertions(+)
+
+diff -puN mm/filemap.c~reiser4-export-remove_from_page_cache mm/filemap.c
+--- devel/mm/filemap.c~reiser4-export-remove_from_page_cache	2005-07-08 23:11:51.000000000 -0700
++++ devel-akpm/mm/filemap.c	2005-07-08 23:11:51.000000000 -0700
+@@ -113,6 +113,7 @@ void __remove_from_page_cache(struct pag
+ 	mapping->nrpages--;
+ 	pagecache_acct(-1);
+ }
++EXPORT_SYMBOL(__remove_from_page_cache);
+ 
+ void remove_from_page_cache(struct page *page)
+ {
+@@ -124,6 +125,7 @@ void remove_from_page_cache(struct page 
+ 	__remove_from_page_cache(page);
+ 	write_unlock_irq(&mapping->tree_lock);
+ }
++EXPORT_SYMBOL(remove_from_page_cache);
+ 
+ static int sync_page(void *word)
+ {
+@@ -404,6 +406,7 @@ int add_to_page_cache_lru(struct page *p
+ 		lru_cache_add(page);
+ 	return ret;
+ }
++EXPORT_SYMBOL(add_to_page_cache_lru);
+ 
+ /*
+  * In order to wait for pages to become available there must be
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+Reiser4 implements read without the help of generic_file_read, but it uses
+page_cache_readahead.  So, this patch makes page_cache_readahead an
+EXPORT_SYMBOL.  Future filesystems may find this patch useful as well.
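+
+A sketch of the intended use in a filesystem's own read path (not from the
+patch; example_get_page and its arguments are hypothetical):
+
+	static struct page *example_get_page(struct file *file, pgoff_t index,
+					     unsigned long nr_pages)
+	{
+		struct address_space *mapping = file->f_mapping;
+		struct page *page;
+
+		/* start readahead around the window we are about to read */
+		page_cache_readahead(mapping, &file->f_ra, file,
+				     index, nr_pages);
+
+		page = find_get_page(mapping, index);
+		if (page == NULL)
+			/* tell the readahead state machine about the miss */
+			handle_ra_miss(mapping, &file->f_ra, index);
+		return page;
+	}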
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ mm/readahead.c |    2 ++
+ 1 files changed, 2 insertions(+)
+
+diff -puN mm/readahead.c~reiser4-export-page_cache_readahead mm/readahead.c
+--- devel/mm/readahead.c~reiser4-export-page_cache_readahead	2005-08-06 15:35:09.000000000 -0700
++++ devel-akpm/mm/readahead.c	2005-08-06 15:35:09.000000000 -0700
+@@ -526,6 +526,7 @@ page_cache_readahead(struct address_spac
+ out:
+ 	return ra->prev_page + 1;
+ }
++EXPORT_SYMBOL(page_cache_readahead);
+ 
+ /*
+  * handle_ra_miss() is called when it is known that a page which should have
+@@ -543,6 +544,7 @@ void handle_ra_miss(struct address_space
+ 	ra->flags &= ~RA_FLAG_INCACHE;
+ 	ra->cache_hit = 0;
+ }
++EXPORT_SYMBOL(handle_ra_miss);
+ 
+ /*
+  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+reiser4 keeps its meta-data pages in the page cache, attached to a special
+"fake" inode.  Meta-data blocks have "znodes" attached to them (the reiser4
+analog of a buffer_head) and initially don't have real disk block numbers
+assigned.  Later, meta-data blocks can be "relocated" to decrease
+fragmentation.  As a result, their pages cannot be easily indexed by block
+number.  Instead, reiser4 indexes the pages of the fake inode by some function
+of the znode address.  This looks weird, but it works.  The only problem is
+that there is a race involving ->releasepage(): there is a window in which a
+znode has already been freed by reiser4_releasepage(), but its page still
+exists (albeit locked).  If at this moment another znode is allocated at the
+same address as the one just destroyed, then some other thread can acquire a
+reference to the lingering page (because it is indexed by the address of the
+znode), and prevent shrink_list() from freeing it.
+
+To avoid this, reiser4_releasepage() removes the page from the radix tree
+manually.  This requires re-checking page->mapping after calling
+try_to_release_page().
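+
+For illustration only (this is not reiser4's actual code, and
+free_private_metadata() is a hypothetical helper), a ->releasepage() of this
+kind looks roughly like:
+
+	static int example_releasepage(struct page *page, int gfp_mask)
+	{
+		/* the caller (try_to_release_page) holds the page lock */
+		if (!free_private_metadata(page))
+			return 0;
+
+		ClearPagePrivate(page);
+		/* drop the page from the radix tree ourselves ... */
+		remove_from_page_cache(page);
+		/* ... and release the reference the page cache held, so
+		 * callers must re-check page_mapping() afterwards */
+		page_cache_release(page);
+		return 1;
+	}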
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ mm/truncate.c |    7 +++++++
+ mm/vmscan.c   |    5 +++++
+ 2 files changed, 12 insertions(+)
+
+diff -puN mm/truncate.c~reiser4-reget-page-mapping mm/truncate.c
+--- 25/mm/truncate.c~reiser4-reget-page-mapping	2005-06-22 21:58:18.000000000 -0700
++++ 25-akpm/mm/truncate.c	2005-06-22 21:58:18.000000000 -0700
+@@ -76,6 +76,13 @@ invalidate_complete_page(struct address_
+ 	if (PagePrivate(page) && !try_to_release_page(page, 0))
+ 		return 0;
+ 
++	/*
++	 * file system may manually remove page from the page
++	 * cache in ->releasepage(). Check for this.
++	 */
++	if (page->mapping != mapping)
++		return 0;
++
+ 	write_lock_irq(&mapping->tree_lock);
+ 	if (PageDirty(page)) {
+ 		write_unlock_irq(&mapping->tree_lock);
+diff -puN mm/vmscan.c~reiser4-reget-page-mapping mm/vmscan.c
+--- 25/mm/vmscan.c~reiser4-reget-page-mapping	2005-06-22 21:58:18.000000000 -0700
++++ 25-akpm/mm/vmscan.c	2005-06-22 21:58:18.000000000 -0700
+@@ -497,6 +497,11 @@ static int shrink_list(struct list_head 
+ 		if (PagePrivate(page)) {
+ 			if (!try_to_release_page(page, sc->gfp_mask))
+ 				goto activate_locked;
++			/*
++			 * file system may manually remove page from the page
++			 * cache in ->releasepage(). Check for this.
++			 */
++			mapping = page_mapping(page);
+ 			if (!mapping && page_count(page) == 1)
+ 				goto free_it;
+ 		}
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+This patch is by Dipankar Sarma <dipankar at in.ibm.com>.  His description:
+
+  This patch introduces a new interface - rcu_barrier() - which waits until
+  all the RCU callbacks queued up to this call have completed.
+
+Reiser4 needs this patch because we do more than just free a memory object in
+our RCU callback: we also remove it from a list hanging off the super-block.
+This means that before freeing the reiser4-specific portion of the super-block
+(during umount) we have to wait until all pending RCU callbacks have executed.
+
+The only change reiser4 made to the original patch is the export of
+rcu_barrier().
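+
+The intended use, as a sketch (not from the patch; example_sb_info and
+example_put_super are hypothetical names):
+
+	static void example_put_super(struct super_block *sb)
+	{
+		struct example_sb_info *info = sb->s_fs_info;
+
+		/*
+		 * RCU callbacks still in flight may walk lists hanging
+		 * off "info"; rcu_barrier() waits until every callback
+		 * queued so far has run before we free it.
+		 */
+		rcu_barrier();
+		kfree(info);
+		sb->s_fs_info = NULL;
+	}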
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ include/linux/rcupdate.h |    2 ++
+ kernel/rcupdate.c        |   41 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 43 insertions(+)
+
+diff -puN include/linux/rcupdate.h~reiser4-rcu-barrier include/linux/rcupdate.h
+--- devel/include/linux/rcupdate.h~reiser4-rcu-barrier	2005-08-21 23:49:42.000000000 -0700
++++ devel-akpm/include/linux/rcupdate.h	2005-08-21 23:49:42.000000000 -0700
+@@ -99,6 +99,7 @@ struct rcu_data {
+ 	struct rcu_head *donelist;
+ 	struct rcu_head **donetail;
+ 	int cpu;
++	struct rcu_head barrier;
+ };
+ 
+ DECLARE_PER_CPU(struct rcu_data, rcu_data);
+@@ -283,6 +284,7 @@ extern void FASTCALL(call_rcu_bh(struct 
+ extern __deprecated_for_modules void synchronize_kernel(void);
+ extern void synchronize_rcu(void);
+ void synchronize_idle(void);
++extern void rcu_barrier(void);
+ 
+ #endif /* __KERNEL__ */
+ #endif /* __LINUX_RCUPDATE_H */
+diff -puN kernel/rcupdate.c~reiser4-rcu-barrier kernel/rcupdate.c
+--- devel/kernel/rcupdate.c~reiser4-rcu-barrier	2005-08-21 23:49:42.000000000 -0700
++++ devel-akpm/kernel/rcupdate.c	2005-08-21 23:49:42.000000000 -0700
+@@ -112,6 +112,10 @@ void fastcall call_rcu(struct rcu_head *
+ 	local_irq_restore(flags);
+ }
+ 
++static atomic_t rcu_barrier_cpu_count;
++static struct semaphore rcu_barrier_sema;
++static struct completion rcu_barrier_completion;
++
+ /**
+  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+  * @head: structure to be used for queueing the RCU updates.
+@@ -143,6 +147,42 @@ void fastcall call_rcu_bh(struct rcu_hea
+ 	local_irq_restore(flags);
+ }
+ 
++static void rcu_barrier_callback(struct rcu_head *notused)
++{
++	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
++		complete(&rcu_barrier_completion);
++}
++
++/*
++ * Called with preemption disabled, and from cross-cpu IRQ context.
++ */
++static void rcu_barrier_func(void *notused)
++{
++	int cpu = smp_processor_id();
++	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
++	struct rcu_head *head;
++
++	head = &rdp->barrier;
++	atomic_inc(&rcu_barrier_cpu_count);
++	call_rcu(head, rcu_barrier_callback);
++}
++
++/**
++ * rcu_barrier - Wait until all the in-flight RCUs are complete.
++ */
++void rcu_barrier(void)
++{
++	BUG_ON(in_interrupt());
++	/* Take cpucontrol semaphore to protect against CPU hotplug */
++	down(&rcu_barrier_sema);
++	init_completion(&rcu_barrier_completion);
++	atomic_set(&rcu_barrier_cpu_count, 0);
++	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
++	wait_for_completion(&rcu_barrier_completion);
++	up(&rcu_barrier_sema);
++}
++EXPORT_SYMBOL(rcu_barrier);
++
+ /*
+  * Invoke the completed RCU callbacks. They are expected to be in
+  * a per-cpu list.
+@@ -437,6 +477,7 @@ static struct notifier_block __devinitda
+  */
+ void __init rcu_init(void)
+ {
++	sema_init(&rcu_barrier_sema, 1);
+ 	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+ 			(void *)(long)smp_processor_id());
+ 	/* Register notifier for non-boot CPUs */
+_
+
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ kernel/rcupdate.c |    2 +-
+ 1 files changed, 1 insertion(+), 1 deletion(-)
+
+diff -puN kernel/rcupdate.c~reiser4-rcu-barrier-license-fix kernel/rcupdate.c
+--- devel/kernel/rcupdate.c~reiser4-rcu-barrier-license-fix	2005-08-21 23:49:42.000000000 -0700
++++ devel-akpm/kernel/rcupdate.c	2005-08-21 23:49:42.000000000 -0700
+@@ -181,7 +181,7 @@ void rcu_barrier(void)
+ 	wait_for_completion(&rcu_barrier_completion);
+ 	up(&rcu_barrier_sema);
+ }
+-EXPORT_SYMBOL(rcu_barrier);
++EXPORT_SYMBOL_GPL(rcu_barrier);
+ 
+ /*
+  * Invoke the completed RCU callbacks. They are expected to be in
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+This patch exports __pagevec_release and pagevec_lookup_tag, as it looks like
+they are intended to be external symbols.
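+
+A sketch of how a module might use the two exports together (not from the
+patch; example_walk_dirty_pages is a hypothetical helper):
+
+	static void example_walk_dirty_pages(struct address_space *mapping)
+	{
+		struct pagevec pvec;
+		pgoff_t index = 0;
+
+		pagevec_init(&pvec, 0);
+		while (pagevec_lookup_tag(&pvec, mapping, &index,
+					  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
+			/* process pvec.pages[0..pagevec_count(&pvec)-1] here */
+			__pagevec_release(&pvec);
+		}
+	}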
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ mm/swap.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletion(-)
+
+diff -puN mm/swap.c~reiser4-export-pagevec-funcs mm/swap.c
+--- 25/mm/swap.c~reiser4-export-pagevec-funcs	Mon Jun 13 15:02:57 2005
++++ 25-akpm/mm/swap.c	Mon Jun 13 15:02:57 2005
+@@ -258,6 +258,7 @@ void __pagevec_release(struct pagevec *p
+ 	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
+ 	pagevec_reinit(pvec);
+ }
++EXPORT_SYMBOL(__pagevec_release);
+ 
+ /*
+  * pagevec_release() for pages which are known to not be on the LRU
+@@ -387,7 +388,7 @@ unsigned pagevec_lookup_tag(struct pagev
+ 					nr_pages, pvec->pages);
+ 	return pagevec_count(pvec);
+ }
+-
++EXPORT_SYMBOL(pagevec_lookup_tag);
+ 
+ #ifdef CONFIG_SMP
+ /*
+_
+
+From: Hans Reiser <reiser at namesys.com>
+
+
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+---
+
+ lib/radix-tree.c |    1 +
+ 1 files changed, 1 insertion(+)
+
+diff -puN lib/radix-tree.c~reiser4-export-radix_tree_preload lib/radix-tree.c
+--- devel/lib/radix-tree.c~reiser4-export-radix_tree_preload	2005-08-30 18:46:02.000000000 -0700
++++ devel-akpm/lib/radix-tree.c	2005-08-30 18:46:02.000000000 -0700
+@@ -134,6 +134,7 @@ int radix_tree_preload(int gfp_mask)
+ out:
+ 	return ret;
+ }
++EXPORT_SYMBOL(radix_tree_preload);
<<Diff was trimmed, longer than 597 lines>>


