[packages/kernel/LINUX_4_19] - rel 2; revert mm behaviour to pre 4.19.3 state until mm and fs people solve this

arekm arekm at pld-linux.org
Wed Jan 30 16:54:56 CET 2019


commit 48e8ffca628a8a3614d479fc86fe0b9c71678cf6
Author: Arkadiusz Miśkiewicz <arekm at maven.pl>
Date:   Wed Jan 30 16:54:46 2019 +0100

    - rel 2; revert mm behaviour to pre 4.19.3 state until mm and fs people solve this

 kernel-small_fixes.patch | 92 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel.spec              |  2 +-
 2 files changed, 93 insertions(+), 1 deletion(-)
---
diff --git a/kernel.spec b/kernel.spec
index c0767cce..ad3eac14 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -68,7 +68,7 @@
 %define		have_pcmcia	0
 %endif
 
-%define		rel		1
+%define		rel		2
 %define		basever		4.19
 %define		postver		.18
 
diff --git a/kernel-small_fixes.patch b/kernel-small_fixes.patch
index eb20ac48..5fcf48ab 100644
--- a/kernel-small_fixes.patch
+++ b/kernel-small_fixes.patch
@@ -224,3 +224,95 @@ index f0e8cd9..059e617 100644
 -- 
 1.8.3.1
 
+From: Dave Chinner <dchinner at redhat.com>
+
+This reverts commit a76cf1a474d7dbcd9336b5f5afb0162baa142cf0.
+
+This change causes serious changes to page cache and inode cache
+behaviour and balance, resulting in major performance regressions
+when combining workloads such as large file copies and kernel
+compiles.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=202441
+
+This change is a hack to work around the problems introduced by
+changing how aggressive shrinkers are on small caches in commit
+172b06c32b94 ("mm: slowly shrink slabs with a relatively small
+number of objects"). It creates more problems than it solves, wasn't
+adequately reviewed or tested, so it needs to be reverted.
+
+cc: <stable at vger.kernel.org>
+Signed-off-by: Dave Chinner <dchinner at redhat.com>
+---
+ fs/inode.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/fs/inode.c b/fs/inode.c
+index 0cd47fe0dbe5..73432e64f874 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 		return LRU_REMOVED;
+ 	}
+ 
+-	/*
+-	 * Recently referenced inodes and inodes with many attached pages
+-	 * get one more pass.
+-	 */
+-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
++	/* recently referenced inodes get one more pass */
++	if (inode->i_state & I_REFERENCED) {
+ 		inode->i_state &= ~I_REFERENCED;
+ 		spin_unlock(&inode->i_lock);
+ 		return LRU_ROTATE;
+-- 
+2.20.1
+
+This reverts commit 172b06c32b949759fe6313abec514bc4f15014f4.
+
+This change changes the aggressiveness of shrinker reclaim, causing
+small cache and low priority reclaim to greatly increase
+scanning pressure on small caches. As a result, light memory
+pressure has a disproportionate effect on small caches, and causes
+large caches to be reclaimed much faster than previously.
+
+As a result, it greatly perturbs the delicate balance of the VFS
+caches (dentry/inode vs file page cache) such that the inode/dentry
+caches are reclaimed much, much faster than the page cache and this
+drives us into several other caching imbalance related problems.
+
+As such, this is a bad change and needs to be reverted.
+
+[ Needs some massaging to retain the later seekless shrinker
+modifications. ]
+
+cc: <stable at vger.kernel.org>
+Signed-off-by: Dave Chinner <dchinner at redhat.com>
+---
+ mm/vmscan.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index a714c4f800e9..e979705bbf32 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ 		delta = freeable / 2;
+ 	}
+ 
+-	/*
+-	 * Make sure we apply some minimal pressure on default priority
+-	 * even on small cgroups. Stale objects are not only consuming memory
+-	 * by themselves, but can also hold a reference to a dying cgroup,
+-	 * preventing it from being reclaimed. A dying cgroup with all
+-	 * corresponding structures like per-cpu stats and kmem caches
+-	 * can be really big, so it may lead to a significant waste of memory.
+-	 */
+-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+-
+ 	total_scan += delta;
+ 	if (total_scan < 0) {
+ 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
+-- 
+2.20.1
+
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/48e8ffca628a8a3614d479fc86fe0b9c71678cf6



More information about the pld-cvs-commit mailing list