/* version 5 superblocks support inode version counters. */
if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
-From: Dave Chinner <dchinner@redhat.com>
-
-This reverts commit a76cf1a474d7dbcd9336b5f5afb0162baa142cf0.
-
-This change causes serious changes to page cache and inode cache
-behaviour and balance, resulting in major performance regressions
-when combining workloads such as large file copies and kernel
-compiles.
-
-https://bugzilla.kernel.org/show_bug.cgi?id=202441
-
-This change is a hack to work around the problems introduced by
-changing how aggressive shrinkers are on small caches in commit
-172b06c32b94 ("mm: slowly shrink slabs with a relatively small
-number of objects"). It creates more problems than it solves, wasn't
-adequately reviewed or tested, so it needs to be reverted.
-
-cc: <stable@vger.kernel.org>
-Signed-off-by: Dave Chinner <dchinner@redhat.com>
----
- fs/inode.c | 7 ++-----
- 1 file changed, 2 insertions(+), 5 deletions(-)
-
-diff --git a/fs/inode.c b/fs/inode.c
-index 0cd47fe0dbe5..73432e64f874 100644
---- a/fs/inode.c
-+++ b/fs/inode.c
-@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
- return LRU_REMOVED;
- }
-
-- /*
-- * Recently referenced inodes and inodes with many attached pages
-- * get one more pass.
-- */
-- if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
-+ /* recently referenced inodes get one more pass */
-+ if (inode->i_state & I_REFERENCED) {
- inode->i_state &= ~I_REFERENCED;
- spin_unlock(&inode->i_lock);
- return LRU_ROTATE;
---
-2.20.1
-
-This reverts commit 172b06c32b949759fe6313abec514bc4f15014f4.
-
-This change changes the aggressiveness of shrinker reclaim, causing
-small cache and low priority reclaim to greatly increase
-scanning pressure on small caches. As a result, light memory
-pressure has a disproportionate effect on small caches, and causes
-large caches to be reclaimed much faster than previously.
-
-As a result, it greatly perturbs the delicate balance of the VFS
-caches (dentry/inode vs file page cache) such that the inode/dentry
-caches are reclaimed much, much faster than the page cache and this
-drives us into several other caching imbalance related problems.
-
-As such, this is a bad change and needs to be reverted.
-
-[ Needs some massaging to retain the later seekless shrinker
-modifications. ]
-
-cc: <stable@vger.kernel.org>
-Signed-off-by: Dave Chinner <dchinner@redhat.com>
----
- mm/vmscan.c | 10 ----------
- 1 file changed, 10 deletions(-)
-
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index a714c4f800e9..e979705bbf32 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
- delta = freeable / 2;
- }
-
-- /*
-- * Make sure we apply some minimal pressure on default priority
-- * even on small cgroups. Stale objects are not only consuming memory
-- * by themselves, but can also hold a reference to a dying cgroup,
-- * preventing it from being reclaimed. A dying cgroup with all
-- * corresponding structures like per-cpu stats and kmem caches
-- * can be really big, so it may lead to a significant waste of memory.
-- */
-- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
--
- total_scan += delta;
- if (total_scan < 0) {
- pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
---
-2.20.1