git.pld-linux.org Git - packages/kernel.git/commitdiff
*** empty log message ***
author    cieciwa <cieciwa@pld-linux.org>
Mon, 10 May 2004 12:02:28 +0000 (12:02 +0000)
committer cvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
Changed files:
    350-autoswap -> 1.1

350-autoswap [new file with mode: 0644]

diff --git a/350-autoswap b/350-autoswap
new file mode 100644 (file)
index 0000000..c5c781a
--- /dev/null
@@ -0,0 +1,79 @@
+diff -upN reference/kernel/sysctl.c current/kernel/sysctl.c
+--- reference/kernel/sysctl.c  2004-04-30 11:23:56.000000000 -0700
++++ current/kernel/sysctl.c    2004-05-02 08:41:25.000000000 -0700
+@@ -721,11 +721,8 @@ static ctl_table vm_table[] = {
+               .procname       = "swappiness",
+               .data           = &vm_swappiness,
+               .maxlen         = sizeof(vm_swappiness),
+-              .mode           = 0644,
+-              .proc_handler   = &proc_dointvec_minmax,
+-              .strategy       = &sysctl_intvec,
+-              .extra1         = &zero,
+-              .extra2         = &one_hundred,
++              .mode           = 0444 /* read-only*/,
++              .proc_handler   = &proc_dointvec,
+       },
+ #ifdef CONFIG_HUGETLB_PAGE
+        {
+diff -upN reference/mm/vmscan.c current/mm/vmscan.c
+--- reference/mm/vmscan.c      2004-04-30 11:23:57.000000000 -0700
++++ current/mm/vmscan.c        2004-05-02 08:41:25.000000000 -0700
+@@ -42,7 +42,7 @@
+ /*
+  * From 0 .. 100.  Higher means more swappy.
+  */
+-int vm_swappiness = 60;
++int vm_swappiness = 0;
+ static long total_memory;
+ #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+@@ -586,6 +586,7 @@ refill_inactive_zone(struct zone *zone, 
+       LIST_HEAD(l_active);    /* Pages to go onto the active_list */
+       struct page *page;
+       struct pagevec pvec;
++      struct sysinfo i;
+       int reclaim_mapped = 0;
+       long mapped_ratio;
+       long distress;
+@@ -627,14 +628,39 @@ refill_inactive_zone(struct zone *zone, 
+        */
+       mapped_ratio = (ps->nr_mapped * 100) / total_memory;
++      si_swapinfo(&i);
++      if (likely(i.totalswap)) {
++              int app_centile, swap_centile;
++
++              /*
++               * app_centile is the percentage of physical ram used
++               * by application pages.
++               */
++              si_meminfo(&i);
++              app_centile = 100 - ((i.freeram + get_page_cache_size() -
++                      total_swapcache_pages) / (i.totalram / 100));
++
++              /*
++               * swap_centile is the percentage of the last (sizeof physical
++               * ram) of swap free.
++               */
++              swap_centile = i.freeswap / 
++                      (min(i.totalswap, i.totalram) / 100);
++
++              /*
++               * Autoregulate vm_swappiness to be equal to the lowest of
++               * app_centile and swap_centile. -ck
++               */
++              vm_swappiness = min(app_centile, swap_centile);
++      } else 
++              vm_swappiness = 0;
++
+       /*
+        * Now decide how much we really want to unmap some pages.  The mapped
+        * ratio is downgraded - just because there's a lot of mapped memory
+        * doesn't necessarily mean that page reclaim isn't succeeding.
+        *
+        * The distress ratio is important - we don't want to start going oom.
+-       *
+-       * A 100% value of vm_swappiness overrides this algorithm altogether.
+        */
+       swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
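
The sysctl.c hunk switches /proc/sys/vm/swappiness to mode 0444 with a plain proc_dointvec handler, so the value becomes read-only from userspace; the vmscan.c hunk then recomputes vm_swappiness on each pass through refill_inactive_zone() as the lower of two percentages: how much of physical RAM is occupied by application pages (app_centile) and how much of the last RAM-sized chunk of swap is still free (swap_centile). The following is a minimal userspace C sketch of that arithmetic with made-up figures, purely for illustration; the variable names only loosely mirror the struct sysinfo fields used in the patch and are not part of the commit.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* hypothetical figures, all in pages (4 KB each) */
	unsigned long totalram  = 262144;  /* 1 GB of RAM */
	unsigned long freeram   = 32768;
	unsigned long pagecache = 65536;   /* stands in for get_page_cache_size() */
	unsigned long swapcache = 4096;    /* stands in for total_swapcache_pages */
	unsigned long totalswap = 524288;  /* 2 GB of swap */
	unsigned long freeswap  = 200000;

	int vm_swappiness;

	if (totalswap) {
		int app_centile, swap_centile;

		/* percentage of physical RAM used by application pages */
		app_centile = 100 - (int)((freeram + pagecache - swapcache) /
			(totalram / 100));

		/* percentage free of the last (sizeof physical ram) of swap */
		swap_centile = (int)(freeswap / (min(totalswap, totalram) / 100));

		/* autoregulate to the lower of the two, as the patch does */
		vm_swappiness = min(app_centile, swap_centile);
	} else {
		/* no swap configured: never swap */
		vm_swappiness = 0;
	}

	printf("vm_swappiness = %d\n", vm_swappiness);  /* prints 65 with these figures */
	return 0;
}

With these sample numbers roughly a third of RAM is free or holds page cache, so app_centile comes out at 65, while 76% of a RAM-sized slice of swap is free; the kernel would therefore run with swappiness 65 instead of the fixed default of 60.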