--- linux-2.6.33/scripts/mod/modpost.c~	2010-02-24 19:52:17.000000000 +0100
+++ linux-2.6.33/scripts/mod/modpost.c	2010-03-07 14:26:47.242168558 +0100
-#include "../../include/generated/autoconf.h"
+// PLD architectures don't use CONFIG_SYMBOL_PREFIX
+//#include "../../include/generated/autoconf.h"
 #include "../../include/linux/license.h"
 /* Some toolchains use a `_' prefix for all user symbols. */
--- linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh~	2011-07-22 04:17:23.000000000 +0200
+++ linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh	2011-08-25 21:26:04.799150642 +0200
 			$cc -print-file-name=lib${lib}.${ext} | grep -q /
+			for libt in tinfow tinfo ; do
+				$cc -print-file-name=lib${libt}.${ext} | grep -q /
+				if [ $? -eq 0 ]; then

From 7a29ac474a47eb8cf212b45917683ae89d6fa13b Mon Sep 17 00:00:00 2001
From: Chris Mason <clm@fb.com>
Date: Tue, 10 Nov 2015 10:10:34 +1100
Subject: xfs: give all workqueues rescuer threads

We're consistently hitting deadlocks here with XFS on recent kernels.
After some digging through the crash files, it looks like everyone in
the system is waiting for XFS to reclaim memory.

PID: 2733434  TASK: ffff8808cd242800  CPU: 19  COMMAND: "java"
 #0 [ffff880019c53588] __schedule at ffffffff818c4df2
 #1 [ffff880019c535d8] schedule at ffffffff818c5517
 #2 [ffff880019c535f8] _xfs_log_force_lsn at ffffffff81316348
 #3 [ffff880019c53688] xfs_log_force_lsn at ffffffff813164fb
 #4 [ffff880019c536b8] xfs_iunpin_wait at ffffffff8130835e
 #5 [ffff880019c53728] xfs_reclaim_inode at ffffffff812fd453
 #6 [ffff880019c53778] xfs_reclaim_inodes_ag at ffffffff812fd8c7
 #7 [ffff880019c53928] xfs_reclaim_inodes_nr at ffffffff812fe433
 #8 [ffff880019c53958] xfs_fs_free_cached_objects at ffffffff8130d3b9
 #9 [ffff880019c53968] super_cache_scan at ffffffff811a6f73
#10 [ffff880019c539c8] shrink_slab at ffffffff811460e6
#11 [ffff880019c53aa8] shrink_zone at ffffffff8114a53f
#12 [ffff880019c53b48] do_try_to_free_pages at ffffffff8114a8ba
#13 [ffff880019c53be8] try_to_free_pages at ffffffff8114ad5a
#14 [ffff880019c53c78] __alloc_pages_nodemask at ffffffff8113e1b8
#15 [ffff880019c53d88] alloc_kmem_pages_node at ffffffff8113e671
#16 [ffff880019c53dd8] copy_process at ffffffff8104f781
#17 [ffff880019c53ec8] do_fork at ffffffff8105129c
#18 [ffff880019c53f38] sys_clone at ffffffff810515b6
#19 [ffff880019c53f48] stub_clone at ffffffff818c8e4d
xfs_log_force_lsn is waiting for logs to get cleaned, which is waiting
for IO, which is waiting for workers to complete the IO, which is
waiting for worker threads that don't exist yet:

PID: 2752451  TASK: ffff880bd6bdda00  CPU: 37  COMMAND: "kworker/37:1"
 #0 [ffff8808d20abbb0] __schedule at ffffffff818c4df2
 #1 [ffff8808d20abc00] schedule at ffffffff818c5517
 #2 [ffff8808d20abc20] schedule_timeout at ffffffff818c7c6c
 #3 [ffff8808d20abcc0] wait_for_completion_killable at ffffffff818c6495
 #4 [ffff8808d20abd30] kthread_create_on_node at ffffffff8106ec82
 #5 [ffff8808d20abdf0] create_worker at ffffffff8106752f
 #6 [ffff8808d20abe40] worker_thread at ffffffff810699be
 #7 [ffff8808d20abec0] kthread at ffffffff8106ef59
 #8 [ffff8808d20abf50] ret_from_fork at ffffffff818c8ac8

I think we should be using WQ_MEM_RECLAIM to make sure this thread
pool makes progress when we're not able to allocate new workers.

[dchinner: make all workqueues WQ_MEM_RECLAIM]
Signed-off-by: Chris Mason <clm@fb.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
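
[Illustration, not part of the upstream commit: a minimal sketch of the
rescuer pattern the patch relies on, with all names hypothetical.
WQ_MEM_RECLAIM makes alloc_workqueue() create a rescuer thread up front,
so work queued during reclaim can still run even when new kworker
threads cannot be forked.]

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;	/* hypothetical queue */

static int demo_init(void)
{
	/*
	 * The rescuer is pre-allocated, so this queue makes forward
	 * progress even when kthread creation blocks on memory
	 * reclaim (the deadlock shown in the traces above).
	 */
	demo_wq = alloc_workqueue("demo-reclaim/%s",
				  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, "fsname");
	return demo_wq ? 0 : -ENOMEM;
}
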
 fs/xfs/xfs_super.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 29531ec..65fbfb7 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -838,17 +838,18 @@ xfs_init_mount_workqueues(
 		goto out_destroy_unwritten;

 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
-			WQ_FREEZABLE, 0, mp->m_fsname);
+			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
 	if (!mp->m_reclaim_workqueue)
 		goto out_destroy_cil;

 	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
-			WQ_FREEZABLE|WQ_HIGHPRI, 0, mp->m_fsname);
+			WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
 	if (!mp->m_log_workqueue)
 		goto out_destroy_reclaim;

 	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
-			WQ_FREEZABLE, 0, mp->m_fsname);
+			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
 	if (!mp->m_eofblocks_workqueue)
 		goto out_destroy_log;

commit c2d42c16ad83006a706d83e51a7268db04af733a
Author: Andrew Morton <akpm@linux-foundation.org>
Date:   Thu Nov 5 18:48:43 2015 -0800

    mm/vmstat.c: uninline node_page_state()

    With x86_64 (config http://ozlabs.org/~akpm/config-akpm2.txt) and old gcc
    (4.4.4), drivers/base/node.c:node_read_meminfo() is using 2344 bytes of
    stack.  Uninlining node_page_state() reduces this to 440 bytes.

    The stack consumption issue is fixed by newer gcc (4.8.4) however with
    that compiler this patch reduces the node.o text size from 7314 bytes to
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
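
[Illustration, not from the commit: the uninlining pattern in minimal,
hypothetical form. A large static inline in a header is demoted to an
extern declaration and its single definition moves into one .c file, so
each caller pays a function call instead of carrying the whole inlined
body on its own stack frame.]

/* header (before): every caller inlines the whole per-zone summation */
static inline unsigned long stat_sum(int node)
{
	unsigned long sum = 0;
	/* ... per-zone additions expanded into the caller's frame ... */
	return sum;
}

/* header (after): callers emit a call; their frames stay small */
extern unsigned long stat_sum(int node);

/* stats.c (after): the one out-of-line definition */
unsigned long stat_sum(int node)
{
	unsigned long sum = 0;
	/* ... same body, now emitted exactly once ... */
	return sum;
}
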
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 82e7db7..49dfe40 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -161,30 +161,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
- * Determine the per node value of a stat item. This function
- * is called frequently in a NUMA machine, so try to be as
- * frugal as possible.
-static inline unsigned long node_page_state(int node,
-				 enum zone_stat_item item)
-	struct zone *zones = NODE_DATA(node)->node_zones;
-#ifdef CONFIG_ZONE_DMA
-		zone_page_state(&zones[ZONE_DMA], item) +
-#ifdef CONFIG_ZONE_DMA32
-		zone_page_state(&zones[ZONE_DMA32], item) +
-#ifdef CONFIG_HIGHMEM
-		zone_page_state(&zones[ZONE_HIGHMEM], item) +
-		zone_page_state(&zones[ZONE_NORMAL], item) +
-		zone_page_state(&zones[ZONE_MOVABLE], item);
+extern unsigned long node_page_state(int node, enum zone_stat_item item);
 extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

diff --git a/mm/vmstat.c b/mm/vmstat.c
index fbf1448..ffcb4f5 100644
@@ -591,6 +591,28 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
 		__inc_zone_state(z, NUMA_OTHER);
+ * Determine the per node value of a stat item.
+unsigned long node_page_state(int node, enum zone_stat_item item)
+	struct zone *zones = NODE_DATA(node)->node_zones;
+#ifdef CONFIG_ZONE_DMA
+		zone_page_state(&zones[ZONE_DMA], item) +
+#ifdef CONFIG_ZONE_DMA32
+		zone_page_state(&zones[ZONE_DMA32], item) +
+#ifdef CONFIG_HIGHMEM
+		zone_page_state(&zones[ZONE_HIGHMEM], item) +
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+		zone_page_state(&zones[ZONE_MOVABLE], item);

 #ifdef CONFIG_COMPACTION

commit 016c13daa5c9e4827eca703e2f0621c131f2cca3
Author: Mel Gorman <mgorman@techsingularity.net>
Date:   Fri Nov 6 16:28:18 2015 -0800

    mm, page_alloc: use masks and shifts when converting GFP flags to migrate types

    This patch redefines which GFP bits are used for specifying mobility and
    the order of the migrate types.  Once redefined it's possible to convert
    GFP flags to a migrate type with a simple mask and shift.  The only
    downside is that readers of OOM kill messages and allocation failures may
    be accustomed to the existing values, but scripts/gfp-translate will help.
    Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
    Acked-by: Vlastimil Babka <vbabka@suse.cz>
    Cc: Christoph Lameter <cl@linux.com>
    Cc: David Rientjes <rientjes@google.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Vitaly Wool <vitalywool@gmail.com>
    Cc: Rik van Riel <riel@redhat.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
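
[Illustration, not from the commit: a standalone sketch of the resulting
conversion, using the flag values from the hunks below. With
___GFP_MOVABLE in bit 3 and ___GFP_RECLAIMABLE moved to bit 4, the two
mobility bits shift straight onto MIGRATE_UNMOVABLE = 0,
MIGRATE_MOVABLE = 1 and MIGRATE_RECLAIMABLE = 2.]

#include <assert.h>

#define ___GFP_MOVABLE     0x08u	/* bit 3 */
#define ___GFP_RECLAIMABLE 0x10u	/* bit 4, now adjacent */
#define GFP_MOVABLE_MASK   (___GFP_RECLAIMABLE | ___GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT  3

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	/* One mask and one shift replace the old test-and-or sequence. */
	return (int)((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT);
}

int main(void)
{
	assert(gfpflags_to_migratetype(0) == MIGRATE_UNMOVABLE);
	assert(gfpflags_to_migratetype(___GFP_MOVABLE) == MIGRATE_MOVABLE);
	assert(gfpflags_to_migratetype(___GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE);
	return 0;
}
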
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f92cbd2..440fca3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -14,7 +14,7 @@ struct vm_area_struct;
 #define ___GFP_HIGHMEM		0x02u
 #define ___GFP_DMA32		0x04u
 #define ___GFP_MOVABLE		0x08u
-#define ___GFP_WAIT		0x10u
+#define ___GFP_RECLAIMABLE	0x10u
 #define ___GFP_HIGH		0x20u
 #define ___GFP_IO		0x40u
 #define ___GFP_FS		0x80u
@@ -29,7 +29,7 @@ struct vm_area_struct;
 #define ___GFP_NOMEMALLOC	0x10000u
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
-#define ___GFP_RECLAIMABLE	0x80000u
+#define ___GFP_WAIT		0x80000u
 #define ___GFP_NOACCOUNT	0x100000u
 #define ___GFP_NOTRACK		0x200000u
 #define ___GFP_NO_KSWAPD	0x400000u
@@ -126,6 +126,7 @@ struct vm_area_struct;
 /* This mask makes up all the page movable related flags */
 #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+#define GFP_MOVABLE_SHIFT 3

 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -152,14 +153,15 @@ struct vm_area_struct;
 /* Convert GFP flags to their corresponding migrate type */
 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
-	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
+	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;

 	/* Group based on mobility */
-	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
-		((gfp_flags & __GFP_RECLAIMABLE) != 0);
+	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

 #ifdef CONFIG_HIGHMEM
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e326843..38bed71 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
-	MIGRATE_RECLAIMABLE,
+	MIGRATE_RECLAIMABLE,
 	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
 	MIGRATE_RESERVE = MIGRATE_PCPTYPES,

commit 974a786e63c96a2401a78ddba926f34c128474f1
Author: Mel Gorman <mgorman@techsingularity.net>
Date:   Fri Nov 6 16:28:34 2015 -0800

    mm, page_alloc: remove MIGRATE_RESERVE

    MIGRATE_RESERVE preserves an old property of the buddy allocator that
    existed prior to fragmentation avoidance -- min_free_kbytes worth of pages
    tended to remain contiguous until the only alternative was to fail the
    allocation.  At the time it was discovered that high-order atomic
    allocations relied on this property, so MIGRATE_RESERVE was introduced.  A
    later patch will introduce an alternative, MIGRATE_HIGHATOMIC; this patch
    deletes MIGRATE_RESERVE and its supporting code first so that the
    replacement is easier to review.  Note that in isolation this patch may
    look like a regression to someone bisecting high-order atomic allocation
    failures.
    Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
    Acked-by: Vlastimil Babka <vbabka@suse.cz>
    Cc: Christoph Lameter <cl@linux.com>
    Cc: David Rientjes <rientjes@google.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Vitaly Wool <vitalywool@gmail.com>
    Cc: Rik van Riel <riel@redhat.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
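
[Illustration, not from the commit: a standalone, simplified rendering
of the fallback-scan change below. With MIGRATE_RESERVE gone, the
fallback lists end in the out-of-range sentinel MIGRATE_TYPES, and a
scan that reaches the sentinel fails the request instead of dipping
into a reserve.]

#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_TYPES		/* out-of-range value used as terminator */
};

static const int fallbacks[MIGRATE_TYPES][3] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
};

/* First fallback type with free pages, or -1: no reserve to fall on. */
static int find_fallback(int migratetype, const int nr_free[MIGRATE_TYPES])
{
	for (int i = 0; fallbacks[migratetype][i] != MIGRATE_TYPES; i++)
		if (nr_free[fallbacks[migratetype][i]] > 0)
			return fallbacks[migratetype][i];
	return -1;
}

int main(void)
{
	const int nr_free[MIGRATE_TYPES] = { 0, 4, 0 };
	/* An unmovable request falls back to movable: prints 1. */
	printf("%d\n", find_fallback(MIGRATE_UNMOVABLE, nr_free));
	return 0;
}
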
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1e88aae..b86cfa3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
-	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -63,6 +61,8 @@ enum {
+#define MIGRATE_PCPTYPES (MIGRATE_RECLAIMABLE+1)
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
@@ -429,12 +429,6 @@ struct zone {
-	 * Number of MIGRATE_RESERVE page block. To maintain for just
-	 * optimization. Protected by zone->lock.
-	int nr_migrate_reserve_block;
 #ifdef CONFIG_MEMORY_ISOLATION
	 * Number of isolated pageblock. It is used to solve incorrect
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9812d46..dabd247 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -116,7 +116,7 @@ static void set_recommended_min_free_kbytes(void)
 	for_each_populated_zone(zone)

-	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
 	recommended_min = pageblock_nr_pages * nr_zones * 2;

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8dc6e3c..5888126 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,7 +817,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (unlikely(has_isolate_pageblock(zone)))
 				mt = get_pageblock_migratetype(page);

-			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--to_free && --batch_free && !list_empty(list));
@@ -1417,15 +1416,14 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * the free lists for the desirable migrate type are depleted
 static int fallbacks[MIGRATE_TYPES][4] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
-	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 #ifdef CONFIG_MEMORY_ISOLATION
-	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
@@ -1598,7 +1596,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		fallback_mt = fallbacks[migratetype][i];
-		if (fallback_mt == MIGRATE_RESERVE)
+		if (fallback_mt == MIGRATE_TYPES)
 		if (list_empty(&area->free_list[fallback_mt]))
@@ -1676,25 +1674,13 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 	page = __rmqueue_smallest(zone, order, migratetype);
-	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
+	if (unlikely(!page)) {
 		if (migratetype == MIGRATE_MOVABLE)
 			page = __rmqueue_cma_fallback(zone, order);
 			page = __rmqueue_fallback(zone, order, migratetype);
-		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
-		 * is used because __rmqueue_smallest is an inline function
-		 * and we want just one call site
-			migratetype = MIGRATE_RESERVE;
-			goto retry_reserve;
 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
@@ -3492,7 +3478,6 @@ static void show_migration_types(unsigned char type)
 		[MIGRATE_UNMOVABLE]	= 'U',
 		[MIGRATE_RECLAIMABLE]	= 'E',
 		[MIGRATE_MOVABLE]	= 'M',
-		[MIGRATE_RESERVE]	= 'R',
@@ -4303,120 +4288,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
- * Check if a pageblock contains reserved pages
-static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
-	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
- * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on min_wmark_pages(zone). The memory within
- * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
- * higher will lead to a bigger reserve which will get freed as contiguous
- * blocks as reclaim kicks in
-static void setup_zone_migrate_reserve(struct zone *zone)
-	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
-	unsigned long block_migratetype;
-	 * Get the start pfn, end pfn and the number of blocks to reserve
-	 * We have to be careful to be aligned to pageblock_nr_pages to
-	 * make sure that we always check pfn_valid for the first page in
-	start_pfn = zone->zone_start_pfn;
-	end_pfn = zone_end_pfn(zone);
-	start_pfn = roundup(start_pfn, pageblock_nr_pages);
-	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
-	 * Reserve blocks are generally in place to help high-order atomic
-	 * allocations that are short-lived. A min_free_kbytes value that
-	 * would result in more than 2 reserve blocks for atomic allocations
-	 * is assumed to be in place to help anti-fragmentation for the
-	 * future allocation of hugepages at runtime.
-	reserve = min(2, reserve);
-	old_reserve = zone->nr_migrate_reserve_block;
-	/* When memory hot-add, we almost always need to do nothing */
-	if (reserve == old_reserve)
-	zone->nr_migrate_reserve_block = reserve;
-	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
-		if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
-		if (!pfn_valid(pfn))
-		page = pfn_to_page(pfn);
-		/* Watch out for overlapping nodes */
-		if (page_to_nid(page) != zone_to_nid(zone))
-		block_migratetype = get_pageblock_migratetype(page);
-		/* Only test what is necessary when the reserves are not met */
-			 * Blocks with reserved pages will never free, skip
-			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
-			if (pageblock_is_reserved(pfn, block_end_pfn))
-			/* If this block is reserved, account for it */
-			if (block_migratetype == MIGRATE_RESERVE) {
-			/* Suitable for reserving if this block is movable */
-			if (block_migratetype == MIGRATE_MOVABLE) {
-				set_pageblock_migratetype(page,
-				move_freepages_block(zone, page,
-		} else if (!old_reserve) {
-			 * At boot time we don't need to scan the whole zone
-			 * for turning off MIGRATE_RESERVE.
-		 * If the reserve is met and this is a previous reserved block,
-		if (block_migratetype == MIGRATE_RESERVE) {
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-			move_freepages_block(zone, page, MIGRATE_MOVABLE);
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
@@ -4455,9 +4326,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * movable at startup. This will force kernel allocations
 		 * to reserve their blocks rather than leaking throughout
 		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
+		 * kernel allocations are made.
 		 * bitmap is created for zone's valid pfn range. but memmap
 		 * can be created for invalid pages (for alignment)
@@ -6018,7 +5887,6 @@ static void __setup_per_zone_wmarks(void)
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

-		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);

diff --git a/mm/vmstat.c b/mm/vmstat.c
index ffcb4f5..5b289dc 100644
@@ -923,7 +923,6 @@ static char * const migratetype_names[MIGRATE_TYPES] = {

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8ed2ffd963c5..7340353f8aea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
  * jiffies for either a BDI to exit congestion of the given @sync queue
  * or a write to complete.
- * In the absence of zone congestion, cond_resched() is called to yield
- * the processor if necessary but otherwise does not sleep.
+ * In the absence of zone congestion, a short sleep or a cond_resched is
+ * performed to yield the processor and to allow other subsystems to make
+ * forward progress.
  * The return value is 0 if the sleep is for the full timeout. Otherwise,
  * it is the number of jiffies that were still remaining when the function
@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
 	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
 	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
+		 * Memory allocation/reclaim might be called from a WQ
+		 * context and the current implementation of the WQ
+		 * concurrency control doesn't recognize that a particular
+		 * WQ is congested if the worker thread is looping without
+		 * ever sleeping. Therefore we have to do a short sleep
+		 * here rather than calling cond_resched().
+		if (current->flags & PF_WQ_WORKER)
+			schedule_timeout(1);

 		/* In case we scheduled, work out time remaining */
 		ret = timeout - (jiffies - start);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 45dcbcb5c594..0975da8e3432 100644
@@ -1381,6 +1381,7 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */

+static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 static cpumask_var_t cpu_stat_off;
@@ -1393,7 +1394,7 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
-		schedule_delayed_work_on(smp_processor_id(),
+		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
@@ -1462,7 +1463,7 @@ static void vmstat_shepherd(struct work_struct *w)
 		if (need_update(cpu) &&
 		    cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-			schedule_delayed_work_on(cpu,
+			queue_delayed_work_on(cpu, vmstat_wq,
 				&per_cpu(vmstat_work, cpu), 0);
@@ -1551,6 +1552,7 @@ static int __init setup_vmstat(void)
 	start_shepherd_timer();
 	cpu_notifier_register_done();
+	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);

 #ifdef CONFIG_PROC_FS
 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);

From 09ccfd238e5a0e670d8178cf50180ea81ae09ae1 Mon Sep 17 00:00:00 2001
From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Mon, 14 Dec 2015 13:48:36 -0800
Subject: pptp: verify sockaddr_len in pptp_bind() and pptp_connect()

Reported-by: Dmitry Vyukov <dvyukov@gmail.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ppp/pptp.c | 6 ++++++
 1 file changed, 6 insertions(+)
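
[Illustration, not part of the commit: both handlers cast the
caller-supplied sockaddr to struct sockaddr_pppox, so the length must be
validated before any field access. A hypothetical sketch of the
pattern:]

static int demo_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uaddr;

	/* Reject short buffers before touching protocol-specific fields. */
	if (len < sizeof(*sp))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	/* ... sp->sa_addr.pptp is now safe to read ... */
	return 0;
}
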
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index fc69e41..597c53e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
 	struct pptp_opt *opt = &po->proto.pptp;

+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
 	opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,

+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
 	if (sp->sa_protocol != PX_PROTO_PPTP)