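2.6.0-t7-memleak-lkml.patch: a slab memory-leak debugging patch against 2.6.0-test7 mm/slab.c, apparently taken from an LKML posting (per the file name) and kept in this repository as obsolete. In the allocation debug path it records the caller's address in the allocated object's slab_bufctl() slot; it disables the double-free check in the free path (those slots no longer hold BUFCTL_FREE); and it adds do_dump_slabp(), which walks a cache's full slabs and printks the recorded caller for every object. The dump is hooked into slabinfo_write(), so it would presumably be triggered by writing an otherwise invalid tuning line (for example, a batchcount of 0) for the cache of interest to /proc/slabinfo.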
--- 2.6/mm/slab.c       2003-10-09 21:23:19.000000000 +0200
+++ build-2.6/mm/slab.c 2003-10-16 07:32:06.000000000 +0200
@@ -1891,6 +1891,15 @@
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+       {
+               int objnr;
+               struct slab *slabp;
+
+               slabp = GET_PAGE_SLAB(virt_to_page(objp));
+
+               objnr = (objp - slabp->s_mem) / cachep->objsize;
+               slab_bufctl(slabp)[objnr] = (int)caller;
+       }
        objp += obj_dbghead(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON) {
                unsigned long   ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -1952,12 +1961,14 @@
                objnr = (objp - slabp->s_mem) / cachep->objsize;
                check_slabp(cachep, slabp);
 #if DEBUG
+#if 0
                if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
                        printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
                                                cachep->name, objp);
                        BUG();
                }
 #endif
+#endif
                slab_bufctl(slabp)[objnr] = slabp->free;
                slabp->free = objnr;
                STATS_DEC_ACTIVE(cachep);
@@ -2694,6 +2705,22 @@
        .show   = s_show,
 };
 
+static void do_dump_slabp(kmem_cache_t *cachep)
+{
+       struct list_head *q;
+
+       check_irq_on();
+       spin_lock_irq(&cachep->spinlock);
+       list_for_each(q,&cachep->lists.slabs_full) {
+               struct slab *slabp;
+               int i;
+               slabp = list_entry(q, struct slab, list);
+               for (i=0;i<cachep->num;i++)
+                       printk(KERN_DEBUG "obj %p/%d: %p\n", slabp, i, (void*)(slab_bufctl(slabp)[i]));
+       }
+       spin_unlock_irq(&cachep->spinlock);
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -2734,6 +2761,7 @@
                            batchcount < 1 ||
                            batchcount > limit ||
                            shared < 0) {
+                               do_dump_slabp(cachep);
                                res = -EINVAL;
                        } else {
                                res = do_tune_cpucache(cachep, limit, batchcount, shared);