--- 2.6/mm/slab.c	2003-10-09 21:23:19.000000000 +0200
+++ build-2.6/mm/slab.c	2003-10-16 07:32:06.000000000 +0200
@@ -1891,6 +1891,15 @@
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+	{
+		int objnr;
+		struct slab *slabp;
+
+		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+
+		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		slab_bufctl(slabp)[objnr] = (int)caller;
+	}
 	objp += obj_dbghead(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -1952,12 +1961,14 @@
 	objnr = (objp - slabp->s_mem) / cachep->objsize;
 	check_slabp(cachep, slabp);
 #if DEBUG
+#if 0
 	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
 		printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
 					cachep->name, objp);
 		BUG();
 	}
 #endif
+#endif
 	slab_bufctl(slabp)[objnr] = slabp->free;
 	slabp->free = objnr;
 	STATS_DEC_ACTIVE(cachep);
@@ -2694,6 +2705,22 @@
 	.show	= s_show,
 };
 
+static void do_dump_slabp(kmem_cache_t *cachep)
+{
+	struct list_head *q;
+
+	check_irq_on();
+	spin_lock_irq(&cachep->spinlock);
+	list_for_each(q,&cachep->lists.slabs_full) {
+		struct slab *slabp;
+		int i;
+		slabp = list_entry(q, struct slab, list);
+		for (i=0;i<slabp->num;i++)
+			printk(KERN_DEBUG "obj %p/%d: %p\n", slabp, i, (void*)(slab_bufctl(slabp)[i]));
+	}
+	spin_unlock_irq(&cachep->spinlock);
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -2734,6 +2761,7 @@
 	    batchcount < 1 ||
 	    batchcount > limit ||
 	    shared < 0) {
+		do_dump_slabp(cachep);
 		res = -EINVAL;
 	} else {
 		res = do_tune_cpucache(cachep, limit, batchcount, shared);