]>
Commit | Line | Data |
---|---|---|
d423733f AM |
1 | 2006-10-11 Jakub Jelinek <jakub@redhat.com> |
2 | ||
3 | * malloc/malloc.c (_int_malloc): Remove unused any_larger variable. | |
4 | ||
5 | 2006-08-31 Jakub Jelinek <jakub@redhat.com> | |
6 | ||
7 | * malloc/malloc.c (_int_malloc): Use full list insert and not | |
8 | shortcut which assumes the list is empty for large requests | |
9 | too. | |
10 | ||
11 | 2006-08-26 Ulrich Drepper <drepper@redhat.com> | |
12 | ||
13 | * malloc/malloc.c (_int_malloc): Fix test for large enough buffer | |
14 | for early termination. When no unsorted block matches perfectly | |
15 | and an existing block has to be split, use full list insert and | |
16 | not shortcut which assumes the list is empty. | |
17 | ||
18 | 2006-08-19 Ulrich Drepper <drepper@redhat.com> | |
19 | ||
20 | * malloc/malloc.c (_int_malloc): Limit number of unsorted blocks | |
21 | to sort in each call. | |
22 | ||
23 | --- libc/malloc/malloc.c 9 Aug 2006 21:50:30 -0000 | |
24 | +++ libc/malloc/malloc.c 7 Sep 2006 16:06:02 -0000 | |
25 | @@ -4055,6 +4096,7 @@ _int_malloc(mstate av, size_t bytes) | |
26 | ||
27 | for(;;) { | |
28 | ||
29 | + int iters = 0; | |
30 | while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { | |
31 | bck = victim->bk; | |
32 | if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) | |
33 | @@ -4150,6 +4193,10 @@ _int_malloc(mstate av, size_t bytes) | |
34 | victim->fd = fwd; | |
35 | fwd->bk = victim; | |
36 | bck->fd = victim; | |
37 | + | |
38 | +#define MAX_ITERS 10000 | |
39 | + if (++iters >= MAX_ITERS) | |
40 | + break; | |
41 | } | |
42 | ||
43 | /* | |
44 | @@ -4182,8 +4231,14 @@ _int_malloc(mstate av, size_t bytes) | |
45 | /* Split */ | |
46 | else { | |
47 | remainder = chunk_at_offset(victim, nb); | |
48 | - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; | |
49 | - remainder->bk = remainder->fd = unsorted_chunks(av); | |
50 | + /* We cannot assume the unsorted list is empty and therefore | |
51 | + have to perform a complete insert here. */ | |
52 | + bck = unsorted_chunks(av); | |
53 | + fwd = bck->fd; | |
54 | + remainder->bk = bck; | |
55 | + remainder->fd = fwd; | |
56 | + bck->fd = remainder; | |
57 | + fwd->bk = remainder; | |
58 | set_head(victim, nb | PREV_INUSE | | |
59 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
60 | set_head(remainder, remainder_size | PREV_INUSE); | |
61 | @@ -4268,8 +4323,15 @@ _int_malloc(mstate av, size_t bytes) | |
62 | else { | |
63 | remainder = chunk_at_offset(victim, nb); | |
64 | ||
65 | - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; | |
66 | - remainder->bk = remainder->fd = unsorted_chunks(av); | |
67 | + /* We cannot assume the unsorted list is empty and therefore | |
68 | + have to perform a complete insert here. */ | |
69 | + bck = unsorted_chunks(av); | |
70 | + fwd = bck->fd; | |
71 | + remainder->bk = bck; | |
72 | + remainder->fd = fwd; | |
73 | + bck->fd = remainder; | |
74 | + fwd->bk = remainder; | |
75 | + | |
76 | /* advertise as last remainder */ | |
77 | if (in_smallbin_range(nb)) | |
78 | av->last_remainder = remainder; |