1 diff -urN xc.org/extras/Mesa/src/glapi.c xc/extras/Mesa/src/glapi.c
2 --- xc.org/extras/Mesa/src/glapi.c 2004-06-07 22:45:05.571381120 +0200
3 +++ xc/extras/Mesa/src/glapi.c 2004-06-07 23:11:34.201872576 +0200
5 0xe8, 0x00, 0x00, 0x00, 0x00,
6 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
8 - unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
9 + unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
10 unsigned int next_insn;
12 memcpy(code, insn_template, sizeof(insn_template));
17 - unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
18 + unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
19 unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
21 memcpy(code, insn_template, sizeof(insn_template));
22 diff -urN xc.org/extras/Mesa/src/imports.c xc/extras/Mesa/src/imports.c
23 --- xc.org/extras/Mesa/src/imports.c 2004-06-07 22:45:05.943324576 +0200
24 +++ xc/extras/Mesa/src/imports.c 2004-06-07 23:08:05.289632064 +0200
26 extern int vsnprintf(char *str, size_t count, const char *fmt, va_list arg);
29 +/* Define a struct for our private data. This is preferred over pointer
30 + * arithmetic to access individual pieces of our private data because the
31 + * compiler will help us get alignment correct in a portable way and it
32 + * makes it much easier to add or remove items from our private data */
34 +typedef struct align_malloc_header {
35 + void *alloc_ptr; /* actual allocation ptr */
36 + size_t alloc_size; /* actual allocation size */
37 + void *user_ptr; /* ptr returned to caller */
38 + size_t user_size; /* size caller requested */
39 +} align_malloc_header;
41 +static unsigned long RoundUpPowerOf2(unsigned long val);
43 /**********************************************************************/
44 /* Wrappers for standard C library functions */
50 + * Execute permission implementation notes:
51 + * John Dennis - jdennis@redhat.com - Red Hat Inc.
55 + * Various parts of Mesa generate machine code during run time and
56 + * then execute that code. We will use the term code gen to refer to
57 + * this process. Some operating systems in an attempt to achieve
58 + * better security enforce restrictions on which memory areas may
59 + * contain executable code. In general execute permission is granted
60 + * to .text sections and removed on stack or heap memory. It's the
61 + * heap (and possibly the stack) where code is run time
62 + * generated. This means on systems that enforce execute memory
63 + * security you will get either a SEGV or SIGBUS exception when run
64 + * time generated code executes and the process will be terminated.
68 + * The solution is to provide unique malloc/free functions which
69 + * return memory with execute permission and to make sure these
70 + * allocation functions are called for code gen.
72 + * There are 3 possible implementation solutions.
74 + * Solution A: use mprotect on malloc block.
76 + * In this scenario after a block is allocated via malloc we call
77 + * mprotect on the pages containing the block and add execute
78 + * permission. In theory a free of the block removes the execute
81 + * Pros: Simple to implement
83 + * Cons: Because execute permission is granted memory pages when
84 + * mprotect is called on the page containing the malloc block
85 + * every other malloc block in that page also receives execute
86 + * permission, this is insecure.
88 + * When a malloc block is freed that had been allocated for
89 + * execute permission we should remove the execute permission
90 + * from that block so that when the heap manager reuses that
91 + * memory it will not be executable. But because execute
92 + * permission is granted to memory pages and a page may have
93 + * more than one malloc block with execute permission we
94 + * cannot remove execute permission because that would remove
95 + * execute permission on any executable malloc blocks still in
96 + * that page. By not removing the execution permission on free
97 + * we will tend to "leak" executable memory as more and more
98 + * heap pages accumulate execute permission, possibly without
101 + * Solution B: use mmap to allocate block
103 + * In this scenario every call to alloc an executable block is
104 + * performed with anonymous mmap. Mmap always allocates pages of
105 + * memory. When free is called we unmap the pages.
107 + * Pros: This is much more secure. The kernel places the allocation
108 + * in special pages that have additional protection. These
109 + * pages are not near any other pages.
111 + * The pages used do not contain any heap allocation that is
112 + * not supposed to be executable, therefore we are not
113 + * inadvertently granting execute permission to a malloc block
114 + * that happens to live in the same page as a execute malloc
117 + * The allocation can be freed without affecting any other
118 + * allocation and it will be reused by the kernel.
120 + * It's simple to implement. As simple as solution A.
122 + * Cons: Mmap only allocates in units of pages. Thus even a small
123 + * allocation will use an entire page. However note, only a
124 + * small number of exec mallocs are done so the wasted memory
125 + * is not likely to be an issue.
127 + * Because every code generated function will live alone in
128 + * its own page this will probably introduce more cache misses
129 + * and page faults than if all the code coalesced together
130 + * into one or more pages as would be the case with regular
133 + * Solution C: use separate malloc implementation using mmap'ed heap arena
135 + * In this scenario a new heap manager is introduced which manages a
136 + * heap arena using anonymous mmap with execute permission. All
137 + * executable allocations are provided using only this heap arena.
139 + * Pros: This is the ideal solution. As in Solution B executable and
140 + * non-executable allocations are never mixed. Executable
141 + * allocations are provided using the most secure pages the
144 + * Pages will likely contain multiple allocations as opposed
145 + * to Solution B where pages will be sparsely used. This
146 + * improves cache and page fault behavior.
148 + * Cons: This is the most involved implementation and requires the
149 + * introduction of a heap manager implementation that has been
150 + * modified to work with anonymous mmap. However, note that
151 + * the GNU malloc implementation has been modified to work
152 + * with anonymous mmap.
156 +#define EXEC_ALLOC_USE_MMAP
158 +#define EXEC_ALLOC_USE_MALLOC
161 +/* If input is power of 2 return that, else round up to next power of 2 */
162 +static unsigned long RoundUpPowerOf2(unsigned long val)
166 + if (val == 0) return(1UL);
167 + if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
168 + /* out of range, should be fatal error?, for now return max power of 2 */
169 + return (1UL << (sizeof(unsigned long) * 8 - 1));
172 + for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
173 + if (val & 1UL) setBits++;
176 + return (1UL << i); /* input was not power of 2 */
178 + return (1UL << (i-1)); /* input was power of 2 */
182 + * Allocate N-byte aligned memory in executable region (uninitialized)
185 +#ifdef EXEC_ALLOC_USE_MALLOC
187 +_mesa_exec_malloc(size_t user_size, unsigned long user_align)
189 + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
190 + align_malloc_header *pHeader;
192 + ASSERT( user_align > 0 );
194 + /* We store the pointer to the actual address and size in a private
195 + * header before the address the client sees. We need the actual
196 + * pointer to free with and we need the size to remove execute permission
199 + if (user_align < sizeof(align_malloc_header))
200 + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
202 + alloc_align = user_align;
203 + alloc_size = user_size + alloc_align;
205 + alloc_ptr = (unsigned long) MALLOC(alloc_size);
207 + if (!alloc_ptr) return(NULL);
209 + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
210 + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
211 + pHeader->alloc_ptr = (void *) alloc_ptr;
212 + pHeader->alloc_size = alloc_size;
213 + pHeader->user_ptr = (void *) user_ptr;
214 + pHeader->user_size = user_size;
217 + unsigned page_size, round;
219 + page_size = getpagesize();
220 + round = user_ptr & (page_size-1);
221 + mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
222 + PROT_READ | PROT_WRITE | PROT_EXEC);
227 + unsigned char *p = (unsigned char *) alloc_ptr;
228 + unsigned char *stop = (unsigned char *) pHeader;
230 + /* mark the non-aligned area */
231 + for(; p < stop; p++) {
237 + return (void *)user_ptr;
241 + * Free N-byte executable aligned memory
244 +_mesa_exec_free(void *user_ptr)
246 + /* The header giving the real address and size is just prior to the address the client sees. */
247 + align_malloc_header *pHeader;
251 + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
252 + alloc_ptr = pHeader->alloc_ptr;
253 + user_size = pHeader->user_size;
257 + * Unfortunately we cannot remove the execute permission on this
258 + * malloc block because execute permission is granted on a page
259 + * basis. If the page containing this malloc block also contained
260 + * another malloc block with execute permission that was still in
261 + * effect then we will remove execute permission on a malloc block
262 + * that should still be enforced. This does mean we will tend to
263 + * "leak" execute permission in the heap. See above block comment
264 + * on implementation issues.
266 + * Note, we could keep a ref count on each page and when the ref count
267 + * fell to zero we could remove the execute permission.
269 + * If we did remove the execute permission this is how it would be done.
272 + unsigned page_size, round;
274 + page_size = getpagesize();
275 + round = (unsigned long)user_ptr & (page_size-1);
276 + mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
277 + PROT_READ | PROT_WRITE);
283 +#elif defined(EXEC_ALLOC_USE_MMAP)
286 +_mesa_exec_malloc(size_t user_size, unsigned long user_align)
288 + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
289 + align_malloc_header *pHeader;
291 + ASSERT( user_align > 0 );
293 + /* We store the pointer to the actual address and size in a private
294 + * header before the address the client sees. We need the actual
295 + * pointer to free with and we need the size to unmap the region */
297 + if (user_align < sizeof(align_malloc_header))
298 + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
300 + alloc_align = user_align;
301 + alloc_size = user_size + alloc_align;
303 + /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is, on some POSIX
304 + * systems you may need to remove the MAP_ANONYMOUS flag and pass the
305 + * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
307 + alloc_ptr = (unsigned long) mmap(0, alloc_size,
308 + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
309 + if ((void *)alloc_ptr == MAP_FAILED) {
313 + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
314 + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
315 + pHeader->alloc_ptr = (void *) alloc_ptr;
316 + pHeader->alloc_size = alloc_size;
317 + pHeader->user_ptr = (void *) user_ptr;
318 + pHeader->user_size = user_size;
322 + unsigned char *p = (unsigned char *) alloc_ptr;
323 + unsigned char *stop = (unsigned char *) pHeader;
325 + /* mark the non-aligned area */
326 + for(; p < stop; p++) {
332 + return (void *)user_ptr;
336 + * Free N-byte executable aligned memory
339 +_mesa_exec_free(void *user_ptr)
341 + /* The header giving the real address and size is just prior to the address the client sees. */
342 + align_malloc_header *pHeader;
346 + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
347 + alloc_ptr = pHeader->alloc_ptr;
348 + alloc_size = pHeader->alloc_size;
350 + munmap(alloc_ptr, alloc_size);
355 _mesa_memcpy(void *dest, const void *src, size_t n)
356 diff -urN xc.org/extras/Mesa/src/imports.h xc/extras/Mesa/src/imports.h
357 --- xc.org/extras/Mesa/src/imports.h 2004-06-07 22:45:05.944324424 +0200
358 +++ xc/extras/Mesa/src/imports.h 2004-06-07 23:04:42.561451432 +0200
360 #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N)
361 #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N)
362 #define ALIGN_FREE(PTR) _mesa_align_free(PTR)
363 +/* These allocate aligned memory in an area with execute permission, used for code generation. */
364 +#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N)
365 +#define EXEC_FREE(PTR) _mesa_exec_free(PTR)
367 #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES)
368 #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N)
370 _mesa_align_free( void *ptr );
373 +_mesa_exec_malloc(size_t bytes, unsigned long alignment);
375 +_mesa_exec_free(void *ptr);
378 _mesa_memcpy( void *dest, const void *src, size_t n );
381 diff -urN xc.org/extras/Mesa/src/mtypes.h xc/extras/Mesa/src/mtypes.h
382 --- xc.org/extras/Mesa/src/mtypes.h 2004-06-07 22:45:05.956322600 +0200
383 +++ xc/extras/Mesa/src/mtypes.h 2004-06-07 23:05:35.023475992 +0200
390 +#include <sys/mman.h>
391 #include "glheader.h"
392 #include "config.h" /* Hardwired parameters */
393 #include "glapitable.h"
394 diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_exec.c xc/extras/Mesa/src/tnl/t_vtx_exec.c
395 --- xc.org/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200
396 +++ xc/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200
398 struct dynfn *f, *tmp;
399 foreach_s (f, tmp, l) {
400 remove_from_list( f );
401 - ALIGN_FREE( f->code );
402 + EXEC_FREE( f->code );
406 diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_x86.c xc/extras/Mesa/src/tnl/t_vtx_x86.c
407 --- xc.org/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200
408 +++ xc/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200
410 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
413 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
414 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
415 memcpy (dfn->code, temp, sizeof(temp));
416 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
417 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
419 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */
422 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
423 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
424 memcpy (dfn->code, temp, sizeof(temp));
425 FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
426 FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
428 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */
431 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
432 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
433 memcpy (dfn->code, temp, sizeof(temp));
434 FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
435 FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
437 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
440 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
441 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
442 memcpy (dfn->code, temp, sizeof(temp));
443 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
444 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
446 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
449 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
450 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
451 memcpy (dfn->code, temp, sizeof(temp));
452 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
453 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
455 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
458 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
459 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
460 memcpy (dfn->code, temp, sizeof(temp));
461 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
462 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
464 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
467 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
468 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
469 memcpy (dfn->code, temp, sizeof(temp));
470 FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
471 FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
474 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
476 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
477 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
478 memcpy (dfn->code, temp, sizeof(temp));
479 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
483 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
485 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
486 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
487 memcpy (dfn->code, temp, sizeof(temp));
488 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
492 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
494 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
495 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
496 memcpy (dfn->code, temp, sizeof(temp));
497 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
501 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
503 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
504 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
505 memcpy (dfn->code, temp, sizeof(temp));
506 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
512 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
513 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
514 memcpy (dfn->code, temp, sizeof(temp));
515 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
521 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
522 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
523 memcpy (dfn->code, temp, sizeof(temp));
524 FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
525 FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
527 insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
530 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
531 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
532 memcpy (dfn->code, temp, sizeof(temp));
533 FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
534 FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
537 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
539 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
540 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
541 memcpy (dfn->code, temp, sizeof(temp));
542 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
546 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
548 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
549 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
550 memcpy (dfn->code, temp, sizeof(temp));
551 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
555 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
557 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
558 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
559 memcpy (dfn->code, temp, sizeof(temp));
560 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
564 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
566 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
567 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
568 memcpy (dfn->code, temp, sizeof(temp));
569 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
571 diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c
572 --- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200
573 +++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200
574 @@ -1074,7 +1074,7 @@
575 struct dynfn *f, *tmp;
576 foreach_s (f, tmp, l) {
577 remove_from_list( f );
578 - ALIGN_FREE( f->code );
579 + EXEC_FREE( f->code );
583 diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h
584 --- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200
585 +++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200
587 insert_at_head( &CACHE, dfn ); \
588 dfn->key[0] = key[0]; \
589 dfn->key[1] = key[1]; \
590 - dfn->code = ALIGN_MALLOC( end - start, 16 ); \
591 + dfn->code = EXEC_MALLOC( end - start, 16 ); \
592 memcpy (dfn->code, start, end - start); \
595 diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c
596 --- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200
597 +++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200
598 @@ -1042,7 +1042,7 @@
599 struct dynfn *f, *tmp;
600 foreach_s (f, tmp, l) {
601 remove_from_list( f );
602 - ALIGN_FREE( f->code );
603 + EXEC_FREE( f->code );
607 diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h
608 --- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200
609 +++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200
611 char *end = (char *)&FUNC##_end; \
612 insert_at_head( &CACHE, dfn ); \
614 - dfn->code = ALIGN_MALLOC( end - start, 16 ); \
615 + dfn->code = EXEC_MALLOC( end - start, 16 ); \
616 memcpy (dfn->code, start, end - start); \