diff -urN xc.org/extras/Mesa/src/glapi.c xc/extras/Mesa/src/glapi.c
--- xc.org/extras/Mesa/src/glapi.c 2004-06-07 22:45:05.571381120 +0200
+++ xc/extras/Mesa/src/glapi.c 2004-06-07 23:11:34.201872576 +0200
@@ -546,7 +547,7 @@
 0xe8, 0x00, 0x00, 0x00, 0x00,
 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
 };
- unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
+ unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned int next_insn;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
@@ -587,7 +588,7 @@
 0x01000000 /* nop */
 };
 #endif
- unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
+ unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
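The two hunks above change glapi's run-time dispatch-stub generator so that the instruction template is copied into memory that is actually executable. The general pattern they rely on, mapping memory with PROT_EXEC, copying a byte template into it, patching an operand, then calling through a function pointer, is sketched below. This is only an illustrative, x86-specific sketch with hypothetical names (stub_template); it is not part of the patch.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* x86 template: mov eax, imm32; ret.  The imm32 operand is patched below. */
static const unsigned char stub_template[] = { 0xb8, 0x00, 0x00, 0x00, 0x00, 0xc3 };

int main(void)
{
    /* Private anonymous mapping with execute permission, mirroring what
     * EXEC_MALLOC does in the mmap-based implementation further down. */
    unsigned char *code = mmap(NULL, sizeof(stub_template),
                               PROT_READ | PROT_WRITE | PROT_EXEC,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (code == MAP_FAILED)
        return 1;

    memcpy(code, stub_template, sizeof(stub_template));
    int value = 42;
    memcpy(code + 1, &value, sizeof(value));   /* patch the imm32 operand */

    int (*fn)(void) = (int (*)(void)) code;
    printf("stub returned %d\n", fn());        /* prints 42 */

    munmap(code, sizeof(stub_template));
    return 0;
}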
diff -urN xc.org/extras/Mesa/src/imports.c xc/extras/Mesa/src/imports.c
--- xc.org/extras/Mesa/src/imports.c 2004-06-07 22:45:05.943324576 +0200
+++ xc/extras/Mesa/src/imports.c 2004-06-07 23:08:05.289632064 +0200
@@ -59,6 +59,19 @@
 extern int vsnprintf(char *str, size_t count, const char *fmt, va_list arg);
 #endif

+/* Define a struct for our private data. This is preferred over pointer
+ * arithmetic to access individual pieces of our private data because the
+ * compiler will help us get alignment correct in a portable way and it
+ * makes it much easier to add or remove items from our private data */
+
+typedef struct align_malloc_header {
+ void *alloc_ptr; /* actual allocation ptr */
+ size_t alloc_size; /* actual allocation size */
+ void *user_ptr; /* ptr returned to caller */
+ size_t user_size; /* size caller requested */
+} align_malloc_header;
+
+static unsigned long RoundUpPowerOf2(unsigned long val);

 /**********************************************************************/
 /* Wrappers for standard C library functions */
@@ -175,6 +188,310 @@
 #endif
 }

+/*
+ * Execute permission implementation notes:
+ * John Dennis - jdennis@redhat.com - Red Hat Inc.
+ *
+ * Overview:
+ *
+ * Various parts of Mesa generate machine code at run time and
+ * then execute that code. We will use the term code gen to refer to
+ * this process. Some operating systems, in an attempt to achieve
+ * better security, enforce restrictions on which memory areas may
+ * contain executable code. In general execute permission is granted
+ * to .text sections and removed from stack or heap memory. It is the
+ * heap (and possibly the stack) where code is generated at run
+ * time. This means that on systems which enforce execute memory
+ * security you will get either a SIGSEGV or SIGBUS signal when run
+ * time generated code executes, and the process will be terminated.
+ *
+ * Implementation:
+ *
+ * The solution is to provide dedicated malloc/free functions which
+ * return memory with execute permission and to make sure these
+ * allocation functions are called for code gen.
+ *
+ * There are three possible implementation solutions.
+ *
+ * Solution A: use mprotect on the malloc block.
+ *
+ * In this scenario, after a block is allocated via malloc we call
+ * mprotect on the pages containing the block and add execute
+ * permission. In theory a free of the block removes the execute
+ * permission.
+ *
+ * Pros: Simple to implement.
+ *
+ * Cons: Because execute permission is granted to whole memory pages,
+ * when mprotect is called on the page containing the malloc block
+ * every other malloc block in that page also receives execute
+ * permission; this is insecure.
+ *
+ * When a malloc block that had been allocated with execute
+ * permission is freed, we should remove the execute permission
+ * from that block so that when the heap manager reuses that
+ * memory it will not be executable. But because execute
+ * permission is granted to memory pages and a page may have
+ * more than one malloc block with execute permission we
+ * cannot remove execute permission because that would remove
+ * execute permission on any executable malloc blocks still in
+ * that page. By not removing the execute permission on free
+ * we will tend to "leak" executable memory as more and more
+ * heap pages accumulate execute permission, possibly without
+ * needing it.
+ *
+ * Solution B: use mmap to allocate the block.
+ *
+ * In this scenario every request for an executable block is
+ * satisfied with an anonymous mmap. mmap always allocates whole pages
+ * of memory. When free is called we unmap the pages.
+ *
+ * Pros: This is much more secure. The kernel places the allocation
+ * in special pages that have additional protection. These
+ * pages are not near any other pages.
+ *
+ * The pages used do not contain any heap allocation that is
+ * not supposed to be executable, therefore we are not
+ * inadvertently granting execute permission to a malloc block
+ * that happens to live in the same page as an executable malloc
+ * block.
+ *
+ * The allocation can be freed without affecting any other
+ * allocation and it will be reused by the kernel.
+ *
+ * It is simple to implement, as simple as Solution A.
+ *
+ * Cons: mmap only allocates in units of pages. Thus even a small
+ * allocation will use an entire page. Note, however, that only a
+ * small number of exec mallocs are done, so the wasted memory
+ * is not likely to be an issue.
+ *
+ * Because every generated function will live alone in
+ * its own page, this will probably introduce more cache misses
+ * and page faults than if all the code coalesced together
+ * into one or more pages, as would be the case with regular
+ * .text sections.
+ *
+ * Solution C: use a separate malloc implementation with an mmap'ed heap arena
+ *
+ * In this scenario a new heap manager is introduced which manages a
+ * heap arena using anonymous mmap with execute permission. All
+ * executable allocations are provided using only this heap arena.
+ *
+ * Pros: This is the ideal solution. As in Solution B, executable and
+ * non-executable allocations are never mixed. Executable
+ * allocations are provided using the most secure pages the
+ * kernel manages.
+ *
+ * Pages will likely contain multiple allocations, as opposed
+ * to Solution B, where pages will be sparsely used. This
+ * improves cache and page fault behavior.
+ *
+ * Cons: This is the most involved implementation and requires the
+ * introduction of a heap manager implementation that has been
+ * modified to work with anonymous mmap. However, note that
+ * the GNU malloc implementation has already been modified to work
+ * with anonymous mmap.
+ */
+
+#if 1
+#define EXEC_ALLOC_USE_MMAP
+#else
+#define EXEC_ALLOC_USE_MALLOC
+#endif
+
+/* If input is power of 2 return that, else round up to next power of 2 */
+static unsigned long RoundUpPowerOf2(unsigned long val)
+{
+ int i, setBits;
+
+ if (val == 0) return(1UL);
+ if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
+ /* out of range, should be fatal error?, for now return max power of 2 */
+ return (1UL << (sizeof(unsigned long) * 8 - 1));
+ }
+
+ for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
+ if (val & 1UL) setBits++;
+ }
+ if (setBits > 1)
+ return (1UL << i); /* input was not power of 2 */
+ else
+ return (1UL << (i-1)); /* input was power of 2 */
+}
+
+/*
+ * Allocate N-byte aligned memory in executable region (uninitialized)
+ */
+
+#ifdef EXEC_ALLOC_USE_MALLOC
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the actual allocation pointer and size in a private
+ * header just before the address the client sees. We need the actual
+ * pointer to free with, and we need the size to remove execute permission
+ * on the block. */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ alloc_ptr = (unsigned long) MALLOC(alloc_size);
+
+ if (!alloc_ptr) return(NULL);
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = user_ptr & (page_size-1);
+ mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ }
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t user_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ user_size = pHeader->user_size;
+
+#if 0
+ /*
+ * Unfortunately we cannot remove the execute permission on this
+ * malloc block because execute permission is granted on a page
+ * basis. If the page containing this malloc block also contained
+ * another malloc block with execute permission that was still in
+ * effect, then we would remove execute permission from a malloc block
+ * on which it should still be enforced. This does mean we will tend to
+ * "leak" execute permission in the heap. See the block comment above
+ * on implementation issues.
+ *
+ * Note, we could keep a ref count on each page, and when the ref count
+ * fell to zero we could remove the execute permission.
+ *
+ * If we did remove the execute permission, this is how it would be done.
+ */
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = (unsigned long)user_ptr & (page_size-1);
+ mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE);
+ }
+#endif
+ FREE(alloc_ptr);
+}
+
+#elif defined(EXEC_ALLOC_USE_MMAP)
+
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the actual allocation pointer and size in a private
+ * header just before the address the client sees. We need the actual
+ * pointer to free with, and we need the size to unmap the region. */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ /* Note: I'm not sure how portable MAP_ANONYMOUS with fd=0 is; on some POSIX
+ * systems you may need to remove the MAP_ANONYMOUS flag and pass the
+ * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
+
+ alloc_ptr = (unsigned long) mmap(0, alloc_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if ((void *)alloc_ptr == MAP_FAILED) {
+ return(NULL);
+ }
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t alloc_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ alloc_size = pHeader->alloc_size;
+
+ munmap(alloc_ptr, alloc_size);
+}
+#endif

 void *
 _mesa_memcpy(void *dest, const void *src, size_t n)
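Solution C from the notes above, a separate allocator that hands out executable memory from its own anonymous PROT_EXEC arena, is described but not implemented by this patch (the #if 1 selects the mmap-per-allocation variant via EXEC_ALLOC_USE_MMAP). A minimal sketch of the arena idea, with hypothetical names, no locking, and no per-block free support, might look like this:

#include <stddef.h>
#include <sys/mman.h>

#define EXEC_ARENA_SIZE (64 * 1024)   /* one 64 KiB executable arena */

static unsigned char *arena_base;     /* start of the mmap'ed arena */
static size_t         arena_used;     /* bump-pointer offset        */

/* "Solution C" style allocator sketch: all executable allocations come
 * from one anonymous RWX mapping, so ordinary heap pages never gain
 * execute permission.  `align` must be a power of two.  Blocks are never
 * reclaimed individually; a real arena manager would handle that. */
void *exec_arena_alloc(size_t size, size_t align)
{
    if (arena_base == NULL) {
        arena_base = mmap(NULL, EXEC_ARENA_SIZE,
                          PROT_READ | PROT_WRITE | PROT_EXEC,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (arena_base == MAP_FAILED) {
            arena_base = NULL;
            return NULL;
        }
    }
    size_t offset = (arena_used + align - 1) & ~(align - 1);
    if (offset + size > EXEC_ARENA_SIZE)
        return NULL;                  /* arena exhausted */
    arena_used = offset + size;
    return arena_base + offset;
}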
diff -urN xc.org/extras/Mesa/src/imports.h xc/extras/Mesa/src/imports.h
--- xc.org/extras/Mesa/src/imports.h 2004-06-07 22:45:05.944324424 +0200
+++ xc/extras/Mesa/src/imports.h 2004-06-07 23:04:42.561451432 +0200
@@ -50,6 +50,9 @@
 #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N)
 #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N)
 #define ALIGN_FREE(PTR) _mesa_align_free(PTR)
+/* These allocate aligned memory in an area with execute permission, used for code generation. */
+#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N)
+#define EXEC_FREE(PTR) _mesa_exec_free(PTR)

 #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES)
 #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N)
@@ -120,6 +123,11 @@
 _mesa_align_free( void *ptr );

 extern void *
+_mesa_exec_malloc(size_t bytes, unsigned long alignment);
+extern void
+_mesa_exec_free(void *ptr);
+
+extern void *
 _mesa_memcpy( void *dest, const void *src, size_t n );

 extern void
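For reference, the new macros are used exactly like the ALIGN_MALLOC/ALIGN_FREE pair they parallel. The sketch below is modeled on the t_vtx_x86.c call sites later in this patch, with a trivial one-byte template standing in for real generated code; it assumes imports.h and string.h are included and is not part of the patch.

static const unsigned char temp[] = { 0xc3 };      /* ret; stands in for a real template */

void example_codegen(void)
{
   void *code = EXEC_MALLOC( sizeof(temp), 16 );   /* executable, 16-byte aligned */
   if (code) {
      memcpy (code, temp, sizeof(temp));
      /* ... patch the embedded operands, install the function ... */
      EXEC_FREE( code );                           /* when the cached entry is destroyed */
   }
}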
diff -urN xc.org/extras/Mesa/src/mtypes.h xc/extras/Mesa/src/mtypes.h
--- xc.org/extras/Mesa/src/mtypes.h 2004-06-07 22:45:05.956322600 +0200
+++ xc/extras/Mesa/src/mtypes.h 2004-06-07 23:05:35.023475992 +0200
@@ -31,7 +31,8 @@
 #ifndef TYPES_H
 #define TYPES_H

-
+#include <unistd.h>
+#include <sys/mman.h>
 #include "glheader.h"
 #include "config.h" /* Hardwired parameters */
 #include "glapitable.h"
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_exec.c xc/extras/Mesa/src/tnl/t_vtx_exec.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200
@@ -593,7 +593,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_x86.c xc/extras/Mesa/src/tnl/t_vtx_x86.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200
@@ -75,7 +75,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -126,7 +126,7 @@
 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
@@ -163,7 +163,7 @@
 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
@@ -205,7 +205,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -259,7 +259,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -303,7 +303,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -351,7 +351,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
@@ -393,7 +393,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -421,7 +421,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -449,7 +449,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -475,7 +475,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -499,7 +499,7 @@
 0xc3, /* ret */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
 return dfn;
@@ -531,7 +531,7 @@
 0xc3, /* ret */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
 FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
@@ -567,7 +567,7 @@
 insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
 dfn->key = key;

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
 FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
@@ -600,7 +600,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -624,7 +624,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -648,7 +648,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -670,7 +670,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
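All of the t_vtx_x86.c hunks above follow the same pattern: copy a hand-written x86 template into executable memory, then FIXUP() the 32-bit operands at known byte offsets. The stand-alone sketch below shows what such a fixup amounts to; fixup_imm32() is a hypothetical stand-in, not Mesa's actual FIXUP() macro (which additionally takes the value it expects to replace), and the pointer-to-int casts in the hunks are 32-bit x86 specific. This sketch is not part of the patch.

#include <stdint.h>
#include <string.h>

/* Overwrite the 32-bit little-endian operand at `offset` bytes into a
 * code buffer that was produced by memcpy'ing a template. */
static void fixup_imm32(unsigned char *code, size_t offset, uint32_t value)
{
    memcpy(code + offset, &value, sizeof(value));   /* unaligned-safe store */
}

/* Usage, mirroring the call sites above (hypothetical):
 *   fixup_imm32(code, 5, (uint32_t)(uintptr_t) target_ptr);
 */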
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200
@@ -1074,7 +1074,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200
@@ -60,7 +60,7 @@
 insert_at_head( &CACHE, dfn ); \
 dfn->key[0] = key[0]; \
 dfn->key[1] = key[1]; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200
@@ -1042,7 +1042,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200
@@ -58,7 +58,7 @@
 char *end = (char *)&FUNC##_end; \
 insert_at_head( &CACHE, dfn ); \
 dfn->key = key; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )