--- xc/lib/GL/glx/Imakefile.redhat-libGL-exec-shield-fixes 2003-09-25 14:43:55.000000000 -0400
+++ xc/lib/GL/glx/Imakefile 2003-09-25 14:43:55.000000000 -0400
@@ -43,6 +43,7 @@
 #ifdef SparcArchitecture
 LinkSourceFile(glapi_sparc.S, $(MESASRCDIR)/src/SPARC)
 #endif
+LinkSourceFile(mem.c, $(MESASRCDIR)/src)


 # Maybe some of these could come from
@@ -70,7 +72,8 @@
 single2.c \
 singlepix.c \
 vertarr.c \
- xfont.c
+ xfont.c \
+ mem.c

 GLX_OBJS = \
 clientattrib.o \
@@ -94,7 +97,8 @@
 single2.o \
 singlepix.o \
 vertarr.o \
- xfont.o
+ xfont.o \
+ mem.o

 GLX_DEFS = GlxDefines

diff -urN xc.org/extras/Mesa/src/glapi.c xc/extras/Mesa/src/glapi.c
--- xc.org/extras/Mesa/src/glapi.c 2004-06-07 22:45:05.571381120 +0200
+++ xc/extras/Mesa/src/glapi.c 2004-06-07 23:11:34.201872576 +0200
@@ -50,6 +50,7 @@
 #include "glapioffsets.h"
 #include "glapitable.h"
 #include "glthread.h"
+#include "imports.h"

 extern hidden void *__glapi_noop_table[];

@@ -546,7 +547,7 @@
 0xe8, 0x00, 0x00, 0x00, 0x00,
 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
 };
- unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
+ unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned int next_insn;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
@@ -587,7 +588,7 @@
 0x01000000 /* nop */
 };
 #endif
- unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
+ unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
--- xc/extras/Mesa/src/mem.c.org 1970-01-01 01:00:00.000000000 +0100
+++ xc/extras/Mesa/src/mem.c 2004-06-09 02:02:29.152086688 +0200
@@ -0,0 +1,325 @@
+#include <unistd.h>
+#include <sys/mman.h>
+#include "glheader.h"
+#include "config.h"
+#include "macros.h"
+
+/* Define a struct for our private data. This is preferred over pointer
+ * arithmetic to access individual pieces of our private data because the
+ * compiler will help us get alignment correct in a portable way and it
+ * makes it much easier to add or remove items from our private data */
+
+typedef struct align_malloc_header {
+ void *alloc_ptr; /* actual allocation ptr */
+ size_t alloc_size; /* actual allocation size */
+ void *user_ptr; /* ptr returned to caller */
+ size_t user_size; /* size caller requested */
+} align_malloc_header;
+
+static unsigned long RoundUpPowerOf2(unsigned long val);
+
+/*
+ * Execute permission implementation notes:
+ * John Dennis - jdennis@redhat.com - Red Hat Inc.
+ *
+ * Overview:
+ *
+ * Various parts of Mesa generate machine code during run time and
+ * then execute that code. We will use the term code gen to refer to
+ * this process. Some operating systems, in an attempt to achieve
+ * better security, enforce restrictions on which memory areas may
+ * contain executable code. In general, execute permission is granted
+ * to .text sections and removed on stack or heap memory. It's the
+ * heap (and possibly the stack) where code is run time
+ * generated. This means that on systems which enforce execute memory
+ * security you will get either a SEGV or SIGBUS exception when run
+ * time generated code executes, and the process will be terminated.
+ *
+ * Implementation:
+ *
+ * The solution is to provide unique malloc/free functions which
+ * return memory with execute permission and to make sure these
+ * allocation functions are called for code gen.
+ *
+ * There are 3 possible implementation solutions.
+ *
+ * Solution A: use mprotect on the malloc block.
+ *
+ * In this scenario, after a block is allocated via malloc we call
+ * mprotect on the pages containing the block and add execute
+ * permission. In theory a free of the block removes the execute
+ * permission.
+ *
+ * Pros: Simple to implement.
+ *
+ * Cons: Because execute permission is granted to whole memory pages,
+ * when mprotect is called on the page containing the malloc block
+ * every other malloc block in that page also receives execute
+ * permission; this is insecure.
+ *
+ * When a malloc block is freed that had been allocated for
+ * execute permission we should remove the execute permission
+ * from that block so that when the heap manager reuses that
+ * memory it will not be executable. But because execute
+ * permission is granted to memory pages and a page may have
+ * more than one malloc block with execute permission, we
+ * cannot remove execute permission because that would remove
+ * execute permission on any executable malloc blocks still in
+ * that page. By not removing the execute permission on free
+ * we will tend to "leak" executable memory as more and more
+ * heap pages accumulate execute permission, possibly without
+ * needing it.
+ *
+ * Solution B: use mmap to allocate the block
+ *
+ * In this scenario every call to allocate an executable block is
+ * performed with anonymous mmap; mmap always allocates whole pages
+ * of memory. When free is called we unmap the pages.
+ *
+ * Pros: This is much more secure. The kernel places the allocation
+ * in special pages that have additional protection. These
+ * pages are not near any other pages.
+ *
+ * The pages used do not contain any heap allocation that is
+ * not supposed to be executable, therefore we are not
+ * inadvertently granting execute permission to a malloc block
+ * that happens to live in the same page as an executable malloc
+ * block.
+ *
+ * The allocation can be freed without affecting any other
+ * allocation and it will be reused by the kernel.
+ *
+ * It's simple to implement, as simple as Solution A.
+ *
+ * Cons: mmap only allocates in units of pages, thus even a small
+ * allocation will use an entire page. Note, however, that only a
+ * small number of exec mallocs are done, so the wasted memory
+ * is not likely to be an issue.
+ *
+ * Because every code-generated function will live alone in
+ * its own page, this will probably introduce more cache misses
+ * and page faults than if all the code coalesced together
+ * into one or more pages, as would be the case with regular
+ * .text sections.
+ *
+ * Solution C: use a separate malloc implementation with an mmap'ed heap arena
+ *
+ * In this scenario a new heap manager is introduced which manages a
+ * heap arena using anonymous mmap with execute permission. All
+ * executable allocations are provided using only this heap arena.
+ *
+ * Pros: This is the ideal solution. As in Solution B, executable and
+ * non-executable allocations are never mixed. Executable
+ * allocations are provided using the most secure pages the
+ * kernel manages.
+ *
+ * Pages will likely contain multiple allocations, as opposed
+ * to Solution B, where pages will be sparsely used. This
+ * improves cache and page fault behavior.
+ *
+ * Cons: This is the most involved implementation and requires the
+ * introduction of a heap manager implementation that has been
+ * modified to work with anonymous mmap. However, note that
+ * the GNU malloc implementation has been modified to work
+ * with anonymous mmap.
+ */
+
+#if 1
+#define EXEC_ALLOC_USE_MMAP
+#else
+#define EXEC_ALLOC_USE_MALLOC
+#endif
+
+/* If input is a power of 2 return that, else round up to the next power of 2 */
+static unsigned long RoundUpPowerOf2(unsigned long val)
+{
+ int i, setBits;
+
+ if (val == 0) return(1UL);
+ if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
+ /* out of range; should perhaps be a fatal error, for now return max power of 2 */
+ return (1UL << (sizeof(unsigned long) * 8 - 1));
+ }
+
+ for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
+ if (val & 1UL) setBits++;
+ }
+ if (setBits > 1)
+ return (1UL << i); /* input was not power of 2 */
+ else
+ return (1UL << (i-1)); /* input was power of 2 */
+}
+
+/*
+ * Allocate N-byte aligned memory in executable region (uninitialized)
+ */
+
+#ifdef EXEC_ALLOC_USE_MALLOC
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the pointer to the actual address and size in a private
+ * header before the address the client sees. We need the actual
+ * pointer to free with and we need the size to remove execute permission
+ * on the block */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ alloc_ptr = (unsigned long) MALLOC(alloc_size);
+
+ if (!alloc_ptr) return(NULL);
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = user_ptr & (page_size-1);
+ mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ }
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t user_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ user_size = pHeader->user_size;
+
+#if 0
+ /*
+ * Unfortunately we cannot remove the execute permission on this
+ * malloc block because execute permission is granted on a page
+ * basis. If the page containing this malloc block also contained
+ * another malloc block with execute permission that was still in
+ * effect, then we would remove execute permission on a malloc block
+ * that should still have it enforced. This does mean we will tend to
+ * "leak" execute permission in the heap. See the block comment above
+ * on implementation issues.
+ *
+ * Note, we could keep a ref count on each page and when the ref count
+ * fell to zero we could remove the execute permission.
+ *
+ * If we did remove the execute permission this is how it would be done.
+ */
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = (unsigned long)user_ptr & (page_size-1);
+ mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE);
+ }
+#endif
+ FREE(alloc_ptr);
+}
+
+#elif defined(EXEC_ALLOC_USE_MMAP)
+
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the pointer to the actual address and size in a private
+ * header before the address the client sees. We need the actual
+ * pointer to free with and we need the size to unmap the region */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is; on some POSIX
+ * systems you may need to remove the MAP_ANONYMOUS flag and pass the
+ * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
+
+ alloc_ptr = (unsigned long) mmap(0, alloc_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if ((void *)alloc_ptr == MAP_FAILED) {
+ return(NULL);
+ }
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t alloc_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ alloc_size = pHeader->alloc_size;
+
+ munmap(alloc_ptr, alloc_size);
+}
+#endif
+
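
The allocation scheme above can be exercised on its own. Below is a minimal standalone sketch (not part of the patch) of the mmap path: it over-allocates by the alignment, stores a small header just below the aligned pointer it returns, and verifies the memory is executable by running a single x86 "ret" instruction copied into it. The names demo_exec_malloc and demo_exec_free are hypothetical stand-ins for _mesa_exec_malloc and _mesa_exec_free, and the sketch assumes the alignment is a power of two no smaller than the header, which RoundUpPowerOf2 guarantees in the real code.

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

typedef struct demo_header {
    void   *alloc_ptr;   /* start of the mmap'ed region */
    size_t  alloc_size;  /* size to pass back to munmap */
} demo_header;

static void *demo_exec_malloc(size_t user_size, unsigned long align)
{
    size_t alloc_size = user_size + align;   /* room for alignment and header */
    unsigned char *base = mmap(0, alloc_size, PROT_READ | PROT_WRITE | PROT_EXEC,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned long user;
    demo_header *hdr;

    if (base == MAP_FAILED)
        return NULL;

    /* Round up to the next 'align' boundary, leaving the header just below,
     * the same layout _mesa_exec_malloc builds in mem.c above. */
    user = ((unsigned long) base + align) & ~(align - 1);
    hdr = (demo_header *) (user - sizeof(demo_header));
    hdr->alloc_ptr  = base;
    hdr->alloc_size = alloc_size;
    return (void *) user;
}

static void demo_exec_free(void *user_ptr)
{
    demo_header *hdr = (demo_header *) ((char *) user_ptr - sizeof(demo_header));
    munmap(hdr->alloc_ptr, hdr->alloc_size);
}

int main(void)
{
    unsigned char ret_insn = 0xc3;   /* x86 "ret" */
    unsigned char *code = demo_exec_malloc(16, 16);
    void (*fn)(void);

    assert(code != NULL && ((unsigned long) code & 15) == 0);
    memcpy(code, &ret_insn, 1);
    fn = (void (*)(void)) code;      /* running code from plain malloc'ed memory would fault under exec-shield */
    fn();
    demo_exec_free(code);
    printf("generated code executed\n");
    return 0;
}
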
diff -urN xc.org/extras/Mesa/src/imports.h xc/extras/Mesa/src/imports.h
--- xc.org/extras/Mesa/src/imports.h 2004-06-07 22:45:05.944324424 +0200
+++ xc/extras/Mesa/src/imports.h 2004-06-07 23:04:42.561451432 +0200
@@ -50,6 +50,9 @@
 #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N)
 #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N)
 #define ALIGN_FREE(PTR) _mesa_align_free(PTR)
+/* These allocate aligned memory in an area with execute permission, used for code generation. */
+#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N)
+#define EXEC_FREE(PTR) _mesa_exec_free(PTR)

 #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES)
 #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N)
@@ -120,6 +123,11 @@
 _mesa_align_free( void *ptr );

 extern void *
+_mesa_exec_malloc(size_t bytes, unsigned long alignment);
+extern void
+_mesa_exec_free(void *ptr);
+
+extern void *
 _mesa_memcpy( void *dest, const void *src, size_t n );

 extern void
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_exec.c xc/extras/Mesa/src/tnl/t_vtx_exec.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200
@@ -593,7 +593,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_x86.c xc/extras/Mesa/src/tnl/t_vtx_x86.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200
@@ -75,7 +75,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -126,7 +126,7 @@
 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
@@ -163,7 +163,7 @@
 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
@@ -205,7 +205,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -259,7 +259,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -303,7 +303,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -351,7 +351,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
@@ -393,7 +393,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -421,7 +421,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -449,7 +449,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -475,7 +475,7 @@

 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -499,7 +499,7 @@
 0xc3, /* ret */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
 return dfn;
@@ -531,7 +531,7 @@
 0xc3, /* ret */
 };

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
 FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
@@ -567,7 +567,7 @@
 insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
 dfn->key = key;

- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
 FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
@@ -600,7 +600,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -624,7 +624,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -648,7 +648,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -670,7 +670,7 @@

 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200
@@ -1074,7 +1074,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200
@@ -60,7 +60,7 @@
 insert_at_head( &CACHE, dfn ); \
 dfn->key[0] = key[0]; \
 dfn->key[1] = key[1]; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200
@@ -1042,7 +1042,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200
@@ -58,7 +58,7 @@
 char *end = (char *)&FUNC##_end; \
 insert_at_head( &CACHE, dfn ); \
 dfn->key = key; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )

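
For the malloc-based fallback (EXEC_ALLOC_USE_MALLOC), the part that is easy to get wrong is widening the mprotect range to whole pages. The sketch below (not part of the patch; page_round_span is a hypothetical helper) isolates the same arithmetic mem.c inlines around its mprotect call and prints the span it would protect for a small block that starts partway into a page.

#include <stdio.h>
#include <unistd.h>

/* Compute the page-aligned start and whole-page length that mprotect needs
 * to cover a block of user_size bytes starting at user_ptr.  This mirrors
 * the expressions used in the EXEC_ALLOC_USE_MALLOC path of mem.c. */
static void page_round_span(unsigned long user_ptr, size_t user_size,
                            unsigned long *start, size_t *len)
{
    unsigned long page_size = (unsigned long) getpagesize();
    unsigned long round = user_ptr & (page_size - 1);   /* offset into its page */

    *start = user_ptr - round;                           /* back up to the page start */
    *len   = (user_size + round + page_size - 1) & ~(page_size - 1);
}

int main(void)
{
    unsigned long start;
    size_t len;

    /* A 100-byte block that begins 0x28 bytes into a page: with 4096-byte
     * pages the protected span is the single page containing it. */
    page_round_span(0x1028, 100, &start, &len);
    printf("mprotect(%#lx, %zu, PROT_READ|PROT_WRITE|PROT_EXEC)\n", start, len);
    return 0;
}
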