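This patch makes libGL's run-time code generation work under exec-shield by adding an executable-memory allocator (mem.c, exposed as EXEC_MALLOC/EXEC_FREE) and switching the code-gen call sites to it. As a rough illustration of the scheme the new mem.c implements (anonymous mmap with PROT_EXEC plus a small header stored just below the aligned pointer so free can recover the mapping), here is a standalone sketch; it is not part of the patch, and the demo_exec_malloc/demo_exec_free names are hypothetical, unlike Mesa's real _mesa_exec_malloc/_mesa_exec_free added below.

/* Minimal sketch of the mmap + alignment-header pattern used by mem.c. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

typedef struct demo_exec_header {
    void   *map_ptr;   /* address returned by mmap */
    size_t  map_size;  /* size passed to mmap, needed for munmap */
} demo_exec_header;

/* Allocate 'size' bytes of RWX memory aligned to 'align' (a power of two
 * at least as large as the header, e.g. 16). */
static void *demo_exec_malloc(size_t size, size_t align)
{
    size_t total = size + align;
    void *map = mmap(NULL, total, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return NULL;

    /* Round up to the requested alignment and stash the real mapping
     * address and size just below the pointer handed to the caller. */
    unsigned long user = ((unsigned long) map + align) & ~(unsigned long)(align - 1);
    demo_exec_header *hdr = (demo_exec_header *) (user - sizeof(demo_exec_header));
    hdr->map_ptr  = map;
    hdr->map_size = total;
    return (void *) user;
}

static void demo_exec_free(void *user)
{
    demo_exec_header *hdr = (demo_exec_header *) ((char *) user - sizeof(demo_exec_header));
    munmap(hdr->map_ptr, hdr->map_size);
}

int main(void)
{
    unsigned char ret_insn[] = { 0xc3 };     /* x86 "ret"; illustrative only */
    void *code = demo_exec_malloc(sizeof(ret_insn), 16);
    if (!code)
        return 1;
    memcpy(code, ret_insn, sizeof(ret_insn));
    printf("executable buffer at %p\n", code);
    demo_exec_free(code);
    return 0;
}
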
--- xc/lib/GL/glx/Imakefile.redhat-libGL-exec-shield-fixes 2003-09-25 14:43:55.000000000 -0400
+++ xc/lib/GL/glx/Imakefile 2003-09-25 14:43:55.000000000 -0400
@@ -43,6 +43,7 @@
 #ifdef SparcArchitecture
 LinkSourceFile(glapi_sparc.S, $(MESASRCDIR)/src/SPARC)
 #endif
+LinkSourceFile(mem.c, $(MESASRCDIR)/src)
 
 
 # Maybe some of these could come from
@@ -70,7 +72,8 @@
 single2.c \
 singlepix.c \
 vertarr.c \
- xfont.c
+ xfont.c \
+ mem.c
 
 GLX_OBJS = \
 clientattrib.o \
@@ -94,7 +97,8 @@
 single2.o \
 singlepix.o \
 vertarr.o \
- xfont.o
+ xfont.o \
+ mem.o
 
 GLX_DEFS = GlxDefines
 
diff -urN xc.org/extras/Mesa/src/glapi.c xc/extras/Mesa/src/glapi.c
--- xc.org/extras/Mesa/src/glapi.c 2004-06-07 22:45:05.571381120 +0200
+++ xc/extras/Mesa/src/glapi.c 2004-06-07 23:11:34.201872576 +0200
@@ -50,6 +50,7 @@
 #include "glapioffsets.h"
 #include "glapitable.h"
 #include "glthread.h"
+#include "imports.h"
 
 extern hidden void *__glapi_noop_table[];
 
 
@@ -546,7 +547,7 @@
 0xe8, 0x00, 0x00, 0x00, 0x00,
 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
 };
- unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
+ unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned int next_insn;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
@@ -587,7 +588,7 @@
 0x01000000 /* nop */
 };
 #endif
- unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
+ unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
 unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
 if (code) {
 memcpy(code, insn_template, sizeof(insn_template));
--- xc/extras/Mesa/src/mem.c.org 1970-01-01 01:00:00.000000000 +0100
+++ xc/extras/Mesa/src/mem.c 2004-06-09 02:02:29.152086688 +0200
@@ -0,0 +1,325 @@
+#include <unistd.h>
+#include <sys/mman.h>
+#include "glheader.h"
+#include "config.h"
+#include "macros.h"
+
+/* Define a struct for our private data. This is preferred over pointer
+ * arithmetic to access individual pieces of our private data because the
+ * compiler will help us get alignment correct in a portable way and it
+ * makes it much easier to add or remove items from our private data */
+
+typedef struct align_malloc_header {
+ void *alloc_ptr; /* actual allocation ptr */
+ size_t alloc_size; /* actual allocation size */
+ void *user_ptr; /* ptr returned to caller */
+ size_t user_size; /* size caller requested */
+} align_malloc_header;
+
+static unsigned long RoundUpPowerOf2(unsigned long val);
+
+/*
+ * Execute permission implementation notes:
+ * John Dennis - jdennis@redhat.com - Red Hat Inc.
+ *
+ * Overview:
+ *
+ * Various parts of Mesa generate machine code during run time and
+ * then execute that code. We will use the term code gen to refer to
+ * this process. Some operating systems, in an attempt to achieve
+ * better security, enforce restrictions on which memory areas may
+ * contain executable code. In general execute permission is granted
+ * to .text sections and removed from stack or heap memory. It's the
+ * heap (and possibly the stack) where code is run time
+ * generated. This means on systems that enforce execute memory
+ * security you will get either a SEGV or SIGBUS exception when run
+ * time generated code executes, and the process will be terminated.
+ *
+ * Implementation:
+ *
+ * The solution is to provide unique malloc/free functions which
+ * return memory with execute permission and to make sure these
+ * allocation functions are called for code gen.
+ *
+ * There are 3 possible implementation solutions.
+ *
+ * Solution A: use mprotect on malloc block.
+ *
+ * In this scenario after a block is allocated via malloc we call
+ * mprotect on the pages containing the block and add execute
+ * permission. In theory a free of the block removes the execute
+ * permission.
+ *
+ * Pros: Simple to implement.
+ *
+ * Cons: Because execute permission is granted to memory pages, when
+ * mprotect is called on the page containing the malloc block
+ * every other malloc block in that page also receives execute
+ * permission; this is insecure.
+ *
+ * When a malloc block is freed that had been allocated for
+ * execute permission we should remove the execute permission
+ * from that block so that when the heap manager reuses that
+ * memory it will not be executable. But because execute
+ * permission is granted to memory pages and a page may have
+ * more than one malloc block with execute permission we
+ * cannot remove execute permission because that would remove
+ * execute permission on any executable malloc blocks still in
+ * that page. By not removing the execute permission on free
+ * we will tend to "leak" executable memory as more and more
+ * heap pages accumulate execute permission, possibly without
+ * needing it.
+ *
+ * Solution B: use mmap to allocate block
+ *
+ * In this scenario every call to alloc an executable block is
+ * performed with anonymous mmap. Mmap always allocates pages of
+ * memory. When free is called we unmap the pages.
+ *
+ * Pros: This is much more secure. The kernel places the allocation
+ * in special pages that have additional protection. These
+ * pages are not near any other pages.
+ *
+ * The pages used do not contain any heap allocation that is
+ * not supposed to be executable; therefore we are not
+ * inadvertently granting execute permission to a malloc block
+ * that happens to live in the same page as an execute malloc
+ * block.
+ *
+ * The allocation can be freed without affecting any other
+ * allocation and it will be reused by the kernel.
+ *
+ * It's simple to implement, as simple as Solution A.
+ *
+ * Cons: Mmap only allocates in units of pages. Thus even a small
+ * allocation will use an entire page. Note, however, that only a
+ * small number of exec mallocs are done, so the wasted memory
+ * is not likely to be an issue.
+ *
+ * Because every code-generated function will live alone in
+ * its own page, this will probably introduce more cache misses
+ * and page faults than if all the code coalesced together
+ * into one or more pages as would be the case with regular
+ * .text sections.
+ *
+ * Solution C: use separate malloc implementation using mmap'ed heap arena
+ *
+ * In this scenario a new heap manager is introduced which manages a
+ * heap arena using anonymous mmap with execute permission. All
+ * executable allocations are provided using only this heap arena.
+ *
+ * Pros: This is the ideal solution. As in Solution B, executable and
+ * non-executable allocations are never mixed. Executable
+ * allocations are provided using the most secure pages the
+ * kernel manages.
+ *
+ * Pages will likely contain multiple allocations as opposed
+ * to Solution B, where pages will be sparsely used. This
+ * improves cache and page fault behavior.
+ *
+ * Cons: This is the most involved implementation and requires the
+ * introduction of a heap manager implementation that has been
+ * modified to work with anonymous mmap. However, note that
+ * the GNU malloc implementation has been modified to work
+ * with anonymous mmap.
+ */
+
+#if 1
+#define EXEC_ALLOC_USE_MMAP
+#else
+#define EXEC_ALLOC_USE_MALLOC
+#endif
+
+/* If input is power of 2 return that, else round up to next power of 2 */
+static unsigned long RoundUpPowerOf2(unsigned long val)
+{
+ int i, setBits;
+
+ if (val == 0) return(1UL);
+ if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
+ /* out of range; should this be a fatal error? For now return max power of 2 */
+ return (1UL << (sizeof(unsigned long) * 8 - 1));
+ }
+
+ for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
+ if (val & 1UL) setBits++;
+ }
+ if (setBits > 1)
+ return (1UL << i); /* input was not power of 2 */
+ else
+ return (1UL << (i-1)); /* input was power of 2 */
+}
+
+/*
+ * Allocate N-byte aligned memory in executable region (uninitialized)
+ */
+
+#ifdef EXEC_ALLOC_USE_MALLOC
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the pointer to the actual address and size in a private
+ * header before the address the client sees. We need the actual
+ * pointer to free with and we need the size to remove execute permission
+ * on the block */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ alloc_ptr = (unsigned long) MALLOC(alloc_size);
+
+ if (!alloc_ptr) return(NULL);
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = user_ptr & (page_size-1);
+ mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ }
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t user_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ user_size = pHeader->user_size;
+
+#if 0
+ /*
+ * Unfortunately we cannot remove the execute permission on this
+ * malloc block because execute permission is granted on a page
+ * basis. If the page containing this malloc block also contained
+ * another malloc block with execute permission that was still in
+ * effect, then we would remove execute permission on a malloc block
+ * where it should still be enforced. This does mean we will tend to
+ * "leak" execute permission in the heap. See the block comment
+ * above on implementation issues.
+ *
+ * Note, we could keep a ref count on each page, and when the ref count
+ * fell to zero we could remove the execute permission.
+ *
+ * If we did remove the execute permission, this is how it would be done.
+ */
+ {
+ unsigned page_size, round;
+
+ page_size = getpagesize();
+ round = (unsigned long)user_ptr & (page_size-1);
+ mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
+ PROT_READ | PROT_WRITE);
+ }
+#endif
+ FREE(alloc_ptr);
+}
+
+#elif defined(EXEC_ALLOC_USE_MMAP)
+
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+ unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+ align_malloc_header *pHeader;
+
+ ASSERT( user_align > 0 );
+
+ /* We store the pointer to the actual address and size in a private
+ * header before the address the client sees. We need the actual
+ * pointer to free with and we need the size to unmap the region */
+
+ if (user_align < sizeof(align_malloc_header))
+ alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+ else
+ alloc_align = user_align;
+ alloc_size = user_size + alloc_align;
+
+ /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is; on some POSIX
+ * systems you may need to remove the MAP_ANONYMOUS flag and pass the
+ * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
+
+ alloc_ptr = (unsigned long) mmap(0, alloc_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if ((void *)alloc_ptr == MAP_FAILED) {
+ return(NULL);
+ }
+
+ user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+ pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+ pHeader->alloc_ptr = (void *) alloc_ptr;
+ pHeader->alloc_size = alloc_size;
+ pHeader->user_ptr = (void *) user_ptr;
+ pHeader->user_size = user_size;
+
+#ifdef DEBUG
+ {
+ unsigned char *p = (unsigned char *) alloc_ptr;
+ unsigned char *stop = (unsigned char *) pHeader;
+
+ /* mark the non-aligned area */
+ for(; p < stop; p++) {
+ *p = 0xcd;
+ }
+ }
+#endif
+
+ return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+ /* The header giving the real address and size is just prior to the address the client sees. */
+ align_malloc_header *pHeader;
+ void *alloc_ptr;
+ size_t alloc_size;
+
+ pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+ alloc_ptr = pHeader->alloc_ptr;
+ alloc_size = pHeader->alloc_size;
+
+ munmap(alloc_ptr, alloc_size);
+}
+#endif
+
diff -urN xc.org/extras/Mesa/src/imports.h xc/extras/Mesa/src/imports.h
--- xc.org/extras/Mesa/src/imports.h 2004-06-07 22:45:05.944324424 +0200
+++ xc/extras/Mesa/src/imports.h 2004-06-07 23:04:42.561451432 +0200
@@ -50,6 +50,9 @@
 #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N)
 #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N)
 #define ALIGN_FREE(PTR) _mesa_align_free(PTR)
+/* These allocate aligned memory in an area with execute permission, used for code generation. */
+#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N)
+#define EXEC_FREE(PTR) _mesa_exec_free(PTR)
 
 #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES)
 #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N)
@@ -120,6 +123,11 @@
 _mesa_align_free( void *ptr );
 
 extern void *
+_mesa_exec_malloc(size_t bytes, unsigned long alignment);
+extern void
+_mesa_exec_free(void *ptr);
+
+extern void *
 _mesa_memcpy( void *dest, const void *src, size_t n );
 
 extern void
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_exec.c xc/extras/Mesa/src/tnl/t_vtx_exec.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200
@@ -593,7 +593,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_x86.c xc/extras/Mesa/src/tnl/t_vtx_x86.c
--- xc.org/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200
+++ xc/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200
@@ -75,7 +75,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -126,7 +126,7 @@
 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
@@ -163,7 +163,7 @@
 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
@@ -205,7 +205,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -259,7 +259,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -303,7 +303,7 @@
 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -351,7 +351,7 @@
 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
 FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
@@ -393,7 +393,7 @@
 
 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -421,7 +421,7 @@
 
 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -449,7 +449,7 @@
 
 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
 return dfn;
@@ -475,7 +475,7 @@
 
 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
 return dfn;
@@ -499,7 +499,7 @@
 0xc3, /* ret */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
 return dfn;
@@ -531,7 +531,7 @@
 0xc3, /* ret */
 };
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
 FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
@@ -567,7 +567,7 @@
 insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
 dfn->key = key;
 
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
 FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
@@ -600,7 +600,7 @@
 
 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -624,7 +624,7 @@
 
 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -648,7 +648,7 @@
 
 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
@@ -670,7 +670,7 @@
 
 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
 dfn->key = key;
- dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+ dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
 memcpy (dfn->code, temp, sizeof(temp));
 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
 return dfn;
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200
@@ -1074,7 +1074,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200
+++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200
@@ -60,7 +60,7 @@
 insert_at_head( &CACHE, dfn ); \
 dfn->key[0] = key[0]; \
 dfn->key[1] = key[1]; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200
@@ -1042,7 +1042,7 @@
 struct dynfn *f, *tmp;
 foreach_s (f, tmp, l) {
 remove_from_list( f );
- ALIGN_FREE( f->code );
+ EXEC_FREE( f->code );
 FREE( f );
 }
 }
diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h
--- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200
@@ -58,7 +58,7 @@
 char *end = (char *)&FUNC##_end; \
 insert_at_head( &CACHE, dfn ); \
 dfn->key = key; \
- dfn->code = ALIGN_MALLOC( end - start, 16 ); \
+ dfn->code = EXEC_MALLOC( end - start, 16 ); \
 memcpy (dfn->code, start, end - start); \
 } \
 while ( 0 )
