--- xc/lib/GL/glx/Imakefile.redhat-libGL-exec-shield-fixes	2003-09-25 14:43:55.000000000 -0400
+++ xc/lib/GL/glx/Imakefile	2003-09-25 14:43:55.000000000 -0400
@@ -43,6 +43,7 @@
 #ifdef SparcArchitecture
 LinkSourceFile(glapi_sparc.S, $(MESASRCDIR)/src/SPARC)
 #endif
+LinkSourceFile(mem.c, $(MESASRCDIR)/src/mesa/glapi)
 
 
 # Maybe some of these could come from
@@ -70,7 +72,8 @@
         single2.c \
         singlepix.c \
         vertarr.c \
-        xfont.c
+        xfont.c \
+        mem.c
 
 GLX_OBJS = \
         clientattrib.o \
@@ -94,7 +97,8 @@
         single2.o \
         singlepix.o \
         vertarr.o \
-        xfont.o
+        xfont.o \
+        mem.o
 
 GLX_DEFS = GlxDefines
 
--- xc.org/extras/Mesa/src/mesa/glapi/glapi.c	2004-06-07 22:45:05.571381120 +0200
+++ xc/extras/Mesa/src/mesa/glapi/glapi.c	2004-06-07 23:11:34.201872576 +0200
@@ -50,6 +50,7 @@
 #include "glapioffsets.h"
 #include "glapitable.h"
 #include "glthread.h"
+#include "imports.h"
 
 extern hidden void *__glapi_noop_table[];
 
@@ -546,7 +547,7 @@
       0xe8, 0x00, 0x00, 0x00, 0x00,
       0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
    };
-   unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
+   unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
    unsigned int next_insn;
    if (code) {
       memcpy(code, insn_template, sizeof(insn_template));
@@ -587,7 +588,7 @@
       0x01000000  /* nop */
    };
 #endif
-   unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
+   unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
    unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
    if (code) {
       memcpy(code, insn_template, sizeof(insn_template));
--- xc/extras/Mesa/src/mesa/glapi/mem.c.org	1970-01-01 01:00:00.000000000 +0100
+++ xc/extras/Mesa/src/mesa/glapi/mem.c	2004-06-09 02:02:29.152086688 +0200
@@ -0,0 +1,325 @@
+#include <unistd.h>
+#include <sys/mman.h>
+#include "glheader.h"
+#include "config.h"
+#include "macros.h"
+
+/* Define a struct for our private data. This is preferred over pointer
+ * arithmetic to access individual pieces of our private data because the
+ * compiler will help us get alignment correct in a portable way and it
+ * makes it much easier to add or remove items from our private data */
+
+typedef struct align_malloc_header {
+   void   *alloc_ptr;       /* actual allocation ptr */
+   size_t alloc_size;       /* actual allocation size */
+   void   *user_ptr;        /* ptr returned to caller */
+   size_t user_size;        /* size caller requested */
+} align_malloc_header;
+
+static unsigned long RoundUpPowerOf2(unsigned long val);
+
+/*
+ * Execute permission implementation notes:
+ * John Dennis - jdennis@redhat.com - Red Hat Inc.
+ *
+ * Overview:
+ *
+ * Various parts of Mesa generate machine code during run time and
+ * then execute that code. We will use the term code gen to refer to
+ * this process. Some operating systems, in an attempt to achieve
+ * better security, enforce restrictions on which memory areas may
+ * contain executable code. In general execute permission is granted
+ * to .text sections and removed on stack or heap memory. It's the
+ * heap (and possibly the stack) where code is run time
+ * generated. This means on systems that enforce execute memory
+ * security you will get either a SEGV or SIGBUS exception when run
+ * time generated code executes, and the process will be terminated.
+ *
+ * Implementation:
+ *
+ * The solution is to provide unique malloc/free functions which
+ * return memory with execute permission and to make sure these
+ * allocation functions are called for code gen.
+ *
+ * There are 3 possible implementation solutions.
+ *
+ * Solution A: use mprotect on malloc block.
+ *
+ * In this scenario after a block is allocated via malloc we call
+ * mprotect on the pages containing the block and add execute
+ * permission. In theory a free of the block removes the execute
+ * permission.
+ *
+ * Pros: Simple to implement
+ *
+ * Cons: Because execute permission is granted to memory pages, when
+ *       mprotect is called on the page containing the malloc block
+ *       every other malloc block in that page also receives execute
+ *       permission; this is insecure.
+ *
+ *       When a malloc block is freed that had been allocated for
+ *       execute permission we should remove the execute permission
+ *       from that block so that when the heap manager reuses that
+ *       memory it will not be executable. But because execute
+ *       permission is granted to memory pages and a page may have
+ *       more than one malloc block with execute permission we
+ *       cannot remove execute permission because that would remove
+ *       execute permission on any executable malloc blocks still in
+ *       that page. By not removing the execute permission on free
+ *       we will tend to "leak" executable memory as more and more
+ *       heap pages accumulate execute permission, possibly without
+ *       needing it.
+ *
+ * Solution B: use mmap to allocate block
+ *
+ * In this scenario every call to alloc an executable block is
+ * performed with anonymous mmap. Mmap always allocates pages of
+ * memory. When free is called we unmap the pages.
+ *
+ * Pros: This is much more secure. The kernel places the allocation
+ *       in special pages that have additional protection. These
+ *       pages are not near any other pages.
+ *
+ *       The pages used do not contain any heap allocation that is
+ *       not supposed to be executable; therefore we are not
+ *       inadvertently granting execute permission to a malloc block
+ *       that happens to live in the same page as an execute malloc
+ *       block.
+ *
+ *       The allocation can be freed without affecting any other
+ *       allocation and it will be reused by the kernel.
+ *
+ *       It's simple to implement. As simple as solution A.
+ *
+ * Cons: Mmap only allocates in units of pages. Thus even a small
+ *       allocation will use an entire page. However note, only a
+ *       small number of exec mallocs are done, so the wasted memory
+ *       is not likely to be an issue.
+ *
+ *       Because every code generated function will live alone in
+ *       its own page this will probably introduce more cache misses
+ *       and page faults than if all the code coalesced together
+ *       into one or more pages as would be the case with regular
+ *       .text sections.
+ *
+ * Solution C: use separate malloc implementation using mmap'ed heap arena
+ *
+ * In this scenario a new heap manager is introduced which manages a
+ * heap arena using anonymous mmap with execute permission. All
+ * executable allocations are provided using only this heap arena.
+ *
+ * Pros: This is the ideal solution. As in Solution B executable and
+ *       non-executable allocations are never mixed. Executable
+ *       allocations are provided using the most secure pages the
+ *       kernel manages.
+ *
+ *       Pages will likely contain multiple allocations as opposed
+ *       to Solution B where pages will be sparsely used. This
+ *       improves cache and page fault behavior.
+ *
+ * Cons: This is the most involved implementation and requires the
+ *       introduction of a heap manager implementation that has been
+ *       modified to work with anonymous mmap. However, note that
+ *       the GNU malloc implementation has been modified to work
+ *       with anonymous mmap.
+ */
+
+#if 1
+#define EXEC_ALLOC_USE_MMAP
+#else
+#define EXEC_ALLOC_USE_MALLOC
+#endif
+
+/* If input is power of 2 return that, else round up to next power of 2 */
+static unsigned long RoundUpPowerOf2(unsigned long val)
+{
+   int i, setBits;
+
+   if (val == 0) return(1UL);
+   if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
+      /* out of range, should be fatal error?, for now return max power of 2 */
+      return (1UL << (sizeof(unsigned long) * 8 - 1));
+   }
+
+   for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
+      if (val & 1UL) setBits++;
+   }
+   if (setBits > 1)
+      return (1UL << i);     /* input was not power of 2 */
+   else
+      return (1UL << (i-1)); /* input was power of 2 */
+}
+
+/*
+ * Allocate N-byte aligned memory in executable region (uninitialized)
+ */
+
+#ifdef EXEC_ALLOC_USE_MALLOC
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+   unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+   align_malloc_header *pHeader;
+
+   ASSERT( user_align > 0 );
+
+   /* We store the pointer to the actual address and size in a private
+    * header before the address the client sees. We need the actual
+    * pointer to free with and we need the size to remove execute permission
+    * on the block */
+
+   if (user_align < sizeof(align_malloc_header))
+      alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+   else
+      alloc_align = user_align;
+   alloc_size = user_size + alloc_align;
+
+   alloc_ptr = (unsigned long) MALLOC(alloc_size);
+
+   if (!alloc_ptr) return(NULL);
+
+   user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+   pHeader  = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+   pHeader->alloc_ptr  = (void *) alloc_ptr;
+   pHeader->alloc_size = alloc_size;
+   pHeader->user_ptr   = (void *) user_ptr;
+   pHeader->user_size  = user_size;
+
+   {
+      unsigned page_size, round;
+
+      page_size = getpagesize();
+      round = user_ptr & (page_size-1);
+      mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
+               PROT_READ | PROT_WRITE | PROT_EXEC);
+   }
+
+#ifdef DEBUG
+   {
+      unsigned char *p    = (unsigned char *) alloc_ptr;
+      unsigned char *stop = (unsigned char *) pHeader;
+
+      /* mark the non-aligned area */
+      for(; p < stop; p++) {
+         *p = 0xcd;
+      }
+   }
+#endif
+
+   return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+   /* The header giving the real address and size is just prior to the address the client sees. */
+   align_malloc_header *pHeader;
+   void *alloc_ptr;
+   size_t user_size;
+
+   pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+   alloc_ptr = pHeader->alloc_ptr;
+   user_size = pHeader->user_size;
+
+#if 0
+   /*
+    * Unfortunately we cannot remove the execute permission on this
+    * malloc block because execute permission is granted on a page
+    * basis. If the page containing this malloc block also contained
+    * another malloc block with execute permission that was still in
+    * effect then we would remove execute permission on a malloc block
+    * where it should still be enforced. This does mean we will tend to
+    * "leak" execute permission in the heap. See above block comment
+    * on implementation issues.
+    *
+    * Note, we could keep a ref count on each page and when the ref count
+    * fell to zero we could remove the execute permission.
+    *
+    * If we did remove the execute permission this is how it would be done.
+    */
+   {
+      unsigned page_size, round;
+
+      page_size = getpagesize();
+      round = (unsigned long)user_ptr & (page_size-1);
+      mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
+               PROT_READ | PROT_WRITE);
+   }
+#endif
+   FREE(alloc_ptr);
+}
+
+#elif defined(EXEC_ALLOC_USE_MMAP)
+
+void *
+_mesa_exec_malloc(size_t user_size, unsigned long user_align)
+{
+   unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
+   align_malloc_header *pHeader;
+
+   ASSERT( user_align > 0 );
+
+   /* We store the pointer to the actual address and size in a private
+    * header before the address the client sees. We need the actual
+    * pointer to free with and we need the size to unmap the region */
+
+   if (user_align < sizeof(align_malloc_header))
+      alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
+   else
+      alloc_align = user_align;
+   alloc_size = user_size + alloc_align;
+
+   /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is; on some POSIX
+    * systems you may need to remove the MAP_ANONYMOUS flag and pass the
+    * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
+
+   alloc_ptr = (unsigned long) mmap(0, alloc_size,
+      PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+   if ((void *)alloc_ptr == MAP_FAILED) {
+      return(NULL);
+   }
+
+   user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
+   pHeader  = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
+   pHeader->alloc_ptr  = (void *) alloc_ptr;
+   pHeader->alloc_size = alloc_size;
+   pHeader->user_ptr   = (void *) user_ptr;
+   pHeader->user_size  = user_size;
+
+#ifdef DEBUG
+   {
+      unsigned char *p    = (unsigned char *) alloc_ptr;
+      unsigned char *stop = (unsigned char *) pHeader;
+
+      /* mark the non-aligned area */
+      for(; p < stop; p++) {
+         *p = 0xcd;
+      }
+   }
+#endif
+
+   return (void *)user_ptr;
+}
+
+/*
+ * Free N-byte executable aligned memory
+ */
+void
+_mesa_exec_free(void *user_ptr)
+{
+   /* The header giving the real address and size is just prior to the address the client sees. */
+   align_malloc_header *pHeader;
+   void *alloc_ptr;
+   size_t alloc_size;
+
+   pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
+   alloc_ptr = pHeader->alloc_ptr;
+   alloc_size = pHeader->alloc_size;
+
+   munmap(alloc_ptr, alloc_size);
+}
+#endif
+
--- xc.org/extras/Mesa/src/mesa/main/imports.h	2004-06-07 22:45:05.944324424 +0200
+++ xc/extras/Mesa/src/mesa/main/imports.h	2004-06-07 23:04:42.561451432 +0200
@@ -50,6 +50,9 @@
 #define ALIGN_MALLOC_STRUCT(T, N)  (struct T *) _mesa_align_malloc(sizeof(struct T), N)
 #define ALIGN_CALLOC_STRUCT(T, N)  (struct T *) _mesa_align_calloc(sizeof(struct T), N)
 #define ALIGN_FREE(PTR)            _mesa_align_free(PTR)
+/* These allocate aligned memory in an area with execute permission, used for code generation. */
+#define EXEC_MALLOC(BYTES, N)      (void *) _mesa_exec_malloc(BYTES, N)
+#define EXEC_FREE(PTR)             _mesa_exec_free(PTR)
 
 #define MEMCPY( DST, SRC, BYTES)   _mesa_memcpy(DST, SRC, BYTES)
 #define MEMSET( DST, VAL, N )      _mesa_memset(DST, VAL, N)
@@ -120,6 +123,11 @@
 _mesa_align_free( void *ptr );
 
 extern void *
+_mesa_exec_malloc(size_t bytes, unsigned long alignment);
+extern void
+_mesa_exec_free(void *ptr);
+
+extern void *
 _mesa_memcpy( void *dest, const void *src, size_t n );
 
 extern void
--- xc.org/extras/Mesa/src/mesa/tnl/t_vtx_exec.c	2004-06-07 22:45:06.597225168 +0200
+++ xc/extras/Mesa/src/mesa/tnl/t_vtx_exec.c	2004-06-07 23:17:12.494444288 +0200
@@ -593,7 +593,7 @@
    struct dynfn *f, *tmp;
    foreach_s (f, tmp, l) {
       remove_from_list( f );
-      ALIGN_FREE( f->code );
+      EXEC_FREE( f->code );
       FREE( f );
    }
 }
--- xc.org/extras/Mesa/src/mesa/tnl/t_vtx_x86.c	2004-06-07 22:45:06.608223496 +0200
+++ xc/extras/Mesa/src/mesa/tnl/t_vtx_x86.c	2004-06-07 23:16:32.268559552 +0200
@@ -75,7 +75,7 @@
       0xff, 0x25, 0, 0, 0, 0      /* jmp NOTIFY */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
    FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -126,7 +126,7 @@
       0xff, 0x25, 0,0,0,0         /* jmp *NOTIFY */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
    FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
@@ -163,7 +163,7 @@
       0xff, 0x25, 0,0,0,0,        /* jmp *NOTIFY */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
    FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
@@ -205,7 +205,7 @@
       0xff, 0x25, 0, 0, 0, 0      /* jmp NOTIFY */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
    FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
@@ -259,7 +259,7 @@
       0xff, 0x25, 0x08, 0, 0, 0,  /* jmp *0x8 */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
    FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -303,7 +303,7 @@
       0xff, 0x25, 0x08, 0, 0, 0,  /* jmp *0x8 */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
    FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
@@ -351,7 +351,7 @@
       0xff, 0x25, 0, 0, 0, 0      /* jmp NOTIFY */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
    FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
@@ -393,7 +393,7 @@
 
    insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
    return dfn;
@@ -421,7 +421,7 @@
 
    insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
    return dfn;
@@ -449,7 +449,7 @@
 
    insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
    return dfn;
@@ -475,7 +475,7 @@
 
    insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
    return dfn;
@@ -499,7 +499,7 @@
       0xc3,                       /* ret */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
    return dfn;
@@ -531,7 +531,7 @@
       0xc3,                       /* ret */
    };
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
    FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
@@ -567,7 +567,7 @@
    insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
    dfn->key = key;
 
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
    FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
@@ -600,7 +600,7 @@
 
    insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
    return dfn;
@@ -624,7 +624,7 @@
 
    insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
    return dfn;
@@ -648,7 +648,7 @@
 
    insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
    return dfn;
@@ -670,7 +670,7 @@
 
    insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
    dfn->key = key;
-   dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
+   dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
    memcpy (dfn->code, temp, sizeof(temp));
    FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
    return dfn;
--- xc.org/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.c	2004-06-07 22:44:55.376930912 +0200
+++ xc/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.c	2004-06-07 22:48:38.196057256 +0200
@@ -1074,7 +1074,7 @@
    struct dynfn *f, *tmp;
    foreach_s (f, tmp, l) {
       remove_from_list( f );
-      ALIGN_FREE( f->code );
+      EXEC_FREE( f->code );
       FREE( f );
    }
 }
--- xc.org/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.h	2004-06-07 22:44:55.377930760 +0200
+++ xc/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.h	2004-06-07 22:48:38.192057864 +0200
@@ -60,7 +60,7 @@
    insert_at_head( &CACHE, dfn );                   \
    dfn->key[0] = key[0];                            \
    dfn->key[1] = key[1];                            \
-   dfn->code = ALIGN_MALLOC( end - start, 16 );     \
+   dfn->code = EXEC_MALLOC( end - start, 16 );      \
    memcpy (dfn->code, start, end - start);          \
 }                                                   \
 while ( 0 )
--- xc.org/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.c	2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.c	2004-06-07 22:48:38.218053912 +0200
@@ -1042,7 +1042,7 @@
    struct dynfn *f, *tmp;
    foreach_s (f, tmp, l) {
       remove_from_list( f );
-      ALIGN_FREE( f->code );
+      EXEC_FREE( f->code );
       FREE( f );
    }
 }
--- xc.org/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.h	2004-06-07 22:44:55.473916168 +0200
+++ xc/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.h	2004-06-07 22:48:38.214054520 +0200
@@ -58,7 +58,7 @@
    char *end = (char *)&FUNC##_end;                 \
    insert_at_head( &CACHE, dfn );                   \
    dfn->key = key;                                  \
-   dfn->code = ALIGN_MALLOC( end - start, 16 );     \
+   dfn->code = EXEC_MALLOC( end - start, 16 );      \
    memcpy (dfn->code, start, end - start);          \
 }                                                   \
 while ( 0 )
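
The mem.c file added above implements the "Solution B" strategy described in its own comment block: each code-gen buffer is an anonymous PROT_EXEC mmap, with a small alignment header in front of the returned pointer so _mesa_exec_free() can recover the real mapping and its size. The following minimal, standalone sketch shows the same mmap-based technique in isolation. It is illustrative only and not part of the patch: the names exec_alloc/exec_free and the x86 "return 42" stub are assumptions, it omits the alignment header that _mesa_exec_malloc() adds, and it passes fd = -1 to mmap (the patch passes 0 and notes the portability question in its own comment).

/* Sketch of mmap-based executable allocation ("Solution B"); x86/x86-64 Linux. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *exec_alloc(size_t size)
{
   /* Anonymous private mapping with execute permission. */
   void *p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   return (p == MAP_FAILED) ? NULL : p;
}

static void exec_free(void *p, size_t size)
{
   /* No header here, so the caller must remember the size. */
   munmap(p, size);
}

int main(void)
{
   /* x86 machine code for "return 42": mov $42,%eax ; ret */
   static const unsigned char tmpl[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
   unsigned char *code = exec_alloc(sizeof(tmpl));
   int (*fn)(void);

   if (!code)
      return 1;
   memcpy(code, tmpl, sizeof(tmpl));
   fn = (int (*)(void)) code;   /* cast the data pointer to a function pointer */
   printf("generated code returned %d\n", fn());
   exec_free(code, sizeof(tmpl));
   return 0;
}

On a kernel with exec-shield or any other W^X policy, the same template copied into a plain malloc()ed buffer would fault when called; that is the failure the patch avoids by routing Mesa's generated stubs through EXEC_MALLOC/EXEC_FREE instead of malloc/ALIGN_MALLOC.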