]>
Commit | Line | Data |
---|---|---|
b88abb6c AM |
1 | diff -urN xc.org/extras/Mesa/src/glapi.c xc/extras/Mesa/src/glapi.c |
2 | --- xc.org/extras/Mesa/src/glapi.c 2004-06-07 22:45:05.571381120 +0200 | |
3 | +++ xc/extras/Mesa/src/glapi.c 2004-06-07 23:11:34.201872576 +0200 | |
b88abb6c AM |
4 | @@ -546,7 +547,7 @@ |
5 | 0xe8, 0x00, 0x00, 0x00, 0x00, | |
6 | 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00 | |
7 | }; | |
8 | - unsigned char *code = (unsigned char *) malloc(sizeof(insn_template)); | |
9 | + unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16); | |
10 | unsigned int next_insn; | |
11 | if (code) { | |
12 | memcpy(code, insn_template, sizeof(insn_template)); | |
13 | @@ -587,7 +588,7 @@ | |
14 | 0x01000000 /* nop */ | |
15 | }; | |
16 | #endif | |
17 | - unsigned int *code = (unsigned int *) malloc(sizeof(insn_template)); | |
18 | + unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16); | |
19 | unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch; | |
20 | if (code) { | |
21 | memcpy(code, insn_template, sizeof(insn_template)); | |
22 | diff -urN xc.org/extras/Mesa/src/imports.c xc/extras/Mesa/src/imports.c | |
23 | --- xc.org/extras/Mesa/src/imports.c 2004-06-07 22:45:05.943324576 +0200 | |
24 | +++ xc/extras/Mesa/src/imports.c 2004-06-07 23:08:05.289632064 +0200 | |
25 | @@ -59,6 +59,19 @@ | |
26 | extern int vsnprintf(char *str, size_t count, const char *fmt, va_list arg); | |
27 | #endif | |
28 | ||
29 | +/* Define a struct for our private data. This is preferred over pointer | |
30 | + * arithmetic to access individual pieces of our private data because the | |
31 | + * compiler will help us get alignment correct in a portable way and it | |
32 | + * makes it much easier to add or remove items from our private data */ | |
33 | + | |
34 | +typedef struct align_malloc_header { | |
35 | + void *alloc_ptr; /* actual allocation ptr */ | |
36 | + size_t alloc_size; /* actual allocation size */ | |
37 | + void *user_ptr; /* ptr returned to caller */ | |
38 | + size_t user_size; /* size caller requested */ | |
39 | +} align_malloc_header; | |
40 | + | |
41 | +static unsigned long RoundUpPowerOf2(unsigned long val); | |
42 | ||
43 | /**********************************************************************/ | |
44 | /* Wrappers for standard C library functions */ | |
45 | @@ -175,6 +188,310 @@ | |
46 | #endif | |
47 | } | |
48 | ||
49 | +/* | |
50 | + * Execute permission implementation notes: | |
51 | + * John Dennis - jdennis@redhat.com - Red Hat Inc. | |
52 | + * | |
53 | + * Overview: | |
54 | + * | |
55 | + * Various parts of Mesa generate machine code during run time and | |
56 | + * then executes that code. We will use the term code gen to refer to | |
57 | + * this process. Some operating systems in an attempt to achieve | |
58 | + * better security enforce restrictions on which memory areas may | |
59 | + * contain executable code. In general execute permission is granted | |
60 | + * to .text sections and removed on stack or heap memory. It's the | |
61 | + * heap (and possibly the stack) where code is run time | |
62 | + * generated. This means on systems that enforce execute memory | |
63 | + * security you will get either a SEGV or SIGBUS exception when run | |
64 | + * time generated code executes and the process will be terminated. | |
65 | + * | |
66 | + * Implementation: | |
67 | + * | |
68 | + * The solution is to provide unique malloc/free functions which | |
69 | + * return memory with execute permission and to make sure these | |
70 | + * allocation functions are called for code gen. | |
71 | + * | |
72 | + * There are 3 possible implementation solutions. | |
73 | + * | |
74 | + * Solution A: use mprotect on malloc block. | |
75 | + * | |
76 | + * In this scenario after a block is allocated via malloc we call | |
77 | + * mprotect on the pages containing the block and add execute | |
78 | + * permission. In theory a free of the block removes the execute | |
79 | + * permission. | |
80 | + * | |
81 | + * Pros: Simple to implement | |
82 | + * | |
83 | + * Cons: Because execute permission is granted to memory pages when | |
84 | + * mprotect is called on the page containing the malloc block | |
85 | + * every other malloc block in that page also receives execute | |
86 | + * permission, this is insecure. | |
87 | + * | |
88 | + * When a malloc block is freed that had been allocated for | |
89 | + * execute permission we should remove the execute permission | |
90 | + * from that block so that when the heap manager reuses that | |
91 | + * memory it will not be executable. But because execute | |
92 | + * permission is granted to memory pages and a page may have | |
93 | + * more than one malloc block with execute permission we | |
94 | + * cannot remove execute permission because that would remove | |
95 | + * execute permission on any executable malloc blocks still in | |
96 | + * that page. By not removing the execution permission on free | |
97 | + * we will tend to "leak" executable memory as more and more | |
98 | + * heap pages accumulate execute permission, possibly without | |
99 | + * needing it. | |
100 | + * | |
101 | + * Solution B: use mmap to allocate block | |
102 | + * | |
103 | + * In this scenario every call to alloc an executable block is | |
104 | + * performed with anonymous mmap. Mmap always allocates pages of | |
105 | + * memory. When free is called we unmap the pages. | |
106 | + * | |
107 | + * Pros: This is much more secure. The kernel places the allocation | |
108 | + * in special pages that have additional protection. These | |
109 | + * pages are not near any other pages. | |
110 | + * | |
111 | + * The pages used do not contain any heap allocation that is | |
112 | + * not supposed to be executable, therefore we are not | |
113 | + * inadvertently granting execute permission to a malloc block | |
114 | + * that happens to live in the same page as a execute malloc | |
115 | + * block. | |
116 | + * | |
117 | + * The allocation can be freed without affecting any other | |
118 | + * allocation and it will be reused by the kernel. | |
119 | + * | |
120 | + * Its simple to implement. As simple as solution A. | |
121 | + * | |
122 | + * Cons: Mmap only allocates in units of pages. Thus even a small | |
123 | + * allocation will use an entire page. However note, only a | |
124 | + * small number exec malloc's are done so the wasted memory | |
125 | + * is not likely to be an issue. | |
126 | + * | |
127 | + * Because every code generated function will live alone in | |
128 | + * its own page this will probably introduce more cache misses | |
129 | + * and page faults than if the all the code coalesced together | |
130 | + * into one or more pages as would be the case with regular | |
131 | + * .text sections. | |
132 | + * | |
133 | + * Solution C: use separate malloc implementation using mmap'ed heap arena | |
134 | + * | |
135 | + * In this scenario a new heap manager is introduced which manages a | |
136 | + * heap arena using anonymous mmap with execute permission. All | |
137 | + * executable allocations are provided using only this heap arena. | |
138 | + * | |
139 | + * Pros: This is the ideal solution. As in Solution B executable and | |
140 | + * non-executable allocations are never mixed. Executable | |
141 | + * allocations are provided using the most secure pages the | |
142 | + * kernel manages. | |
143 | + * | |
144 | + * Pages will likely contain multiple allocations as opposed | |
145 | + * to Solution B where pages will be sparsely used. This | |
146 | + * improves cache and page fault behavior. | |
147 | + * | |
148 | + * Cons: This is the most involved implementation and requires the | |
149 | + * introduction of a heap manager implementation that has been | |
150 | + * modified to work with anonymous mmap. However, note that | |
151 | + * the GNU malloc implementation has been modified to work | |
152 | + * with anonymous mmap. | |
153 | + */ | |
154 | + | |
155 | +#if 1 | |
156 | +#define EXEC_ALLOC_USE_MMAP | |
157 | +#else | |
158 | +#define EXEC_ALLOC_USE_MALLOC | |
159 | +#endif | |
160 | + | |
161 | +/* If input is power of 2 return that, else round up to next power of 2 */ | |
162 | +static unsigned long RoundUpPowerOf2(unsigned long val) | |
163 | +{ | |
164 | + int i, setBits; | |
165 | + | |
166 | + if (val == 0) return(1UL); | |
167 | + if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) { | |
168 | + /* out of range, should be fatal error?, for now return max power of 2 */ | |
169 | + return (1UL << (sizeof(unsigned long) * 8 - 1)); | |
170 | + } | |
171 | + | |
172 | + for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) { | |
173 | + if (val & 1UL) setBits++; | |
174 | + } | |
175 | + if (setBits > 1) | |
176 | + return (1UL << i); /* input was not power of 2 */ | |
177 | + else | |
178 | + return (1UL << (i-1)); /* input was power of 2 */ | |
179 | +} | |
180 | + | |
181 | +/* | |
182 | + * Allocate N-byte aligned memory in executable region (uninitialized) | |
183 | + */ | |
184 | + | |
185 | +#ifdef EXEC_ALLOC_USE_MALLOC | |
186 | +void * | |
187 | +_mesa_exec_malloc(size_t user_size, unsigned long user_align) | |
188 | +{ | |
189 | + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align; | |
190 | + align_malloc_header *pHeader; | |
191 | + | |
192 | + ASSERT( user_align > 0 ); | |
193 | + | |
194 | + /* We store the pointer to the actual address and size in a private | |
195 | + * header before the address the client sees. We need the actual | |
196 | + * pointer to free with and we need the size to remove execute permission | |
197 | + * on the block */ | |
198 | + | |
199 | + if (user_align < sizeof(align_malloc_header)) | |
200 | + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header)); | |
201 | + else | |
202 | + alloc_align = user_align; | |
203 | + alloc_size = user_size + alloc_align; | |
204 | + | |
205 | + alloc_ptr = (unsigned long) MALLOC(alloc_size); | |
206 | + | |
207 | + if (!alloc_ptr) return(NULL); | |
208 | + | |
209 | + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1); | |
210 | + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header)); | |
211 | + pHeader->alloc_ptr = (void *) alloc_ptr; | |
212 | + pHeader->alloc_size = alloc_size; | |
213 | + pHeader->user_ptr = (void *) user_ptr; | |
214 | + pHeader->user_size = user_size; | |
215 | + | |
216 | + { | |
217 | + unsigned page_size, round; | |
218 | + | |
219 | + page_size = getpagesize(); | |
220 | + round = user_ptr & (page_size-1); | |
221 | + mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1), | |
222 | + PROT_READ | PROT_WRITE | PROT_EXEC); | |
223 | + } | |
224 | + | |
225 | +#ifdef DEBUG | |
226 | + { | |
227 | + unsigned char *p = (unsigned char *) alloc_ptr; | |
228 | + unsigned char *stop = (unsigned char *) pHeader; | |
229 | + | |
230 | + /* mark the non-aligned area */ | |
231 | + for(; p < stop; p++) { | |
232 | + *p = 0xcd; | |
233 | + } | |
234 | + } | |
235 | +#endif | |
236 | + | |
237 | + return (void *)user_ptr; | |
238 | +} | |
239 | + | |
240 | +/* | |
241 | + * Free N-byte executable aligned memory | |
242 | + */ | |
243 | +void | |
244 | +_mesa_exec_free(void *user_ptr) | |
245 | +{ | |
246 | + /* The header giving the real address and size is just prior to the address the client sees. */ | |
247 | + align_malloc_header *pHeader; | |
248 | + void *alloc_ptr; | |
249 | + size_t user_size; | |
250 | + | |
251 | + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header)); | |
252 | + alloc_ptr = pHeader->alloc_ptr; | |
253 | + user_size = pHeader->user_size; | |
254 | + | |
255 | +#if 0 | |
256 | + /* | |
257 | + * Unfortunately we cannot remove the execute permission on this | |
258 | + * malloc block because execute permission is granted on a page | |
259 | + * basis. If the page containing this malloc block also contained | |
260 | + * another malloc block with execute permission that was still in | |
261 | + * effect then we will remove execute permission on a malloc block | |
262 | + * that should still be enforced. This does mean we will tend to | |
263 | + * "leak" execute permission in the heap. See above block comment | |
264 | + * on implementation issues. | |
265 | + * | |
266 | + * Note, we could keep a ref count on each page and when the ref count | |
267 | + * fell to zero we could remove the execute permission. | |
268 | + * | |
269 | + * If we did remove the execute permission this is how it would be done. | |
270 | + */ | |
271 | + { | |
272 | + unsigned page_size, round; | |
273 | + | |
274 | + page_size = getpagesize(); | |
275 | + round = (unsigned long)user_ptr & (page_size-1); | |
276 | + mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1), | |
277 | + PROT_READ | PROT_WRITE); | |
278 | + } | |
279 | +#endif | |
280 | + FREE(alloc_ptr); | |
281 | +} | |
282 | + | |
283 | +#elif defined(EXEC_ALLOC_USE_MMAP) | |
284 | + | |
285 | +void * | |
286 | +_mesa_exec_malloc(size_t user_size, unsigned long user_align) | |
287 | +{ | |
288 | + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align; | |
289 | + align_malloc_header *pHeader; | |
290 | + | |
291 | + ASSERT( user_align > 0 ); | |
292 | + | |
293 | + /* We store the pointer to the actual address and size in a private | |
294 | + * header before the address the client sees. We need the actual | |
295 | + * pointer to free with and we need the size to unmap the region */ | |
296 | + | |
297 | + if (user_align < sizeof(align_malloc_header)) | |
298 | + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header)); | |
299 | + else | |
300 | + alloc_align = user_align; | |
301 | + alloc_size = user_size + alloc_align; | |
302 | + | |
303 | + /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is, on some POSIX | |
304 | + * systems you may need to remove the MAP_ANONYMOUS flag and pass the | |
305 | + * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */ | |
306 | + | |
307 | + alloc_ptr = (unsigned long) mmap(0, alloc_size, | |
308 | + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); | |
309 | + if ((void *)alloc_ptr == MAP_FAILED) { | |
310 | + return(NULL); | |
311 | + } | |
312 | + | |
313 | + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1); | |
314 | + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header)); | |
315 | + pHeader->alloc_ptr = (void *) alloc_ptr; | |
316 | + pHeader->alloc_size = alloc_size; | |
317 | + pHeader->user_ptr = (void *) user_ptr; | |
318 | + pHeader->user_size = user_size; | |
319 | + | |
320 | +#ifdef DEBUG | |
321 | + { | |
322 | + unsigned char *p = (unsigned char *) alloc_ptr; | |
323 | + unsigned char *stop = (unsigned char *) pHeader; | |
324 | + | |
325 | + /* mark the non-aligned area */ | |
326 | + for(; p < stop; p++) { | |
327 | + *p = 0xcd; | |
328 | + } | |
329 | + } | |
330 | +#endif | |
331 | + | |
332 | + return (void *)user_ptr; | |
333 | +} | |
334 | + | |
335 | +/* | |
336 | + * Free N-byte executable aligned memory | |
337 | + */ | |
338 | +void | |
339 | +_mesa_exec_free(void *user_ptr) | |
340 | +{ | |
341 | + /* The header giving the real address and size is just prior to the address the client sees. */ | |
342 | + align_malloc_header *pHeader; | |
343 | + void *alloc_ptr; | |
344 | + size_t alloc_size; | |
345 | + | |
346 | + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header)); | |
347 | + alloc_ptr = pHeader->alloc_ptr; | |
348 | + alloc_size = pHeader->alloc_size; | |
349 | + | |
350 | + munmap(alloc_ptr, alloc_size); | |
351 | +} | |
352 | +#endif | |
353 | ||
354 | void * | |
355 | _mesa_memcpy(void *dest, const void *src, size_t n) | |
356 | diff -urN xc.org/extras/Mesa/src/imports.h xc/extras/Mesa/src/imports.h | |
357 | --- xc.org/extras/Mesa/src/imports.h 2004-06-07 22:45:05.944324424 +0200 | |
358 | +++ xc/extras/Mesa/src/imports.h 2004-06-07 23:04:42.561451432 +0200 | |
359 | @@ -50,6 +50,9 @@ | |
360 | #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N) | |
361 | #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N) | |
362 | #define ALIGN_FREE(PTR) _mesa_align_free(PTR) | |
363 | +/* These allocate aligned memory in an area with execute permission, used for code generation. */ | |
364 | +#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N) | |
365 | +#define EXEC_FREE(PTR) _mesa_exec_free(PTR) | |
366 | ||
367 | #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES) | |
368 | #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N) | |
369 | @@ -120,6 +123,11 @@ | |
370 | _mesa_align_free( void *ptr ); | |
371 | ||
372 | extern void * | |
373 | +_mesa_exec_malloc(size_t bytes, unsigned long alignment); | |
374 | +extern void | |
375 | +_mesa_exec_free(void *ptr); | |
376 | + | |
377 | +extern void * | |
378 | _mesa_memcpy( void *dest, const void *src, size_t n ); | |
379 | ||
380 | extern void | |
f9db2db8 AM |
381 | diff -urN xc.org/extras/Mesa/src/mtypes.h xc/extras/Mesa/src/mtypes.h |
382 | --- xc.org/extras/Mesa/src/mtypes.h 2004-06-07 22:45:05.956322600 +0200 | |
383 | +++ xc/extras/Mesa/src/mtypes.h 2004-06-07 23:05:35.023475992 +0200 | |
f5b9c4eb | 384 | @@ -36,6 +36,7 @@ |
f9db2db8 AM |
385 | #include "config.h" /* Hardwired parameters */ |
386 | #include "glapitable.h" | |
f5b9c4eb AM |
387 | #include "glthread.h" |
388 | +#include <sys/mman.h> | |
389 | ||
390 | #include "math/m_matrix.h" /* GLmatrix */ | |
391 | ||
b88abb6c AM |
392 | diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_exec.c xc/extras/Mesa/src/tnl/t_vtx_exec.c |
393 | --- xc.org/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200 | |
394 | +++ xc/extras/Mesa/src/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200 | |
395 | @@ -593,7 +593,7 @@ | |
396 | struct dynfn *f, *tmp; | |
397 | foreach_s (f, tmp, l) { | |
398 | remove_from_list( f ); | |
399 | - ALIGN_FREE( f->code ); | |
400 | + EXEC_FREE( f->code ); | |
401 | FREE( f ); | |
402 | } | |
403 | } | |
404 | diff -urN xc.org/extras/Mesa/src/tnl/t_vtx_x86.c xc/extras/Mesa/src/tnl/t_vtx_x86.c | |
405 | --- xc.org/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200 | |
406 | +++ xc/extras/Mesa/src/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200 | |
407 | @@ -75,7 +75,7 @@ | |
408 | 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */ | |
409 | }; | |
410 | ||
411 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
412 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
413 | memcpy (dfn->code, temp, sizeof(temp)); | |
414 | FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]); | |
415 | FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr); | |
416 | @@ -126,7 +126,7 @@ | |
417 | 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */ | |
418 | }; | |
419 | ||
420 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
421 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
422 | memcpy (dfn->code, temp, sizeof(temp)); | |
423 | FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr); | |
424 | FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]); | |
425 | @@ -163,7 +163,7 @@ | |
426 | 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */ | |
427 | }; | |
428 | ||
429 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
430 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
431 | memcpy (dfn->code, temp, sizeof(temp)); | |
432 | FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr); | |
433 | FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]); | |
434 | @@ -205,7 +205,7 @@ | |
435 | 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */ | |
436 | }; | |
437 | ||
438 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
439 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
440 | memcpy (dfn->code, temp, sizeof(temp)); | |
441 | FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]); | |
442 | FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr); | |
443 | @@ -259,7 +259,7 @@ | |
444 | 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */ | |
445 | }; | |
446 | ||
447 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
448 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
449 | memcpy (dfn->code, temp, sizeof(temp)); | |
450 | FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr); | |
451 | FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]); | |
452 | @@ -303,7 +303,7 @@ | |
453 | 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */ | |
454 | }; | |
455 | ||
456 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
457 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
458 | memcpy (dfn->code, temp, sizeof(temp)); | |
459 | FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr); | |
460 | FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]); | |
461 | @@ -351,7 +351,7 @@ | |
462 | 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */ | |
463 | }; | |
464 | ||
465 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
466 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
467 | memcpy (dfn->code, temp, sizeof(temp)); | |
468 | FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr); | |
469 | FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3); | |
470 | @@ -393,7 +393,7 @@ | |
471 | ||
472 | insert_at_head( &tnl->dfn_cache.Normal3fv, dfn ); | |
473 | dfn->key = key; | |
474 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
475 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
476 | memcpy (dfn->code, temp, sizeof(temp)); | |
477 | FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr); | |
478 | return dfn; | |
479 | @@ -421,7 +421,7 @@ | |
480 | ||
481 | insert_at_head( &tnl->dfn_cache.Normal3f, dfn ); | |
482 | dfn->key = key; | |
483 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
484 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
485 | memcpy (dfn->code, temp, sizeof(temp)); | |
486 | FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr); | |
487 | return dfn; | |
488 | @@ -449,7 +449,7 @@ | |
489 | ||
490 | insert_at_head( &tnl->dfn_cache.Normal3fv, dfn ); | |
491 | dfn->key = key; | |
492 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
493 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
494 | memcpy (dfn->code, temp, sizeof(temp)); | |
495 | FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr); | |
496 | return dfn; | |
497 | @@ -475,7 +475,7 @@ | |
498 | ||
499 | insert_at_head( &tnl->dfn_cache.Normal3f, dfn ); | |
500 | dfn->key = key; | |
501 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
502 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
503 | memcpy (dfn->code, temp, sizeof(temp)); | |
504 | FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr); | |
505 | return dfn; | |
506 | @@ -499,7 +499,7 @@ | |
507 | 0xc3, /* ret */ | |
508 | }; | |
509 | ||
510 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
511 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
512 | memcpy (dfn->code, temp, sizeof(temp)); | |
513 | FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr); | |
514 | return dfn; | |
515 | @@ -531,7 +531,7 @@ | |
516 | 0xc3, /* ret */ | |
517 | }; | |
518 | ||
519 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
520 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
521 | memcpy (dfn->code, temp, sizeof(temp)); | |
522 | FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab); | |
523 | FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr); | |
524 | @@ -567,7 +567,7 @@ | |
525 | insert_at_head( &tnl->dfn_cache.Color4ub, dfn ); | |
526 | dfn->key = key; | |
527 | ||
528 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
529 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
530 | memcpy (dfn->code, temp, sizeof(temp)); | |
531 | FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr); | |
532 | FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1); | |
533 | @@ -600,7 +600,7 @@ | |
534 | ||
535 | insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn ); | |
536 | dfn->key = key; | |
537 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
538 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
539 | memcpy (dfn->code, temp, sizeof(temp)); | |
540 | FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]); | |
541 | return dfn; | |
542 | @@ -624,7 +624,7 @@ | |
543 | ||
544 | insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn ); | |
545 | dfn->key = key; | |
546 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
547 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
548 | memcpy (dfn->code, temp, sizeof(temp)); | |
549 | FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]); | |
550 | return dfn; | |
551 | @@ -648,7 +648,7 @@ | |
552 | ||
553 | insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn ); | |
554 | dfn->key = key; | |
555 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
556 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
557 | memcpy (dfn->code, temp, sizeof(temp)); | |
558 | FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]); | |
559 | return dfn; | |
560 | @@ -670,7 +670,7 @@ | |
561 | ||
562 | insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn ); | |
563 | dfn->key = key; | |
564 | - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 ); | |
565 | + dfn->code = EXEC_MALLOC( sizeof(temp), 16 ); | |
566 | memcpy (dfn->code, temp, sizeof(temp)); | |
567 | FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]); | |
568 | return dfn; | |
569 | diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c | |
570 | --- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200 | |
571 | +++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200 | |
572 | @@ -1074,7 +1074,7 @@ | |
573 | struct dynfn *f, *tmp; | |
574 | foreach_s (f, tmp, l) { | |
575 | remove_from_list( f ); | |
576 | - ALIGN_FREE( f->code ); | |
577 | + EXEC_FREE( f->code ); | |
578 | FREE( f ); | |
579 | } | |
580 | } | |
581 | diff -urN xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h | |
582 | --- xc.org/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200 | |
583 | +++ xc/lib/GL/mesa/src/drv/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200 | |
584 | @@ -60,7 +60,7 @@ | |
585 | insert_at_head( &CACHE, dfn ); \ | |
586 | dfn->key[0] = key[0]; \ | |
587 | dfn->key[1] = key[1]; \ | |
588 | - dfn->code = ALIGN_MALLOC( end - start, 16 ); \ | |
589 | + dfn->code = EXEC_MALLOC( end - start, 16 ); \ | |
590 | memcpy (dfn->code, start, end - start); \ | |
591 | } \ | |
592 | while ( 0 ) | |
593 | diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c | |
594 | --- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200 | |
595 | +++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200 | |
596 | @@ -1042,7 +1042,7 @@ | |
597 | struct dynfn *f, *tmp; | |
598 | foreach_s (f, tmp, l) { | |
599 | remove_from_list( f ); | |
600 | - ALIGN_FREE( f->code ); | |
601 | + EXEC_FREE( f->code ); | |
602 | FREE( f ); | |
603 | } | |
604 | } | |
605 | diff -urN xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h | |
606 | --- xc.org/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200 | |
607 | +++ xc/lib/GL/mesa/src/drv/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200 | |
608 | @@ -58,7 +58,7 @@ | |
609 | char *end = (char *)&FUNC##_end; \ | |
610 | insert_at_head( &CACHE, dfn ); \ | |
611 | dfn->key = key; \ | |
612 | - dfn->code = ALIGN_MALLOC( end - start, 16 ); \ | |
613 | + dfn->code = EXEC_MALLOC( end - start, 16 ); \ | |
614 | memcpy (dfn->code, start, end - start); \ | |
615 | } \ | |
616 | while ( 0 ) | |
617 |