1 --- xc/lib/GL/glx/Imakefile.redhat-libGL-exec-shield-fixes 2003-09-25 14:43:55.000000000 -0400
2 +++ xc/lib/GL/glx/Imakefile 2003-09-25 14:43:55.000000000 -0400
4 #ifdef SparcArchitecture
5 LinkSourceFile(glapi_sparc.S, $(MESASRCDIR)/src/SPARC)
7 +LinkSourceFile(mem.c, $(MESASRCDIR)/src/mesa/glapi)
10 # Maybe some of these could come from
31 --- xc.org/extras/Mesa/src/mesa/glapi/glapi.c 2004-06-07 22:45:05.571381120 +0200
32 +++ xc/extras/Mesa/src/mesa/glapi/glapi.c 2004-06-07 23:11:34.201872576 +0200
34 #include "glapioffsets.h"
35 #include "glapitable.h"
39 extern hidden void *__glapi_noop_table[];
42 0xe8, 0x00, 0x00, 0x00, 0x00,
43 0xff, 0xa0, 0x00, 0x00, 0x00, 0x00
45 - unsigned char *code = (unsigned char *) malloc(sizeof(insn_template));
46 + unsigned char *code = EXEC_MALLOC(sizeof(insn_template), 16);
47 unsigned int next_insn;
49 memcpy(code, insn_template, sizeof(insn_template));
54 - unsigned int *code = (unsigned int *) malloc(sizeof(insn_template));
55 + unsigned int *code = EXEC_MALLOC(sizeof(insn_template), 16);
56 unsigned long glapi_addr = (unsigned long) &_glapi_Dispatch;
58 memcpy(code, insn_template, sizeof(insn_template));
59 --- xc/extras/Mesa/src/mesa/glapi/mem.c.org 1970-01-01 01:00:00.000000000 +0100
60 +++ xc/extras/Mesa/src/mesa/glapi/mem.c 2004-06-09 02:02:29.152086688 +0200
63 +#include <sys/mman.h>
64 +#include "glheader.h"
68 +/* Define a struct for our private data. This is preferred over pointer
69 + * arithmetic to access individual pieces of our private data because the
70 + * compiler will help us get alignment correct in a portable way and it
71 + * makes it much easier to add or remove items from our private data */
73 +typedef struct align_malloc_header {
74 + void *alloc_ptr; /* actual allocation ptr */
75 + size_t alloc_size; /* actual allocation size */
76 + void *user_ptr; /* ptr returned to caller */
77 + size_t user_size; /* size caller requested */
78 +} align_malloc_header;
80 +static unsigned long RoundUpPowerOf2(unsigned long val);
83 + * Execute permission implementation notes:
84 + * John Dennis - jdennis@redhat.com - Red Hat Inc.
88 + * Various parts of Mesa generate machine code during run time and
89 + * then execute that code. We will use the term code gen to refer to
90 + * this process. Some operating systems in an attempt to achieve
91 + * better security enforce restrictions on which memory areas may
92 + * contain executable code. In general execute permission is granted
93 + * to .text sections and removed on stack or heap memory. It's the
94 + * heap (and possibly the stack) where code is run time
95 + * generated. This means on systems that enforce execute memory
96 + * security you will get either a SEGV or SIGBUS exception when run
97 + * time generated code executes and the process will be terminated.
101 + * The solution is to provide unique malloc/free functions which
102 + * return memory with execute permission and to make sure these
103 + * allocation functions are called for code gen.
105 + * There are 3 possible implementation solutions.
107 + * Solution A: use mprotect on malloc block.
109 + * In this scenario after a block is allocated via malloc we call
110 + * mprotect on the pages containing the block and add execute
111 + * permission. In theory a free of the block removes the execute
114 + * Pros: Simple to implement
116 + * Cons: Because execute permission is granted to memory pages when
117 + * mprotect is called on the page containing the malloc block
118 + * every other malloc block in that page also receives execute
119 + * permission, this is insecure.
121 + * When a malloc block is freed that had been allocated for
122 + * execute permission we should remove the execute permission
123 + * from that block so that when the heap manager reuses that
124 + * memory it will not be executable. But because execute
125 + * permission is granted to memory pages and a page may have
126 + * more than one malloc block with execute permission we
127 + * cannot remove execute permission because that would remove
128 + * execute permission on any executable malloc blocks still in
129 + * that page. By not removing the execution permission on free
130 + * we will tend to "leak" executable memory as more and more
131 + * heap pages accumulate execute permission, possibly without
134 + * Solution B: use mmap to allocate block
136 + * In this scenario every call to alloc an executable block is
137 + * performed with anonymous mmap. Mmap always allocates pages of
138 + * memory. When free is called we unmap the pages.
140 + * Pros: This is much more secure. The kernel places the allocation
141 + * in special pages that have additional protection. These
142 + * pages are not near any other pages.
144 + * The pages used do not contain any heap allocation that is
145 + * not supposed to be executable, therefore we are not
146 + * inadvertently granting execute permission to a malloc block
147 + * that happens to live in the same page as an execute malloc
150 + * The allocation can be freed without affecting any other
151 + * allocation and it will be reused by the kernel.
153 + * It's simple to implement. As simple as solution A.
155 + * Cons: Mmap only allocates in units of pages. Thus even a small
156 + * allocation will use an entire page. However note, only a
157 + * small number of exec mallocs are done so the wasted memory
158 + * is not likely to be an issue.
160 + * Because every code generated function will live alone in
161 + * its own page this will probably introduce more cache misses
162 + * and page faults than if all the code coalesced together
163 + * into one or more pages as would be the case with regular
166 + * Solution C: use separate malloc implementation using mmap'ed heap arena
168 + * In this scenario a new heap manager is introduced which manages a
169 + * heap arena using anonymous mmap with execute permission. All
170 + * executable allocations are provided using only this heap arena.
172 + * Pros: This is the ideal solution. As in Solution B executable and
173 + * non-executable allocations are never mixed. Executable
174 + * allocations are provided using the most secure pages the
177 + * Pages will likely contain multiple allocations as opposed
178 + * to Solution B where pages will be sparsely used. This
179 + * improves cache and page fault behavior.
181 + * Cons: This is the most involved implementation and requires the
182 + * introduction of a heap manager implementation that has been
183 + * modified to work with anonymous mmap. However, note that
184 + * the GNU malloc implementation has been modified to work
185 + * with anonymous mmap.
189 +#define EXEC_ALLOC_USE_MMAP
191 +#define EXEC_ALLOC_USE_MALLOC
194 +/* If input is power of 2 return that, else round up to next power of 2 */
195 +static unsigned long RoundUpPowerOf2(unsigned long val)
199 + if (val == 0) return(1UL);
200 + if (val > (1UL << (sizeof(unsigned long) * 8 - 1))) {
201 + /* out of range, should be fatal error?, for now return max power of 2 */
202 + return (1UL << (sizeof(unsigned long) * 8 - 1));
205 + for (i = setBits = 0; val && i < sizeof(unsigned long) * 8; i++, val >>= 1) {
206 + if (val & 1UL) setBits++;
209 + return (1UL << i); /* input was not power of 2 */
211 + return (1UL << (i-1)); /* input was power of 2 */
215 + * Allocate N-byte aligned memory in executable region (uninitialized)
218 +#ifdef EXEC_ALLOC_USE_MALLOC
220 +_mesa_exec_malloc(size_t user_size, unsigned long user_align)
222 + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
223 + align_malloc_header *pHeader;
225 + ASSERT( user_align > 0 );
227 + /* We store the pointer to the actual address and size in a private
228 + * header before the address the client sees. We need the actual
229 + * pointer to free with and we need the size to remove execute permission
232 + if (user_align < sizeof(align_malloc_header))
233 + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
235 + alloc_align = user_align;
236 + alloc_size = user_size + alloc_align;
238 + alloc_ptr = (unsigned long) MALLOC(alloc_size);
240 + if (!alloc_ptr) return(NULL);
242 + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
243 + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
244 + pHeader->alloc_ptr = (void *) alloc_ptr;
245 + pHeader->alloc_size = alloc_size;
246 + pHeader->user_ptr = (void *) user_ptr;
247 + pHeader->user_size = user_size;
250 + unsigned page_size, round;
252 + page_size = getpagesize();
253 + round = user_ptr & (page_size-1);
254 + mprotect((void *)(user_ptr - round), (user_size + round + page_size-1) & ~(page_size-1),
255 + PROT_READ | PROT_WRITE | PROT_EXEC);
260 + unsigned char *p = (unsigned char *) alloc_ptr;
261 + unsigned char *stop = (unsigned char *) pHeader;
263 + /* mark the non-aligned area */
264 + for(; p < stop; p++) {
270 + return (void *)user_ptr;
274 + * Free N-byte executable aligned memory
277 +_mesa_exec_free(void *user_ptr)
279 + /* The header giving the real address and size is just prior to the address the client sees. */
280 + align_malloc_header *pHeader;
284 + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
285 + alloc_ptr = pHeader->alloc_ptr;
286 + user_size = pHeader->user_size;
290 + * Unfortunately we cannot remove the execute permission on this
291 + * malloc block because execute permission is granted on a page
292 + * basis. If the page containing this malloc block also contained
293 + * another malloc block with execute permission that was still in
294 + * effect then we will remove execute permission on a malloc block
295 + * that should still be enforced. This does mean we will tend to
296 + * "leak" execute permission in the heap. See above block comment
297 + * on implementation issues.
299 + * Note, we could keep a ref count on each page and when the ref count
300 + * fell to zero we could remove the execute permission.
302 + * If we did remove the execute permission this is how it would be done.
305 + unsigned page_size, round;
307 + page_size = getpagesize();
308 + round = (unsigned long)user_ptr & (page_size-1);
309 + mprotect((char *)user_ptr - round, (user_size + round + page_size-1) & ~(page_size-1),
310 + PROT_READ | PROT_WRITE);
316 +#elif defined(EXEC_ALLOC_USE_MMAP)
319 +_mesa_exec_malloc(size_t user_size, unsigned long user_align)
321 + unsigned long alloc_ptr, user_ptr, alloc_size, alloc_align;
322 + align_malloc_header *pHeader;
324 + ASSERT( user_align > 0 );
326 + /* We store the pointer to the actual address and size in a private
327 + * header before the address the client sees. We need the actual
328 + * pointer to free with and we need the size to unmap the region */
330 + if (user_align < sizeof(align_malloc_header))
331 + alloc_align = RoundUpPowerOf2(sizeof(align_malloc_header));
333 + alloc_align = user_align;
334 + alloc_size = user_size + alloc_align;
336 + /* Note, I'm not sure how portable MAP_ANONYMOUS with fd=0 is, on some POSIX
337 + * systems you may need to remove the MAP_ANONYMOUS flag and pass the
338 + * result of posix_typed_mem_open with POSIX_TYPED_MEM_ALLOCATE as the fd. */
340 + alloc_ptr = (unsigned long) mmap(0, alloc_size,
341 + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
342 + if ((void *)alloc_ptr == MAP_FAILED) {
346 + user_ptr = (alloc_ptr + alloc_align) & ~(unsigned long)(alloc_align - 1);
347 + pHeader = (align_malloc_header *) (user_ptr - sizeof(align_malloc_header));
348 + pHeader->alloc_ptr = (void *) alloc_ptr;
349 + pHeader->alloc_size = alloc_size;
350 + pHeader->user_ptr = (void *) user_ptr;
351 + pHeader->user_size = user_size;
355 + unsigned char *p = (unsigned char *) alloc_ptr;
356 + unsigned char *stop = (unsigned char *) pHeader;
358 + /* mark the non-aligned area */
359 + for(; p < stop; p++) {
365 + return (void *)user_ptr;
369 + * Free N-byte executable aligned memory
372 +_mesa_exec_free(void *user_ptr)
374 + /* The header giving the real address and size is just prior to the address the client sees. */
375 + align_malloc_header *pHeader;
379 + pHeader = (align_malloc_header *)((char *)user_ptr - sizeof(align_malloc_header));
380 + alloc_ptr = pHeader->alloc_ptr;
381 + alloc_size = pHeader->alloc_size;
383 + munmap(alloc_ptr, alloc_size);
387 --- xc.org/extras/Mesa/src/mesa/main/imports.h 2004-06-07 22:45:05.944324424 +0200
388 +++ xc/extras/Mesa/src/mesa/main/imports.h 2004-06-07 23:04:42.561451432 +0200
390 #define ALIGN_MALLOC_STRUCT(T, N) (struct T *) _mesa_align_malloc(sizeof(struct T), N)
391 #define ALIGN_CALLOC_STRUCT(T, N) (struct T *) _mesa_align_calloc(sizeof(struct T), N)
392 #define ALIGN_FREE(PTR) _mesa_align_free(PTR)
393 +/* These allocate aligned memory in a area with execute permission, used for code generation. */
394 +#define EXEC_MALLOC(BYTES, N) (void *) _mesa_exec_malloc(BYTES, N)
395 +#define EXEC_FREE(PTR) _mesa_exec_free(PTR)
397 #define MEMCPY( DST, SRC, BYTES) _mesa_memcpy(DST, SRC, BYTES)
398 #define MEMSET( DST, VAL, N ) _mesa_memset(DST, VAL, N)
400 _mesa_align_free( void *ptr );
403 +_mesa_exec_malloc(size_t bytes, unsigned long alignment);
405 +_mesa_exec_free(void *ptr);
408 _mesa_memcpy( void *dest, const void *src, size_t n );
411 --- xc.org/extras/Mesa/src/mesa/tnl/t_vtx_exec.c 2004-06-07 22:45:06.597225168 +0200
412 +++ xc/extras/Mesa/src/mesa/tnl/t_vtx_exec.c 2004-06-07 23:17:12.494444288 +0200
414 struct dynfn *f, *tmp;
415 foreach_s (f, tmp, l) {
416 remove_from_list( f );
417 - ALIGN_FREE( f->code );
418 + EXEC_FREE( f->code );
422 --- xc.org/extras/Mesa/src/mesa/tnl/t_vtx_x86.c 2004-06-07 22:45:06.608223496 +0200
423 +++ xc/extras/Mesa/src/mesa/tnl/t_vtx_x86.c 2004-06-07 23:16:32.268559552 +0200
425 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
428 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
429 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
430 memcpy (dfn->code, temp, sizeof(temp));
431 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[2]);
432 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
434 0xff, 0x25, 0,0,0,0 /* jmp *NOTIFY */
437 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
438 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
439 memcpy (dfn->code, temp, sizeof(temp));
440 FIXUP(dfn->code, 2, 0x0, (int)&tnl->dmaptr);
441 FIXUP(dfn->code, 25, 0x0, (int)&tnl->vertex[3]);
443 0xff, 0x25, 0,0,0,0, /* jmp *NOTIFY */
446 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
447 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
448 memcpy (dfn->code, temp, sizeof(temp));
449 FIXUP(dfn->code, 3, 0x0, (int)&tnl->dmaptr);
450 FIXUP(dfn->code, 28, 0x0, (int)&tnl->vertex[3]);
452 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
455 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
456 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
457 memcpy (dfn->code, temp, sizeof(temp));
458 FIXUP(dfn->code, 3, 0x0, (int)&tnl->vertex[3]);
459 FIXUP(dfn->code, 9, 0x0, (int)&tnl->dmaptr);
461 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
464 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
465 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
466 memcpy (dfn->code, temp, sizeof(temp));
467 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
468 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
470 0xff, 0x25, 0x08, 0, 0, 0, /* jmp *0x8 */
473 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
474 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
475 memcpy (dfn->code, temp, sizeof(temp));
476 FIXUP(dfn->code, 1, 0x00000000, (int)&tnl->dmaptr);
477 FIXUP(dfn->code, 27, 0x0000001c, (int)&tnl->vertex[3]);
479 0xff, 0x25, 0, 0, 0, 0 /* jmp NOTIFY */
482 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
483 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
484 memcpy (dfn->code, temp, sizeof(temp));
485 FIXUP(dfn->code, 8, 0x01010101, (int)&tnl->dmaptr);
486 FIXUP(dfn->code, 32, 0x00000006, tnl->vertex_size-3);
489 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
491 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
492 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
493 memcpy (dfn->code, temp, sizeof(temp));
494 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
498 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
500 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
501 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
502 memcpy (dfn->code, temp, sizeof(temp));
503 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
507 insert_at_head( &tnl->dfn_cache.Normal3fv, dfn );
509 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
510 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
511 memcpy (dfn->code, temp, sizeof(temp));
512 FIXUP(dfn->code, 5, 0x0, (int)tnl->normalptr);
516 insert_at_head( &tnl->dfn_cache.Normal3f, dfn );
518 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
519 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
520 memcpy (dfn->code, temp, sizeof(temp));
521 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->normalptr);
527 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
528 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
529 memcpy (dfn->code, temp, sizeof(temp));
530 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->ubytecolorptr);
536 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
537 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
538 memcpy (dfn->code, temp, sizeof(temp));
539 FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
540 FIXUP(dfn->code, 27, 0xdeadbeaf, (int)tnl->floatcolorptr);
542 insert_at_head( &tnl->dfn_cache.Color4ub, dfn );
545 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
546 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
547 memcpy (dfn->code, temp, sizeof(temp));
548 FIXUP(dfn->code, 18, 0x0, (int)tnl->ubytecolorptr);
549 FIXUP(dfn->code, 24, 0x0, (int)tnl->ubytecolorptr+1);
552 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
554 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
555 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
556 memcpy (dfn->code, temp, sizeof(temp));
557 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
561 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
563 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
564 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
565 memcpy (dfn->code, temp, sizeof(temp));
566 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
570 insert_at_head( &tnl->dfn_cache.TexCoord2fv, dfn );
572 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
573 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
574 memcpy (dfn->code, temp, sizeof(temp));
575 FIXUP(dfn->code, 5, 0x12345678, (int)tnl->texcoordptr[0]);
579 insert_at_head( &tnl->dfn_cache.TexCoord2f, dfn );
581 - dfn->code = ALIGN_MALLOC( sizeof(temp), 16 );
582 + dfn->code = EXEC_MALLOC( sizeof(temp), 16 );
583 memcpy (dfn->code, temp, sizeof(temp));
584 FIXUP(dfn->code, 1, 0x12345678, (int)tnl->texcoordptr[0]);
586 --- xc.org/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.c 2004-06-07 22:44:55.376930912 +0200
587 +++ xc/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.c 2004-06-07 22:48:38.196057256 +0200
588 @@ -1074,7 +1074,7 @@
589 struct dynfn *f, *tmp;
590 foreach_s (f, tmp, l) {
591 remove_from_list( f );
592 - ALIGN_FREE( f->code );
593 + EXEC_FREE( f->code );
597 --- xc.org/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.h 2004-06-07 22:44:55.377930760 +0200
598 +++ xc/lib/GL/mesa/drivers/dri/r200/r200_vtxfmt.h 2004-06-07 22:48:38.192057864 +0200
600 insert_at_head( &CACHE, dfn ); \
601 dfn->key[0] = key[0]; \
602 dfn->key[1] = key[1]; \
603 - dfn->code = ALIGN_MALLOC( end - start, 16 ); \
604 + dfn->code = EXEC_MALLOC( end - start, 16 ); \
605 memcpy (dfn->code, start, end - start); \
608 --- xc.org/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.c 2004-06-07 22:44:55.473916168 +0200
609 +++ xc/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.c 2004-06-07 22:48:38.218053912 +0200
610 @@ -1042,7 +1042,7 @@
611 struct dynfn *f, *tmp;
612 foreach_s (f, tmp, l) {
613 remove_from_list( f );
614 - ALIGN_FREE( f->code );
615 + EXEC_FREE( f->code );
619 --- xc.org/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.h 2004-06-07 22:44:55.473916168 +0200
620 +++ xc/lib/GL/mesa/drivers/dri/radeon/radeon_vtxfmt.h 2004-06-07 22:48:38.214054520 +0200
622 char *end = (char *)&FUNC##_end; \
623 insert_at_head( &CACHE, dfn ); \
625 - dfn->code = ALIGN_MALLOC( end - start, 16 ); \
626 + dfn->code = EXEC_MALLOC( end - start, 16 ); \
627 memcpy (dfn->code, start, end - start); \