12005-06-02 Gwenole Beauchesne <gbeauchesne@mandriva.com>
2
3 * dyngen.c (trace_i386_insn): Fix push/imul case with 8-bit
4 immediate.
5
62005-05-11 Paul Brook <paul@codesourcery.com>
7
8 * gcc4 host support.
9
10--- qemu-0.7.0/target-ppc/exec.h.gcc4 2005-04-27 22:52:05.000000000 +0200
11+++ qemu-0.7.0/target-ppc/exec.h 2005-06-02 21:41:51.000000000 +0200
12@@ -33,11 +33,7 @@ register uint32_t T2 asm(AREG3);
13 #define FT1 (env->ft1)
14 #define FT2 (env->ft2)
15
16-#if defined (DEBUG_OP)
17-#define RETURN() __asm__ __volatile__("nop");
18-#else
19-#define RETURN() __asm__ __volatile__("");
20-#endif
21+#define RETURN() FORCE_RET()
22
23 #include "cpu.h"
24 #include "exec-all.h"
25--- qemu-0.7.0/dyngen-exec.h.gcc4 2005-04-27 22:52:05.000000000 +0200
26+++ qemu-0.7.0/dyngen-exec.h 2005-06-02 21:41:51.000000000 +0200
27@@ -155,7 +155,12 @@ extern int printf(const char *, ...);
28 #endif
29
30 /* force GCC to generate only one epilog at the end of the function */
31+#if defined(__i386__) || defined(__x86_64__)
32+/* Also add 4 bytes of padding so that we can replace the ret with a jmp. */
33+#define FORCE_RET() asm volatile ("nop;nop;nop;nop");
34+#else
35 #define FORCE_RET() asm volatile ("");
36+#endif
37
38 #ifndef OPPROTO
39 #define OPPROTO
40@@ -205,12 +210,19 @@ extern int __op_jmp0, __op_jmp1, __op_jm
41 #endif
42
43 #ifdef __i386__
44-#define EXIT_TB() asm volatile ("ret")
45-#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)
46+/* Dyngen will replace hlt instructions with a ret instruction. Inserting a
47+ ret directly would confuse dyngen. */
48+#define EXIT_TB() asm volatile ("hlt")
49+/* Dyngen will replace cli with 0xe9 (jmp).
50+ We generate the offset manually. */
51+#define GOTO_LABEL_PARAM(n) \
52+ asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:")
53 #endif
54 #ifdef __x86_64__
55-#define EXIT_TB() asm volatile ("ret")
56-#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)
57+/* The same as i386. */
58+#define EXIT_TB() asm volatile ("hlt")
59+#define GOTO_LABEL_PARAM(n) \
60+ asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:")
61 #endif
62 #ifdef __powerpc__
63 #define EXIT_TB() asm volatile ("blr")
64--- qemu-0.7.0/dyngen.c.gcc4 2005-04-27 22:52:05.000000000 +0200
65+++ qemu-0.7.0/dyngen.c 2005-06-02 22:25:06.000000000 +0200
66@@ -32,6 +32,8 @@
67
68 #include "config-host.h"
69
70+//#define DEBUG_OP
71+
72 /* NOTE: we test CONFIG_WIN32 instead of _WIN32 to enabled cross
73 compilation */
74 #if defined(CONFIG_WIN32)
75@@ -1343,6 +1345,644 @@ int arm_emit_ldr_info(const char *name,
76 #endif
77
78
79+#if defined(HOST_I386) || defined(HOST_X86_64)
80+
81+/* This byte is the first byte of an instruction. */
82+#define FLAG_INSN (1 << 0)
83+/* This byte has been processed as part of an instruction. */
84+#define FLAG_SCANNED (1 << 1)
85+/* This instruction is a return instruction. Gcc sometimes generates prefix
86+ bytes, so it may be more than one byte long. */
87+#define FLAG_RET (1 << 2)
88+/* This is either the target of a jump, or the preceding instruction uses
89+ a pc-relative offset. */
90+#define FLAG_TARGET (1 << 3)
91+/* This is a magic instruction that needs fixing up. */
92+#define FLAG_EXIT (1 << 4)
93+#define MAX_EXITS 5
94+
95+static void
96+bad_opcode(const char *name, uint32_t op)
97+{
98+ error("Unsupported opcode %0*x in %s", (op > 0xff) ? 4 : 2, op, name);
99+}
100+
101+/* Mark len bytes as scanned. Returns insn_size + len. Reports an error
102+ if these bytes have already been scanned. */
103+static int
104+eat_bytes(const char *name, char *flags, int insn, int insn_size, int len)
105+{
106+ while (len > 0) {
107+ /* This should never occur in sane code. */
108+ if (flags[insn + insn_size] & FLAG_SCANNED)
109+ error ("Overlapping instructions in %s", name);
110+ flags[insn + insn_size] |= FLAG_SCANNED;
111+ insn_size++;
112+ len--;
113+ }
114+ return insn_size;
115+}
116+
117+static void
118+trace_i386_insn (const char *name, uint8_t *start_p, char *flags, int insn,
119+ int len)
120+{
121+ uint8_t *ptr;
122+ uint8_t op;
123+ int modrm;
124+ int is_prefix;
125+ int op_size;
126+ int addr_size;
127+ int insn_size;
128+ int is_ret;
129+ int is_condjmp;
130+ int is_jmp;
131+ int is_exit;
132+ int is_pcrel;
133+ int immed;
134+ int seen_rexw;
135+ int32_t disp;
136+
137+ ptr = start_p + insn;
138+ /* nonzero if this insn has a ModR/M byte. */
139+ modrm = 1;
140+ /* The size of the immediate value in this instruction. */
141+ immed = 0;
142+ /* The operand size. */
143+ op_size = 4;
144+ /* The address size */
145+ addr_size = 4;
146+ /* The total length of this instruction. */
147+ insn_size = 0;
148+ is_prefix = 1;
149+ is_ret = 0;
150+ is_condjmp = 0;
151+ is_jmp = 0;
152+ is_exit = 0;
153+ seen_rexw = 0;
154+ is_pcrel = 0;
155+
156+ while (is_prefix) {
157+ op = ptr[insn_size];
158+ insn_size = eat_bytes(name, flags, insn, insn_size, 1);
159+ is_prefix = 0;
160+ switch (op >> 4) {
161+ case 0:
162+ case 1:
163+ case 2:
164+ case 3:
165+ if (op == 0x0f) {
166+ /* two-byte opcode. */
167+ op = ptr[insn_size];
168+ insn_size = eat_bytes(name, flags, insn, insn_size, 1);
169+ switch (op >> 4) {
170+ case 0:
171+ if ((op & 0xf) > 3)
172+ modrm = 0;
173+ break;
174+ case 1: /* vector move or prefetch */
175+ case 2: /* various moves and vector compares. */
176+ case 4: /* cmov */
177+ case 5: /* vector instructions */
178+ case 6:
179+ case 13:
180+ case 14:
181+ case 15:
182+ break;
183+ case 7: /* mmx */
184+ if (op & 0x77) /* emms */
185+ modrm = 0;
186+ break;
187+ case 3: /* wrmsr, rdtsc, rdmsr, rdpmc, sysenter, sysexit */
188+ modrm = 0;
189+ break;
190+ case 8: /* long conditional jump */
191+ is_condjmp = 1;
192+ immed = op_size;
193+ modrm = 0;
194+ break;
195+ case 9: /* setcc */
196+ break;
197+ case 10:
198+ switch (op & 0x7) {
199+ case 0: /* push fs/gs */
200+ case 1: /* pop fs/gs */
201+ case 2: /* cpuid/rsm */
202+ modrm = 0;
203+ break;
204+ case 4: /* shld/shrd immediate */
205+ immed = 1;
206+ break;
207+ default: /* Normal instructions with a ModR/M byte. */
208+ break;
209+ }
210+ break;
211+ case 11:
212+ switch (op & 0xf) {
213+ case 10: /* bt, bts, btr, btc */
214+ immed = 1;
215+ break;
216+ default:
217+ /* cmpxchg, lss, btr, lfs, lgs, movzx, btc, bsf, bsr
218+ undefined, and movsx */
219+ break;
220+ }
221+ break;
222+ case 12:
223+ if (op & 8) {
224+ /* bswap */
225+ modrm = 0;
226+ } else {
227+ switch (op & 0x7) {
228+ case 2:
229+ case 4:
230+ case 5:
231+ case 6:
232+ immed = 1;
233+ break;
234+ default:
235+ break;
236+ }
237+ }
238+ break;
239+ }
240+ } else if ((op & 0x07) <= 0x3) {
241+ /* General arithmetic ax. */
242+ } else if ((op & 0x07) <= 0x5) {
243+ /* General arithmetic ax, immediate. */
244+ if (op & 0x01)
245+ immed = op_size;
246+ else
247+ immed = 1;
248+ modrm = 0;
249+ } else if ((op & 0x23) == 0x22) {
250+ /* Segment prefix. */
251+ is_prefix = 1;
252+ } else {
253+ /* Segment register push/pop or DAA/AAA/DAS/AAS. */
254+ modrm = 0;
255+ }
256+ break;
257+
258+#if defined(HOST_X86_64)
259+ case 4: /* rex prefix. */
260+ is_prefix = 1;
261+ /* The address/operand size is actually 64-bit, but the immediate
262+ values in the instruction are still 32-bit. */
263+ op_size = 4;
264+ addr_size = 4;
265+ if (op & 8)
266+ seen_rexw = 1;
267+ break;
268+#else
269+ case 4: /* inc/dec register. */
270+#endif
271+ case 5: /* push/pop general register. */
272+ modrm = 0;
273+ break;
274+
275+ case 6:
276+ switch (op & 0x0f) {
277+ case 0: /* pusha */
278+ case 1: /* popa */
279+ modrm = 0;
280+ break;
281+ case 2: /* bound */
282+ case 3: /* arpl */
283+ break;
284+ case 4: /* FS */
285+ case 5: /* GS */
286+ is_prefix = 1;
287+ break;
288+ case 6: /* opcode size prefix. */
289+ op_size = 2;
290+ is_prefix = 1;
291+ break;
292+ case 7: /* Address size prefix. */
293+ addr_size = 2;
294+ is_prefix = 1;
295+ break;
296+ case 8: /* push immediate */
297+ immed = op_size;
298+ modrm = 0;
299+ break;
300+ case 10: /* push 8-bit immediate */
301+ immed = 1;
302+ modrm = 0;
303+ break;
304+ case 9: /* imul immediate */
305+ immed = op_size;
306+ break;
307+ case 11: /* imul 8-bit immediate */
308+ immed = 1;
309+ break;
310+ case 12: /* insb */
311+ case 13: /* insw */
312+ case 14: /* outsb */
313+ case 15: /* outsw */
314+ modrm = 0;
315+ break;
316+ }
317+ break;
318+
319+ case 7: /* Short conditional jump. */
320+ is_condjmp = 1;
321+ immed = 1;
322+ modrm = 0;
323+ break;
324+
325+ case 8:
326+ if ((op & 0xf) <= 3) {
327+ /* arithmetic immediate. */
328+ if ((op & 3) == 1)
329+ immed = op_size;
330+ else
331+ immed = 1;
332+ }
333+ /* else test, xchg, mov, lea or pop general. */
334+ break;
335+
336+ case 9:
337+ /* Various single-byte opcodes with no modrm byte. */
338+ modrm = 0;
339+ if (op == 10) {
340+ /* Call */
341+ immed = 4;
342+ }
343+ break;
344+
345+ case 10:
346+ switch ((op & 0xe) >> 1) {
347+ case 0: /* mov absolute immediate. */
348+ case 1:
349+ if (seen_rexw)
350+ immed = 8;
351+ else
352+ immed = addr_size;
353+ break;
354+ case 4: /* test immediate. */
355+ if (op & 1)
356+ immed = op_size;
357+ else
358+ immed = 1;
359+ break;
360+ default: /* Various string ops. */
361+ break;
362+ }
363+ modrm = 0;
364+ break;
365+
366+ case 11: /* move immediate to register */
367+ if (op & 8) {
368+ if (seen_rexw)
369+ immed = 8;
370+ else
371+ immed = op_size;
372+ } else {
373+ immed = 1;
374+ }
375+ modrm = 0;
376+ break;
377+
378+ case 12:
379+ switch (op & 0xf) {
380+ case 0: /* shift immediate */
381+ case 1:
382+ immed = 1;
383+ break;
384+ case 2: /* ret immediate */
385+ immed = 2;
386+ modrm = 0;
387+ bad_opcode(name, op);
388+ break;
389+ case 3: /* ret */
390+ modrm = 0;
391+ is_ret = 1;
392+ case 4: /* les */
393+ case 5: /* lds */
394+ break;
395+ case 6: /* mov immediate byte */
396+ immed = 1;
397+ break;
398+ case 7: /* mov immediate */
399+ immed = op_size;
400+ break;
401+ case 8: /* enter */
402+ /* TODO: Is this right? */
403+ immed = 3;
404+ modrm = 0;
405+ break;
406+ case 10: /* retf immediate */
407+ immed = 2;
408+ modrm = 0;
409+ bad_opcode(name, op);
410+ break;
411+ case 13: /* int */
412+ immed = 1;
413+ modrm = 0;
414+ break;
415+ case 11: /* retf */
416+ case 15: /* iret */
417+ modrm = 0;
418+ bad_opcode(name, op);
419+ break;
420+ default: /* leave, int3 or into */
421+ modrm = 0;
422+ break;
423+ }
424+ break;
425+
426+ case 13:
427+ if ((op & 0xf) >= 8) {
428+ /* Coprocessor escape. For our purposes this is just a normal
429+ instruction with a ModR/M byte. */
430+ } else if ((op & 0xf) >= 4) {
431+ /* AAM, AAD or XLAT */
432+ modrm = 0;
433+ }
434+ /* else shift instruction */
435+ break;
436+
437+ case 14:
438+ switch ((op & 0xc) >> 2) {
439+ case 0: /* loop or jcxz */
440+ is_condjmp = 1;
441+ immed = 1;
442+ break;
443+ case 1: /* in/out immed */
444+ immed = 1;
445+ break;
446+ case 2: /* call or jmp */
447+ switch (op & 3) {
448+ case 0: /* call */
449+ immed = op_size;
450+ break;
451+ case 1: /* long jump */
452+ immed = 4;
453+ is_jmp = 1;
454+ break;
455+ case 2: /* far jmp */
456+ bad_opcode(name, op);
457+ break;
458+ case 3: /* short jmp */
459+ immed = 1;
460+ is_jmp = 1;
461+ break;
462+ }
463+ break;
464+ case 3: /* in/out register */
465+ break;
466+ }
467+ modrm = 0;
468+ break;
469+
470+ case 15:
471+ switch ((op & 0xe) >> 1) {
472+ case 0:
473+ case 1:
474+ is_prefix = 1;
475+ break;
476+ case 2:
477+ case 4:
478+ case 5:
479+ case 6:
480+ modrm = 0;
481+ /* Some privileged insns are used as markers. */
482+ switch (op) {
483+ case 0xf4: /* hlt: Exit translation block. */
484+ is_exit = 1;
485+ break;
486+ case 0xfa: /* cli: Jump to label. */
487+ is_exit = 1;
488+ immed = 4;
489+ break;
490+ case 0xfb: /* sti: TB patch jump. */
491+ /* Mark the insn for patching, but continue scanning. */
492+ flags[insn] |= FLAG_EXIT;
493+ immed = 4;
494+ break;
495+ }
496+ break;
497+ case 3: /* unary grp3 */
498+ if ((ptr[insn_size] & 0x38) == 0) {
499+ if (op == 0xf7)
500+ immed = op_size;
501+ else
502+ immed = 1; /* test immediate */
503+ }
504+ break;
505+ case 7: /* inc/dec grp4/5 */
506+ /* TODO: This includes indirect jumps. We should fail if we
507+ encounter one of these. */
508+ break;
509+ }
510+ break;
511+ }
512+ }
513+
514+ if (modrm) {
515+ if (addr_size != 4)
516+ error("16-bit addressing mode used in %s", name);
517+
518+ disp = 0;
519+ modrm = ptr[insn_size];
520+ insn_size = eat_bytes(name, flags, insn, insn_size, 1);
521+ modrm &= 0xc7;
522+ switch ((modrm & 0xc0) >> 6) {
523+ case 0:
524+ if (modrm == 5)
525+ disp = 4;
526+ break;
527+ case 1:
528+ disp = 1;
529+ break;
530+ case 2:
531+ disp = 4;
532+ break;
533+ }
534+ if ((modrm & 0xc0) != 0xc0 && (modrm & 0x7) == 4) {
535+ /* SIB byte */
536+ if (modrm == 4 && (ptr[insn_size] & 0x7) == 5) {
537+ disp = 4;
538+ is_pcrel = 1;
539+ }
540+ insn_size = eat_bytes(name, flags, insn, insn_size, 1);
541+ }
542+ insn_size = eat_bytes(name, flags, insn, insn_size, disp);
543+ }
544+ insn_size = eat_bytes(name, flags, insn, insn_size, immed);
545+ if (is_condjmp || is_jmp) {
546+ if (immed == 1) {
547+ disp = (int8_t)*(ptr + insn_size - 1);
548+ } else {
549+ disp = (((int32_t)*(ptr + insn_size - 1)) << 24)
550+ | (((int32_t)*(ptr + insn_size - 2)) << 16)
551+ | (((int32_t)*(ptr + insn_size - 3)) << 8)
552+ | *(ptr + insn_size - 4);
553+ }
554+ disp += insn_size;
555+ /* Jumps to external symbols point to the address of the offset
556+ before relocation. */
557+ /* ??? These are probably tailcalls. We could fix them up by
558+ replacing them with jmp to EOB + call, but it's easier to just
559+ prevent the compiler generating them. */
560+ if (disp == 1)
561+ error("Unconditional jump (sibcall?) in %s", name);
562+ disp += insn;
563+ if (disp < 0 || disp > len)
564+ error("Jump outside instruction in %s", name);
565+
566+ if ((flags[disp] & (FLAG_INSN | FLAG_SCANNED)) == FLAG_SCANNED)
567+ error("Overlapping instructions in %s", name);
568+
569+ flags[disp] |= (FLAG_INSN | FLAG_TARGET);
570+ is_pcrel = 1;
571+ }
572+ if (is_pcrel) {
573+ /* Mark the following insn as a jump target. This will stop
574+ this instruction being moved. */
575+ flags[insn + insn_size] |= FLAG_TARGET;
576+ }
577+ if (is_ret)
578+ flags[insn] |= FLAG_RET;
579+
580+ if (is_exit)
581+ flags[insn] |= FLAG_EXIT;
582+
583+ if (!(is_jmp || is_ret || is_exit))
584+ flags[insn + insn_size] |= FLAG_INSN;
585+}
586+
587+/* Scan a function body. Returns the position of the return sequence.
588+ Sets *patch_bytes to the number of bytes that need to be copied from that
589+ location. If no patching is required (ie. the return is the last insn)
590+ *patch_bytes will be set to -1. *plen is the number of code bytes to copy.
591+ */
592+static int trace_i386_op(const char * name, uint8_t *start_p, int *plen,
593+ int *patch_bytes, int *exit_addrs)
594+{
595+ char *flags;
596+ int more;
597+ int insn;
598+ int retpos;
599+ int bytes;
600+ int num_exits;
601+ int len;
602+ int last_insn;
603+
604+ len = *plen;
605+ flags = malloc(len + 1);
606+ memset(flags, 0, len + 1);
607+ flags[0] |= FLAG_INSN;
608+ more = 1;
609+ while (more) {
610+ more = 0;
611+ for (insn = 0; insn < len; insn++) {
612+ if ((flags[insn] & (FLAG_INSN | FLAG_SCANNED)) == FLAG_INSN) {
613+ trace_i386_insn(name, start_p, flags, insn, len);
614+ more = 1;
615+ }
616+ }
617+ }
618+
619+ /* Strip any unused code at the end of the function. */
620+ while (len > 0 && flags[len - 1] == 0)
621+ len--;
622+
623+ retpos = -1;
624+ num_exits = 0;
625+ last_insn = 0;
626+ for (insn = 0; insn < len; insn++) {
627+ if (flags[insn] & FLAG_RET) {
628+ /* ??? In theory it should be possible to handle multiple return
629+ points. In practice it's not worth the effort. */
630+ if (retpos != -1)
631+ error("Multiple return instructions in %s", name);
632+ retpos = insn;
633+ }
634+ if (flags[insn] & FLAG_EXIT) {
635+ if (num_exits == MAX_EXITS)
636+ error("Too many block exits in %s", name);
637+ exit_addrs[num_exits] = insn;
638+ num_exits++;
639+ }
640+ if (flags[insn] & FLAG_INSN)
641+ last_insn = insn;
642+ }
643+
644+ exit_addrs[num_exits] = -1;
645+ if (retpos == -1) {
646+ if (num_exits == 0) {
647+ error ("No return instruction found in %s", name);
648+ } else {
649+ retpos = len;
650+ last_insn = len;
651+ }
652+ }
653+
654+ /* If the return instruction is the last instruction we can just
655+ remove it. */
656+ if (retpos == last_insn)
657+ *patch_bytes = -1;
658+ else
659+ *patch_bytes = 0;
660+
661+ /* Back up over any nop instructions. */
662+ while (retpos > 0
663+ && (flags[retpos] & FLAG_TARGET) == 0
664+ && (flags[retpos - 1] & FLAG_INSN) != 0
665+ && start_p[retpos - 1] == 0x90) {
666+ retpos--;
667+ }
668+
669+ if (*patch_bytes == -1) {
670+ *plen = retpos;
671+ free (flags);
672+ return retpos;
673+ }
674+ *plen = len;
675+
676+ /* The ret is in the middle of the function. Find four more bytes so
677+ that the ret can be replaced by a jmp. */
678+ /* ??? Use a short jump where possible. */
679+ bytes = 4;
680+ insn = retpos + 1;
681+ /* We can clobber everything up to the next jump target. */
682+ while (insn < len && bytes > 0 && (flags[insn] & FLAG_TARGET) == 0) {
683+ insn++;
684+ bytes--;
685+ }
686+ if (bytes > 0) {
687+ /* ???: Strip out nop blocks. */
688+ /* We can't do the replacement without clobbering something important.
689+ Copy preceding instruction(s) to give us some space. */
690+ while (retpos > 0) {
691+ /* If this byte is the target of a jmp we can't move it. */
692+ if (flags[retpos] & FLAG_TARGET)
693+ break;
694+
695+ (*patch_bytes)++;
696+ bytes--;
697+ retpos--;
698+
699+ /* Break out of the loop if we have enough space and this is either
700+ the first byte of an instruction or a pad byte. */
701+ if ((flags[retpos] & (FLAG_INSN | FLAG_SCANNED)) != FLAG_SCANNED
702+ && bytes <= 0) {
703+ break;
704+ }
705+ }
706+ }
707+
708+ if (bytes > 0)
709+ error("Unable to replace ret with jmp in %s\n", name);
710+
711+ free(flags);
712+ return retpos;
713+}
714+
715+#endif
716+
717 #define MAX_ARGS 3
718
719 /* generate op code */
720@@ -1356,6 +1996,11 @@ void gen_code(const char *name, host_ulo
721 uint8_t args_present[MAX_ARGS];
722 const char *sym_name, *p;
723 EXE_RELOC *rel;
724+#if defined(HOST_I386) || defined(HOST_X86_64)
725+ int patch_bytes;
726+ int retpos;
727+ int exit_addrs[MAX_EXITS];
728+#endif
729
730 /* Compute exact size excluding prologue and epilogue instructions.
731 * Increment start_offset to skip epilogue instructions, then compute
732@@ -1366,33 +2011,12 @@ void gen_code(const char *name, host_ulo
733 p_end = p_start + size;
734 start_offset = offset;
735 #if defined(HOST_I386) || defined(HOST_X86_64)
736-#ifdef CONFIG_FORMAT_COFF
737- {
738- uint8_t *p;
739- p = p_end - 1;
740- if (p == p_start)
741- error("empty code for %s", name);
742- while (*p != 0xc3) {
743- p--;
744- if (p <= p_start)
745- error("ret or jmp expected at the end of %s", name);
746- }
747- copy_size = p - p_start;
748- }
749-#else
750 {
751 int len;
752 len = p_end - p_start;
753- if (len == 0)
754- error("empty code for %s", name);
755- if (p_end[-1] == 0xc3) {
756- len--;
757- } else {
758- error("ret or jmp expected at the end of %s", name);
759- }
760+ retpos = trace_i386_op(name, p_start, &len, &patch_bytes, exit_addrs);
761 copy_size = len;
762 }
763-#endif
764 #elif defined(HOST_PPC)
765 {
766 uint8_t *p;
767@@ -1559,6 +2183,13 @@ void gen_code(const char *name, host_ulo
768 }
769
770 if (gen_switch == 2) {
771+#if defined(HOST_I386) || defined(HOST_X86_64)
772+ if (patch_bytes != -1)
773+ copy_size += patch_bytes;
774+#ifdef DEBUG_OP
775+ copy_size += 2;
776+#endif
777+#endif
778 fprintf(outfile, "DEF(%s, %d, %d)\n", name + 3, nb_args, copy_size);
779 } else if (gen_switch == 1) {
780
781@@ -1761,7 +2392,43 @@ void gen_code(const char *name, host_ulo
782 #error unsupport object format
783 #endif
784 }
785+ }
786+ /* Replace the marker instructions with the actual opcodes. */
787+ for (i = 0; exit_addrs[i] != -1; i++) {
788+ int op;
789+ switch (p_start[exit_addrs[i]])
790+ {
791+ case 0xf4: op = 0xc3; break; /* hlt -> ret */
792+ case 0xfa: op = 0xe9; break; /* cli -> jmp */
793+ case 0xfb: op = 0xe9; break; /* sti -> jmp */
794+ default: error("Internal error");
795+ }
796+ fprintf(outfile,
797+ " *(uint8_t *)(gen_code_ptr + %d) = 0x%x;\n",
798+ exit_addrs[i], op);
799 }
800+ /* Fix up the return instruction. */
801+ if (patch_bytes != -1) {
802+ if (patch_bytes) {
803+ fprintf(outfile, " memcpy(gen_code_ptr + %d,"
804+ "gen_code_ptr + %d, %d);\n",
805+ copy_size, retpos, patch_bytes);
806+ }
807+ fprintf(outfile,
808+ " *(uint8_t *)(gen_code_ptr + %d) = 0xe9;\n",
809+ retpos);
810+ fprintf(outfile,
811+ " *(uint32_t *)(gen_code_ptr + %d) = 0x%x;\n",
812+ retpos + 1, copy_size - (retpos + 5));
813+
814+ copy_size += patch_bytes;
815+ }
816+#ifdef DEBUG_OP
817+ fprintf(outfile,
818+ " *(uint16_t *)(gen_code_ptr + %d) = 0x9090;\n",
819+ copy_size);
820+ copy_size += 2;
821+#endif
822 }
823 #elif defined(HOST_X86_64)
824 {
825@@ -1793,6 +2460,42 @@ void gen_code(const char *name, host_ulo
826 }
827 }
828 }
829+ /* Replace the marker instructions with the actual opcodes. */
830+ for (i = 0; exit_addrs[i] != -1; i++) {
831+ int op;
832+ switch (p_start[exit_addrs[i]])
833+ {
834+ case 0xf4: op = 0xc3; break; /* hlt -> ret */
835+ case 0xfa: op = 0xe9; break; /* cli -> jmp */
836+ case 0xfb: op = 0xe9; break; /* sti -> jmp */
837+ default: error("Internal error");
838+ }
839+ fprintf(outfile,
840+ " *(uint8_t *)(gen_code_ptr + %d) = 0x%x;\n",
841+ exit_addrs[i], op);
842+ }
843+ /* Fix up the return instruction. */
844+ if (patch_bytes != -1) {
845+ if (patch_bytes) {
846+ fprintf(outfile, " memcpy(gen_code_ptr + %d,"
847+ "gen_code_ptr + %d, %d);\n",
848+ copy_size, retpos, patch_bytes);
849+ }
850+ fprintf(outfile,
851+ " *(uint8_t *)(gen_code_ptr + %d) = 0xe9;\n",
852+ retpos);
853+ fprintf(outfile,
854+ " *(uint32_t *)(gen_code_ptr + %d) = 0x%x;\n",
855+ retpos + 1, copy_size - (retpos + 5));
856+
857+ copy_size += patch_bytes;
858+ }
859+#ifdef DEBUG_OP
860+ fprintf(outfile,
861+ " *(uint16_t *)(gen_code_ptr + %d) = 0x9090;\n",
862+ copy_size);
863+ copy_size += 2;
864+#endif
865 }
866 #elif defined(HOST_PPC)
867 {
868--- qemu-0.7.0/exec-all.h.gcc4 2005-04-27 22:52:05.000000000 +0200
869+++ qemu-0.7.0/exec-all.h 2005-06-02 21:41:51.000000000 +0200
870@@ -335,14 +335,15 @@ do {\
871
872 #elif defined(__i386__) && defined(USE_DIRECT_JUMP)
873
874-/* we patch the jump instruction directly */
875+/* we patch the jump instruction directly. Use sti in place of the actual
876+ jmp instruction so that dyngen can patch in the correct result. */
877 #define GOTO_TB(opname, tbparam, n)\
878 do {\
879 asm volatile (".section .data\n"\
880 ASM_OP_LABEL_NAME(n, opname) ":\n"\
881 ".long 1f\n"\
882 ASM_PREVIOUS_SECTION \
883- "jmp " ASM_NAME(__op_jmp) #n "\n"\
884+ "sti;.long " ASM_NAME(__op_jmp) #n " - 1f\n"\
885 "1:\n");\
886 } while (0)
887