1 diff -uNr valgrind-3.6.0.orig/coregrind/m_main.c valgrind-3.6.0/coregrind/m_main.c
2 --- valgrind-3.6.0.orig/coregrind/m_main.c 2010-10-20 22:19:45.000000000 +0200
3 +++ valgrind-3.6.0/coregrind/m_main.c 2011-01-17 20:38:26.676472616 +0100
5 VG_(clo_vex_control).guest_chase_thresh, 0, 99) {}
6 else if VG_BOOL_CLO(arg, "--vex-guest-chase-cond",
7 VG_(clo_vex_control).guest_chase_cond) {}
8 + else if VG_BOOL_CLO(arg, "--vex-native-cpuid",
9 + VG_(clo_vex_control).iropt_native_cpuid) {}
11 else if VG_INT_CLO(arg, "--log-fd", tmp_log_fd) {
13 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_defs.h valgrind-3.6.0/VEX/priv/guest_amd64_defs.h
14 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_defs.h 2010-10-20 22:19:51.000000000 +0200
15 +++ valgrind-3.6.0/VEX/priv/guest_amd64_defs.h 2011-01-17 20:38:57.815124615 +0100
17 extern void amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
18 extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
19 extern void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
20 extern void amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );
21 +extern void amd64g_dirtyhelper_CPUID_native ( VexGuestAMD64State* st );
23 extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
25 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_helpers.c valgrind-3.6.0/VEX/priv/guest_amd64_helpers.c
26 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_helpers.c 2010-10-20 22:19:51.000000000 +0200
27 +++ valgrind-3.6.0/VEX/priv/guest_amd64_helpers.c 2011-01-17 20:36:00.884903903 +0100
28 @@ -2170,6 +2170,20 @@
32 +void amd64g_dirtyhelper_CPUID_native ( VexGuestAMD64State* st )
34 +# if defined(__x86_64__)
35 + __asm__ __volatile__ ("cpuid" : "=a" (st->guest_RAX),
36 + "=b" (st->guest_RBX),
37 + "=c" (st->guest_RCX),
38 + "=d" (st->guest_RDX)
39 + : "0" (st->guest_RAX), "2" (st->guest_RCX));
46 ULong amd64g_calculate_RCR ( ULong arg,
49 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_toIR.c valgrind-3.6.0/VEX/priv/guest_amd64_toIR.c
50 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_toIR.c 2011-01-17 20:35:34.380376775 +0100
51 +++ valgrind-3.6.0/VEX/priv/guest_amd64_toIR.c 2011-01-17 20:36:00.891571709 +0100
52 @@ -19903,7 +19903,11 @@
55 if (haveF2orF3(pfx)) goto decode_failure;
56 - if (archinfo->hwcaps == (VEX_HWCAPS_AMD64_SSE3
57 + if (vex_control.iropt_native_cpuid) {
58 + fName = "amd64g_dirtyhelper_CPUID_native";
59 + fAddr = &amd64g_dirtyhelper_CPUID_native;
61 + else if (archinfo->hwcaps == (VEX_HWCAPS_AMD64_SSE3
62 |VEX_HWCAPS_AMD64_CX16
63 |VEX_HWCAPS_AMD64_AVX)) {
64 fName = "amd64g_dirtyhelper_CPUID_avx_and_cx16";
65 diff -uNr valgrind-3.6.0.orig/VEX/pub/libvex.h valgrind-3.6.0/VEX/pub/libvex.h
66 --- valgrind-3.6.0.orig/VEX/pub/libvex.h 2010-10-20 22:19:52.000000000 +0200
67 +++ valgrind-3.6.0/VEX/pub/libvex.h 2011-01-17 20:41:02.906490947 +0100
73 /* For a given architecture, these specify extra capabilities beyond
74 the minimum supported (baseline) capabilities. They may be OR'd
75 together, although some combinations don't make sense. (eg, SSE2
77 /* EXPERIMENTAL: chase across conditional branches? Not all
78 front ends honour this. Default: NO. */
79 Bool guest_chase_cond;
80 + /* For x86 and amd64, allow use of the host's native CPUID instruction. */
81 + Int iropt_native_cpuid;