valgrind-native-cpuid.patch

Adds a --vex-native-cpuid command-line option to Valgrind 3.6.0.  When the
option is enabled, the amd64 front end dispatches the guest CPUID instruction
to a new dirty helper (amd64g_dirtyhelper_CPUID_native) that executes the host
CPU's real CPUID instruction, instead of one of the canned feature-set
emulations (baseline / sse3+cx16 / sse42+cx16 / avx+cx16).
1 diff -uNr valgrind-3.6.0.orig/coregrind/m_main.c valgrind-3.6.0/coregrind/m_main.c
2 --- valgrind-3.6.0.orig/coregrind/m_main.c      2010-10-20 22:19:45.000000000 +0200
3 +++ valgrind-3.6.0/coregrind/m_main.c   2011-01-17 20:38:26.676472616 +0100
4 @@ -519,6 +519,8 @@
5                         VG_(clo_vex_control).guest_chase_thresh, 0, 99) {}
6        else if VG_BOOL_CLO(arg, "--vex-guest-chase-cond",
7                         VG_(clo_vex_control).guest_chase_cond) {}
8 +      else if VG_BOOL_CLO(arg, "--vex-native-cpuid",
9 +                       VG_(clo_vex_control).iropt_native_cpuid) {}
10  
11        else if VG_INT_CLO(arg, "--log-fd", tmp_log_fd) {
12           log_to = VgLogTo_Fd;
13 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_defs.h valgrind-3.6.0/VEX/priv/guest_amd64_defs.h
14 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_defs.h     2010-10-20 22:19:51.000000000 +0200
15 +++ valgrind-3.6.0/VEX/priv/guest_amd64_defs.h  2011-01-17 20:38:57.815124615 +0100
16 @@ -161,7 +161,8 @@
17  extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
18  extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
19  extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
20  extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );
21 +extern void  amd64g_dirtyhelper_CPUID_native ( VexGuestAMD64State* st );
22  
23  extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
24  
25 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_helpers.c valgrind-3.6.0/VEX/priv/guest_amd64_helpers.c
26 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_helpers.c  2010-10-20 22:19:51.000000000 +0200
27 +++ valgrind-3.6.0/VEX/priv/guest_amd64_helpers.c       2011-01-17 20:36:00.884903903 +0100
28 @@ -2170,6 +2170,20 @@
29  }
30  
31  
32 +void amd64g_dirtyhelper_CPUID_native ( VexGuestAMD64State* st )
33 +{
34 +#  if defined(__x86_64__)
35 +   __asm__ __volatile__ ("cpuid" : "=a" (st->guest_RAX),
36 +                        "=b" (st->guest_RBX),
37 +                        "=c" (st->guest_RCX),
38 +                        "=d" (st->guest_RDX)
39 +                        : "0" (st->guest_RAX), "2" (st->guest_RCX));
40 +#  else
42 +   /* Not an x86_64 build: native CPUID unavailable, leave guest state unchanged. */
42 +#  endif
43 +}
44 +
45 +
46  ULong amd64g_calculate_RCR ( ULong arg, 
47                               ULong rot_amt, 
48                               ULong rflags_in, 
49 diff -uNr valgrind-3.6.0.orig/VEX/priv/guest_amd64_toIR.c valgrind-3.6.0/VEX/priv/guest_amd64_toIR.c
50 --- valgrind-3.6.0.orig/VEX/priv/guest_amd64_toIR.c     2011-01-17 20:35:34.380376775 +0100
51 +++ valgrind-3.6.0/VEX/priv/guest_amd64_toIR.c  2011-01-17 20:36:00.891571709 +0100
52 @@ -19903,7 +19903,11 @@
53        HChar*   fName = NULL;
54        void*    fAddr = NULL;
55        if (haveF2orF3(pfx)) goto decode_failure;
56 -      if (archinfo->hwcaps == (VEX_HWCAPS_AMD64_SSE3
57 +      if (vex_control.iropt_native_cpuid) {
58 +         fName = "amd64g_dirtyhelper_CPUID_native";
59 +         fAddr = &amd64g_dirtyhelper_CPUID_native;
60 +      }
61 +      else if (archinfo->hwcaps == (VEX_HWCAPS_AMD64_SSE3
62                                 |VEX_HWCAPS_AMD64_CX16 
63                                 |VEX_HWCAPS_AMD64_AVX)) {
64           fName = "amd64g_dirtyhelper_CPUID_avx_and_cx16";
65 diff -uNr valgrind-3.6.0.orig/VEX/pub/libvex.h valgrind-3.6.0/VEX/pub/libvex.h
66 --- valgrind-3.6.0.orig/VEX/pub/libvex.h        2010-10-20 22:19:52.000000000 +0200
67 +++ valgrind-3.6.0/VEX/pub/libvex.h     2011-01-17 20:41:02.906490947 +0100
68 @@ -60,7 +60,6 @@
69     }
70     VexArch;
71  
72 -
73  /* For a given architecture, these specify extra capabilities beyond
74     the minimum supported (baseline) capabilities.  They may be OR'd
75     together, although some combinations don't make sense.  (eg, SSE2
76 @@ -270,6 +269,8 @@
77        /* EXPERIMENTAL: chase across conditional branches?  Not all
78           front ends honour this.  Default: NO. */
79        Bool guest_chase_cond;
80 +      /* For x86 and amd64: allow use of the host's native CPUID instruction (set via --vex-native-cpuid). */
81 +      Int iropt_native_cpuid;
82     }
83     VexControl;
84  