linux 3.17 fixes from upstream

commit 458c2022e992c057bd21d02e4c77bcc7d4d6cd6c
Author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date:   Thu Aug 21 11:15:50 2014 -0400

    Update kvm instrumentation: compile on 3.17-rc1

    Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

diff --git a/instrumentation/events/lttng-module/arch/x86/kvm/trace.h b/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
index 2354884..3c299c5 100644
--- a/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
+++ b/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
@@ -724,7 +724,7 @@ TRACE_EVENT(kvm_emulate_insn,
         tp_memcpy(insn,
                 vcpu->arch.emulate_ctxt.decode.fetch.data,
                 15)
-#else
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0))
         tp_assign(rip, vcpu->arch.emulate_ctxt.fetch.start)
         tp_assign(csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
         tp_assign(len, vcpu->arch.emulate_ctxt._eip
@@ -732,6 +732,16 @@ TRACE_EVENT(kvm_emulate_insn,
         tp_memcpy(insn,
                 vcpu->arch.emulate_ctxt.fetch.data,
                 15)
+#else
+        tp_assign(rip, vcpu->arch.emulate_ctxt._eip -
+                (vcpu->arch.emulate_ctxt.fetch.ptr -
+                 vcpu->arch.emulate_ctxt.fetch.data))
+        tp_assign(csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
+        tp_assign(len, vcpu->arch.emulate_ctxt.fetch.ptr -
+                vcpu->arch.emulate_ctxt.fetch.data)
+        tp_memcpy(insn,
+                vcpu->arch.emulate_ctxt.fetch.data,
+                15)
 #endif
         tp_assign(flags, kei_decode_mode(vcpu->arch.emulate_ctxt.mode))
         tp_assign(failed, failed)
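
Note on the hunk above: kernels up to 3.16 exposed a fetch.start linear
address, so rip was read directly and len computed as _eip - fetch.start.
The 3.17 emulator rework dropped "start" in favour of a buffer-plus-cursor
fetch cache, so both values are now reconstructed by pointer arithmetic.
A sketch of that reasoning, with the struct layout quoted from memory of
3.17's <asm/kvm_emulate.h> (treat it as an assumption, not a reference):

    /*
     * struct fetch_cache {        // assumed 3.17 layout
     *         u8 data[15];        // instruction bytes fetched so far
     *         u8 *ptr;            // next byte to decode within data[]
     *         u8 *end;            // end of the valid bytes
     * };
     */
    len = ctxt->fetch.ptr - ctxt->fetch.data;  /* bytes already fetched  */
    rip = ctxt->_eip - len;                    /* _eip advanced past them */
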
commit b07252070edd244987bf160c1ee488d4796bb3a3
Author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date:   Sun Jul 13 13:20:57 2014 -0400

    Use 3.17 ktime_get_mono_fast_ns() new API

    The new ktime_get_mono_fast_ns() API introduced in kernel 3.17 allows
    tracing NMI handlers.

    Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>

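For context: ktime_get_mono_fast_ns() comes from Thomas Gleixner's 3.17
fast-timekeeper work. Unlike ktime_get(), it is safe to call from NMI
context; the trade-off is that it is not guaranteed to be strictly
monotonic across a timekeeping update, which is why the wrapper below
pairs it with a per-CPU last-timestamp check. A minimal usage sketch
(kernel context assumed; the header location is from memory):

    #include <linux/timekeeping.h>

    u64 t0, t1;

    t0 = ktime_get_mono_fast_ns();  /* callable even from an NMI handler */
    /* ... traced work ... */
    t1 = ktime_get_mono_fast_ns();  /* may lag t0 slightly across a
                                     * timekeeper update; callers needing
                                     * monotonicity must enforce it, as
                                     * trace_clock_monotonic_wrapper() does */
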
diff --git a/Makefile b/Makefile
index aa4835d..eeffdfe 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ lttng-tracer-objs := lttng-events.o lttng-abi.o \
                         lttng-context-vtid.o lttng-context-ppid.o \
                         lttng-context-vppid.o lttng-calibrate.o \
                         lttng-context-hostname.o wrapper/random.o \
-                        probes/lttng.o
+                        probes/lttng.o wrapper/trace-clock.o

 obj-m += lttng-statedump.o
 lttng-statedump-objs := lttng-statedump-impl.o wrapper/irqdesc.o \
diff --git a/wrapper/trace-clock.c b/wrapper/trace-clock.c
new file mode 100644
index 0000000..1b7a217
--- /dev/null
+++ b/wrapper/trace-clock.c
@@ -0,0 +1,29 @@
+/*
+ * wrapper/trace-clock.c
+ *
+ * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
+ * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "trace-clock.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
+DEFINE_PER_CPU(local_t, lttng_last_tsc);
+EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
+#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
diff --git a/wrapper/trace-clock.h b/wrapper/trace-clock.h
index 9c0c806..b145b82 100644
--- a/wrapper/trace-clock.h
+++ b/wrapper/trace-clock.h
@@ -32,7 +32,9 @@
 #include <linux/ktime.h>
 #include <linux/time.h>
 #include <linux/hrtimer.h>
+#include <linux/percpu.h>
 #include <linux/version.h>
+#include <asm/local.h>
 #include "../lttng-kernel-version.h"
 #include "random.h"

@@ -40,6 +42,88 @@
 #error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
 #endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
+
+DECLARE_PER_CPU(local_t, lttng_last_tsc);
+
+#if (BITS_PER_LONG == 32)
+/*
+ * Fixup "src_now" using the 32 LSB from "last". We need to handle overflow and
+ * underflow of the 32nd bit. "last" can be above, below or equal to the 32 LSB
+ * of "src_now".
+ */
+static inline u64 trace_clock_fixup(u64 src_now, u32 last)
+{
+        u64 now;
+
+        now = src_now & 0xFFFFFFFF00000000ULL;
+        now |= (u64) last;
+        /* Detect overflow or underflow between now and last. */
+        if ((src_now & 0x80000000U) && !(last & 0x80000000U)) {
+                /*
+                 * If 32nd bit transitions from 1 to 0, and we move forward in
+                 * time from "now" to "last", then we have an overflow.
+                 */
+                if (((s32) now - (s32) last) < 0)
+                        now += 0x0000000100000000ULL;
+        } else if (!(src_now & 0x80000000U) && (last & 0x80000000U)) {
+                /*
+                 * If 32nd bit transitions from 0 to 1, and we move backward in
+                 * time from "now" to "last", then we have an underflow.
+                 */
+                if (((s32) now - (s32) last) > 0)
+                        now -= 0x0000000100000000ULL;
+        }
+        return now;
+}
+#else /* #if (BITS_PER_LONG == 32) */
+/*
+ * The fixup is pretty easy on 64-bit architectures: "last" is a 64-bit
+ * value, so we can use last directly as current time.
+ */
+static inline u64 trace_clock_fixup(u64 src_now, u64 last)
+{
+        return last;
+}
+#endif /* #else #if (BITS_PER_LONG == 32) */
+
+/*
+ * Always called with preemption disabled. Can be interrupted.
+ */
+static inline u64 trace_clock_monotonic_wrapper(void)
+{
+        u64 now;
+        unsigned long last, result;
+        local_t *last_tsc;
+
+        /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
+        last_tsc = &__get_cpu_var(lttng_last_tsc);
+        last = local_read(last_tsc);
+        /*
+         * Read "last" before "now". It is not strictly required, but it ensures
+         * that an interrupt coming in won't artificially trigger a case where
+         * "now" < "last". This kind of situation should only happen if the
+         * mono_fast time source goes slightly backwards.
+         */
+        barrier();
+        now = ktime_get_mono_fast_ns();
+        if (((long) now - (long) last) < 0)
+                now = trace_clock_fixup(now, last);
+        result = local_cmpxchg(last_tsc, last, (unsigned long) now);
+        if (result == last) {
+                /* Update done. */
+                return now;
+        } else {
+                /*
+                 * Update not done, due to concurrent update. We can use
+                 * "result", since it has been sampled concurrently with our
+                 * time read, so it should not be far from "now".
+                 */
+                return trace_clock_fixup(now, result);
+        }
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
 static inline u64 trace_clock_monotonic_wrapper(void)
 {
         ktime_t ktime;
@@ -54,6 +138,7 @@ static inline u64 trace_clock_monotonic_wrapper(void)
         ktime = ktime_get();
         return ktime_to_ns(ktime);
 }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

 static inline u32 trace_clock_read32(void)
 {
@@ -75,23 +160,19 @@ static inline int trace_clock_uuid(char *uuid)
         return wrapper_get_bootid(uuid);
 }

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
 static inline int get_trace_clock(void)
 {
-        /*
-         * LTTng: Using mainline kernel monotonic clock. NMIs will not be
-         * traced, and expect significant performance degradation compared to
-         * the LTTng trace clocks. Integration of the LTTng 0.x trace clocks
-         * into LTTng 2.0 is planned in a near future.
-         */
-        printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
-        printk(KERN_WARNING "  * NMIs will not be traced,\n");
-        printk(KERN_WARNING "  * expect significant performance degradation compared to the\n");
-        printk(KERN_WARNING "    LTTng trace clocks.\n");
-        printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
-        printk(KERN_WARNING "in a near future.\n");
-
+        printk(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
+        return 0;
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
+static inline int get_trace_clock(void)
+{
+        printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
         return 0;
 }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

 static inline void put_trace_clock(void)
 {
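
Appendix (not part of the patch): the cmpxchg scheme in
trace_clock_monotonic_wrapper() above resolves races between concurrent
clock readers by reusing whichever timestamp won the update, instead of
retrying in a loop. A self-contained user-space analogue, with a mock
clock standing in for ktime_get_mono_fast_ns() and one C11 atomic standing
in for the per-CPU local_t (all names illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t last_ns;    /* plays the role of lttng_last_tsc */

    /* Mock monotonic source; ktime_get_mono_fast_ns() plays this role. */
    static uint64_t mock_clock_ns(void)
    {
            static uint64_t t;
            return t += 100;
    }

    static uint64_t sketch_trace_clock(void)
    {
            uint64_t last = atomic_load(&last_ns);
            uint64_t now = mock_clock_ns();

            /*
             * Try to publish "now". On failure, C11 stores the winning
             * value into "last"; it was sampled concurrently with our
             * read, so it is close to "now" and safe to return, just as
             * the kernel code reuses "result" from local_cmpxchg().
             */
            if (atomic_compare_exchange_strong(&last_ns, &last, now))
                    return now;
            return last;
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    printf("%llu\n", (unsigned long long) sketch_trace_clock());
            return 0;
    }

Compiled with "cc -std=c11 sketch.c", this prints 100, 200, 300; under
contention the failure path returns the concurrent reader's timestamp
rather than retrying, mirroring the kernel wrapper's lock-free design.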