File: trace-clock.h

package info (click to toggle)
lttng-modules 2.14.3-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 4,808 kB
  • sloc: ansic: 74,851; sh: 548; makefile: 62
file content (216 lines) | stat: -rw-r--r-- 5,402 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/percpu-defs.h>

#include <lttng/kernel-version.h>
#include <asm/local.h>
#include <lttng/kernel-version.h>
#include <lttng/clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>

extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
 * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
 * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
 */
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if (BITS_PER_LONG == 64 && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

DECLARE_PER_CPU(u64, lttng_last_timestamp);

/*
 * Return a 64-bit nanosecond timestamp that is monotonically increasing
 * per-cpu, built on top of the NMI-safe ktime_get_mono_fast_ns().
 *
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_timestamp_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_timestamp_ptr = this_cpu_ptr(&lttng_last_timestamp);
	last = *last_timestamp_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	/*
	 * If "now" moved backwards with respect to "last", the unsigned
	 * subtraction underflows to a huge value; keep "last" so the per-cpu
	 * timestamp never goes backwards.
	 */
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_timestamp_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
/*
 * Fallback monotonic clock based on ktime_get(). Not usable from NMI
 * context: an NMI nesting over the xtime write seqlock would deadlock,
 * so NMI callers get an error value instead of a timestamp.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	return ktime_to_ns(ktime_get());
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

/* Read the default monotonic trace clock value, in nanoseconds. */
static inline u64 trace_clock_read64_monotonic(void)
{
	u64 ts;

	ts = trace_clock_monotonic_wrapper();
	return ts;
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

/*
 * Default clock UUID: the kernel boot id, so traces taken within the
 * same boot share a clock identity. Returns 0 on success, negative
 * error value on failure.
 */
static inline int trace_clock_uuid_monotonic(char *uuid)
{
	int ret;

	ret = wrapper_get_bootid(uuid);
	return ret;
}

/* Short name of the default trace clock, as exposed in trace metadata. */
static inline const char *trace_clock_name_monotonic(void)
{
	static const char name[] = "monotonic";

	return name;
}

/* Human-readable description of the default trace clock. */
static inline const char *trace_clock_description_monotonic(void)
{
	static const char description[] = "Monotonic Clock";

	return description;
}

#ifdef LTTNG_USE_NMI_SAFE_CLOCK
/*
 * Register use of the trace clock. The NMI-safe per-cpu scheme needs no
 * setup; just advertise the selected clock source once. Always succeeds.
 */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
/*
 * Register use of the trace clock. Nothing to set up for the fallback
 * ktime_get()-based clock; just advertise its limitation once. Always
 * succeeds.
 */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

/* Release the trace clock: nothing to tear down for mainline clocks. */
static inline void put_trace_clock(void)
{
}

/*
 * Read the current trace clock value, in nanoseconds. Dispatches to a
 * plugin clock when one is registered, otherwise to the built-in
 * monotonic clock.
 */
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	/* Fast path: no clock plugin override registered. */
	if (likely(!ltc))
		return trace_clock_read64_monotonic();
	return ltc->read64();
}

/* Frequency (Hz) of the active trace clock (plugin or built-in). */
static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (ltc)
		return ltc->freq();
	return trace_clock_freq_monotonic();
}

/*
 * Fill @uuid with the active trace clock's unique identifier. Returns 0
 * on success, negative error value on failure.
 */
static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (ltc && ltc->uuid)
		return ltc->uuid(uuid);
	/* Use default UUID cb when NULL */
	return trace_clock_uuid_monotonic(uuid);
}

/* Short name of the active trace clock (plugin or built-in). */
static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (ltc)
		return ltc->name();
	return trace_clock_name_monotonic();
}

/* Human-readable description of the active trace clock. */
static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (ltc)
		return ltc->description();
	return trace_clock_description_monotonic();
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */