/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
*
* ringbuffer/frontend_types.h
*
* Ring Buffer Library Synchronization Header (types).
*
* Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H

#include <linux/kref.h>
#include <linux/irq_work.h>
#include <ringbuffer/config.h>
#include <ringbuffer/backend_types.h>
#include <lttng/prio_heap.h> /* For per-CPU read-side iterator */
#include <lttng/cpuhotplug.h>

/*
 * A switch is either done during tracing (SWITCH_ACTIVE) or as a final
 * flush after tracing (SWITCH_FLUSH, which does not start writing into the
 * new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
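
/*
 * Illustrative sketch (not part of this header): the switch mode is passed
 * to the frontend's buffer-switch path, typically SWITCH_ACTIVE for a
 * periodic flush while tracing is live and SWITCH_FLUSH at teardown so
 * that writing does not resume in the new sub-buffer. The entry point
 * named below lives in ring_buffer_frontend.c and is assumed here for
 * illustration only:
 *
 *	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
 */
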
/* channel-level read-side iterator */
struct channel_iter {
/* Prio heap of buffers. Lowest timestamps at the top. */
struct lttng_ptr_heap heap; /* Heap of struct lttng_kernel_ring_buffer ptrs */
struct list_head empty_head; /* Empty buffers linked-list head */
	int read_open;			/* Opened for reading? */
u64 last_qs; /* Last quiescent state timestamp */
u64 last_timestamp; /* Last timestamp (for WARN_ON) */
int last_cpu; /* Last timestamp cpu */
/*
* read() file operation state.
*/
unsigned long len_left;
};
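
/*
 * Illustrative sketch (assumption, not defined in this header): the
 * channel-level iterator merges the per-CPU buffers by always consuming
 * from the buffer at the top of the heap, i.e. the one holding the record
 * with the lowest timestamp, roughly along these lines using the
 * lttng_heap_maximum() accessor from <lttng/prio_heap.h>:
 *
 *	struct lttng_kernel_ring_buffer *buf;
 *
 *	buf = lttng_heap_maximum(&chan->iter.heap);
 *	if (buf) {
 *		... consume the oldest available record from buf ...
 *	}
 */
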
/* channel: collection of per-cpu ring buffers. */
struct lttng_kernel_ring_buffer_channel {
atomic_t record_disabled;
unsigned long commit_count_mask; /*
* Commit count mask, removing
* the MSBs corresponding to
* bits used to represent the
* subbuffer index.
*/
struct channel_backend backend; /* Associated backend */
unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
struct lttng_cpuhp_node cpuhp_prepare;
struct lttng_cpuhp_node cpuhp_online;
struct lttng_cpuhp_node cpuhp_iter_online;
#else
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
unsigned int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
unsigned int hp_iter_enable:1; /* Enable hp iter notif. */
#endif
struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
int been_active; /* Tracing was started at least once */
};
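
/*
 * Illustrative sketch (assumption based on the commit_count_mask comment
 * above): since the most significant bits of the commit counter encode the
 * sub-buffer index, the mask can be derived from the log2 of the
 * sub-buffer count kept in the channel backend, roughly:
 *
 *	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
 */
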
/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
union v_atomic cc; /* Commit counter */
union v_atomic seq; /* Consecutive commits */
};

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
union v_atomic cc_sb; /* Incremented _once_ at sb switch */
	unsigned long end_events_discarded; /*
					 * Events-discarded counter value,
					 * sampled by the try_reserve or
					 * try_switch operation that fills a
					 * sub-buffer, and passed on to
					 * check_deliver so it can be written
					 * into the packet header field.
					 */
};
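
/*
 * Illustrative sketch: the hot counters are written on every record
 * commit, while the cold counters are only touched at sub-buffer switch
 * and delivery time, hence the split into two separately allocated
 * per-sub-buffer arrays (see commit_hot/commit_cold below). Reading the
 * commit counter of sub-buffer "idx" would roughly look like this,
 * assuming a config and buffer pointer are in scope:
 *
 *	unsigned long commit_count;
 *
 *	commit_count = v_read(config, &buf->commit_hot[idx].cc);
 */
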
/* Per-buffer read iterator */
struct lttng_kernel_ring_buffer_iter {
u64 timestamp; /* Current record timestamp */
size_t header_len; /* Current record header length */
size_t payload_len; /* Current record payload length */
struct list_head empty_node; /* Linked list of empty buffers */
unsigned long consumed, read_offset, data_size;
enum {
ITER_GET_SUBBUF = 0,
ITER_TEST_RECORD,
ITER_NEXT_RECORD,
ITER_PUT_SUBBUF,
} state;
unsigned int allocated:1;
	unsigned int read_open:1;	/* Opened for reading? */
};
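
/*
 * Sketch of the per-buffer iterator state machine implied by the enum
 * above: ITER_GET_SUBBUF (grab the next sub-buffer for reading)
 * -> ITER_TEST_RECORD (check whether a record is available)
 * -> ITER_NEXT_RECORD (advance past the current record)
 * -> ITER_PUT_SUBBUF (release the sub-buffer), then back to
 * ITER_GET_SUBBUF. The transitions themselves are implemented in the
 * iterator code (ring_buffer_iterator.c), not in this header.
 */
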
/* ring buffer state */
struct lttng_kernel_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
struct commit_counters_hot *commit_hot;
/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current consumed (read-side)
					 * position in the buffer.
					 * Standard atomic access (shared).
					 */
atomic_t record_disabled;
/* End of first 32 bytes cacheline */
union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
struct lttng_kernel_ring_buffer_backend backend; /* Associated backend */
struct commit_counters_cold *commit_cold;
/* Commit count per sub-buffer */
u64 *ts_end; /*
* timestamp_end per sub-buffer.
* Time is sampled by the
* switch_*_end() callbacks which
* are the last space reservation
* performed in the sub-buffer
* before it can be fully
* committed and delivered. This
* time value is then read by
* the deliver callback,
* performed by the last commit
* before the buffer becomes
* readable.
*/
atomic_long_t active_readers; /*
* Active readers count
* standard atomic access (shared)
*/
/* Dropped records */
union v_atomic records_lost_full; /* Buffer full */
union v_atomic records_lost_wrap; /* Nested wrap-around */
union v_atomic records_lost_big; /* Events too big */
union v_atomic records_count; /* Number of records written */
union v_atomic records_overrun; /* Number of overwritten records */
wait_queue_head_t read_wait; /* reader buffer-level wait queue */
wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* buffer has been finalized */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
struct lttng_kernel_ring_buffer_iter iter; /* read-side iterator */
unsigned long get_subbuf_consumed; /* Read-side consumed */
unsigned long prod_snapshot; /* Producer count snapshot */
unsigned long cons_snapshot; /* Consumer count snapshot */
unsigned int get_subbuf:1, /* Sub-buffer being held by reader */
switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
read_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
quiescent:1;
};
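
/*
 * Illustrative sketch (not part of this header): the "Dropped records"
 * counters above can be summed to report how many records a buffer has
 * lost, roughly:
 *
 *	unsigned long lost;
 *
 *	lost = v_read(config, &buf->records_lost_full)
 *	       + v_read(config, &buf->records_lost_wrap)
 *	       + v_read(config, &buf->records_lost_big);
 */
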
static inline
void *channel_get_private(struct lttng_kernel_ring_buffer_channel *chan)
{
return chan->backend.priv;
}
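
/*
 * Illustrative usage sketch: a client that stored its own state as the
 * channel backend's private data at creation time can retrieve it through
 * channel_get_private(). "struct my_client_state" is a hypothetical type
 * used only for illustration:
 *
 *	struct my_client_state *state = channel_get_private(chan);
 */
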
void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan);

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lttng_kernel_ring_buffer_channel or struct
 * channel_backend parameters.
 */
#define CHAN_WARN_ON(c, cond) \
({ \
struct lttng_kernel_ring_buffer_channel *__chan; \
int _____ret = unlikely(cond); \
if (_____ret) { \
if (__same_type(*(c), struct channel_backend)) \
__chan = container_of((void *) (c), \
struct lttng_kernel_ring_buffer_channel, \
backend); \
else if (__same_type(*(c), struct lttng_kernel_ring_buffer_channel)) \
__chan = (void *) (c); \
else \
BUG_ON(1); \
atomic_inc(&__chan->record_disabled); \
WARN_ON(1); \
} \
_____ret; \
})
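
/*
 * Illustrative usage sketch: validating an internal invariant. If the
 * condition is true, the owning channel's record_disabled counter is
 * incremented, a warning is emitted, and the macro evaluates to nonzero.
 * "write_offset" is an illustrative local variable:
 *
 *	if (CHAN_WARN_ON(chan, write_offset > chan->backend.buf_size))
 *		return;
 */
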
#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */