File: nv-kthread-q.h

Package: nvidia-open-gpu-kernel-modules 550.163.01-4
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NV_KTHREAD_QUEUE_H__
#define __NV_KTHREAD_QUEUE_H__

struct nv_kthread_q;
struct nv_kthread_q_item;
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;

typedef void (*nv_q_func_t)(void *args);

#include "nv-kthread-q-os.h"

////////////////////////////////////////////////////////////////////////////////
// nv_kthread_q:
//
// 1. API and overview
//
//    This "nv_kthread_q" system implements a simple queuing system for deferred
//    work. The nv_kthread_q system has goals and use cases that are similar to
//    the named workqueues in the Linux kernel, but nv_kthread_q is much (10x or
//    so) smaller, simpler--and correspondingly less general. Deferred work
//    items are put into a queue, and run within the context of a dedicated set
//    of kernel threads (kthread).
//
//    In order to avoid confusion with the Linux workqueue system, I have
//    avoided using the term "work", and instead refer to "queues" (also called
//    "q's") and "queue items" (also called "q_items"), in both variable names
//    and comments.
//
//    This module depends only upon the Linux kernel.
//
//    Queue items that are submitted to separate nv_kthread_q instances are
//    guaranteed to be run in different kthreads.
//
//    Queue items that are submitted to the same nv_kthread_q are not guaranteed
//    to be serialized, nor are they guaranteed to run concurrently.
//
// 2. Allocations
//
//    The caller allocates queues and queue items. The nv_kthread_q APIs do
//    the initialization (zeroing and setup) of queues and queue items.
//    Allocation is handled that way because one of the first use cases is a
//    bottom-half interrupt handler, and for that, queue items should be
//    pre-allocated (for example, one per GPU), so that no allocation is
//    required in the top-half interrupt handler. The relevant API calls are
//    described in the sections below.
//
// 3. Queue initialization
//
//    nv_kthread_q_init() initializes a queue on the current NUMA node.
//
//    or
//
//    nv_kthread_q_init_on_node() initializes a queue on a specific NUMA node.
//
// 4. Scheduling things for the queue to run
//
//    The nv_kthread_q_schedule_q_item() routine will schedule a q_item to run.
//
// 5. Stopping the queue(s)
//
//    The nv_kthread_q_stop() routine will flush the queue, and safely stop
//    the kthread, before returning.
//
////////////////////////////////////////////////////////////////////////////////
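//
// Example: a minimal end-to-end usage sketch (illustrative only; some_q,
// some_q_item, my_callback, my_setup, and my_teardown are hypothetical
// caller-provided names, not part of this API):
//
//     static nv_kthread_q_t some_q;
//     static nv_kthread_q_item_t some_q_item;
//
//     static void my_callback(void *args)
//     {
//         // Deferred work runs here, in the queue's kthread context.
//     }
//
//     static int my_setup(void)
//     {
//         // Negative Linux errno on failure, zero on success:
//         int ret = nv_kthread_q_init(&some_q, "my_q");
//         if (ret != 0)
//             return ret;
//
//         nv_kthread_q_item_init(&some_q_item, my_callback, NULL);
//         nv_kthread_q_schedule_q_item(&some_q, &some_q_item);
//         return 0;
//     }
//
//     static void my_teardown(void)
//     {
//         // Flushes the queue and stops its kthread:
//         nv_kthread_q_stop(&some_q);
//     }
//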

//
// The queue must not be used before calling this routine.
//
// The caller allocates an nv_kthread_q_t item. This routine initializes
// the queue, and starts up a kernel thread ("kthread") to service the queue.
// The queue will initially be empty; there is intentionally no way to
// pre-initialize the queue with items to run.
//
// In order to avoid external dependencies (specifically, NV_STATUS codes), this
// returns a Linux kernel (negative) errno on failure, and zero on success (see
// the error-handling sketch after the declaration, below). It is safe to call
// nv_kthread_q_stop() on a queue for which nv_kthread_q_init() failed.
//
// A short prefix of the qname arg will show up in []'s, via the ps(1) utility.
//
// The kernel thread stack is preferably allocated on the specified NUMA node, 
// but fallback to another node is possible because kernel allocators do not
// guarantee affinity. Note that NUMA-affinity applies only to
// the kthread stack. This API does not do anything about limiting the CPU
// affinity of the kthread. That is left to the caller.
//
// Reusing a queue: once a queue is initialized, it must be safely shut down
// (see "Stopping the queue(s)", below), before it can be reused. So, for
// a simple queue use case, the following will work:
//
//     nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node);
//     nv_kthread_q_stop(&some_q);
//     nv_kthread_q_init_on_node(&some_q, "reincarnated", preferred_node);
//     nv_kthread_q_stop(&some_q);
//
int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
                              const char *qname,
                              int preferred_node);
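
//
// Example: error handling for a failed init (an illustrative sketch; some_q
// and preferred_node are hypothetical caller-owned names):
//
//     int ret = nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node);
//     if (ret != 0) {
//         // ret is a negative errno and the queue's kthread was not started.
//         // Calling nv_kthread_q_stop(&some_q) here would still be safe: it
//         // is a no-op for a queue whose init failed.
//         return ret;
//     }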

//
// This routine is the same as nv_kthread_q_init_on_node(), except that the
// kernel thread stack will be allocated on the NUMA node of the caller.
//
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname);

//
// The caller is responsible for stopping all queues, by calling this routine
// before, for example, kernel module unloading. This nv_kthread_q_stop()
// routine will flush the queue, and safely stop the kthread, before returning.
//
// You may ONLY call nv_kthread_q_stop() once, unless you reinitialize the
// queue in between, as shown in the nv_kthread_q_init_on_node() documentation,
// above.
//
// Do not add any more items to the queue after calling nv_kthread_q_stop.
//
// Calling nv_kthread_q_stop() on a queue which has been zero-initialized or
// for which nv_kthread_q_init() failed, is a no-op.
//
void nv_kthread_q_stop(nv_kthread_q_t *q);

//
// All items that were in the queue before nv_kthread_q_flush was called, and
// all items scheduled by those items, will get run before this function
// returns.
//
// You may NOT call nv_kthread_q_flush() after having called nv_kthread_q_stop.
//
// This actually flushes the queue twice. That ensures that the queue is fully
// flushed, for an important use case: rescheduling from within one's own
// callback. In order to do that safely, you need to (as sketched in the
// example after the declaration, below):
//
//     -- set a flag that tells the callback to stop rescheduling itself.
//
//     -- call either nv_kthread_q_flush or nv_kthread_q_stop (which internally
//        calls nv_kthread_q_flush). The nv_kthread_q_flush, in turn, actually
//        flushes the queue *twice*. The first flush waits for any callbacks
//        to finish, that missed seeing the "stop_rescheduling" flag. The
//        second flush waits for callbacks that were already scheduled when the
//        first flush finished.
//
void nv_kthread_q_flush(nv_kthread_q_t *q);
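
//
// Example: safely shutting down a callback that reschedules itself (an
// illustrative sketch; stop_rescheduling, self_q, and self_item are
// hypothetical caller-owned names):
//
//     static atomic_t stop_rescheduling = ATOMIC_INIT(0);
//
//     static void self_rescheduling_callback(void *args)
//     {
//         // ... do the deferred work ...
//
//         if (!atomic_read(&stop_rescheduling))
//             nv_kthread_q_schedule_q_item(&self_q, &self_item);
//     }
//
//     // Shutdown path: stop the rescheduling first, then flush (or call
//     // nv_kthread_q_stop, which flushes internally):
//     atomic_set(&stop_rescheduling, 1);
//     nv_kthread_q_flush(&self_q);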

// Assigns function_to_run and function_args to the q_item.
//
// This must be called before calling nv_kthread_q_schedule_q_item.
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args);

//
// The caller must have already set up the queue, via nv_kthread_q_init().
// The caller owns the lifetime of the q_item. The nv_kthread_q system runs
// q_items, and adds or removes them from the queue. However, due to the first
// law of q-dynamics, it neither creates nor destroys q_items.
//
// When the callback (the function_to_run argument) is actually run, it is OK
// to free the q_item from within that routine. The nv_kthread_q system
// promises to be done with the q_item before that point.
//
// nv_kthread_q_schedule_q_item may be called from multiple threads at once,
// without danger of corrupting anything. This routine may also be safely
// called from interrupt context, including top-half ISRs (see the sketch
// after the declaration, below).
//
// It is OK to reschedule the same q_item from within its own callback function.
//
// It is also OK to attempt to reschedule the same q_item, if that q_item is
// already pending in the queue. The q_item will not be rescheduled if it is
// already pending.
//
// Returns true (non-zero) if the item was actually scheduled. Returns false if
// the item was not scheduled, which can happen if:
//
//     -- The q_item was already pending in a queue, or
//     -- The queue is shutting down (or not yet started up).
//
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item);
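
//
// Example: scheduling pre-allocated bottom-half work from a top-half ISR (an
// illustrative sketch; bh_q, bh_item, and my_isr are hypothetical caller-owned
// names, and bh_item is assumed to have been set up once during driver init
// with nv_kthread_q_item_init()):
//
//     static irqreturn_t my_isr(int irq, void *dev_id)
//     {
//         // No allocation is needed here, because the q_item was
//         // pre-allocated, so this is safe in top-half interrupt context.
//         // If bh_item is already pending, this returns false and the item
//         // is not scheduled a second time.
//         nv_kthread_q_schedule_q_item(&bh_q, &bh_item);
//
//         return IRQ_HANDLED;
//     }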

// Built-in test. Returns -1 if any subtest failed, or 0 upon success.
int nv_kthread_q_run_self_test(void);

#endif // __NV_KTHREAD_QUEUE_H__