/* SPDX-License-Identifier: GPL-2.0
*
* FUSE: Filesystem in Userspace
* Copyright (c) 2023-2024 DataDirect Networks.
*/
#ifndef _FS_FUSE_DEV_URING_I_H
#define _FS_FUSE_DEV_URING_I_H
#include "fuse_i.h"
#ifdef CONFIG_FUSE_IO_URING
#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)
/*
 * Lifecycle states of a struct fuse_ring_ent. State transitions are
 * protected by the owning queue->lock (see struct fuse_ring_ent).
 */
enum fuse_ring_req_state {
	/* Entry is not initialized / not in any valid state */
	FRRS_INVALID = 0,

	/* The ring entry received from userspace and it is being processed */
	FRRS_COMMIT,

	/* The ring entry is waiting for new fuse requests */
	FRRS_AVAILABLE,

	/* The ring entry got assigned a fuse req */
	FRRS_FUSE_REQ,

	/* The ring entry is in or on the way to user space */
	FRRS_USERSPACE,

	/* The ring entry is in teardown */
	FRRS_TEARDOWN,

	/* The ring entry is released, but not freed yet */
	FRRS_RELEASED,
};
/** A fuse ring entry, part of the ring queue */
struct fuse_ring_ent {
	/* userspace buffer */
	struct fuse_uring_req_header __user *headers;
	void __user *payload;

	/* the ring queue that owns the request */
	struct fuse_ring_queue *queue;

	/* fields below are protected by queue->lock */

	/* io_uring command currently associated with this entry */
	struct io_uring_cmd *cmd;

	/* linkage into one of the queue's ent_* lists */
	struct list_head list;

	enum fuse_ring_req_state state;

	/* fuse request currently assigned to this entry, if any */
	struct fuse_req *fuse_req;
};
/* Per-core queue of ring entries and pending fuse requests */
struct fuse_ring_queue {
	/*
	 * back pointer to the main fuse uring structure that holds this
	 * queue
	 */
	struct fuse_ring *ring;

	/* queue id, corresponds to the cpu core */
	unsigned int qid;

	/*
	 * queue lock, taken when any value in the queue changes _and_ also
	 * a ring entry state changes.
	 */
	spinlock_t lock;

	/* available ring entries (struct fuse_ring_ent) */
	struct list_head ent_avail_queue;

	/*
	 * entries in the process of being committed or in the process
	 * to be sent to userspace
	 */
	struct list_head ent_w_req_queue;

	struct list_head ent_commit_queue;

	/* entries in userspace */
	struct list_head ent_in_userspace;

	/* entries that are released */
	struct list_head ent_released;

	/* fuse requests waiting for an entry slot */
	struct list_head fuse_req_queue;

	/* background fuse requests */
	struct list_head fuse_req_bg_queue;

	/* processing queue for requests handed to this ring queue */
	struct fuse_pqueue fpq;

	/* NOTE(review): presumably the count of in-flight background
	 * requests on this queue — confirm against dev_uring.c
	 */
	unsigned int active_background;

	/* set when the queue no longer accepts work (teardown) */
	bool stopped;
};
/**
 * Describes if uring is for communication and holds all the data needed
 * for uring communication
 */
struct fuse_ring {
	/* back pointer */
	struct fuse_conn *fc;

	/* number of ring queues */
	size_t nr_queues;

	/* maximum payload/arg size */
	size_t max_payload_sz;

	/* array of nr_queues queue pointers */
	struct fuse_ring_queue **queues;

	/*
	 * Log ring entry states on stop when entries cannot be released
	 */
	unsigned int stop_debug_log : 1;

	/* woken once queue_refs drops to zero during teardown */
	wait_queue_head_t stop_waitq;

	/* async tear down */
	struct delayed_work async_teardown_work;

	/* log */
	unsigned long teardown_time;

	/* references held by queues; teardown waits for this to reach zero */
	atomic_t queue_refs;

	/* set when the ring is usable for communication */
	bool ready;
};
/* Whether fuse-over-io-uring communication is enabled */
bool fuse_uring_enabled(void);

/* Free all ring data attached to the connection */
void fuse_uring_destruct(struct fuse_conn *fc);

/* Stop all queues of the ring (teardown) */
void fuse_uring_stop_queues(struct fuse_ring *ring);

/* End/abort requests pending on the ring queues */
void fuse_uring_abort_end_requests(struct fuse_ring *ring);

/* io_uring command entry point (IORING_OP_URING_CMD) */
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);

/* Queue a foreground fuse request onto a ring queue */
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);

/* Queue a background fuse request; returns false if it could not be queued */
bool fuse_uring_queue_bq_req(struct fuse_req *req);

/* Remove a not-yet-dispatched request; returns true if it was removed */
bool fuse_uring_remove_pending_req(struct fuse_req *req);

/* Whether any request on the ring has exceeded its timeout */
bool fuse_uring_request_expired(struct fuse_conn *fc);
/*
 * Abort the connection's ring: end pending requests and stop all queues.
 * No-op when the connection has no ring or all queue references are gone.
 */
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
	struct fuse_ring *ring = fc->ring;

	if (!ring)
		return;

	if (atomic_read(&ring->queue_refs) > 0) {
		fuse_uring_abort_end_requests(ring);
		fuse_uring_stop_queues(ring);
	}
}
/* Block until every queue reference of the connection's ring is dropped */
static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
{
	struct fuse_ring *ring = fc->ring;

	if (!ring)
		return;

	wait_event(ring->stop_waitq, atomic_read(&ring->queue_refs) == 0);
}
static inline bool fuse_uring_ready(struct fuse_conn *fc)
{
return fc->ring && fc->ring->ready;
}
#else /* CONFIG_FUSE_IO_URING */
/*
 * Stubs for kernels built without CONFIG_FUSE_IO_URING: the io-uring
 * transport is never active, so teardown helpers are no-ops and all
 * predicates report "disabled"/"nothing pending".
 */
static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}

static inline bool fuse_uring_enabled(void)
{
	return false;
}

static inline void fuse_uring_abort(struct fuse_conn *fc)
{
}

static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
{
}

static inline bool fuse_uring_ready(struct fuse_conn *fc)
{
	return false;
}

static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
{
	return false;
}

static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
{
	return false;
}
#endif /* CONFIG_FUSE_IO_URING */
#endif /* _FS_FUSE_DEV_URING_I_H */