/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/wait.h>
#include "misc.h"
struct extent_changeset;
struct btrfs_fs_info;
struct btrfs_inode;
/* Bits for the extent state */
enum {
	/* Also reused as CHUNK_ALLOCATED for the device allocation tree (see below). */
	ENUM_BIT(EXTENT_DIRTY),
	/* Range is locked; part of EXTENT_LOCK_BITS (see below). */
	ENUM_BIT(EXTENT_LOCKED),
	/* Direct IO lock bit, part of EXTENT_LOCK_BITS; see the dio lock helpers below. */
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	/* Also reused as CHUNK_TRIMMED for the device allocation tree (see below). */
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	/* Part of EXTENT_DO_ACCOUNTING (see below). */
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	/* Part of EXTENT_DO_ACCOUNTING (see below). */
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * Mark that a range is being locked for finishing an ordered extent.
	 * Used together with EXTENT_LOCKED.
	 */
	ENUM_BIT(EXTENT_FINISHING_ORDERED),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
/* Bits cleared together when releasing both metadata and data reservations. */
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
/*
 * Control bits: these do not describe persistent range state, they modify how
 * a clear operation behaves (see the per-bit comments in the enum above).
 */
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)
/* All bits that act as range locks (regular and direct IO). */
#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)
/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)
/*
 * Identifies who owns an extent_io_tree; stored in extent_io_tree::owner and
 * passed to btrfs_extent_io_tree_init().  Also selects which member of the
 * fs_info/inode union in struct extent_io_tree is valid (see below).
 */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};
struct extent_io_tree {
	/* Red-black tree of struct extent_state nodes, protected by 'lock'. */
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 * accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};
	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;
	/* Protects 'state' and the extent_state nodes linked into it. */
	spinlock_t lock;
};
/* A single contiguous range [start, end] in an extent_io_tree. */
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	/* Link into extent_io_tree::state, keyed by the range offsets. */
	struct rb_node rb_node;
	/* ADD NEW ELEMENTS AFTER THIS */
	/* Waiters for state changes on this range (e.g. EXTENT_NEED_WAIT users). */
	wait_queue_head_t wq;
	/* Reference count; freed via btrfs_free_extent_state() when it drops. */
	refcount_t refs;
	/* Bitmask of the EXTENT_* state bits set on this range. */
	u32 state;
#ifdef CONFIG_BTRFS_DEBUG
	/* Debug-only list to detect leaked extent_state objects. */
	struct list_head leak_list;
#endif
};
/* Owning inode of the tree; presumably valid only for inode-owned trees — see the union above. */
const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree);
/* Owning fs_info of the tree, resolved through the union above. */
const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree);
/* Initialize an io tree; 'owner' is one of the IO_TREE_* values. */
void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *tree, unsigned int owner);
/* Release all extent states held in the tree. */
void btrfs_extent_io_tree_release(struct extent_io_tree *tree);
/* Lock [start, end] with the given lock bits (EXTENT_LOCKED and/or EXTENT_DIO_LOCKED). */
int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
			   struct extent_state **cached);
/* Non-blocking variant of btrfs_lock_extent_bits(); returns true on success. */
bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				u32 bits, struct extent_state **cached);
/* Lock the range [start, end] with the regular EXTENT_LOCKED bit. */
static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				    struct extent_state **cached)
{
	const u32 lock_bits = EXTENT_LOCKED;

	return btrfs_lock_extent_bits(tree, start, end, lock_bits, cached);
}
/*
 * Try to lock the range [start, end] with the regular EXTENT_LOCKED bit,
 * without blocking.  Returns true if the lock was taken.
 */
static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
					 u64 end, struct extent_state **cached)
{
	const u32 lock_bits = EXTENT_LOCKED;

	return btrfs_try_lock_extent_bits(tree, start, end, lock_bits, cached);
}
/* Create the extent_state slab cache (module init). */
int __init btrfs_extent_state_init_cachep(void);
/* Destroy the extent_state slab cache (module exit). */
void __cold btrfs_extent_state_free_cachep(void);
/*
 * Count bytes in [*start, search_end] with the given bits set, up to
 * max_bytes; 'contig' presumably restricts the count to a contiguous run.
 * On return *start is updated — confirm exact semantics in the definition.
 */
u64 btrfs_count_range_bits(struct extent_io_tree *tree,
			   u64 *start, u64 search_end,
			   u64 max_bytes, u32 bits, int contig,
			   struct extent_state **cached_state);
/* Drop a reference on an extent_state, freeing it when the count hits zero. */
void btrfs_free_extent_state(struct extent_state *state);
/* Test whether 'bit' is set across the whole range [start, end]. */
bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
			  struct extent_state *cached_state);
/* Test whether 'bit' is set anywhere in the range [start, end]. */
bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
/* Collect into *bits the state bits found in the range [start, end]. */
void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
			  struct extent_state **cached_state);
/* Clear bits in [start, end], recording the affected ranges in 'changeset'. */
int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				   u32 bits, struct extent_changeset *changeset);
/* Core bit-clearing primitive; the inline wrappers below pass NULL changeset. */
int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
				     u32 bits, struct extent_state **cached,
				     struct extent_changeset *changeset);
/* Clear 'bits' in the range [start, end] without changeset tracking. */
static inline int btrfs_clear_extent_bit(struct extent_io_tree *tree, u64 start,
					 u64 end, u32 bits,
					 struct extent_state **cached)
{
	struct extent_changeset *changeset = NULL;

	return btrfs_clear_extent_bit_changeset(tree, start, end, bits, cached,
						changeset);
}
/* Unlock the range [start, end] by clearing the regular EXTENT_LOCKED bit. */
static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				      struct extent_state **cached)
{
	return btrfs_clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached);
}
/* Clear 'bits' in [start, end] with neither a cached state nor a changeset. */
static inline int btrfs_clear_extent_bits(struct extent_io_tree *tree, u64 start,
					  u64 end, u32 bits)
{
	return btrfs_clear_extent_bit_changeset(tree, start, end, bits, NULL, NULL);
}
/* Set bits in [start, end], recording the affected ranges in 'changeset'. */
int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				 u32 bits, struct extent_changeset *changeset);
/* Set bits in [start, end] without changeset tracking. */
int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			 u32 bits, struct extent_state **cached_state);
/*
 * Clear dirty-related state from [start, end]: the dirty and delalloc bits
 * plus the accounting bits (EXTENT_DO_ACCOUNTING).
 */
static inline int btrfs_clear_extent_dirty(struct extent_io_tree *tree, u64 start,
					   u64 end, struct extent_state **cached)
{
	const u32 bits = EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;

	return btrfs_clear_extent_bit(tree, start, end, bits, cached);
}
/* Atomically set 'bits' and clear 'clear_bits' over the range [start, end]. */
int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, u32 clear_bits,
			     struct extent_state **cached_state);
/* Find the first range at/after 'start' with any of 'bits' set; returns true if found. */
bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits,
				 struct extent_state **cached_state);
/* Find the first range at/after 'start' where none of 'bits' are set. */
void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				       u64 *start_ret, u64 *end_ret, u32 bits);
/* Find a contiguous run of ranges with 'bits' set starting at/after 'start'. */
bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
				      u64 *start_ret, u64 *end_ret, u32 bits);
/* Find a delalloc range near *start, bounded by max_bytes; returns true if found. */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
/* Lock the range [start, end] with the direct IO lock bit. */
static inline int btrfs_lock_dio_extent(struct extent_io_tree *tree, u64 start,
					u64 end, struct extent_state **cached)
{
	const u32 lock_bits = EXTENT_DIO_LOCKED;

	return btrfs_lock_extent_bits(tree, start, end, lock_bits, cached);
}
/*
 * Try to lock the range [start, end] with the direct IO lock bit, without
 * blocking.  Returns true if the lock was taken.
 */
static inline bool btrfs_try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
					     u64 end, struct extent_state **cached)
{
	const u32 lock_bits = EXTENT_DIO_LOCKED;

	return btrfs_try_lock_extent_bits(tree, start, end, lock_bits, cached);
}
/* Unlock the range [start, end] by clearing the direct IO lock bit. */
static inline int btrfs_unlock_dio_extent(struct extent_io_tree *tree, u64 start,
					  u64 end, struct extent_state **cached)
{
	return btrfs_clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached);
}
/* Return the extent_state following 'state' in the tree, or NULL — confirm NULL semantics in the definition. */
struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
					     struct extent_state *state);
#endif /* BTRFS_EXTENT_IO_TREE_H */