/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
#include <linux/bio.h>

/*
 * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
 */
#define ENUM_BIT(name)					\
	__ ## name ## _BIT,				\
	name = (1U << __ ## name ## _BIT),		\
	__ ## name ## _SEQ = __ ## name ## _BIT
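
/*
 * Illustrative expansion (hypothetical enumerators, not part of btrfs): the
 * trailing __name_SEQ = __name_BIT assignment resets the implicit enum
 * counter, so the next ENUM_BIT() continues at the following bit index:
 *
 *	enum {
 *		ENUM_BIT(EXAMPLE_FLAG_A),	-> EXAMPLE_FLAG_A == (1U << 0)
 *		ENUM_BIT(EXAMPLE_FLAG_B),	-> EXAMPLE_FLAG_B == (1U << 1)
 *		ENUM_BIT(EXAMPLE_FLAG_C),	-> EXAMPLE_FLAG_C == (1U << 2)
 *	};
 */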

static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
{
	struct bio_vec bv = bio_iter_iovec(bio, *iter);

	return bvec_phys(&bv);
}

/*
 * Iterate a bio using the btrfs block size.
 *
 * This handles large folios and highmem.
 *
 * @paddr:	Physical memory address of each iteration
 * @bio:	The bio to iterate
 * @iter:	The bvec_iter (pointer) to use
 * @blocksize:	The block size to advance by in each iteration
 *
 * This requires all folios in the bio to cover at least one block.
 */
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize)		\
	for (; (iter)->bi_size &&					\
	     (paddr = bio_iter_phys((bio), (iter)), 1);			\
	     bio_advance_iter_single((bio), (iter), (blocksize)))
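
/*
 * Usage sketch (hypothetical caller; assumes @bio is a valid bio whose size
 * is a multiple of @blocksize). The caller provides the iterator, so the
 * iteration can start from any position, e.g. a copy of bio->bi_iter:
 *
 *	struct bvec_iter iter = bio->bi_iter;
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block(paddr, bio, &iter, blocksize) {
 *		// @paddr is the physical address of one block
 *	}
 */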

/* Initialize a bvec_iter to the size of the specified bio. */
static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	u32 bio_size = 0;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	return (struct bvec_iter) {
		.bi_sector = 0,
		.bi_size = bio_size,
		.bi_idx = 0,
		.bi_bvec_done = 0,
	};
}

/* Iterate all blocks of the bio from the beginning, using a local iterator. */
#define btrfs_bio_for_each_block_all(paddr, bio, blocksize)		\
	for (struct bvec_iter iter = init_bvec_iter_for_bio(bio);	\
	     (iter).bi_size &&						\
	     (paddr = bio_iter_phys((bio), &(iter)), 1);		\
	     bio_advance_iter_single((bio), &(iter), (blocksize)))
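
/*
 * Usage sketch (hypothetical caller): unlike btrfs_bio_for_each_block(),
 * this variant declares its own iterator and always walks the full bio:
 *
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block_all(paddr, bio, blocksize) {
 *		// runs once per block over the whole bio
 *	}
 */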

static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb() barrier, see the comments for
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active() is implied by some of the preceding code, e.g.
	 * an atomic operation (atomic_dec_return(), ...), or an unlock/lock
	 * sequence.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
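
/*
 * Typical waiter/waker pattern (illustrative sketch; @done and @wq are
 * hypothetical). The waker publishes its update before the conditional
 * wakeup, the waiter re-checks the condition via wait_event():
 *
 *	// waker
 *	atomic_set(&done, 1);
 *	cond_wake_up(&wq);
 *
 *	// waiter
 *	wait_event(wq, atomic_read(&done) == 1);
 */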

static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
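
/*
 * Example: mult_perc(4096, 25) == 1024. Note that the multiplication is done
 * first, so @num * @percent must fit in a u64 for the result to be exact.
 */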

/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
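
/*
 * Examples: is_power_of_two_u64(1ULL << 40) == true, while
 * is_power_of_two_u64(0) == false and is_power_of_two_u64(6) == false
 * (more than one bit set).
 */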

/*
 * Simple bytenr-based rb_tree related structures.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};
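
/*
 * Embedding sketch (hypothetical structure): placing the members first lets
 * the rb_simple_* helpers operate on a pointer to the containing structure:
 *
 *	struct example_entry {
 *		struct rb_simple_node simple_node;	// must be first
 *		// other members ...
 *	};
 */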

/* Search for an entry with exactly @bytenr, return NULL if not found. */
static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node of the first entry that starts at or after @bytenr.
 * If there is no entry at or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}
			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}
	return ret;
}
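
/*
 * Usage sketch (hypothetical caller): walk all entries at or after @start in
 * bytenr order by combining rb_simple_search_first() with rb_next():
 *
 *	struct rb_node *node = rb_simple_search_first(root, start);
 *
 *	while (node) {
 *		struct rb_simple_node *entry;
 *
 *		entry = rb_entry(node, struct rb_simple_node, rb_node);
 *		// process @entry
 *		node = rb_next(node);
 *	}
 */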

/* Comparator for rb_find_add(), ordering rb_simple_nodes by bytenr. */
static inline int rb_simple_node_bytenr_cmp(struct rb_node *new,
					    const struct rb_node *existing)
{
	struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
	struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node,
							 rb_node);

	if (new_entry->bytenr < existing_entry->bytenr)
		return -1;
	else if (new_entry->bytenr > existing_entry->bytenr)
		return 1;
	return 0;
}

static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       struct rb_simple_node *simple_node)
{
	return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}
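
/*
 * Usage sketch (hypothetical caller): rb_find_add() returns the existing
 * node when one with the same bytenr is already present, or NULL when
 * @entry->simple_node was inserted:
 *
 *	struct rb_node *exist;
 *
 *	entry->simple_node.bytenr = bytenr;
 *	exist = rb_simple_insert(root, &entry->simple_node);
 *	if (exist) {
 *		// duplicate bytenr, @entry was not inserted
 *	}
 */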

static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}
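
/*
 * Example: for a bitmap with value 0b00111100 (bits 2-5 set):
 *
 *	bitmap_test_range_all_set(&bitmap, 2, 4)  -> true
 *	bitmap_test_range_all_set(&bitmap, 2, 5)  -> false (bit 6 is clear)
 *	bitmap_test_range_all_zero(&bitmap, 6, 2) -> true
 */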

/* Return the file offset of the first byte after @folio. */
static inline u64 folio_end(struct folio *folio)
{
	return folio_pos(folio) + folio_size(folio);
}
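
/*
 * Example: a 16K folio whose first byte is at file offset 64K has
 * folio_end() == 80K, i.e. the exclusive end of the folio's range.
 */
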
#endif