#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <asm/io.h>

struct request_queue;
typedef struct request_queue request_queue_t;

struct elevator_s;
typedef struct elevator_s elevator_t;

/*
 * Ok, this is an expanded form so that we can use the same
 * request for paging requests.
 */
struct request {
	struct list_head queue;
	int elevator_sequence;

	volatile int rq_status;	/* should split this into a few status bits */
#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

	kdev_t rq_dev;
	int cmd;		/* READ or WRITE */
	int errors;
	unsigned long start_time;
	unsigned long sector;
	unsigned long nr_sectors;
	unsigned long hard_sector, hard_nr_sectors;
	unsigned int nr_segments;
	unsigned int nr_hw_segments;
	unsigned long current_nr_sectors, hard_cur_sectors;
	void * special;
	char * buffer;
	struct completion * waiting;
	struct buffer_head * bh;
	struct buffer_head * bhtail;
	request_queue_t *q;
};
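
/*
 * Illustrative sketch (an assumption, not part of this header): for a
 * filesystem request a driver walks the buffer_head chain from bh to
 * bhtail via b_reqnext; sector and current_nr_sectors describe only the
 * first segment, nr_sectors the whole request.  my_handle_segment() is
 * a hypothetical helper.
 *
 *	struct buffer_head *tmp;
 *
 *	for (tmp = req->bh; tmp != NULL; tmp = tmp->b_reqnext)
 *		my_handle_segment(tmp->b_data, tmp->b_size);
 */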
#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *q,
				struct request *req,
				struct buffer_head *bh,
				int);
typedef int (merge_requests_fn) (request_queue_t *q,
				 struct request *req,
				 struct request *req2,
				 int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);

struct request_list {
	unsigned int count;
	unsigned int pending[2];
	struct list_head free;
};

struct request_queue
{
	/*
	 * the queue request freelist: a single free list, with separate
	 * pending counts for reads and writes
	 */
	struct request_list	rq;

	/*
	 * The total number of requests on this queue
	 */
	int			nr_requests;

	/*
	 * Batching threshold (in requests) for sleep/wakeup decisions
	 */
	int			batch_requests;

	/*
	 * The total number of 512-byte sectors in flight on this queue
	 */
	atomic_t		nr_sectors;

	/*
	 * Batching threshold (in sectors) for sleep/wakeup decisions
	 */
	int			batch_sectors;

	/*
	 * The maximum number of 512-byte sectors allowed on this queue
	 */
	int			max_queue_sectors;

	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	elevator_t		elevator;

	request_fn_proc		* request_fn;
	merge_request_fn	* back_merge_fn;
	merge_request_fn	* front_merge_fn;
	merge_requests_fn	* merge_requests_fn;
	make_request_fn		* make_request_fn;
	plug_device_fn		* plug_device_fn;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			* queuedata;

	/*
	 * This is used to remove the plug when tq_disk runs.
	 */
	struct tq_struct	plug_tq;

	/*
	 * Boolean that indicates whether this queue is plugged or not.
	 */
	int			plugged:1;

	/*
	 * Boolean that indicates whether current_request is active or
	 * not.
	 */
	int			head_active:1;

	/*
	 * Boolean that indicates the driver will use blk_started_sectors
	 * and blk_finished_sectors in addition to blk_started_io and
	 * blk_finished_io.  It enables the throttling code to keep the
	 * number of sectors in flight at a reasonable value.
	 */
	int			can_throttle:1;

	unsigned long		bounce_pfn;

	/*
	 * Meant to protect the queue in the future, replacing the
	 * global io_request_lock
	 */
	spinlock_t		queue_lock;

	/*
	 * Tasks wait here for free read and write requests
	 */
	wait_queue_head_t	wait_for_requests;
};

#define blk_queue_plugged(q)	(q)->plugged
#define blk_fs_request(rq)	((rq)->cmd == READ || (rq)->cmd == WRITE)
#define blk_queue_empty(q)	list_empty(&(q)->queue_head)

extern inline int rq_data_dir(struct request *rq)
{
	if (rq->cmd == READ)
		return READ;
	else if (rq->cmd == WRITE)
		return WRITE;
	else {
		BUG();
		return -1; /* ahem */
	}
}
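
/*
 * Example (illustrative): since READ is 0 and WRITE is 1, the return
 * value can index per-direction state such as the pending[] counts in
 * struct request_list, e.g.
 *
 *	q->rq.pending[rq_data_dir(rq)]++;
 */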

extern unsigned long blk_max_low_pfn, blk_max_pfn;

#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)

extern void blk_queue_bounce_limit(request_queue_t *, u64);
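
/*
 * Example (illustrative): a driver whose hardware cannot DMA to highmem
 * pages would restrict the queue once at setup time:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *
 * Buffers above the limit are then copied through bounce buffers by
 * blk_queue_bounce() below.
 */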
#ifdef CONFIG_HIGHMEM
extern struct buffer_head *create_bounce(int, struct buffer_head *);

extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
						   struct buffer_head *bh)
{
	struct page *page = bh->b_page;

#ifndef CONFIG_DISCONTIGMEM
	if (page - mem_map <= q->bounce_pfn)
#else
	if ((page - page_zone(page)->zone_mem_map) +
	    (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
#endif
		return bh;

	return create_bounce(rw, bh);
}
#else
#define blk_queue_bounce(q, rw, bh) (bh)
#endif

#define bh_phys(bh)		(page_to_phys((bh)->b_page) + bh_offset((bh)))

#define BH_CONTIG(b1, b2)	(bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
#define BH_PHYS_4G(b1, b2)	((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))
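
/*
 * Example (illustrative): a segment merge check typically accepts two
 * buffers only when they are physically contiguous and sit within the
 * same 4GB segment:
 *
 *	if (BH_CONTIG(bh, nxt) && BH_PHYS_4G(bh, nxt))
 *		return 1;	(ok to merge)
 */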

struct blk_dev_struct {
	/*
	 * queue_proc has to be atomic
	 */
	request_queue_t		request_queue;
	queue_proc		*queue;
	void			*data;
};

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues.  We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)	&blk_dev[_MAJOR].request_queue
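
/*
 * Example (illustrative): a driver registered under a hypothetical
 * MY_MAJOR that doesn't implement multiple queues would simply do
 *
 *	blk_init_queue(BLK_DEFAULT_QUEUE(MY_MAJOR), my_request_fn);
 */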
extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern inline request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors);
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_throttle_sectors(request_queue_t *, int);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern inline int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);
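
/*
 * Example (illustrative, hypothetical names): typical initialization
 * combining the access functions above.
 *
 *	request_queue_t *q = BLK_DEFAULT_QUEUE(MY_MAJOR);
 *
 *	blk_init_queue(q, my_request_fn);
 *	blk_queue_headactive(q, 0);		(driver dequeues requests itself)
 *	blk_queue_throttle_sectors(q, 1);	(opt in to sector throttling)
 */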
extern int * blk_size[MAX_BLKDEV];
extern int * blksize_size[MAX_BLKDEV];
extern int * hardsect_size[MAX_BLKDEV];
extern int * max_readahead[MAX_BLKDEV];
extern int * max_sectors[MAX_BLKDEV];
extern int * max_segments[MAX_BLKDEV];

#define MAX_SEGMENTS		128
#define MAX_SECTORS		255
#define MAX_QUEUE_SECTORS	(4 << (20 - 9))	/* 4 mbytes when full sized */
#define MAX_NR_REQUESTS		1024		/* 1024k when in 512 units, normally min is 1M in 1k units */

#define PageAlignSize(size)	(((size) + PAGE_SIZE - 1) & PAGE_MASK)

#define blkdev_entry_to_request(entry)	list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req)	blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req)	blkdev_entry_to_request((req)->queue.prev)
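
/*
 * Example (illustrative): draining a queue with the accessors above;
 * my_start_io() is a hypothetical helper that completes the request.
 *
 *	while (!list_empty(&q->queue_head)) {
 *		struct request *req = blkdev_entry_next_request(&q->queue_head);
 *		list_del(&req->queue);
 *		my_start_io(req);
 *	}
 */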

extern void drive_stat_acct (kdev_t dev, int rw,
			     unsigned long nr_sectors, int new_io);

static inline int get_hardsect_size(kdev_t dev)
{
	int retval = 512;
	int major = MAJOR(dev);

	if (hardsect_size[major]) {
		int minor = MINOR(dev);
		if (hardsect_size[major][minor])
			retval = hardsect_size[major][minor];
	}
	return retval;
}
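
/*
 * Example (illustrative): callers use this as a lower bound when
 * validating a software block size, e.g.
 *
 *	if (size < get_hardsect_size(dev))
 *		return -EINVAL;
 */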

static inline int blk_oversized_queue(request_queue_t * q)
{
	if (q->can_throttle)
		return atomic_read(&q->nr_sectors) > q->max_queue_sectors;
	return q->rq.count == 0;
}

static inline int blk_oversized_queue_reads(request_queue_t * q)
{
	if (q->can_throttle)
		return atomic_read(&q->nr_sectors) > q->max_queue_sectors + q->batch_sectors;
	return q->rq.count == 0;
}

static inline int blk_oversized_queue_batch(request_queue_t * q)
{
	return atomic_read(&q->nr_sectors) > q->max_queue_sectors - q->batch_sectors;
}

#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

static inline void blk_started_sectors(struct request *rq, int count)
{
	request_queue_t *q = rq->q;
	if (q && q->can_throttle) {
		atomic_add(count, &q->nr_sectors);
		if (atomic_read(&q->nr_sectors) < 0) {
			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
			BUG();
		}
	}
}

static inline void blk_finished_sectors(struct request *rq, int count)
{
	request_queue_t *q = rq->q;
	if (q && q->can_throttle) {
		atomic_sub(count, &q->nr_sectors);
		smp_mb();
		if (q->rq.count >= q->batch_requests && !blk_oversized_queue_batch(q)) {
			if (waitqueue_active(&q->wait_for_requests))
				wake_up(&q->wait_for_requests);
		}
		if (atomic_read(&q->nr_sectors) < 0) {
			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
			BUG();
		}
	}
}
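
/*
 * Example (illustrative): a throttling-aware driver brackets each piece
 * of I/O it issues so that nr_sectors tracks what is really in flight:
 *
 *	blk_started_sectors(req, count);
 *	... hand "count" sectors to the hardware ...
 *	blk_finished_sectors(req, count);
 */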

static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
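
/*
 * For example, blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12; any size of 512 or less also yields 9.
 */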

static inline unsigned int block_size(kdev_t dev)
{
	int retval = BLOCK_SIZE;
	int major = MAJOR(dev);

	if (blksize_size[major]) {
		int minor = MINOR(dev);
		if (blksize_size[major][minor])
			retval = blksize_size[major][minor];
	}
	return retval;
}
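
/*
 * Example (illustrative): the lookups above are often combined to obtain
 * a device's block size as a shift count:
 *
 *	unsigned int bits = blksize_bits(block_size(dev));
 */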

#endif /* _LINUX_BLKDEV_H */