/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug-in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 */
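/*
 * Each request_queue_t embeds an elevator_t, and queue setup selects a
 * scheduler by value-copying one of the templates from
 * <linux/elevator.h>.  A minimal sketch of that wiring (template names
 * as in this kernel generation):
 *
 *         request_queue_t *q = ...;
 *
 *         elevator_init(&q->elevator, ELEVATOR_LINUS);   // default sorter
 *         elevator_init(&q->elevator, ELEVATOR_NOOP);    // plain FIFO
 *
 * The templates wire up the *_merge, *_merge_cleanup and *_merge_req
 * hooks defined below and supply the default read/write latencies.
 */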
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <linux/module.h>
#include <asm/uaccess.h>
/*
 * This is a bit tricky. It's given that bh and rq are for the same
 * device, but the next request might of course not be. Run through
 * the tests below to decide whether bh should be inserted after rq,
 * given that it can't be merged into an existing request.
 */
inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
                            struct list_head *head)
{
        struct list_head *next;
        struct request *next_rq;

        next = rq->queue.next;
        if (next == head)
                return 0;

        /*
         * if the device is different (usually on a different partition),
         * just check if bh is after rq
         */
        next_rq = blkdev_entry_to_request(next);
        if (next_rq->rq_dev != rq->rq_dev)
                return bh->b_rsector > rq->sector;

        /*
         * ok, rq, next_rq and bh are on the same device. if bh is in between
         * the two, this is the sweet spot
         */
        if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
                return 1;

        /*
         * next_rq is ordered wrt rq, but bh is not in between the two
         */
        if (next_rq->sector > rq->sector)
                return 0;

        /*
         * next_rq and rq not ordered, if we happen to be either before
         * next_rq or after rq insert here anyway
         */
        if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
                return 1;

        return 0;
}
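/*
 * A worked example (all sector numbers, same device throughout): with
 * rq->sector == 100 and next_rq->sector == 200, a bh at b_rsector == 150
 * lands in the sweet spot and we return 1.  If instead next_rq->sector
 * == 50 (the queue wraps here, rq ends an ascending run), a bh at 150 is
 * after rq and a bh at 20 is before next_rq, so both return 1, while a
 * bh at 75 falls in neither gap and returns 0.
 */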
int elevator_linus_merge(request_queue_t *q, struct request **req,
                         struct list_head * head,
                         struct buffer_head *bh, int rw,
                         int max_sectors)
{
        struct list_head *entry = &q->queue_head;
        unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;

        while ((entry = entry->prev) != head) {
                struct request *__rq = blkdev_entry_to_request(entry);

                /*
                 * simple "aging" of requests in queue: every request
                 * this scan passes over loses one tick of its latency
                 * budget, and an expired one stops the scan
                 */
                if (__rq->elevator_sequence-- <= 0)
                        break;

                if (__rq->waiting)
                        continue;
                if (__rq->rq_dev != bh->b_rdev)
                        continue;
                if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
                        *req = __rq;
                if (__rq->cmd != rw)
                        continue;
                if (__rq->nr_sectors + count > max_sectors)
                        continue;
                /*
                 * don't merge into a request that is nearly expired: it
                 * cannot absorb another "count" sectors of passover
                 */
                if (__rq->elevator_sequence < count)
                        break;
                if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
                        ret = ELEVATOR_BACK_MERGE;
                        *req = __rq;
                        break;
                } else if (__rq->sector - count == bh->b_rsector) {
                        ret = ELEVATOR_FRONT_MERGE;
                        __rq->elevator_sequence -= count;
                        *req = __rq;
                        break;
                }
        }

        return ret;
}
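/*
 * A sketch of how the caller consumes the verdict (modeled on
 * __make_request() in ll_rw_blk.c, simplified rather than verbatim):
 *
 *         el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw,
 *                                              max_sectors);
 *         switch (el_ret) {
 *         case ELEVATOR_BACK_MERGE:    // grow req at its tail with bh
 *         case ELEVATOR_FRONT_MERGE:   // grow req at its head with bh
 *                 ...
 *         case ELEVATOR_NO_MERGE:      // allocate a fresh request and
 *                 ...                  // insert it near *req, or at the
 *         }                            // tail if *req stayed NULL
 */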
void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
{
        struct list_head *entry = &req->queue, *head = &q->queue_head;

        /*
         * second pass scan of requests that got passed over, if any
         */
        while ((entry = entry->next) != head) {
                struct request *tmp = blkdev_entry_to_request(entry);
                tmp->elevator_sequence -= count;
        }
}
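/*
 * Example of the accounting: if a 16-sector buffer was merged into req,
 * every request still queued behind req just had 16 more sectors pushed
 * in ahead of it, so each one's elevator_sequence shrinks by 16 and its
 * aging deadline moves correspondingly closer.
 */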
void elevator_linus_merge_req(struct request *req, struct request *next)
{
        if (next->elevator_sequence < req->elevator_sequence)
                req->elevator_sequence = next->elevator_sequence;
}
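/*
 * When two adjacent requests are fused by the block layer, the survivor
 * inherits the smaller (i.e. more aged) sequence of the pair, so a merge
 * can never reset the starvation protection of the request that vanishes.
 */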
/*
 * See if we can find a request that this buffer can be coalesced with.
 */
int elevator_noop_merge(request_queue_t *q, struct request **req,
                        struct list_head * head,
                        struct buffer_head *bh, int rw,
                        int max_sectors)
{
        struct list_head *entry;
        unsigned int count = bh->b_size >> 9;

        if (list_empty(&q->queue_head))
                return ELEVATOR_NO_MERGE;

        entry = &q->queue_head;
        while ((entry = entry->prev) != head) {
                struct request *__rq = blkdev_entry_to_request(entry);

                if (__rq->cmd != rw)
                        continue;
                if (__rq->rq_dev != bh->b_rdev)
                        continue;
                if (__rq->nr_sectors + count > max_sectors)
                        continue;
                if (__rq->waiting)
                        continue;
                if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
                        *req = __rq;
                        return ELEVATOR_BACK_MERGE;
                } else if (__rq->sector - count == bh->b_rsector) {
                        *req = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        *req = blkdev_entry_to_request(q->queue_head.prev);
        return ELEVATOR_NO_MERGE;
}
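/*
 * Note the FIFO fallback: when nothing merged, *req is left pointing at
 * the current tail of the queue, so the caller queues the new request at
 * the back and no seek-order sorting is ever attempted.
 */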
void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
void elevator_noop_merge_req(struct request *req, struct request *next) {}
int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t output;

        output.queue_ID = elevator->queue_ID;
        output.read_latency = elevator->read_latency;
        output.write_latency = elevator->write_latency;
        output.max_bomb_segments = 0;

        if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
                return -EFAULT;

        return 0;
}
int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t input;

        if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
                return -EFAULT;

        if (input.read_latency < 0)
                return -EINVAL;
        if (input.write_latency < 0)
                return -EINVAL;

        elevator->read_latency = input.read_latency;
        elevator->write_latency = input.write_latency;
        return 0;
}
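/*
 * These two handlers sit behind the BLKELVGET/BLKELVSET block device
 * ioctls, which is how the user-space elvtune utility reads and tunes
 * the latencies.  A minimal user-space sketch (device path is just an
 * example):
 *
 *         blkelv_ioctl_arg_t arg;
 *         int fd = open("/dev/hda", O_RDONLY);
 *
 *         ioctl(fd, BLKELVGET, &arg);
 *         arg.read_latency /= 2;       // let reads expire sooner
 *         ioctl(fd, BLKELVSET, &arg);
 *
 * max_bomb_segments is reported as zero and silently ignored on set;
 * see the 20082000 note in the header above.
 */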
void elevator_init(elevator_t * elevator, elevator_t type)
{
        static unsigned int queue_ID;

        *elevator = type;
        elevator->queue_ID = queue_ID++;
}
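/*
 * "*elevator = type" is a plain struct copy, so each queue gets its own
 * private, independently tunable copy of the chosen template.  The
 * static counter hands out distinct queue_IDs (reported to user space
 * via blkelvget_ioctl()); it is not locked, which presumably relies on
 * queue initialization being serialized.
 */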