From a050e7a84813b1b933f60ab8fcd5df9745a41035 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Sat, 14 May 2011 18:12:38 +0200
Subject: [PATCH] block: switch from 2.6.38 to 2.6.39

---
 block/bfq-iosched.c |   29 ++++++++++-------------------
 1 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 20f5901..bbe5503 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -120,8 +120,8 @@ static DEFINE_IDA(cic_index_ida);
                                 { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
 
 #define RQ_CIC(rq)              \
-        ((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_BFQQ(rq)             ((rq)->elevator_private2)
+        ((struct cfq_io_context *) (rq)->elevator_private[0])
+#define RQ_BFQQ(rq)             ((rq)->elevator_private[1])
 
 #include "bfq-ioc.c"
 #include "bfq-sched.c"
@@ -156,13 +156,6 @@ static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
         }
 }
 
-static inline int bfq_queue_empty(struct request_queue *q)
-{
-        struct bfq_data *bfqd = q->elevator->elevator_data;
-
-        return bfqd->queued == 0;
-}
-
 /*
  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
  * We choose the request that is closesr to the head right now.  Distance
@@ -1681,10 +1674,10 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
         if (bfqq == bfqd->active_queue) {
                 /*
                  * If there is just this request queued and the request
-                 * is small, just make sure the queue is plugged and exit.
+                 * is small, just exit.
                  * In this way, if the disk is being idled to wait for a new
                  * request from the active queue, we avoid unplugging the
-                 * device for this request.
+                 * device now.
                  *
                  * By doing so, we spare the disk to be committed
                  * to serve just a small request. On the contrary, we wait for
@@ -1695,7 +1688,6 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                  */
                 if (bfqq->queued[rq_is_sync(rq)] == 1 &&
                     blk_rq_sectors(rq) < 32) {
-                        blk_plug_device(bfqd->queue);
                         return;
                 }
                 if (bfq_bfqq_wait_request(bfqq)) {
@@ -1714,7 +1706,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                         if (bfq_bfqq_budget_timeout(bfqq))
                                 bfq_bfqq_expire(bfqd, bfqq, 0,
                                                 BFQ_BFQQ_BUDGET_TIMEOUT);
-                        __blk_run_queue(bfqd->queue, false);
+                        __blk_run_queue(bfqd->queue);
                 }
         }
 }
@@ -1878,8 +1870,8 @@ static void bfq_put_request(struct request *rq)
 
                 put_io_context(RQ_CIC(rq)->ioc);
 
-                rq->elevator_private = NULL;
-                rq->elevator_private2 = NULL;
+                rq->elevator_private[0] = NULL;
+                rq->elevator_private[1] = NULL;
 
                 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
                              bfqq, bfqq->ref);
@@ -1927,8 +1919,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
 
         spin_unlock_irqrestore(q->queue_lock, flags);
 
-        rq->elevator_private = cic;
-        rq->elevator_private2 = bfqq;
+        rq->elevator_private[0] = cic;
+        rq->elevator_private[1] = bfqq;
 
         return 0;
 
@@ -1950,7 +1942,7 @@ static void bfq_kick_queue(struct work_struct *work)
         unsigned long flags;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -2398,7 +2390,6 @@ static struct elevator_type iosched_bfq = {
                 .elevator_add_req_fn =          bfq_insert_request,
                 .elevator_activate_req_fn =     bfq_activate_request,
                 .elevator_deactivate_req_fn =   bfq_deactivate_request,
-                .elevator_queue_empty_fn =      bfq_queue_empty,
                 .elevator_completed_req_fn =    bfq_completed_request,
                 .elevator_former_req_fn =       elv_rb_former_request,
                 .elevator_latter_req_fn =       elv_rb_latter_request,
-- 
1.7.0.4