1 commit 6eca9004dfcb274a502438a591df5b197690afb1
2 Author: Jens Axboe <jens.axboe@oracle.com>
3 Date: Thu Oct 25 10:14:47 2007 +0200
5 [BLOCK] Fix bad sharing of tag busy list on queues with shared tag maps
7 For the locking to work, only the tag map and tag bit map may be shared
8 (incidentally, I was just explaining this to Nick yesterday, but I
9 apparently didn't review the code well enough myself). But we also share
10 the busy list! The busy_list must be queue private, or we need a
11 block_queue_tag covering lock as well.
13 So we have to move the busy_list to the queue. This'll work fine, and
14 it'll actually also fix a problem with blk_queue_invalidate_tags() which
15 will invalidate tags across all shared queues. This is a bit confusing,
16 the low level driver should call it for each queue separately since
17 otherwise you cannot kill tags on just a single queue for e.g. a hard
18 drive that stops responding. Since the function has no callers
19 currently, it's not an issue.
21 Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
23 diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
24 index a8a1810..56f2646 100644
25 --- a/block/ll_rw_blk.c
26 +++ b/block/ll_rw_blk.c
27 @@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
28 retval = atomic_dec_and_test(&bqt->refcnt);
31 - BUG_ON(!list_empty(&bqt->busy_list));
33 kfree(bqt->tag_index);
34 bqt->tag_index = NULL;
35 @@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
36 if (init_tag_map(q, tags, depth))
39 - INIT_LIST_HEAD(&tags->busy_list);
41 atomic_set(&tags->refcnt, 1);
43 @@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
46 q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
47 + INIT_LIST_HEAD(&q->tag_busy_list);
51 @@ -1122,7 +1121,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
53 bqt->tag_index[tag] = rq;
54 blkdev_dequeue_request(rq);
55 - list_add(&rq->queuelist, &bqt->busy_list);
56 + list_add(&rq->queuelist, &q->tag_busy_list);
60 @@ -1143,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
62 void blk_queue_invalidate_tags(struct request_queue *q)
64 - struct blk_queue_tag *bqt = q->queue_tags;
65 struct list_head *tmp, *n;
68 - list_for_each_safe(tmp, n, &bqt->busy_list) {
69 + list_for_each_safe(tmp, n, &q->tag_busy_list) {
70 rq = list_entry_rq(tmp);
73 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
74 index bbf906a..8396db2 100644
75 --- a/include/linux/blkdev.h
76 +++ b/include/linux/blkdev.h
77 @@ -341,7 +341,6 @@ enum blk_queue_state {
78 struct blk_queue_tag {
79 struct request **tag_index; /* map of busy tags */
80 unsigned long *tag_map; /* bit map of free/busy tags */
81 - struct list_head busy_list; /* fifo list of busy tags */
82 int busy; /* current depth */
83 int max_depth; /* what we will send to device */
84 int real_max_depth; /* what the array can hold */
85 @@ -435,6 +434,7 @@ struct request_queue
86 unsigned int dma_alignment;
88 struct blk_queue_tag *queue_tags;
89 + struct list_head tag_busy_list;
91 unsigned int nr_sorted;
92 unsigned int in_flight;