diff -urNp linux-2450/drivers/block/ll_rw_blk.c linux-2460/drivers/block/ll_rw_blk.c
--- linux-2450/drivers/block/ll_rw_blk.c
+++ linux-2460/drivers/block/ll_rw_blk.c
@@ -595,12 +595,20 @@ static struct request *__get_request_wai
 	register struct request *rq;
 	DECLARE_WAITQUEUE(wait, current);
 
-	generic_unplug_device(q);
 	add_wait_queue_exclusive(&q->wait_for_requests[rw], &wait);
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (q->rq[rw].count == 0)
+		if (q->rq[rw].count == 0) {
+			/*
+			 * All we care about is not stalling if a request
+			 * is released after we set TASK_UNINTERRUPTIBLE.
+			 * This is the most efficient place to unplug the
+			 * queue in case we hit the race, so we can get
+			 * the request without waiting.
+			 */
+			generic_unplug_device(q);
-			schedule();
+			schedule_timeout(HZ);
+		}
 		spin_lock_irq(&io_request_lock);
 		rq = get_request(q, rw);
 		spin_unlock_irq(&io_request_lock);
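The hunk above moves the unplug inside the wait loop, after set_current_state(): the task first publishes itself as a waiter, then re-tests the free list, and only unplugs (and sleeps, now with an HZ timeout as a safety net) if the test still fails. Below is a minimal userspace sketch of the ordering this relies on, using C11 seq_cst atomics in place of the kernel barriers; the names waiting and available are hypothetical stand-ins, not kernel APIs.

/*
 * Userspace sketch of the publish-then-test ordering.  The sleeper
 * publishes itself as waiting before re-testing the condition; the
 * waker publishes the condition before testing for waiters.  With
 * both sides fully ordered, at least one of them is guaranteed to
 * see the other, so the wakeup cannot be lost.
 * Build with: cc -pthread race.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int waiting;   /* models add_wait_queue + set_current_state */
static atomic_int available; /* models q->rq[rw].count > 0 */

static void *sleeper(void *arg)
{
	atomic_store(&waiting, 1);       /* full barrier, like set_current_state */
	while (!atomic_load(&available)) /* re-check AFTER publishing ourselves; */
		;                        /* the kernel sleeps in schedule_timeout */
	puts("sleeper: saw the freed request");
	return NULL;
}

static void *waker(void *arg)
{
	atomic_store(&available, 1);     /* models blkdev_release_request */
	if (atomic_load(&waiting))       /* models smp_mb(); waitqueue_active() */
		puts("waker: a waiter is registered, wake it");
	return NULL;
}

int main(void)
{
	pthread_t s, w;
	pthread_create(&s, NULL, sleeper, NULL);
	pthread_create(&w, NULL, waker, NULL);
	pthread_join(s, NULL);
	pthread_join(w, NULL);
	return 0;
}

Because each side performs its store before its load, the interleaving in which the sleeper misses available and the waker simultaneously misses waiting is impossible; this is exactly the pairing that the smp_mb() added to blkdev_release_request() in the next hunk completes.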
@@ -837,8 +845,10 @@ void blkdev_release_request(struct reque
 	if (q) {
 		list_add(&req->queue, &q->rq[rw].free);
 		if (++q->rq[rw].count >= q->batch_requests &&
-		    waitqueue_active(&q->wait_for_requests[rw]))
+		    waitqueue_active(&q->wait_for_requests[rw])) {
+			smp_mb();
 			wake_up(&q->wait_for_requests[rw]);
+		}
 	}
 }
 
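On the waker side, the new smp_mb() orders the ++count store before the waitqueue_active() load; without it, the wake_up() could be skipped even though a task is queued, since nothing else in this path implies a full barrier. A rough C11 equivalent of just this waker-side fence (all names are hypothetical stand-ins):

#include <stdatomic.h>

static atomic_int free_count;   /* stands in for q->rq[rw].count */
static atomic_int queue_active; /* stands in for waitqueue_active() */

static void do_wake_up(void)    /* stands in for wake_up() */
{
}

void release_request(void)
{
	atomic_fetch_add_explicit(&free_count, 1, memory_order_relaxed);
	/* like the added smp_mb(): the count update must be globally
	 * visible before the waitqueue is sampled */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&queue_active, memory_order_relaxed))
		do_wake_up();
}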
@@ -1210,6 +1219,11 @@ void submit_bh(int rw, struct buffer_hea
 
 	generic_make_request(rw, bh);
 
+	/* fix race condition with wait_on_buffer() */
+	smp_mb(); /* spin_unlock may have inclusive semantics */
+	if (waitqueue_active(&bh->b_wait))
+		wake_up(&bh->b_wait);
+
 	switch (rw) {
 		case WRITE:
 			kstat.pgpgout += count;
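submit_bh() gets the same treatment for tasks sleeping in __wait_on_buffer(): after the I/O is queued, a full barrier followed by a waitqueue_active() test guarantees that a sleeper who raced with the submission is woken and re-tests buffer_locked(). Expressed as a helper purely for illustration (wake_buffer_waiters is a hypothetical name, not part of the patch; kernel 2.4 context assumed, not standalone code):

/* smp_mb() is required because spin_unlock() only guarantees
 * release ("inclusive") semantics on some architectures, so
 * without it the waitqueue_active() read could be reordered
 * before the submission's stores. */
static inline void wake_buffer_waiters(struct buffer_head *bh)
{
	smp_mb();
	if (waitqueue_active(&bh->b_wait))
		wake_up(&bh->b_wait);
}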
diff -urNp linux-2450/fs/buffer.c linux-2460/fs/buffer.c
--- linux-2450/fs/buffer.c
+++ linux-2460/fs/buffer.c
@@ -153,10 +153,23 @@ void __wait_on_buffer(struct buffer_head
 	get_bh(bh);
 	add_wait_queue(&bh->b_wait, &wait);
 	do {
-		run_task_queue(&tq_disk);
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!buffer_locked(bh))
 			break;
+		/*
+		 * We must read TQ_ACTIVE in tq_disk only after the
+		 * add_wait_queue effect is visible to other cpus.
+		 * We could unplug a few lines above and it wouldn't
+		 * matter, but we can't do that right after
+		 * add_wait_queue without an smp_mb() in between,
+		 * because spin_unlock has inclusive semantics.
+		 * Doing it here is the most efficient place: we
+		 * don't do a spurious unplug if we get a racy
+		 * wakeup that makes buffer_locked return 0, and
+		 * doing it here avoids an explicit smp_mb(); we
+		 * rely on the implicit one in set_task_state.
+		 */
+		run_task_queue(&tq_disk);
 		schedule();
 	} while (buffer_locked(bh));
 	tsk->state = TASK_RUNNING;
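For reference, the resulting order of operations in the fixed __wait_on_buffer() loop, annotated step by step (an excerpt of the hunk above, not standalone code):

do {
	set_task_state(tsk, TASK_UNINTERRUPTIBLE); /* publish the waiter;
						      implies a full barrier */
	if (!buffer_locked(bh))
		break;                  /* racy wakeup already unlocked the
					   buffer: skip the spurious unplug */
	run_task_queue(&tq_disk);       /* unplug only now, when the
					   add_wait_queue effect is visible */
	schedule();                     /* safe: either we saw the plugged
					   queue and ran it, or the io path
					   sees us on bh->b_wait and wakes us */
} while (buffer_locked(bh));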
@@ -1539,6 +1552,9 @@ static int __block_write_full_page(struc
 
 	/* Done - end_buffer_io_async will unlock */
 	SetPageUptodate(page);
+
+	wakeup_page_waiters(page);
+
 	return 0;
 
 out:
@@ -1570,6 +1586,7 @@ out:
 	} while (bh != head);
 	if (need_unlock)
 		UnlockPage(page);
+	wakeup_page_waiters(page);
 	return err;
 }
 
@@ -1797,6 +1814,8 @@ int block_read_full_page(struct page *pa
 		else
 			submit_bh(READ, bh);
 	}
+
+	wakeup_page_waiters(page);
 
 	return 0;
 }
@@ -2410,6 +2429,7 @@ int brw_page(int rw, struct page *page,
 		submit_bh(rw, bh);
 		bh = next;
 	} while (bh != head);
+	wakeup_page_waiters(page);
 	return 0;
 }
 
diff -urNp linux-2450/fs/reiserfs/inode.c linux-2460/fs/reiserfs/inode.c
--- linux-2450/fs/reiserfs/inode.c
+++ linux-2460/fs/reiserfs/inode.c
@@ -1993,6 +1993,7 @@ static int reiserfs_write_full_page(stru
 	 */
 	if (nr) {
 		submit_bh_for_writepage(arr, nr) ;
+		wakeup_page_waiters(page);
 	} else {
 		UnlockPage(page) ;
 	}
diff -urNp linux-2450/include/linux/pagemap.h linux-2460/include/linux/pagemap.h
--- linux-2450/include/linux/pagemap.h
+++ linux-2460/include/linux/pagemap.h
@@ -97,6 +97,8 @@ static inline void wait_on_page(struct p
 	___wait_on_page(page);
 }
 
+extern void wakeup_page_waiters(struct page * page);
+
 /*
  * Returns locked page at given index in given cache, creating it if needed.
  */
diff -urNp linux-2450/kernel/ksyms.c linux-2460/kernel/ksyms.c
--- linux-2450/kernel/ksyms.c
+++ linux-2460/kernel/ksyms.c
@@ -320,6 +320,7 @@ EXPORT_SYMBOL(filemap_fdatasync);
 EXPORT_SYMBOL(filemap_fdatawait);
 EXPORT_SYMBOL(lock_page);
 EXPORT_SYMBOL(unlock_page);
+EXPORT_SYMBOL_GPL(wakeup_page_waiters);
 
 /* device registration */
 EXPORT_SYMBOL(register_chrdev);
diff -urNp linux-2450/mm/filemap.c linux-2460/mm/filemap.c
--- linux-2450/mm/filemap.c
+++ linux-2460/mm/filemap.c
@@ -810,6 +810,20 @@ static inline wait_queue_head_t *page_waitqueue(struct page *page)
 	return &wait[hash];
 }
 
+/*
+ * This must be called after every submit_bh with end_io
+ * callbacks that would result in the blkdev layer waking
+ * up the page after a queue unplug.
+ */
+void wakeup_page_waiters(struct page * page)
+{
+	wait_queue_head_t * head;
+
+	head = page_waitqueue(page);
+	if (waitqueue_active(head))
+		wake_up(head);
+}
+
 /*
  * Wait for a page to get unlocked.
 *
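wakeup_page_waiters() has to follow every page-level submission path, which is why the hunks above add it to __block_write_full_page(), block_read_full_page(), brw_page() and reiserfs_write_full_page(). A caller-side sketch of the pattern (example_writepage is a hypothetical name; kernel 2.4 context assumed, not standalone code):

static int example_writepage(struct page *page)
{
	struct buffer_head *bh = page->buffers, *head = bh;

	do {
		submit_bh(WRITE, bh);   /* may leave the request queue
					   plugged behind a sleeper */
		bh = bh->b_this_page;
	} while (bh != head);

	/* hash to the page's shared waitqueue and force any task in
	 * ___wait_on_page() to re-test PG_locked and re-run its unplug */
	wakeup_page_waiters(page);
	return 0;
}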