]> git.pld-linux.org Git - packages/kernel.git/blame - kernel-small_fixes.patch
- 4.1.16
[packages/kernel.git] / kernel-small_fixes.patch
CommitLineData
08aa9d92 1--- linux-2.6.33/scripts/mod/modpost.c~ 2010-02-24 19:52:17.000000000 +0100
2+++ linux-2.6.33/scripts/mod/modpost.c 2010-03-07 14:26:47.242168558 +0100
3@@ -15,7 +15,8 @@
4 #include <stdio.h>
5 #include <ctype.h>
6 #include "modpost.h"
7-#include "../../include/generated/autoconf.h"
8+// PLD architectures don't use CONFIG_SYMBOL_PREFIX
9+//#include "../../include/generated/autoconf.h"
10 #include "../../include/linux/license.h"
11
12 /* Some toolchains use a `_' prefix for all user symbols. */
13
2136e199
AM
14--- linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh~ 2011-07-22 04:17:23.000000000 +0200
15+++ linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh 2011-08-25 21:26:04.799150642 +0200
16@@ -9,6 +9,12 @@
17 $cc -print-file-name=lib${lib}.${ext} | grep -q /
18 if [ $? -eq 0 ]; then
19 echo "-l${lib}"
20+ for libt in tinfow tinfo ; do
21+ $cc -print-file-name=lib${libt}.${ext} | grep -q /
22+ if [ $? -eq 0 ]; then
23+ echo "-l${libt}"
24+ fi
25+ done
26 exit
27 fi
28 done
7e7bde06 29
75d2b04b
AM
30From 30927520dbae297182990bb21d08762bcc35ce1d Mon Sep 17 00:00:00 2001
31From: Eric Dumazet <edumazet@google.com>
32Date: Wed, 9 Sep 2015 21:55:07 -0700
33Subject: [PATCH] tcp_cubic: better follow cubic curve after idle period
34
35Jana Iyengar found an interesting issue on CUBIC :
36
37The epoch is only updated/reset initially and when experiencing losses.
38The delta "t" of now - epoch_start can be arbitrary large after app idle
39as well as the bic_target. Consequentially the slope (inverse of
40ca->cnt) would be really large, and eventually ca->cnt would be
41lower-bounded in the end to 2 to have delayed-ACK slow-start behavior.
42
43This particularly shows up when slow_start_after_idle is disabled
44as a dangerous cwnd inflation (1.5 x RTT) after a few seconds of idle
45time.
46
47Jana's initial fix was to reset epoch_start if app limited,
48but Neal pointed out it would ask the CUBIC algorithm to recalculate the
49curve so that we again start growing steeply upward from where cwnd is
50now (as CUBIC does just after a loss). Ideally we'd want the cwnd growth
51curve to be the same shape, just shifted later in time by the amount of
52the idle period.
53
54Reported-by: Jana Iyengar <jri@google.com>
55Signed-off-by: Eric Dumazet <edumazet@google.com>
56Signed-off-by: Yuchung Cheng <ycheng@google.com>
57Signed-off-by: Neal Cardwell <ncardwell@google.com>
58Cc: Stephen Hemminger <stephen@networkplumber.org>
59Cc: Sangtae Ha <sangtae.ha@gmail.com>
60Cc: Lawrence Brakmo <lawrence@brakmo.org>
61Signed-off-by: David S. Miller <davem@davemloft.net>
62---
63 net/ipv4/tcp_cubic.c | 16 ++++++++++++++++
64 1 file changed, 16 insertions(+)
65
66diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
67index 28011fb1..c6ded6b 100644
68--- a/net/ipv4/tcp_cubic.c
69+++ b/net/ipv4/tcp_cubic.c
70@@ -151,6 +151,21 @@ static void bictcp_init(struct sock *sk)
71 tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
72 }
73
74+static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
75+{
76+ if (event == CA_EVENT_TX_START) {
77+ s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
78+ struct bictcp *ca = inet_csk_ca(sk);
79+
80+ /* We were application limited (idle) for a while.
81+ * Shift epoch_start to keep cwnd growth to cubic curve.
82+ */
83+ if (ca->epoch_start && delta > 0)
84+ ca->epoch_start += delta;
85+ return;
86+ }
87+}
88+
89 /* calculate the cubic root of x using a table lookup followed by one
90 * Newton-Raphson iteration.
91 * Avg err ~= 0.195%
92@@ -450,6 +465,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
93 .cong_avoid = bictcp_cong_avoid,
94 .set_state = bictcp_state,
95 .undo_cwnd = bictcp_undo_cwnd,
96+ .cwnd_event = bictcp_cwnd_event,
97 .pkts_acked = bictcp_acked,
98 .owner = THIS_MODULE,
99 .name = "cubic",
f0d3d1b5
AM
100
101From 7a29ac474a47eb8cf212b45917683ae89d6fa13b Mon Sep 17 00:00:00 2001
102From: Chris Mason <clm@fb.com>
103Date: Tue, 10 Nov 2015 10:10:34 +1100
104Subject: xfs: give all workqueues rescuer threads
105
106We're consistently hitting deadlocks here with XFS on recent kernels.
107After some digging through the crash files, it looks like everyone in
108the system is waiting for XFS to reclaim memory.
109
110Something like this:
111
112PID: 2733434 TASK: ffff8808cd242800 CPU: 19 COMMAND: "java"
113 #0 [ffff880019c53588] __schedule at ffffffff818c4df2
114 #1 [ffff880019c535d8] schedule at ffffffff818c5517
115 #2 [ffff880019c535f8] _xfs_log_force_lsn at ffffffff81316348
116 #3 [ffff880019c53688] xfs_log_force_lsn at ffffffff813164fb
117 #4 [ffff880019c536b8] xfs_iunpin_wait at ffffffff8130835e
118 #5 [ffff880019c53728] xfs_reclaim_inode at ffffffff812fd453
119 #6 [ffff880019c53778] xfs_reclaim_inodes_ag at ffffffff812fd8c7
120 #7 [ffff880019c53928] xfs_reclaim_inodes_nr at ffffffff812fe433
121 #8 [ffff880019c53958] xfs_fs_free_cached_objects at ffffffff8130d3b9
122 #9 [ffff880019c53968] super_cache_scan at ffffffff811a6f73
123#10 [ffff880019c539c8] shrink_slab at ffffffff811460e6
124#11 [ffff880019c53aa8] shrink_zone at ffffffff8114a53f
125#12 [ffff880019c53b48] do_try_to_free_pages at ffffffff8114a8ba
126#13 [ffff880019c53be8] try_to_free_pages at ffffffff8114ad5a
127#14 [ffff880019c53c78] __alloc_pages_nodemask at ffffffff8113e1b8
128#15 [ffff880019c53d88] alloc_kmem_pages_node at ffffffff8113e671
129#16 [ffff880019c53dd8] copy_process at ffffffff8104f781
130#17 [ffff880019c53ec8] do_fork at ffffffff8105129c
131#18 [ffff880019c53f38] sys_clone at ffffffff810515b6
132#19 [ffff880019c53f48] stub_clone at ffffffff818c8e4d
133
134xfs_log_force_lsn is waiting for logs to get cleaned, which is waiting
135for IO, which is waiting for workers to complete the IO which is waiting
136for worker threads that don't exist yet:
137
138PID: 2752451 TASK: ffff880bd6bdda00 CPU: 37 COMMAND: "kworker/37:1"
139 #0 [ffff8808d20abbb0] __schedule at ffffffff818c4df2
140 #1 [ffff8808d20abc00] schedule at ffffffff818c5517
141 #2 [ffff8808d20abc20] schedule_timeout at ffffffff818c7c6c
142 #3 [ffff8808d20abcc0] wait_for_completion_killable at ffffffff818c6495
143 #4 [ffff8808d20abd30] kthread_create_on_node at ffffffff8106ec82
144 #5 [ffff8808d20abdf0] create_worker at ffffffff8106752f
145 #6 [ffff8808d20abe40] worker_thread at ffffffff810699be
146 #7 [ffff8808d20abec0] kthread at ffffffff8106ef59
147 #8 [ffff8808d20abf50] ret_from_fork at ffffffff818c8ac8
148
149I think we should be using WQ_MEM_RECLAIM to make sure this thread
150pool makes progress when we're not able to allocate new workers.
151
152[dchinner: make all workqueues WQ_MEM_RECLAIM]
153
154Signed-off-by: Chris Mason <clm@fb.com>
155Reviewed-by: Dave Chinner <dchinner@redhat.com>
156Signed-off-by: Dave Chinner <david@fromorbit.com>
157---
158 fs/xfs/xfs_super.c | 7 ++++---
159 1 file changed, 4 insertions(+), 3 deletions(-)
160
161diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
162index 29531ec..65fbfb7 100644
163--- a/fs/xfs/xfs_super.c
164+++ b/fs/xfs/xfs_super.c
165@@ -838,17 +838,18 @@ xfs_init_mount_workqueues(
77a0e9d2
AM
166 goto out_destroy_unwritten;
167
168 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
169- WQ_FREEZABLE, 0, mp->m_fsname);
170+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
171 if (!mp->m_reclaim_workqueue)
172 goto out_destroy_cil;
173
174 mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
175- WQ_FREEZABLE|WQ_HIGHPRI, 0, mp->m_fsname);
f0d3d1b5
AM
176+ WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
177+ mp->m_fsname);
77a0e9d2
AM
178 if (!mp->m_log_workqueue)
179 goto out_destroy_reclaim;
180
181 mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
182- WQ_FREEZABLE, 0, mp->m_fsname);
183+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
184 if (!mp->m_eofblocks_workqueue)
185 goto out_destroy_log;
186
f0d3d1b5
AM
187--
188cgit v0.11.2
189
0ca0eed7
AM
190From: Dave Chinner <dchinner@redhat.com>
191
1977e358
AM
192When we do inode readahead in log recovery, we can do the
193readahead before we've replayed the icreate transaction that stamps
194the buffer with inode cores. The inode readahead verifier catches
195this and marks the buffer as !done to indicate that it doesn't yet
196contain valid inodes.
197
198In adding buffer error notification (i.e. setting b_error = -EIO at
199the same time as we clear the done flag) to such a readahead
200verifier failure, we can then get subsequent inode recovery failing
201with this error:
202
203XFS (dm-0): metadata I/O error: block 0xa00060 ("xlog_recover_do..(read#2)") error 5 numblks 32
204
205This occurs when readahead completion races with icreate item replay
206such as:
207
208 inode readahead
209 find buffer
210 lock buffer
211 submit RA io
212 ....
213 icreate recovery
214 xfs_trans_get_buffer
215 find buffer
216 lock buffer
217 <blocks on RA completion>
218 .....
219 <ra completion>
220 fails verifier
221 clear XBF_DONE
222 set bp->b_error = -EIO
223 release and unlock buffer
224 <icreate gains lock>
225 icreate initialises buffer
226 marks buffer as done
227 adds buffer to delayed write queue
228 releases buffer
229
230At this point, we have an initialised inode buffer that is up to
231date but has an -EIO state registered against it. When we finally
232get to recovering an inode in that buffer:
233
234 inode item recovery
235 xfs_trans_read_buffer
236 find buffer
237 lock buffer
238 sees XBF_DONE is set, returns buffer
239 sees bp->b_error is set
240 fail log recovery!
241
242Essentially, we need xfs_trans_get_buf_map() to clear the error status of
243the buffer when doing a lookup. This function returns uninitialised
244buffers, so the buffer returned can not be in an error state and
245none of the code that uses this function expects b_error to be set
246on return. Indeed, there is an ASSERT(!bp->b_error); in the
247transaction case in xfs_trans_get_buf_map() that would have caught
248this if log recovery used transactions....
249
250This patch firstly changes the inode readahead failure to set -EIO
251on the buffer, and secondly changes xfs_buf_get_map() to never
252return a buffer with an error state set so this first change doesn't
253cause unexpected log recovery failures.
254
255Signed-off-by: Dave Chinner <dchinner@redhat.com>
256---
257 fs/xfs/libxfs/xfs_inode_buf.c | 12 +++++++-----
258 fs/xfs/xfs_buf.c | 7 +++++++
259 2 files changed, 14 insertions(+), 5 deletions(-)
260
261diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
262index 1b8d98a..ff17c48 100644
263--- a/fs/xfs/libxfs/xfs_inode_buf.c
264+++ b/fs/xfs/libxfs/xfs_inode_buf.c
265@@ -62,11 +62,12 @@ xfs_inobp_check(
266 * has not had the inode cores stamped into it. Hence for readahead, the buffer
267 * may be potentially invalid.
268 *
269- * If the readahead buffer is invalid, we don't want to mark it with an error,
270- * but we do want to clear the DONE status of the buffer so that a followup read
271- * will re-read it from disk. This will ensure that we don't get an unnecessary
272- * warnings during log recovery and we don't get unnecssary panics on debug
273- * kernels.
274+ * If the readahead buffer is invalid, we need to mark it with an error and
275+ * clear the DONE status of the buffer so that a followup read will re-read it
276+ * from disk. We don't report the error otherwise to avoid warnings during log
277+ * recovery and we don't get unnecssary panics on debug kernels. We use EIO here
278+ * because all we want to do is say readahead failed; there is no-one to report
279+ * the error to, so this will distinguish it from a non-ra verifier failure.
280 */
281 static void
282 xfs_inode_buf_verify(
283@@ -93,6 +94,7 @@ xfs_inode_buf_verify(
284 XFS_RANDOM_ITOBP_INOTOBP))) {
285 if (readahead) {
286 bp->b_flags &= ~XBF_DONE;
287+ xfs_buf_ioerror(bp, -EIO);
288 return;
289 }
290
291diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
292index 45a8ea7..ae86b16 100644
293--- a/fs/xfs/xfs_buf.c
294+++ b/fs/xfs/xfs_buf.c
295@@ -604,6 +604,13 @@ found:
296 }
297 }
298
299+ /*
300+ * Clear b_error if this is a lookup from a caller that doesn't expect
301+ * valid data to be found in the buffer.
302+ */
303+ if (!(flags & XBF_READ))
304+ xfs_buf_ioerror(bp, 0);
305+
306 XFS_STATS_INC(xb_get);
307 trace_xfs_buf_get(bp, flags, _RET_IP_);
308 return bp;
309--
3102.5.0
311
312_______________________________________________
313xfs mailing list
314xfs@oss.sgi.com
315http://oss.sgi.com/mailman/listinfo/xfs
316From: Dave Chinner <dchinner@redhat.com>
317
0ca0eed7
AM
318When we do dquot readahead in log recovery, we do not use a verifier
319as the underlying buffer may not have dquots in it. e.g. the
320allocation operation hasn't yet been replayed. Hence we do not want
321to fail recovery because we detect an operation to be replayed has
322not been run yet. This problem was addressed for inodes in commit
323d891400 ("xfs: inode buffers may not be valid during recovery
324readahead") but the problem was not recognised to exist for dquots
325and their buffers as the dquot readahead did not have a verifier.
326
327The result of not using a verifier is that when the buffer is then
328next read to replay a dquot modification, the dquot buffer verifier
329will only be attached to the buffer if *readahead is not complete*.
330Hence we can read the buffer, replay the dquot changes and then add
331it to the delwri submission list without it having a verifier
332attached to it. This then generates warnings in xfs_buf_ioapply(),
333which catches and warns about this case.
334
335Fix this and make it handle the same readahead verifier error cases
336as for inode buffers by adding a new readahead verifier that has a
337write operation as well as a read operation that marks the buffer as
338not done if any corruption is detected. Also make sure we don't run
339readahead if the dquot buffer has been marked as cancelled by
340recovery.
341
342This will result in readahead either succeeding and the buffer
343having a valid write verifier, or readahead failing and the buffer
344state requiring the subsequent read to resubmit the IO with the new
345verifier. In either case, this will result in the buffer always
346ending up with a valid write verifier on it.
347
348Note: we also need to fix the inode buffer readahead error handling
349to mark the buffer with EIO. Brian noticed the code I copied from
350there wrong during review, so fix it at the same time. Add comments
351linking the two functions that handle readahead verifier errors
352together so we don't forget this behavioural link in future.
353
354cc: <stable@vger.kernel.org> # 3.12 - current
355Signed-off-by: Dave Chinner <dchinner@redhat.com>
1977e358
AM
356Reviewed-by: Brian Foster <bfoster@redhat.com>
357Signed-off-by: Dave Chinner <david@fromorbit.com>
0ca0eed7 358---
0ca0eed7 359 fs/xfs/libxfs/xfs_dquot_buf.c | 36 ++++++++++++++++++++++++++++++------
1977e358 360 fs/xfs/libxfs/xfs_inode_buf.c | 2 ++
0ca0eed7
AM
361 fs/xfs/libxfs/xfs_quota_defs.h | 2 +-
362 fs/xfs/libxfs/xfs_shared.h | 1 +
363 fs/xfs/xfs_log_recover.c | 9 +++++++--
1977e358 364 5 files changed, 41 insertions(+), 9 deletions(-)
0ca0eed7
AM
365
366diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
367index 11cefb2..3cc3cf7 100644
368--- a/fs/xfs/libxfs/xfs_dquot_buf.c
369+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
370@@ -54,7 +54,7 @@ xfs_dqcheck(
371 xfs_dqid_t id,
372 uint type, /* used only when IO_dorepair is true */
373 uint flags,
374- char *str)
375+ const char *str)
376 {
377 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
378 int errs = 0;
379@@ -207,7 +207,8 @@ xfs_dquot_buf_verify_crc(
380 STATIC bool
381 xfs_dquot_buf_verify(
382 struct xfs_mount *mp,
383- struct xfs_buf *bp)
384+ struct xfs_buf *bp,
385+ int warn)
386 {
387 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
388 xfs_dqid_t id = 0;
389@@ -240,8 +241,7 @@ xfs_dquot_buf_verify(
390 if (i == 0)
391 id = be32_to_cpu(ddq->d_id);
392
393- error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
394- "xfs_dquot_buf_verify");
395+ error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
396 if (error)
397 return false;
398 }
399@@ -256,7 +256,7 @@ xfs_dquot_buf_read_verify(
400
401 if (!xfs_dquot_buf_verify_crc(mp, bp))
402 xfs_buf_ioerror(bp, -EFSBADCRC);
403- else if (!xfs_dquot_buf_verify(mp, bp))
404+ else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
405 xfs_buf_ioerror(bp, -EFSCORRUPTED);
406
407 if (bp->b_error)
408@@ -264,6 +264,25 @@ xfs_dquot_buf_read_verify(
409 }
410
411 /*
412+ * readahead errors are silent and simply leave the buffer as !done so a real
413+ * read will then be run with the xfs_dquot_buf_ops verifier. See
414+ * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
415+ * reporting the failure.
416+ */
417+static void
418+xfs_dquot_buf_readahead_verify(
419+ struct xfs_buf *bp)
420+{
421+ struct xfs_mount *mp = bp->b_target->bt_mount;
422+
423+ if (!xfs_dquot_buf_verify_crc(mp, bp) ||
424+ !xfs_dquot_buf_verify(mp, bp, 0)) {
425+ xfs_buf_ioerror(bp, -EIO);
426+ bp->b_flags &= ~XBF_DONE;
427+ }
428+}
429+
430+/*
431 * we don't calculate the CRC here as that is done when the dquot is flushed to
432 * the buffer after the update is done. This ensures that the dquot in the
433 * buffer always has an up-to-date CRC value.
434@@ -274,7 +293,7 @@ xfs_dquot_buf_write_verify(
435 {
436 struct xfs_mount *mp = bp->b_target->bt_mount;
437
438- if (!xfs_dquot_buf_verify(mp, bp)) {
439+ if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
440 xfs_buf_ioerror(bp, -EFSCORRUPTED);
441 xfs_verifier_error(bp);
442 return;
443@@ -287,3 +306,8 @@ const struct xfs_buf_ops xfs_dquot_buf_ops = {
444 .verify_write = xfs_dquot_buf_write_verify,
445 };
446
447+const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
448+
449+ .verify_read = xfs_dquot_buf_readahead_verify,
450+ .verify_write = xfs_dquot_buf_write_verify,
451+};
452diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
1977e358 453index ff17c48..1aabfda 100644
0ca0eed7
AM
454--- a/fs/xfs/libxfs/xfs_inode_buf.c
455+++ b/fs/xfs/libxfs/xfs_inode_buf.c
1977e358
AM
456@@ -68,6 +68,8 @@ xfs_inobp_check(
457 * recovery and we don't get unnecssary panics on debug kernels. We use EIO here
458 * because all we want to do is say readahead failed; there is no-one to report
459 * the error to, so this will distinguish it from a non-ra verifier failure.
0ca0eed7
AM
460+ * Changes to this readahead error behavour also need to be reflected in
461+ * xfs_dquot_buf_readahead_verify().
462 */
463 static void
464 xfs_inode_buf_verify(
0ca0eed7
AM
465diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
466index 1b0a083..f51078f 100644
467--- a/fs/xfs/libxfs/xfs_quota_defs.h
468+++ b/fs/xfs/libxfs/xfs_quota_defs.h
469@@ -153,7 +153,7 @@ typedef __uint16_t xfs_qwarncnt_t;
470 #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
471
472 extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
473- xfs_dqid_t id, uint type, uint flags, char *str);
474+ xfs_dqid_t id, uint type, uint flags, const char *str);
475 extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
476
477 #endif /* __XFS_QUOTA_H__ */
478diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
479index 5be5297..15c3ceb 100644
480--- a/fs/xfs/libxfs/xfs_shared.h
481+++ b/fs/xfs/libxfs/xfs_shared.h
482@@ -49,6 +49,7 @@ extern const struct xfs_buf_ops xfs_inobt_buf_ops;
483 extern const struct xfs_buf_ops xfs_inode_buf_ops;
484 extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
485 extern const struct xfs_buf_ops xfs_dquot_buf_ops;
486+extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
487 extern const struct xfs_buf_ops xfs_sb_buf_ops;
488 extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
489 extern const struct xfs_buf_ops xfs_symlink_buf_ops;
490diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
1977e358 491index c5ecaac..5991cdc 100644
0ca0eed7
AM
492--- a/fs/xfs/xfs_log_recover.c
493+++ b/fs/xfs/xfs_log_recover.c
1977e358 494@@ -3204,6 +3204,7 @@ xlog_recover_dquot_ra_pass2(
0ca0eed7
AM
495 struct xfs_disk_dquot *recddq;
496 struct xfs_dq_logformat *dq_f;
497 uint type;
498+ int len;
499
500
501 if (mp->m_qflags == 0)
1977e358 502@@ -3224,8 +3225,12 @@ xlog_recover_dquot_ra_pass2(
0ca0eed7
AM
503 ASSERT(dq_f);
504 ASSERT(dq_f->qlf_len == 1);
505
506- xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
507- XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
508+ len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
509+ if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
510+ return;
511+
512+ xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
513+ &xfs_dquot_buf_ra_ops);
514 }
515
516 STATIC void
1977e358
AM
517--
5182.5.0
0ca0eed7
AM
519
520_______________________________________________
521xfs mailing list
522xfs@oss.sgi.com
523http://oss.sgi.com/mailman/listinfo/xfs
This page took 0.139009 seconds and 4 git commands to generate.