1 --- linux-2.6.32/drivers/infiniband/Kconfig~ 2009-12-05 00:26:03.663774916 +0100
2 +++ linux-2.6.32/drivers/infiniband/Kconfig 2009-12-05 00:26:05.914179759 +0100
4 config INFINIBAND_ADDR_TRANS
7 - depends on !(INFINIBAND = y && IPV6 = m)
10 source "drivers/infiniband/hw/mthca/Kconfig"
11 --- linux-2.6.33/scripts/mod/modpost.c~ 2010-02-24 19:52:17.000000000 +0100
12 +++ linux-2.6.33/scripts/mod/modpost.c 2010-03-07 14:26:47.242168558 +0100
17 -#include "../../include/generated/autoconf.h"
18 +// PLD architectures don't use CONFIG_SYMBOL_PREFIX
19 +//#include "../../include/generated/autoconf.h"
20 #include "../../include/linux/license.h"
22 /* Some toolchains use a `_' prefix for all user symbols. */
24 commit 87b09f1f25cd1e01d7c50bf423c7fe33027d7511
25 Author: stephen hemminger <shemminger@vyatta.com>
26 Date: Fri Feb 12 06:58:00 2010 +0000
28 sky2: dont enable PME legacy mode
30 This bit is not changed by vendor driver, and should be left alone.
31 The documentation implies this is a debug bit.
32 0 = WAKE# only asserted when VMAIN not available
33 1 = WAKE# is dependent on wake events and independent of VMAIN.
35 Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
36 Signed-off-by: David S. Miller <davem@davemloft.net>
38 diff --git b/drivers/net/sky2.c a/drivers/net/sky2.c
39 index 2494842..edf37aa 100644
40 --- b/drivers/net/sky2.c
41 +++ a/drivers/net/sky2.c
42 @@ -733,6 +733,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
43 unsigned port = sky2->port;
44 enum flow_control save_mode;
48 /* Bring hardware out of reset */
49 sky2_write16(hw, B0_CTST, CS_RST_CLR);
50 @@ -786,6 +787,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
51 /* Disable PiG firmware */
52 sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
54 + /* Turn on legacy PCI-Express PME mode */
55 + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
56 + reg1 |= PCI_Y2_PME_LEGACY;
57 + sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
60 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
62 Date: Mon, 11 Jul 2011 09:59:57 -0400
63 From: Christoph Hellwig <hch@infradead.org>
66 Subject: [PATCH] xfs: start periodic workers later
67 Message-ID: <20110711135957.GA23737@infradead.org>
69 Content-Type: text/plain;
71 Content-Disposition: inline
72 User-Agent: Mutt/1.5.21 (2010-09-15)
74 Start the periodic sync workers only after we have finished xfs_mountfs
75 and thus fully set up the filesystem structures. Without this we can
76 call into xfs_qm_sync before the quotainfo structure is set up if the
77 mount takes unusually long, and probably hit other incomplete states
80 Also clean up the xfs_fs_fill_super error path by using consistent
81 label names, and removing an impossible to reach case.
83 Reported-by: Arkadiusz Miskiewicz <arekm@maven.pl>
84 Signed-off-by: Christoph Hellwig <hch@lst.de>
86 Index: xfs/fs/xfs/linux-2.6/xfs_super.c
87 ===================================================================
88 --- xfs.orig/fs/xfs/linux-2.6/xfs_super.c 2011-07-11 12:02:56.762758869 +0200
89 +++ xfs/fs/xfs/linux-2.6/xfs_super.c 2011-07-11 12:09:20.817344934 +0200
90 @@ -1411,37 +1411,35 @@ xfs_fs_fill_super(
92 set_posix_acl_flag(sb);
94 - error = xfs_syncd_init(mp);
96 - goto out_filestream_unmount;
98 xfs_inode_shrinker_register(mp);
100 error = xfs_mountfs(mp);
102 - goto out_syncd_stop;
103 + goto out_filestream_unmount;
105 + error = xfs_syncd_init(mp);
109 root = igrab(VFS_I(mp->m_rootip));
113 + goto out_syncd_stop;
115 if (is_bad_inode(root)) {
118 + goto out_syncd_stop;
120 sb->s_root = d_alloc_root(root);
130 - xfs_inode_shrinker_unregister(mp);
131 - xfs_syncd_stop(mp);
132 out_filestream_unmount:
133 + xfs_inode_shrinker_unregister(mp);
134 xfs_filestream_unmount(mp);
137 @@ -1455,17 +1453,12 @@ xfs_fs_fill_super(
150 - xfs_inode_shrinker_unregister(mp);
156 + xfs_inode_shrinker_unregister(mp);
159 * Blow away any referenced inode in the filestreams cache.
160 On Sat, 2 Jul 2011, Andi Kleen wrote:
162 > > The problem is that blk_peek_request() calls scsi_prep_fn(), which
165 > > struct scsi_device *sdev = q->queuedata;
166 > > int ret = BLKPREP_KILL;
168 > > if (req->cmd_type == REQ_TYPE_BLOCK_PC)
169 > > ret = scsi_setup_blk_pc_cmnd(sdev, req);
170 > > return scsi_prep_return(q, req, ret);
172 > > It doesn't check to see if sdev is NULL, nor does
173 > > scsi_setup_blk_pc_cmnd(). That accounts for this error:
175 > I actually added a NULL check in scsi_setup_blk_pc_cmnd early on,
176 > but that just caused RCU CPU stalls afterwards and then eventually
179 The RCU problem is likely to be a separate issue. It might even be a
180 result of the use-after-free problem with the elevator.
182 At any rate, it's clear that the crash in the refcounting log you
183 posted occurred because scsi_setup_blk_pc_cmnd() called
184 scsi_prep_state_check(), which tried to dereference the NULL pointer.
186 Would you like to try this patch to see if it fixes the problem? As I
187 said before, I'm not certain it's the best thing to do, but it worked
195 Index: usb-3.0/drivers/scsi/scsi_lib.c
196 ===================================================================
197 --- usb-3.0.orig/drivers/scsi/scsi_lib.c
198 +++ usb-3.0/drivers/scsi/scsi_lib.c
199 @@ -1247,6 +1247,8 @@ int scsi_prep_fn(struct request_queue *q
200 struct scsi_device *sdev = q->queuedata;
201 int ret = BLKPREP_KILL;
205 if (req->cmd_type == REQ_TYPE_BLOCK_PC)
206 ret = scsi_setup_blk_pc_cmnd(sdev, req);
207 return scsi_prep_return(q, req, ret);
208 Index: usb-3.0/drivers/scsi/scsi_sysfs.c
209 ===================================================================
210 --- usb-3.0.orig/drivers/scsi/scsi_sysfs.c
211 +++ usb-3.0/drivers/scsi/scsi_sysfs.c
212 @@ -322,6 +322,8 @@ static void scsi_device_dev_release_user
216 + /* Freeing the queue signals to block that we're done */
217 + scsi_free_queue(sdev->request_queue);
218 blk_put_queue(sdev->request_queue);
219 /* NULL queue means the device can't be used */
220 sdev->request_queue = NULL;
221 @@ -936,8 +938,6 @@ void __scsi_remove_device(struct scsi_de
222 /* cause the request function to reject all I/O requests */
223 sdev->request_queue->queuedata = NULL;
225 - /* Freeing the queue signals to block that we're done */
226 - scsi_free_queue(sdev->request_queue);
233 To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
234 the body of a message to majordomo@vger.kernel.org
235 More majordomo info at http://vger.kernel.org/majordomo-info.html
236 Please read the FAQ at http://www.tux.org/lkml/
237 commit 3326c784c9f492e988617d93f647ae0cfd4c8d09
238 Author: Jiri Pirko <jpirko@redhat.com>
239 Date: Wed Jul 20 04:54:38 2011 +0000
241 forcedeth: do vlan cleanup
243 - unify vlan and nonvlan rx path
244 - kill np->vlangrp and nv_vlan_rx_register
245 - allow to turn on/off rx vlan accel via ethtool (set_features)
247 Signed-off-by: Jiri Pirko <jpirko@redhat.com>
248 Signed-off-by: David S. Miller <davem@davemloft.net>
250 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
251 index 537b695..e64cd9c 100644
252 --- a/drivers/net/forcedeth.c
253 +++ b/drivers/net/forcedeth.c
254 @@ -820,9 +820,6 @@ struct fe_priv {
255 struct nv_skb_map *tx_end_flip;
259 - struct vlan_group *vlangrp;
261 /* msi/msi-x fields */
263 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
264 @@ -2766,17 +2763,13 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
265 skb->protocol = eth_type_trans(skb, dev);
268 - if (likely(!np->vlangrp)) {
269 - napi_gro_receive(&np->napi, skb);
271 - vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
272 - if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
273 - vlan_gro_receive(&np->napi, np->vlangrp,
274 - vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
276 - napi_gro_receive(&np->napi, skb);
278 + vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
279 + if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
280 + u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
282 + __vlan_hwaccel_put_tag(skb, vid);
284 + napi_gro_receive(&np->napi, skb);
286 dev->stats.rx_packets++;
287 dev->stats.rx_bytes += len;
288 @@ -4484,6 +4477,27 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
292 +static void nv_vlan_mode(struct net_device *dev, u32 features)
294 + struct fe_priv *np = get_nvpriv(dev);
296 + spin_lock_irq(&np->lock);
298 + if (features & NETIF_F_HW_VLAN_RX)
299 + np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
301 + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
303 + if (features & NETIF_F_HW_VLAN_TX)
304 + np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
306 + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
308 + writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
310 + spin_unlock_irq(&np->lock);
313 static int nv_set_features(struct net_device *dev, u32 features)
315 struct fe_priv *np = netdev_priv(dev);
316 @@ -4504,6 +4518,9 @@ static int nv_set_features(struct net_device *dev, u32 features)
317 spin_unlock_irq(&np->lock);
320 + if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
321 + nv_vlan_mode(dev, features);
326 @@ -4879,29 +4896,6 @@ static const struct ethtool_ops ops = {
327 .self_test = nv_self_test,
330 -static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
332 - struct fe_priv *np = get_nvpriv(dev);
334 - spin_lock_irq(&np->lock);
336 - /* save vlan group */
340 - /* enable vlan on MAC */
341 - np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
343 - /* disable vlan on MAC */
344 - np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
345 - np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
348 - writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
350 - spin_unlock_irq(&np->lock);
353 /* The mgmt unit and driver use a semaphore to access the phy during init */
354 static int nv_mgmt_acquire_sema(struct net_device *dev)
356 @@ -5208,7 +5202,6 @@ static const struct net_device_ops nv_netdev_ops = {
357 .ndo_validate_addr = eth_validate_addr,
358 .ndo_set_mac_address = nv_set_mac_address,
359 .ndo_set_multicast_list = nv_set_multicast,
360 - .ndo_vlan_rx_register = nv_vlan_rx_register,
361 #ifdef CONFIG_NET_POLL_CONTROLLER
362 .ndo_poll_controller = nv_poll_controller,
364 @@ -5226,7 +5219,6 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
365 .ndo_validate_addr = eth_validate_addr,
366 .ndo_set_mac_address = nv_set_mac_address,
367 .ndo_set_multicast_list = nv_set_multicast,
368 - .ndo_vlan_rx_register = nv_vlan_rx_register,
369 #ifdef CONFIG_NET_POLL_CONTROLLER
370 .ndo_poll_controller = nv_poll_controller,
372 commit 0891b0e08937aaec2c4734acb94c5ff8042313bb
373 Author: Jiri Pirko <jpirko@redhat.com>
374 Date: Tue Jul 26 10:19:28 2011 +0000
378 For some reason, when rxaccel is disabled, NV_RX3_VLAN_TAG_PRESENT is
379 still set and some pseudorandom vids appear. So check for
380 NETIF_F_HW_VLAN_RX as well. Also correctly set hw_features and set vlan
383 Signed-off-by: Jiri Pirko <jpirko@redhat.com>
384 Signed-off-by: David S. Miller <davem@davemloft.net>
386 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
387 index e64cd9c..e55df30 100644
388 --- a/drivers/net/forcedeth.c
389 +++ b/drivers/net/forcedeth.c
390 @@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
393 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
394 - if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
397 + * There's need to check for NETIF_F_HW_VLAN_RX here.
398 + * Even if vlan rx accel is disabled,
399 + * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
401 + if (dev->features & NETIF_F_HW_VLAN_RX &&
402 + vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
403 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
405 __vlan_hwaccel_put_tag(skb, vid);
406 @@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
407 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
408 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
409 NETIF_F_TSO | NETIF_F_RXCSUM;
410 - dev->features |= dev->hw_features;
413 np->vlanctl_bits = 0;
414 if (id->driver_data & DEV_HAS_VLAN) {
415 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
416 - dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
417 + dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
420 + dev->features |= dev->hw_features;
422 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
423 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
424 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
425 @@ -5607,6 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
429 + nv_vlan_mode(dev, dev->features);
431 netif_carrier_off(dev);
433 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
434 --- linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh~ 2011-07-22 04:17:23.000000000 +0200
435 +++ linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh 2011-08-25 21:26:04.799150642 +0200
437 $cc -print-file-name=lib${lib}.${ext} | grep -q /
438 if [ $? -eq 0 ]; then
440 + for libt in tinfow tinfo ; do
441 + $cc -print-file-name=lib${libt}.${ext} | grep -q /
442 + if [ $? -eq 0 ]; then
449 commit 1d8c95a363bf8cd4d4182dd19c01693b635311c2
450 Author: Dave Chinner <dchinner@redhat.com>
451 Date: Mon Jul 18 03:40:16 2011 +0000
453 xfs: use a cursor for bulk AIL insertion
455 Delayed logging can insert tens of thousands of log items into the
456 AIL at the same LSN. When the committing of log commit records
457 occur, we can get insertions occurring at an LSN that is not at the
458 end of the AIL. If there are thousands of items in the AIL on the
459 tail LSN, each insertion has to walk the AIL to find the correct
460 place to insert the new item into the AIL. This can consume large
461 amounts of CPU time and block other operations from occurring while
462 the traversals are in progress.
464 To avoid this repeated walk, use an AIL cursor to record
465 where we should be inserting the new items into the AIL without
466 having to repeat the walk. The cursor infrastructure already
467 provides this functionality for push walks, so is a simple extension
468 of existing code. While this will not avoid the initial walk, it
469 will avoid repeating it tens of thousands of times during a single
472 This version includes logic improvements from Christoph Hellwig.
474 Signed-off-by: Dave Chinner <dchinner@redhat.com>
475 Reviewed-by: Christoph Hellwig <hch@lst.de>
476 Signed-off-by: Alex Elder <aelder@sgi.com>
478 diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
479 index c83f63b..efc147f 100644
480 --- a/fs/xfs/xfs_trans.c
481 +++ b/fs/xfs/xfs_trans.c
482 @@ -1426,6 +1426,7 @@ xfs_trans_committed(
484 xfs_log_item_batch_insert(
485 struct xfs_ail *ailp,
486 + struct xfs_ail_cursor *cur,
487 struct xfs_log_item **log_items,
489 xfs_lsn_t commit_lsn)
490 @@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
492 spin_lock(&ailp->xa_lock);
493 /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
494 - xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
495 + xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
497 for (i = 0; i < nr_items; i++)
498 IOP_UNPIN(log_items[i], 0);
499 @@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
500 * as an iclog write error even though we haven't started any IO yet. Hence in
501 * this case all we need to do is IOP_COMMITTED processing, followed by an
502 * IOP_UNPIN(aborted) call.
504 + * The AIL cursor is used to optimise the insert process. If commit_lsn is not
505 + * at the end of the AIL, the insert cursor avoids the need to walk
506 + * the AIL to find the insertion point on every xfs_log_item_batch_insert()
507 + * call. This saves a lot of needless list walking and is a net win, even
508 though it slightly increases the amount of AIL lock traffic to set it up
509 + * and tear it down.
512 xfs_trans_committed_bulk(
513 @@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
514 #define LOG_ITEM_BATCH_SIZE 32
515 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
516 struct xfs_log_vec *lv;
517 + struct xfs_ail_cursor cur;
520 + spin_lock(&ailp->xa_lock);
521 + xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
522 + spin_unlock(&ailp->xa_lock);
524 /* unpin all the log items */
525 for (lv = log_vector; lv; lv = lv->lv_next ) {
526 struct xfs_log_item *lip = lv->lv_item;
527 @@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
529 * Not a bulk update option due to unusual item_lsn.
530 * Push into AIL immediately, rechecking the lsn once
531 - * we have the ail lock. Then unpin the item.
532 + * we have the ail lock. Then unpin the item. This does
533 + * not affect the AIL cursor the bulk insert path is
536 spin_lock(&ailp->xa_lock);
537 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
538 @@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
539 /* Item is a candidate for bulk AIL insert. */
540 log_items[i++] = lv->lv_item;
541 if (i >= LOG_ITEM_BATCH_SIZE) {
542 - xfs_log_item_batch_insert(ailp, log_items,
543 + xfs_log_item_batch_insert(ailp, &cur, log_items,
544 LOG_ITEM_BATCH_SIZE, commit_lsn);
547 @@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
549 /* make sure we insert the remainder! */
551 - xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
552 + xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
554 + spin_lock(&ailp->xa_lock);
555 + xfs_trans_ail_cursor_done(ailp, &cur);
556 + spin_unlock(&ailp->xa_lock);
560 diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
561 index 5fc2380..9a69dc0 100644
562 --- a/fs/xfs/xfs_trans_ail.c
563 +++ b/fs/xfs/xfs_trans_ail.c
564 @@ -272,9 +272,9 @@ xfs_trans_ail_cursor_clear(
568 - * Return the item in the AIL with the current lsn.
569 - * Return the current tree generation number for use
570 - * in calls to xfs_trans_next_ail().
571 + * Initialise the cursor to the first item in the AIL with the given @lsn.
572 + * This searches the list from lowest LSN to highest. Pass a @lsn of zero
573 + * to initialise the cursor to the first item in the AIL.
576 xfs_trans_ail_cursor_first(
577 @@ -300,31 +300,97 @@ out:
581 - * splice the log item list into the AIL at the given LSN.
582 + * Initialise the cursor to the last item in the AIL with the given @lsn.
583 + * This searches the list from highest LSN to lowest. If there is no item with
584 + * the value of @lsn, then it sets the cursor to the last item with an LSN lower
587 +static struct xfs_log_item *
588 +__xfs_trans_ail_cursor_last(
589 + struct xfs_ail *ailp,
592 + xfs_log_item_t *lip;
594 + list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
595 + if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
602 + * Initialise the cursor to the last item in the AIL with the given @lsn.
603 + * This searches the list from highest LSN to lowest.
605 +struct xfs_log_item *
606 +xfs_trans_ail_cursor_last(
607 + struct xfs_ail *ailp,
608 + struct xfs_ail_cursor *cur,
611 + xfs_trans_ail_cursor_init(ailp, cur);
612 + cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
617 + * splice the log item list into the AIL at the given LSN. We splice to the
618 + * tail of the given LSN to maintain insert order for push traversals. The
619 + * cursor is optional, allowing repeated updates to the same LSN to avoid
620 + * repeated traversals.
624 - struct xfs_ail *ailp,
625 - struct list_head *list,
627 + struct xfs_ail *ailp,
628 + struct xfs_ail_cursor *cur,
629 + struct list_head *list,
632 - xfs_log_item_t *next_lip;
633 + struct xfs_log_item *lip = cur ? cur->item : NULL;
634 + struct xfs_log_item *next_lip;
636 - /* If the list is empty, just insert the item. */
637 - if (list_empty(&ailp->xa_ail)) {
638 - list_splice(list, &ailp->xa_ail);
641 + * Get a new cursor if we don't have a placeholder or the existing one
642 + * has been invalidated.
644 + if (!lip || (__psint_t)lip & 1) {
645 + lip = __xfs_trans_ail_cursor_last(ailp, lsn);
648 + /* The list is empty, so just splice and return. */
651 + list_splice(list, &ailp->xa_ail);
656 - list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
657 - if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
660 + * Our cursor points to the item we want to insert _after_, so we have
661 + * to update the cursor to point to the end of the list we are splicing
662 + * in so that it points to the correct location for the next splice.
663 + * i.e. before the splice
665 + * lsn -> lsn -> lsn + x -> lsn + x ...
667 + * | cursor points here
669 + * After the splice we have:
671 + * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
673 + * | cursor points here | needs to move here
675 + * So we set the cursor to the last item in the list to be spliced
676 + * before we execute the splice, resulting in the cursor pointing to
677 + * the correct item after the splice occurs.
680 + next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
681 + cur->item = next_lip;
684 - ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
685 - XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
687 - list_splice_init(list, &next_lip->li_ail);
688 + list_splice(list, &lip->li_ail);
692 @@ -645,6 +711,7 @@ xfs_trans_unlocked_item(
694 xfs_trans_ail_update_bulk(
695 struct xfs_ail *ailp,
696 + struct xfs_ail_cursor *cur,
697 struct xfs_log_item **log_items,
699 xfs_lsn_t lsn) __releases(ailp->xa_lock)
700 @@ -674,7 +741,7 @@ xfs_trans_ail_update_bulk(
701 list_add(&lip->li_ail, &tmp);
704 - xfs_ail_splice(ailp, &tmp, lsn);
705 + xfs_ail_splice(ailp, cur, &tmp, lsn);
708 spin_unlock(&ailp->xa_lock);
709 diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
710 index 6b164e9..c0cb408 100644
711 --- a/fs/xfs/xfs_trans_priv.h
712 +++ b/fs/xfs/xfs_trans_priv.h
713 @@ -82,6 +82,7 @@ struct xfs_ail {
714 extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
716 void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
717 + struct xfs_ail_cursor *cur,
718 struct xfs_log_item **log_items, int nr_items,
719 xfs_lsn_t lsn) __releases(ailp->xa_lock);
721 @@ -90,7 +91,7 @@ xfs_trans_ail_update(
722 struct xfs_log_item *lip,
723 xfs_lsn_t lsn) __releases(ailp->xa_lock)
725 - xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
726 + xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
729 void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
730 @@ -111,10 +112,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
731 void xfs_trans_unlocked_item(struct xfs_ail *,
734 -struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
735 +struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
736 struct xfs_ail_cursor *cur,
738 -struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
739 +struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
740 + struct xfs_ail_cursor *cur,
742 +struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
743 struct xfs_ail_cursor *cur);
744 void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
745 struct xfs_ail_cursor *cur);