--- linux-2.6.32/drivers/infiniband/Kconfig~	2009-12-05 00:26:03.663774916 +0100
+++ linux-2.6.32/drivers/infiniband/Kconfig	2009-12-05 00:26:05.914179759 +0100
@@ -37,7 +37,6 @@
 config INFINIBAND_ADDR_TRANS
 	bool
 	depends on INET
-	depends on !(INFINIBAND = y && IPV6 = m)
 	default y
 
 source "drivers/infiniband/hw/mthca/Kconfig"
--- linux-2.6.33/scripts/mod/modpost.c~	2010-02-24 19:52:17.000000000 +0100
+++ linux-2.6.33/scripts/mod/modpost.c	2010-03-07 14:26:47.242168558 +0100
@@ -15,7 +15,8 @@
 #include <stdio.h>
 #include <ctype.h>
 #include "modpost.h"
-#include "../../include/generated/autoconf.h"
+// PLD architectures don't use CONFIG_SYMBOL_PREFIX
+//#include "../../include/generated/autoconf.h"
 #include "../../include/linux/license.h"
 
 /* Some toolchains use a `_' prefix for all user symbols. */
commit 87b09f1f25cd1e01d7c50bf423c7fe33027d7511
Author: stephen hemminger
Date:   Fri Feb 12 06:58:00 2010 +0000

    sky2: dont enable PME legacy mode

    This bit is not changed by the vendor driver and should be left alone.
    The documentation implies this is a debug bit.
      0 = WAKE# only asserted when VMAIN not available
      1 = WAKE# depends on wake events and is independent of VMAIN.

    Signed-off-by: Stephen Hemminger
    Signed-off-by: David S. Miller

diff --git b/drivers/net/sky2.c a/drivers/net/sky2.c
index 2494842..edf37aa 100644
--- b/drivers/net/sky2.c
+++ a/drivers/net/sky2.c
@@ -733,6 +733,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
 	unsigned port = sky2->port;
 	enum flow_control save_mode;
 	u16 ctrl;
+	u32 reg1;
 
 	/* Bring hardware out of reset */
 	sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -786,6 +787,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
 	/* Disable PiG firmware */
 	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
 
+	/* Turn on legacy PCI-Express PME mode */
+	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+	reg1 |= PCI_Y2_PME_LEGACY;
+	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+
 	/* block receiver */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 }
Date: Mon, 11 Jul 2011 09:59:57 -0400
From: Christoph Hellwig
To: xfs@oss.sgi.com
Cc: arekm@maven.pl
Subject: [PATCH] xfs: start periodic workers later
Message-ID: <20110711135957.GA23737@infradead.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
User-Agent: Mutt/1.5.21 (2010-09-15)

Start the periodic sync workers only after we have finished xfs_mountfs
and thus fully set up the filesystem structures.  Without this we can
call into xfs_qm_sync before the quotainfo structure is set up if the
mount takes unusually long, and probably hit other incomplete states
as well.

Also clean up the xfs_fs_fill_super error path by using consistent
label names, and removing an impossible-to-reach case.
Reported-by: Arkadiusz Miskiewicz
Signed-off-by: Christoph Hellwig

Index: xfs/fs/xfs/linux-2.6/xfs_super.c
===================================================================
--- xfs.orig/fs/xfs/linux-2.6/xfs_super.c	2011-07-11 12:02:56.762758869 +0200
+++ xfs/fs/xfs/linux-2.6/xfs_super.c	2011-07-11 12:09:20.817344934 +0200
@@ -1411,37 +1411,35 @@ xfs_fs_fill_super(
 	sb->s_time_gran = 1;
 	set_posix_acl_flag(sb);
 
-	error = xfs_syncd_init(mp);
-	if (error)
-		goto out_filestream_unmount;
-
 	xfs_inode_shrinker_register(mp);
 
 	error = xfs_mountfs(mp);
 	if (error)
-		goto out_syncd_stop;
+		goto out_filestream_unmount;
+
+	error = xfs_syncd_init(mp);
+	if (error)
+		goto out_unmount;
 
 	root = igrab(VFS_I(mp->m_rootip));
 	if (!root) {
 		error = ENOENT;
-		goto fail_unmount;
+		goto out_syncd_stop;
 	}
 	if (is_bad_inode(root)) {
 		error = EINVAL;
-		goto fail_vnrele;
+		goto out_syncd_stop;
 	}
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
 		error = ENOMEM;
-		goto fail_vnrele;
+		goto out_iput;
 	}
 
 	return 0;
 
- out_syncd_stop:
-	xfs_inode_shrinker_unregister(mp);
-	xfs_syncd_stop(mp);
  out_filestream_unmount:
+	xfs_inode_shrinker_unregister(mp);
 	xfs_filestream_unmount(mp);
  out_free_sb:
 	xfs_freesb(mp);
@@ -1455,17 +1453,12 @@ xfs_fs_fill_super(
  out:
 	return -error;
 
- fail_vnrele:
-	if (sb->s_root) {
-		dput(sb->s_root);
-		sb->s_root = NULL;
-	} else {
-		iput(root);
-	}
-
- fail_unmount:
-	xfs_inode_shrinker_unregister(mp);
+ out_iput:
+	iput(root);
+ out_syncd_stop:
 	xfs_syncd_stop(mp);
+ out_unmount:
+	xfs_inode_shrinker_unregister(mp);
 
 	/*
 	 * Blow away any referenced inode in the filestreams cache.

On Sat, 2 Jul 2011, Andi Kleen wrote:

> > The problem is that blk_peek_request() calls scsi_prep_fn(), which
> > does this:
> >
> > 	struct scsi_device *sdev = q->queuedata;
> > 	int ret = BLKPREP_KILL;
> >
> > 	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
> > 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
> > 	return scsi_prep_return(q, req, ret);
> >
> > It doesn't check to see if sdev is NULL, nor does
> > scsi_setup_blk_pc_cmnd().  That accounts for this error:

> I actually added a NULL check in scsi_setup_blk_pc_cmnd early on,
> but that just caused RCU CPU stalls afterwards and then eventually
> a hung system.

The RCU problem is likely to be a separate issue.  It might even be a
result of the use-after-free problem with the elevator.

At any rate, it's clear that the crash in the refcounting log you
posted occurred because scsi_setup_blk_pc_cmnd() called
scsi_prep_state_check(), which tried to dereference the NULL pointer.

Would you like to try this patch to see if it fixes the problem?  As I
said before, I'm not certain it's the best thing to do, but it worked
on my system.
Alan Stern

Index: usb-3.0/drivers/scsi/scsi_lib.c
===================================================================
--- usb-3.0.orig/drivers/scsi/scsi_lib.c
+++ usb-3.0/drivers/scsi/scsi_lib.c
@@ -1247,6 +1247,8 @@ int scsi_prep_fn(struct request_queue *q
 	struct scsi_device *sdev = q->queuedata;
 	int ret = BLKPREP_KILL;
 
+	if (!sdev)
+		return ret;
 	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
 	return scsi_prep_return(q, req, ret);
Index: usb-3.0/drivers/scsi/scsi_sysfs.c
===================================================================
--- usb-3.0.orig/drivers/scsi/scsi_sysfs.c
+++ usb-3.0/drivers/scsi/scsi_sysfs.c
@@ -322,6 +322,8 @@ static void scsi_device_dev_release_user
 		kfree(evt);
 	}
 
+	/* Freeing the queue signals to block that we're done */
+	scsi_free_queue(sdev->request_queue);
 	blk_put_queue(sdev->request_queue);
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
@@ -936,8 +938,6 @@ void __scsi_remove_device(struct scsi_de
 	/* cause the request function to reject all I/O requests */
 	sdev->request_queue->queuedata = NULL;
 
-	/* Freeing the queue signals to block that we're done */
-	scsi_free_queue(sdev->request_queue);
 	put_device(dev);
 }

As reported by Ben Greer and Francois Romieu.  The code path in the
netif_carrier code leads it to try to cancel a delayed work item and
then re-schedule it immediately:

  netif_carrier_on
    --> linkwatch_fire_event
      --> linkwatch_schedule_work
        --> cancel_delayed_work
          --> del_timer_sync

If __cancel_delayed_work is used instead, there is no waiting for a
running linkwatch_event.  There is a race between linkwatch_event
running and the re-scheduling, but it is harmless to schedule an extra
scan of the linkwatch queue.

Signed-off-by: Stephen Hemminger

--- a/net/core/link_watch.c	2011-07-22 15:25:31.027533604 -0700
+++ b/net/core/link_watch.c	2011-07-22 15:31:27.531520028 -0700
@@ -126,7 +126,7 @@ static void linkwatch_schedule_work(int
 		return;
 
 	/* It's already running which is good enough. */
-	if (!cancel_delayed_work(&linkwatch_work))
+	if (!__cancel_delayed_work(&linkwatch_work))
 		return;
 
 	/* Otherwise we reschedule it again for immediate execution. */
commit 3326c784c9f492e988617d93f647ae0cfd4c8d09
Author: Jiri Pirko
Date:   Wed Jul 20 04:54:38 2011 +0000

    forcedeth: do vlan cleanup

    - unify vlan and nonvlan rx path
    - kill np->vlangrp and nv_vlan_rx_register
    - allow rx vlan accel to be turned on/off via ethtool (set_features)

    Signed-off-by: Jiri Pirko
    Signed-off-by: David S. Miller

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 537b695..e64cd9c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -820,9 +820,6 @@ struct fe_priv {
 	struct nv_skb_map *tx_end_flip;
 	int tx_stop;
 
-	/* vlan fields */
-	struct vlan_group *vlangrp;
-
 	/* msi/msi-x fields */
 	u32 msi_flags;
 	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -2766,17 +2763,13 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			skb->protocol = eth_type_trans(skb, dev);
 			prefetch(skb->data);
 
-			if (likely(!np->vlangrp)) {
-				napi_gro_receive(&np->napi, skb);
-			} else {
-				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
-				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-					vlan_gro_receive(&np->napi, np->vlangrp,
-							 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
-				} else {
-					napi_gro_receive(&np->napi, skb);
-				}
+			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+			if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
+
+				__vlan_hwaccel_put_tag(skb, vid);
 			}
+			napi_gro_receive(&np->napi, skb);
 
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += len;
@@ -4484,6 +4477,27 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
+static void nv_vlan_mode(struct net_device *dev, u32 features)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	spin_lock_irq(&np->lock);
+
+	if (features & NETIF_F_HW_VLAN_RX)
+		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
+	else
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+
+	if (features & NETIF_F_HW_VLAN_TX)
+		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
+	else
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+
+	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+	spin_unlock_irq(&np->lock);
+}
+
 static int nv_set_features(struct net_device *dev, u32 features)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -4504,6 +4518,9 @@ static int nv_set_features(struct net_device *dev, u32 features)
 		spin_unlock_irq(&np->lock);
 	}
 
+	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+		nv_vlan_mode(dev, features);
+
 	return 0;
 }
 
@@ -4879,29 +4896,6 @@ static const struct ethtool_ops ops = {
 	.self_test = nv_self_test,
 };
 
-static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	spin_lock_irq(&np->lock);
-
-	/* save vlan group */
-	np->vlangrp = grp;
-
-	if (grp) {
-		/* enable vlan on MAC */
-		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
-	} else {
-		/* disable vlan on MAC */
-		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
-		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
-	}
-
-	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-
-	spin_unlock_irq(&np->lock);
-}
-
 /* The mgmt unit and driver use a semaphore to access the phy during init */
 static int nv_mgmt_acquire_sema(struct net_device *dev)
 {
@@ -5208,7 +5202,6 @@ static const struct net_device_ops nv_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= nv_set_mac_address,
 	.ndo_set_multicast_list	= nv_set_multicast,
-	.ndo_vlan_rx_register	= nv_vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= nv_poll_controller,
 #endif
@@ -5226,7 +5219,6 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= nv_set_mac_address,
 	.ndo_set_multicast_list	= nv_set_multicast,
-	.ndo_vlan_rx_register	= nv_vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= nv_poll_controller,
 #endif
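Context note (not part of the patch set): the cleanup above moves forcedeth onto
the 3.x-era hw_features/ndo_set_features contract, where toggling VLAN offload
with ethtool -K flips NETIF_F_HW_VLAN_RX/TX and the core then calls the
driver's ndo_set_features() so it can reprogram the MAC, which is what
nv_set_features() -> nv_vlan_mode() does in the diff above. A minimal sketch of
that contract is shown below; the example_* names are hypothetical, only the
feature flags and the overall flow are taken from the diff.

#include <linux/netdevice.h>

/* Illustrative sketch of a 3.x-era ndo_set_features handler (features were
 * still a plain u32 at this point).  Not forcedeth code. */
static int example_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;	/* bits userspace wants flipped */

	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)) {
		/* reprogram VLAN tag stripping/insertion in the NIC here,
		 * the way nv_vlan_mode() rewrites NvRegTxRxControl */
	}

	return 0;	/* on success the core updates dev->features itself */
}
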
commit 0891b0e08937aaec2c4734acb94c5ff8042313bb
Author: Jiri Pirko
Date:   Tue Jul 26 10:19:28 2011 +0000

    forcedeth: fix vlans

    For some reason, when rxaccel is disabled, NV_RX3_VLAN_TAG_PRESENT is
    still set and some pseudorandom vids appear.  So check for
    NETIF_F_HW_VLAN_RX as well.  Also set hw_features correctly and set
    vlan mode on probe.

    Signed-off-by: Jiri Pirko
    Signed-off-by: David S. Miller

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e64cd9c..e55df30 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			prefetch(skb->data);
 
 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
-			if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+
+			/*
+			 * There's need to check for NETIF_F_HW_VLAN_RX here.
+			 * Even if vlan rx accel is disabled,
+			 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
+			 */
+			if (dev->features & NETIF_F_HW_VLAN_RX &&
+			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
 
 				__vlan_hwaccel_put_tag(skb, vid);
@@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
 			NETIF_F_TSO | NETIF_F_RXCSUM;
-		dev->features |= dev->hw_features;
 	}
 
 	np->vlanctl_bits = 0;
 	if (id->driver_data & DEV_HAS_VLAN) {
 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
-		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+		dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
 	}
 
+	dev->features |= dev->hw_features;
+
 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5607,6 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	nv_vlan_mode(dev, dev->features);
+
 	netif_carrier_off(dev);
 
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
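
A note on why the rx check above matters (again, not part of the patches): in
kernels of this vintage, __vlan_hwaccel_put_tag() does no validation at all, it
merely records the tag in the skb for the VLAN core, roughly as in the sketch
below (paraphrased from memory of include/linux/if_vlan.h of that era, not a
verbatim copy).  So if the hardware bit is bogus because rx acceleration is
off, a pseudorandom vid would be handed upstream; gating on
dev->features & NETIF_F_HW_VLAN_RX prevents that.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Rough sketch of the 2.6.x/3.x helper: it only stores the tag, so the
 * correctness of "vlan_tci" is entirely the driver's responsibility. */
static inline struct sk_buff *sketch_hwaccel_put_tag(struct sk_buff *skb,
						     u16 vlan_tci)
{
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;	/* mark tag as present */
	return skb;
}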