++/* ndo_get_stats handler: hand back the stats block embedded in the
++ * net_device itself.
++ */
++static struct net_device_stats *imq_get_stats(struct net_device *dev)
++{
++	struct net_device_stats *stats = &dev->stats;
++
++	return stats;
++}
++
++/* Destructor for skbs freed inside qdiscs at places other than enqueue:
++ * release any netfilter queue entry still attached to the skb, then
++ * restore the control-block backup taken by skb_save_cb().
++ */
++static void imq_skb_destructor(struct sk_buff *skb)
++{
++	struct nf_queue_entry *entry;
++
++	entry = skb->nf_queue_entry;
++	skb->nf_queue_entry = NULL;
++
++	if (entry != NULL) {
++		nf_queue_entry_release_refs(entry);
++		kfree(entry);
++	}
++
++	skb_restore_cb(skb); /* undo the skb_save_cb() backup */
++}
++
++/* Sanitize skb->queue_mapping when a packet exits the IMQ device so it
++ * is never left out of range for the real transmit device.
++ */
++static void imq_done_check_queue_mapping(struct sk_buff *skb,
++				struct net_device *dev)
++{
++	unsigned int q;
++
++	/* skb->dev is the IMQ device itself or NULL: no real device to
++	 * validate against, so just reset the mapping to queue 0.
++	 */
++	if (unlikely(skb->dev == NULL || skb->dev == dev)) {
++		skb_set_queue_mapping(skb, 0);
++		return;
++	}
++
++	/* Fold an over-large mapping back into the real device's range. */
++	q = skb_get_queue_mapping(skb);
++	if (unlikely(q >= skb->dev->real_num_tx_queues)) {
++		q = (u16)((u32)q % skb->dev->real_num_tx_queues);
++		skb_set_queue_mapping(skb, q);
++	}
++}
++
++/* ndo_start_xmit for the IMQ pseudo device.  Packets reaching this point
++ * have been detoured through a qdisc attached to imqN; account for them,
++ * restore the netfilter state saved by imq_nf_queue() and re-inject them
++ * with NF_ACCEPT so they continue along their original path.
++ * Always returns NETDEV_TX_OK (the device never back-pressures here).
++ */
++static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++	rcu_read_lock();
++
++	/* Detach the queue entry first so a later free of the skb cannot
++	 * release it a second time via imq_skb_destructor().
++	 */
++	skb->nf_queue_entry = NULL;
++	netif_trans_update(dev);
++
++	/* TX accounting happens even for the orphan-packet path below. */
++	dev->stats.tx_bytes += skb->len;
++	dev->stats.tx_packets++;
++
++	if (unlikely(entry == NULL)) {
++		/* We don't know what is going on here.. packet is queued for
++		 * imq device, but (probably) not by us.
++		 *
++		 * If this packet was not send here by imq_nf_queue(), then
++		 * skb_save_cb() was not used and skb_free() should not show:
++		 *   WARNING: IMQ: kfree_skb: skb->cb_next:..
++		 * and/or
++		 *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
++		 *
++		 * However if this message is shown, then IMQ is somehow broken
++		 * and you should report this to linuximq.net.
++		 */
++
++		/* imq_dev_xmit is black hole that eats all packets, report that
++		 * we eat this packet happily and increase dropped counters.
++		 */
++
++		dev->stats.tx_dropped++;
++		dev_kfree_skb(skb);
++
++		rcu_read_unlock();
++		return NETDEV_TX_OK;
++	}
++
++	skb_restore_cb(skb); /* restore skb->cb saved by imq_nf_queue() */
++
++	/* Clear IMQ markers/destructor before handing the skb back, so it
++	 * does not get detoured into IMQ again.
++	 */
++	skb->imq_flags = 0;
++	skb->destructor = NULL;
++
++	imq_done_check_queue_mapping(skb, dev);
++
++	nf_reinject(entry, NF_ACCEPT);
++
++	rcu_read_unlock();
++	return NETDEV_TX_OK;
++}
++
++/* Look up the net_device named "imq<index>" across all network
++ * namespaces, cache it in imq_devs_cache[index] and return it.
++ * Returns ERR_PTR(-ENODEV) when no such device exists.
++ */
++static struct net_device *get_imq_device_by_index(int index)
++{
++	struct net_device *dev = NULL;
++	struct net *net;
++	char buf[8]; /* "imq" + up to 4 digits + NUL; assumes index <= 9999 */
++
++	/* get device by name and cache result */
++	snprintf(buf, sizeof(buf), "imq%d", index);
++
++	/* Search device from all namespaces.
++	 * NOTE(review): for_each_net() normally requires the net list lock
++	 * (net_rwsem/RTNL) to be held — confirm the caller's context.
++	 */
++	for_each_net(net) {
++		dev = dev_get_by_name(net, buf);
++		if (dev)
++			break;
++	}
++
++	if (WARN_ON_ONCE(dev == NULL)) {
++		/* IMQ device not found. Exotic config? */
++		return ERR_PTR(-ENODEV);
++	}
++
++	imq_devs_cache[index] = dev;
++	/* NOTE(review): the reference taken by dev_get_by_name() is dropped
++	 * here although the pointer is cached and returned; this relies on
++	 * IMQ devices never being unregistered at runtime — verify.
++	 */
++	dev_put(dev);
++
++	return dev;
++}
++
++/* Duplicate a queue entry with GFP_ATOMIC and take references on the
++ * netfilter state it points at.  Returns the copy, or NULL when the
++ * allocation fails.
++ */
++static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
++{
++	struct nf_queue_entry *copy;
++
++	copy = kmemdup(e, e->size, GFP_ATOMIC);
++	if (copy == NULL)
++		return NULL;
++
++	nf_queue_entry_get_refs(copy);
++	return copy;
++}
++
++#ifdef CONFIG_BRIDGE_NETFILTER
++/* When called from bridge netfilter, skb->data must point to MAC header
++ * before calling skb_gso_segment(). Else, original MAC header is lost
++ * and segmented skbs will be sent to wrong destination.
++ */
++static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
++{
++	if (skb->nf_bridge)
++		__skb_push(skb, skb->network_header - skb->mac_header);
++}
++
++/* Inverse of nf_bridge_adjust_skb_data(): move skb->data back to the
++ * network header once segmentation is done.
++ */
++static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
++{
++	if (skb->nf_bridge)
++		__skb_pull(skb, skb->network_header - skb->mac_header);
++}
++#else
++/* Without bridge netfilter support these adjustments are no-ops. */
++#define nf_bridge_adjust_skb_data(s) do {} while (0)
++#define nf_bridge_adjust_segmented_data(s) do {} while (0)
++#endif
++
++/* Release the netfilter references held by @entry, then free it. */
++static void free_entry(struct nf_queue_entry *entry)
++{
++	nf_queue_entry_release_refs(entry);
++	kfree(entry);
++}
++
++static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
++
++/* Queue one segment of a GSO-segmented packet to the IMQ device.
++ * The final segment reuses @entry (its skb pointer is swapped in);
++ * earlier segments are unlinked from the list and queued with a
++ * duplicated entry of their own.  Returns the __imq_nf_queue() result,
++ * or -ENOMEM when duplicating the entry fails.
++ */
++static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
++				struct net_device *dev, struct sk_buff *skb)
++{
++	int ret = -ENOMEM;
++	struct nf_queue_entry *entry_seg;
++
++	nf_bridge_adjust_segmented_data(skb);
++
++	if (skb->next == NULL) { /* last packet, no need to copy entry */
++		struct sk_buff *gso_skb = entry->skb;
++		entry->skb = skb;
++		ret = __imq_nf_queue(entry, dev);
++		if (ret)
++			entry->skb = gso_skb; /* failed: restore original skb */
++		return ret;
++	}
++
++	/* Not the last segment: detach it from the segment list. */
++	skb->next = NULL;
++
++	entry_seg = nf_queue_entry_dup(entry);
++	if (entry_seg) {
++		entry_seg->skb = skb;
++		ret = __imq_nf_queue(entry_seg, dev);
++		if (ret)
++			free_entry(entry_seg);
++	}
++	return ret;
++}
++