1 diff -Naupr linux-4.10_orig/drivers/net/imq.c linux-4.10/drivers/net/imq.c
2 --- linux-4.10_orig/drivers/net/imq.c 1970-01-01 07:00:00.000000000 +0700
3 +++ linux-4.10/drivers/net/imq.c 2017-02-28 19:03:58.883221583 +0700
6 + * Pseudo-driver for the intermediate queue device.
8 + * This program is free software; you can redistribute it and/or
9 + * modify it under the terms of the GNU General Public License
10 + * as published by the Free Software Foundation; either version
11 + * 2 of the License, or (at your option) any later version.
13 + * Authors: Patrick McHardy, <kaber@trash.net>
15 + * The first version was written by Martin Devera, <devik@cdi.cz>
20 +#include <linux/module.h>
21 +#include <linux/kernel.h>
22 +#include <linux/moduleparam.h>
23 +#include <linux/list.h>
24 +#include <linux/skbuff.h>
25 +#include <linux/netdevice.h>
26 +#include <linux/etherdevice.h>
27 +#include <linux/rtnetlink.h>
28 +#include <linux/if_arp.h>
29 +#include <linux/netfilter.h>
30 +#include <linux/netfilter_ipv4.h>
31 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
32 + #include <linux/netfilter_ipv6.h>
34 +#include <linux/imq.h>
35 +#include <net/pkt_sched.h>
36 +#include <net/netfilter/nf_queue.h>
37 +#include <net/sock.h>
38 +#include <linux/ip.h>
39 +#include <linux/ipv6.h>
40 +#include <linux/if_vlan.h>
41 +#include <linux/if_pppox.h>
43 +#include <net/ipv6.h>
45 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned int queue_num);
47 +static nf_hookfn imq_nf_hook;
49 +static struct nf_hook_ops imq_ops[] = {
51 + /* imq_ingress_ipv4 */
52 + .hook = imq_nf_hook,
54 + .hooknum = NF_INET_PRE_ROUTING,
55 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
56 + .priority = NF_IP_PRI_MANGLE + 1,
58 + .priority = NF_IP_PRI_NAT_DST + 1,
62 + /* imq_egress_ipv4 */
63 + .hook = imq_nf_hook,
65 + .hooknum = NF_INET_POST_ROUTING,
66 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
67 + .priority = NF_IP_PRI_LAST,
69 + .priority = NF_IP_PRI_NAT_SRC - 1,
72 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74 + /* imq_ingress_ipv6 */
75 + .hook = imq_nf_hook,
77 + .hooknum = NF_INET_PRE_ROUTING,
78 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
79 + .priority = NF_IP6_PRI_MANGLE + 1,
81 + .priority = NF_IP6_PRI_NAT_DST + 1,
85 + /* imq_egress_ipv6 */
86 + .hook = imq_nf_hook,
88 + .hooknum = NF_INET_POST_ROUTING,
89 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
90 + .priority = NF_IP6_PRI_LAST,
92 + .priority = NF_IP6_PRI_NAT_SRC - 1,
98 +#if defined(CONFIG_IMQ_NUM_DEVS)
99 +static int numdevs = CONFIG_IMQ_NUM_DEVS;
101 +static int numdevs = IMQ_MAX_DEVS;
104 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
106 +#define IMQ_MAX_QUEUES 32
107 +static int numqueues = 1;
108 +static u32 imq_hashrnd;
109 +static int imq_dev_accurate_stats = 1;
111 +static inline __be16 pppoe_proto(const struct sk_buff *skb)
113 + return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
114 + sizeof(struct pppoe_hdr)));
117 +static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
119 + unsigned int pull_len;
120 + u16 protocol = skb->protocol;
132 + switch (protocol) {
133 + case htons(ETH_P_8021Q): {
134 + if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
137 + pull_len += VLAN_HLEN;
138 + skb->network_header += VLAN_HLEN;
140 + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
144 + case htons(ETH_P_PPP_SES): {
145 + if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
148 + pull_len += PPPOE_SES_HLEN;
149 + skb->network_header += PPPOE_SES_HLEN;
151 + protocol = pppoe_proto(skb);
155 + case htons(ETH_P_IP): {
156 + const struct iphdr *iph = ip_hdr(skb);
158 + if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
161 + addr1 = iph->daddr;
162 + addr2 = iph->saddr;
164 + ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
166 + ihl = ip_hdrlen(skb);
170 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
171 + case htons(ETH_P_IPV6): {
172 + const struct ipv6hdr *iph = ipv6_hdr(skb);
175 + if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
178 + addr1 = iph->daddr.s6_addr32[3];
179 + addr2 = iph->saddr.s6_addr32[3];
180 + ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
182 + if (unlikely(ihl < 0))
190 + if (pull_len != 0) {
191 + skb_push(skb, pull_len);
192 + skb->network_header -= pull_len;
195 + return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
199 + swap(addr1, addr2);
201 + switch (ip_proto) {
208 + case IPPROTO_UDPLITE: {
209 + if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
210 + if (ports.in16[0] > ports.in16[1])
211 + swap(ports.in16[0], ports.in16[1]);
221 + if (pull_len != 0) {
222 + skb_push(skb, pull_len);
223 + skb->network_header -= pull_len;
226 + hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
228 + return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
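A note on the final return above: rather than `hash % real_num_tx_queues`, the code scales the 32-bit jhash result by the queue count and keeps the upper 32 bits of the 64-bit product, which maps a uniform hash into [0, n) without a division. A minimal userspace sketch of the same trick (names are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Map a uniform 32-bit hash to [0, num_queues) without a modulo:
 * the upper 32 bits of the 64-bit product are the scaled index.
 */
static uint16_t scale_to_queue(uint32_t hash, uint32_t num_queues)
{
        return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
        uint32_t h;

        for (h = 0; h < 8; h++) /* evenly spaced hashes land on queues 0..7 */
                printf("%#010x -> queue %u\n", 0x20000000u * h,
                       scale_to_queue(0x20000000u * h, 8));
        return 0;
}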
231 +static inline bool sk_tx_queue_recorded(struct sock *sk)
233 + return (sk_tx_queue_get(sk) >= 0);
236 +static struct netdev_queue *imq_select_queue(struct net_device *dev,
237 + struct sk_buff *skb)
239 + u16 queue_index = 0;
242 + if (likely(dev->real_num_tx_queues == 1))
245 + /* IMQ can be receiving ingress or egress packets. */
247 + /* Check first if rx_queue is set */
248 + if (skb_rx_queue_recorded(skb)) {
249 + queue_index = skb_get_rx_queue(skb);
253 + /* Check if socket has tx_queue set */
254 + if (sk_tx_queue_recorded(skb->sk)) {
255 + queue_index = sk_tx_queue_get(skb->sk);
259 + /* Try to use the socket hash */
260 + if (skb->sk && skb->sk->sk_hash) {
261 + hash = skb->sk->sk_hash;
263 + (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
267 + /* Generate hash from packet data */
268 + queue_index = imq_hash(dev, skb);
271 + if (unlikely(queue_index >= dev->real_num_tx_queues))
272 + queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
274 + skb_set_queue_mapping(skb, queue_index);
275 + return netdev_get_tx_queue(dev, queue_index);
278 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
280 + return &dev->stats;
283 +/* called for packets kfree'd in qdiscs at places other than enqueue */
284 +static void imq_skb_destructor(struct sk_buff *skb)
286 + struct nf_queue_entry *entry = skb->nf_queue_entry;
288 + skb->nf_queue_entry = NULL;
291 + nf_queue_entry_release_refs(entry);
295 + skb_restore_cb(skb); /* kfree backup */
298 +static void imq_done_check_queue_mapping(struct sk_buff *skb,
299 + struct net_device *dev)
301 + unsigned int queue_index;
303 + /* Don't let queue_mapping be left too large after exiting IMQ */
304 + if (likely(skb->dev != dev && skb->dev != NULL)) {
305 + queue_index = skb_get_queue_mapping(skb);
306 + if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
307 + queue_index = (u16)((u32)queue_index %
308 + skb->dev->real_num_tx_queues);
309 + skb_set_queue_mapping(skb, queue_index);
312 + /* skb->dev was the IMQ device itself or NULL; be on the safe
313 + * side and just clear the queue mapping.
315 + skb_set_queue_mapping(skb, 0);
319 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
321 + struct nf_queue_entry *entry = skb->nf_queue_entry;
325 + skb->nf_queue_entry = NULL;
326 + netif_trans_update(dev);
328 + dev->stats.tx_bytes += skb->len;
329 + dev->stats.tx_packets++;
331 + if (unlikely(entry == NULL)) {
332 + /* We don't know what is going on here... the packet is queued
333 + * for an imq device, but (probably) not by us.
335 + * If this packet was not sent here by imq_nf_queue(), then
336 + * skb_save_cb() was not used and kfree_skb() should not show:
337 + * WARNING: IMQ: kfree_skb: skb->cb_next:..
339 + * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
341 + * However, if these messages are shown, then IMQ is somehow
342 + * broken and you should report this to linuximq.net.
345 + /* imq_dev_xmit is a black hole that eats all packets; report
346 + * that we ate this packet happily and increase the dropped counters.
349 + dev->stats.tx_dropped++;
350 + dev_kfree_skb(skb);
353 + return NETDEV_TX_OK;
356 + skb_restore_cb(skb); /* restore skb->cb */
358 + skb->imq_flags = 0;
359 + skb->destructor = NULL;
361 + imq_done_check_queue_mapping(skb, dev);
363 + nf_reinject(entry, NF_ACCEPT);
366 + return NETDEV_TX_OK;
369 +static struct net_device *get_imq_device_by_index(int index)
371 + struct net_device *dev = NULL;
375 + /* get device by name and cache result */
376 + snprintf(buf, sizeof(buf), "imq%d", index);
378 + /* Search for the device in all namespaces. */
379 + for_each_net(net) {
380 + dev = dev_get_by_name(net, buf);
385 + if (WARN_ON_ONCE(dev == NULL)) {
386 + /* IMQ device not found. Exotic config? */
387 + return ERR_PTR(-ENODEV);
390 + imq_devs_cache[index] = dev;
396 +static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
398 + struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
400 + nf_queue_entry_get_refs(entry);
406 +#ifdef CONFIG_BRIDGE_NETFILTER
407 +/* When called from bridge netfilter, skb->data must point to MAC header
408 + * before calling skb_gso_segment(). Else, original MAC header is lost
409 + * and segmented skbs will be sent to wrong destination.
411 +static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
413 + if (skb->nf_bridge)
414 + __skb_push(skb, skb->network_header - skb->mac_header);
417 +static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
419 + if (skb->nf_bridge)
420 + __skb_pull(skb, skb->network_header - skb->mac_header);
423 +#define nf_bridge_adjust_skb_data(s) do {} while (0)
424 +#define nf_bridge_adjust_segmented_data(s) do {} while (0)
427 +static void free_entry(struct nf_queue_entry *entry)
429 + nf_queue_entry_release_refs(entry);
433 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
435 +static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
436 + struct net_device *dev, struct sk_buff *skb)
439 + struct nf_queue_entry *entry_seg;
441 + nf_bridge_adjust_segmented_data(skb);
443 + if (skb->next == NULL) { /* last packet, no need to copy entry */
444 + struct sk_buff *gso_skb = entry->skb;
446 + ret = __imq_nf_queue(entry, dev);
448 + entry->skb = gso_skb;
454 + entry_seg = nf_queue_entry_dup(entry);
456 + entry_seg->skb = skb;
457 + ret = __imq_nf_queue(entry_seg, dev);
459 + free_entry(entry_seg);
464 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned int queue_num)
466 + struct sk_buff *skb, *segs;
467 + struct net_device *dev;
468 + unsigned int queued;
469 + int index, retval, err;
471 + index = entry->skb->imq_flags & IMQ_F_IFMASK;
472 + if (unlikely(index > numdevs - 1)) {
473 + if (net_ratelimit())
474 + pr_warn("IMQ: invalid device specified, highest is %u\n",
480 + /* check for imq device by index from cache */
481 + dev = imq_devs_cache[index];
482 + if (unlikely(!dev)) {
483 + dev = get_imq_device_by_index(index);
485 + retval = PTR_ERR(dev);
490 + if (unlikely(!(dev->flags & IFF_UP))) {
491 + entry->skb->imq_flags = 0;
492 + retval = -ECANCELED;
496 + /* Since 3.10.x, GSO handling moved here as a result of upstream commit
497 + * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
498 + * skb_gso_segment into nfnetlink_queue module).
500 + * The following code replicates the GSO handling from
501 + * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
506 + switch (entry->state.pf) {
508 + skb->protocol = htons(ETH_P_IP);
511 + skb->protocol = htons(ETH_P_IPV6);
515 + if (!skb_is_gso(entry->skb))
516 + return __imq_nf_queue(entry, dev);
518 + nf_bridge_adjust_skb_data(skb);
519 + segs = skb_gso_segment(skb, 0);
520 + /* Does not use PTR_ERR to limit the number of error codes that can be
521 + * returned by nf_queue. For instance, callers rely on -ECANCELED to
522 + * mean 'ignore this hook'.
530 + struct sk_buff *nskb = segs->next;
531 + if (nskb && nskb->next)
532 + nskb->cb_next = NULL;
534 + err = __imq_nf_queue_gso(entry, dev, segs);
543 + if (err) /* some segments are already queued */
550 + nf_bridge_adjust_segmented_data(skb);
556 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
558 + struct sk_buff *skb_orig, *skb, *skb_shared, *skb_popd;
560 + struct sk_buff *to_free = NULL;
561 + struct netdev_queue *txq;
562 + spinlock_t *root_lock;
564 + int retval = -EINVAL;
565 + unsigned int orig_queue_index;
567 + dev->last_rx = jiffies;
572 + /* skb has owner? => make clone */
573 + if (unlikely(skb->destructor)) {
575 + skb = skb_clone(skb, GFP_ATOMIC);
576 + if (unlikely(!skb)) {
580 + skb->cb_next = NULL;
584 + dev->stats.rx_bytes += skb->len;
585 + dev->stats.rx_packets++;
588 + /* skb->dev == NULL causes problems; try to find the cause. */
589 + if (net_ratelimit()) {
590 + dev_warn(&dev->dev,
591 + "received packet with skb->dev == NULL\n");
598 + /* Disable softirqs for the lock below */
599 + rcu_read_lock_bh();
601 + /* Multi-queue selection */
602 + orig_queue_index = skb_get_queue_mapping(skb);
603 + txq = imq_select_queue(dev, skb);
605 + q = rcu_dereference(txq->qdisc);
606 + if (unlikely(!q->enqueue))
607 + goto packet_not_eaten_by_imq_dev;
609 + skb->nf_queue_entry = entry;
610 + root_lock = qdisc_lock(q);
611 + spin_lock(root_lock);
613 + users = atomic_read(&skb->users);
615 + skb_shared = skb_get(skb); /* increase reference count by one */
617 + /* back up skb->cb, as the qdisc layer will overwrite it */
618 + skb_save_cb(skb_shared);
619 + qdisc_enqueue_root(skb_shared, q, &to_free); /* might kfree_skb */
620 + if (likely(atomic_read(&skb_shared->users) == users + 1)) {
623 + kfree_skb(skb_shared); /* decrease reference count by one */
625 + skb->destructor = &imq_skb_destructor;
627 + skb_popd = qdisc_dequeue_skb(q, &validate);
630 + if (unlikely(skb_orig))
631 + kfree_skb(skb_orig); /* free original */
633 + spin_unlock(root_lock);
636 + /* schedule qdisc dequeue */
637 + __netif_schedule(q);
639 + if (likely(skb_popd)) {
640 + /* Note that we validate skb (GSO, checksum, ...) outside of locks */
642 + skb_popd = validate_xmit_skb_list(skb_popd, dev);
646 + int cpu = smp_processor_id(); /* ok because BHs are off */
648 + txq = skb_get_tx_queue(dev, skb_popd);
650 + The IMQ device will not be frozen or stopped, and transmit always
651 + succeeds, so we skip checking its status and return value for speed.
653 + if (imq_dev_accurate_stats && txq->xmit_lock_owner != cpu) {
654 + HARD_TX_LOCK(dev, txq, cpu);
655 + if (!netif_xmit_frozen_or_stopped(txq)) {
656 + dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
658 + HARD_TX_UNLOCK(dev, txq);
660 + if (!netif_xmit_frozen_or_stopped(txq)) {
661 + dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
666 + /* No skb ready, so schedule the qdisc */
667 + __netif_schedule(q);
670 + rcu_read_unlock_bh();
674 + skb_restore_cb(skb_shared); /* restore skb->cb */
675 + skb->nf_queue_entry = NULL;
677 + * The qdisc dropped the packet and decreased its reference
678 + * count, so we must not free it again, as that would
679 + * actually destroy the skb.
681 + spin_unlock(root_lock);
682 + goto packet_not_eaten_by_imq_dev;
685 +packet_not_eaten_by_imq_dev:
686 + skb_set_queue_mapping(skb, orig_queue_index);
687 + rcu_read_unlock_bh();
689 + /* cloned? restore original */
690 + if (unlikely(skb_orig)) {
692 + entry->skb = skb_orig;
696 + if (unlikely(to_free)) {
697 + kfree_skb_list(to_free);
701 +static unsigned int imq_nf_hook(void *priv,
702 + struct sk_buff *skb,
703 + const struct nf_hook_state *state)
705 + return (skb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
708 +static int imq_close(struct net_device *dev)
710 + netif_stop_queue(dev);
714 +static int imq_open(struct net_device *dev)
716 + netif_start_queue(dev);
720 +static struct device_type imq_device_type = {
724 +static const struct net_device_ops imq_netdev_ops = {
725 + .ndo_open = imq_open,
726 + .ndo_stop = imq_close,
727 + .ndo_start_xmit = imq_dev_xmit,
728 + .ndo_get_stats = imq_get_stats,
731 +static void imq_setup(struct net_device *dev)
733 + dev->netdev_ops = &imq_netdev_ops;
734 + dev->type = ARPHRD_VOID;
735 + dev->mtu = 16000; /* too small? */
736 + dev->tx_queue_len = 11000; /* too big? */
737 + dev->flags = IFF_NOARP;
738 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
739 + NETIF_F_GSO | NETIF_F_HW_CSUM |
741 + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE |
742 + IFF_TX_SKB_SHARING);
745 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
749 + if (tb[IFLA_ADDRESS]) {
750 + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
754 + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
755 + ret = -EADDRNOTAVAIL;
761 + pr_warn("IMQ: imq_validate failed (%d)\n", ret);
765 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
768 + .setup = imq_setup,
769 + .validate = imq_validate,
772 +static const struct nf_queue_handler imq_nfqh = {
773 + .outfn = imq_nf_queue,
776 +static int __init imq_init_hooks(void)
780 + nf_register_queue_imq_handler(&imq_nfqh);
782 + ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
784 + nf_unregister_queue_imq_handler();
789 +#ifdef CONFIG_LOCKDEP
790 + static struct lock_class_key imq_netdev_addr_lock_key;
792 + static void __init imq_dev_set_lockdep_one(struct net_device *dev,
793 + struct netdev_queue *txq, void *arg)
796 + * The IMQ transmit locks can be taken recursively,
797 + * for example with one IMQ rule for input and one for
798 + * output network devices in iptables!
799 + * Until we find a better solution, ignore them.
801 + lockdep_set_novalidate_class(&txq->_xmit_lock);
804 + static void imq_dev_set_lockdep_class(struct net_device *dev)
806 + lockdep_set_class_and_name(&dev->addr_list_lock,
807 + &imq_netdev_addr_lock_key, "_xmit_addr_IMQ");
808 + netdev_for_each_tx_queue(dev, imq_dev_set_lockdep_one, NULL);
811 + static inline void imq_dev_set_lockdep_class(struct net_device *dev)
816 +static int __init imq_init_one(int index)
818 + struct net_device *dev;
821 + dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
825 + ret = dev_alloc_name(dev, dev->name);
829 + dev->rtnl_link_ops = &imq_link_ops;
830 + SET_NETDEV_DEVTYPE(dev, &imq_device_type);
831 + ret = register_netdevice(dev);
835 + imq_dev_set_lockdep_class(dev);
843 +static int __init imq_init_devs(void)
847 + if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
848 + pr_err("IMQ: numdevs has to be betweed 1 and %u\n",
853 + if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
854 + pr_err("IMQ: numqueues has to be betweed 1 and %u\n",
859 + get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
862 + err = __rtnl_link_register(&imq_link_ops);
864 + for (i = 0; i < numdevs && !err; i++)
865 + err = imq_init_one(i);
868 + __rtnl_link_unregister(&imq_link_ops);
869 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
876 +static int __init imq_init_module(void)
880 +#if defined(CONFIG_IMQ_NUM_DEVS)
881 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
882 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
883 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
886 + err = imq_init_devs();
888 + pr_err("IMQ: Error trying imq_init_devs(net)\n");
892 + err = imq_init_hooks();
894 + pr_err("IMQ: imq_init_hooks() failed\n");
895 + rtnl_link_unregister(&imq_link_ops);
896 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
900 + pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d, imq_dev_accurate_stats = %d)\n",
901 + numdevs, numqueues, imq_dev_accurate_stats);
903 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
904 + pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
906 + pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
908 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
909 + pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
911 + pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
917 +static void __exit imq_unhook(void)
919 + nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
920 + nf_unregister_queue_imq_handler();
923 +static void __exit imq_cleanup_devs(void)
925 + rtnl_link_unregister(&imq_link_ops);
926 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
929 +static void __exit imq_exit_module(void)
932 + imq_cleanup_devs();
933 + pr_info("IMQ driver unloaded successfully.\n");
936 +module_init(imq_init_module);
937 +module_exit(imq_exit_module);
939 +module_param(numdevs, int, 0);
940 +module_param(numqueues, int, 0);
941 +module_param(imq_dev_accurate_stats, int, 0);
942 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
943 +MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
944 +MODULE_PARM_DESC(imq_dev_accurate_stats, "Enable accurate imq device stats");
946 +MODULE_AUTHOR("https://github.com/imq/linuximq");
947 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
948 +MODULE_LICENSE("GPL");
949 +MODULE_ALIAS_RTNL_LINK("imq");
950 diff -Naupr linux-4.10_orig/drivers/net/Kconfig linux-4.10/drivers/net/Kconfig
951 --- linux-4.10_orig/drivers/net/Kconfig 2017-02-20 05:34:00.000000000 +0700
952 +++ linux-4.10/drivers/net/Kconfig 2017-02-28 18:44:55.978280593 +0700
953 @@ -260,6 +260,125 @@ config RIONET_RX_SIZE
958 + tristate "IMQ (intermediate queueing device) support"
959 + depends on NETDEVICES && NETFILTER
961 + The IMQ device(s) are used as placeholders for QoS queueing
962 + disciplines. Every packet entering/leaving the IP stack can be
963 + directed through the IMQ device, where it is enqueued/dequeued by the
964 + attached qdisc. This allows you to treat network devices as classes
965 + and distribute bandwidth among them. Iptables is used to specify
966 + through which IMQ device, if any, packets travel.
968 + More information at: https://github.com/imq/linuximq
970 + To compile this driver as a module, choose M here: the module
971 + will be called imq. If unsure, say N.
974 + prompt "IMQ behavior (PRE/POSTROUTING)"
976 + default IMQ_BEHAVIOR_AB
978 + This setting defines how IMQ behaves with respect to its
979 + hooking in PREROUTING and POSTROUTING.
981 + IMQ can work in any of the following ways:
983 + PREROUTING | POSTROUTING
984 + -----------------|-------------------
985 + #1 After NAT | After NAT
986 + #2 After NAT | Before NAT
987 + #3 Before NAT | After NAT
988 + #4 Before NAT | Before NAT
990 + The default behavior (IMQ_BEHAVIOR_AB) is to hook after NAT on
991 + PREROUTING and before NAT on POSTROUTING (#2).
993 + These settings are especially useful when trying to use IMQ
994 + to shape NATed clients.
996 + More information can be found at: https://github.com/imq/linuximq
998 + If unsure, leave the default settings alone.
1000 +config IMQ_BEHAVIOR_AA
1003 + This setting defines how IMQ behaves with respect to its
1004 + hooking in PREROUTING and POSTROUTING.
1006 + Choosing this option will make IMQ hook like this:
1008 + PREROUTING: After NAT
1009 + POSTROUTING: After NAT
1011 + More information can be found at: https://github.com/imq/linuximq
1013 + If unsure, leave the default settings alone.
1015 +config IMQ_BEHAVIOR_AB
1018 + This setting defines how IMQ behaves with respect to its
1019 + hooking in PREROUTING and POSTROUTING.
1021 + Choosing this option will make IMQ hook like this:
1023 + PREROUTING: After NAT
1024 + POSTROUTING: Before NAT
1026 + More information can be found at: https://github.com/imq/linuximq
1028 + If unsure, leave the default settings alone.
1030 +config IMQ_BEHAVIOR_BA
1033 + This setting defines how IMQ behaves with respect to its
1034 + hooking in PREROUTING and POSTROUTING.
1036 + Choosing this option will make IMQ hook like this:
1038 + PREROUTING: Before NAT
1039 + POSTROUTING: After NAT
1041 + More information can be found at: https://github.com/imq/linuximq
1043 + If unsure, leave the default settings alone.
1045 +config IMQ_BEHAVIOR_BB
1048 + This setting defines how IMQ behaves with respect to its
1049 + hooking in PREROUTING and POSTROUTING.
1051 + Choosing this option will make IMQ hook like this:
1053 + PREROUTING: Before NAT
1054 + POSTROUTING: Before NAT
1056 + More information can be found at: https://github.com/imq/linuximq
1058 + If unsure, leave the default settings alone.
1062 +config IMQ_NUM_DEVS
1063 + int "Number of IMQ devices"
1068 + This setting defines how many IMQ devices will be created.
1070 + The default value is 16.
1072 + More information can be found at: https://github.com/imq/linuximq
1074 + If unsure, leave the default settings alone.
1077 tristate "Universal TUN/TAP device driver support"
1079 diff -Naupr linux-4.10_orig/drivers/net/Makefile linux-4.10/drivers/net/Makefile
1080 --- linux-4.10_orig/drivers/net/Makefile 2017-02-20 05:34:00.000000000 +0700
1081 +++ linux-4.10/drivers/net/Makefile 2017-02-28 18:44:55.978280593 +0700
1082 @@ -11,6 +11,7 @@ obj-$(CONFIG_DUMMY) += dummy.o
1083 obj-$(CONFIG_EQUALIZER) += eql.o
1084 obj-$(CONFIG_IFB) += ifb.o
1085 obj-$(CONFIG_MACSEC) += macsec.o
1086 +obj-$(CONFIG_IMQ) += imq.o
1087 obj-$(CONFIG_MACVLAN) += macvlan.o
1088 obj-$(CONFIG_MACVTAP) += macvtap.o
1089 obj-$(CONFIG_MII) += mii.o
1090 diff -Naupr linux-4.10_orig/include/linux/imq.h linux-4.10/include/linux/imq.h
1091 --- linux-4.10_orig/include/linux/imq.h 1970-01-01 07:00:00.000000000 +0700
1092 +++ linux-4.10/include/linux/imq.h 2017-02-28 18:44:55.978280593 +0700
1097 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
1098 +#define IMQ_F_BITS 5
1100 +#define IMQ_F_IFMASK 0x0f
1101 +#define IMQ_F_ENQUEUE 0x10
1103 +#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
1105 +#endif /* _IMQ_H */
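To make the encoding above concrete: the low four bits of skb->imq_flags carry the target device index and bit 4 marks the packet for enqueueing, which is how the xt_IMQ target later in this patch builds the value. A small standalone sketch using the same constants:

#include <assert.h>

#define IMQ_F_BITS      5
#define IMQ_F_IFMASK    0x0f
#define IMQ_F_ENQUEUE   0x10
#define IMQ_MAX_DEVS    (IMQ_F_IFMASK + 1)

int main(void)
{
        unsigned int todev = 3;         /* ask for device imq3 */
        unsigned int flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;

        assert((flags >> IMQ_F_BITS) == 0);     /* fits the 5-bit field */
        assert((flags & IMQ_F_IFMASK) == 3);    /* device index recovered */
        assert(flags & IMQ_F_ENQUEUE);          /* enqueue bit set */
        assert(IMQ_MAX_DEVS == 16);             /* indexes 0..15 */
        return 0;
}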
1107 diff -Naupr linux-4.10_orig/include/linux/netdevice.h linux-4.10/include/linux/netdevice.h
1108 --- linux-4.10_orig/include/linux/netdevice.h 2017-02-20 05:34:00.000000000 +0700
1109 +++ linux-4.10/include/linux/netdevice.h 2017-02-28 18:44:55.978280593 +0700
1110 @@ -3604,6 +3604,19 @@ static inline void netif_tx_unlock_bh(st
1114 +#define HARD_TX_LOCK_BH(dev, txq) { \
1115 + if ((dev->features & NETIF_F_LLTX) == 0) { \
1116 + __netif_tx_lock_bh(txq); \
1120 +#define HARD_TX_UNLOCK_BH(dev, txq) { \
1121 + if ((dev->features & NETIF_F_LLTX) == 0) { \
1122 + __netif_tx_unlock_bh(txq); \
1127 static inline void netif_tx_disable(struct net_device *dev)
1130 diff -Naupr linux-4.10_orig/include/linux/netfilter/xt_IMQ.h linux-4.10/include/linux/netfilter/xt_IMQ.h
1131 --- linux-4.10_orig/include/linux/netfilter/xt_IMQ.h 1970-01-01 07:00:00.000000000 +0700
1132 +++ linux-4.10/include/linux/netfilter/xt_IMQ.h 2017-02-28 18:44:55.981613941 +0700
1137 +struct xt_imq_info {
1138 + unsigned int todev; /* target imq device */
1141 +#endif /* _XT_IMQ_H */
1143 diff -Naupr linux-4.10_orig/include/linux/netfilter_ipv4/ipt_IMQ.h linux-4.10/include/linux/netfilter_ipv4/ipt_IMQ.h
1144 --- linux-4.10_orig/include/linux/netfilter_ipv4/ipt_IMQ.h 1970-01-01 07:00:00.000000000 +0700
1145 +++ linux-4.10/include/linux/netfilter_ipv4/ipt_IMQ.h 2017-02-28 18:44:55.981613941 +0700
1150 +/* Backwards compatibility for old userspace */
1151 +#include <linux/netfilter/xt_IMQ.h>
1153 +#define ipt_imq_info xt_imq_info
1155 +#endif /* _IPT_IMQ_H */
1157 diff -Naupr linux-4.10_orig/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-4.10/include/linux/netfilter_ipv6/ip6t_IMQ.h
1158 --- linux-4.10_orig/include/linux/netfilter_ipv6/ip6t_IMQ.h 1970-01-01 07:00:00.000000000 +0700
1159 +++ linux-4.10/include/linux/netfilter_ipv6/ip6t_IMQ.h 2017-02-28 18:44:55.981613941 +0700
1161 +#ifndef _IP6T_IMQ_H
1162 +#define _IP6T_IMQ_H
1164 +/* Backwards compatibility for old userspace */
1165 +#include <linux/netfilter/xt_IMQ.h>
1167 +#define ip6t_imq_info xt_imq_info
1169 +#endif /* _IP6T_IMQ_H */
1171 diff -Naupr linux-4.10_orig/include/linux/skbuff.h linux-4.10/include/linux/skbuff.h
1172 --- linux-4.10_orig/include/linux/skbuff.h 2017-02-20 05:34:00.000000000 +0700
1173 +++ linux-4.10/include/linux/skbuff.h 2017-02-28 18:44:55.981613941 +0700
1175 #include <linux/in6.h>
1176 #include <linux/if_packet.h>
1177 #include <net/flow.h>
1178 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1179 +#include <linux/imq.h>
1183 /* The interface for checksum offload between the stack and networking drivers
1185 @@ -661,6 +665,9 @@ struct sk_buff {
1186 * first. This is owned by whoever has the skb queued ATM.
1188 char cb[48] __aligned(8);
1189 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1193 unsigned long _skb_refdst;
1194 void (*destructor)(struct sk_buff *skb);
1195 @@ -670,6 +677,9 @@ struct sk_buff {
1196 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1197 struct nf_conntrack *nfct;
1199 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1200 + struct nf_queue_entry *nf_queue_entry;
1202 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1203 struct nf_bridge_info *nf_bridge;
1205 @@ -750,6 +760,9 @@ struct sk_buff {
1206 __u8 offload_fwd_mark:1;
1208 /* 2, 4 or 5 bit hole */
1209 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1210 + __u8 imq_flags:IMQ_F_BITS;
1213 #ifdef CONFIG_NET_SCHED
1214 __u16 tc_index; /* traffic control index */
1215 @@ -910,6 +923,12 @@ void kfree_skb_list(struct sk_buff *segs
1216 void skb_tx_error(struct sk_buff *skb);
1217 void consume_skb(struct sk_buff *skb);
1218 void __kfree_skb(struct sk_buff *skb);
1220 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1221 +int skb_save_cb(struct sk_buff *skb);
1222 +int skb_restore_cb(struct sk_buff *skb);
1225 extern struct kmem_cache *skbuff_head_cache;
1227 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1228 @@ -3607,6 +3626,10 @@ static inline void __nf_copy(struct sk_b
1230 dst->nfctinfo = src->nfctinfo;
1232 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1233 + dst->imq_flags = src->imq_flags;
1234 + dst->nf_queue_entry = src->nf_queue_entry;
1236 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1237 dst->nf_bridge = src->nf_bridge;
1238 nf_bridge_get(src->nf_bridge);
1239 diff -Naupr linux-4.10_orig/include/net/netfilter/nf_queue.h linux-4.10/include/net/netfilter/nf_queue.h
1240 --- linux-4.10_orig/include/net/netfilter/nf_queue.h 2017-02-20 05:34:00.000000000 +0700
1241 +++ linux-4.10/include/net/netfilter/nf_queue.h 2017-02-28 18:44:55.981613941 +0700
1242 @@ -31,6 +31,12 @@ struct nf_queue_handler {
1243 void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
1244 void nf_unregister_queue_handler(struct net *net);
1245 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
1246 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1248 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1249 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
1250 +void nf_unregister_queue_imq_handler(void);
1253 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
1254 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1255 diff -Naupr linux-4.10_orig/include/net/pkt_sched.h linux-4.10/include/net/pkt_sched.h
1256 --- linux-4.10_orig/include/net/pkt_sched.h 2017-02-20 05:34:00.000000000 +0700
1257 +++ linux-4.10/include/net/pkt_sched.h 2017-02-28 18:44:55.981613941 +0700
1258 @@ -107,6 +107,8 @@ int sch_direct_xmit(struct sk_buff *skb,
1260 void __qdisc_run(struct Qdisc *q);
1262 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate);
1264 static inline void qdisc_run(struct Qdisc *q)
1266 if (qdisc_run_begin(q))
1267 diff -Naupr linux-4.10_orig/include/net/sch_generic.h linux-4.10/include/net/sch_generic.h
1268 --- linux-4.10_orig/include/net/sch_generic.h 2017-02-20 05:34:00.000000000 +0700
1269 +++ linux-4.10/include/net/sch_generic.h 2017-02-28 18:44:55.981613941 +0700
1270 @@ -518,6 +518,13 @@ static inline int qdisc_enqueue(struct s
1271 return sch->enqueue(skb, sch, to_free);
1274 +static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch,
1275 + struct sk_buff **to_free)
1277 + qdisc_skb_cb(skb)->pkt_len = skb->len;
1278 + return qdisc_enqueue(skb, sch, to_free) & NET_XMIT_MASK;
1281 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
1283 return q->flags & TCQ_F_CPUSTATS;
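The `& NET_XMIT_MASK` in qdisc_enqueue_root() above strips qdisc-internal status bits before they reach the caller; imq.c itself judges success by the skb refcount instead, but the masking keeps the returned codes well-formed. A sketch of what the mask does, with constants copied from the mainline headers (illustrative, not part of the patch):

#include <stdio.h>

#define NET_XMIT_SUCCESS        0x00
#define NET_XMIT_DROP           0x01    /* skb dropped */
#define NET_XMIT_CN             0x02    /* congestion notification */
#define NET_XMIT_MASK           0x0f
#define __NET_XMIT_STOLEN       0x00010000 /* qdisc-internal, must not leak */

int main(void)
{
        int raw = NET_XMIT_DROP | __NET_XMIT_STOLEN; /* child qdisc result */
        int ret = raw & NET_XMIT_MASK;               /* what callers see */

        printf("raw=%#x masked=%#x dropped=%d\n",
               raw, ret, ret == NET_XMIT_DROP);
        return 0;
}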
1284 diff -Naupr linux-4.10_orig/include/uapi/linux/netfilter.h linux-4.10/include/uapi/linux/netfilter.h
1285 --- linux-4.10_orig/include/uapi/linux/netfilter.h 2017-02-20 05:34:00.000000000 +0700
1286 +++ linux-4.10/include/uapi/linux/netfilter.h 2017-02-28 18:44:55.981613941 +0700
1290 #define NF_STOP 5 /* Deprecated, for userspace nf_queue compatibility. */
1291 -#define NF_MAX_VERDICT NF_STOP
1292 +#define NF_IMQ_QUEUE 6
1293 +#define NF_MAX_VERDICT NF_IMQ_QUEUE
1295 /* we overload the higher bits for encoding auxiliary data such as the queue
1296 * number or errno values. Not nice, but better than additional function
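This overloading is what the patched __nf_queue() in net/netfilter/nf_queue.c (further below) decodes: the low byte selects the verdict type, here NF_IMQ_QUEUE, while the queue number rides in the high bits. A sketch of the encode/decode round trip using the kernel's NF_VERDICT_MASK/NF_VERDICT_QBITS convention:

#include <assert.h>

#define NF_IMQ_QUEUE            6
#define NF_VERDICT_MASK         0x000000ff
#define NF_VERDICT_QBITS        16

int main(void)
{
        unsigned int queuenum = 2;
        unsigned int verdict = (queuenum << NF_VERDICT_QBITS) | NF_IMQ_QUEUE;

        /* the decode side, as done in the patched __nf_queue() */
        assert((verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE); /* queuetype */
        assert((verdict >> NF_VERDICT_QBITS) == queuenum);   /* queuenum  */
        return 0;
}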
1297 diff -Naupr linux-4.10_orig/net/core/dev.c linux-4.10/net/core/dev.c
1298 --- linux-4.10_orig/net/core/dev.c 2017-02-20 05:34:00.000000000 +0700
1299 +++ linux-4.10/net/core/dev.c 2017-02-28 18:44:55.984947288 +0700
1301 #include <linux/hrtimer.h>
1302 #include <linux/netfilter_ingress.h>
1303 #include <linux/crash_dump.h>
1304 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1305 +#include <linux/imq.h>
1308 #include "net-sysfs.h"
1310 @@ -2881,7 +2884,12 @@ static int xmit_one(struct sk_buff *skb,
1314 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1315 + if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) &&
1316 + !(skb->imq_flags & IMQ_F_ENQUEUE))
1318 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
1320 dev_queue_xmit_nit(skb, dev);
1323 @@ -2920,6 +2928,8 @@ out:
1327 +EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
1329 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
1330 netdev_features_t features)
1332 diff -Naupr linux-4.10_orig/net/core/skbuff.c linux-4.10/net/core/skbuff.c
1333 --- linux-4.10_orig/net/core/skbuff.c 2017-02-20 05:34:00.000000000 +0700
1334 +++ linux-4.10/net/core/skbuff.c 2017-02-28 18:44:55.984947288 +0700
1335 @@ -82,6 +82,87 @@ struct kmem_cache *skbuff_head_cache __r
1336 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
1337 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
1338 EXPORT_SYMBOL(sysctl_max_skb_frags);
1339 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1340 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1343 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1344 +/* Control buffer save/restore for IMQ devices */
1345 +struct skb_cb_table {
1346 + char cb[48] __aligned(8);
1351 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1353 +int skb_save_cb(struct sk_buff *skb)
1355 + struct skb_cb_table *next;
1357 + next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1361 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1363 + memcpy(next->cb, skb->cb, sizeof(skb->cb));
1364 + next->cb_next = skb->cb_next;
1366 + atomic_set(&next->refcnt, 1);
1368 + skb->cb_next = next;
1371 +EXPORT_SYMBOL(skb_save_cb);
1373 +int skb_restore_cb(struct sk_buff *skb)
1375 + struct skb_cb_table *next;
1377 + if (!skb->cb_next)
1380 + next = skb->cb_next;
1382 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1384 + memcpy(skb->cb, next->cb, sizeof(skb->cb));
1385 + skb->cb_next = next->cb_next;
1387 + spin_lock(&skb_cb_store_lock);
1389 + if (atomic_dec_and_test(&next->refcnt))
1390 + kmem_cache_free(skbuff_cb_store_cache, next);
1392 + spin_unlock(&skb_cb_store_lock);
1396 +EXPORT_SYMBOL(skb_restore_cb);
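The intended pairing of these two helpers, as imq.c uses them, is: skb_save_cb() right before handing the skb to the qdisc (which is free to overwrite skb->cb), and skb_restore_cb() when the skb comes back out, or from the destructor if the qdisc drops it. A userspace sketch of that save/restore discipline; the structs are simplified stand-ins, not the real skbuff layout:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct cb_backup {
        char cb[48];
        struct cb_backup *cb_next;      /* backups can chain */
};

struct skb_stub {
        char cb[48];
        struct cb_backup *cb_next;
};

static int save_cb(struct skb_stub *skb)
{
        struct cb_backup *next = malloc(sizeof(*next));

        if (!next)
                return -1;
        memcpy(next->cb, skb->cb, sizeof(skb->cb));
        next->cb_next = skb->cb_next;   /* push onto the backup chain */
        skb->cb_next = next;
        return 0;
}

static void restore_cb(struct skb_stub *skb)
{
        struct cb_backup *next = skb->cb_next;

        memcpy(skb->cb, next->cb, sizeof(skb->cb));
        skb->cb_next = next->cb_next;   /* pop the backup chain */
        free(next);
}

int main(void)
{
        struct skb_stub skb = { .cb = "imq state", .cb_next = NULL };

        assert(save_cb(&skb) == 0);
        memset(skb.cb, 0, sizeof(skb.cb));      /* qdisc scribbles over cb */
        restore_cb(&skb);
        assert(strcmp(skb.cb, "imq state") == 0);
        return 0;
}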
1398 +static void skb_copy_stored_cb(struct sk_buff *, const struct sk_buff *) __attribute__ ((unused));
1399 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1401 + struct skb_cb_table *next;
1402 + struct sk_buff *old;
1404 + if (!__old->cb_next) {
1405 + new->cb_next = NULL;
1409 + spin_lock(&skb_cb_store_lock);
1411 + old = (struct sk_buff *)__old;
1413 + next = old->cb_next;
1414 + atomic_inc(&next->refcnt);
1415 + new->cb_next = next;
1417 + spin_unlock(&skb_cb_store_lock);
1422 * skb_panic - private function for out-of-line support
1423 @@ -654,6 +735,28 @@ static void skb_release_head_state(struc
1425 skb->destructor(skb);
1427 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1429 + * This should not happen. When it does, avoid a memory leak by
1430 + * restoring the chain of cb backups.
1432 + while (skb->cb_next != NULL) {
1433 + if (net_ratelimit())
1434 + pr_warn("IMQ: kfree_skb: skb->cb_next: %08x\n",
1435 + (unsigned int)(uintptr_t)skb->cb_next);
1437 + skb_restore_cb(skb);
1440 + * This should not happen either: nf_queue_entry is nullified in
1441 + * imq_dev_xmit(). If we have a non-NULL nf_queue_entry then we are
1442 + * leaking entry pointers, maybe memory. We don't know if this is a
1443 + * pointer to already-freed memory, or whether it should be freed.
1444 + * If this happens we need to add refcounting, etc. for nf_queue_entry.
1446 + if (skb->nf_queue_entry && net_ratelimit())
1447 + pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1449 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1450 nf_conntrack_put(skb->nfct);
1452 @@ -843,6 +946,10 @@ static void __copy_skb_header(struct sk_
1453 new->sp = secpath_get(old->sp);
1455 __nf_copy(new, old, false);
1456 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1457 + new->cb_next = NULL;
1458 + /*skb_copy_stored_cb(new, old);*/
1461 /* Note : this field could be in headers_start/headers_end section
1462 * It is not yet because we do not want to have a 16 bit hole
1463 @@ -3465,6 +3572,13 @@ void __init skb_init(void)
1465 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1467 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1468 + skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1469 + sizeof(struct skb_cb_table),
1471 + SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1477 diff -Naupr linux-4.10_orig/net/ipv6/ip6_output.c linux-4.10/net/ipv6/ip6_output.c
1478 --- linux-4.10_orig/net/ipv6/ip6_output.c 2017-02-20 05:34:00.000000000 +0700
1479 +++ linux-4.10/net/ipv6/ip6_output.c 2017-02-28 18:44:55.988280636 +0700
1480 @@ -67,9 +67,6 @@ static int ip6_finish_output2(struct net
1481 struct in6_addr *nexthop;
1484 - skb->protocol = htons(ETH_P_IPV6);
1487 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
1488 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1490 @@ -159,6 +156,13 @@ int ip6_output(struct net *net, struct s
1495 + * IMQ-patch: moved setting skb->dev and skb->protocol from
1496 + * ip6_finish_output2 to fix a crash in netif_skb_features().
1498 + skb->protocol = htons(ETH_P_IPV6);
1501 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
1502 net, sk, skb, NULL, dev,
1504 diff -Naupr linux-4.10_orig/net/netfilter/core.c linux-4.10/net/netfilter/core.c
1505 --- linux-4.10_orig/net/netfilter/core.c 2017-02-20 05:34:00.000000000 +0700
1506 +++ linux-4.10/net/netfilter/core.c 2017-02-28 18:44:55.988280636 +0700
1507 @@ -318,6 +318,11 @@ int nf_hook_slow(struct sk_buff *skb, st
1511 + case NF_IMQ_QUEUE:
1512 + ret = nf_queue(skb, state, &entry, verdict);
1513 + if (ret == -ECANCELED)
1517 ret = nf_queue(skb, state, &entry, verdict);
1518 if (ret == 1 && entry)
1519 diff -Naupr linux-4.10_orig/net/netfilter/Kconfig linux-4.10/net/netfilter/Kconfig
1520 --- linux-4.10_orig/net/netfilter/Kconfig 2017-02-20 05:34:00.000000000 +0700
1521 +++ linux-4.10/net/netfilter/Kconfig 2017-02-28 18:44:55.988280636 +0700
1522 @@ -852,6 +852,18 @@ config NETFILTER_XT_TARGET_LOG
1524 To compile it as a module, choose M here. If unsure, say N.
1526 +config NETFILTER_XT_TARGET_IMQ
1527 + tristate '"IMQ" target support'
1528 + depends on NETFILTER_XTABLES
1529 + depends on IP_NF_MANGLE || IP6_NF_MANGLE
1531 + default m if NETFILTER_ADVANCED=n
1533 + This option adds an `IMQ' target which is used to specify if and
1534 + to which imq device packets should get enqueued/dequeued.
1536 + To compile it as a module, choose M here. If unsure, say N.
1538 config NETFILTER_XT_TARGET_MARK
1539 tristate '"MARK" target support'
1540 depends on NETFILTER_ADVANCED
1541 diff -Naupr linux-4.10_orig/net/netfilter/Makefile linux-4.10/net/netfilter/Makefile
1542 --- linux-4.10_orig/net/netfilter/Makefile 2017-02-20 05:34:00.000000000 +0700
1543 +++ linux-4.10/net/netfilter/Makefile 2017-02-28 18:44:55.988280636 +0700
1544 @@ -125,6 +125,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) +=
1545 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1546 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1547 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
1548 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1549 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1550 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
1551 obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
1552 diff -Naupr linux-4.10_orig/net/netfilter/nf_queue.c linux-4.10/net/netfilter/nf_queue.c
1553 --- linux-4.10_orig/net/netfilter/nf_queue.c 2017-02-20 05:34:00.000000000 +0700
1554 +++ linux-4.10/net/netfilter/nf_queue.c 2017-02-28 18:44:55.988280636 +0700
1556 * receives, no matter what.
1559 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1560 +static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
1562 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1564 + rcu_assign_pointer(queue_imq_handler, qh);
1566 +EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
1568 +void nf_unregister_queue_imq_handler(void)
1570 + RCU_INIT_POINTER(queue_imq_handler, NULL);
1571 + synchronize_rcu();
1573 +EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
1576 /* return EBUSY when somebody else is registered, return EEXIST if the
1577 * same handler is registered, return 0 in case of success. */
1578 void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
1579 @@ -108,16 +125,28 @@ void nf_queue_nf_hook_drop(struct net *n
1582 static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
1583 - struct nf_hook_entry *hook_entry, unsigned int queuenum)
1584 + struct nf_hook_entry *hook_entry, unsigned int verdict)
1586 int status = -ENOENT;
1587 struct nf_queue_entry *entry = NULL;
1588 const struct nf_afinfo *afinfo;
1589 const struct nf_queue_handler *qh;
1590 struct net *net = state->net;
1591 + unsigned int queuetype = verdict & NF_VERDICT_MASK;
1592 + unsigned int queuenum = verdict >> NF_VERDICT_QBITS;
1594 /* QUEUE == DROP if no one is waiting, to be safe. */
1595 - qh = rcu_dereference(net->nf.queue_handler);
1596 + if (queuetype == NF_IMQ_QUEUE) {
1597 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1598 + qh = rcu_dereference(queue_imq_handler);
1604 + qh = rcu_dereference(net->nf.queue_handler);
1610 @@ -164,8 +193,17 @@ int nf_queue(struct sk_buff *skb, struct
1611 struct nf_hook_entry *entry = *entryp;
1614 - ret = __nf_queue(skb, state, entry, verdict >> NF_VERDICT_QBITS);
1615 + ret = __nf_queue(skb, state, entry, verdict);
1618 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1620 + if (ret == -ECANCELED && skb->imq_flags == 0) {
1621 + *entryp = rcu_dereference(entry->next);
1626 if (ret == -ESRCH &&
1627 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
1628 *entryp = rcu_dereference(entry->next);
1629 @@ -232,6 +270,7 @@ okfn:
1633 + case NF_IMQ_QUEUE:
1634 err = nf_queue(skb, &entry->state, &hook_entry, verdict);
1637 diff -Naupr linux-4.10_orig/net/netfilter/xt_IMQ.c linux-4.10/net/netfilter/xt_IMQ.c
1638 --- linux-4.10_orig/net/netfilter/xt_IMQ.c 1970-01-01 07:00:00.000000000 +0700
1639 +++ linux-4.10/net/netfilter/xt_IMQ.c 2017-02-28 18:44:55.988280636 +0700
1642 + * This target marks packets to be enqueued to an imq device
1644 +#include <linux/module.h>
1645 +#include <linux/skbuff.h>
1646 +#include <linux/netfilter/x_tables.h>
1647 +#include <linux/netfilter/xt_IMQ.h>
1648 +#include <linux/imq.h>
1650 +static unsigned int imq_target(struct sk_buff *pskb,
1651 + const struct xt_action_param *par)
1653 + const struct xt_imq_info *mr = par->targinfo;
1655 + pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1657 + return XT_CONTINUE;
1660 +static int imq_checkentry(const struct xt_tgchk_param *par)
1662 + struct xt_imq_info *mr = par->targinfo;
1664 + if (mr->todev > IMQ_MAX_DEVS - 1) {
1665 + pr_warn("IMQ: invalid device specified, highest is %u\n",
1666 + IMQ_MAX_DEVS - 1);
1673 +static struct xt_target xt_imq_reg[] __read_mostly = {
1676 + .family = AF_INET,
1677 + .checkentry = imq_checkentry,
1678 + .target = imq_target,
1679 + .targetsize = sizeof(struct xt_imq_info),
1680 + .table = "mangle",
1685 + .family = AF_INET6,
1686 + .checkentry = imq_checkentry,
1687 + .target = imq_target,
1688 + .targetsize = sizeof(struct xt_imq_info),
1689 + .table = "mangle",
1694 +static int __init imq_init(void)
1696 + return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1699 +static void __exit imq_fini(void)
1701 + xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1704 +module_init(imq_init);
1705 +module_exit(imq_fini);
1707 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1708 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1709 +MODULE_LICENSE("GPL");
1710 +MODULE_ALIAS("ipt_IMQ");
1711 +MODULE_ALIAS("ip6t_IMQ");
1713 diff -Naupr linux-4.10_orig/net/sched/sch_generic.c linux-4.10/net/sched/sch_generic.c
1714 --- linux-4.10_orig/net/sched/sch_generic.c 2017-02-20 05:34:00.000000000 +0700
1715 +++ linux-4.10/net/sched/sch_generic.c 2017-02-28 18:44:55.988280636 +0700
1716 @@ -154,6 +154,14 @@ bulk:
1720 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate)
1724 + return dequeue_skb(q, validate, &packets);
1726 +EXPORT_SYMBOL(qdisc_dequeue_skb);
1729 * Transmit possibly several skbs, and handle the return status as
1730 * required. Owning running seqcount bit guarantees that