diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c18f9e6..ec43bde 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -234,6 +234,125 @@ config RIONET_RX_SIZE
+ tristate "IMQ (intermediate queueing device) support"
+ depends on NETDEVICES && NETFILTER
+ The IMQ device(s) is used as a placeholder for QoS queueing
+ disciplines. Every packet entering/leaving the IP stack can be
+ directed through the IMQ device, where it is enqueued/dequeued to the
+ attached qdisc. This allows you to treat network devices as classes
+ and distribute bandwidth among them. Iptables is used to specify
+ through which IMQ device, if any, packets travel.
+
+ More information at: https://github.com/imq/linuximq
+
+ To compile this driver as a module, choose M here: the module
+ will be called imq. If unsure, say N.
+ prompt "IMQ behavior (PRE/POSTROUTING)"
+ default IMQ_BEHAVIOR_AB
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ IMQ can work in any of the following ways:
+
+     PREROUTING   |   POSTROUTING
+ -----------------|-------------------
+ #1  After NAT    |   After NAT
+ #2  After NAT    |   Before NAT
+ #3  Before NAT   |   After NAT
+ #4  Before NAT   |   Before NAT
+
+ The default behavior is to hook after NAT on PREROUTING
+ and before NAT on POSTROUTING (#2, the IMQ_BEHAVIOR_AB default above).
+
+ These settings are especially useful when trying to use IMQ
+ to shape NATed clients.
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
+config IMQ_BEHAVIOR_AA
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING:  After NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING:  After NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING:  Before NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING:  Before NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
+
+ int "Number of IMQ devices"
+ This setting defines how many IMQ devices will be created.
+
+ The default value is 16.
+
+ More information can be found at: https://github.com/imq/linuximq
+
+ If not sure, leave the default settings alone.
 tristate "Universal TUN/TAP device driver support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c12cb22..03b82c6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_IFB) += ifb.o
+obj-$(CONFIG_IMQ) += imq.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
 obj-$(CONFIG_MACVTAP) += macvtap.o
 obj-$(CONFIG_MII) += mii.o
diff --git a/drivers/net/imq.c b/drivers/net/imq.c
new file mode 100644
index 0000000..c60929b
--- /dev/null
+++ b/drivers/net/imq.c
+ * Pseudo-driver for the intermediate queue device.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ * Authors: Patrick McHardy, <kaber@trash.net>
+ * The first version was written by Martin Devera, <devik@cdi.cz>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ #include <linux/netfilter_ipv6.h>
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+#include <net/sock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <net/ipv6.h>
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
+static nf_hookfn imq_nf_hook;
+static struct nf_hook_ops imq_ops[] = {
+ /* imq_ingress_ipv4 */
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .hooknum = NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP_PRI_MANGLE + 1,
+ .priority = NF_IP_PRI_NAT_DST + 1,
+ /* imq_egress_ipv4 */
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .hooknum = NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP_PRI_LAST,
+ .priority = NF_IP_PRI_NAT_SRC - 1,
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ /* imq_ingress_ipv6 */
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .hooknum = NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP6_PRI_MANGLE + 1,
+ .priority = NF_IP6_PRI_NAT_DST + 1,
+ /* imq_egress_ipv6 */
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .hooknum = NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP6_PRI_LAST,
+ .priority = NF_IP6_PRI_NAT_SRC - 1,
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static int numdevs = CONFIG_IMQ_NUM_DEVS;
+static int numdevs = IMQ_MAX_DEVS;
+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
+#define IMQ_MAX_QUEUES 32
+static int numqueues = 1;
+static u32 imq_hashrnd;
+static int imq_dev_accurate_stats = 1;
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
+ unsigned int pull_len;
+ u16 protocol = skb->protocol;
+ switch (protocol) {
+ case htons(ETH_P_8021Q): {
+ if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
+ pull_len += VLAN_HLEN;
+ skb->network_header += VLAN_HLEN;
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ case htons(ETH_P_PPP_SES): {
+ if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
+ pull_len += PPPOE_SES_HLEN;
+ skb->network_header += PPPOE_SES_HLEN;
+ protocol = pppoe_proto(skb);
+ case htons(ETH_P_IP): {
+ const struct iphdr *iph = ip_hdr(skb);
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
+ addr1 = iph->daddr;
+ addr2 = iph->saddr;
+ ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
+ ihl = ip_hdrlen(skb);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6): {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
+ addr1 = iph->daddr.s6_addr32[3];
+ addr2 = iph->saddr.s6_addr32[3];
+ ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
+ if (unlikely(ihl < 0))
+ if (pull_len != 0) {
+ skb_push(skb, pull_len);
+ skb->network_header -= pull_len;
+ return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
+ swap(addr1, addr2);
+ switch (ip_proto) {
+ case IPPROTO_UDPLITE: {
+ if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
+ if (ports.in16[0] > ports.in16[1])
+ swap(ports.in16[0], ports.in16[1]);
+ if (pull_len != 0) {
+ skb_push(skb, pull_len);
+ skb->network_header -= pull_len;
+ hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
+ return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
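
The tail of imq_hash() maps the 32-bit jhash result onto [0, real_num_tx_queues) with a multiply and shift instead of a modulo. A minimal user-space sketch of that mapping (the hash values and the eight-queue count are made-up examples, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Same multiply-shift scaling as the final line of imq_hash() above:
 * spread a 32-bit hash uniformly over [0, nqueues) without a division. */
static uint16_t hash_to_queue(uint32_t hash, uint32_t nqueues)
{
    return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
}

int main(void)
{
    const uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
    int i;

    for (i = 0; i < 4; i++) /* prints queues 0, 2, 4, 7 for 8 queues */
        printf("hash %08x -> queue %u\n", hashes[i], hash_to_queue(hashes[i], 8));
    return 0;
}
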
+static inline bool sk_tx_queue_recorded(struct sock *sk)
+ return (sk_tx_queue_get(sk) >= 0);
+static struct netdev_queue *imq_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+ u16 queue_index = 0;
+ if (likely(dev->real_num_tx_queues == 1))
+ /* IMQ can be receiving ingress or egress packets. */
+ /* First check if the rx_queue is set */
+ if (skb_rx_queue_recorded(skb)) {
+ queue_index = skb_get_rx_queue(skb);
+ /* Check if the socket has a tx_queue set */
+ if (sk_tx_queue_recorded(skb->sk)) {
+ queue_index = sk_tx_queue_get(skb->sk);
+ /* Try to use the socket hash */
+ if (skb->sk && skb->sk->sk_hash) {
+ hash = skb->sk->sk_hash;
+ (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
+ /* Generate a hash from the packet data */
+ queue_index = imq_hash(dev, skb);
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
+ skb_set_queue_mapping(skb, queue_index);
+ return netdev_get_tx_queue(dev, queue_index);
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+ return &dev->stats;
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+ struct nf_queue_entry *entry = skb->nf_queue_entry;
+ skb->nf_queue_entry = NULL;
+ nf_queue_entry_release_refs(entry);
+ skb_restore_cb(skb); /* kfree backup */
+static void imq_done_check_queue_mapping(struct sk_buff *skb,
+ struct net_device *dev)
+ unsigned int queue_index;
+ /* Don't let queue_mapping be left too large after exiting IMQ */
+ if (likely(skb->dev != dev && skb->dev != NULL)) {
+ queue_index = skb_get_queue_mapping(skb);
+ if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
+ queue_index = (u16)((u32)queue_index %
+ skb->dev->real_num_tx_queues);
+ skb_set_queue_mapping(skb, queue_index);
+ /* skb->dev was the IMQ device itself or NULL; be on the safe side and
+ * just clear the queue mapping.
+ skb_set_queue_mapping(skb, 0);
+static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct nf_queue_entry *entry = skb->nf_queue_entry;
+ skb->nf_queue_entry = NULL;
+ dev->trans_start = jiffies;
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ if (unlikely(entry == NULL)) {
+ /* We don't know what is going on here: the packet is queued for
+ * the imq device, but (probably) not by us.
+ * If this packet was not sent here by imq_nf_queue(), then
+ * skb_save_cb() was not used and kfree_skb() should not show:
+ * WARNING: IMQ: kfree_skb: skb->cb_next:..
+ * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
+ * However, if this message is shown, then IMQ is somehow broken
+ * and you should report this to linuximq.net.
+ /* imq_dev_xmit is a black hole that eats all packets; report that
+ * we ate this packet happily and increase the dropped counters.
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ skb_restore_cb(skb); /* restore skb->cb */
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+ imq_done_check_queue_mapping(skb, dev);
+ nf_reinject(entry, NF_ACCEPT);
+ return NETDEV_TX_OK;
+static struct net_device *get_imq_device_by_index(int index)
+ struct net_device *dev = NULL;
+ /* get device by name and cache the result */
+ snprintf(buf, sizeof(buf), "imq%d", index);
+ /* Search for the device in all namespaces. */
+ for_each_net(net) {
+ dev = dev_get_by_name(net, buf);
+ if (WARN_ON_ONCE(dev == NULL)) {
+ /* IMQ device not found. Exotic config? */
+ return ERR_PTR(-ENODEV);
+ imq_devs_cache[index] = dev;
+static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
+ struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
+ if (nf_queue_entry_get_refs(entry))
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to the MAC header
+ * before calling skb_gso_segment(). Otherwise, the original MAC header is lost
+ * and segmented skbs will be sent to the wrong destination.
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+ if (skb->nf_bridge)
+ __skb_push(skb, skb->network_header - skb->mac_header);
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+ if (skb->nf_bridge)
+ __skb_pull(skb, skb->network_header - skb->mac_header);
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+static void free_entry(struct nf_queue_entry *entry)
+ nf_queue_entry_release_refs(entry);
+static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
+static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
+ struct net_device *dev, struct sk_buff *skb)
+ struct nf_queue_entry *entry_seg;
+ nf_bridge_adjust_segmented_data(skb);
+ if (skb->next == NULL) { /* last packet, no need to copy entry */
+ struct sk_buff *gso_skb = entry->skb;
+ ret = __imq_nf_queue(entry, dev);
+ entry->skb = gso_skb;
+ entry_seg = nf_queue_entry_dup(entry);
+ entry_seg->skb = skb;
+ ret = __imq_nf_queue(entry_seg, dev);
+ free_entry(entry_seg);
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+ struct sk_buff *skb, *segs;
+ struct net_device *dev;
+ unsigned int queued;
+ int index, retval, err;
+ index = entry->skb->imq_flags & IMQ_F_IFMASK;
+ if (unlikely(index > numdevs - 1)) {
+ if (net_ratelimit())
+ pr_warn("IMQ: invalid device specified, highest is %u\n",
+ /* check for the imq device by index from the cache */
+ dev = imq_devs_cache[index];
+ if (unlikely(!dev)) {
+ dev = get_imq_device_by_index(index);
+ retval = PTR_ERR(dev);
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ entry->skb->imq_flags = 0;
+ retval = -ECANCELED;
+ /* Since 3.10.x, GSO handling moved here as a result of upstream commit
+ * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
+ * skb_gso_segment into nfnetlink_queue module).
+ * The following code replicates the GSO handling from
+ * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
+ switch (entry->state.pf) {
+ skb->protocol = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IPV6);
+ if (!skb_is_gso(entry->skb))
+ return __imq_nf_queue(entry, dev);
+ nf_bridge_adjust_skb_data(skb);
+ segs = skb_gso_segment(skb, 0);
+ /* Does not use PTR_ERR to limit the number of error codes that can be
+ * returned by nf_queue. For instance, callers rely on -ECANCELED to
+ * mean 'ignore this hook'.
+ struct sk_buff *nskb = segs->next;
+ if (nskb && nskb->next)
+ nskb->cb_next = NULL;
+ err = __imq_nf_queue_gso(entry, dev, segs);
+ if (err) /* some segments are already queued */
+ nf_bridge_adjust_segmented_data(skb);
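
imq_nf_queue() walks the list that skb_gso_segment() returns, detaching each segment before handing it to __imq_nf_queue_gso() so that a failure can free the remaining segments independently. A user-space sketch of that walk-and-detach pattern (the struct and queueing stub are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct seg {
    int id;
    struct seg *next;
};

int main(void)
{
    struct seg *head = NULL, *s;
    int i;

    for (i = 3; i >= 1; i--) { /* build a three-segment list */
        s = malloc(sizeof(*s));
        s->id = i;
        s->next = head;
        head = s;
    }
    while (head) {
        struct seg *next = head->next;

        head->next = NULL; /* detach before queueing */
        printf("queueing segment %d\n", head->id);
        free(head); /* stand-in for the segment being consumed */
        head = next;
    }
    return 0;
}
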
+static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
+ struct sk_buff *skb_orig, *skb, *skb_shared, *skb_popd;
+ struct netdev_queue *txq;
+ spinlock_t *root_lock;
+ int retval = -EINVAL;
+ unsigned int orig_queue_index;
+ dev->last_rx = jiffies;
+ /* skb has owner? => make clone */
+ if (unlikely(skb->destructor)) {
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ skb->cb_next = NULL;
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ /* skb->dev == NULL causes problems; try to find the cause. */
+ if (net_ratelimit()) {
+ dev_warn(&dev->dev,
+ "received packet with skb->dev == NULL\n");
+ /* Disables softirqs for the lock below */
+ rcu_read_lock_bh();
+ /* Multi-queue selection */
+ orig_queue_index = skb_get_queue_mapping(skb);
+ txq = imq_select_queue(dev, skb);
+ q = rcu_dereference(txq->qdisc);
+ if (unlikely(!q->enqueue))
+ goto packet_not_eaten_by_imq_dev;
+ skb->nf_queue_entry = entry;
+ root_lock = qdisc_lock(q);
+ spin_lock(root_lock);
+ users = atomic_read(&skb->users);
+ skb_shared = skb_get(skb); /* increase reference count by one */
+ /* backup skb->cb, as the qdisc layer will overwrite it */
+ skb_save_cb(skb_shared);
+ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
+ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
+ kfree_skb(skb_shared); /* decrease reference count by one */
+ skb->destructor = &imq_skb_destructor;
+ skb_popd = qdisc_dequeue_skb(q, &validate);
+ if (unlikely(skb_orig))
+ kfree_skb(skb_orig); /* free original */
+ spin_unlock(root_lock);
+ /* schedule qdisc dequeue */
+ __netif_schedule(q);
+ if (likely(skb_popd)) {
+ /* Note that we validate skb (GSO, checksum, ...) outside of locks */
+ skb_popd = validate_xmit_skb_list(skb_popd, dev);
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+ txq = skb_get_tx_queue(dev, skb_popd);
+ * The IMQ device will not be frozen or stopped, and transmission
+ * always succeeds, so we skip the status and return-value checks.
+ if (imq_dev_accurate_stats && txq->xmit_lock_owner != cpu) {
+ HARD_TX_LOCK(dev, txq, cpu);
+ if (!netif_xmit_frozen_or_stopped(txq)) {
+ dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
+ HARD_TX_UNLOCK(dev, txq);
+ if (!netif_xmit_frozen_or_stopped(txq)) {
+ dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
+ /* No skb ready, so schedule it */
+ __netif_schedule(q);
+ rcu_read_unlock_bh();
+ skb_restore_cb(skb_shared); /* restore skb->cb */
+ skb->nf_queue_entry = NULL;
+ * The qdisc dropped the packet and decreased its reference count, so
+ * we must not free it again, as that would
+ * actually destroy the skb.
+ spin_unlock(root_lock);
+ goto packet_not_eaten_by_imq_dev;
+packet_not_eaten_by_imq_dev:
+ skb_set_queue_mapping(skb, orig_queue_index);
+ rcu_read_unlock_bh();
+ /* cloned? restore original */
+ if (unlikely(skb_orig)) {
+ entry->skb = skb_orig;
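
The enqueue above learns whether the qdisc kept the packet purely from reference counts: take an extra reference with skb_get(), enqueue (which may kfree_skb() on drop), then compare skb->users with the saved value. A stripped-down user-space sketch of that detection (plain ints stand in for atomic_t; all names are illustrative):

#include <stdio.h>

struct fake_skb { int users; };

static void skb_get(struct fake_skb *skb)   { skb->users++; }
static void kfree_skb(struct fake_skb *skb) { skb->users--; }

/* a qdisc that accepts keeps the reference it was handed */
static void enqueue_accept(struct fake_skb *skb) { (void)skb; }
/* a qdisc that drops releases it immediately */
static void enqueue_drop(struct fake_skb *skb)   { kfree_skb(skb); }

int main(void)
{
    struct fake_skb skb = { .users = 1 };
    int users = skb.users;

    skb_get(&skb);      /* the extra reference taken before enqueue */
    enqueue_drop(&skb); /* swap in enqueue_accept() to see the other path */

    if (skb.users == users + 1)
        printf("qdisc kept the packet\n");
    else
        printf("qdisc dropped the packet\n");
    return 0;
}
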
+static unsigned int imq_nf_hook(const struct nf_hook_ops *hook_ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+ return (skb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
+static int imq_close(struct net_device *dev)
+ netif_stop_queue(dev);
+static int imq_open(struct net_device *dev)
+ netif_start_queue(dev);
+static const struct net_device_ops imq_netdev_ops = {
+ .ndo_open = imq_open,
+ .ndo_stop = imq_close,
+ .ndo_start_xmit = imq_dev_xmit,
+ .ndo_get_stats = imq_get_stats,
+static void imq_setup(struct net_device *dev)
+ dev->netdev_ops = &imq_netdev_ops;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 16000; /* too small? */
+ dev->tx_queue_len = 11000; /* too big? */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_GSO | NETIF_F_HW_CSUM |
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE |
+ IFF_TX_SKB_SHARING);
+static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+ ret = -EADDRNOTAVAIL;
+ pr_warn("IMQ: imq_validate failed (%d)\n", ret);
+static struct rtnl_link_ops imq_link_ops __read_mostly = {
+ .setup = imq_setup,
+ .validate = imq_validate,
+static const struct nf_queue_handler imq_nfqh = {
+ .outfn = imq_nf_queue,
+static int __init imq_init_hooks(void)
+ nf_register_queue_imq_handler(&imq_nfqh);
+ ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+ nf_unregister_queue_imq_handler();
+static int __init imq_init_one(int index)
+ struct net_device *dev;
+ dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
+ ret = dev_alloc_name(dev, dev->name);
+ dev->rtnl_link_ops = &imq_link_ops;
+ ret = register_netdevice(dev);
+static int __init imq_init_devs(void)
+ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
+ pr_err("IMQ: numdevs has to be between 1 and %u\n",
+ if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
+ pr_err("IMQ: numqueues has to be between 1 and %u\n",
+ get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
+ err = __rtnl_link_register(&imq_link_ops);
+ for (i = 0; i < numdevs && !err; i++)
+ err = imq_init_one(i);
+ __rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+static int __init imq_init_module(void)
+#if defined(CONFIG_IMQ_NUM_DEVS)
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
+ err = imq_init_devs();
+ pr_err("IMQ: Error trying imq_init_devs(net)\n");
+ err = imq_init_hooks();
+ pr_err("IMQ: Error trying imq_init_hooks()\n");
+ rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+ pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d, imq_dev_accurate_stats = %d)\n",
+ numdevs, numqueues, imq_dev_accurate_stats);
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
+ pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
+ pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
+static void __exit imq_unhook(void)
+ nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+ nf_unregister_queue_imq_handler();
+static void __exit imq_cleanup_devs(void)
+ rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+static void __exit imq_exit_module(void)
+ imq_cleanup_devs();
+ pr_info("IMQ driver unloaded successfully.\n");
+module_init(imq_init_module);
+module_exit(imq_exit_module);
+module_param(numdevs, int, 0);
+module_param(numqueues, int, 0);
+module_param(imq_dev_accurate_stats, int, 0);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
+MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
+MODULE_PARM_DESC(imq_dev_accurate_stats, "enable accurate imq device stats");
+MODULE_AUTHOR("https://github.com/imq/linuximq");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("imq");
diff --git a/include/linux/imq.h b/include/linux/imq.h
new file mode 100644
index 0000000..1babb09
--- /dev/null
+++ b/include/linux/imq.h
+/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
+#define IMQ_F_BITS 5
+#define IMQ_F_IFMASK 0x0f
+#define IMQ_F_ENQUEUE 0x10
+#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
+#endif /* _IMQ_H */
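
The layout packs the target device index into the low four bits and the enqueue marker into bit 4, so both fit the 5-bit imq_flags field added to struct sk_buff below. A small user-space sketch of the encoding (constants copied from this header; the device index 2 is an arbitrary example):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IMQ_F_IFMASK  0x0f
#define IMQ_F_ENQUEUE 0x10

int main(void)
{
    /* encode "enqueue on imq2" the way the xt_IMQ target does */
    uint8_t flags = (2 & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;

    assert(flags & IMQ_F_ENQUEUE);       /* the hook returns NF_IMQ_QUEUE */
    assert((flags & IMQ_F_IFMASK) == 2); /* imq_nf_queue() picks imq2 */
    printf("imq_flags = 0x%02x\n", flags);
    return 0;
}
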
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979d..9c8f9a1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3279,6 +3279,19 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
+#define HARD_TX_LOCK_BH(dev, txq) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ __netif_tx_lock_bh(txq); \
+#define HARD_TX_UNLOCK_BH(dev, txq) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ __netif_tx_unlock_bh(txq); \
 static inline void netif_tx_disable(struct net_device *dev)
diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
new file mode 100644
index 0000000..9b07230
--- /dev/null
+++ b/include/linux/netfilter/xt_IMQ.h
+struct xt_imq_info {
+ unsigned int todev; /* target imq device */
+#endif /* _XT_IMQ_H */
diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
new file mode 100644
index 0000000..7af320f
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+#define ipt_imq_info xt_imq_info
+#endif /* _IPT_IMQ_H */
diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
new file mode 100644
index 0000000..198ac01
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+#define ip6t_imq_info xt_imq_info
+#endif /* _IP6T_IMQ_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536..61686b0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
 /* A. Checksumming of received packets by device.
@@ -548,6 +551,9 @@ struct sk_buff {
 * first. This is owned by whoever has the skb queued ATM.
 char cb[48] __aligned(8);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
 unsigned long _skb_refdst;
 void (*destructor)(struct sk_buff *skb);
@@ -557,6 +563,9 @@ struct sk_buff {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack *nfct;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ struct nf_queue_entry *nf_queue_entry;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info *nf_bridge;
@@ -624,6 +633,9 @@ struct sk_buff {
 __u8 inner_protocol_type:1;
 __u8 remcsum_offload:1;
 /* 3 or 5 bit hole */
+ #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ __u8 imq_flags:IMQ_F_BITS;
 #ifdef CONFIG_NET_SCHED
 __u16 tc_index; /* traffic control index */
@@ -774,6 +786,12 @@ void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+int skb_save_cb(struct sk_buff *skb);
+int skb_restore_cb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
@@ -3232,6 +3250,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 dst->nfctinfo = src->nfctinfo;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ dst->imq_flags = src->imq_flags;
+ dst->nf_queue_entry = src->nf_queue_entry;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 dst->nf_bridge = src->nf_bridge;
 nf_bridge_get(src->nf_bridge);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index e863585..40904cb 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -31,6 +31,12 @@ struct nf_queue_handler {
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
+void nf_unregister_queue_imq_handler(void);
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf1..149dec9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -104,6 +104,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 void __qdisc_run(struct Qdisc *q);
+struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate);
 static inline void qdisc_run(struct Qdisc *q)
 if (qdisc_run_begin(q))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f..cc0af3e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -501,6 +501,12 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 return sch->enqueue(skb, sch);
+static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 return q->flags & TCQ_F_CPUSTATS;
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index d93f949..23fb6d1 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
-#define NF_MAX_VERDICT NF_STOP
+#define NF_IMQ_QUEUE 6
+#define NF_MAX_VERDICT NF_IMQ_QUEUE
 /* we overload the higher bits for encoding auxiliary data such as the queue
  * number or errno values. Not nice, but better than additional function
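
As with NF_QUEUE, the queue number travels in the upper bits of the verdict word while the low byte holds the verdict code, so nf_queue() can recover both with NF_VERDICT_MASK and a shift by NF_VERDICT_QBITS. A user-space sketch of the split (constants mirror this header as far as shown; the queue number is an assumed example):

#include <stdint.h>
#include <stdio.h>

#define NF_QUEUE         3
#define NF_IMQ_QUEUE     6 /* added by this patch */
#define NF_VERDICT_MASK  0x000000ff
#define NF_VERDICT_QBITS 16

int main(void)
{
    /* a verdict word carrying "IMQ-queue this skb on queue 0" */
    uint32_t verdict = (0u << NF_VERDICT_QBITS) | NF_IMQ_QUEUE;

    printf("verdict code = %u, queue = %u\n",
           verdict & NF_VERDICT_MASK, verdict >> NF_VERDICT_QBITS);
    return 0;
}
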
diff --git a/net/core/dev.c b/net/core/dev.c
index a8e4dd4..f84cd5a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
 #include <linux/errqueue.h>
 #include <linux/hrtimer.h>
 #include <linux/netfilter_ingress.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
 #include "net-sysfs.h"
@@ -2675,7 +2678,12 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) &&
+ !(skb->imq_flags & IMQ_F_ENQUEUE))
 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
 dev_queue_xmit_nit(skb, dev);
@@ -2713,6 +2721,7 @@ out:
+EXPORT_SYMBOL(dev_hard_start_xmit);
 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
 netdev_features_t features)
@@ -2801,6 +2810,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
+EXPORT_SYMBOL(validate_xmit_skb_list);
 static void qdisc_pkt_len_init(struct sk_buff *skb)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b84330..a313d22 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+/* Control buffer save/restore for IMQ devices */
+struct skb_cb_table {
+ char cb[48] __aligned(8);
+static DEFINE_SPINLOCK(skb_cb_store_lock);
+int skb_save_cb(struct sk_buff *skb)
+ struct skb_cb_table *next;
+ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
+ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+ memcpy(next->cb, skb->cb, sizeof(skb->cb));
+ next->cb_next = skb->cb_next;
+ atomic_set(&next->refcnt, 1);
+ skb->cb_next = next;
+EXPORT_SYMBOL(skb_save_cb);
+int skb_restore_cb(struct sk_buff *skb)
+ struct skb_cb_table *next;
+ if (!skb->cb_next)
+ next = skb->cb_next;
+ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+ memcpy(skb->cb, next->cb, sizeof(skb->cb));
+ skb->cb_next = next->cb_next;
+ spin_lock(&skb_cb_store_lock);
+ if (atomic_dec_and_test(&next->refcnt))
+ kmem_cache_free(skbuff_cb_store_cache, next);
+ spin_unlock(&skb_cb_store_lock);
+EXPORT_SYMBOL(skb_restore_cb);
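
skb_save_cb()/skb_restore_cb() implement a push/pop stack of 48-byte control-buffer copies chained through cb_next: IMQ pushes a copy before the qdisc scribbles over skb->cb, and pops it back before reinjecting. A user-space sketch of the same scheme, minus the slab cache, locking, and refcounting (names mirror the patch but this is not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cb_backup {
    char cb[48];
    struct cb_backup *next;
};

struct fake_skb {
    char cb[48];
    struct cb_backup *cb_next;
};

static int save_cb(struct fake_skb *skb)
{
    struct cb_backup *b = malloc(sizeof(*b));

    if (!b)
        return -1;
    memcpy(b->cb, skb->cb, sizeof(skb->cb)); /* push a copy */
    b->next = skb->cb_next;
    skb->cb_next = b;
    return 0;
}

static int restore_cb(struct fake_skb *skb)
{
    struct cb_backup *b = skb->cb_next;

    if (!b)
        return -1;
    memcpy(skb->cb, b->cb, sizeof(skb->cb)); /* pop it back */
    skb->cb_next = b->next;
    free(b);
    return 0;
}

int main(void)
{
    struct fake_skb skb = { .cb = "qdisc state", .cb_next = NULL };

    save_cb(&skb);                    /* before handing to the qdisc */
    strcpy(skb.cb, "scribbled over"); /* the qdisc overwrites cb */
    restore_cb(&skb);                 /* back to the original */
    printf("%s\n", skb.cb);           /* prints "qdisc state" */
    return 0;
}
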
+static void skb_copy_stored_cb(struct sk_buff *, const struct sk_buff *) __attribute__ ((unused));
+static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
+ struct skb_cb_table *next;
+ struct sk_buff *old;
+ if (!__old->cb_next) {
+ new->cb_next = NULL;
+ spin_lock(&skb_cb_store_lock);
+ old = (struct sk_buff *)__old;
+ next = old->cb_next;
+ atomic_inc(&next->refcnt);
+ new->cb_next = next;
+ spin_unlock(&skb_cb_store_lock);
 * skb_panic - private function for out-of-line support
@@ -643,6 +724,28 @@ static void skb_release_head_state(struct sk_buff *skb)
 skb->destructor(skb);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ * This should not happen. When it does, avoid a memleak by restoring
+ * the chain of cb backups.
+ while (skb->cb_next != NULL) {
+ if (net_ratelimit())
+ pr_warn("IMQ: kfree_skb: skb->cb_next: %08x\n",
+ (unsigned int)(uintptr_t)skb->cb_next);
+ skb_restore_cb(skb);
+ * This should not happen either: nf_queue_entry is nullified in
+ * imq_dev_xmit(). If we have a non-NULL nf_queue_entry here, then we are
+ * leaking entry pointers, maybe memory. We don't know if this is
+ * a pointer to already-freed memory or whether it should be freed here.
+ * If this happens, we need to add refcounting, etc., for nf_queue_entry.
+ if (skb->nf_queue_entry && net_ratelimit())
+ pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 nf_conntrack_put(skb->nfct);
@@ -765,6 +868,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 new->sp = secpath_get(old->sp);
 __nf_copy(new, old, false);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ new->cb_next = NULL;
+ /*skb_copy_stored_cb(new, old);*/
 /* Note : this field could be in headers_start/headers_end section
 * It is not yet because we do not want to have a 16 bit hole
@@ -3324,6 +3431,13 @@ void __init skb_init(void)
 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
+ sizeof(struct skb_cb_table),
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5f7716..dd12857 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -64,9 +64,6 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
 struct in6_addr *nexthop;
- skb->protocol = htons(ETH_P_IPV6);
 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -143,6 +140,13 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
+ * IMQ-patch: moved setting skb->dev and skb->protocol from
+ * ip6_finish_output2 to fix crashing at netif_skb_features().
+ skb->protocol = htons(ETH_P_IPV6);
 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6eae69a..ca3b763 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -784,6 +784,18 @@ config NETFILTER_XT_TARGET_LOG
 To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_IMQ
+ tristate '"IMQ" target support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ default m if NETFILTER_ADVANCED=n
+ This option adds an `IMQ' target which is used to specify if and
+ to which imq device packets should get enqueued/dequeued.
+ To compile it as a module, choose M here. If unsure, say N.
 config NETFILTER_XT_TARGET_MARK
 tristate '"MARK" target support'
 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 70d026d..5469b14 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a0e5497..a24276c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -206,9 +206,11 @@ next_hook:
 ret = NF_DROP_GETERR(verdict);
- } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
+ } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
+ (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
 int err = nf_queue(skb, elem, state,
- verdict >> NF_VERDICT_QBITS);
+ verdict >> NF_VERDICT_QBITS,
+ verdict & NF_VERDICT_MASK);
 if (err == -ECANCELED)
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3992106..35cbc7b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
- struct nf_hook_state *state, unsigned int queuenum);
+ struct nf_hook_state *state, unsigned int queuenum, unsigned int queuetype);
 void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
 int __init netfilter_queue_init(void);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8a8b2ab..91ba768 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
 static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
+ rcu_assign_pointer(queue_imq_handler, qh);
+EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
+void nf_unregister_queue_imq_handler(void)
+ RCU_INIT_POINTER(queue_imq_handler, NULL);
+ synchronize_rcu();
+EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
 void nf_register_queue_handler(const struct nf_queue_handler *qh)
@@ -129,7 +146,8 @@ void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
 int nf_queue(struct sk_buff *skb,
 struct nf_hook_ops *elem,
 struct nf_hook_state *state,
- unsigned int queuenum)
+ unsigned int queuenum,
+ unsigned int queuetype)
 int status = -ENOENT;
 struct nf_queue_entry *entry = NULL;
@@ -139,7 +157,17 @@ int nf_queue(struct sk_buff *skb,
 /* QUEUE == DROP if no one is waiting, to be safe. */
- qh = rcu_dereference(queue_handler);
+ if (queuetype == NF_IMQ_QUEUE) {
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ qh = rcu_dereference(queue_imq_handler);
+ qh = rcu_dereference(queue_handler);
@@ -225,8 +253,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+ case NF_IMQ_QUEUE:
 err = nf_queue(skb, elem, &entry->state,
- verdict >> NF_VERDICT_QBITS);
+ verdict >> NF_VERDICT_QBITS,
+ verdict & NF_VERDICT_MASK);
 if (err == -ECANCELED)
diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
new file mode 100644
index 0000000..86d7b84
--- /dev/null
+++ b/net/netfilter/xt_IMQ.c
+ * This target marks packets to be enqueued to an imq device
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IMQ.h>
+#include <linux/imq.h>
+static unsigned int imq_target(struct sk_buff *pskb,
+ const struct xt_action_param *par)
+ const struct xt_imq_info *mr = par->targinfo;
+ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+ return XT_CONTINUE;
+static int imq_checkentry(const struct xt_tgchk_param *par)
+ struct xt_imq_info *mr = par->targinfo;
+ if (mr->todev > IMQ_MAX_DEVS - 1) {
+ pr_warn("IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS - 1);
+static struct xt_target xt_imq_reg[] __read_mostly = {
+ .family = AF_INET,
+ .checkentry = imq_checkentry,
+ .target = imq_target,
+ .targetsize = sizeof(struct xt_imq_info),
+ .table = "mangle",
+ .family = AF_INET6,
+ .checkentry = imq_checkentry,
+ .target = imq_target,
+ .targetsize = sizeof(struct xt_imq_info),
+ .table = "mangle",
+static int __init imq_init(void)
+ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+static void __exit imq_fini(void)
+ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+module_init(imq_init);
+module_exit(imq_fini);
+MODULE_AUTHOR("https://github.com/imq/linuximq");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_IMQ");
+MODULE_ALIAS("ip6t_IMQ");
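
For reference, user space pairs this target with a qdisc on the matching imq device; the canonical example from the project wiki (assumed still current) is along the lines of "ip link set imq0 up", then "iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0" to mark packets, and finally "tc qdisc add dev imq0 root handle 1: htb" to shape the traffic that the hook redirects.
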
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6efca30..a4e448f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -108,6 +108,14 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate)
+ return dequeue_skb(q, validate, &packets);
+EXPORT_SYMBOL(qdisc_dequeue_skb);
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 struct netdev_queue *dev_queue,