net: add Intermediate Queueing Device (imq)

From: Jussi Kivilinna <jussi.kivilinna@iki.fi>

This patch is for kernel version 3.12.4+.

See: http://linuximq.net/
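Example usage (illustrative only; the interface name, rates and classes
below are placeholders, not part of this patch). Traffic arriving on
eth0 is marked for imq0 with the IMQ target, then shaped on imq0:

    iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0
    ip link set imq0 up
    tc qdisc add dev imq0 root handle 1: htb default 10
    tc class add dev imq0 parent 1: classid 1:10 htb rate 2mbit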
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
 drivers/net/Kconfig                     |  119 ++++
 drivers/net/Makefile                    |    1 
 drivers/net/imq.c                       | 1007 +++++++++++++++++++++++++++++++
 include/linux/imq.h                     |   13 
 include/linux/netfilter/xt_IMQ.h        |    9 
 include/linux/netfilter_ipv4/ipt_IMQ.h  |   10 
 include/linux/netfilter_ipv6/ip6t_IMQ.h |   10 
 include/linux/skbuff.h                  |   22 +
 include/net/netfilter/nf_queue.h        |    6 
 include/uapi/linux/netfilter.h          |    3 
 net/core/dev.c                          |    8 +
 net/core/skbuff.c                       |  112 +++
 net/ipv6/ip6_output.c                   |   10 
 net/netfilter/Kconfig                   |   12 
 net/netfilter/Makefile                  |    1 
 net/netfilter/core.c                    |    6 
 net/netfilter/nf_internals.h            |    2 
 net/netfilter/nf_queue.c                |   36 +
 net/netfilter/xt_IMQ.c                  |   72 ++
 19 files changed, 1449 insertions(+), 10 deletions(-)
 create mode 100644 drivers/net/imq.c
 create mode 100644 include/linux/imq.h
 create mode 100644 include/linux/netfilter/xt_IMQ.h
 create mode 100644 include/linux/netfilter_ipv4/ipt_IMQ.h
 create mode 100644 include/linux/netfilter_ipv6/ip6t_IMQ.h
 create mode 100644 net/netfilter/xt_IMQ.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b45b240..5a20da0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -203,6 +203,125 @@ config RIONET_RX_SIZE
 
+config IMQ
+	tristate "IMQ (intermediate queueing device) support"
+	depends on NETDEVICES && NETFILTER
+	---help---
+	  The IMQ device(s) is used as a placeholder for QoS queueing
+	  disciplines. Every packet entering/leaving the IP stack can be
+	  directed through the IMQ device where it's enqueued/dequeued to the
+	  attached qdisc. This allows you to treat network devices as classes
+	  and distribute bandwidth among them. Iptables is used to specify
+	  through which IMQ device, if any, packets travel.
+
+	  More information at: http://www.linuximq.net/
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called imq.  If unsure, say N.
+
+choice
+	prompt "IMQ behavior (PRE/POSTROUTING)"
+	depends on IMQ
+	default IMQ_BEHAVIOR_AB
+	help
+	  This setting defines how IMQ behaves in respect to its
+	  hooking in PREROUTING and POSTROUTING.
+
+	  IMQ can work in any of the following ways:
+
+	      PREROUTING   |      POSTROUTING
+	  -----------------|-------------------
+	  #1  After NAT    |      After NAT
+	  #2  After NAT    |      Before NAT
+	  #3  Before NAT   |      After NAT
+	  #4  Before NAT   |      Before NAT
+
+	  The default behavior is to hook after NAT on PREROUTING
+	  and before NAT on POSTROUTING (#2).
+
+	  These settings are especially useful when trying to use IMQ
+	  to shape NATed clients.
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_AA
+	bool "IMQ AA"
+	help
+	  This setting defines how IMQ behaves in respect to its
+	  hooking in PREROUTING and POSTROUTING.
+
+	  Choosing this option will make IMQ hook like this:
+
+	  PREROUTING:   After NAT
+	  POSTROUTING:  After NAT
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+	bool "IMQ AB"
+	help
+	  This setting defines how IMQ behaves in respect to its
+	  hooking in PREROUTING and POSTROUTING.
+
+	  Choosing this option will make IMQ hook like this:
+
+	  PREROUTING:   After NAT
+	  POSTROUTING:  Before NAT
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+	bool "IMQ BA"
+	help
+	  This setting defines how IMQ behaves in respect to its
+	  hooking in PREROUTING and POSTROUTING.
+
+	  Choosing this option will make IMQ hook like this:
+
+	  PREROUTING:   Before NAT
+	  POSTROUTING:  After NAT
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+	bool "IMQ BB"
+	help
+	  This setting defines how IMQ behaves in respect to its
+	  hooking in PREROUTING and POSTROUTING.
+
+	  Choosing this option will make IMQ hook like this:
+
+	  PREROUTING:   Before NAT
+	  POSTROUTING:  Before NAT
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
+endchoice
+
+config IMQ_NUM_DEVS
+	int "Number of IMQ devices"
+	range 2 16
+	depends on IMQ
+	default "16"
+	help
+	  This setting defines how many IMQ devices will be created.
+
+	  The default value is 16.
+
+	  More information can be found at: www.linuximq.net
+
+	  If not sure, leave the default settings alone.
+
 config TUN
 	tristate "Universal TUN/TAP device driver support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3fef8a8..12dafc0 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_IFB) += ifb.o
+obj-$(CONFIG_IMQ) += imq.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
 obj-$(CONFIG_MACVTAP) += macvtap.o
 obj-$(CONFIG_MII) += mii.o
diff --git a/drivers/net/imq.c b/drivers/net/imq.c
new file mode 100644
index 0000000..801bc8c
--- /dev/null
+++ b/drivers/net/imq.c
@@ -0,0 +1,1007 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Patrick McHardy, <kaber@trash.net>
+ *
+ *		The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits:	Jan Rafaj <imq2t@cedric.vabo.cz>
+ *		 - Update patch to 2.4.21
+ *		Sebastian Strollo <sstrollo@nortelnetworks.com>
+ *		 - Fix "Dead-loop on netdevice imq"-issue
+ *		Marcel Sebek <sebek64@post.cz>
+ *		 - Update to 2.6.2-rc1
+ *
+ *	After some time of inactivity there is a group taking care
+ *	of IMQ again: http://www.linuximq.net
+ *
+ *	2004/06/30 - New version of IMQ patch to kernels <=2.6.7
+ *	including the following changes:
+ *
+ *	- Correction of ipv6 support "+"s issue (Hasso Tepper)
+ *	- Correction of imq_init_devs() issue that resulted in
+ *	  kernel OOPS unloading IMQ as module (Norbert Buchmuller)
+ *	- Addition of functionality to choose number of IMQ devices
+ *	  during kernel config (Andre Correa)
+ *	- Addition of functionality to choose how IMQ hooks on
+ *	  PRE and POSTROUTING (after or before NAT) (Andre Correa)
+ *	- Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
+ *
+ *	2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
+ *	released with almost no problems. 2.6.14-x was released
+ *	with some important changes: nfcache was removed; after
+ *	some weeks of trouble we figured out that some IMQ fields
+ *	in skb were missing in skbuff.c - skb_clone and copy_skb_header.
+ *	These functions are correctly patched by this new patch version.
+ *
+ *	Thanks for all who helped to figure out all the problems with
+ *	2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
+ *	Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ *	I didn't forget anybody). I apologize again for my lack of time.
+ *
+ *	2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
+ *	of qdisc_restart() and moved qdisc_run() to tasklet to avoid
+ *	recursive locking. New initialization routines to fix 'rmmod' not
+ *	working anymore. Used code from ifb.c. (Jussi Kivilinna)
+ *
+ *	2008/08/06 - 2.6.26 - (JK)
+ *	- Replaced tasklet with 'netif_schedule()'.
+ *	- Cleaned up and added comments for imq_nf_queue().
+ *
+ *	2009/04/12
+ *	- Add skb_save_cb/skb_restore_cb helper functions for backing up
+ *	  the control buffer. This is needed because the qdisc layer on
+ *	  kernels 2.6.27 and newer overwrites the control buffer.
+ *	  (Jussi Kivilinna)
+ *	- Add better locking for IMQ device. Hopefully this will solve
+ *	  SMP issues. (Jussi Kivilinna)
+ *	- Port to 2.6.27
+ *	- Port to 2.6.28
+ *	- Port to 2.6.29 + fix rmmod not working
+ *
+ *	2009/04/20 - (Jussi Kivilinna)
+ *	- Use netdevice feature flags to avoid extra packet handling
+ *	  by core networking layer and possibly increase performance.
+ *
+ *	2009/09/26 - (Jussi Kivilinna)
+ *	- Add imq_nf_reinject_lockless to fix deadlock with
+ *	  imq_nf_queue/imq_nf_reinject.
+ *
+ *	2009/12/08 - (Jussi Kivilinna)
+ *	- Port to 2.6.32
+ *	- Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
+ *	- Also add better error checking for skb->nf_queue_entry usage
+ *
+ *	2010/02/25 - (Jussi Kivilinna)
+ *	- Port to 2.6.33
+ *
+ *	2010/08/15 - (Jussi Kivilinna)
+ *	- Port to 2.6.35
+ *	- Simplify hook registration by using nf_register_hooks.
+ *	- nf_reinject doesn't need spinlock around it, therefore remove
+ *	  imq_nf_reinject function. Other nf_reinject users protect
+ *	  their own data with spinlock. With IMQ, however, all data
+ *	  needed is stored per skbuff, so no locking is needed.
+ *	- Changed IMQ to use 'separate' NF_IMQ_QUEUE instead of
+ *	  NF_QUEUE, this allows working coexistence of IMQ and other
+ *	  NF_QUEUE users.
+ *	- Make IMQ multi-queue. Number of IMQ device queues can be
+ *	  increased with 'numqueues' module parameter. Default number
+ *	  of queues is 1, in other words by default IMQ works as
+ *	  single-queue device. Multi-queue selection is based on
+ *	  IFB multi-queue patch by Changli Gao <xiaosuo@gmail.com>.
+ *
+ *	2011/03/18 - (Jussi Kivilinna)
+ *	- Port to 2.6.38
+ *
+ *	2011/07/12 - (syoder89@gmail.com)
+ *	- Crash fix that happens when the receiving interface has more
+ *	  than one queue (add missing skb_set_queue_mapping in
+ *	  imq_select_queue).
+ *
+ *	2011/07/26 - (Jussi Kivilinna)
+ *	- Add queue mapping checks for packets exiting IMQ.
+ *	- Port to 3.0
+ *
+ *	2011/08/16 - (Jussi Kivilinna)
+ *	- Clear IFF_TX_SKB_SHARING flag that was added for linux 3.0.2
+ *
+ *	2011/11/03 - Germano Michel <germanomichel@gmail.com>
+ *	- Fix IMQ for net namespaces
+ *
+ *	2011/11/04 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *	- Port to 3.1
+ *	- Clean-up, move 'get imq device pointer by imqX name' to
+ *	  separate function from imq_nf_queue().
+ *
+ *	2012/01/05 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *	- Port to 3.2
+ *
+ *	2012/03/19 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *	- Port to 3.3
+ *
+ *	2012/12/12 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *	- Port to 3.7
+ *	- Fix checkpatch.pl warnings
+ *
+ *	2013/09/10 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *	- Fixed GSO handling for 3.10, see imq_nf_queue() for comments.
+ *	- Don't copy skb->cb_next when copying or cloning skbuffs.
+ *
+ *	2013/09/16 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *	- Port to 3.11
+ *
+ *	2013/11/12 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *	- Port to 3.12
+ *
+ *	Also, many thanks to Pablo Sebastian Greco for making the initial
+ *	patch and to those who helped the testing.
+ *
+ *	More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	#include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+#include <net/sock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
+
+static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
+				struct sk_buff *pskb,
+				const struct net_device *indev,
+				const struct net_device *outdev,
+				int (*okfn)(struct sk_buff *));
+
+static struct nf_hook_ops imq_ops[] = {
+	{
+	/* imq_ingress_ipv4 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+		.priority	= NF_IP_PRI_MANGLE + 1,
+#else
+		.priority	= NF_IP_PRI_NAT_DST + 1,
+#endif
+	},
+	{
+	/* imq_egress_ipv4 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+		.priority	= NF_IP_PRI_LAST,
+#else
+		.priority	= NF_IP_PRI_NAT_SRC - 1,
+#endif
+	},
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	{
+	/* imq_ingress_ipv6 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum	= NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+		.priority	= NF_IP6_PRI_MANGLE + 1,
+#else
+		.priority	= NF_IP6_PRI_NAT_DST + 1,
+#endif
+	},
+	{
+	/* imq_egress_ipv6 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum	= NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+		.priority	= NF_IP6_PRI_LAST,
+#else
+		.priority	= NF_IP6_PRI_NAT_SRC - 1,
+#endif
+	},
+#endif
+};
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static int numdevs = CONFIG_IMQ_NUM_DEVS;
+#else
+static int numdevs = IMQ_MAX_DEVS;
+#endif
+
+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
+
+#define IMQ_MAX_QUEUES 32
+static int numqueues = 1;
+static u32 imq_hashrnd;
+
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+			sizeof(struct pppoe_hdr)));
+}
+
+static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
+{
+	unsigned int pull_len;
+	u16 protocol = skb->protocol;
+	u32 addr1, addr2;
+	u32 hash, ihl = 0;
+	union {
+		u16 in16[2];
+		u32 in32;
+	} ports;
+	u8 ip_proto;
+
+	pull_len = 0;
+
+recheck:
+	switch (protocol) {
+	case htons(ETH_P_8021Q): {
+		if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
+			goto other;
+
+		pull_len += VLAN_HLEN;
+		skb->network_header += VLAN_HLEN;
+
+		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+		goto recheck;
+	}
+
+	case htons(ETH_P_PPP_SES): {
+		if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
+			goto other;
+
+		pull_len += PPPOE_SES_HLEN;
+		skb->network_header += PPPOE_SES_HLEN;
+
+		protocol = pppoe_proto(skb);
+		goto recheck;
+	}
+
+	case htons(ETH_P_IP): {
+		const struct iphdr *iph = ip_hdr(skb);
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
+			goto other;
+
+		addr1 = iph->daddr;
+		addr2 = iph->saddr;
+
+		ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
+				 iph->protocol : 0;
+		ihl = ip_hdrlen(skb);
+
+		break;
+	}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6): {
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
+		__be16 fo = 0;
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
+			goto other;
+
+		addr1 = iph->daddr.s6_addr32[3];
+		addr2 = iph->saddr.s6_addr32[3];
+		ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
+				       &fo);
+		if (unlikely(ihl < 0))
+			goto other;
+
+		break;
+	}
+#endif
+	default:
+other:
+		if (pull_len != 0) {
+			skb_push(skb, pull_len);
+			skb->network_header -= pull_len;
+		}
+
+		return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
+	}
+
+	if (addr1 > addr2)
+		swap(addr1, addr2);
+
+	switch (ip_proto) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_DCCP:
+	case IPPROTO_ESP:
+	case IPPROTO_AH:
+	case IPPROTO_SCTP:
+	case IPPROTO_UDPLITE: {
+		if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
+			if (ports.in16[0] > ports.in16[1])
+				swap(ports.in16[0], ports.in16[1]);
+			break;
+		}
+		/* fall-through */
+	}
+	default:
+		ports.in32 = 0;
+		break;
+	}
+
+	if (pull_len != 0) {
+		skb_push(skb, pull_len);
+		skb->network_header -= pull_len;
+	}
+
+	hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
+
+	return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
+}
+
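+/*
+ * Tx queue selection for multi-queue IMQ devices, in order of
+ * preference: the recorded rx queue, the socket's cached tx queue,
+ * the socket hash, and finally the packet-content hash computed by
+ * imq_hash() above (mapped onto the queue range via multiply-shift).
+ */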
+static inline bool sk_tx_queue_recorded(struct sock *sk)
+{
+	return (sk_tx_queue_get(sk) >= 0);
+}
+
+static struct netdev_queue *imq_select_queue(struct net_device *dev,
+						struct sk_buff *skb)
+{
+	u16 queue_index = 0;
+	u32 hash;
+
+	if (likely(dev->real_num_tx_queues == 1))
+		goto out;
+
+	/* IMQ can be receiving ingress or egress packets. */
+
+	/* Check first for if rx_queue is set */
+	if (skb_rx_queue_recorded(skb)) {
+		queue_index = skb_get_rx_queue(skb);
+		goto out;
+	}
+
+	/* Check if socket has tx_queue set */
+	if (sk_tx_queue_recorded(skb->sk)) {
+		queue_index = sk_tx_queue_get(skb->sk);
+		goto out;
+	}
+
+	/* Try use socket hash */
+	if (skb->sk && skb->sk->sk_hash) {
+		hash = skb->sk->sk_hash;
+		queue_index =
+			(u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
+		goto out;
+	}
+
+	/* Generate hash from packet data */
+	queue_index = imq_hash(dev, skb);
+
+out:
+	if (unlikely(queue_index >= dev->real_num_tx_queues))
+		queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
+}
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+	return &dev->stats;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+	struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+	skb->nf_queue_entry = NULL;
+
+	if (entry) {
+		nf_queue_entry_release_refs(entry);
+		kfree(entry);
+	}
+
+	skb_restore_cb(skb); /* kfree backup */
+}
+
+static void imq_done_check_queue_mapping(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	unsigned int queue_index;
+
+	/* Don't let queue_mapping be left too large after exiting IMQ */
+	if (likely(skb->dev != dev && skb->dev != NULL)) {
+		queue_index = skb_get_queue_mapping(skb);
+		if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
+			queue_index = (u16)((u32)queue_index %
+						skb->dev->real_num_tx_queues);
+			skb_set_queue_mapping(skb, queue_index);
+		}
+	} else {
+		/* skb->dev was IMQ device itself or NULL, be on safe side and
+		 * just clear queue mapping.
+		 */
+		skb_set_queue_mapping(skb, 0);
+	}
+}
+
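+/*
+ * Transmit side of the pseudo-device: runs when the qdisc attached to
+ * an imq device dequeues a packet. The saved control buffer is
+ * restored and the packet is handed back to netfilter with an
+ * NF_ACCEPT verdict via nf_reinject().
+ */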
+static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+	skb->nf_queue_entry = NULL;
+	dev->trans_start = jiffies;
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+
+	if (unlikely(entry == NULL)) {
+		/* We don't know what is going on here.. packet is queued for
+		 * imq device, but (probably) not by us.
+		 *
+		 * If this packet was not sent here by imq_nf_queue(), then
+		 * skb_save_cb() was not used and kfree_skb() should not show:
+		 *   WARNING: IMQ: kfree_skb: skb->cb_next:..
+		 * and/or
+		 *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
+		 *
+		 * However if this message is shown, then IMQ is somehow broken
+		 * and you should report this to linuximq.net.
+		 */
+
+		/* imq_dev_xmit is black hole that eats all packets, report that
+		 * we eat this packet happily and increase dropped counters.
+		 */
+
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+
+		return NETDEV_TX_OK;
+	}
+
+	skb_restore_cb(skb); /* restore skb->cb */
+
+	skb->imq_flags = 0;
+	skb->destructor = NULL;
+
+	imq_done_check_queue_mapping(skb, dev);
+
+	nf_reinject(entry, NF_ACCEPT);
+
+	return NETDEV_TX_OK;
+}
+
+static struct net_device *get_imq_device_by_index(int index)
+{
+	struct net_device *dev = NULL;
+	struct net *net;
+	char buf[8];
+
+	/* get device by name and cache result */
+	snprintf(buf, sizeof(buf), "imq%d", index);
+
+	/* Search device from all namespaces. */
+	for_each_net(net) {
+		dev = dev_get_by_name(net, buf);
+		if (dev)
+			break;
+	}
+
+	if (WARN_ON_ONCE(dev == NULL)) {
+		/* IMQ device not found. Exotic config? */
+		return ERR_PTR(-ENODEV);
+	}
+
+	imq_devs_cache[index] = dev;
+	dev_put(dev);
+
+	return dev;
+}
+
+static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
+{
+	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
+
+	if (entry) {
+		if (nf_queue_entry_get_refs(entry))
+			return entry;
+
+		kfree(entry);
+	}
+
+	return NULL;
+}
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+#endif
+
+static void free_entry(struct nf_queue_entry *entry)
+{
+	nf_queue_entry_release_refs(entry);
+	kfree(entry);
+}
+
+static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
+
+static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
+			      struct net_device *dev, struct sk_buff *skb)
+{
+	int ret = -ENOMEM;
+	struct nf_queue_entry *entry_seg;
+
+	nf_bridge_adjust_segmented_data(skb);
+
+	if (skb->next == NULL) { /* last packet, no need to copy entry */
+		struct sk_buff *gso_skb = entry->skb;
+
+		entry->skb = skb;
+		ret = __imq_nf_queue(entry, dev);
+		if (ret)
+			entry->skb = gso_skb;
+	} else {
+		skb->next = NULL;
+
+		entry_seg = nf_queue_entry_dup(entry);
+		if (entry_seg) {
+			entry_seg->skb = skb;
+			ret = __imq_nf_queue(entry_seg, dev);
+			if (ret)
+				free_entry(entry_seg);
+		}
+	}
+	return ret;
+}
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+{
+	struct sk_buff *skb, *segs;
+	struct net_device *dev;
+	unsigned int queued;
+	int index, retval, err;
+
+	index = entry->skb->imq_flags & IMQ_F_IFMASK;
+	if (unlikely(index > numdevs - 1)) {
+		if (net_ratelimit())
+			pr_warn("IMQ: invalid device specified, highest is %u\n",
+				numdevs - 1);
+		retval = -EINVAL;
+		goto out_no_dev;
+	}
+
+	/* check for imq device by index from cache */
+	dev = imq_devs_cache[index];
+	if (unlikely(!dev)) {
+		dev = get_imq_device_by_index(index);
+		if (IS_ERR(dev)) {
+			retval = PTR_ERR(dev);
+			goto out_no_dev;
+		}
+	}
+
+	if (unlikely(!(dev->flags & IFF_UP))) {
+		entry->skb->imq_flags = 0;
+		retval = -ECANCELED;
+		goto out_no_dev;
+	}
+
+	if (!skb_is_gso(entry->skb))
+		return __imq_nf_queue(entry, dev);
+
+	/* Since 3.10.x, GSO handling moved here as result of upstream commit
+	 * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
+	 * skb_gso_segment into nfnetlink_queue module).
+	 *
+	 * Following code replicates the GSO handling from
+	 * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
+	 */
+
+	skb = entry->skb;
+
+	switch (entry->pf) {
+	case NFPROTO_IPV4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case NFPROTO_IPV6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	}
+
+	nf_bridge_adjust_skb_data(skb);
+	segs = skb_gso_segment(skb, 0);
+	/* Does not use PTR_ERR to limit the number of error codes that can be
+	 * returned by nf_queue. For instance, callers rely on -ECANCELED to
+	 * mean 'ignore this hook'.
+	 */
+	if (IS_ERR(segs))
+		goto out_err;
+	queued = 0;
+	err = 0;
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		if (nskb && nskb->next)
+			nskb->cb_next = NULL;
+		err = __imq_nf_queue_gso(entry, dev, segs);
+		if (err == 0)
+			queued++;
+		else
+			kfree_skb(segs);
+		segs = nskb;
+	} while (segs != NULL);
+
+	if (queued) {
+		if (err) /* some segments are already queued */
+			free_entry(entry);
+		kfree_skb(skb);
+		return 0;
+	}
+
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
+	entry->skb = skb;
+	return -1;
+
+out_no_dev:
+	return retval;
+}
+
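+/*
+ * Enqueue one packet on the qdisc attached to the imq device. The skb
+ * is cloned if it already has an owner (a destructor is set), and
+ * skb->cb is backed up with skb_save_cb() because the qdisc layer
+ * overwrites the control buffer.
+ */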
+static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
+{
+	struct sk_buff *skb_orig, *skb, *skb_shared;
+	struct Qdisc *q;
+	struct netdev_queue *txq;
+	spinlock_t *root_lock;
+	int users;
+	int retval = -EINVAL;
+	unsigned int orig_queue_index;
+
+	dev->last_rx = jiffies;
+
+	skb = entry->skb;
+	skb_orig = NULL;
+
+	/* skb has owner? => make clone */
+	if (unlikely(skb->destructor)) {
+		skb_orig = skb;
+		skb = skb_clone(skb, GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			retval = -ENOMEM;
+			goto out;
+		}
+		skb->cb_next = NULL;
+		entry->skb = skb;
+	}
+
+	skb->nf_queue_entry = entry;
+
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
+
+	if (!skb->dev) {
+		/* skb->dev == NULL causes problems, try to find the cause. */
+		if (net_ratelimit()) {
+			dev_warn(&dev->dev,
+				 "received packet with skb->dev == NULL\n");
+		}
+		dev_kfree_skb(skb);
+		goto out;
+	}
+
+	/* Disables softirqs for lock below */
+	rcu_read_lock_bh();
+
+	/* Multi-queue selection */
+	orig_queue_index = skb_get_queue_mapping(skb);
+	txq = imq_select_queue(dev, skb);
+
+	q = rcu_dereference(txq->qdisc);
+	if (unlikely(!q->enqueue))
+		goto packet_not_eaten_by_imq_dev;
+
+	root_lock = qdisc_lock(q);
+	spin_lock(root_lock);
+
+	users = atomic_read(&skb->users);
+
+	skb_shared = skb_get(skb); /* increase reference count by one */
+
+	/* backup skb->cb, as qdisc layer will overwrite it */
+	skb_save_cb(skb_shared);
+	qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
+
+	if (likely(atomic_read(&skb_shared->users) == users + 1)) {
+		kfree_skb(skb_shared); /* decrease reference count by one */
+
+		skb->destructor = &imq_skb_destructor;
+
+		/* cloned? */
+		if (unlikely(skb_orig))
+			kfree_skb(skb_orig); /* free original */
+
+		spin_unlock(root_lock);
+		rcu_read_unlock_bh();
+
+		/* schedule qdisc dequeue */
+		__netif_schedule(q);
+
+		retval = 0;
+		goto out;
+	} else {
+		skb_restore_cb(skb_shared); /* restore skb->cb */
+		skb->nf_queue_entry = NULL;
+		/*
+		 * qdisc dropped packet and decreased skb reference count of
+		 * skb, so we don't want to free it again as that would
+		 * actually destroy the skb.
+		 */
+		spin_unlock(root_lock);
+		goto packet_not_eaten_by_imq_dev;
+	}
+
+packet_not_eaten_by_imq_dev:
+	skb_set_queue_mapping(skb, orig_queue_index);
+	rcu_read_unlock_bh();
+
+	/* cloned? restore original */
+	if (unlikely(skb_orig)) {
+		kfree_skb(skb);
+		entry->skb = skb_orig;
+	}
+
+	retval = -1;
+out:
+	return retval;
+}
+
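+/*
+ * The netfilter hook itself only flags packets: anything marked with
+ * IMQ_F_ENQUEUE by the xt_IMQ target gets the NF_IMQ_QUEUE verdict,
+ * which routes it to imq_nf_queue() via the registered queue handler;
+ * everything else passes through untouched.
+ */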
+static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
+				struct sk_buff *pskb,
+				const struct net_device *indev,
+				const struct net_device *outdev,
+				int (*okfn)(struct sk_buff *))
+{
+	return (pskb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
+}
+
+static int imq_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int imq_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+static const struct net_device_ops imq_netdev_ops = {
+	.ndo_open		= imq_open,
+	.ndo_stop		= imq_close,
+	.ndo_start_xmit		= imq_dev_xmit,
+	.ndo_get_stats		= imq_get_stats,
+};
+
+static void imq_setup(struct net_device *dev)
+{
+	dev->netdev_ops		= &imq_netdev_ops;
+	dev->type		= ARPHRD_VOID;
+	dev->mtu		= 16000; /* too small? */
+	dev->tx_queue_len	= 11000; /* too big? */
+	dev->flags		= IFF_NOARP;
+	dev->features		= NETIF_F_SG | NETIF_F_FRAGLIST |
+				  NETIF_F_GSO | NETIF_F_HW_CSUM |
+				  NETIF_F_HIGHDMA;
+	dev->priv_flags		&= ~(IFF_XMIT_DST_RELEASE |
+				     IFF_TX_SKB_SHARING);
+}
+
+static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	int ret = 0;
+
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+			ret = -EINVAL;
+			goto end;
+		}
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+			ret = -EADDRNOTAVAIL;
+			goto end;
+		}
+	}
+	return 0;
+end:
+	pr_warn("IMQ: imq_validate failed (%d)\n", ret);
+	return ret;
+}
+
+static struct rtnl_link_ops imq_link_ops __read_mostly = {
+	.kind		= "imq",
+	.setup		= imq_setup,
+	.validate	= imq_validate,
+};
+
+static const struct nf_queue_handler imq_nfqh = {
+	.outfn = imq_nf_queue,
+};
+
+static int __init imq_init_hooks(void)
+{
+	int ret;
+
+	nf_register_queue_imq_handler(&imq_nfqh);
+
+	ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+	if (ret < 0)
+		nf_unregister_queue_imq_handler();
+
+	return ret;
+}
+
+static int __init imq_init_one(int index)
+{
+	struct net_device *dev;
+	int ret;
+
+	dev = alloc_netdev_mq(0, "imq%d", imq_setup, numqueues);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = dev_alloc_name(dev, dev->name);
+	if (ret < 0)
+		goto fail;
+
+	dev->rtnl_link_ops = &imq_link_ops;
+	ret = register_netdevice(dev);
+	if (ret < 0)
+		goto fail;
+
+	return 0;
+fail:
+	free_netdev(dev);
+	return ret;
+}
+
+static int __init imq_init_devs(void)
+{
+	int err, i;
+
+	if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
+		pr_err("IMQ: numdevs has to be between 1 and %u\n",
+		       IMQ_MAX_DEVS);
+		return -EINVAL;
+	}
+
+	if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
+		pr_err("IMQ: numqueues has to be between 1 and %u\n",
+		       IMQ_MAX_QUEUES);
+		return -EINVAL;
+	}
+
+	get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
+
+	rtnl_lock();
+	err = __rtnl_link_register(&imq_link_ops);
+
+	for (i = 0; i < numdevs && !err; i++)
+		err = imq_init_one(i);
+
+	if (err) {
+		__rtnl_link_unregister(&imq_link_ops);
+		memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+	}
+	rtnl_unlock();
+
+	return err;
+}
+
+static int __init imq_init_module(void)
+{
+	int err;
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
+#endif
+
+	err = imq_init_devs();
+	if (err) {
+		pr_err("IMQ: Error trying imq_init_devs(net)\n");
+		return err;
+	}
+
+	err = imq_init_hooks();
+	if (err) {
+		pr_err("IMQ: Error trying imq_init_hooks()\n");
+		rtnl_link_unregister(&imq_link_ops);
+		memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+		return err;
+	}
+
+	pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d)\n",
+		numdevs, numqueues);
+
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+	pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
+#else
+	pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
+#endif
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+	pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
+#else
+	pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
+#endif
+
+	return 0;
+}
+
+static void __exit imq_unhook(void)
+{
+	nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+	nf_unregister_queue_imq_handler();
+}
+
+static void __exit imq_cleanup_devs(void)
+{
+	rtnl_link_unregister(&imq_link_ops);
+	memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+}
+
+static void __exit imq_exit_module(void)
+{
+	imq_unhook();
+	imq_cleanup_devs();
+	pr_info("IMQ driver unloaded successfully.\n");
+}
+
+module_init(imq_init_module);
+module_exit(imq_exit_module);
+
+module_param(numdevs, int, 0);
+module_param(numqueues, int, 0);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
+MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("imq");
diff --git a/include/linux/imq.h b/include/linux/imq.h
new file mode 100644
index 0000000..1babb09
--- /dev/null
+++ b/include/linux/imq.h
@@ -0,0 +1,13 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
+#define IMQ_F_BITS	5
+
+#define IMQ_F_IFMASK	0x0f
+#define IMQ_F_ENQUEUE	0x10
+
+#define IMQ_MAX_DEVS	(IMQ_F_IFMASK + 1)
+
+#endif /* _IMQ_H */
diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
new file mode 100644
index 0000000..9b07230
--- /dev/null
+++ b/include/linux/netfilter/xt_IMQ.h
@@ -0,0 +1,9 @@
+#ifndef _XT_IMQ_H
+#define _XT_IMQ_H
+
+struct xt_imq_info {
+	unsigned int todev;	/* target imq device */
+};
+
+#endif /* _XT_IMQ_H */
diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
new file mode 100644
index 0000000..7af320f
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
@@ -0,0 +1,10 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+
+#define ipt_imq_info xt_imq_info
+
+#endif /* _IPT_IMQ_H */
diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
new file mode 100644
index 0000000..198ac01
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
@@ -0,0 +1,10 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+
+#define ip6t_imq_info xt_imq_info
+
+#endif /* _IP6T_IMQ_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f66f346..d699b19 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
 #include <net/flow_keys.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -418,6 +421,9 @@ struct sk_buff {
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
 	char			cb[48] __aligned(8);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	void			*cb_next;
+#endif
 
 	unsigned long		_skb_refdst;
@@ -453,6 +459,9 @@ struct sk_buff {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	struct nf_queue_entry	*nf_queue_entry;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
@@ -490,6 +499,9 @@ struct sk_buff {
 	__u8			encapsulation:1;
 	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	__u8			imq_flags:IMQ_F_BITS;
+#endif
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -625,6 +637,12 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 	return (struct rtable *)skb_dst(skb);
 }
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern int skb_save_cb(struct sk_buff *skb);
+extern int skb_restore_cb(struct sk_buff *skb);
+#endif
+
 void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
@@ -2635,6 +2653,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	dst->imq_flags = src->imq_flags;
+	dst->nf_queue_entry = src->nf_queue_entry;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	dst->nf_bridge = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index aaba4bb..f6e92a4 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -29,6 +29,12 @@ struct nf_queue_handler {
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
+extern void nf_unregister_queue_imq_handler(void);
+#endif
 
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index f7dc0eb..58c46a9 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
 #define NF_QUEUE 3
 #define NF_REPEAT 4
 #define NF_STOP 5
-#define NF_MAX_VERDICT NF_STOP
+#define NF_IMQ_QUEUE 6
+#define NF_MAX_VERDICT NF_IMQ_QUEUE
 
 /* we overload the higher bits for encoding auxiliary data such as the queue
  * number or errno values. Not nice, but better than additional function
diff --git a/net/core/dev.c b/net/core/dev.c
index 3d13874..9842f21 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
 #include <linux/hashtable.h>
 #include <linux/vmalloc.h>
 #include <linux/if_macvlan.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 
 #include "net-sysfs.h"
 
@@ -2595,7 +2598,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+		if (!list_empty(&ptype_all) &&
+		    !(skb->imq_flags & IMQ_F_ENQUEUE))
+#else
 		if (!list_empty(&ptype_all))
+#endif
 			dev_queue_xmit_nit(skb, dev);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c28c7fe..a5f1888 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
+
+/* Control buffer save/restore for IMQ devices */
+struct skb_cb_table {
+	char			cb[48] __aligned(8);
+	void			*cb_next;
+	atomic_t		refcnt;
+};
+
+static DEFINE_SPINLOCK(skb_cb_store_lock);
+
+int skb_save_cb(struct sk_buff *skb)
+{
+	struct skb_cb_table *next;
+
+	next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
+	if (!next)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+
+	memcpy(next->cb, skb->cb, sizeof(skb->cb));
+	next->cb_next = skb->cb_next;
+
+	atomic_set(&next->refcnt, 1);
+
+	skb->cb_next = next;
+	return 0;
+}
+EXPORT_SYMBOL(skb_save_cb);
+
+int skb_restore_cb(struct sk_buff *skb)
+{
+	struct skb_cb_table *next;
+
+	if (!skb->cb_next)
+		return 0;
+
+	next = skb->cb_next;
+
+	BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+
+	memcpy(skb->cb, next->cb, sizeof(skb->cb));
+	skb->cb_next = next->cb_next;
+
+	spin_lock(&skb_cb_store_lock);
+
+	if (atomic_dec_and_test(&next->refcnt))
+		kmem_cache_free(skbuff_cb_store_cache, next);
+
+	spin_unlock(&skb_cb_store_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(skb_restore_cb);
+
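+/*
+ * Share the stored cb backup with a copied/cloned skb: both skbs point
+ * at the same backup entry and its refcount is bumped, so save/restore
+ * stays balanced across clones. (Currently unused, see
+ * __copy_skb_header below.)
+ */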
+static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
+{
+	struct skb_cb_table *next;
+	struct sk_buff *old;
+
+	if (!__old->cb_next) {
+		new->cb_next = NULL;
+		return;
+	}
+
+	spin_lock(&skb_cb_store_lock);
+
+	old = (struct sk_buff *)__old;
+
+	next = old->cb_next;
+	atomic_inc(&next->refcnt);
+	new->cb_next = next;
+
+	spin_unlock(&skb_cb_store_lock);
+}
+#endif
 
 /**
  * skb_panic - private function for out-of-line support
@@ -577,6 +656,28 @@ static void skb_release_head_state(struct sk_buff *skb)
 		skb->destructor(skb);
 	}
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	/*
+	 * This should not happen. When it does, avoid memleak by restoring
+	 * the chain of cb-backups.
+	 */
+	while (skb->cb_next != NULL) {
+		if (net_ratelimit())
+			pr_warn("IMQ: kfree_skb: skb->cb_next: %p\n",
+				skb->cb_next);
+
+		skb_restore_cb(skb);
+	}
+	/*
+	 * This should not happen either, nf_queue_entry is nullified in
+	 * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
+	 * leaking entry pointers, maybe memory. We don't know if this is
+	 * pointer to already freed memory, or should this be freed.
+	 * If this happens we need to add refcounting, etc for nf_queue_entry.
+	 */
+	if (skb->nf_queue_entry && net_ratelimit())
+		pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
+#endif
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
@@ -709,6 +810,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->sp			= secpath_get(old->sp);
 #endif
 	memcpy(new->cb, old->cb, sizeof(old->cb));
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	new->cb_next = NULL;
+	/*skb_copy_stored_cb(new, old);*/
+#endif
 	new->csum		= old->csum;
 	new->local_df		= old->local_df;
 	new->pkt_type		= old->pkt_type;
@@ -3112,6 +3217,13 @@ void __init skb_init(void)
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
+						  sizeof(struct skb_cb_table),
+						  0,
+						  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						  NULL);
+#endif
 }
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b6fa35e..08dcfef 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -64,9 +64,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
 	struct in6_addr *nexthop;
 	int ret;
 
-	skb->protocol = htons(ETH_P_IPV6);
-	skb->dev = dev;
-
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
@@ -143,6 +140,13 @@ int ip6_output(struct sk_buff *skb)
 		return 0;
 	}
 
+	/*
+	 * IMQ-patch: moved setting skb->dev and skb->protocol from
+	 * ip6_finish_output2 to fix crashing at netif_skb_features().
+	 */
+	skb->protocol = htons(ETH_P_IPV6);
+	skb->dev = dev;
+
 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 			    ip6_finish_output,
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6e839b6..45ac31c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -630,6 +630,18 @@ config NETFILTER_XT_TARGET_LOG
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_IMQ
+	tristate '"IMQ" target support'
+	depends on NETFILTER_XTABLES
+	depends on IP_NF_MANGLE || IP6_NF_MANGLE
+	default m if NETFILTER_ADVANCED=n
+	help
+	  This option adds an `IMQ' target which is used to specify if and
+	  to which imq device packets should get enqueued/dequeued.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_MARK
 	tristate '"MARK" target support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index c3a0a12..9647f06 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 593b16e..740cd69 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -191,9 +191,11 @@ next_hook:
 		ret = NF_DROP_GETERR(verdict);
 		if (ret == 0)
 			ret = -EPERM;
-	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
+	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
+		   (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
 		int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-				   verdict >> NF_VERDICT_QBITS);
+				   verdict >> NF_VERDICT_QBITS,
+				   verdict & NF_VERDICT_MASK);
 		if (err < 0) {
 			if (err == -ECANCELED)
 				goto next_hook;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3deec99..c1a1397 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -29,7 +29,7 @@ extern int nf_queue(struct sk_buff *skb,
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
 	     unsigned int hook, struct net_device *indev,
 	     struct net_device *outdev, int (*okfn)(struct sk_buff *),
-	     unsigned int queuenum);
+	     unsigned int queuenum, unsigned int queuetype);
 int __init netfilter_queue_init(void);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5d24b1f..28317dc 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
 static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
+
+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
+{
+	rcu_assign_pointer(queue_imq_handler, qh);
+}
+EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
+
+void nf_unregister_queue_imq_handler(void)
+{
+	RCU_INIT_POINTER(queue_imq_handler, NULL);
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
+#endif
+
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
 void nf_register_queue_handler(const struct nf_queue_handler *qh)
@@ -105,7 +122,8 @@ int nf_queue(struct sk_buff *skb,
 	     struct net_device *indev,
 	     struct net_device *outdev,
 	     int (*okfn)(struct sk_buff *),
-	     unsigned int queuenum)
+	     unsigned int queuenum,
+	     unsigned int queuetype)
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
@@ -115,7 +133,17 @@ int nf_queue(struct sk_buff *skb,
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	rcu_read_lock();
 
-	qh = rcu_dereference(queue_handler);
+	if (queuetype == NF_IMQ_QUEUE) {
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+		qh = rcu_dereference(queue_imq_handler);
+#else
+		BUG();
+		goto err_unlock;
+#endif
+	} else {
+		qh = rcu_dereference(queue_handler);
+	}
+
 	if (!qh) {
 		status = -ESRCH;
@@ -205,9 +233,11 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		break;
 	case NF_QUEUE:
+	case NF_IMQ_QUEUE:
 		err = nf_queue(skb, elem, entry->pf, entry->hook,
 			       entry->indev, entry->outdev, entry->okfn,
-			       verdict >> NF_VERDICT_QBITS);
+			       verdict >> NF_VERDICT_QBITS,
+			       verdict & NF_VERDICT_MASK);
 		if (err < 0) {
 			if (err == -ECANCELED)
diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
new file mode 100644
index 0000000..1c3cd66
--- /dev/null
+++ b/net/netfilter/xt_IMQ.c
@@ -0,0 +1,72 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff *pskb,
+			       const struct xt_action_param *par)
+{
+	const struct xt_imq_info *mr = par->targinfo;
+
+	pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+
+	return XT_CONTINUE;
+}
+
+static int imq_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_imq_info *mr = par->targinfo;
+
+	if (mr->todev > IMQ_MAX_DEVS - 1) {
+		pr_warn("IMQ: invalid device specified, highest is %u\n",
+			IMQ_MAX_DEVS - 1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_target xt_imq_reg[] __read_mostly = {
+	{
+		.name		= "IMQ",
+		.family		= AF_INET,
+		.checkentry	= imq_checkentry,
+		.target		= imq_target,
+		.targetsize	= sizeof(struct xt_imq_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "IMQ",
+		.family		= AF_INET6,
+		.checkentry	= imq_checkentry,
+		.target		= imq_target,
+		.targetsize	= sizeof(struct xt_imq_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE
+	},
+};
+
+static int __init imq_init(void)
+{
+	return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+static void __exit imq_fini(void)
+{
+	xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+module_init(imq_init);
+module_exit(imq_fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_IMQ");
+MODULE_ALIAS("ip6t_IMQ");