1 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
2 index 95c32f2..93fada5 100644
3 --- a/drivers/net/Kconfig
4 +++ b/drivers/net/Kconfig
5 @@ -260,6 +260,125 @@ config RIONET_RX_SIZE
6         depends on RIONET
7         default "128"
8  
9 +config IMQ
10 +       tristate "IMQ (intermediate queueing device) support"
11 +       depends on NETDEVICES && NETFILTER
12 +       ---help---
13 +         The IMQ device is used as a placeholder for QoS queueing
14 +         disciplines. Every packet entering or leaving the IP stack can
15 +         be directed through the IMQ device, where it is enqueued to and
16 +         dequeued from the attached qdisc. This allows you to treat
17 +         network devices as classes and distribute bandwidth among them.
18 +         Iptables is used to specify through which IMQ device, if any, packets travel.
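+
+         A minimal usage sketch (assuming the companion iptables IMQ
+         target from the userspace patches is installed):
+
+           ip link set imq0 up
+           tc qdisc add dev imq0 root handle 1: htb default 20
+           iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0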
19 +
20 +         More information at: https://github.com/imq/linuximq
21 +
22 +         To compile this driver as a module, choose M here: the module
23 +         will be called imq.  If unsure, say N.
24 +
25 +choice
26 +       prompt "IMQ behavior (PRE/POSTROUTING)"
27 +       depends on IMQ
28 +       default IMQ_BEHAVIOR_AB
29 +       help
30 +         This setting defines how IMQ behaves with respect to its
31 +         hooking in PREROUTING and POSTROUTING.
32 +
33 +         IMQ can work in any of the following ways:
34 +
35 +             PREROUTING   |      POSTROUTING
36 +         -----------------|-------------------
37 +         #1  After NAT    |      After NAT
38 +         #2  After NAT    |      Before NAT
39 +         #3  Before NAT   |      After NAT
40 +         #4  Before NAT   |      Before NAT
41 +
42 +         The default behavior (#2, IMQ_BEHAVIOR_AB) is to hook after NAT
43 +         on PREROUTING and before NAT on POSTROUTING.
44 +
45 +         These settings are especially useful when trying to use IMQ
46 +         to shape NATed clients.
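+
+         With the default #2 (AB), for example, DNAT has already happened
+         on PREROUTING and SNAT has not yet happened on POSTROUTING, so
+         internal addresses stay matchable (a sketch; addresses are
+         illustrative):
+
+           iptables -t mangle -A PREROUTING  -d 192.168.0.0/24 -j IMQ --todev 0
+           iptables -t mangle -A POSTROUTING -s 192.168.0.0/24 -j IMQ --todev 1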
47 +
48 +         More information can be found at: https://github.com/imq/linuximq
49 +
50 +         If unsure, leave the default settings alone.
51 +
52 +config IMQ_BEHAVIOR_AA
53 +       bool "IMQ AA"
54 +       help
55 +         This setting defines how IMQ behaves with respect to its
56 +         hooking in PREROUTING and POSTROUTING.
57 +
58 +         Choosing this option will make IMQ hook like this:
59 +
60 +         PREROUTING:   After NAT
61 +         POSTROUTING:  After NAT
62 +
63 +         More information can be found at: https://github.com/imq/linuximq
64 +
65 +         If unsure, leave the default settings alone.
66 +
67 +config IMQ_BEHAVIOR_AB
68 +       bool "IMQ AB"
69 +       help
70 +         This setting defines how IMQ behaves with respect to its
71 +         hooking in PREROUTING and POSTROUTING.
72 +
73 +         Choosing this option will make IMQ hook like this:
74 +
75 +         PREROUTING:   After NAT
76 +         POSTROUTING:  Before NAT
77 +
78 +         More information can be found at: https://github.com/imq/linuximq
79 +
80 +         If unsure, leave the default settings alone.
81 +
82 +config IMQ_BEHAVIOR_BA
83 +       bool "IMQ BA"
84 +       help
85 +         This setting defines how IMQ behaves with respect to its
86 +         hooking in PREROUTING and POSTROUTING.
87 +
88 +         Choosing this option will make IMQ hook like this:
89 +
90 +         PREROUTING:   Before NAT
91 +         POSTROUTING:  After NAT
92 +
93 +         More information can be found at: https://github.com/imq/linuximq
94 +
95 +         If unsure, leave the default settings alone.
96 +
97 +config IMQ_BEHAVIOR_BB
98 +       bool "IMQ BB"
99 +       help
100 +         This setting defines how IMQ behaves with respect to its
101 +         hooking in PREROUTING and POSTROUTING.
102 +
103 +         Choosing this option will make IMQ hook like this:
104 +
105 +         PREROUTING:   Before NAT
106 +         POSTROUTING:  Before NAT
107 +
108 +         More information can be found at: https://github.com/imq/linuximq
109 +
110 +         If unsure, leave the default settings alone.
111 +
112 +endchoice
113 +
114 +config IMQ_NUM_DEVS
115 +       int "Number of IMQ devices"
116 +       range 2 16
117 +       depends on IMQ
118 +       default "16"
119 +       help
120 +         This setting defines how many IMQ devices will be created.
121 +
122 +         The default value is 16.
123 +
124 +         More information can be found at: https://github.com/imq/linuximq
125 +
126 +         If unsure, leave the default settings alone.
127 +
128  config TUN
129         tristate "Universal TUN/TAP device driver support"
130         depends on INET
131 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
132 index 7336cbd..d6d7ad4 100644
133 --- a/drivers/net/Makefile
134 +++ b/drivers/net/Makefile
135 @@ -11,6 +11,7 @@ obj-$(CONFIG_DUMMY) += dummy.o
136  obj-$(CONFIG_EQUALIZER) += eql.o
137  obj-$(CONFIG_IFB) += ifb.o
138  obj-$(CONFIG_MACSEC) += macsec.o
139 +obj-$(CONFIG_IMQ) += imq.o
140  obj-$(CONFIG_MACVLAN) += macvlan.o
141  obj-$(CONFIG_MACVTAP) += macvtap.o
142  obj-$(CONFIG_MII) += mii.o
143 diff --git a/drivers/net/imq.c b/drivers/net/imq.c
144 new file mode 100644
145 index 0000000..bc3b997
146 --- /dev/null
147 +++ b/drivers/net/imq.c
148 @@ -0,0 +1,907 @@
149 +/*
150 + *             Pseudo-driver for the intermediate queue device.
151 + *
152 + *             This program is free software; you can redistribute it and/or
153 + *             modify it under the terms of the GNU General Public License
154 + *             as published by the Free Software Foundation; either version
155 + *             2 of the License, or (at your option) any later version.
156 + *
157 + * Authors:    Patrick McHardy, <kaber@trash.net>
158 + *
159 + *            The first version was written by Martin Devera, <devik@cdi.cz>
160 + *
161 +                        See Credits.txt
162 + */
163 +
164 +#include <linux/module.h>
165 +#include <linux/kernel.h>
166 +#include <linux/moduleparam.h>
167 +#include <linux/list.h>
168 +#include <linux/skbuff.h>
169 +#include <linux/netdevice.h>
170 +#include <linux/etherdevice.h>
171 +#include <linux/rtnetlink.h>
172 +#include <linux/if_arp.h>
173 +#include <linux/netfilter.h>
174 +#include <linux/netfilter_ipv4.h>
175 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
176 +       #include <linux/netfilter_ipv6.h>
177 +#endif
178 +#include <linux/imq.h>
179 +#include <net/pkt_sched.h>
180 +#include <net/netfilter/nf_queue.h>
181 +#include <net/sock.h>
182 +#include <linux/ip.h>
183 +#include <linux/ipv6.h>
184 +#include <linux/if_vlan.h>
185 +#include <linux/if_pppox.h>
186 +#include <net/ip.h>
187 +#include <net/ipv6.h>
188 +
189 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
190 +
191 +static nf_hookfn imq_nf_hook;
192 +
193 +static struct nf_hook_ops imq_ops[] = {
194 +       {
195 +       /* imq_ingress_ipv4 */
196 +               .hook           = imq_nf_hook,
197 +               .pf             = PF_INET,
198 +               .hooknum        = NF_INET_PRE_ROUTING,
199 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
200 +               .priority       = NF_IP_PRI_MANGLE + 1,
201 +#else
202 +               .priority       = NF_IP_PRI_NAT_DST + 1,
203 +#endif
204 +       },
205 +       {
206 +       /* imq_egress_ipv4 */
207 +               .hook           = imq_nf_hook,
208 +               .pf             = PF_INET,
209 +               .hooknum        = NF_INET_POST_ROUTING,
210 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
211 +               .priority       = NF_IP_PRI_LAST,
212 +#else
213 +               .priority       = NF_IP_PRI_NAT_SRC - 1,
214 +#endif
215 +       },
216 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
217 +       {
218 +       /* imq_ingress_ipv6 */
219 +               .hook           = imq_nf_hook,
220 +               .pf             = PF_INET6,
221 +               .hooknum        = NF_INET_PRE_ROUTING,
222 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
223 +               .priority       = NF_IP6_PRI_MANGLE + 1,
224 +#else
225 +               .priority       = NF_IP6_PRI_NAT_DST + 1,
226 +#endif
227 +       },
228 +       {
229 +       /* imq_egress_ipv6 */
230 +               .hook           = imq_nf_hook,
231 +               .pf             = PF_INET6,
232 +               .hooknum        = NF_INET_POST_ROUTING,
233 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
234 +               .priority       = NF_IP6_PRI_LAST,
235 +#else
236 +               .priority       = NF_IP6_PRI_NAT_SRC - 1,
237 +#endif
238 +       },
239 +#endif
240 +};
241 +
242 +#if defined(CONFIG_IMQ_NUM_DEVS)
243 +static int numdevs = CONFIG_IMQ_NUM_DEVS;
244 +#else
245 +static int numdevs = IMQ_MAX_DEVS;
246 +#endif
247 +
248 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
249 +
250 +#define IMQ_MAX_QUEUES 32
251 +static int numqueues = 1;
252 +static u32 imq_hashrnd;
253 +static int imq_dev_accurate_stats = 1;
254 +
255 +static inline __be16 pppoe_proto(const struct sk_buff *skb)
256 +{
257 +       return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
258 +                       sizeof(struct pppoe_hdr)));
259 +}
260 +
261 +static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
262 +{
263 +       unsigned int pull_len;
264 +       u16 protocol = skb->protocol;
265 +       u32 addr1, addr2, hash;
266 +       int ihl = 0;    /* int: ipv6_skip_exthdr() returns < 0 on error */
267 +       union {
268 +               u16 in16[2];
269 +               u32 in32;
270 +       } ports;
271 +       u8 ip_proto;
272 +
273 +       pull_len = 0;
274 +
275 +recheck:
276 +       switch (protocol) {
277 +       case htons(ETH_P_8021Q): {
278 +               if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
279 +                       goto other;
280 +
281 +               pull_len += VLAN_HLEN;
282 +               skb->network_header += VLAN_HLEN;
283 +
284 +               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
285 +               goto recheck;
286 +       }
287 +
288 +       case htons(ETH_P_PPP_SES): {
289 +               if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
290 +                       goto other;
291 +
292 +               pull_len += PPPOE_SES_HLEN;
293 +               skb->network_header += PPPOE_SES_HLEN;
294 +
295 +               protocol = pppoe_proto(skb);
296 +               goto recheck;
297 +       }
298 +
299 +       case htons(ETH_P_IP): {
300 +               const struct iphdr *iph = ip_hdr(skb);
301 +
302 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
303 +                       goto other;
304 +
305 +               addr1 = iph->daddr;
306 +               addr2 = iph->saddr;
307 +
308 +               ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
309 +                                iph->protocol : 0;
310 +               ihl = ip_hdrlen(skb);
311 +
312 +               break;
313 +       }
314 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
315 +       case htons(ETH_P_IPV6): {
316 +               const struct ipv6hdr *iph = ipv6_hdr(skb);
317 +               __be16 fo = 0;
318 +
319 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
320 +                       goto other;
321 +
322 +               addr1 = iph->daddr.s6_addr32[3];
323 +               addr2 = iph->saddr.s6_addr32[3];
324 +               ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
325 +                                      &fo);
326 +               if (unlikely(ihl < 0))
327 +                       goto other;
328 +
329 +               break;
330 +       }
331 +#endif
332 +       default:
333 +other:
334 +               if (pull_len != 0) {
335 +                       skb_push(skb, pull_len);
336 +                       skb->network_header -= pull_len;
337 +               }
338 +
339 +               return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
340 +       }
341 +
342 +       if (addr1 > addr2)
343 +               swap(addr1, addr2);
344 +
345 +       switch (ip_proto) {
346 +       case IPPROTO_TCP:
347 +       case IPPROTO_UDP:
348 +       case IPPROTO_DCCP:
349 +       case IPPROTO_ESP:
350 +       case IPPROTO_AH:
351 +       case IPPROTO_SCTP:
352 +       case IPPROTO_UDPLITE: {
353 +               if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
354 +                       if (ports.in16[0] > ports.in16[1])
355 +                               swap(ports.in16[0], ports.in16[1]);
356 +                       break;
357 +               }
358 +               /* fall-through */
359 +       }
360 +       default:
361 +               ports.in32 = 0;
362 +               break;
363 +       }
364 +
365 +       if (pull_len != 0) {
366 +               skb_push(skb, pull_len);
367 +               skb->network_header -= pull_len;
368 +       }
369 +
370 +       hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
371 +
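+       /* Map the 32-bit hash onto [0, real_num_tx_queues) with a
+        * multiply-and-shift instead of a modulo; cheaper, same spread. */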
372 +       return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
373 +}
374 +
375 +static inline bool sk_tx_queue_recorded(struct sock *sk)
376 +{
377 +       return (sk_tx_queue_get(sk) >= 0);
378 +}
379 +
380 +static struct netdev_queue *imq_select_queue(struct net_device *dev,
381 +                                               struct sk_buff *skb)
382 +{
383 +       u16 queue_index = 0;
384 +       u32 hash;
385 +
386 +       if (likely(dev->real_num_tx_queues == 1))
387 +               goto out;
388 +
389 +       /* IMQ can be receiving ingress or egress packets. */
390 +
391 +       /* First check if the rx queue has been recorded */
392 +       if (skb_rx_queue_recorded(skb)) {
393 +               queue_index = skb_get_rx_queue(skb);
394 +               goto out;
395 +       }
396 +
397 +       /* Check if socket has tx_queue set */
398 +       if (sk_tx_queue_recorded(skb->sk)) {
399 +               queue_index = sk_tx_queue_get(skb->sk);
400 +               goto out;
401 +       }
402 +
403 +       /* Try using the socket hash */
404 +       if (skb->sk && skb->sk->sk_hash) {
405 +               hash = skb->sk->sk_hash;
406 +               queue_index =
407 +                       (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
408 +               goto out;
409 +       }
410 +
411 +       /* Generate hash from packet data */
412 +       queue_index = imq_hash(dev, skb);
413 +
414 +out:
415 +       if (unlikely(queue_index >= dev->real_num_tx_queues))
416 +               queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
417 +
418 +       skb_set_queue_mapping(skb, queue_index);
419 +       return netdev_get_tx_queue(dev, queue_index);
420 +}
421 +
422 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
423 +{
424 +       return &dev->stats;
425 +}
426 +
427 +/* called for packets kfree'd in qdiscs at places other than enqueue */
428 +static void imq_skb_destructor(struct sk_buff *skb)
429 +{
430 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
431 +
432 +       skb->nf_queue_entry = NULL;
433 +
434 +       if (entry) {
435 +               nf_queue_entry_release_refs(entry);
436 +               kfree(entry);
437 +       }
438 +
439 +       skb_restore_cb(skb); /* frees the cb backup */
440 +}
441 +
442 +static void imq_done_check_queue_mapping(struct sk_buff *skb,
443 +                                        struct net_device *dev)
444 +{
445 +       unsigned int queue_index;
446 +
447 +       /* Don't let queue_mapping be left too large after exiting IMQ */
448 +       if (likely(skb->dev != dev && skb->dev != NULL)) {
449 +               queue_index = skb_get_queue_mapping(skb);
450 +               if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
451 +                       queue_index = (u16)((u32)queue_index %
452 +                                               skb->dev->real_num_tx_queues);
453 +                       skb_set_queue_mapping(skb, queue_index);
454 +               }
455 +       } else {
456 +               /* skb->dev was the IMQ device itself or NULL; be on the safe side and
457 +                * just clear queue mapping.
458 +                */
459 +               skb_set_queue_mapping(skb, 0);
460 +       }
461 +}
462 +
463 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
464 +{
465 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
466 +
467 +       skb->nf_queue_entry = NULL;
468 +       netif_trans_update(dev);
469 +
470 +       dev->stats.tx_bytes += skb->len;
471 +       dev->stats.tx_packets++;
472 +
473 +       if (unlikely(entry == NULL)) {
474 +               /* We don't know what is going on here: the packet is queued
475 +                * for the imq device, but (probably) not by us.
476 +                *
477 +                * If this packet was not sent here by imq_nf_queue(), then
478 +                * skb_save_cb() was not used and kfree_skb() should not show:
479 +                *   WARNING: IMQ: kfree_skb: skb->cb_next:..
480 +                * and/or
481 +                *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
482 +                *
483 +                * However, if this message is shown, then IMQ is somehow broken
484 +                * and you should report this to linuximq.net.
485 +                */
486 +
487 +               /* imq_dev_xmit is a black hole that eats all packets; report
488 +                * that we ate this packet happily and bump the dropped counters.
489 +                */
490 +
491 +               dev->stats.tx_dropped++;
492 +               dev_kfree_skb(skb);
493 +
494 +               return NETDEV_TX_OK;
495 +       }
496 +
497 +       skb_restore_cb(skb); /* restore skb->cb */
498 +
499 +       skb->imq_flags = 0;
500 +       skb->destructor = NULL;
501 +
502 +       imq_done_check_queue_mapping(skb, dev);
503 +
504 +       nf_reinject(entry, NF_ACCEPT);
505 +
506 +       return NETDEV_TX_OK;
507 +}
508 +
509 +static struct net_device *get_imq_device_by_index(int index)
510 +{
511 +       struct net_device *dev = NULL;
512 +       struct net *net;
513 +       char buf[8];
514 +
515 +       /* get device by name and cache result */
516 +       snprintf(buf, sizeof(buf), "imq%d", index);
517 +
518 +       /* Search device from all namespaces. */
519 +       for_each_net(net) {
520 +               dev = dev_get_by_name(net, buf);
521 +               if (dev)
522 +                       break;
523 +       }
524 +
525 +       if (WARN_ON_ONCE(dev == NULL)) {
526 +               /* IMQ device not found. Exotic config? */
527 +               return ERR_PTR(-ENODEV);
528 +       }
529 +
530 +       imq_devs_cache[index] = dev;
531 +       dev_put(dev);
532 +
533 +       return dev;
534 +}
535 +
536 +static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
537 +{
538 +       struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
539 +       if (entry) {
540 +               nf_queue_entry_get_refs(entry);
541 +               return entry;
542 +       }
543 +       return NULL;
544 +}
545 +
546 +#ifdef CONFIG_BRIDGE_NETFILTER
547 +/* When called from bridge netfilter, skb->data must point to MAC header
548 + * before calling skb_gso_segment(). Otherwise, the original MAC header
549 + * is lost and the segmented skbs will be sent to the wrong destination.
550 + */
551 +static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
552 +{
553 +       if (skb->nf_bridge)
554 +               __skb_push(skb, skb->network_header - skb->mac_header);
555 +}
556 +
557 +static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
558 +{
559 +       if (skb->nf_bridge)
560 +               __skb_pull(skb, skb->network_header - skb->mac_header);
561 +}
562 +#else
563 +#define nf_bridge_adjust_skb_data(s) do {} while (0)
564 +#define nf_bridge_adjust_segmented_data(s) do {} while (0)
565 +#endif
566 +
567 +static void free_entry(struct nf_queue_entry *entry)
568 +{
569 +       nf_queue_entry_release_refs(entry);
570 +       kfree(entry);
571 +}
572 +
573 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
574 +
575 +static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
576 +                             struct net_device *dev, struct sk_buff *skb)
577 +{
578 +       int ret = -ENOMEM;
579 +       struct nf_queue_entry *entry_seg;
580 +
581 +       nf_bridge_adjust_segmented_data(skb);
582 +
583 +       if (skb->next == NULL) { /* last packet, no need to copy entry */
584 +               struct sk_buff *gso_skb = entry->skb;
585 +               entry->skb = skb;
586 +               ret = __imq_nf_queue(entry, dev);
587 +               if (ret)
588 +                       entry->skb = gso_skb;
589 +               return ret;
590 +       }
591 +
592 +       skb->next = NULL;
593 +
594 +       entry_seg = nf_queue_entry_dup(entry);
595 +       if (entry_seg) {
596 +               entry_seg->skb = skb;
597 +               ret = __imq_nf_queue(entry_seg, dev);
598 +               if (ret)
599 +                       free_entry(entry_seg);
600 +       }
601 +       return ret;
602 +}
603 +
604 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
605 +{
606 +       struct sk_buff *skb, *segs;
607 +       struct net_device *dev;
608 +       unsigned int queued;
609 +       int index, retval, err;
610 +
611 +       index = entry->skb->imq_flags & IMQ_F_IFMASK;
612 +       if (unlikely(index > numdevs - 1)) {
613 +               if (net_ratelimit())
614 +                       pr_warn("IMQ: invalid device specified, highest is %u\n",
615 +                               numdevs - 1);
616 +               retval = -EINVAL;
617 +               goto out_no_dev;
618 +       }
619 +
620 +       /* check for imq device by index from cache */
621 +       dev = imq_devs_cache[index];
622 +       if (unlikely(!dev)) {
623 +               dev = get_imq_device_by_index(index);
624 +               if (IS_ERR(dev)) {
625 +                       retval = PTR_ERR(dev);
626 +                       goto out_no_dev;
627 +               }
628 +       }
629 +
630 +       if (unlikely(!(dev->flags & IFF_UP))) {
631 +               entry->skb->imq_flags = 0;
632 +               retval = -ECANCELED;
633 +               goto out_no_dev;
634 +       }
635 +
636 +       /* Since 3.10.x, GSO handling moved here as result of upstream commit
637 +        * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
638 +        * skb_gso_segment into nfnetlink_queue module).
639 +        *
640 +        * The following code replicates the GSO handling from
641 +        * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
642 +        */
643 +
644 +       skb = entry->skb;
645 +
646 +       switch (entry->state.pf) {
647 +       case NFPROTO_IPV4:
648 +               skb->protocol = htons(ETH_P_IP);
649 +               break;
650 +       case NFPROTO_IPV6:
651 +               skb->protocol = htons(ETH_P_IPV6);
652 +               break;
653 +       }
654 +
655 +       if (!skb_is_gso(entry->skb))
656 +               return __imq_nf_queue(entry, dev);
657 +
658 +       nf_bridge_adjust_skb_data(skb);
659 +       segs = skb_gso_segment(skb, 0);
660 +       /* Does not use PTR_ERR to limit the number of error codes that can be
661 +        * returned by nf_queue.  For instance, callers rely on -ECANCELED to
662 +        * mean 'ignore this hook'.
663 +        */
664 +       err = -ENOBUFS;
665 +       if (IS_ERR(segs))
666 +               goto out_err;
667 +       queued = 0;
668 +       err = 0;
669 +       do {
670 +               struct sk_buff *nskb = segs->next;
671 +               if (nskb && nskb->next)
672 +                       nskb->cb_next = NULL;
673 +               if (err == 0)
674 +                       err = __imq_nf_queue_gso(entry, dev, segs);
675 +               if (err == 0)
676 +                       queued++;
677 +               else
678 +                       kfree_skb(segs);
679 +               segs = nskb;
680 +       } while (segs);
681 +
682 +       if (queued) {
683 +               if (err) /* some segments are already queued */
684 +                       free_entry(entry);
685 +               kfree_skb(skb);
686 +               return 0;
687 +       }
688 +
689 +out_err:
690 +       nf_bridge_adjust_segmented_data(skb);
691 +       retval = err;
692 +out_no_dev:
693 +       return retval;
694 +}
695 +
696 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
697 +{
698 +       struct sk_buff *skb_orig, *skb, *skb_shared, *skb_popd;
699 +       struct Qdisc *q;
700 +       struct sk_buff *to_free = NULL;
701 +       struct netdev_queue *txq;
702 +       spinlock_t *root_lock;
703 +       int users;
704 +       int retval = -EINVAL;
705 +       unsigned int orig_queue_index;
706 +
707 +       dev->last_rx = jiffies;
708 +
709 +       skb = entry->skb;
710 +       skb_orig = NULL;
711 +
712 +       /* skb has owner? => make clone */
713 +       if (unlikely(skb->destructor)) {
714 +               skb_orig = skb;
715 +               skb = skb_clone(skb, GFP_ATOMIC);
716 +               if (unlikely(!skb)) {
717 +                       retval = -ENOMEM;
718 +                       goto out;
719 +               }
720 +               skb->cb_next = NULL;
721 +               entry->skb = skb;
722 +       }
723 +
724 +       dev->stats.rx_bytes += skb->len;
725 +       dev->stats.rx_packets++;
726 +
727 +       if (!skb->dev) {
728 +               /* skb->dev == NULL causes problems; try to find the cause. */
729 +               if (net_ratelimit()) {
730 +                       dev_warn(&dev->dev,
731 +                                "received packet with skb->dev == NULL\n");
732 +                       dump_stack();
733 +               }
734 +
735 +               skb->dev = dev;
736 +       }
737 +
738 +       /* Disable softirqs for the lock below */
739 +       rcu_read_lock_bh();
740 +
741 +       /* Multi-queue selection */
742 +       orig_queue_index = skb_get_queue_mapping(skb);
743 +       txq = imq_select_queue(dev, skb);
744 +
745 +       q = rcu_dereference(txq->qdisc);
746 +       if (unlikely(!q->enqueue))
747 +               goto packet_not_eaten_by_imq_dev;
748 +
749 +       skb->nf_queue_entry = entry;
750 +       root_lock = qdisc_lock(q);
751 +       spin_lock(root_lock);
752 +
753 +       users = atomic_read(&skb->users);
754 +
755 +       skb_shared = skb_get(skb); /* increase reference count by one */
756 +
757 +       /* back up skb->cb, as the qdisc layer will overwrite it */
758 +       skb_save_cb(skb_shared);
759 +       qdisc_enqueue_root(skb_shared, q, &to_free); /* might kfree_skb */
760 +       if (likely(atomic_read(&skb_shared->users) == users + 1)) {
761 +               bool validate;
762 +
763 +               kfree_skb(skb_shared); /* decrease reference count by one */
764 +
765 +               skb->destructor = &imq_skb_destructor;
766 +
767 +               skb_popd = qdisc_dequeue_skb(q, &validate);
768 +
769 +               /* cloned? */
770 +               if (unlikely(skb_orig))
771 +                       kfree_skb(skb_orig); /* free original */
772 +
773 +               spin_unlock(root_lock);
774 +
775 +#if 0
776 +               /* schedule qdisc dequeue */
777 +               __netif_schedule(q);
778 +#else
779 +               if (likely(skb_popd)) {
780 +                       /* Note that we validate skb (GSO, checksum, ...) outside of locks */
781 +                       if (validate)
782 +                               skb_popd = validate_xmit_skb_list(skb_popd, dev);
783 +
784 +                       if (skb_popd) {
785 +                               int dummy_ret;
786 +                               int cpu = smp_processor_id(); /* ok because BHs are off */
787 +
788 +                               txq = skb_get_tx_queue(dev, skb_popd);
789 +                               /* The IMQ device will never be frozen or
790 +                                * stopped and transmit always succeeds, so we
791 +                                * skip checking its status and return value.
792 +                                */
793 +                               if (imq_dev_accurate_stats && txq->xmit_lock_owner != cpu) {
794 +                                       HARD_TX_LOCK(dev, txq, cpu);
795 +                                       if (!netif_xmit_frozen_or_stopped(txq)) {
796 +                                               dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
797 +                                       }
798 +                                       HARD_TX_UNLOCK(dev, txq);
799 +                               } else {
800 +                                       if (!netif_xmit_frozen_or_stopped(txq)) {
801 +                                               dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
802 +                                       }
803 +                               }
804 +                       }
805 +               } else {
806 +                       /* No skb ready, so schedule the qdisc */
807 +                       __netif_schedule(q);
808 +               }
809 +#endif
810 +               rcu_read_unlock_bh();
811 +               retval = 0;
812 +               goto out;
813 +       } else {
814 +               skb_restore_cb(skb_shared); /* restore skb->cb */
815 +               skb->nf_queue_entry = NULL;
816 +               /*
817 +                * The qdisc dropped the packet and decreased its reference
818 +                * count, so we must not try to free it again here; that
819 +                * would actually destroy the skb.
820 +                */
821 +               spin_unlock(root_lock);
822 +               goto packet_not_eaten_by_imq_dev;
823 +       }
824 +
825 +packet_not_eaten_by_imq_dev:
826 +       skb_set_queue_mapping(skb, orig_queue_index);
827 +       rcu_read_unlock_bh();
828 +
829 +       /* cloned? restore original */
830 +       if (unlikely(skb_orig)) {
831 +               kfree_skb(skb);
832 +               entry->skb = skb_orig;
833 +       }
834 +       retval = -1;
835 +out:
836 +       if (unlikely(to_free)) {
837 +               kfree_skb_list(to_free);
838 +       }
839 +       return retval;
840 +}
+
841 +static unsigned int imq_nf_hook(void *priv,
842 +                               struct sk_buff *skb,
843 +                               const struct nf_hook_state *state)
844 +{
845 +       return (skb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
846 +}
847 +
848 +static int imq_close(struct net_device *dev)
849 +{
850 +       netif_stop_queue(dev);
851 +       return 0;
852 +}
853 +
854 +static int imq_open(struct net_device *dev)
855 +{
856 +       netif_start_queue(dev);
857 +       return 0;
858 +}
859 +
860 +static const struct net_device_ops imq_netdev_ops = {
861 +       .ndo_open               = imq_open,
862 +       .ndo_stop               = imq_close,
863 +       .ndo_start_xmit         = imq_dev_xmit,
864 +       .ndo_get_stats          = imq_get_stats,
865 +};
866 +
867 +static void imq_setup(struct net_device *dev)
868 +{
869 +       dev->netdev_ops         = &imq_netdev_ops;
870 +       dev->type               = ARPHRD_VOID;
871 +       dev->mtu                = 16000; /* too small? */
872 +       dev->tx_queue_len       = 11000; /* too big? */
873 +       dev->flags              = IFF_NOARP;
874 +       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
875 +                                 NETIF_F_GSO | NETIF_F_HW_CSUM |
876 +                                 NETIF_F_HIGHDMA;
877 +       dev->priv_flags         &= ~(IFF_XMIT_DST_RELEASE |
878 +                                    IFF_TX_SKB_SHARING);
879 +}
880 +
881 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
882 +{
883 +       int ret = 0;
884 +
885 +       if (tb[IFLA_ADDRESS]) {
886 +               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
887 +                       ret = -EINVAL;
888 +                       goto end;
889 +               }
890 +               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
891 +                       ret = -EADDRNOTAVAIL;
892 +                       goto end;
893 +               }
894 +       }
895 +       return 0;
896 +end:
897 +       pr_warn("IMQ: imq_validate failed (%d)\n", ret);
898 +       return ret;
899 +}
900 +
901 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
902 +       .kind           = "imq",
903 +       .priv_size      = 0,
904 +       .setup          = imq_setup,
905 +       .validate       = imq_validate,
906 +};
907 +
908 +static const struct nf_queue_handler imq_nfqh = {
909 +       .outfn = imq_nf_queue,
910 +};
911 +
912 +static int __init imq_init_hooks(void)
913 +{
914 +       int ret;
915 +
916 +       nf_register_queue_imq_handler(&imq_nfqh);
917 +
918 +       ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
919 +       if (ret < 0)
920 +               nf_unregister_queue_imq_handler();
921 +
922 +       return ret;
923 +}
924 +
925 +static int __init imq_init_one(int index)
926 +{
927 +       struct net_device *dev;
928 +       int ret;
929 +
930 +       dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
931 +       if (!dev)
932 +               return -ENOMEM;
933 +
934 +       ret = dev_alloc_name(dev, dev->name);
935 +       if (ret < 0)
936 +               goto fail;
937 +
938 +       dev->rtnl_link_ops = &imq_link_ops;
939 +       ret = register_netdevice(dev);
940 +       if (ret < 0)
941 +               goto fail;
942 +
943 +       return 0;
944 +fail:
945 +       free_netdev(dev);
946 +       return ret;
947 +}
948 +
949 +static int __init imq_init_devs(void)
950 +{
951 +       int err, i;
952 +
953 +       if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
954 +               pr_err("IMQ: numdevs has to be between 1 and %u\n",
955 +                      IMQ_MAX_DEVS);
956 +               return -EINVAL;
957 +       }
958 +
959 +       if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
960 +               pr_err("IMQ: numqueues has to be between 1 and %u\n",
961 +                      IMQ_MAX_QUEUES);
962 +               return -EINVAL;
963 +       }
964 +
965 +       get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
966 +
967 +       rtnl_lock();
968 +       err = __rtnl_link_register(&imq_link_ops);
969 +
970 +       for (i = 0; i < numdevs && !err; i++)
971 +               err = imq_init_one(i);
972 +
973 +       if (err) {
974 +               __rtnl_link_unregister(&imq_link_ops);
975 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
976 +       }
977 +       rtnl_unlock();
978 +
979 +       return err;
980 +}
981 +
982 +static int __init imq_init_module(void)
983 +{
984 +       int err;
985 +
986 +#if defined(CONFIG_IMQ_NUM_DEVS)
987 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
988 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
989 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
990 +#endif
991 +
992 +       err = imq_init_devs();
993 +       if (err) {
994 +               pr_err("IMQ: Error trying imq_init_devs()\n");
995 +               return err;
996 +       }
997 +
998 +       err = imq_init_hooks();
999 +       if (err) {
1000 +               pr_err("IMQ: Error trying imq_init_hooks()\n");
1001 +               rtnl_link_unregister(&imq_link_ops);
1002 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
1003 +               return err;
1004 +       }
1005 +
1006 +       pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d, imq_dev_accurate_stats = %d)\n",
1007 +               numdevs, numqueues, imq_dev_accurate_stats);
1008 +
1009 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1010 +       pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
1011 +#else
1012 +       pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
1013 +#endif
1014 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1015 +       pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
1016 +#else
1017 +       pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
1018 +#endif
1019 +
1020 +       return 0;
1021 +}
1022 +
1023 +static void __exit imq_unhook(void)
1024 +{
1025 +       nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
1026 +       nf_unregister_queue_imq_handler();
1027 +}
1028 +
1029 +static void __exit imq_cleanup_devs(void)
1030 +{
1031 +       rtnl_link_unregister(&imq_link_ops);
1032 +       memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
1033 +}
1034 +
1035 +static void __exit imq_exit_module(void)
1036 +{
1037 +       imq_unhook();
1038 +       imq_cleanup_devs();
1039 +       pr_info("IMQ driver unloaded successfully.\n");
1040 +}
1041 +
1042 +module_init(imq_init_module);
1043 +module_exit(imq_exit_module);
1044 +
1045 +module_param(numdevs, int, 0);
1046 +module_param(numqueues, int, 0);
1047 +module_param(imq_dev_accurate_stats, int, 0);
1048 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
1049 +MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
1050 +MODULE_PARM_DESC(imq_dev_accurate_stats, "enable accurate imq device stats (default: 1)");
1051 +
1052 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1053 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1054 +MODULE_LICENSE("GPL");
1055 +MODULE_ALIAS_RTNL_LINK("imq");
1056 diff --git a/include/linux/imq.h b/include/linux/imq.h
1057 new file mode 100644
1058 index 0000000..1babb09
1059 --- /dev/null
1060 +++ b/include/linux/imq.h
1061 @@ -0,0 +1,13 @@
1062 +#ifndef _IMQ_H
1063 +#define _IMQ_H
1064 +
1065 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
1066 +#define IMQ_F_BITS     5
1067 +
1068 +#define IMQ_F_IFMASK   0x0f
1069 +#define IMQ_F_ENQUEUE  0x10
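+
+/* Encoding sketch (mirrors imq_target() in net/netfilter/xt_IMQ.c):
+ * to mark an skb for device imq2,
+ *   skb->imq_flags = (2 & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+ */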
1070 +
1071 +#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)
1072 +
1073 +#endif /* _IMQ_H */
1074 +
1075 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1076 index e16a2a9..4a1090a 100644
1077 --- a/include/linux/netdevice.h
1078 +++ b/include/linux/netdevice.h
1079 @@ -3669,6 +3669,19 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
1080         }                                               \
1081  }
1082  
1083 +#define HARD_TX_LOCK_BH(dev, txq) {                    \
1084 +       if ((dev->features & NETIF_F_LLTX) == 0) {      \
1085 +               __netif_tx_lock_bh(txq);                \
1086 +       }                                               \
1087 +}
1088 +
1089 +#define HARD_TX_UNLOCK_BH(dev, txq) {                  \
1090 +       if ((dev->features & NETIF_F_LLTX) == 0) {      \
1091 +               __netif_tx_unlock_bh(txq);              \
1092 +       }                                               \
1093 +}
1094 +
1095 +
1096  static inline void netif_tx_disable(struct net_device *dev)
1097  {
1098         unsigned int i;
1099 diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
1100 new file mode 100644
1101 index 0000000..9b07230
1102 --- /dev/null
1103 +++ b/include/linux/netfilter/xt_IMQ.h
1104 @@ -0,0 +1,9 @@
1105 +#ifndef _XT_IMQ_H
1106 +#define _XT_IMQ_H
1107 +
1108 +struct xt_imq_info {
1109 +       unsigned int todev;     /* target imq device */
1110 +};
1111 +
1112 +#endif /* _XT_IMQ_H */
1113 +
1114 diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
1115 new file mode 100644
1116 index 0000000..7af320f
1117 --- /dev/null
1118 +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
1119 @@ -0,0 +1,10 @@
1120 +#ifndef _IPT_IMQ_H
1121 +#define _IPT_IMQ_H
1122 +
1123 +/* Backwards compatibility for old userspace */
1124 +#include <linux/netfilter/xt_IMQ.h>
1125 +
1126 +#define ipt_imq_info xt_imq_info
1127 +
1128 +#endif /* _IPT_IMQ_H */
1129 +
1130 diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
1131 new file mode 100644
1132 index 0000000..198ac01
1133 --- /dev/null
1134 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
1135 @@ -0,0 +1,10 @@
1136 +#ifndef _IP6T_IMQ_H
1137 +#define _IP6T_IMQ_H
1138 +
1139 +/* Backwards compatibility for old userspace */
1140 +#include <linux/netfilter/xt_IMQ.h>
1141 +
1142 +#define ip6t_imq_info xt_imq_info
1143 +
1144 +#endif /* _IP6T_IMQ_H */
1145 +
1146 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1147 index 32810f2..4ce1d0a 100644
1148 --- a/include/linux/skbuff.h
1149 +++ b/include/linux/skbuff.h
1150 @@ -39,6 +39,10 @@
1151  #include <linux/in6.h>
1152  #include <linux/if_packet.h>
1153  #include <net/flow.h>
1154 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1155 +#include <linux/imq.h>
1156 +#endif
1157 +
1158  
1159  /* The interface for checksum offload between the stack and networking drivers
1160   * is as follows...
1161 @@ -654,6 +658,9 @@ struct sk_buff {
1162          * first. This is owned by whoever has the skb queued ATM.
1163          */
1164         char                    cb[48] __aligned(8);
1165 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1166 +       void                    *cb_next;
1167 +#endif
1168  
1169         unsigned long           _skb_refdst;
1170         void                    (*destructor)(struct sk_buff *skb);
1171 @@ -663,6 +670,9 @@ struct sk_buff {
1172  #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1173         struct nf_conntrack     *nfct;
1174  #endif
1175 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1176 +       struct nf_queue_entry   *nf_queue_entry;
1177 +#endif
1178  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1179         struct nf_bridge_info   *nf_bridge;
1180  #endif
1181 @@ -743,6 +753,9 @@ struct sk_buff {
1182         __u8                    offload_fwd_mark:1;
1183  #endif
1184         /* 2, 4 or 5 bit hole */
1185 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1186 +       __u8                    imq_flags:IMQ_F_BITS;
1187 +#endif
1188  
1189  #ifdef CONFIG_NET_SCHED
1190         __u16                   tc_index;       /* traffic control index */
1191 @@ -903,6 +916,12 @@ void kfree_skb_list(struct sk_buff *segs);
1192  void skb_tx_error(struct sk_buff *skb);
1193  void consume_skb(struct sk_buff *skb);
1194  void  __kfree_skb(struct sk_buff *skb);
1195 +
1196 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1197 +int skb_save_cb(struct sk_buff *skb);
1198 +int skb_restore_cb(struct sk_buff *skb);
1199 +#endif
1200 +
1201  extern struct kmem_cache *skbuff_head_cache;
1202  
1203  void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1204 @@ -3594,6 +3613,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
1205         if (copy)
1206                 dst->nfctinfo = src->nfctinfo;
1207  #endif
1208 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1209 +       dst->imq_flags = src->imq_flags;
1210 +       dst->nf_queue_entry = src->nf_queue_entry;
1211 +#endif
1212  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1213         dst->nf_bridge  = src->nf_bridge;
1214         nf_bridge_get(src->nf_bridge);
1215 diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
1216 index 2280cfe..ec8fa51 100644
1217 --- a/include/net/netfilter/nf_queue.h
1218 +++ b/include/net/netfilter/nf_queue.h
1219 @@ -30,6 +30,12 @@ struct nf_queue_handler {
1220  void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
1221  void nf_unregister_queue_handler(struct net *net);
1222  void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
1223 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1224 +
1225 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1226 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
1227 +void nf_unregister_queue_imq_handler(void);
1228 +#endif
1229  
1230  void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
1231  void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1232 diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
1233 index cd334c9..6757228 100644
1234 --- a/include/net/pkt_sched.h
1235 +++ b/include/net/pkt_sched.h
1236 @@ -105,6 +105,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
1237  
1238  void __qdisc_run(struct Qdisc *q);
1239  
1240 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate);
1241 +
1242  static inline void qdisc_run(struct Qdisc *q)
1243  {
1244         if (qdisc_run_begin(q))
1245 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1246 index e6aa0a2..08b37dc 100644
1247 --- a/include/net/sch_generic.h
1248 +++ b/include/net/sch_generic.h
1249 @@ -518,6 +518,13 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1250         return sch->enqueue(skb, sch, to_free);
1251  }
1252  
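+/* NET_XMIT_MASK strips the qdisc-internal __NET_XMIT_STOLEN/__NET_XMIT_BYPASS
+ * bits, so callers of qdisc_enqueue_root() see only plain NET_XMIT_* codes.
+ */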
1253 +static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch,
1254 +                                     struct sk_buff **to_free)
1255 +{
1256 +       qdisc_skb_cb(skb)->pkt_len = skb->len;
1257 +       return qdisc_enqueue(skb, sch, to_free) & NET_XMIT_MASK;
1258 +}
1259 +
1260  static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
1261  {
1262         return q->flags & TCQ_F_CPUSTATS;
1263 diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
1264 index d93f949..23fb6d1 100644
1265 --- a/include/uapi/linux/netfilter.h
1266 +++ b/include/uapi/linux/netfilter.h
1267 @@ -14,7 +14,8 @@
1268  #define NF_QUEUE 3
1269  #define NF_REPEAT 4
1270  #define NF_STOP 5
1271 -#define NF_MAX_VERDICT NF_STOP
1272 +#define NF_IMQ_QUEUE 6
1273 +#define NF_MAX_VERDICT NF_IMQ_QUEUE
1274  
1275  /* we overload the higher bits for encoding auxiliary data such as the queue
1276   * number or errno values. Not nice, but better than additional function
1277 diff --git a/net/core/dev.c b/net/core/dev.c
1278 index 6666b28..3e12add 100644
1279 --- a/net/core/dev.c
1280 +++ b/net/core/dev.c
1281 @@ -141,6 +141,9 @@
1282  #include <linux/netfilter_ingress.h>
1283  #include <linux/sctp.h>
1284  #include <linux/crash_dump.h>
1285 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1286 +#include <linux/imq.h>
1287 +#endif
1288  
1289  #include "net-sysfs.h"
1290  
1291 @@ -2906,7 +2909,12 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
1292         unsigned int len;
1293         int rc;
1294  
1295 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
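+       /* Note (assumption): packets still marked IMQ_F_ENQUEUE are skipped
+        * here so taps do not see them twice; they pass through xmit again
+        * once the imq device releases them.
+        */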
1296 +       if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) &&
1297 +               !(skb->imq_flags & IMQ_F_ENQUEUE))
1298 +#else
1299         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
1300 +#endif
1301                 dev_queue_xmit_nit(skb, dev);
1302  
1303         len = skb->len;
1304 @@ -2945,6 +2953,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
1305         return skb;
1306  }
1307  
1308 +EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
1309 +
1310  static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
1311                                           netdev_features_t features)
1312  {
1313 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1314 index 1e3e008..379236e 100644
1315 --- a/net/core/skbuff.c
1316 +++ b/net/core/skbuff.c
1317 @@ -82,6 +82,87 @@ struct kmem_cache *skbuff_head_cache __read_mostly;
1318  static struct kmem_cache *skbuff_fclone_cache __read_mostly;
1319  int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
1320  EXPORT_SYMBOL(sysctl_max_skb_frags);
1321 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1322 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1323 +#endif
1324 +
1325 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1326 +/* Control buffer save/restore for IMQ devices */
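+/* The qdisc layer reuses skb->cb for its own bookkeeping, so before an skb
+ * is handed to the imq qdisc its cb[] is snapshotted into a skb_cb_table
+ * node chained via cb_next; skb_restore_cb() pops and frees the backup.
+ */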
1327 +struct skb_cb_table {
1328 +       char                    cb[48] __aligned(8);
1329 +       void                    *cb_next;
1330 +       atomic_t                refcnt;
1331 +};
1332 +
1333 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1334 +
1335 +int skb_save_cb(struct sk_buff *skb)
1336 +{
1337 +       struct skb_cb_table *next;
1338 +
1339 +       next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1340 +       if (!next)
1341 +               return -ENOMEM;
1342 +
1343 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1344 +
1345 +       memcpy(next->cb, skb->cb, sizeof(skb->cb));
1346 +       next->cb_next = skb->cb_next;
1347 +
1348 +       atomic_set(&next->refcnt, 1);
1349 +
1350 +       skb->cb_next = next;
1351 +       return 0;
1352 +}
1353 +EXPORT_SYMBOL(skb_save_cb);
1354 +
1355 +int skb_restore_cb(struct sk_buff *skb)
1356 +{
1357 +       struct skb_cb_table *next;
1358 +
1359 +       if (!skb->cb_next)
1360 +               return 0;
1361 +
1362 +       next = skb->cb_next;
1363 +
1364 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1365 +
1366 +       memcpy(skb->cb, next->cb, sizeof(skb->cb));
1367 +       skb->cb_next = next->cb_next;
1368 +
1369 +       spin_lock(&skb_cb_store_lock);
1370 +
1371 +       if (atomic_dec_and_test(&next->refcnt))
1372 +               kmem_cache_free(skbuff_cb_store_cache, next);
1373 +
1374 +       spin_unlock(&skb_cb_store_lock);
1375 +
1376 +       return 0;
1377 +}
1378 +EXPORT_SYMBOL(skb_restore_cb);
1379 +
1380 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old) __attribute__ ((unused));
1381 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1382 +{
1383 +       struct skb_cb_table *next;
1384 +       struct sk_buff *old;
1385 +
1386 +       if (!__old->cb_next) {
1387 +               new->cb_next = NULL;
1388 +               return;
1389 +       }
1390 +
1391 +       spin_lock(&skb_cb_store_lock);
1392 +
1393 +       old = (struct sk_buff *)__old;
1394 +
1395 +       next = old->cb_next;
1396 +       atomic_inc(&next->refcnt);
1397 +       new->cb_next = next;
1398 +
1399 +       spin_unlock(&skb_cb_store_lock);
1400 +}
1401 +#endif
1402  
1403  /**
1404   *     skb_panic - private function for out-of-line support
1405 @@ -654,6 +735,28 @@ static void skb_release_head_state(struct sk_buff *skb)
1406                 WARN_ON(in_irq());
1407                 skb->destructor(skb);
1408         }
1409 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1410 +       /*
1411 +        * This should not happen. When it does, avoid memleak by restoring
1412 +        * the chain of cb-backups.
1413 +        */
1414 +       while (skb->cb_next != NULL) {
1415 +               if (net_ratelimit())
1416 +                       pr_warn("IMQ: kfree_skb: skb->cb_next: %08x\n",
1417 +                               (unsigned int)(uintptr_t)skb->cb_next);
1418 +
1419 +               skb_restore_cb(skb);
1420 +       }
1421 +       /*
1422 +        * This should not happen either: nf_queue_entry is nullified in
1423 +        * imq_dev_xmit(). If we have a non-NULL nf_queue_entry then we are
1424 +        * leaking entry pointers, maybe memory. We don't know whether this
1425 +        * points to already freed memory or whether it should be freed here.
1426 +        * If this happens we need to add refcounting etc. for nf_queue_entry.
1427 +        */
1428 +       if (skb->nf_queue_entry && net_ratelimit())
1429 +               pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1430 +#endif
1431  #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1432         nf_conntrack_put(skb->nfct);
1433  #endif
1434 @@ -843,6 +946,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1435         new->sp                 = secpath_get(old->sp);
1436  #endif
1437         __nf_copy(new, old, false);
1438 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1439 +       new->cb_next = NULL;
1440 +       /*skb_copy_stored_cb(new, old);*/
1441 +#endif
1442  
1443         /* Note : this field could be in headers_start/headers_end section
1444          * It is not yet because we do not want to have a 16 bit hole
1445 @@ -3463,6 +3570,13 @@ void __init skb_init(void)
1446                                                 0,
1447                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1448                                                 NULL);
1449 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1450 +       skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1451 +                                                 sizeof(struct skb_cb_table),
1452 +                                                 0,
1453 +                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1454 +                                                 NULL);
1455 +#endif
1456  }
1457  
1458  /**
1459 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1460 index 59eb4ed..8020b07 100644
1461 --- a/net/ipv6/ip6_output.c
1462 +++ b/net/ipv6/ip6_output.c
1463 @@ -66,9 +66,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
1464         struct in6_addr *nexthop;
1465         int ret;
1466  
1467 -       skb->protocol = htons(ETH_P_IPV6);
1468 -       skb->dev = dev;
1469 -
1470         if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
1471                 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1472  
1473 @@ -150,6 +147,13 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1474                 return 0;
1475         }
1476  
1477 +       /*
1478 +        * IMQ patch: setting skb->dev and skb->protocol was moved here from
1479 +        * ip6_finish_output2() to fix a crash in netif_skb_features().
1480 +        */
1481 +       skb->protocol = htons(ETH_P_IPV6);
1482 +       skb->dev = dev;
1483 +
1484         return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
1485                             net, sk, skb, NULL, dev,
1486                             ip6_finish_output,
1487 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
1488 index e8d56d9..1ed3468 100644
1489 --- a/net/netfilter/Kconfig
1490 +++ b/net/netfilter/Kconfig
1491 @@ -823,6 +823,18 @@ config NETFILTER_XT_TARGET_LOG
1492  
1493           To compile it as a module, choose M here.  If unsure, say N.
1494  
1495 +config NETFILTER_XT_TARGET_IMQ
1496 +       tristate '"IMQ" target support'
1497 +       depends on NETFILTER_XTABLES
1498 +       depends on IP_NF_MANGLE || IP6_NF_MANGLE
1499 +       select IMQ
1500 +       default m if NETFILTER_ADVANCED=n
1501 +       help
1502 +         This option adds an `IMQ' target which is used to specify if and
1503 +         to which imq device packets should get enqueued/dequeued.
1504 +
1505 +         To compile it as a module, choose M here.  If unsure, say N.
1506 +
1507  config NETFILTER_XT_TARGET_MARK
1508         tristate '"MARK" target support'
1509         depends on NETFILTER_ADVANCED
1510 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
1511 index c23c3c8..99911ef 100644
1512 --- a/net/netfilter/Makefile
1513 +++ b/net/netfilter/Makefile
1514 @@ -119,6 +119,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
1515  obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1516  obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1517  obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
1518 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1519  obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1520  obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
1521  obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
1522 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
1523 index 004af03..768a08b 100644
1524 --- a/net/netfilter/core.c
1525 +++ b/net/netfilter/core.c
1526 @@ -360,8 +360,11 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
1527                 ret = NF_DROP_GETERR(verdict);
1528                 if (ret == 0)
1529                         ret = -EPERM;
1530 -       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
1531 +       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
1532 +                  (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
1533                 ret = nf_queue(skb, state, &entry, verdict);
1534 +               if (ret == -ECANCELED)
1535 +                       goto next_hook;
1536                 if (ret == 1 && entry)
1537                         goto next_hook;
1538         }
1539 diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
1540 index 8f08d75..8d362c0 100644
1541 --- a/net/netfilter/nf_queue.c
1542 +++ b/net/netfilter/nf_queue.c
1543 @@ -27,6 +27,23 @@
1544   * receives, no matter what.
1545   */
1546  
1547 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1548 +static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
1549 +
1550 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1551 +{
1552 +       rcu_assign_pointer(queue_imq_handler, qh);
1553 +}
1554 +EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
1555 +
1556 +void nf_unregister_queue_imq_handler(void)
1557 +{
1558 +       RCU_INIT_POINTER(queue_imq_handler, NULL);
1559 +       synchronize_rcu();
1560 +}
1561 +EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
1562 +#endif
1563 +
1564  /* return EBUSY when somebody else is registered, return EEXIST if the
1565   * same handler is registered, return 0 in case of success. */
1566  void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
1567 @@ -108,16 +125,28 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
1568  }
1569  
1570  static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
1571 -                     unsigned int queuenum)
1572 +                     unsigned int verdict)
1573  {
1574         int status = -ENOENT;
1575         struct nf_queue_entry *entry = NULL;
1576         const struct nf_afinfo *afinfo;
1577         const struct nf_queue_handler *qh;
1578         struct net *net = state->net;
1579 +       unsigned int queuetype = verdict & NF_VERDICT_MASK;
1580 +       unsigned int queuenum  = verdict >> NF_VERDICT_QBITS;
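+       /* The verdict word packs the target queue number above the verdict
+        * code (NF_QUEUE_NR(n) builds (n << NF_VERDICT_QBITS) | NF_QUEUE),
+        * so the shift above recovers n for both NF_QUEUE and NF_IMQ_QUEUE.
+        */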
1581  
1582         /* QUEUE == DROP if no one is waiting, to be safe. */
1583 -       qh = rcu_dereference(net->nf.queue_handler);
1584 +       if (queuetype == NF_IMQ_QUEUE) {
1585 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1586 +               qh = rcu_dereference(queue_imq_handler);
1587 +#else
1588 +               BUG();
1589 +               goto err;
1590 +#endif
1591 +       } else {
1592 +               qh = rcu_dereference(net->nf.queue_handler);
1593 +       }
1594 +
1595         if (!qh) {
1596                 status = -ESRCH;
1597                 goto err;
1598 @@ -218,6 +247,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
1599                 local_bh_enable();
1600                 break;
1601         case NF_QUEUE:
1602 +       case NF_IMQ_QUEUE:
1603                 err = nf_queue(skb, &entry->state, &hook_entry, verdict);
1604                 if (err == 1) {
1605                         if (hook_entry)
1606 diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
1607 new file mode 100644
1608 index 0000000..f9c5817
1609 --- /dev/null
1610 +++ b/net/netfilter/xt_IMQ.c
1611 @@ -0,0 +1,72 @@
1612 +/*
1613 + * This target marks packets to be enqueued to an imq device
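+ *
+ * Rule sketch (assuming the matching iptables userspace extension):
+ *   iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0
+ * todev must be below IMQ_MAX_DEVS; imq_checkentry() rejects larger values.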
1614 + */
1615 +#include <linux/module.h>
1616 +#include <linux/skbuff.h>
1617 +#include <linux/netfilter/x_tables.h>
1618 +#include <linux/netfilter/xt_IMQ.h>
1619 +#include <linux/imq.h>
1620 +
1621 +static unsigned int imq_target(struct sk_buff *pskb,
1622 +                               const struct xt_action_param *par)
1623 +{
1624 +       const struct xt_imq_info *mr = par->targinfo;
1625 +
1626 +       pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1627 +
1628 +       return XT_CONTINUE;
1629 +}
1630 +
1631 +static int imq_checkentry(const struct xt_tgchk_param *par)
1632 +{
1633 +       struct xt_imq_info *mr = par->targinfo;
1634 +
1635 +       if (mr->todev > IMQ_MAX_DEVS - 1) {
1636 +               pr_warn("IMQ: invalid device specified, highest is %u\n",
1637 +                       IMQ_MAX_DEVS - 1);
1638 +               return -EINVAL;
1639 +       }
1640 +
1641 +       return 0;
1642 +}
1643 +
1644 +static struct xt_target xt_imq_reg[] __read_mostly = {
1645 +       {
1646 +               .name           = "IMQ",
1647 +               .family         = AF_INET,
1648 +               .checkentry     = imq_checkentry,
1649 +               .target         = imq_target,
1650 +               .targetsize     = sizeof(struct xt_imq_info),
1651 +               .table          = "mangle",
1652 +               .me             = THIS_MODULE
1653 +       },
1654 +       {
1655 +               .name           = "IMQ",
1656 +               .family         = AF_INET6,
1657 +               .checkentry     = imq_checkentry,
1658 +               .target         = imq_target,
1659 +               .targetsize     = sizeof(struct xt_imq_info),
1660 +               .table          = "mangle",
1661 +               .me             = THIS_MODULE
1662 +       },
1663 +};
1664 +
1665 +static int __init imq_init(void)
1666 +{
1667 +       return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1668 +}
1669 +
1670 +static void __exit imq_fini(void)
1671 +{
1672 +       xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1673 +}
1674 +
1675 +module_init(imq_init);
1676 +module_exit(imq_fini);
1677 +
1678 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1679 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1680 +MODULE_LICENSE("GPL");
1681 +MODULE_ALIAS("ipt_IMQ");
1682 +MODULE_ALIAS("ip6t_IMQ");
1683 +
1684 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1685 index 6cfb6e9..4c675e9 100644
1686 --- a/net/sched/sch_generic.c
1687 +++ b/net/sched/sch_generic.c
1688 @@ -154,6 +154,14 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
1689         return skb;
1690  }
1691  
1692 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate)
1693 +{
1694 +       int packets;
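+       /* dequeue_skb() reports how many packets it bundled via this
+        * out-parameter; the imq caller needs only the skb list itself. */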
1695 +
1696 +       return dequeue_skb(q, validate, &packets);
1697 +}
1698 +EXPORT_SYMBOL(qdisc_dequeue_skb);
1699 +
1700  /*
1701   * Transmit possibly several skbs, and handle the return status as
1702   * required. Owning running seqcount bit guarantees that