1 diff -urNp -x '*.orig' linux-4.4/drivers/net/Kconfig linux-4.4/drivers/net/Kconfig
2 --- linux-4.4/drivers/net/Kconfig       2021-02-24 16:53:23.647128839 +0100
3 +++ linux-4.4/drivers/net/Kconfig       2021-02-24 16:53:35.787510557 +0100
4 @@ -234,6 +234,125 @@ config RIONET_RX_SIZE
5         depends on RIONET
6         default "128"
7  
8 +config IMQ
9 +       tristate "IMQ (intermediate queueing device) support"
10 +       depends on NETDEVICES && NETFILTER
11 +       ---help---
12 +         The IMQ devices are used as placeholders for QoS queueing
13 +         disciplines. Every packet entering/leaving the IP stack can be
14 +         directed through an IMQ device, where it is enqueued/dequeued to
15 +         the attached qdisc. This allows you to treat network devices as
16 +         classes and distribute bandwidth among them. Iptables is used to
17 +         specify through which IMQ device, if any, packets travel.
18 +
19 +         More information at: https://github.com/imq/linuximq
20 +
21 +         To compile this driver as a module, choose M here: the module
22 +         will be called imq.  If unsure, say N.
23 +
24 +choice
25 +       prompt "IMQ behavior (PRE/POSTROUTING)"
26 +       depends on IMQ
27 +       default IMQ_BEHAVIOR_AB
28 +       help
29 +         This setting defines how IMQ behaves with respect to its
30 +         hooking in PREROUTING and POSTROUTING.
31 +
32 +         IMQ can work in any of the following ways:
33 +
34 +             PREROUTING   |      POSTROUTING
35 +         -----------------|-------------------
36 +         #1  After NAT    |      After NAT
37 +         #2  After NAT    |      Before NAT
38 +         #3  Before NAT   |      After NAT
39 +         #4  Before NAT   |      Before NAT
40 +
41 +         The default behavior is to hook after NAT on PREROUTING
42 +         and before NAT on POSTROUTING (#2).
43 +
44 +         These settings are especially useful when trying to use IMQ
45 +         to shape NATed clients.
46 +
47 +         More information can be found at: https://github.com/imq/linuximq
48 +
49 +         If unsure, leave the default settings alone.
50 +
51 +config IMQ_BEHAVIOR_AA
52 +       bool "IMQ AA"
53 +       help
54 +         This setting defines how IMQ behaves with respect to its
55 +         hooking in PREROUTING and POSTROUTING.
56 +
57 +         Choosing this option will make IMQ hook like this:
58 +
59 +         PREROUTING:   After NAT
60 +         POSTROUTING:  After NAT
61 +
62 +         More information can be found at: https://github.com/imq/linuximq
63 +
64 +         If unsure, leave the default settings alone.
65 +
66 +config IMQ_BEHAVIOR_AB
67 +       bool "IMQ AB"
68 +       help
69 +         This setting defines how IMQ behaves with respect to its
70 +         hooking in PREROUTING and POSTROUTING.
71 +
72 +         Choosing this option will make IMQ hook like this:
73 +
74 +         PREROUTING:   After NAT
75 +         POSTROUTING:  Before NAT
76 +
77 +         More information can be found at: https://github.com/imq/linuximq
78 +
79 +         If unsure, leave the default settings alone.
80 +
81 +config IMQ_BEHAVIOR_BA
82 +       bool "IMQ BA"
83 +       help
84 +         This setting defines how IMQ behaves with respect to its
85 +         hooking in PREROUTING and POSTROUTING.
86 +
87 +         Choosing this option will make IMQ hook like this:
88 +
89 +         PREROUTING:   Before NAT
90 +         POSTROUTING:  After NAT
91 +
92 +         More information can be found at: https://github.com/imq/linuximq
93 +
94 +         If unsure, leave the default settings alone.
95 +
96 +config IMQ_BEHAVIOR_BB
97 +       bool "IMQ BB"
98 +       help
99 +         This setting defines how IMQ behaves with respect to its
100 +         hooking in PREROUTING and POSTROUTING.
101 +
102 +         Choosing this option will make IMQ hook like this:
103 +
104 +         PREROUTING:   Before NAT
105 +         POSTROUTING:  Before NAT
106 +
107 +         More information can be found at: https://github.com/imq/linuximq
108 +
109 +         If unsure, leave the default settings alone.
110 +
111 +endchoice
112 +
113 +config IMQ_NUM_DEVS
114 +       int "Number of IMQ devices"
115 +       range 2 16
116 +       depends on IMQ
117 +       default "16"
118 +       help
119 +         This setting defines how many IMQ devices will be created.
120 +
121 +         The default value is 16.
122 +
123 +         More information can be found at: https://github.com/imq/linuximq
124 +
125 +         If unsure, leave the default settings alone.
126 +
127  config TUN
128         tristate "Universal TUN/TAP device driver support"
129         depends on INET
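
The behavior table above boils down to a choice of netfilter hook priorities in the imq.c hunk below. A minimal userspace sketch of where each choice lands relative to DNAT (PREROUTING) and SNAT (POSTROUTING); the NF_IP_PRI_* values are assumed from include/uapi/linux/netfilter_ipv4.h in this kernel series, and lower values run first:

#include <stdio.h>
#include <limits.h>

/* Assumed priority constants from include/uapi/linux/netfilter_ipv4.h. */
#define NF_IP_PRI_MANGLE   (-150)
#define NF_IP_PRI_NAT_DST  (-100)	/* DNAT runs here on PREROUTING  */
#define NF_IP_PRI_NAT_SRC    100	/* SNAT runs here on POSTROUTING */
#define NF_IP_PRI_LAST     INT_MAX

int main(void)
{
	/* "Before NAT" on PREROUTING: MANGLE + 1 sorts before NAT_DST. */
	printf("PREROUTING  before NAT: %d\n", NF_IP_PRI_MANGLE + 1);
	printf("PREROUTING  after  NAT: %d\n", NF_IP_PRI_NAT_DST + 1);
	/* "Before NAT" on POSTROUTING: NAT_SRC - 1 sorts before SNAT. */
	printf("POSTROUTING before NAT: %d\n", NF_IP_PRI_NAT_SRC - 1);
	printf("POSTROUTING after  NAT: %d\n", NF_IP_PRI_LAST);
	return 0;
}
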
130 diff -urNp -x '*.orig' linux-4.4/drivers/net/Makefile linux-4.4/drivers/net/Makefile
131 --- linux-4.4/drivers/net/Makefile      2016-01-11 00:01:32.000000000 +0100
132 +++ linux-4.4/drivers/net/Makefile      2021-02-24 16:53:35.787510557 +0100
133 @@ -10,6 +10,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
134  obj-$(CONFIG_DUMMY) += dummy.o
135  obj-$(CONFIG_EQUALIZER) += eql.o
136  obj-$(CONFIG_IFB) += ifb.o
137 +obj-$(CONFIG_IMQ) += imq.o
138  obj-$(CONFIG_MACVLAN) += macvlan.o
139  obj-$(CONFIG_MACVTAP) += macvtap.o
140  obj-$(CONFIG_MII) += mii.o
141 diff -urNp -x '*.orig' linux-4.4/drivers/net/imq.c linux-4.4/drivers/net/imq.c
142 --- linux-4.4/drivers/net/imq.c 1970-01-01 01:00:00.000000000 +0100
143 +++ linux-4.4/drivers/net/imq.c 2021-02-24 16:53:35.787510557 +0100
144 @@ -0,0 +1,903 @@
145 +/*
146 + *             Pseudo-driver for the intermediate queue device.
147 + *
148 + *             This program is free software; you can redistribute it and/or
149 + *             modify it under the terms of the GNU General Public License
150 + *             as published by the Free Software Foundation; either version
151 + *             2 of the License, or (at your option) any later version.
152 + *
153 + * Authors:    Patrick McHardy, <kaber@trash.net>
154 + *
155 + *            The first version was written by Martin Devera, <devik@cdi.cz>
156 + *
157 + *                        See Credits.txt
158 + */
159 +
160 +#include <linux/module.h>
161 +#include <linux/kernel.h>
162 +#include <linux/moduleparam.h>
163 +#include <linux/list.h>
164 +#include <linux/skbuff.h>
165 +#include <linux/netdevice.h>
166 +#include <linux/etherdevice.h>
167 +#include <linux/rtnetlink.h>
168 +#include <linux/if_arp.h>
169 +#include <linux/netfilter.h>
170 +#include <linux/netfilter_ipv4.h>
171 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
172 +       #include <linux/netfilter_ipv6.h>
173 +#endif
174 +#include <linux/imq.h>
175 +#include <net/pkt_sched.h>
176 +#include <net/netfilter/nf_queue.h>
177 +#include <net/sock.h>
178 +#include <linux/ip.h>
179 +#include <linux/ipv6.h>
180 +#include <linux/if_vlan.h>
181 +#include <linux/if_pppox.h>
182 +#include <net/ip.h>
183 +#include <net/ipv6.h>
184 +
185 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
186 +
187 +static nf_hookfn imq_nf_hook;
188 +
189 +static struct nf_hook_ops imq_ops[] = {
190 +       {
191 +       /* imq_ingress_ipv4 */
192 +               .hook           = imq_nf_hook,
193 +               .pf             = PF_INET,
194 +               .hooknum        = NF_INET_PRE_ROUTING,
195 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
196 +               .priority       = NF_IP_PRI_MANGLE + 1,
197 +#else
198 +               .priority       = NF_IP_PRI_NAT_DST + 1,
199 +#endif
200 +       },
201 +       {
202 +       /* imq_egress_ipv4 */
203 +               .hook           = imq_nf_hook,
204 +               .pf             = PF_INET,
205 +               .hooknum        = NF_INET_POST_ROUTING,
206 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
207 +               .priority       = NF_IP_PRI_LAST,
208 +#else
209 +               .priority       = NF_IP_PRI_NAT_SRC - 1,
210 +#endif
211 +       },
212 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
213 +       {
214 +       /* imq_ingress_ipv6 */
215 +               .hook           = imq_nf_hook,
216 +               .pf             = PF_INET6,
217 +               .hooknum        = NF_INET_PRE_ROUTING,
218 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
219 +               .priority       = NF_IP6_PRI_MANGLE + 1,
220 +#else
221 +               .priority       = NF_IP6_PRI_NAT_DST + 1,
222 +#endif
223 +       },
224 +       {
225 +       /* imq_egress_ipv6 */
226 +               .hook           = imq_nf_hook,
227 +               .pf             = PF_INET6,
228 +               .hooknum        = NF_INET_POST_ROUTING,
229 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
230 +               .priority       = NF_IP6_PRI_LAST,
231 +#else
232 +               .priority       = NF_IP6_PRI_NAT_SRC - 1,
233 +#endif
234 +       },
235 +#endif
236 +};
237 +
238 +#if defined(CONFIG_IMQ_NUM_DEVS)
239 +static int numdevs = CONFIG_IMQ_NUM_DEVS;
240 +#else
241 +static int numdevs = IMQ_MAX_DEVS;
242 +#endif
243 +
244 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
245 +
246 +#define IMQ_MAX_QUEUES 32
247 +static int numqueues = 1;
248 +static u32 imq_hashrnd;
249 +static int imq_dev_accurate_stats = 1;
250 +
251 +static inline __be16 pppoe_proto(const struct sk_buff *skb)
252 +{
253 +       return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
254 +                       sizeof(struct pppoe_hdr)));
255 +}
256 +
257 +static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
258 +{
259 +       unsigned int pull_len;
260 +       u16 protocol = skb->protocol;
261 +       u32 addr1, addr2;
262 +       u32 hash, ihl = 0;
263 +       union {
264 +               u16 in16[2];
265 +               u32 in32;
266 +       } ports;
267 +       u8 ip_proto;
268 +
269 +       pull_len = 0;
270 +
271 +recheck:
272 +       switch (protocol) {
273 +       case htons(ETH_P_8021Q): {
274 +               if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
275 +                       goto other;
276 +
277 +               pull_len += VLAN_HLEN;
278 +               skb->network_header += VLAN_HLEN;
279 +
280 +               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
281 +               goto recheck;
282 +       }
283 +
284 +       case htons(ETH_P_PPP_SES): {
285 +               if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
286 +                       goto other;
287 +
288 +               pull_len += PPPOE_SES_HLEN;
289 +               skb->network_header += PPPOE_SES_HLEN;
290 +
291 +               protocol = pppoe_proto(skb);
292 +               goto recheck;
293 +       }
294 +
295 +       case htons(ETH_P_IP): {
296 +               const struct iphdr *iph = ip_hdr(skb);
297 +
298 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
299 +                       goto other;
300 +
301 +               addr1 = iph->daddr;
302 +               addr2 = iph->saddr;
303 +
304 +               ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
305 +                                iph->protocol : 0;
306 +               ihl = ip_hdrlen(skb);
307 +
308 +               break;
309 +       }
310 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
311 +       case htons(ETH_P_IPV6): {
312 +               const struct ipv6hdr *iph = ipv6_hdr(skb);
313 +               __be16 fo = 0;
314 +
315 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
316 +                       goto other;
317 +
318 +               addr1 = iph->daddr.s6_addr32[3];
319 +               addr2 = iph->saddr.s6_addr32[3];
320 +               ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
321 +                                      &fo);
322 +               if (unlikely(ihl < 0))
323 +                       goto other;
324 +
325 +               break;
326 +       }
327 +#endif
328 +       default:
329 +other:
330 +               if (pull_len != 0) {
331 +                       skb_push(skb, pull_len);
332 +                       skb->network_header -= pull_len;
333 +               }
334 +
335 +               return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
336 +       }
337 +
338 +       if (addr1 > addr2)
339 +               swap(addr1, addr2);
340 +
341 +       switch (ip_proto) {
342 +       case IPPROTO_TCP:
343 +       case IPPROTO_UDP:
344 +       case IPPROTO_DCCP:
345 +       case IPPROTO_ESP:
346 +       case IPPROTO_AH:
347 +       case IPPROTO_SCTP:
348 +       case IPPROTO_UDPLITE: {
349 +               if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
350 +                       if (ports.in16[0] > ports.in16[1])
351 +                               swap(ports.in16[0], ports.in16[1]);
352 +                       break;
353 +               }
354 +               /* fall-through */
355 +       }
356 +       default:
357 +               ports.in32 = 0;
358 +               break;
359 +       }
360 +
361 +       if (pull_len != 0) {
362 +               skb_push(skb, pull_len);
363 +               skb->network_header -= pull_len;
364 +       }
365 +
366 +       hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
367 +
368 +       return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
369 +}
370 +
371 +static inline bool sk_tx_queue_recorded(struct sock *sk)
372 +{
373 +       return (sk_tx_queue_get(sk) >= 0);
374 +}
375 +
376 +static struct netdev_queue *imq_select_queue(struct net_device *dev,
377 +                                               struct sk_buff *skb)
378 +{
379 +       u16 queue_index = 0;
380 +       u32 hash;
381 +
382 +       if (likely(dev->real_num_tx_queues == 1))
383 +               goto out;
384 +
385 +       /* IMQ can be receiving ingress or egress packets. */
386 +
387 +       /* Check first if rx_queue is set */
388 +       if (skb_rx_queue_recorded(skb)) {
389 +               queue_index = skb_get_rx_queue(skb);
390 +               goto out;
391 +       }
392 +
393 +       /* Check if socket has tx_queue set */
394 +       if (sk_tx_queue_recorded(skb->sk)) {
395 +               queue_index = sk_tx_queue_get(skb->sk);
396 +               goto out;
397 +       }
398 +
399 +       /* Try to use the socket hash */
400 +       if (skb->sk && skb->sk->sk_hash) {
401 +               hash = skb->sk->sk_hash;
402 +               queue_index =
403 +                       (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
404 +               goto out;
405 +       }
406 +
407 +       /* Generate hash from packet data */
408 +       queue_index = imq_hash(dev, skb);
409 +
410 +out:
411 +       if (unlikely(queue_index >= dev->real_num_tx_queues))
412 +               queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
413 +
414 +       skb_set_queue_mapping(skb, queue_index);
415 +       return netdev_get_tx_queue(dev, queue_index);
416 +}
417 +
418 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
419 +{
420 +       return &dev->stats;
421 +}
422 +
423 +/* called for packets kfree'd in qdiscs at places other than enqueue */
424 +static void imq_skb_destructor(struct sk_buff *skb)
425 +{
426 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
427 +
428 +       skb->nf_queue_entry = NULL;
429 +
430 +       if (entry) {
431 +               nf_queue_entry_release_refs(entry);
432 +               kfree(entry);
433 +       }
434 +
435 +       skb_restore_cb(skb); /* kfree backup */
436 +}
437 +
438 +static void imq_done_check_queue_mapping(struct sk_buff *skb,
439 +                                        struct net_device *dev)
440 +{
441 +       unsigned int queue_index;
442 +
443 +       /* Don't let queue_mapping be left too large after exiting IMQ */
444 +       if (likely(skb->dev != dev && skb->dev != NULL)) {
445 +               queue_index = skb_get_queue_mapping(skb);
446 +               if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
447 +                       queue_index = (u16)((u32)queue_index %
448 +                                               skb->dev->real_num_tx_queues);
449 +                       skb_set_queue_mapping(skb, queue_index);
450 +               }
451 +       } else {
452 +               /* skb->dev was IMQ device itself or NULL, be on safe side and
453 +                * just clear queue mapping.
454 +                */
455 +               skb_set_queue_mapping(skb, 0);
456 +       }
457 +}
458 +
459 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
460 +{
461 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
462 +
463 +       skb->nf_queue_entry = NULL;
464 +       dev->trans_start = jiffies;
465 +
466 +       dev->stats.tx_bytes += skb->len;
467 +       dev->stats.tx_packets++;
468 +
469 +       if (unlikely(entry == NULL)) {
470 +               /* We don't know what is going on here: the packet is
471 +                * queued for the imq device, but (probably) not by us.
472 +                *
473 +                * If this packet was not sent here by imq_nf_queue(), then
474 +                * skb_save_cb() was not used and kfree_skb() should not show:
475 +                *   WARNING: IMQ: kfree_skb: skb->cb_next:..
476 +                * and/or
477 +                *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
478 +                *
479 +                * However, if these warnings are shown, then IMQ is somehow
480 +                * broken and you should report this at https://github.com/imq/linuximq.
481 +                */
482 +
483 +               /* imq_dev_xmit is a black hole that eats all packets; report
484 +                * that we ate this packet happily and increase the dropped counters.
485 +                */
486 +
487 +               dev->stats.tx_dropped++;
488 +               dev_kfree_skb(skb);
489 +
490 +               return NETDEV_TX_OK;
491 +       }
492 +
493 +       skb_restore_cb(skb); /* restore skb->cb */
494 +
495 +       skb->imq_flags = 0;
496 +       skb->destructor = NULL;
497 +
498 +       imq_done_check_queue_mapping(skb, dev);
499 +
500 +       nf_reinject(entry, NF_ACCEPT);
501 +
502 +       return NETDEV_TX_OK;
503 +}
504 +
505 +static struct net_device *get_imq_device_by_index(int index)
506 +{
507 +       struct net_device *dev = NULL;
508 +       struct net *net;
509 +       char buf[8];
510 +
511 +       /* get device by name and cache result */
512 +       snprintf(buf, sizeof(buf), "imq%d", index);
513 +
514 +       /* Search device from all namespaces. */
515 +       for_each_net(net) {
516 +               dev = dev_get_by_name(net, buf);
517 +               if (dev)
518 +                       break;
519 +       }
520 +
521 +       if (WARN_ON_ONCE(dev == NULL)) {
522 +               /* IMQ device not found. Exotic config? */
523 +               return ERR_PTR(-ENODEV);
524 +       }
525 +
526 +       imq_devs_cache[index] = dev;
527 +       dev_put(dev);
528 +
529 +       return dev;
530 +}
531 +
532 +static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
533 +{
534 +       struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
535 +       if (entry) {
536 +               nf_queue_entry_get_refs(entry);
537 +               return entry;
538 +       }
539 +       return NULL;
540 +}
541 +
542 +#ifdef CONFIG_BRIDGE_NETFILTER
543 +/* When called from bridge netfilter, skb->data must point to MAC header
544 + * before calling skb_gso_segment(). Else, original MAC header is lost
545 + * and segmented skbs will be sent to wrong destination.
546 + */
547 +static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
548 +{
549 +       if (skb->nf_bridge)
550 +               __skb_push(skb, skb->network_header - skb->mac_header);
551 +}
552 +
553 +static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
554 +{
555 +       if (skb->nf_bridge)
556 +               __skb_pull(skb, skb->network_header - skb->mac_header);
557 +}
558 +#else
559 +#define nf_bridge_adjust_skb_data(s) do {} while (0)
560 +#define nf_bridge_adjust_segmented_data(s) do {} while (0)
561 +#endif
562 +
563 +static void free_entry(struct nf_queue_entry *entry)
564 +{
565 +       nf_queue_entry_release_refs(entry);
566 +       kfree(entry);
567 +}
568 +
569 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
570 +
571 +static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
572 +                             struct net_device *dev, struct sk_buff *skb)
573 +{
574 +       int ret = -ENOMEM;
575 +       struct nf_queue_entry *entry_seg;
576 +
577 +       nf_bridge_adjust_segmented_data(skb);
578 +
579 +       if (skb->next == NULL) { /* last packet, no need to copy entry */
580 +               struct sk_buff *gso_skb = entry->skb;
581 +               entry->skb = skb;
582 +               ret = __imq_nf_queue(entry, dev);
583 +               if (ret)
584 +                       entry->skb = gso_skb;
585 +               return ret;
586 +       }
587 +
588 +       skb->next = NULL;
589 +
590 +       entry_seg = nf_queue_entry_dup(entry);
591 +       if (entry_seg) {
592 +               entry_seg->skb = skb;
593 +               ret = __imq_nf_queue(entry_seg, dev);
594 +               if (ret)
595 +                       free_entry(entry_seg);
596 +       }
597 +       return ret;
598 +}
599 +
600 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
601 +{
602 +       struct sk_buff *skb, *segs;
603 +       struct net_device *dev;
604 +       unsigned int queued;
605 +       int index, retval, err;
606 +
607 +       index = entry->skb->imq_flags & IMQ_F_IFMASK;
608 +       if (unlikely(index > numdevs - 1)) {
609 +               if (net_ratelimit())
610 +                       pr_warn("IMQ: invalid device specified, highest is %u\n",
611 +                               numdevs - 1);
612 +               retval = -EINVAL;
613 +               goto out_no_dev;
614 +       }
615 +
616 +       /* check for imq device by index from cache */
617 +       dev = imq_devs_cache[index];
618 +       if (unlikely(!dev)) {
619 +               dev = get_imq_device_by_index(index);
620 +               if (IS_ERR(dev)) {
621 +                       retval = PTR_ERR(dev);
622 +                       goto out_no_dev;
623 +               }
624 +       }
625 +
626 +       if (unlikely(!(dev->flags & IFF_UP))) {
627 +               entry->skb->imq_flags = 0;
628 +               retval = -ECANCELED;
629 +               goto out_no_dev;
630 +       }
631 +
632 +       /* Since 3.10.x, GSO handling moved here as result of upstream commit
633 +        * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
634 +        * skb_gso_segment into nfnetlink_queue module).
635 +        *
636 +        * Following code replicates the gso handling from
637 +        * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
638 +        */
639 +
640 +       skb = entry->skb;
641 +
642 +       switch (entry->state.pf) {
643 +       case NFPROTO_IPV4:
644 +               skb->protocol = htons(ETH_P_IP);
645 +               break;
646 +       case NFPROTO_IPV6:
647 +               skb->protocol = htons(ETH_P_IPV6);
648 +               break;
649 +       }
650 +
651 +       if (!skb_is_gso(entry->skb))
652 +               return __imq_nf_queue(entry, dev);
653 +
654 +       nf_bridge_adjust_skb_data(skb);
655 +       segs = skb_gso_segment(skb, 0);
656 +       /* Does not use PTR_ERR to limit the number of error codes that can be
657 +        * returned by nf_queue.  For instance, callers rely on -ECANCELED to
658 +        * mean 'ignore this hook'.
659 +        */
660 +       err = -ENOBUFS;
661 +       if (IS_ERR(segs))
662 +               goto out_err;
663 +       queued = 0;
664 +       err = 0;
665 +       do {
666 +               struct sk_buff *nskb = segs->next;
667 +               if (nskb && nskb->next)
668 +                       nskb->cb_next = NULL;
669 +               if (err == 0)
670 +                       err = __imq_nf_queue_gso(entry, dev, segs);
671 +               if (err == 0)
672 +                       queued++;
673 +               else
674 +                       kfree_skb(segs);
675 +               segs = nskb;
676 +       } while (segs);
677 +
678 +       if (queued) {
679 +               if (err) /* some segments are already queued */
680 +                       free_entry(entry);
681 +               kfree_skb(skb);
682 +               return 0;
683 +       }
684 +
685 +out_err:
686 +       nf_bridge_adjust_segmented_data(skb);
687 +       retval = err;
688 +out_no_dev:
689 +       return retval;
690 +}
691 +
692 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
693 +{
694 +       struct sk_buff *skb_orig, *skb, *skb_shared, *skb_popd;
695 +       struct Qdisc *q;
696 +       struct netdev_queue *txq;
697 +       spinlock_t *root_lock;
698 +       int users;
699 +       int retval = -EINVAL;
700 +       unsigned int orig_queue_index;
701 +
702 +       dev->last_rx = jiffies;
703 +
704 +       skb = entry->skb;
705 +       skb_orig = NULL;
706 +
707 +       /* skb has owner? => make clone */
708 +       if (unlikely(skb->destructor)) {
709 +               skb_orig = skb;
710 +               skb = skb_clone(skb, GFP_ATOMIC);
711 +               if (unlikely(!skb)) {
712 +                       retval = -ENOMEM;
713 +                       goto out;
714 +               }
715 +               skb->cb_next = NULL;
716 +               entry->skb = skb;
717 +       }
718 +
719 +       dev->stats.rx_bytes += skb->len;
720 +       dev->stats.rx_packets++;
721 +
722 +       if (!skb->dev) {
723 +               /* skb->dev == NULL causes problems, try to find the cause. */
724 +               if (net_ratelimit()) {
725 +                       dev_warn(&dev->dev,
726 +                                "received packet with skb->dev == NULL\n");
727 +                       dump_stack();
728 +               }
729 +
730 +               skb->dev = dev;
731 +       }
732 +
733 +       /* Disables softirqs for lock below */
734 +       rcu_read_lock_bh();
735 +
736 +       /* Multi-queue selection */
737 +       orig_queue_index = skb_get_queue_mapping(skb);
738 +       txq = imq_select_queue(dev, skb);
739 +
740 +       q = rcu_dereference(txq->qdisc);
741 +       if (unlikely(!q->enqueue))
742 +               goto packet_not_eaten_by_imq_dev;
743 +
744 +       skb->nf_queue_entry = entry;
745 +       root_lock = qdisc_lock(q);
746 +       spin_lock(root_lock);
747 +
748 +       users = atomic_read(&skb->users);
749 +
750 +       skb_shared = skb_get(skb); /* increase reference count by one */
751 +
752 +       /* back up skb->cb, as the qdisc layer will overwrite it */
753 +       skb_save_cb(skb_shared);
754 +       qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
755 +       if (likely(atomic_read(&skb_shared->users) == users + 1)) {
756 +               bool validate;
757 +
758 +               kfree_skb(skb_shared); /* decrease reference count by one */
759 +
760 +               skb->destructor = &imq_skb_destructor;
761 +
762 +               skb_popd = qdisc_dequeue_skb(q, &validate);
763 +
764 +               /* cloned? */
765 +               if (unlikely(skb_orig))
766 +                       kfree_skb(skb_orig); /* free original */
767 +
768 +               spin_unlock(root_lock);
769 +
770 +#if 0
771 +               /* schedule qdisc dequeue */
772 +               __netif_schedule(q);
773 +#else
774 +               if (likely(skb_popd)) {
775 +                       /* Note that we validate skb (GSO, checksum, ...) outside of locks */
776 +                       if (validate)
777 +                               skb_popd = validate_xmit_skb_list(skb_popd, dev);
778 +
779 +                       if (skb_popd) {
780 +                               int dummy_ret;
781 +                               int cpu = smp_processor_id(); /* ok because BHs are off */
782 +
783 +                               txq = skb_get_tx_queue(dev, skb_popd);
784 +                               /*
785 +                                * The IMQ device is never frozen or stopped and its xmit
786 +                                * always succeeds, so skip the status and return checks.
787 +                                */
788 +                               if (imq_dev_accurate_stats && txq->xmit_lock_owner != cpu) {
789 +                                       HARD_TX_LOCK(dev, txq, cpu);
790 +                                       if (!netif_xmit_frozen_or_stopped(txq)) {
791 +                                               dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
792 +                                       }
793 +                                       HARD_TX_UNLOCK(dev, txq);
794 +                               } else {
795 +                                       if (!netif_xmit_frozen_or_stopped(txq)) {
796 +                                               dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
797 +                                       }
798 +                               }
799 +                       }
800 +               } else {
801 +                       /* No ready skb, then schedule it */
802 +                       __netif_schedule(q);
803 +               }
804 +#endif
805 +               rcu_read_unlock_bh();
806 +               retval = 0;
807 +               goto out;
808 +       } else {
809 +               skb_restore_cb(skb_shared); /* restore skb->cb */
810 +               skb->nf_queue_entry = NULL;
811 +               /*
812 +                * The qdisc dropped the packet and already decreased its
813 +                * reference count, so we must not try to free it again here:
814 +                * that would actually destroy the skb.
815 +                */
816 +               spin_unlock(root_lock);
817 +               goto packet_not_eaten_by_imq_dev;
818 +       }
819 +
820 +packet_not_eaten_by_imq_dev:
821 +       skb_set_queue_mapping(skb, orig_queue_index);
822 +       rcu_read_unlock_bh();
823 +
824 +       /* cloned? restore original */
825 +       if (unlikely(skb_orig)) {
826 +               kfree_skb(skb);
827 +               entry->skb = skb_orig;
828 +       }
829 +       retval = -1;
830 +out:
831 +       return retval;
832 +}
833 +static unsigned int imq_nf_hook(void *priv,
834 +                               struct sk_buff *skb,
835 +                               const struct nf_hook_state *state)
836 +{
837 +       return (skb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
838 +}
839 +
840 +static int imq_close(struct net_device *dev)
841 +{
842 +       netif_stop_queue(dev);
843 +       return 0;
844 +}
845 +
846 +static int imq_open(struct net_device *dev)
847 +{
848 +       netif_start_queue(dev);
849 +       return 0;
850 +}
851 +
852 +static const struct net_device_ops imq_netdev_ops = {
853 +       .ndo_open               = imq_open,
854 +       .ndo_stop               = imq_close,
855 +       .ndo_start_xmit         = imq_dev_xmit,
856 +       .ndo_get_stats          = imq_get_stats,
857 +};
858 +
859 +static void imq_setup(struct net_device *dev)
860 +{
861 +       dev->netdev_ops         = &imq_netdev_ops;
862 +       dev->type               = ARPHRD_VOID;
863 +       dev->mtu                = 16000; /* too small? */
864 +       dev->tx_queue_len       = 11000; /* too big? */
865 +       dev->flags              = IFF_NOARP;
866 +       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
867 +                                 NETIF_F_GSO | NETIF_F_HW_CSUM |
868 +                                 NETIF_F_HIGHDMA;
869 +       dev->priv_flags         &= ~(IFF_XMIT_DST_RELEASE |
870 +                                    IFF_TX_SKB_SHARING);
871 +}
872 +
873 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
874 +{
875 +       int ret = 0;
876 +
877 +       if (tb[IFLA_ADDRESS]) {
878 +               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
879 +                       ret = -EINVAL;
880 +                       goto end;
881 +               }
882 +               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
883 +                       ret = -EADDRNOTAVAIL;
884 +                       goto end;
885 +               }
886 +       }
887 +       return 0;
888 +end:
889 +       pr_warn("IMQ: imq_validate failed (%d)\n", ret);
890 +       return ret;
891 +}
892 +
893 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
894 +       .kind           = "imq",
895 +       .priv_size      = 0,
896 +       .setup          = imq_setup,
897 +       .validate       = imq_validate,
898 +};
899 +
900 +static const struct nf_queue_handler imq_nfqh = {
901 +       .outfn = imq_nf_queue,
902 +};
903 +
904 +static int __init imq_init_hooks(void)
905 +{
906 +       int ret;
907 +
908 +       nf_register_queue_imq_handler(&imq_nfqh);
909 +
910 +       ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
911 +       if (ret < 0)
912 +               nf_unregister_queue_imq_handler();
913 +
914 +       return ret;
915 +}
916 +
917 +static int __init imq_init_one(int index)
918 +{
919 +       struct net_device *dev;
920 +       int ret;
921 +
922 +       dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
923 +       if (!dev)
924 +               return -ENOMEM;
925 +
926 +       ret = dev_alloc_name(dev, dev->name);
927 +       if (ret < 0)
928 +               goto fail;
929 +
930 +       dev->rtnl_link_ops = &imq_link_ops;
931 +       ret = register_netdevice(dev);
932 +       if (ret < 0)
933 +               goto fail;
934 +
935 +       return 0;
936 +fail:
937 +       free_netdev(dev);
938 +       return ret;
939 +}
940 +
941 +static int __init imq_init_devs(void)
942 +{
943 +       int err, i;
944 +
945 +       if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
946 +               pr_err("IMQ: numdevs has to be between 1 and %u\n",
947 +                      IMQ_MAX_DEVS);
948 +               return -EINVAL;
949 +       }
950 +
951 +       if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
952 +               pr_err("IMQ: numqueues has to be between 1 and %u\n",
953 +                      IMQ_MAX_QUEUES);
954 +               return -EINVAL;
955 +       }
956 +
957 +       get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
958 +
959 +       rtnl_lock();
960 +       err = __rtnl_link_register(&imq_link_ops);
961 +
962 +       for (i = 0; i < numdevs && !err; i++)
963 +               err = imq_init_one(i);
964 +
965 +       if (err) {
966 +               __rtnl_link_unregister(&imq_link_ops);
967 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
968 +       }
969 +       rtnl_unlock();
970 +
971 +       return err;
972 +}
973 +
974 +static int __init imq_init_module(void)
975 +{
976 +       int err;
977 +
978 +#if defined(CONFIG_IMQ_NUM_DEVS)
979 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
980 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
981 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
982 +#endif
983 +
984 +       err = imq_init_devs();
985 +       if (err) {
986 +               pr_err("IMQ: imq_init_devs() failed\n");
987 +               return err;
988 +       }
989 +
990 +       err = imq_init_hooks();
991 +       if (err) {
992 +               pr_err("IMQ: imq_init_hooks() failed\n");
993 +               rtnl_link_unregister(&imq_link_ops);
994 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
995 +               return err;
996 +       }
997 +
998 +       pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d, imq_dev_accurate_stats = %d)\n",
999 +               numdevs, numqueues, imq_dev_accurate_stats);
1000 +
1001 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1002 +       pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
1003 +#else
1004 +       pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
1005 +#endif
1006 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1007 +       pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
1008 +#else
1009 +       pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
1010 +#endif
1011 +
1012 +       return 0;
1013 +}
1014 +
1015 +static void __exit imq_unhook(void)
1016 +{
1017 +       nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
1018 +       nf_unregister_queue_imq_handler();
1019 +}
1020 +
1021 +static void __exit imq_cleanup_devs(void)
1022 +{
1023 +       rtnl_link_unregister(&imq_link_ops);
1024 +       memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
1025 +}
1026 +
1027 +static void __exit imq_exit_module(void)
1028 +{
1029 +       imq_unhook();
1030 +       imq_cleanup_devs();
1031 +       pr_info("IMQ driver unloaded successfully.\n");
1032 +}
1033 +
1034 +module_init(imq_init_module);
1035 +module_exit(imq_exit_module);
1036 +
1037 +module_param(numdevs, int, 0);
1038 +module_param(numqueues, int, 0);
1039 +module_param(imq_dev_accurate_stats, int, 0);
1040 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
1041 +MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
1042 +MODULE_PARM_DESC(imq_dev_accurate_stats, "Whether to enable accurate imq device stats (default 1)");
1043 +
1044 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1045 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1046 +MODULE_LICENSE("GPL");
1047 +MODULE_ALIAS_RTNL_LINK("imq");
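
Both imq_hash() and imq_select_queue() above reduce a 32-bit hash to a queue index with ((u64)hash * n) >> 32 rather than a modulo: the multiplication scales the hash uniformly onto [0, n) without a division. A standalone sketch of the trick, illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit hash onto [0, num_queues): (hash * n) / 2^32. */
static uint16_t pick_queue(uint32_t hash, uint32_t num_queues)
{
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint32_t hashes[] = { 0x00000000u, 0x80000000u, 0xffffffffu };

	for (int i = 0; i < 3; i++)
		printf("hash %#010x -> queue %u of 8\n",
		       hashes[i], pick_queue(hashes[i], 8));
	return 0;	/* prints queues 0, 4 and 7 */
}
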
1048 diff -urNp -x '*.orig' linux-4.4/include/linux/imq.h linux-4.4/include/linux/imq.h
1049 --- linux-4.4/include/linux/imq.h       1970-01-01 01:00:00.000000000 +0100
1050 +++ linux-4.4/include/linux/imq.h       2021-02-24 16:53:35.787510557 +0100
1051 @@ -0,0 +1,13 @@
1052 +#ifndef _IMQ_H
1053 +#define _IMQ_H
1054 +
1055 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
1056 +#define IMQ_F_BITS     5
1057 +
1058 +#define IMQ_F_IFMASK   0x0f
1059 +#define IMQ_F_ENQUEUE  0x10
1060 +
1061 +#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)
1062 +
1063 +#endif /* _IMQ_H */
1064 +
1065 diff -urNp -x '*.orig' linux-4.4/include/linux/netdevice.h linux-4.4/include/linux/netdevice.h
1066 --- linux-4.4/include/linux/netdevice.h 2021-02-24 16:53:24.400485858 +0100
1067 +++ linux-4.4/include/linux/netdevice.h 2021-02-24 16:53:35.787510557 +0100
1068 @@ -3421,6 +3421,19 @@ static inline void netif_tx_unlock_bh(st
1069         }                                               \
1070  }
1071  
1072 +#define HARD_TX_LOCK_BH(dev, txq) {           \
1073 +    if ((dev->features & NETIF_F_LLTX) == 0) {  \
1074 +        __netif_tx_lock_bh(txq);      \
1075 +    }                       \
1076 +}
1077 +
1078 +#define HARD_TX_UNLOCK_BH(dev, txq) {          \
1079 +    if ((dev->features & NETIF_F_LLTX) == 0) {  \
1080 +        __netif_tx_unlock_bh(txq);         \
1081 +    }                       \
1082 +}
1083 +
1084 +
1085  static inline void netif_tx_disable(struct net_device *dev)
1086  {
1087         unsigned int i;
1088 diff -urNp -x '*.orig' linux-4.4/include/linux/netfilter/xt_IMQ.h linux-4.4/include/linux/netfilter/xt_IMQ.h
1089 --- linux-4.4/include/linux/netfilter/xt_IMQ.h  1970-01-01 01:00:00.000000000 +0100
1090 +++ linux-4.4/include/linux/netfilter/xt_IMQ.h  2021-02-24 16:53:35.787510557 +0100
1091 @@ -0,0 +1,9 @@
1092 +#ifndef _XT_IMQ_H
1093 +#define _XT_IMQ_H
1094 +
1095 +struct xt_imq_info {
1096 +       unsigned int todev;     /* target imq device */
1097 +};
1098 +
1099 +#endif /* _XT_IMQ_H */
1100 +
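
The xt_IMQ target module itself is not part of this hunk; only its userspace interface (the todev index above) is. A minimal sketch, assuming the usual IMQ target behavior and the IMQ_F_* masks from include/linux/imq.h above, of how todev plausibly becomes skb->imq_flags and is decoded again by imq_nf_queue():

#include <stdio.h>

#define IMQ_F_IFMASK	0x0f
#define IMQ_F_ENQUEUE	0x10

struct xt_imq_info { unsigned int todev; };

/* Target side (assumed): mark the packet and record the device index. */
static unsigned int imq_mark(const struct xt_imq_info *info)
{
	return (info->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
}

int main(void)
{
	struct xt_imq_info info = { .todev = 3 };	/* e.g. -j IMQ --todev 3 */
	unsigned int flags = imq_mark(&info);

	/* Driver side: imq_nf_queue() recovers the index the same way. */
	printf("enqueue=%u dev=imq%u\n",
	       !!(flags & IMQ_F_ENQUEUE), flags & IMQ_F_IFMASK);
	return 0;
}
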
1101 diff -urNp -x '*.orig' linux-4.4/include/linux/netfilter_ipv4/ipt_IMQ.h linux-4.4/include/linux/netfilter_ipv4/ipt_IMQ.h
1102 --- linux-4.4/include/linux/netfilter_ipv4/ipt_IMQ.h    1970-01-01 01:00:00.000000000 +0100
1103 +++ linux-4.4/include/linux/netfilter_ipv4/ipt_IMQ.h    2021-02-24 16:53:35.790843996 +0100
1104 @@ -0,0 +1,10 @@
1105 +#ifndef _IPT_IMQ_H
1106 +#define _IPT_IMQ_H
1107 +
1108 +/* Backwards compatibility for old userspace */
1109 +#include <linux/netfilter/xt_IMQ.h>
1110 +
1111 +#define ipt_imq_info xt_imq_info
1112 +
1113 +#endif /* _IPT_IMQ_H */
1114 +
1115 diff -urNp -x '*.orig' linux-4.4/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-4.4/include/linux/netfilter_ipv6/ip6t_IMQ.h
1116 --- linux-4.4/include/linux/netfilter_ipv6/ip6t_IMQ.h   1970-01-01 01:00:00.000000000 +0100
1117 +++ linux-4.4/include/linux/netfilter_ipv6/ip6t_IMQ.h   2021-02-24 16:53:35.790843996 +0100
1118 @@ -0,0 +1,10 @@
1119 +#ifndef _IP6T_IMQ_H
1120 +#define _IP6T_IMQ_H
1121 +
1122 +/* Backwards compatibility for old userspace */
1123 +#include <linux/netfilter/xt_IMQ.h>
1124 +
1125 +#define ip6t_imq_info xt_imq_info
1126 +
1127 +#endif /* _IP6T_IMQ_H */
1128 +
1129 diff -urNp -x '*.orig' linux-4.4/include/linux/skbuff.h linux-4.4/include/linux/skbuff.h
1130 --- linux-4.4/include/linux/skbuff.h    2021-02-24 16:53:24.410486173 +0100
1131 +++ linux-4.4/include/linux/skbuff.h    2021-02-24 16:53:35.790843996 +0100
1132 @@ -38,6 +38,10 @@
1133  #include <linux/splice.h>
1134  #include <linux/in6.h>
1135  #include <net/flow.h>
1136 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1137 +#include <linux/imq.h>
1138 +#endif
1139 +
1140  
1141  /* A. Checksumming of received packets by device.
1142   *
1143 @@ -573,6 +577,9 @@ struct sk_buff {
1144          * first. This is owned by whoever has the skb queued ATM.
1145          */
1146         char                    cb[48] __aligned(8);
1147 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1148 +       void                    *cb_next;
1149 +#endif
1150  
1151         unsigned long           _skb_refdst;
1152         void                    (*destructor)(struct sk_buff *skb);
1153 @@ -582,6 +589,9 @@ struct sk_buff {
1154  #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1155         struct nf_conntrack     *nfct;
1156  #endif
1157 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1158 +       struct nf_queue_entry   *nf_queue_entry;
1159 +#endif
1160  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1161         struct nf_bridge_info   *nf_bridge;
1162  #endif
1163 @@ -648,6 +658,9 @@ struct sk_buff {
1164         __u8                    inner_protocol_type:1;
1165         __u8                    remcsum_offload:1;
1166         /* 3 or 5 bit hole */
1167 +       #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1168 +       __u8                    imq_flags:IMQ_F_BITS;
1169 +       #endif
1170  
1171  #ifdef CONFIG_NET_SCHED
1172         __u16                   tc_index;       /* traffic control index */
1173 @@ -804,6 +817,12 @@ void kfree_skb_list(struct sk_buff *segs
1174  void skb_tx_error(struct sk_buff *skb);
1175  void consume_skb(struct sk_buff *skb);
1176  void  __kfree_skb(struct sk_buff *skb);
1177 +
1178 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1179 +int skb_save_cb(struct sk_buff *skb);
1180 +int skb_restore_cb(struct sk_buff *skb);
1181 +#endif
1182 +
1183  extern struct kmem_cache *skbuff_head_cache;
1184  
1185  void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1186 @@ -3443,6 +3462,10 @@ static inline void __nf_copy(struct sk_b
1187         if (copy)
1188                 dst->nfctinfo = src->nfctinfo;
1189  #endif
1190 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1191 +       dst->imq_flags = src->imq_flags;
1192 +       dst->nf_queue_entry = src->nf_queue_entry;
1193 +#endif
1194  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1195         dst->nf_bridge  = src->nf_bridge;
1196         nf_bridge_get(src->nf_bridge);
1197 diff -urNp -x '*.orig' linux-4.4/include/net/netfilter/nf_queue.h linux-4.4/include/net/netfilter/nf_queue.h
1198 --- linux-4.4/include/net/netfilter/nf_queue.h  2021-02-24 16:53:24.427153363 +0100
1199 +++ linux-4.4/include/net/netfilter/nf_queue.h  2021-02-24 16:53:35.790843996 +0100
1200 @@ -31,6 +31,12 @@ struct nf_queue_handler {
1201  void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
1202  void nf_unregister_queue_handler(struct net *net);
1203  void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
1204 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1205 +
1206 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1207 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
1208 +void nf_unregister_queue_imq_handler(void);
1209 +#endif
1210  
1211  void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
1212  void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1213 diff -urNp -x '*.orig' linux-4.4/include/net/pkt_sched.h linux-4.4/include/net/pkt_sched.h
1214 --- linux-4.4/include/net/pkt_sched.h   2016-01-11 00:01:32.000000000 +0100
1215 +++ linux-4.4/include/net/pkt_sched.h   2021-02-24 16:53:35.790843996 +0100
1216 @@ -104,6 +104,8 @@ int sch_direct_xmit(struct sk_buff *skb,
1217  
1218  void __qdisc_run(struct Qdisc *q);
1219  
1220 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate);
1221 +
1222  static inline void qdisc_run(struct Qdisc *q)
1223  {
1224         if (qdisc_run_begin(q))
1225 diff -urNp -x '*.orig' linux-4.4/include/net/sch_generic.h linux-4.4/include/net/sch_generic.h
1226 --- linux-4.4/include/net/sch_generic.h 2021-02-24 16:53:24.430486801 +0100
1227 +++ linux-4.4/include/net/sch_generic.h 2021-02-24 16:53:35.790843996 +0100
1228 @@ -521,6 +521,12 @@ static inline int qdisc_enqueue(struct s
1229         return sch->enqueue(skb, sch);
1230  }
1231  
1232 +static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
1233 +{
1234 +    qdisc_skb_cb(skb)->pkt_len = skb->len;
1235 +    return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
1236 +}
1237 +
1238  static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
1239  {
1240         return q->flags & TCQ_F_CPUSTATS;
1241 diff -urNp -x '*.orig' linux-4.4/include/uapi/linux/netfilter.h linux-4.4/include/uapi/linux/netfilter.h
1242 --- linux-4.4/include/uapi/linux/netfilter.h    2016-01-11 00:01:32.000000000 +0100
1243 +++ linux-4.4/include/uapi/linux/netfilter.h    2021-02-24 16:53:35.790843996 +0100
1244 @@ -14,7 +14,8 @@
1245  #define NF_QUEUE 3
1246  #define NF_REPEAT 4
1247  #define NF_STOP 5
1248 -#define NF_MAX_VERDICT NF_STOP
1249 +#define NF_IMQ_QUEUE 6
1250 +#define NF_MAX_VERDICT NF_IMQ_QUEUE
1251  
1252  /* we overload the higher bits for encoding auxiliary data such as the queue
1253   * number or errno values. Not nice, but better than additional function
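
NF_IMQ_QUEUE reuses the existing verdict-word layout: the verdict code lives in the low bits and the queue number in the high bits, which is why net/netfilter/core.c below shifts by NF_VERDICT_QBITS. A standalone sketch, with the mask and shift values assumed from the 4.4 version of this header:

#include <stdio.h>

#define NF_IMQ_QUEUE	 6
#define NF_VERDICT_MASK	 0x000000ff	/* assumed from this header */
#define NF_VERDICT_QBITS 16

int main(void)
{
	/* A verdict word requesting IMQ queueing on queue 2. */
	unsigned int verdict = (2u << NF_VERDICT_QBITS) | NF_IMQ_QUEUE;

	printf("code=%u queue=%u\n",
	       verdict & NF_VERDICT_MASK,	/* 6: NF_IMQ_QUEUE */
	       verdict >> NF_VERDICT_QBITS);	/* 2: queue number */
	return 0;
}
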
1254 diff -urNp -x '*.orig' linux-4.4/net/core/dev.c linux-4.4/net/core/dev.c
1255 --- linux-4.4/net/core/dev.c    2021-02-24 16:53:24.560490888 +0100
1256 +++ linux-4.4/net/core/dev.c    2021-02-24 16:53:35.790843996 +0100
1257 @@ -138,6 +138,9 @@
1258  #include <linux/errqueue.h>
1259  #include <linux/hrtimer.h>
1260  #include <linux/netfilter_ingress.h>
1261 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1262 +#include <linux/imq.h>
1263 +#endif
1264  
1265  #include "net-sysfs.h"
1266  
1267 @@ -2769,7 +2772,12 @@ static int xmit_one(struct sk_buff *skb,
1268         unsigned int len;
1269         int rc;
1270  
1271 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1272 +       if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) &&
1273 +               !(skb->imq_flags & IMQ_F_ENQUEUE))
1274 +#else
1275         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
1276 +#endif
1277                 dev_queue_xmit_nit(skb, dev);
1278  
1279         len = skb->len;
1280 @@ -2807,6 +2815,7 @@ out:
1281         *ret = rc;
1282         return skb;
1283  }
1284 +EXPORT_SYMBOL(dev_hard_start_xmit);
1285  
1286  static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
1287                                           netdev_features_t features)
1288 diff -urNp -x '*.orig' linux-4.4/net/core/skbuff.c linux-4.4/net/core/skbuff.c
1289 --- linux-4.4/net/core/skbuff.c 2021-02-24 16:53:24.567157765 +0100
1290 +++ linux-4.4/net/core/skbuff.c 2021-02-24 16:53:35.790843996 +0100
1291 @@ -81,6 +81,87 @@ struct kmem_cache *skbuff_head_cache __r
1292  static struct kmem_cache *skbuff_fclone_cache __read_mostly;
1293  int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
1294  EXPORT_SYMBOL(sysctl_max_skb_frags);
1295 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1296 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1297 +#endif
1298 +
1299 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1300 +/* Control buffer save/restore for IMQ devices */
1301 +struct skb_cb_table {
1302 +       char                    cb[48] __aligned(8);
1303 +       void                    *cb_next;
1304 +       atomic_t                refcnt;
1305 +};
1306 +
1307 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1308 +
1309 +int skb_save_cb(struct sk_buff *skb)
1310 +{
1311 +       struct skb_cb_table *next;
1312 +
1313 +       next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1314 +       if (!next)
1315 +               return -ENOMEM;
1316 +
1317 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1318 +
1319 +       memcpy(next->cb, skb->cb, sizeof(skb->cb));
1320 +       next->cb_next = skb->cb_next;
1321 +
1322 +       atomic_set(&next->refcnt, 1);
1323 +
1324 +       skb->cb_next = next;
1325 +       return 0;
1326 +}
1327 +EXPORT_SYMBOL(skb_save_cb);
1328 +
1329 +int skb_restore_cb(struct sk_buff *skb)
1330 +{
1331 +       struct skb_cb_table *next;
1332 +
1333 +       if (!skb->cb_next)
1334 +               return 0;
1335 +
1336 +       next = skb->cb_next;
1337 +
1338 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1339 +
1340 +       memcpy(skb->cb, next->cb, sizeof(skb->cb));
1341 +       skb->cb_next = next->cb_next;
1342 +
1343 +       spin_lock(&skb_cb_store_lock);
1344 +
1345 +       if (atomic_dec_and_test(&next->refcnt))
1346 +               kmem_cache_free(skbuff_cb_store_cache, next);
1347 +
1348 +       spin_unlock(&skb_cb_store_lock);
1349 +
1350 +       return 0;
1351 +}
1352 +EXPORT_SYMBOL(skb_restore_cb);
1353 +
1354 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old) __attribute__ ((unused));
1355 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1356 +{
1357 +       struct skb_cb_table *next;
1358 +       struct sk_buff *old;
1359 +
1360 +       if (!__old->cb_next) {
1361 +               new->cb_next = NULL;
1362 +               return;
1363 +       }
1364 +
1365 +       spin_lock(&skb_cb_store_lock);
1366 +
1367 +       old = (struct sk_buff *)__old;
1368 +
1369 +       next = old->cb_next;
1370 +       atomic_inc(&next->refcnt);
1371 +       new->cb_next = next;
1372 +
1373 +       spin_unlock(&skb_cb_store_lock);
1374 +}
1375 +#endif
1376  
1377  /**
1378   *     skb_panic - private function for out-of-line support
1379 @@ -658,6 +739,28 @@ static void skb_release_head_state(struc
1380                 WARN_ON(in_irq());
1381                 skb->destructor(skb);
1382         }
1383 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1384 +       /*
1385 +        * This should not happen. When it does, avoid memleak by restoring
1386 +        * the chain of cb-backups.
1387 +        */
1388 +       while (skb->cb_next != NULL) {
1389 +               if (net_ratelimit())
1390 +                       pr_warn("IMQ: kfree_skb: skb->cb_next: %08x\n",
1391 +                               (unsigned int)(uintptr_t)skb->cb_next);
1392 +
1393 +               skb_restore_cb(skb);
1394 +       }
1395 +       /*
1396 +        * This should not happen either, nf_queue_entry is nullified in
1397 +        * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
1398 +        * leaking entry pointers, maybe memory. We don't know if this is
1399 +        * pointer to already freed memory, or should this be freed.
1400 +        * If this happens we need to add refcounting, etc for nf_queue_entry.
1401 +        */
1402 +       if (skb->nf_queue_entry && net_ratelimit())
1403 +               pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1404 +#endif
1405  #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1406         nf_conntrack_put(skb->nfct);
1407  #endif
1408 @@ -780,6 +883,10 @@ static void __copy_skb_header(struct sk_
1409         new->sp                 = secpath_get(old->sp);
1410  #endif
1411         __nf_copy(new, old, false);
1412 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1413 +       new->cb_next = NULL;
1414 +       /*skb_copy_stored_cb(new, old);*/
1415 +#endif
1416  
1417         /* Note : this field could be in headers_start/headers_end section
1418          * It is not yet because we do not want to have a 16 bit hole
1419 @@ -3390,6 +3497,13 @@ void __init skb_init(void)
1420                                                 0,
1421                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1422                                                 NULL);
1423 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1424 +       skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1425 +                                                 sizeof(struct skb_cb_table),
1426 +                                                 0,
1427 +                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1428 +                                                 NULL);
1429 +#endif
1430  }
1431  
1432  static int
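
skb_save_cb()/skb_restore_cb() above keep a per-skb chain of 48-byte cb backups so the qdisc layer may scribble over skb->cb while a packet sits on an IMQ qdisc. A simplified userspace model of that push/pop discipline (illustrative only; the real code uses a kmem_cache and a refcount so clones can share one backup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cb_backup { char cb[48]; struct cb_backup *next; };
struct fake_skb  { char cb[48]; struct cb_backup *cb_next; };

static int save_cb(struct fake_skb *skb)	/* cf. skb_save_cb() */
{
	struct cb_backup *b = malloc(sizeof(*b));

	if (!b)
		return -1;
	memcpy(b->cb, skb->cb, sizeof(skb->cb));
	b->next = skb->cb_next;
	skb->cb_next = b;			/* push */
	return 0;
}

static void restore_cb(struct fake_skb *skb)	/* cf. skb_restore_cb() */
{
	struct cb_backup *b = skb->cb_next;

	if (!b)
		return;
	memcpy(skb->cb, b->cb, sizeof(skb->cb));
	skb->cb_next = b->next;			/* pop */
	free(b);
}

int main(void)
{
	struct fake_skb skb = { .cb = "saved-before-enqueue" };

	save_cb(&skb);				/* as in __imq_nf_queue() */
	strcpy(skb.cb, "overwritten-by-qdisc");	/* qdisc layer uses cb */
	restore_cb(&skb);			/* as in imq_dev_xmit() */
	printf("%s\n", skb.cb);			/* "saved-before-enqueue" */
	return 0;
}
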
1433 diff -urNp -x '*.orig' linux-4.4/net/ipv6/ip6_output.c linux-4.4/net/ipv6/ip6_output.c
1434 --- linux-4.4/net/ipv6/ip6_output.c     2021-02-24 16:53:24.600492146 +0100
1435 +++ linux-4.4/net/ipv6/ip6_output.c     2021-02-24 16:53:35.794177433 +0100
1436 @@ -65,9 +65,6 @@ static int ip6_finish_output2(struct net
1437         struct in6_addr *nexthop;
1438         int ret;
1439  
1440 -       skb->protocol = htons(ETH_P_IPV6);
1441 -       skb->dev = dev;
1442 -
1443         if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
1444                 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1445  
1446 @@ -142,6 +139,13 @@ int ip6_output(struct net *net, struct s
1447                 return 0;
1448         }
1449  
1450 +       /*
1451 +        * IMQ patch: moved setting skb->dev and skb->protocol from
1452 +        * ip6_finish_output2() to fix a crash in netif_skb_features().
1453 +        */
1454 +       skb->protocol = htons(ETH_P_IPV6);
1455 +       skb->dev = dev;
1456 +
1457         return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
1458                             net, sk, skb, NULL, dev,
1459                             ip6_finish_output,
1460 diff -urNp -x '*.orig' linux-4.4/net/netfilter/Kconfig linux-4.4/net/netfilter/Kconfig
1461 --- linux-4.4/net/netfilter/Kconfig     2021-02-24 16:53:25.017171913 +0100
1462 +++ linux-4.4/net/netfilter/Kconfig     2021-02-24 16:53:35.794177433 +0100
1463 @@ -785,6 +785,18 @@ config NETFILTER_XT_TARGET_LOG
1464  
1465           To compile it as a module, choose M here.  If unsure, say N.
1466  
1467 +config NETFILTER_XT_TARGET_IMQ
1468 +        tristate '"IMQ" target support'
1469 +       depends on NETFILTER_XTABLES
1470 +       depends on IP_NF_MANGLE || IP6_NF_MANGLE
1471 +       select IMQ
1472 +       default m if NETFILTER_ADVANCED=n
1473 +        help
1474 +          This option adds an `IMQ' target which is used to specify
1475 +          whether, and to which imq device, packets get enqueued/dequeued.
1476 +
1477 +          To compile it as a module, choose M here.  If unsure, say N.
1478 +
1479  config NETFILTER_XT_TARGET_MARK
1480         tristate '"MARK" target support'
1481         depends on NETFILTER_ADVANCED
1482 diff -urNp -x '*.orig' linux-4.4/net/netfilter/Makefile linux-4.4/net/netfilter/Makefile
1483 --- linux-4.4/net/netfilter/Makefile    2021-02-24 16:53:25.017171913 +0100
1484 +++ linux-4.4/net/netfilter/Makefile    2021-02-24 16:53:35.794177433 +0100
1485 @@ -108,6 +108,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) +=
1486  obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1487  obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1488  obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
1489 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1490  obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1491  obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
1492  obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
1493 diff -urNp -x '*.orig' linux-4.4/net/netfilter/core.c linux-4.4/net/netfilter/core.c
1494 --- linux-4.4/net/netfilter/core.c      2016-01-11 00:01:32.000000000 +0100
1495 +++ linux-4.4/net/netfilter/core.c      2021-02-24 16:53:35.794177433 +0100
1496 @@ -311,9 +311,11 @@ next_hook:
1497                 ret = NF_DROP_GETERR(verdict);
1498                 if (ret == 0)
1499                         ret = -EPERM;
1500 -       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
1501 +       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
1502 +                  (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
1503                 int err = nf_queue(skb, elem, state,
1504 -                                  verdict >> NF_VERDICT_QBITS);
1505 +                                  verdict >> NF_VERDICT_QBITS,
1506 +                                  verdict & NF_VERDICT_MASK);
1507                 if (err < 0) {
1508                         if (err == -ESRCH &&
1509                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
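
For reference, NF_QUEUE and NF_IMQ_QUEUE can share nf_queue() because the queue number is packed above the verdict code in the same word. A hedged sketch of how a hook could emit the new verdict (imq_verdict() is a hypothetical helper; NF_IMQ_QUEUE itself is added by this patch's netfilter header hunk):

	#include <linux/netfilter.h>

	/* Hypothetical helper: packs a queue number next to the verdict
	 * code, the exact inverse of the "verdict >> NF_VERDICT_QBITS"
	 * decode performed above. */
	static inline unsigned int imq_verdict(unsigned int queuenum)
	{
		return NF_IMQ_QUEUE | (queuenum << NF_VERDICT_QBITS);
	}
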
1510 diff -urNp -x '*.orig' linux-4.4/net/netfilter/nf_internals.h linux-4.4/net/netfilter/nf_internals.h
1511 --- linux-4.4/net/netfilter/nf_internals.h      2016-01-11 00:01:32.000000000 +0100
1512 +++ linux-4.4/net/netfilter/nf_internals.h      2021-02-24 16:53:35.794177433 +0100
1513 @@ -18,7 +18,7 @@ unsigned int nf_iterate(struct list_head
1514  
1515  /* nf_queue.c */
1516  int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
1517 -            struct nf_hook_state *state, unsigned int queuenum);
1518 +            struct nf_hook_state *state, unsigned int queuenum, unsigned int queuetype);
1519  void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
1520  int __init netfilter_queue_init(void);
1521  
1522 diff -urNp -x '*.orig' linux-4.4/net/netfilter/nf_queue.c linux-4.4/net/netfilter/nf_queue.c
1523 --- linux-4.4/net/netfilter/nf_queue.c  2021-02-24 16:53:24.633826528 +0100
1524 +++ linux-4.4/net/netfilter/nf_queue.c  2021-02-24 16:53:35.794177433 +0100
1525 @@ -27,6 +27,23 @@
1526   * receives, no matter what.
1527   */
1528  
1529 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1530 +static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
1531 +
1532 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1533 +{
1534 +       rcu_assign_pointer(queue_imq_handler, qh);
1535 +}
1536 +EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
1537 +
1538 +void nf_unregister_queue_imq_handler(void)
1539 +{
1540 +       RCU_INIT_POINTER(queue_imq_handler, NULL);
1541 +       synchronize_rcu();
1542 +}
1543 +EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
1544 +#endif
1545 +
1546  /* return EBUSY when somebody else is registered, return EEXIST if the
1547   * same handler is registered, return 0 in case of success. */
1548  void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
1549 @@ -114,7 +131,8 @@ void nf_queue_nf_hook_drop(struct net *n
1550  int nf_queue(struct sk_buff *skb,
1551              struct nf_hook_ops *elem,
1552              struct nf_hook_state *state,
1553 -            unsigned int queuenum)
1554 +            unsigned int queuenum,
1555 +            unsigned int queuetype)
1556  {
1557         int status = -ENOENT;
1558         struct nf_queue_entry *entry = NULL;
1559 @@ -123,7 +141,17 @@ int nf_queue(struct sk_buff *skb,
1560         struct net *net = state->net;
1561  
1562         /* QUEUE == DROP if no one is waiting, to be safe. */
1563 -       qh = rcu_dereference(net->nf.queue_handler);
1564 +       if (queuetype == NF_IMQ_QUEUE) {
1565 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1566 +               qh = rcu_dereference(queue_imq_handler);
1567 +#else
1568 +               BUG();
1569 +               goto err;
1570 +#endif
1571 +       } else {
1572 +               qh = rcu_dereference(net->nf.queue_handler);
1573 +       }
1574 +
1575         if (!qh) {
1576                 status = -ESRCH;
1577                 goto err;
1578 @@ -198,8 +226,10 @@ void nf_reinject(struct nf_queue_entry *
1579                 local_bh_enable();
1580                 break;
1581         case NF_QUEUE:
1582 +       case NF_IMQ_QUEUE:
1583                 err = nf_queue(skb, elem, &entry->state,
1584 -                              verdict >> NF_VERDICT_QBITS);
1585 +                              verdict >> NF_VERDICT_QBITS,
1586 +                              verdict & NF_VERDICT_MASK);
1587                 if (err < 0) {
1588                         if (err == -ESRCH &&
1589                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
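
The register/unregister pair exported above is what the imq driver is expected to call on load and unload. A minimal sketch, assuming 4.4's struct nf_queue_handler layout (its .outfn callback) and stubbing the enqueue function; the real callback lives in the imq device driver added elsewhere in this patch:

	#include <linux/module.h>
	#include <linux/errno.h>
	#include <net/netfilter/nf_queue.h>

	/* Stub: the real driver enqueues the entry to an imq device here. */
	static int imq_nf_queue(struct nf_queue_entry *entry,
				unsigned int queuenum)
	{
		return -ENOSPC;
	}

	static const struct nf_queue_handler imq_nfqh = {
		.outfn = imq_nf_queue,	/* assumed 4.4 field name */
	};

	static int __init imq_sketch_init(void)
	{
		nf_register_queue_imq_handler(&imq_nfqh);
		return 0;
	}

	static void __exit imq_sketch_exit(void)
	{
		/* synchronize_rcu() inside guarantees no call is in flight */
		nf_unregister_queue_imq_handler();
	}

	module_init(imq_sketch_init);
	module_exit(imq_sketch_exit);
	MODULE_LICENSE("GPL");
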
1590 diff -urNp -x '*.orig' linux-4.4/net/netfilter/xt_IMQ.c linux-4.4/net/netfilter/xt_IMQ.c
1591 --- linux-4.4/net/netfilter/xt_IMQ.c    1970-01-01 01:00:00.000000000 +0100
1592 +++ linux-4.4/net/netfilter/xt_IMQ.c    2021-02-24 16:53:35.794177433 +0100
1593 @@ -0,0 +1,72 @@
1594 +/*
1595 + * This target marks packets to be enqueued to an imq device
1596 + */
1597 +#include <linux/module.h>
1598 +#include <linux/skbuff.h>
1599 +#include <linux/netfilter/x_tables.h>
1600 +#include <linux/netfilter/xt_IMQ.h>
1601 +#include <linux/imq.h>
1602 +
1603 +static unsigned int imq_target(struct sk_buff *pskb,
1604 +                               const struct xt_action_param *par)
1605 +{
1606 +       const struct xt_imq_info *mr = par->targinfo;
1607 +
1608 +       pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1609 +
1610 +       return XT_CONTINUE;
1611 +}
1612 +
1613 +static int imq_checkentry(const struct xt_tgchk_param *par)
1614 +{
1615 +       struct xt_imq_info *mr = par->targinfo;
1616 +
1617 +       if (mr->todev > IMQ_MAX_DEVS - 1) {
1618 +               pr_warn("IMQ: invalid device specified, highest is %u\n",
1619 +                       IMQ_MAX_DEVS - 1);
1620 +               return -EINVAL;
1621 +       }
1622 +
1623 +       return 0;
1624 +}
1625 +
1626 +static struct xt_target xt_imq_reg[] __read_mostly = {
1627 +       {
1628 +               .name           = "IMQ",
1629 +               .family         = AF_INET,
1630 +               .checkentry     = imq_checkentry,
1631 +               .target         = imq_target,
1632 +               .targetsize     = sizeof(struct xt_imq_info),
1633 +               .table          = "mangle",
1634 +               .me             = THIS_MODULE
1635 +       },
1636 +       {
1637 +               .name           = "IMQ",
1638 +               .family         = AF_INET6,
1639 +               .checkentry     = imq_checkentry,
1640 +               .target         = imq_target,
1641 +               .targetsize     = sizeof(struct xt_imq_info),
1642 +               .table          = "mangle",
1643 +               .me             = THIS_MODULE
1644 +       },
1645 +};
1646 +
1647 +static int __init imq_init(void)
1648 +{
1649 +       return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1650 +}
1651 +
1652 +static void __exit imq_fini(void)
1653 +{
1654 +       xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1655 +}
1656 +
1657 +module_init(imq_init);
1658 +module_exit(imq_fini);
1659 +
1660 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1661 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1662 +MODULE_LICENSE("GPL");
1663 +MODULE_ALIAS("ipt_IMQ");
1664 +MODULE_ALIAS("ip6t_IMQ");
1665 +
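
Note that the target itself only tags the packet; nothing is enqueued until the device-layer hook inspects the flags. A hedged sketch of that consumer side (skb_wants_imq() is hypothetical; IMQ_F_ENQUEUE and IMQ_F_IFMASK come from this patch's <linux/imq.h>):

	#include <linux/imq.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	/*
	 * Typical ruleset driving the target, for illustration:
	 *   iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0
	 *   tc qdisc add dev imq0 root handle 1: htb
	 */
	static bool skb_wants_imq(const struct sk_buff *skb,
				  unsigned int *index)
	{
		if (!(skb->imq_flags & IMQ_F_ENQUEUE))
			return false;

		*index = skb->imq_flags & IMQ_F_IFMASK;	/* imq0..imqN */
		return true;
	}
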
1666 diff -urNp -x '*.orig' linux-4.4/net/sched/sch_generic.c linux-4.4/net/sched/sch_generic.c
1667 --- linux-4.4/net/sched/sch_generic.c   2021-02-24 16:53:24.657160594 +0100
1668 +++ linux-4.4/net/sched/sch_generic.c   2021-02-24 16:53:35.794177433 +0100
1669 @@ -110,6 +110,14 @@ static struct sk_buff *dequeue_skb(struc
1670         return skb;
1671  }
1672  
1673 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate)
1674 +{
1675 +       int packets;
1676 +
1677 +       return dequeue_skb(q, validate, &packets);
1678 +}
1679 +EXPORT_SYMBOL(qdisc_dequeue_skb);
1680 +
1681  static inline int handle_dev_cpu_collision(struct sk_buff *skb,
1682                                            struct netdev_queue *dev_queue,
1683                                            struct Qdisc *q)
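
qdisc_dequeue_skb() simply re-exports the static dequeue_skb() (discarding its packet count) so the imq device can pull from a root qdisc without duplicating the bulk-dequeue logic. A hypothetical caller, honoring the same validate contract as sch_generic.c's own users:

	#include <linux/netdevice.h>
	#include <net/sch_generic.h>

	/* Hypothetical xmit-side helper for the imq device. */
	static struct sk_buff *imq_dequeue(struct Qdisc *q)
	{
		bool validate;
		struct sk_buff *skb = qdisc_dequeue_skb(q, &validate);

		/* a true 'validate' means the caller still has to run the
		 * skb through validate_xmit_skb_list(), as sch_generic.c does */
		if (skb && validate)
			skb = validate_xmit_skb_list(skb, qdisc_dev(q));

		return skb;
	}
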