kernel-imq.patch: updated imq patch to latest upstream
1 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
2 index df51d60..e937550 100644
3 --- a/drivers/net/Kconfig
4 +++ b/drivers/net/Kconfig
5 @@ -220,6 +220,125 @@ config RIONET_RX_SIZE
6         depends on RIONET
7         default "128"
8  
9 +config IMQ
10 +       tristate "IMQ (intermediate queueing device) support"
11 +       depends on NETDEVICES && NETFILTER
12 +       ---help---
13 +         The IMQ device(s) is used as a placeholder for QoS queueing
14 +         disciplines. Every packet entering/leaving the IP stack can be
15 +         directed through the IMQ device where it's enqueued/dequeued to the
16 +         attached qdisc. This allows you to treat network devices as classes
17 +         and distribute bandwidth among them. Iptables is used to specify
18 +         through which IMQ device, if any, packets travel.
19 +
20 +         More information at: https://github.com/imq/linuximq
21 +
22 +         To compile this driver as a module, choose M here: the module
23 +         will be called imq.  If unsure, say N.
24 +
25 +choice
26 +       prompt "IMQ behavior (PRE/POSTROUTING)"
27 +       depends on IMQ
28 +       default IMQ_BEHAVIOR_AB
29 +       help
30 +         This setting defines how IMQ behaves with respect to its
31 +         hooking in PREROUTING and POSTROUTING.
32 +
33 +         IMQ can work in any of the following ways:
34 +
35 +             PREROUTING   |      POSTROUTING
36 +         -----------------|-------------------
37 +         #1  After NAT    |      After NAT
38 +         #2  After NAT    |      Before NAT
39 +         #3  Before NAT   |      After NAT
40 +         #4  Before NAT   |      Before NAT
41 +
42 +         The default behavior (IMQ_BEHAVIOR_AB) is to hook after NAT on
43 +         PREROUTING and before NAT on POSTROUTING (#2).
44 +
45 +         These settings are especially useful when trying to use IMQ
46 +         to shape NATed clients.
47 +
48 +         More information can be found at: https://github.com/imq/linuximq
49 +
50 +         If unsure, leave the default settings alone.
51 +
52 +config IMQ_BEHAVIOR_AA
53 +       bool "IMQ AA"
54 +       help
55 +         This setting defines how IMQ behaves with respect to its
56 +         hooking in PREROUTING and POSTROUTING.
57 +
58 +         Choosing this option will make IMQ hook like this:
59 +
60 +         PREROUTING:   After NAT
61 +         POSTROUTING:  After NAT
62 +
63 +         More information can be found at: https://github.com/imq/linuximq
64 +
65 +         If unsure, leave the default settings alone.
66 +
67 +config IMQ_BEHAVIOR_AB
68 +       bool "IMQ AB"
69 +       help
70 +         This setting defines how IMQ behaves with respect to its
71 +         hooking in PREROUTING and POSTROUTING.
72 +
73 +         Choosing this option will make IMQ hook like this:
74 +
75 +         PREROUTING:   After NAT
76 +         POSTROUTING:  Before NAT
77 +
78 +         More information can be found at: https://github.com/imq/linuximq
79 +
80 +         If unsure, leave the default settings alone.
81 +
82 +config IMQ_BEHAVIOR_BA
83 +       bool "IMQ BA"
84 +       help
85 +         This setting defines how IMQ behaves with respect to its
86 +         hooking in PREROUTING and POSTROUTING.
87 +
88 +         Choosing this option will make IMQ hook like this:
89 +
90 +         PREROUTING:   Before NAT
91 +         POSTROUTING:  After NAT
92 +
93 +         More information can be found at: https://github.com/imq/linuximq
94 +
95 +         If unsure, leave the default settings alone.
96 +
97 +config IMQ_BEHAVIOR_BB
98 +       bool "IMQ BB"
99 +       help
100 +         This setting defines how IMQ behaves with respect to its
101 +         hooking in PREROUTING and POSTROUTING.
102 +
103 +         Choosing this option will make IMQ hook like this:
104 +
105 +         PREROUTING:   Before NAT
106 +         POSTROUTING:  Before NAT
107 +
108 +         More information can be found at: https://github.com/imq/linuximq
109 +
110 +         If unsure, leave the default settings alone.
111 +
112 +endchoice
113 +
114 +config IMQ_NUM_DEVS
115 +       int "Number of IMQ devices"
116 +       range 2 16
117 +       depends on IMQ
118 +       default "16"
119 +       help
120 +         This setting defines how many IMQ devices will be created.
121 +
122 +         The default value is 16.
123 +
124 +         More information can be found at: https://github.com/imq/linuximq
125 +
126 +         If unsure, leave the default settings alone.
127 +
128  config TUN
129         tristate "Universal TUN/TAP device driver support"
130         depends on INET
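Note: the behavior table above corresponds directly to the netfilter hook priorities that the driver registers (see the imq_ops array added in drivers/net/imq.c below). As an illustration only, using the stock <linux/netfilter_ipv4.h> priority constants (lower values run first), the four combinations order against NAT like this:

/* Illustrative sketch only: values are the stock netfilter priorities. */
enum {
	EX_NF_IP_PRI_MANGLE  = -150,
	EX_NF_IP_PRI_NAT_DST = -100,	/* DNAT hooks here on PREROUTING */
	EX_NF_IP_PRI_NAT_SRC =  100,	/* SNAT hooks here on POSTROUTING */
};

/* PREROUTING  "Before NAT": MANGLE + 1  = -149 -> runs before DNAT (-100)
 * PREROUTING  "After NAT":  NAT_DST + 1 =  -99 -> runs after DNAT
 * POSTROUTING "Before NAT": NAT_SRC - 1 =   99 -> runs before SNAT (100)
 * POSTROUTING "After NAT":  NF_IP_PRI_LAST (INT_MAX) -> runs last
 */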
131 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
132 index e25fdd7..b411742 100644
133 --- a/drivers/net/Makefile
134 +++ b/drivers/net/Makefile
135 @@ -10,6 +10,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
136  obj-$(CONFIG_DUMMY) += dummy.o
137  obj-$(CONFIG_EQUALIZER) += eql.o
138  obj-$(CONFIG_IFB) += ifb.o
139 +obj-$(CONFIG_IMQ) += imq.o
140  obj-$(CONFIG_MACVLAN) += macvlan.o
141  obj-$(CONFIG_MACVTAP) += macvtap.o
142  obj-$(CONFIG_MII) += mii.o
143 diff --git a/drivers/net/imq.c b/drivers/net/imq.c
144 new file mode 100644
145 index 0000000..b010f39
146 --- /dev/null
147 +++ b/drivers/net/imq.c
148 @@ -0,0 +1,903 @@
149 +/*
150 + *             Pseudo-driver for the intermediate queue device.
151 + *
152 + *             This program is free software; you can redistribute it and/or
153 + *             modify it under the terms of the GNU General Public License
154 + *             as published by the Free Software Foundation; either version
155 + *             2 of the License, or (at your option) any later version.
156 + *
157 + * Authors:    Patrick McHardy, <kaber@trash.net>
158 + *
159 + *            The first version was written by Martin Devera, <devik@cdi.cz>
160 + *
161 + *                        See Credits.txt
162 + */
163 +
164 +#include <linux/module.h>
165 +#include <linux/kernel.h>
166 +#include <linux/moduleparam.h>
167 +#include <linux/list.h>
168 +#include <linux/skbuff.h>
169 +#include <linux/netdevice.h>
170 +#include <linux/etherdevice.h>
171 +#include <linux/rtnetlink.h>
172 +#include <linux/if_arp.h>
173 +#include <linux/netfilter.h>
174 +#include <linux/netfilter_ipv4.h>
175 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
176 +       #include <linux/netfilter_ipv6.h>
177 +#endif
178 +#include <linux/imq.h>
179 +#include <net/pkt_sched.h>
180 +#include <net/netfilter/nf_queue.h>
181 +#include <net/sock.h>
182 +#include <linux/ip.h>
183 +#include <linux/ipv6.h>
184 +#include <linux/if_vlan.h>
185 +#include <linux/if_pppox.h>
186 +#include <net/ip.h>
187 +#include <net/ipv6.h>
188 +
189 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
190 +
191 +static nf_hookfn imq_nf_hook;
192 +
193 +static struct nf_hook_ops imq_ops[] = {
194 +       {
195 +       /* imq_ingress_ipv4 */
196 +               .hook           = imq_nf_hook,
197 +               .owner          = THIS_MODULE,
198 +               .pf             = PF_INET,
199 +               .hooknum        = NF_INET_PRE_ROUTING,
200 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
201 +               .priority       = NF_IP_PRI_MANGLE + 1,
202 +#else
203 +               .priority       = NF_IP_PRI_NAT_DST + 1,
204 +#endif
205 +       },
206 +       {
207 +       /* imq_egress_ipv4 */
208 +               .hook           = imq_nf_hook,
209 +               .owner          = THIS_MODULE,
210 +               .pf             = PF_INET,
211 +               .hooknum        = NF_INET_POST_ROUTING,
212 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
213 +               .priority       = NF_IP_PRI_LAST,
214 +#else
215 +               .priority       = NF_IP_PRI_NAT_SRC - 1,
216 +#endif
217 +       },
218 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
219 +       {
220 +       /* imq_ingress_ipv6 */
221 +               .hook           = imq_nf_hook,
222 +               .owner          = THIS_MODULE,
223 +               .pf             = PF_INET6,
224 +               .hooknum        = NF_INET_PRE_ROUTING,
225 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
226 +               .priority       = NF_IP6_PRI_MANGLE + 1,
227 +#else
228 +               .priority       = NF_IP6_PRI_NAT_DST + 1,
229 +#endif
230 +       },
231 +       {
232 +       /* imq_egress_ipv6 */
233 +               .hook           = imq_nf_hook,
234 +               .owner          = THIS_MODULE,
235 +               .pf             = PF_INET6,
236 +               .hooknum        = NF_INET_POST_ROUTING,
237 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
238 +               .priority       = NF_IP6_PRI_LAST,
239 +#else
240 +               .priority       = NF_IP6_PRI_NAT_SRC - 1,
241 +#endif
242 +       },
243 +#endif
244 +};
245 +
246 +#if defined(CONFIG_IMQ_NUM_DEVS)
247 +static int numdevs = CONFIG_IMQ_NUM_DEVS;
248 +#else
249 +static int numdevs = IMQ_MAX_DEVS;
250 +#endif
251 +
252 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
253 +
254 +#define IMQ_MAX_QUEUES 32
255 +static int numqueues = 1;
256 +static u32 imq_hashrnd;
257 +static int imq_dev_accurate_stats = 1;
258 +
259 +static inline __be16 pppoe_proto(const struct sk_buff *skb)
260 +{
261 +       return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
262 +                       sizeof(struct pppoe_hdr)));
263 +}
264 +
265 +static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
266 +{
267 +       unsigned int pull_len;
268 +       u16 protocol = skb->protocol;
269 +       u32 addr1, addr2;
270 +       u32 hash, ihl = 0;
271 +       union {
272 +               u16 in16[2];
273 +               u32 in32;
274 +       } ports;
275 +       u8 ip_proto;
276 +
277 +       pull_len = 0;
278 +
279 +recheck:
280 +       switch (protocol) {
281 +       case htons(ETH_P_8021Q): {
282 +               if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
283 +                       goto other;
284 +
285 +               pull_len += VLAN_HLEN;
286 +               skb->network_header += VLAN_HLEN;
287 +
288 +               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
289 +               goto recheck;
290 +       }
291 +
292 +       case htons(ETH_P_PPP_SES): {
293 +               if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
294 +                       goto other;
295 +
296 +               pull_len += PPPOE_SES_HLEN;
297 +               skb->network_header += PPPOE_SES_HLEN;
298 +
299 +               protocol = pppoe_proto(skb);
300 +               goto recheck;
301 +       }
302 +
303 +       case htons(ETH_P_IP): {
304 +               const struct iphdr *iph = ip_hdr(skb);
305 +
306 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
307 +                       goto other;
308 +
309 +               addr1 = iph->daddr;
310 +               addr2 = iph->saddr;
311 +
312 +               ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
313 +                                iph->protocol : 0;
314 +               ihl = ip_hdrlen(skb);
315 +
316 +               break;
317 +       }
318 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
319 +       case htons(ETH_P_IPV6): {
320 +               const struct ipv6hdr *iph = ipv6_hdr(skb);
321 +               __be16 fo = 0;
322 +
323 +               if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
324 +                       goto other;
325 +
326 +               addr1 = iph->daddr.s6_addr32[3];
327 +               addr2 = iph->saddr.s6_addr32[3];
328 +               ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
329 +                                      &fo);
330 +               if (unlikely((int)ihl < 0))
331 +                       goto other;
332 +
333 +               break;
334 +       }
335 +#endif
336 +       default:
337 +other:
338 +               if (pull_len != 0) {
339 +                       skb_push(skb, pull_len);
340 +                       skb->network_header -= pull_len;
341 +               }
342 +
343 +               return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
344 +       }
345 +
346 +       if (addr1 > addr2)
347 +               swap(addr1, addr2);
348 +
349 +       switch (ip_proto) {
350 +       case IPPROTO_TCP:
351 +       case IPPROTO_UDP:
352 +       case IPPROTO_DCCP:
353 +       case IPPROTO_ESP:
354 +       case IPPROTO_AH:
355 +       case IPPROTO_SCTP:
356 +       case IPPROTO_UDPLITE: {
357 +               if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
358 +                       if (ports.in16[0] > ports.in16[1])
359 +                               swap(ports.in16[0], ports.in16[1]);
360 +                       break;
361 +               }
362 +               /* fall-through */
363 +       }
364 +       default:
365 +               ports.in32 = 0;
366 +               break;
367 +       }
368 +
369 +       if (pull_len != 0) {
370 +               skb_push(skb, pull_len);
371 +               skb->network_header -= pull_len;
372 +       }
373 +
374 +       hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
375 +
376 +       return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
377 +}
378 +
379 +static inline bool sk_tx_queue_recorded(struct sock *sk)
380 +{
381 +       return (sk_tx_queue_get(sk) >= 0);
382 +}
383 +
384 +static struct netdev_queue *imq_select_queue(struct net_device *dev,
385 +                                               struct sk_buff *skb)
386 +{
387 +       u16 queue_index = 0;
388 +       u32 hash;
389 +
390 +       if (likely(dev->real_num_tx_queues == 1))
391 +               goto out;
392 +
393 +       /* IMQ can be receiving ingress or egress packets. */
394 +
395 +       /* Check first if rx_queue is set */
396 +       if (skb_rx_queue_recorded(skb)) {
397 +               queue_index = skb_get_rx_queue(skb);
398 +               goto out;
399 +       }
400 +
401 +       /* Check if socket has tx_queue set */
402 +       if (sk_tx_queue_recorded(skb->sk)) {
403 +               queue_index = sk_tx_queue_get(skb->sk);
404 +               goto out;
405 +       }
406 +
407 +       /* Try to use the socket hash */
408 +       if (skb->sk && skb->sk->sk_hash) {
409 +               hash = skb->sk->sk_hash;
410 +               queue_index =
411 +                       (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
412 +               goto out;
413 +       }
414 +
415 +       /* Generate hash from packet data */
416 +       queue_index = imq_hash(dev, skb);
417 +
418 +out:
419 +       if (unlikely(queue_index >= dev->real_num_tx_queues))
420 +               queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
421 +
422 +       skb_set_queue_mapping(skb, queue_index);
423 +       return netdev_get_tx_queue(dev, queue_index);
424 +}
425 +
426 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
427 +{
428 +       return &dev->stats;
429 +}
430 +
431 +/* called for packets kfree'd in qdiscs at places other than enqueue */
432 +static void imq_skb_destructor(struct sk_buff *skb)
433 +{
434 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
435 +
436 +       skb->nf_queue_entry = NULL;
437 +
438 +       if (entry) {
439 +               nf_queue_entry_release_refs(entry);
440 +               kfree(entry);
441 +       }
442 +
443 +       skb_restore_cb(skb); /* kfree backup */
444 +}
445 +
446 +static void imq_done_check_queue_mapping(struct sk_buff *skb,
447 +                                        struct net_device *dev)
448 +{
449 +       unsigned int queue_index;
450 +
451 +       /* Don't let queue_mapping be left too large after exiting IMQ */
452 +       if (likely(skb->dev != dev && skb->dev != NULL)) {
453 +               queue_index = skb_get_queue_mapping(skb);
454 +               if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
455 +                       queue_index = (u16)((u32)queue_index %
456 +                                               skb->dev->real_num_tx_queues);
457 +                       skb_set_queue_mapping(skb, queue_index);
458 +               }
459 +       } else {
460 +               /* skb->dev was the IMQ device itself or NULL; be on the safe
461 +                * side and just clear the queue mapping.
462 +                */
463 +               skb_set_queue_mapping(skb, 0);
464 +       }
465 +}
466 +
467 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
468 +{
469 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
470 +
471 +       skb->nf_queue_entry = NULL;
472 +       dev->trans_start = jiffies;
473 +
474 +       dev->stats.tx_bytes += skb->len;
475 +       dev->stats.tx_packets++;
476 +
477 +       if (unlikely(entry == NULL)) {
478 +               /* We don't know what is going on here... packet is queued for
479 +                * the imq device, but (probably) not by us.
480 +                *
481 +                * If this packet was not sent here by imq_nf_queue(), then
482 +                * skb_save_cb() was not used and kfree_skb() should not show:
483 +                *   WARNING: IMQ: kfree_skb: skb->cb_next:..
484 +                * and/or
485 +                *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
486 +                *
487 +                * However, if this message is shown, then IMQ is somehow broken
488 +                * and you should report this to linuximq.net.
489 +                */
490 +
491 +               /* imq_dev_xmit is a black hole that eats all packets; report that
492 +                * we eat this packet happily and increase the dropped counters.
493 +                */
494 +
495 +               dev->stats.tx_dropped++;
496 +               dev_kfree_skb(skb);
497 +
498 +               return NETDEV_TX_OK;
499 +       }
500 +
501 +       skb_restore_cb(skb); /* restore skb->cb */
502 +
503 +       skb->imq_flags = 0;
504 +       skb->destructor = NULL;
505 +
506 +       imq_done_check_queue_mapping(skb, dev);
507 +
508 +       nf_reinject(entry, NF_ACCEPT);
509 +
510 +       return NETDEV_TX_OK;
511 +}
512 +
513 +static struct net_device *get_imq_device_by_index(int index)
514 +{
515 +       struct net_device *dev = NULL;
516 +       struct net *net;
517 +       char buf[8];
518 +
519 +       /* get device by name and cache result */
520 +       snprintf(buf, sizeof(buf), "imq%d", index);
521 +
522 +       /* Search device from all namespaces. */
523 +       for_each_net(net) {
524 +               dev = dev_get_by_name(net, buf);
525 +               if (dev)
526 +                       break;
527 +       }
528 +
529 +       if (WARN_ON_ONCE(dev == NULL)) {
530 +               /* IMQ device not found. Exotic config? */
531 +               return ERR_PTR(-ENODEV);
532 +       }
533 +
534 +       imq_devs_cache[index] = dev;
535 +       dev_put(dev);
536 +
537 +       return dev;
538 +}
539 +
540 +static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
541 +{
542 +       struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
543 +       if (entry) {
544 +               if (nf_queue_entry_get_refs(entry))
545 +                       return entry;
546 +               kfree(entry);
547 +       }
548 +       return NULL;
549 +}
550 +
551 +#ifdef CONFIG_BRIDGE_NETFILTER
552 +/* When called from bridge netfilter, skb->data must point to MAC header
553 + * before calling skb_gso_segment(). Else, original MAC header is lost
554 + * and segmented skbs will be sent to wrong destination.
555 + */
556 +static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
557 +{
558 +       if (skb->nf_bridge)
559 +               __skb_push(skb, skb->network_header - skb->mac_header);
560 +}
561 +
562 +static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
563 +{
564 +       if (skb->nf_bridge)
565 +               __skb_pull(skb, skb->network_header - skb->mac_header);
566 +}
567 +#else
568 +#define nf_bridge_adjust_skb_data(s) do {} while (0)
569 +#define nf_bridge_adjust_segmented_data(s) do {} while (0)
570 +#endif
571 +
572 +static void free_entry(struct nf_queue_entry *entry)
573 +{
574 +       nf_queue_entry_release_refs(entry);
575 +       kfree(entry);
576 +}
577 +
578 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
579 +
580 +static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
581 +                             struct net_device *dev, struct sk_buff *skb)
582 +{
583 +       int ret = -ENOMEM;
584 +       struct nf_queue_entry *entry_seg;
585 +
586 +       nf_bridge_adjust_segmented_data(skb);
587 +
588 +       if (skb->next == NULL) { /* last packet, no need to copy entry */
589 +               struct sk_buff *gso_skb = entry->skb;
590 +               entry->skb = skb;
591 +               ret = __imq_nf_queue(entry, dev);
592 +               if (ret)
593 +                       entry->skb = gso_skb;
594 +               return ret;
595 +       }
596 +
597 +       skb->next = NULL;
598 +
599 +       entry_seg = nf_queue_entry_dup(entry);
600 +       if (entry_seg) {
601 +               entry_seg->skb = skb;
602 +               ret = __imq_nf_queue(entry_seg, dev);
603 +               if (ret)
604 +                       free_entry(entry_seg);
605 +       }
606 +       return ret;
607 +}
608 +
609 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
610 +{
611 +       struct sk_buff *skb, *segs;
612 +       struct net_device *dev;
613 +       unsigned int queued;
614 +       int index, retval, err;
615 +
616 +       index = entry->skb->imq_flags & IMQ_F_IFMASK;
617 +       if (unlikely(index > numdevs - 1)) {
618 +               if (net_ratelimit())
619 +                       pr_warn("IMQ: invalid device specified, highest is %u\n",
620 +                               numdevs - 1);
621 +               retval = -EINVAL;
622 +               goto out_no_dev;
623 +       }
624 +
625 +       /* check for imq device by index from cache */
626 +       dev = imq_devs_cache[index];
627 +       if (unlikely(!dev)) {
628 +               dev = get_imq_device_by_index(index);
629 +               if (IS_ERR(dev)) {
630 +                       retval = PTR_ERR(dev);
631 +                       goto out_no_dev;
632 +               }
633 +       }
634 +
635 +       if (unlikely(!(dev->flags & IFF_UP))) {
636 +               entry->skb->imq_flags = 0;
637 +               retval = -ECANCELED;
638 +               goto out_no_dev;
639 +       }
640 +
641 +       /* Since 3.10.x, GSO handling moved here as a result of upstream commit
642 +        * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
643 +        * skb_gso_segment into nfnetlink_queue module).
644 +        *
645 +        * Following code replicates the gso handling from
646 +        * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
647 +        */
648 +
649 +       skb = entry->skb;
650 +
651 +       switch (entry->state.pf) {
652 +       case NFPROTO_IPV4:
653 +               skb->protocol = htons(ETH_P_IP);
654 +               break;
655 +       case NFPROTO_IPV6:
656 +               skb->protocol = htons(ETH_P_IPV6);
657 +               break;
658 +       }
659 +
660 +       if (!skb_is_gso(entry->skb))
661 +               return __imq_nf_queue(entry, dev);
662 +
663 +       nf_bridge_adjust_skb_data(skb);
664 +       segs = skb_gso_segment(skb, 0);
665 +       /* Does not use PTR_ERR to limit the number of error codes that can be
666 +        * returned by nf_queue.  For instance, callers rely on -ECANCELED to
667 +        * mean 'ignore this hook'.
668 +        */
669 +       err = -ENOBUFS;
670 +       if (IS_ERR(segs))
671 +               goto out_err;
672 +       queued = 0;
673 +       err = 0;
674 +       do {
675 +               struct sk_buff *nskb = segs->next;
676 +               if (nskb && nskb->next)
677 +                       nskb->cb_next = NULL;
678 +               if (err == 0)
679 +                       err = __imq_nf_queue_gso(entry, dev, segs);
680 +               if (err == 0)
681 +                       queued++;
682 +               else
683 +                       kfree_skb(segs);
684 +               segs = nskb;
685 +       } while (segs);
686 +
687 +       if (queued) {
688 +               if (err) /* some segments are already queued */
689 +                       free_entry(entry);
690 +               kfree_skb(skb);
691 +               return 0;
692 +       }
693 +
694 +out_err:
695 +       nf_bridge_adjust_segmented_data(skb);
696 +       retval = err;
697 +out_no_dev:
698 +       return retval;
699 +}
700 +
701 +static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
702 +{
703 +       struct sk_buff *skb_orig, *skb, *skb_shared, *skb_popd;
704 +       struct Qdisc *q;
705 +       struct netdev_queue *txq;
706 +       spinlock_t *root_lock;
707 +       int users;
708 +       int retval = -EINVAL;
709 +       unsigned int orig_queue_index;
710 +
711 +       dev->last_rx = jiffies;
712 +
713 +       skb = entry->skb;
714 +       skb_orig = NULL;
715 +
716 +       /* skb has owner? => make clone */
717 +       if (unlikely(skb->destructor)) {
718 +               skb_orig = skb;
719 +               skb = skb_clone(skb, GFP_ATOMIC);
720 +               if (unlikely(!skb)) {
721 +                       retval = -ENOMEM;
722 +                       goto out;
723 +               }
724 +               skb->cb_next = NULL;
725 +               entry->skb = skb;
726 +       }
727 +
728 +       skb->nf_queue_entry = entry;
729 +
730 +       dev->stats.rx_bytes += skb->len;
731 +       dev->stats.rx_packets++;
732 +
733 +       if (!skb->dev) {
734 +               /* skb->dev == NULL causes problems; try to find the cause. */
735 +               if (net_ratelimit()) {
736 +                       dev_warn(&dev->dev,
737 +                                "received packet with skb->dev == NULL\n");
738 +                       dump_stack();
739 +               }
740 +
741 +               skb->dev = dev;
742 +       }
743 +
744 +       /* Disables softirqs for lock below */
745 +       rcu_read_lock_bh();
746 +
747 +       /* Multi-queue selection */
748 +       orig_queue_index = skb_get_queue_mapping(skb);
749 +       txq = imq_select_queue(dev, skb);
750 +
751 +       q = rcu_dereference(txq->qdisc);
752 +       if (unlikely(!q->enqueue))
753 +               goto packet_not_eaten_by_imq_dev;
754 +
755 +       root_lock = qdisc_lock(q);
756 +       spin_lock(root_lock);
757 +
758 +       users = atomic_read(&skb->users);
759 +
760 +       skb_shared = skb_get(skb); /* increase reference count by one */
761 +
762 +       /* backup skb->cb, as qdisc layer will overwrite it */
763 +       skb_save_cb(skb_shared);
764 +       qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
765 +
766 +       if (likely(atomic_read(&skb_shared->users) == users + 1)) {
767 +               bool validate;
768 +
769 +               kfree_skb(skb_shared); /* decrease reference count by one */
770 +
771 +               skb->destructor = &imq_skb_destructor;
772 +
773 +               skb_popd = qdisc_dequeue_skb(q, &validate);
774 +
775 +               /* cloned? */
776 +               if (unlikely(skb_orig))
777 +                       kfree_skb(skb_orig); /* free original */
778 +
779 +               spin_unlock(root_lock);
780 +
781 +#if 0
782 +               /* schedule qdisc dequeue */
783 +               __netif_schedule(q);
784 +#else
785 +               if (likely(skb_popd)) {
786 +                       /* Note that we validate skb (GSO, checksum, ...) outside of locks */
787 +                       if (validate)
788 +                               skb_popd = validate_xmit_skb_list(skb_popd, dev);
789 +
790 +                       if (skb_popd) {
791 +                               int dummy_ret;
792 +                               int cpu = smp_processor_id(); /* ok because BHs are off */
793 +
794 +                               txq = skb_get_tx_queue(dev, skb_popd);
795 +                               /* The IMQ device will not be frozen or stopped, and
796 +                                * transmit always succeeds, so we skip the status and
797 +                                * return value checks to save cycles.
798 +                                */
799 +                               if (imq_dev_accurate_stats && txq->xmit_lock_owner != cpu) {
800 +                                       HARD_TX_LOCK(dev, txq, cpu);
801 +                                       dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
802 +                                       HARD_TX_UNLOCK(dev, txq);
803 +                               } else {
804 +                                       dev_hard_start_xmit(skb_popd, dev, txq, &dummy_ret);
805 +                               }
806 +                       }
807 +               }
808 +#endif
809 +               rcu_read_unlock_bh();
810 +               retval = 0;
811 +               goto out;
812 +       } else {
813 +               skb_restore_cb(skb_shared); /* restore skb->cb */
814 +               skb->nf_queue_entry = NULL;
815 +               /*
816 +                * The qdisc dropped the packet and decreased its reference
817 +                * count, so we must not free it again here; that would
818 +                * actually destroy the skb.
819 +                */
820 +               spin_unlock(root_lock);
821 +               goto packet_not_eaten_by_imq_dev;
822 +       }
823 +
824 +packet_not_eaten_by_imq_dev:
825 +       skb_set_queue_mapping(skb, orig_queue_index);
826 +       rcu_read_unlock_bh();
827 +
828 +       /* cloned? restore original */
829 +       if (unlikely(skb_orig)) {
830 +               kfree_skb(skb);
831 +               entry->skb = skb_orig;
832 +       }
833 +       retval = -1;
834 +out:
835 +       return retval;
836 +}
837 +static unsigned int imq_nf_hook(const struct nf_hook_ops *hook_ops,
838 +                               struct sk_buff *skb,
839 +                               const struct nf_hook_state *state)
840 +{
841 +       return (skb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
842 +}
843 +
844 +static int imq_close(struct net_device *dev)
845 +{
846 +       netif_stop_queue(dev);
847 +       return 0;
848 +}
849 +
850 +static int imq_open(struct net_device *dev)
851 +{
852 +       netif_start_queue(dev);
853 +       return 0;
854 +}
855 +
856 +static const struct net_device_ops imq_netdev_ops = {
857 +       .ndo_open               = imq_open,
858 +       .ndo_stop               = imq_close,
859 +       .ndo_start_xmit         = imq_dev_xmit,
860 +       .ndo_get_stats          = imq_get_stats,
861 +};
862 +
863 +static void imq_setup(struct net_device *dev)
864 +{
865 +       dev->netdev_ops         = &imq_netdev_ops;
866 +       dev->type               = ARPHRD_VOID;
867 +       dev->mtu                = 16000; /* too small? */
868 +       dev->tx_queue_len       = 11000; /* too big? */
869 +       dev->flags              = IFF_NOARP;
870 +       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
871 +                                 NETIF_F_GSO | NETIF_F_HW_CSUM |
872 +                                 NETIF_F_HIGHDMA;
873 +       dev->priv_flags         &= ~(IFF_XMIT_DST_RELEASE |
874 +                                    IFF_TX_SKB_SHARING);
875 +}
876 +
877 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
878 +{
879 +       int ret = 0;
880 +
881 +       if (tb[IFLA_ADDRESS]) {
882 +               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
883 +                       ret = -EINVAL;
884 +                       goto end;
885 +               }
886 +               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
887 +                       ret = -EADDRNOTAVAIL;
888 +                       goto end;
889 +               }
890 +       }
891 +       return 0;
892 +end:
893 +       pr_warn("IMQ: imq_validate failed (%d)\n", ret);
894 +       return ret;
895 +}
896 +
897 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
898 +       .kind           = "imq",
899 +       .priv_size      = 0,
900 +       .setup          = imq_setup,
901 +       .validate       = imq_validate,
902 +};
903 +
904 +static const struct nf_queue_handler imq_nfqh = {
905 +       .outfn = imq_nf_queue,
906 +};
907 +
908 +static int __init imq_init_hooks(void)
909 +{
910 +       int ret;
911 +
912 +       nf_register_queue_imq_handler(&imq_nfqh);
913 +
914 +       ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
915 +       if (ret < 0)
916 +               nf_unregister_queue_imq_handler();
917 +
918 +       return ret;
919 +}
920 +
921 +static int __init imq_init_one(int index)
922 +{
923 +       struct net_device *dev;
924 +       int ret;
925 +
926 +       dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
927 +       if (!dev)
928 +               return -ENOMEM;
929 +
930 +       ret = dev_alloc_name(dev, dev->name);
931 +       if (ret < 0)
932 +               goto fail;
933 +
934 +       dev->rtnl_link_ops = &imq_link_ops;
935 +       ret = register_netdevice(dev);
936 +       if (ret < 0)
937 +               goto fail;
938 +
939 +       return 0;
940 +fail:
941 +       free_netdev(dev);
942 +       return ret;
943 +}
944 +
945 +static int __init imq_init_devs(void)
946 +{
947 +       int err, i;
948 +
949 +       if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
950 +               pr_err("IMQ: numdevs has to be between 1 and %u\n",
951 +                      IMQ_MAX_DEVS);
952 +               return -EINVAL;
953 +       }
954 +
955 +       if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
956 +               pr_err("IMQ: numqueues has to be between 1 and %u\n",
957 +                      IMQ_MAX_QUEUES);
958 +               return -EINVAL;
959 +       }
960 +
961 +       get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
962 +
963 +       rtnl_lock();
964 +       err = __rtnl_link_register(&imq_link_ops);
965 +
966 +       for (i = 0; i < numdevs && !err; i++)
967 +               err = imq_init_one(i);
968 +
969 +       if (err) {
970 +               __rtnl_link_unregister(&imq_link_ops);
971 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
972 +       }
973 +       rtnl_unlock();
974 +
975 +       return err;
976 +}
977 +
978 +static int __init imq_init_module(void)
979 +{
980 +       int err;
981 +
982 +#if defined(CONFIG_IMQ_NUM_DEVS)
983 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
984 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
985 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
986 +#endif
987 +
988 +       err = imq_init_devs();
989 +       if (err) {
990 +               pr_err("IMQ: Error trying imq_init_devs(net)\n");
991 +               return err;
992 +       }
993 +
994 +       err = imq_init_hooks();
995 +       if (err) {
996 +               pr_err("IMQ: Error trying imq_init_hooks()\n");
997 +               rtnl_link_unregister(&imq_link_ops);
998 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
999 +               return err;
1000 +       }
1001 +
1002 +       pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d, imq_dev_accurate_stats = %d)\n",
1003 +               numdevs, numqueues, imq_dev_accurate_stats);
1004 +
1005 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1006 +       pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
1007 +#else
1008 +       pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
1009 +#endif
1010 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
1011 +       pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
1012 +#else
1013 +       pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
1014 +#endif
1015 +
1016 +       return 0;
1017 +}
1018 +
1019 +static void __exit imq_unhook(void)
1020 +{
1021 +       nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
1022 +       nf_unregister_queue_imq_handler();
1023 +}
1024 +
1025 +static void __exit imq_cleanup_devs(void)
1026 +{
1027 +       rtnl_link_unregister(&imq_link_ops);
1028 +       memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
1029 +}
1030 +
1031 +static void __exit imq_exit_module(void)
1032 +{
1033 +       imq_unhook();
1034 +       imq_cleanup_devs();
1035 +       pr_info("IMQ driver unloaded successfully.\n");
1036 +}
1037 +
1038 +module_init(imq_init_module);
1039 +module_exit(imq_exit_module);
1040 +
1041 +module_param(numdevs, int, 0);
1042 +module_param(numqueues, int, 0);
1043 +module_param(imq_dev_accurate_stats, int, 0);
1044 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
1045 +MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
1046 +MODULE_PARM_DESC(imq_dev_accurate_stats, "Whether accurate imq device stats are needed");
1047 +
1048 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1049 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1050 +MODULE_LICENSE("GPL");
1051 +MODULE_ALIAS_RTNL_LINK("imq");
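A note on the queue selection above: both imq_hash() and imq_select_queue() map a 32-bit hash onto [0, real_num_tx_queues) with a multiply-and-shift instead of a modulo. A standalone sketch of just that mapping (plain userspace C; names are illustrative, the expression is the one from imq.c):

#include <stdio.h>
#include <stdint.h>

/* Same expression as imq.c: (u16)(((u64)hash * n) >> 32).
 * Multiplying a 32-bit hash by n and keeping the high 32 bits
 * scales the hash uniformly onto [0, n) without a division. */
static uint16_t hash_to_queue(uint32_t hash, uint32_t num_tx_queues)
{
	return (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);
}

int main(void)
{
	const uint32_t n = 8; /* stands in for dev->real_num_tx_queues */
	const uint32_t samples[] = { 0x00000000u, 0x40000000u,
				     0x80000000u, 0xffffffffu };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash 0x%08x -> queue %u\n",
		       (unsigned int)samples[i],
		       (unsigned int)hash_to_queue(samples[i], n));
	return 0; /* prints queues 0, 2, 4, 7 */
}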
1052 diff --git a/include/linux/imq.h b/include/linux/imq.h
1053 new file mode 100644
1054 index 0000000..1babb09
1055 --- /dev/null
1056 +++ b/include/linux/imq.h
1057 @@ -0,0 +1,13 @@
1058 +#ifndef _IMQ_H
1059 +#define _IMQ_H
1060 +
1061 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
1062 +#define IMQ_F_BITS     5
1063 +
1064 +#define IMQ_F_IFMASK   0x0f
1065 +#define IMQ_F_ENQUEUE  0x10
1066 +
1067 +#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)
1068 +
1069 +#endif /* _IMQ_H */
1070 +
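The packing these macros describe can be checked in isolation: the device index lives in the low 4 bits, the enqueue flag in bit 4. A small userspace sketch (macro values copied from the header above; the main() driver is illustrative only and mirrors what the xt_IMQ target later in this patch does with todev):

#include <stdio.h>
#include <stdint.h>

#define IMQ_F_BITS	5
#define IMQ_F_IFMASK	0x0f
#define IMQ_F_ENQUEUE	0x10
#define IMQ_MAX_DEVS	(IMQ_F_IFMASK + 1)

int main(void)
{
	/* keep the device index in the low 4 bits, set the
	 * enqueue flag in bit 4, as xt_IMQ.c does for todev = 3 */
	unsigned int todev = 3;
	uint8_t imq_flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;

	printf("flags=0x%02x dev=imq%u enqueue=%d (max %d devices)\n",
	       (unsigned int)imq_flags,
	       (unsigned int)(imq_flags & IMQ_F_IFMASK),
	       !!(imq_flags & IMQ_F_ENQUEUE), IMQ_MAX_DEVS);
	return 0; /* flags=0x13 dev=imq3 enqueue=1 (max 16 devices) */
}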
1071 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1072 index 05b9a69..0c35dff 100644
1073 --- a/include/linux/netdevice.h
1074 +++ b/include/linux/netdevice.h
1075 @@ -3276,6 +3276,19 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
1076         }                                               \
1077  }
1078  
1079 +#define HARD_TX_LOCK_BH(dev, txq) {                    \
1080 +       if ((dev->features & NETIF_F_LLTX) == 0) {      \
1081 +               __netif_tx_lock_bh(txq);                \
1082 +       }                                               \
1083 +}
1084 +
1085 +#define HARD_TX_UNLOCK_BH(dev, txq) {                  \
1086 +       if ((dev->features & NETIF_F_LLTX) == 0) {      \
1087 +               __netif_tx_unlock_bh(txq);              \
1088 +       }                                               \
1089 +}
1090 +
1091 +
1092  static inline void netif_tx_disable(struct net_device *dev)
1093  {
1094         unsigned int i;
1095 diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
1096 new file mode 100644
1097 index 0000000..9b07230
1098 --- /dev/null
1099 +++ b/include/linux/netfilter/xt_IMQ.h
1100 @@ -0,0 +1,9 @@
1101 +#ifndef _XT_IMQ_H
1102 +#define _XT_IMQ_H
1103 +
1104 +struct xt_imq_info {
1105 +       unsigned int todev;     /* target imq device */
1106 +};
1107 +
1108 +#endif /* _XT_IMQ_H */
1109 +
1110 diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
1111 new file mode 100644
1112 index 0000000..7af320f
1113 --- /dev/null
1114 +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
1115 @@ -0,0 +1,10 @@
1116 +#ifndef _IPT_IMQ_H
1117 +#define _IPT_IMQ_H
1118 +
1119 +/* Backwards compatibility for old userspace */
1120 +#include <linux/netfilter/xt_IMQ.h>
1121 +
1122 +#define ipt_imq_info xt_imq_info
1123 +
1124 +#endif /* _IPT_IMQ_H */
1125 +
1126 diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
1127 new file mode 100644
1128 index 0000000..198ac01
1129 --- /dev/null
1130 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
1131 @@ -0,0 +1,10 @@
1132 +#ifndef _IP6T_IMQ_H
1133 +#define _IP6T_IMQ_H
1134 +
1135 +/* Backwards compatibility for old userspace */
1136 +#include <linux/netfilter/xt_IMQ.h>
1137 +
1138 +#define ip6t_imq_info xt_imq_info
1139 +
1140 +#endif /* _IP6T_IMQ_H */
1141 +
1142 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1143 index f15154a..d76d31a 100644
1144 --- a/include/linux/skbuff.h
1145 +++ b/include/linux/skbuff.h
1146 @@ -35,6 +35,9 @@
1147  #include <linux/netdev_features.h>
1148  #include <linux/sched.h>
1149  #include <net/flow_keys.h>
1150 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1151 +#include <linux/imq.h>
1152 +#endif
1153  
1154  /* A. Checksumming of received packets by device.
1155   *
1156 @@ -540,6 +543,9 @@ struct sk_buff {
1157          * first. This is owned by whoever has the skb queued ATM.
1158          */
1159         char                    cb[48] __aligned(8);
1160 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1161 +       void                    *cb_next;
1162 +#endif
1163  
1164         unsigned long           _skb_refdst;
1165         void                    (*destructor)(struct sk_buff *skb);
1166 @@ -549,6 +555,9 @@ struct sk_buff {
1167  #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1168         struct nf_conntrack     *nfct;
1169  #endif
1170 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1171 +       struct nf_queue_entry   *nf_queue_entry;
1172 +#endif
1173  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1174         struct nf_bridge_info   *nf_bridge;
1175  #endif
1176 @@ -616,6 +625,9 @@ struct sk_buff {
1177         __u8                    inner_protocol_type:1;
1178         __u8                    remcsum_offload:1;
1179         /* 3 or 5 bit hole */
1180 +       #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1181 +       __u8                    imq_flags:IMQ_F_BITS;
1182 +       #endif
1183  
1184  #ifdef CONFIG_NET_SCHED
1185         __u16                   tc_index;       /* traffic control index */
1186 @@ -766,6 +778,12 @@ void kfree_skb_list(struct sk_buff *segs);
1187  void skb_tx_error(struct sk_buff *skb);
1188  void consume_skb(struct sk_buff *skb);
1189  void  __kfree_skb(struct sk_buff *skb);
1190 +
1191 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1192 +int skb_save_cb(struct sk_buff *skb);
1193 +int skb_restore_cb(struct sk_buff *skb);
1194 +#endif
1195 +
1196  extern struct kmem_cache *skbuff_head_cache;
1197  
1198  void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1199 @@ -3216,6 +3234,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
1200         if (copy)
1201                 dst->nfctinfo = src->nfctinfo;
1202  #endif
1203 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1204 +       dst->imq_flags = src->imq_flags;
1205 +       dst->nf_queue_entry = src->nf_queue_entry;
1206 +#endif
1207  #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1208         dst->nf_bridge  = src->nf_bridge;
1209         nf_bridge_get(src->nf_bridge);
1210 diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
1211 index d81d584..1adc20d 100644
1212 --- a/include/net/netfilter/nf_queue.h
1213 +++ b/include/net/netfilter/nf_queue.h
1214 @@ -29,6 +29,12 @@ struct nf_queue_handler {
1215  void nf_register_queue_handler(const struct nf_queue_handler *qh);
1216  void nf_unregister_queue_handler(void);
1217  void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
1218 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1219 +
1220 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1221 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
1222 +void nf_unregister_queue_imq_handler(void);
1223 +#endif
1224  
1225  bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
1226  void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
1227 diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
1228 index 2342bf1..149dec9 100644
1229 --- a/include/net/pkt_sched.h
1230 +++ b/include/net/pkt_sched.h
1231 @@ -104,6 +104,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
1232  
1233  void __qdisc_run(struct Qdisc *q);
1234  
1235 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate);
1236 +
1237  static inline void qdisc_run(struct Qdisc *q)
1238  {
1239         if (qdisc_run_begin(q))
1240 diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
1241 index ef1b1f8..079e5ff 100644
1242 --- a/include/uapi/linux/netfilter.h
1243 +++ b/include/uapi/linux/netfilter.h
1244 @@ -13,7 +13,8 @@
1245  #define NF_QUEUE 3
1246  #define NF_REPEAT 4
1247  #define NF_STOP 5
1248 -#define NF_MAX_VERDICT NF_STOP
1249 +#define NF_IMQ_QUEUE 6
1250 +#define NF_MAX_VERDICT NF_IMQ_QUEUE
1251  
1252  /* we overload the higher bits for encoding auxiliary data such as the queue
1253   * number or errno values. Not nice, but better than additional function
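The new verdict shares the encoding described in the comment above: queue number in the high bits, verdict type in the low byte. A standalone sketch of the round trip (NF_VERDICT_MASK and NF_VERDICT_QBITS are the stock uapi values from this header; the decode lines match what the patched nf_hook_slow() and nf_reinject() do):

#include <stdio.h>

#define NF_QUEUE		3
#define NF_IMQ_QUEUE		6	/* added by this patch */
#define NF_VERDICT_MASK		0x000000ff
#define NF_VERDICT_QBITS	16

int main(void)
{
	/* encode queue 2 as an IMQ-queue verdict */
	unsigned int verdict = (2u << NF_VERDICT_QBITS) | NF_IMQ_QUEUE;

	/* decode: queue number from the high bits, type from the low byte */
	unsigned int queuenum  = verdict >> NF_VERDICT_QBITS;
	unsigned int queuetype = verdict & NF_VERDICT_MASK;

	printf("queuenum=%u queuetype=%s\n", queuenum,
	       queuetype == NF_IMQ_QUEUE ? "NF_IMQ_QUEUE" : "NF_QUEUE");
	return 0; /* queuenum=2 queuetype=NF_IMQ_QUEUE */
}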
1254 diff --git a/net/core/dev.c b/net/core/dev.c
1255 index aa82f9a..c931d04 100644
1256 --- a/net/core/dev.c
1257 +++ b/net/core/dev.c
1258 @@ -135,6 +135,9 @@
1259  #include <linux/if_macvlan.h>
1260  #include <linux/errqueue.h>
1261  #include <linux/hrtimer.h>
1262 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1263 +#include <linux/imq.h>
1264 +#endif
1265  
1266  #include "net-sysfs.h"
1267  
1268 @@ -2646,7 +2649,12 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
1269         unsigned int len;
1270         int rc;
1271  
1272 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1273 +       if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) &&
1274 +               !(skb->imq_flags & IMQ_F_ENQUEUE))
1275 +#else
1276         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
1277 +#endif
1278                 dev_queue_xmit_nit(skb, dev);
1279  
1280         len = skb->len;
1281 @@ -2684,6 +2692,7 @@ out:
1282         *ret = rc;
1283         return skb;
1284  }
1285 +EXPORT_SYMBOL(dev_hard_start_xmit);
1286  
1287  static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
1288                                           netdev_features_t features)
1289 @@ -2772,6 +2781,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
1290         }
1291         return head;
1292  }
1293 +EXPORT_SYMBOL(validate_xmit_skb_list);
1294  
1295  static void qdisc_pkt_len_init(struct sk_buff *skb)
1296  {
1297 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1298 index 41ec022..307f02d 100644
1299 --- a/net/core/skbuff.c
1300 +++ b/net/core/skbuff.c
1301 @@ -79,6 +79,86 @@
1302  
1303  struct kmem_cache *skbuff_head_cache __read_mostly;
1304  static struct kmem_cache *skbuff_fclone_cache __read_mostly;
1305 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1306 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1307 +#endif
1308 +
1309 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1310 +/* Control buffer save/restore for IMQ devices */
1311 +struct skb_cb_table {
1312 +       char                    cb[48] __aligned(8);
1313 +       void                    *cb_next;
1314 +       atomic_t                refcnt;
1315 +};
1316 +
1317 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1318 +
1319 +int skb_save_cb(struct sk_buff *skb)
1320 +{
1321 +       struct skb_cb_table *next;
1322 +
1323 +       next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1324 +       if (!next)
1325 +               return -ENOMEM;
1326 +
1327 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1328 +
1329 +       memcpy(next->cb, skb->cb, sizeof(skb->cb));
1330 +       next->cb_next = skb->cb_next;
1331 +
1332 +       atomic_set(&next->refcnt, 1);
1333 +
1334 +       skb->cb_next = next;
1335 +       return 0;
1336 +}
1337 +EXPORT_SYMBOL(skb_save_cb);
1338 +
1339 +int skb_restore_cb(struct sk_buff *skb)
1340 +{
1341 +       struct skb_cb_table *next;
1342 +
1343 +       if (!skb->cb_next)
1344 +               return 0;
1345 +
1346 +       next = skb->cb_next;
1347 +
1348 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1349 +
1350 +       memcpy(skb->cb, next->cb, sizeof(skb->cb));
1351 +       skb->cb_next = next->cb_next;
1352 +
1353 +       spin_lock(&skb_cb_store_lock);
1354 +
1355 +       if (atomic_dec_and_test(&next->refcnt))
1356 +               kmem_cache_free(skbuff_cb_store_cache, next);
1357 +
1358 +       spin_unlock(&skb_cb_store_lock);
1359 +
1360 +       return 0;
1361 +}
1362 +EXPORT_SYMBOL(skb_restore_cb);
1363 +
1364 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1365 +{
1366 +       struct skb_cb_table *next;
1367 +       struct sk_buff *old;
1368 +
1369 +       if (!__old->cb_next) {
1370 +               new->cb_next = NULL;
1371 +               return;
1372 +       }
1373 +
1374 +       spin_lock(&skb_cb_store_lock);
1375 +
1376 +       old = (struct sk_buff *)__old;
1377 +
1378 +       next = old->cb_next;
1379 +       atomic_inc(&next->refcnt);
1380 +       new->cb_next = next;
1381 +
1382 +       spin_unlock(&skb_cb_store_lock);
1383 +}
1384 +#endif
1385  
1386  /**
1387   *     skb_panic - private function for out-of-line support
1388 @@ -691,6 +771,28 @@ static void skb_release_head_state(struct sk_buff *skb)
1389                 WARN_ON(in_irq());
1390                 skb->destructor(skb);
1391         }
1392 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1393 +       /*
1394 +        * This should not happen. When it does, avoid memleak by restoring
1395 +        * This should not happen. When it does, avoid a memory leak by
1396 +        * restoring the chain of cb backups.
1397 +       while (skb->cb_next != NULL) {
1398 +               if (net_ratelimit())
1399 +                       pr_warn("IMQ: kfree_skb: skb->cb_next: %p\n",
1400 +                               skb->cb_next);
1401 +
1402 +               skb_restore_cb(skb);
1403 +       }
1404 +       /*
1405 +        * This should not happen either: nf_queue_entry is nullified in
1406 +        * imq_dev_xmit(). If we have a non-NULL nf_queue_entry then we are
1407 +        * leaking entry pointers, maybe memory. We don't know whether this
1408 +        * points to already freed memory or whether it should be freed here.
1409 +        * If this happens we need to add refcounting, etc. for nf_queue_entry.
1410 +        */
1411 +       if (skb->nf_queue_entry && net_ratelimit())
1412 +               pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1413 +#endif
1414  #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1415         nf_conntrack_put(skb->nfct);
1416  #endif
1417 @@ -813,6 +915,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1418         new->sp                 = secpath_get(old->sp);
1419  #endif
1420         __nf_copy(new, old, false);
1421 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1422 +       new->cb_next = NULL;
1423 +       /*skb_copy_stored_cb(new, old);*/
1424 +#endif
1425  
1426         /* Note : this field could be in headers_start/headers_end section
1427          * It is not yet because we do not want to have a 16 bit hole
1428 @@ -3342,6 +3448,13 @@ void __init skb_init(void)
1429                                                 0,
1430                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1431                                                 NULL);
1432 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1433 +       skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1434 +                                                 sizeof(struct skb_cb_table),
1435 +                                                 0,
1436 +                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1437 +                                                 NULL);
1438 +#endif
1439  }
1440  
1441  /**
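The cb backup added above behaves as a per-skb stack: skb_save_cb() pushes a copy of skb->cb onto the cb_next chain before the qdisc layer overwrites it, and skb_restore_cb() pops it on dequeue (or in the destructor). A userspace model of just that push/pop discipline, with illustrative names rather than the kernel API, and without the refcount that the real skb_cb_table carries:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* mirrors struct skb_cb_table from the patch, minus the refcount */
struct cb_backup {
	char cb[48];
	struct cb_backup *next;
};

struct fake_skb {
	char cb[48];
	struct cb_backup *cb_next;
};

/* push: back up the control buffer (cf. skb_save_cb) */
static int save_cb(struct fake_skb *skb)
{
	struct cb_backup *b = malloc(sizeof(*b));

	if (!b)
		return -1;
	memcpy(b->cb, skb->cb, sizeof(skb->cb));
	b->next = skb->cb_next;
	skb->cb_next = b;
	return 0;
}

/* pop: bring the saved contents back (cf. skb_restore_cb) */
static void restore_cb(struct fake_skb *skb)
{
	struct cb_backup *b = skb->cb_next;

	if (!b)
		return;
	memcpy(skb->cb, b->cb, sizeof(skb->cb));
	skb->cb_next = b->next;
	free(b);
}

int main(void)
{
	struct fake_skb skb = { .cb = "netfilter state", .cb_next = NULL };

	save_cb(&skb);				/* before qdisc_enqueue_root() */
	strcpy(skb.cb, "qdisc scribbling");	/* qdisc layer owns cb now */
	restore_cb(&skb);			/* on dequeue in imq_dev_xmit() */
	printf("%s\n", skb.cb);			/* back to: netfilter state */
	return 0;
}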
1442 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1443 index bc09cb9..9b6ef9f 100644
1444 --- a/net/ipv6/ip6_output.c
1445 +++ b/net/ipv6/ip6_output.c
1446 @@ -64,9 +64,6 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
1447         struct in6_addr *nexthop;
1448         int ret;
1449  
1450 -       skb->protocol = htons(ETH_P_IPV6);
1451 -       skb->dev = dev;
1452 -
1453         if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
1454                 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1455  
1456 @@ -143,6 +140,13 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
1457                 return 0;
1458         }
1459  
1460 +       /*
1461 +        * IMQ-patch: moved setting skb->dev and skb->protocol from
1462 +        * ip6_finish_output2() to fix a crash in netif_skb_features().
1463 +        */
1464 +       skb->protocol = htons(ETH_P_IPV6);
1465 +       skb->dev = dev;
1466 +
1467         return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
1468                             NULL, dev,
1469                             ip6_finish_output,
1470 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
1471 index a0f3e6a3..64239c0 100644
1472 --- a/net/netfilter/Kconfig
1473 +++ b/net/netfilter/Kconfig
1474 @@ -771,6 +771,18 @@ config NETFILTER_XT_TARGET_LOG
1475  
1476           To compile it as a module, choose M here.  If unsure, say N.
1477  
1478 +config NETFILTER_XT_TARGET_IMQ
1479 +       tristate '"IMQ" target support'
1480 +       depends on NETFILTER_XTABLES
1481 +       depends on IP_NF_MANGLE || IP6_NF_MANGLE
1482 +       select IMQ
1483 +       default m if NETFILTER_ADVANCED=n
1484 +       help
1485 +         This option adds an `IMQ' target which is used to specify if and
1486 +         to which imq device packets should get enqueued/dequeued.
1487 +
1488 +         To compile it as a module, choose M here.  If unsure, say N.
1489 +
1490  config NETFILTER_XT_TARGET_MARK
1491         tristate '"MARK" target support'
1492         depends on NETFILTER_ADVANCED
1493 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
1494 index a87d8b8..d1080ff 100644
1495 --- a/net/netfilter/Makefile
1496 +++ b/net/netfilter/Makefile
1497 @@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
1498  obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1499  obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1500  obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
1501 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1502  obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1503  obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
1504  obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
1505 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
1506 index e616301..302798c 100644
1507 --- a/net/netfilter/core.c
1508 +++ b/net/netfilter/core.c
1509 @@ -178,9 +178,11 @@ next_hook:
1510                 ret = NF_DROP_GETERR(verdict);
1511                 if (ret == 0)
1512                         ret = -EPERM;
1513 -       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
1514 +       } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
1515 +               (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
1516                 int err = nf_queue(skb, elem, state,
1517 -                                  verdict >> NF_VERDICT_QBITS);
1518 +                                  verdict >> NF_VERDICT_QBITS,
1519 +                                  verdict & NF_VERDICT_MASK);
1520                 if (err < 0) {
1521                         if (err == -ECANCELED)
1522                                 goto next_hook;
1523 diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
1524 index ea7f367..06fe0d6 100644
1525 --- a/net/netfilter/nf_internals.h
1526 +++ b/net/netfilter/nf_internals.h
1527 @@ -18,7 +18,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
1528  
1529  /* nf_queue.c */
1530  int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
1531 -            struct nf_hook_state *state, unsigned int queuenum);
1532 +            struct nf_hook_state *state, unsigned int queuenum, unsigned int queuetype);
1533  int __init netfilter_queue_init(void);
1534  
1535  /* nf_log.c */
1536 diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
1537 index 2e88032..8524715 100644
1538 --- a/net/netfilter/nf_queue.c
1539 +++ b/net/netfilter/nf_queue.c
1540 @@ -28,6 +28,23 @@
1541   */
1542  static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
1543  
1544 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1545 +static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
1546 +
1547 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1548 +{
1549 +       rcu_assign_pointer(queue_imq_handler, qh);
1550 +}
1551 +EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
1552 +
1553 +void nf_unregister_queue_imq_handler(void)
1554 +{
1555 +       RCU_INIT_POINTER(queue_imq_handler, NULL);
1556 +       synchronize_rcu();
1557 +}
1558 +EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
1559 +#endif
1560 +
1561  /* return EBUSY when somebody else is registered, return EEXIST if the
1562   * same handler is registered, return 0 in case of success. */
1563  void nf_register_queue_handler(const struct nf_queue_handler *qh)
1564 @@ -112,7 +129,8 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
1565  int nf_queue(struct sk_buff *skb,
1566              struct nf_hook_ops *elem,
1567              struct nf_hook_state *state,
1568 -            unsigned int queuenum)
1569 +            unsigned int queuenum,
1570 +            unsigned int queuetype)
1571  {
1572         int status = -ENOENT;
1573         struct nf_queue_entry *entry = NULL;
1574 @@ -122,7 +140,17 @@ int nf_queue(struct sk_buff *skb,
1575         /* QUEUE == DROP if no one is waiting, to be safe. */
1576         rcu_read_lock();
1577  
1578 -       qh = rcu_dereference(queue_handler);
1579 +       if (queuetype == NF_IMQ_QUEUE) {
1580 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1581 +               qh = rcu_dereference(queue_imq_handler);
1582 +#else
1583 +               BUG();
1584 +               goto err_unlock;
1585 +#endif
1586 +       } else {
1587 +               qh = rcu_dereference(queue_handler);
1588 +       }
1589 +
1590         if (!qh) {
1591                 status = -ESRCH;
1592                 goto err_unlock;
1593 @@ -208,8 +236,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
1594                 local_bh_enable();
1595                 break;
1596         case NF_QUEUE:
1597 +       case NF_IMQ_QUEUE:
1598                 err = nf_queue(skb, elem, &entry->state,
1599 -                              verdict >> NF_VERDICT_QBITS);
1600 +                              verdict >> NF_VERDICT_QBITS,
1601 +                              verdict & NF_VERDICT_MASK);
1602                 if (err < 0) {
1603                         if (err == -ECANCELED)
1604                                 goto next_hook;
1605 diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
1606 new file mode 100644
1607 index 0000000..86d7b84
1608 --- /dev/null
1609 +++ b/net/netfilter/xt_IMQ.c
1610 @@ -0,0 +1,72 @@
1611 +/*
1612 + * This target marks packets to be enqueued to an imq device
1613 + */
1614 +#include <linux/module.h>
1615 +#include <linux/skbuff.h>
1616 +#include <linux/netfilter/x_tables.h>
1617 +#include <linux/netfilter/xt_IMQ.h>
1618 +#include <linux/imq.h>
1619 +
1620 +static unsigned int imq_target(struct sk_buff *pskb,
1621 +                               const struct xt_action_param *par)
1622 +{
1623 +       const struct xt_imq_info *mr = par->targinfo;
1624 +
1625 +       pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1626 +
1627 +       return XT_CONTINUE;
1628 +}
1629 +
1630 +static int imq_checkentry(const struct xt_tgchk_param *par)
1631 +{
1632 +       struct xt_imq_info *mr = par->targinfo;
1633 +
1634 +       if (mr->todev > IMQ_MAX_DEVS - 1) {
1635 +               pr_warn("IMQ: invalid device specified, highest is %u\n",
1636 +                       IMQ_MAX_DEVS - 1);
1637 +               return -EINVAL;
1638 +       }
1639 +
1640 +       return 0;
1641 +}
1642 +
1643 +static struct xt_target xt_imq_reg[] __read_mostly = {
1644 +       {
1645 +               .name           = "IMQ",
1646 +               .family         = AF_INET,
1647 +               .checkentry     = imq_checkentry,
1648 +               .target         = imq_target,
1649 +               .targetsize     = sizeof(struct xt_imq_info),
1650 +               .table          = "mangle",
1651 +               .me             = THIS_MODULE
1652 +       },
1653 +       {
1654 +               .name           = "IMQ",
1655 +               .family         = AF_INET6,
1656 +               .checkentry     = imq_checkentry,
1657 +               .target         = imq_target,
1658 +               .targetsize     = sizeof(struct xt_imq_info),
1659 +               .table          = "mangle",
1660 +               .me             = THIS_MODULE
1661 +       },
1662 +};
1663 +
1664 +static int __init imq_init(void)
1665 +{
1666 +       return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1667 +}
1668 +
1669 +static void __exit imq_fini(void)
1670 +{
1671 +       xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1672 +}
1673 +
1674 +module_init(imq_init);
1675 +module_exit(imq_fini);
1676 +
1677 +MODULE_AUTHOR("https://github.com/imq/linuximq");
1678 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See https://github.com/imq/linuximq/wiki for more information.");
1679 +MODULE_LICENSE("GPL");
1680 +MODULE_ALIAS("ipt_IMQ");
1681 +MODULE_ALIAS("ip6t_IMQ");
1682 +
1683 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1684 index 6efca30..a4e448f 100644
1685 --- a/net/sched/sch_generic.c
1686 +++ b/net/sched/sch_generic.c
1687 @@ -108,6 +108,14 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
1688         return skb;
1689  }
1690  
1691 +struct sk_buff *qdisc_dequeue_skb(struct Qdisc *q, bool *validate)
1692 +{
1693 +       int packets;
1694 +
1695 +       return dequeue_skb(q, validate, &packets);
1696 +}
1697 +EXPORT_SYMBOL(qdisc_dequeue_skb);
1698 +
1699  static inline int handle_dev_cpu_collision(struct sk_buff *skb,
1700                                            struct netdev_queue *dev_queue,
1701                                            struct Qdisc *q)