2.6.6-rc1-patch-o-matic-ng-base-20040419.patch
1diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ip_pool.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ip_pool.h
2--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ip_pool.h 1970-01-01 01:00:00.000000000 +0100
3+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ip_pool.h 2004-04-19 10:08:36.000000000 +0200
4@@ -0,0 +1,64 @@
5+#ifndef _IP_POOL_H
6+#define _IP_POOL_H
7+
8+/***************************************************************************/
9+/* This program is free software; you can redistribute it and/or modify */
10+/* it under the terms of the GNU General Public License as published by */
11+/* the Free Software Foundation; either version 2 of the License, or */
12+/* (at your option) any later version. */
13+/* */
14+/* This program is distributed in the hope that it will be useful, */
15+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17+/* GNU General Public License for more details. */
18+/* */
19+/* You should have received a copy of the GNU General Public License */
20+/* along with this program; if not, write to the Free Software */
21+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA*/
22+/***************************************************************************/
23+
24+/* A sockopt of such quality has hardly ever been seen before on the open
25+ * market! This little beauty, hardly ever used: above 64, so it's
26+ * traditionally used for firewalling, not touched (even once!) by the
27+ * 2.0, 2.2 and 2.4 kernels!
28+ *
29+ * Comes with its own certificate of authenticity, valid anywhere in the
30+ * Free world!
31+ *
32+ * Rusty, 19.4.2000
33+ */
34+#define SO_IP_POOL 81
35+
36+typedef int ip_pool_t; /* pool index */
37+#define IP_POOL_NONE ((ip_pool_t)-1)
38+
39+struct ip_pool_request {
40+ int op;
41+ ip_pool_t index;
42+ u_int32_t addr;
43+ u_int32_t addr2;
44+};
45+
46+/* NOTE: I deliberately break the first cut ippool utility. Nobody uses it. */
47+
48+#define IP_POOL_BAD001 0x00000010
49+
50+#define IP_POOL_FLUSH 0x00000011 /* req.index, no arguments */
51+#define IP_POOL_INIT 0x00000012 /* from addr to addr2 incl. */
52+#define IP_POOL_DESTROY 0x00000013 /* req.index, no arguments */
53+#define IP_POOL_ADD_ADDR 0x00000014 /* add addr to pool */
54+#define IP_POOL_DEL_ADDR 0x00000015 /* del addr from pool */
55+#define IP_POOL_HIGH_NR 0x00000016 /* result in req.index */
56+#define IP_POOL_LOOKUP 0x00000017 /* result in addr and addr2 */
57+#define IP_POOL_USAGE 0x00000018 /* result in addr */
58+#define IP_POOL_TEST_ADDR 0x00000019 /* result (0/1) returned */
59+
60+#ifdef __KERNEL__
61+
62+/* NOTE: ip_pool_match() and ip_pool_mod() expect ADDR to be host byte order */
63+extern int ip_pool_match(ip_pool_t pool, u_int32_t addr);
64+extern int ip_pool_mod(ip_pool_t pool, u_int32_t addr, int isdel);
65+
66+#endif
67+
68+#endif /*_IP_POOL_H*/
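
A minimal kernel-side usage sketch, not part of the patch: the note above says ip_pool_match() takes the address in host byte order, so a caller has to convert the network-order saddr first. Function and variable names here are hypothetical.

/* Illustrative only: test whether a packet's source address is in pool 0. */
static int example_src_in_pool(const struct sk_buff *skb)
{
	const struct iphdr *iph = skb->nh.iph;	/* 2.6-era sk_buff field layout */
	const ip_pool_t pool = 0;		/* hypothetical pool index */

	/* ip_pool_match() wants host byte order, hence ntohl() */
	return ip_pool_match(pool, ntohl(iph->saddr));
}
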
69diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_TTL.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_TTL.h
70--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_TTL.h 1970-01-01 01:00:00.000000000 +0100
71+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_TTL.h 2004-04-19 10:08:28.000000000 +0200
72@@ -0,0 +1,21 @@
73+/* TTL modification module for IP tables
74+ * (C) 2000 by Harald Welte <laforge@gnumonks.org> */
75+
76+#ifndef _IPT_TTL_H
77+#define _IPT_TTL_H
78+
79+enum {
80+ IPT_TTL_SET = 0,
81+ IPT_TTL_INC,
82+ IPT_TTL_DEC
83+};
84+
85+#define IPT_TTL_MAXMODE IPT_TTL_DEC
86+
87+struct ipt_TTL_info {
88+ u_int8_t mode;
89+ u_int8_t ttl;
90+};
91+
92+
93+#endif
94diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_connlimit.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_connlimit.h
95--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_connlimit.h 1970-01-01 01:00:00.000000000 +0100
96+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_connlimit.h 2004-04-19 10:08:29.000000000 +0200
97@@ -0,0 +1,12 @@
98+#ifndef _IPT_CONNLIMIT_H
99+#define _IPT_CONNLIMIT_H
100+
101+struct ipt_connlimit_data;
102+
103+struct ipt_connlimit_info {
104+ int limit;
105+ int inverse;
106+ u_int32_t mask;
107+ struct ipt_connlimit_data *data;
108+};
109+#endif /* _IPT_CONNLIMIT_H */
110diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_dstlimit.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_dstlimit.h
111--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_dstlimit.h 1970-01-01 01:00:00.000000000 +0100
112+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_dstlimit.h 2004-04-19 10:08:30.000000000 +0200
113@@ -0,0 +1,39 @@
114+#ifndef _IPT_DSTLIMIT_H
115+#define _IPT_DSTLIMIT_H
116+
117+/* timings are in milliseconds. */
118+#define IPT_DSTLIMIT_SCALE 10000
119+/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
120+ seconds, or one every 59 hours. */
121+
122+/* details of this structure hidden by the implementation */
123+struct ipt_dstlimit_htable;
124+
125+#define IPT_DSTLIMIT_HASH_DIP 0x0001
126+#define IPT_DSTLIMIT_HASH_DPT 0x0002
127+#define IPT_DSTLIMIT_HASH_SIP 0x0004
128+
129+struct dstlimit_cfg {
130+ u_int32_t mode; /* bitmask of IPT_DSTLIMIT_HASH_* */
131+ u_int32_t avg; /* Average secs between packets * scale */
132+ u_int32_t burst; /* Period multiplier for upper limit. */
133+
134+ /* user specified */
135+ u_int32_t size; /* how many buckets */
136+ u_int32_t max; /* max number of entries */
137+ u_int32_t gc_interval; /* gc interval */
138+ u_int32_t expire; /* when do entries expire? */
139+};
140+
141+struct ipt_dstlimit_info {
142+ char name [IFNAMSIZ]; /* name */
143+ struct dstlimit_cfg cfg;
144+ struct ipt_dstlimit_htable *hinfo;
145+
146+ /* Used internally by the kernel */
147+ union {
148+ void *ptr;
149+ struct ipt_dstlimit_info *master;
150+ } u;
151+};
152+#endif /*_IPT_DSTLIMIT_H*/
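
A worked configuration sketch, not part of the patch: per the comments above, cfg.avg is the average gap between packets in seconds multiplied by IPT_DSTLIMIT_SCALE, so a per-destination ceiling of 100 packets per second becomes 10000/100 = 100. The remaining values are illustrative assumptions, not defaults taken from the module.

/* Illustrative only: "at most 100 packets/sec per destination IP". */
struct dstlimit_cfg cfg = {
	.mode        = IPT_DSTLIMIT_HASH_DIP,		/* hash on destination IP */
	.avg         = IPT_DSTLIMIT_SCALE / 100,	/* 0.01 s between packets */
	.burst       = 5,				/* allow short bursts */
	.size        = 1024,				/* hash buckets (assumed) */
	.max         = 4096,				/* max tracked entries (assumed) */
	.gc_interval = 1000,				/* assumed milliseconds */
	.expire      = 10000,				/* assumed milliseconds */
};
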
153diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_fuzzy.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_fuzzy.h
154--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_fuzzy.h 1970-01-01 01:00:00.000000000 +0100
155+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_fuzzy.h 2004-04-19 10:08:31.000000000 +0200
156@@ -0,0 +1,21 @@
157+#ifndef _IPT_FUZZY_H
158+#define _IPT_FUZZY_H
159+
160+#include <linux/param.h>
161+#include <linux/types.h>
162+
163+#define MAXFUZZYRATE 10000000
164+#define MINFUZZYRATE 3
165+
166+struct ipt_fuzzy_info {
167+ u_int32_t minimum_rate;
168+ u_int32_t maximum_rate;
169+ u_int32_t packets_total;
170+ u_int32_t bytes_total;
171+ u_int32_t previous_time;
172+ u_int32_t present_time;
173+ u_int32_t mean_rate;
174+ u_int8_t acceptance_rate;
175+};
176+
177+#endif /*_IPT_FUZZY_H*/
178diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_ipv4options.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_ipv4options.h
179--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_ipv4options.h 1970-01-01 01:00:00.000000000 +0100
180+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_ipv4options.h 2004-04-19 10:08:32.000000000 +0200
181@@ -0,0 +1,21 @@
182+#ifndef __ipt_ipv4options_h_included__
183+#define __ipt_ipv4options_h_included__
184+
185+#define IPT_IPV4OPTION_MATCH_SSRR 0x01 /* For strict source routing */
186+#define IPT_IPV4OPTION_MATCH_LSRR 0x02 /* For loose source routing */
187+#define IPT_IPV4OPTION_DONT_MATCH_SRR 0x04 /* any source routing */
188+#define IPT_IPV4OPTION_MATCH_RR 0x08 /* For Record route */
189+#define IPT_IPV4OPTION_DONT_MATCH_RR 0x10
190+#define IPT_IPV4OPTION_MATCH_TIMESTAMP 0x20 /* For timestamp request */
191+#define IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP 0x40
192+#define IPT_IPV4OPTION_MATCH_ROUTER_ALERT 0x80 /* For router-alert */
193+#define IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT 0x100
194+#define IPT_IPV4OPTION_MATCH_ANY_OPT 0x200 /* match packet with any option */
195+#define IPT_IPV4OPTION_DONT_MATCH_ANY_OPT 0x400 /* match packet with no option */
196+
197+struct ipt_ipv4options_info {
198+ u_int16_t options;
199+};
200+
201+
202+#endif /* __ipt_ipv4options_h_included__ */
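
A small sketch, not from the patch, of how the bits above combine into the 16-bit options field; which combinations the match module actually accepts is decided when the rule is checked, this only shows the OR'ing.

/* Illustrative only: require a loose-source-route option, reject record-route. */
struct ipt_ipv4options_info info = {
	.options = IPT_IPV4OPTION_MATCH_LSRR | IPT_IPV4OPTION_DONT_MATCH_RR,
};
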
203diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_mport.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_mport.h
204--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_mport.h 1970-01-01 01:00:00.000000000 +0100
205+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_mport.h 2004-04-19 10:08:33.000000000 +0200
206@@ -0,0 +1,24 @@
207+#ifndef _IPT_MPORT_H
208+#define _IPT_MPORT_H
209+#include <linux/netfilter_ipv4/ip_tables.h>
210+
211+#define IPT_MPORT_SOURCE (1<<0)
212+#define IPT_MPORT_DESTINATION (1<<1)
213+#define IPT_MPORT_EITHER (IPT_MPORT_SOURCE|IPT_MPORT_DESTINATION)
214+
215+#define IPT_MULTI_PORTS 15
216+
217+/* Must fit inside union ipt_matchinfo: 32 bytes */
218+/* every entry in ports[] except for the last one has one bit in pflags
219+ * associated with it. If this bit is set, the port is the first port of
220+ * a portrange, with the next entry being the last.
221+ * End of list is marked with pflags bit set and port=65535.
222+ * If 14 ports are used (last one does not have a pflag), the last port
223+ * is repeated to fill the last entry in ports[] */
224+struct ipt_mport
225+{
226+ u_int8_t flags:2; /* Type of comparison */
227+ u_int16_t pflags:14; /* Port flags */
228+ u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
229+};
230+#endif /*_IPT_MPORT_H*/
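
A worked example of the encoding described in the comment above, not part of the patch; the concrete ports are arbitrary. For "destination port 25, or any port from 80 to 143": ports[0] is a single port (its pflags bit is clear), ports[1] has its pflags bit set so it opens a range that ends at ports[2], and ports[3] carries the end-of-list marker (pflags bit set, port 65535).

/* Illustrative only: dst port 25, or dst ports 80-143. */
struct ipt_mport mp = {
	.flags  = IPT_MPORT_DESTINATION,
	.pflags = (1 << 1) | (1 << 3),		/* bit 1: range start; bit 3: end marker */
	.ports  = { 25, 80, 143, 65535 },	/* remaining entries stay zero */
};
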
231diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_nth.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_nth.h
232--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_nth.h 1970-01-01 01:00:00.000000000 +0100
233+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_nth.h 2004-04-19 10:08:34.000000000 +0200
234@@ -0,0 +1,19 @@
235+#ifndef _IPT_NTH_H
236+#define _IPT_NTH_H
237+
238+#include <linux/param.h>
239+#include <linux/types.h>
240+
241+#ifndef IPT_NTH_NUM_COUNTERS
242+#define IPT_NTH_NUM_COUNTERS 16
243+#endif
244+
245+struct ipt_nth_info {
246+ u_int8_t every;
247+ u_int8_t not;
248+ u_int8_t startat;
249+ u_int8_t counter;
250+ u_int8_t packet;
251+};
252+
253+#endif /*_IPT_NTH_H*/
254diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_osf.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_osf.h
255--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_osf.h 1970-01-01 01:00:00.000000000 +0100
256+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_osf.h 2004-04-19 10:08:35.000000000 +0200
257@@ -0,0 +1,148 @@
258+/*
259+ * ipt_osf.h
260+ *
261+ * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
262+ *
263+ *
264+ * This program is free software; you can redistribute it and/or modify
265+ * it under the terms of the GNU General Public License as published by
266+ * the Free Software Foundation; either version 2 of the License, or
267+ * (at your option) any later version.
268+ *
269+ * This program is distributed in the hope that it will be useful,
270+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
271+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
272+ * GNU General Public License for more details.
273+ *
274+ * You should have received a copy of the GNU General Public License
275+ * along with this program; if not, write to the Free Software
276+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
277+ */
278+
279+#ifndef _IPT_OSF_H
280+#define _IPT_OSF_H
281+
282+#define MAXGENRELEN 32
283+#define MAXDETLEN 64
284+
285+#define IPT_OSF_GENRE 1
286+#define IPT_OSF_SMART 2
287+#define IPT_OSF_LOG 4
288+#define IPT_OSF_NETLINK 8
289+
290+#define IPT_OSF_LOGLEVEL_ALL 0
291+#define IPT_OSF_LOGLEVEL_FIRST 1
292+
293+#include <linux/list.h>
294+
295+#ifndef __KERNEL__
296+#include <netinet/ip.h>
297+#include <netinet/tcp.h>
298+
299+struct list_head
300+{
301+ struct list_head *prev, *next;
302+};
303+#endif
304+
305+struct ipt_osf_info
306+{
307+ char genre[MAXGENRELEN];
308+ int len;
309+ unsigned long flags;
310+ int loglevel;
311+ int invert; /* UNSUPPORTED */
312+};
313+
314+struct osf_wc
315+{
316+ char wc;
317+ unsigned long val;
318+};
319+
320+/* This struct represents IANA options
321+ * http://www.iana.org/assignments/tcp-parameters
322+ */
323+struct osf_opt
324+{
325+ unsigned char kind;
326+ unsigned char length;
327+ struct osf_wc wc;
328+};
329+
330+struct osf_finger
331+{
332+ struct list_head flist;
333+ struct osf_wc wss;
334+ unsigned char ttl;
335+ unsigned char df;
336+ unsigned long ss;
337+ unsigned char genre[MAXGENRELEN];
338+ unsigned char version[MAXGENRELEN], subtype[MAXGENRELEN];
339+
340+ /* Not needed, but for consistency with original table from Michal Zalewski */
341+ unsigned char details[MAXDETLEN];
342+
343+ int opt_num;
344+ struct osf_opt opt[MAX_IPOPTLEN]; /* In case it is all NOP or EOL */
345+
346+};
347+
348+struct ipt_osf_nlmsg
349+{
350+ struct osf_finger f;
351+ struct iphdr ip;
352+ struct tcphdr tcp;
353+};
354+
355+#ifdef __KERNEL__
356+
357+/* Defines for IANA option kinds */
358+
359+#define OSFOPT_EOL 0 /* End of options */
360+#define OSFOPT_NOP 1 /* NOP */
361+#define OSFOPT_MSS 2 /* Maximum segment size */
362+#define OSFOPT_WSO 3 /* Window scale option */
363+#define OSFOPT_SACKP 4 /* SACK permitted */
364+#define OSFOPT_SACK 5 /* SACK */
365+#define OSFOPT_ECHO 6
366+#define OSFOPT_ECHOREPLY 7
367+#define OSFOPT_TS 8 /* Timestamp option */
368+#define OSFOPT_POCP 9 /* Partial Order Connection Permitted */
369+#define OSFOPT_POSP 10 /* Partial Order Service Profile */
370+/* Others are not used in current OSF */
371+
372+static struct osf_opt IANA_opts[] =
373+{
374+ {0, 1,},
375+ {1, 1,},
376+ {2, 4,},
377+ {3, 3,},
378+ {4, 2,},
379+ {5, 1 ,}, /* SACK length is not defined */
380+ {6, 6,},
381+ {7, 6,},
382+ {8, 10,},
383+ {9, 2,},
384+ {10, 3,},
385+ {11, 1,}, /* CC: Suppose 1 */
386+ {12, 1,}, /* the same */
387+ {13, 1,}, /* and here too */
388+ {14, 3,},
389+ {15, 1,}, /* TCP Alternate Checksum Data. Length is not defined */
390+ {16, 1,},
391+ {17, 1,},
392+ {18, 3,},
393+ {19, 18,},
394+ {20, 1,},
395+ {21, 1,},
396+ {22, 1,},
397+ {23, 1,},
398+ {24, 1,},
399+ {25, 1,},
400+ {26, 1,},
401+};
402+
403+#endif /* __KERNEL__ */
404+
405+#endif /* _IPT_OSF_H */
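
A minimal rule sketch, not taken from the patch: genre selection plus "smart" mode and first-match-only logging. Treating len as the length of the genre string is an assumption; this header does not spell out its meaning.

/* Illustrative only: match packets fingerprinted as the "Linux" genre. */
struct ipt_osf_info osf = {
	.genre    = "Linux",
	.len      = 5,				/* strlen("Linux"), assumed semantics */
	.flags    = IPT_OSF_SMART | IPT_OSF_LOG,
	.loglevel = IPT_OSF_LOGLEVEL_FIRST,
};
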
406diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_pool.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_pool.h
407--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_pool.h 1970-01-01 01:00:00.000000000 +0100
408+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_pool.h 2004-04-19 10:08:36.000000000 +0200
409@@ -0,0 +1,25 @@
410+#ifndef _IPT_POOL_H
411+#define _IPT_POOL_H
412+
413+#include <linux/netfilter_ipv4/ip_pool.h>
414+
415+#define IPT_POOL_INV_SRC 0x00000001
416+#define IPT_POOL_INV_DST 0x00000002
417+#define IPT_POOL_DEL_SRC 0x00000004
418+#define IPT_POOL_DEL_DST 0x00000008
419+#define IPT_POOL_INV_MOD_SRC 0x00000010
420+#define IPT_POOL_INV_MOD_DST 0x00000020
421+#define IPT_POOL_MOD_SRC_ACCEPT 0x00000040
422+#define IPT_POOL_MOD_DST_ACCEPT 0x00000080
423+#define IPT_POOL_MOD_SRC_DROP 0x00000100
424+#define IPT_POOL_MOD_DST_DROP 0x00000200
425+
426+/* match info */
427+struct ipt_pool_info
428+{
429+ ip_pool_t src;
430+ ip_pool_t dst;
431+ unsigned flags;
432+};
433+
434+#endif /*_IPT_POOL_H*/
435diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_psd.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_psd.h
436--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_psd.h 1970-01-01 01:00:00.000000000 +0100
437+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_psd.h 2004-04-19 10:08:37.000000000 +0200
438@@ -0,0 +1,40 @@
439+#ifndef _IPT_PSD_H
440+#define _IPT_PSD_H
441+
442+#include <linux/param.h>
443+#include <linux/types.h>
444+
445+/*
446+ * High port numbers have a lower weight to reduce the frequency of false
447+ * positives, such as from passive mode FTP transfers.
448+ */
449+#define PORT_WEIGHT_PRIV 3
450+#define PORT_WEIGHT_HIGH 1
451+
452+/*
453+ * Port scan detection thresholds: at least COUNT ports need to be scanned
454+ * from the same source, with no longer than DELAY ticks between ports.
455+ */
456+#define SCAN_MIN_COUNT 7
457+#define SCAN_MAX_COUNT (SCAN_MIN_COUNT * PORT_WEIGHT_PRIV)
458+#define SCAN_WEIGHT_THRESHOLD SCAN_MAX_COUNT
459+#define SCAN_DELAY_THRESHOLD (HZ * 3)
460+
461+/*
462+ * Keep track of up to LIST_SIZE source addresses, using a hash table of
463+ * HASH_SIZE entries for faster lookups, but limiting hash collisions to
464+ * HASH_MAX source addresses per the same hash value.
465+ */
466+#define LIST_SIZE 0x100
467+#define HASH_LOG 9
468+#define HASH_SIZE (1 << HASH_LOG)
469+#define HASH_MAX 0x10
470+
471+struct ipt_psd_info {
472+ unsigned int weight_threshold;
473+ unsigned int delay_threshold;
474+ unsigned short lo_ports_weight;
475+ unsigned short hi_ports_weight;
476+};
477+
478+#endif /*_IPT_PSD_H*/
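
Spelling out the defaults above: SCAN_WEIGHT_THRESHOLD is SCAN_MIN_COUNT * PORT_WEIGHT_PRIV = 7 * 3 = 21, so seven probes to privileged ports (weight 3 each) or twenty-one probes to high ports (weight 1 each) from one source reach the threshold, provided consecutive probes arrive within SCAN_DELAY_THRESHOLD (3 * HZ) ticks of each other. A sketch of the parameter block filled with those defaults (illustrative, not from the patch):

/* Illustrative only: the stock thresholds written into the match info. */
struct ipt_psd_info psd = {
	.weight_threshold = SCAN_WEIGHT_THRESHOLD,	/* 21 */
	.delay_threshold  = SCAN_DELAY_THRESHOLD,	/* 3 * HZ */
	.lo_ports_weight  = PORT_WEIGHT_PRIV,		/* 3 */
	.hi_ports_weight  = PORT_WEIGHT_HIGH,		/* 1 */
};
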
479diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_quota.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_quota.h
480--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_quota.h 1970-01-01 01:00:00.000000000 +0100
481+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_quota.h 2004-04-19 10:08:37.000000000 +0200
482@@ -0,0 +1,11 @@
483+#ifndef _IPT_QUOTA_H
484+#define _IPT_QUOTA_H
485+
486+/* print debug info in both kernel/netfilter module & iptable library */
487+//#define DEBUG_IPT_QUOTA
488+
489+struct ipt_quota_info {
490+ u_int64_t quota;
491+};
492+
493+#endif /*_IPT_QUOTA_H*/
494diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_random.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_random.h
495--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_random.h 1970-01-01 01:00:00.000000000 +0100
496+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_random.h 2004-04-19 10:08:38.000000000 +0200
497@@ -0,0 +1,11 @@
498+#ifndef _IPT_RAND_H
499+#define _IPT_RAND_H
500+
501+#include <linux/param.h>
502+#include <linux/types.h>
503+
504+struct ipt_rand_info {
505+ u_int8_t average;
506+};
507+
508+#endif /*_IPT_RAND_H*/
509diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_realm.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_realm.h
510--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_realm.h 1970-01-01 01:00:00.000000000 +0100
511+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_realm.h 2004-04-19 10:08:39.000000000 +0200
512@@ -0,0 +1,9 @@
513+#ifndef _IPT_REALM_H
514+#define _IPT_REALM_H
515+
516+struct ipt_realm_info {
517+ u_int32_t id;
518+ u_int32_t mask;
519+ u_int8_t invert;
520+};
521+#endif /*_IPT_REALM_H*/
522diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_sctp.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_sctp.h
523--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_sctp.h 1970-01-01 01:00:00.000000000 +0100
524+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_sctp.h 2004-04-19 10:08:40.000000000 +0200
525@@ -0,0 +1,107 @@
526+#ifndef _IPT_SCTP_H_
527+#define _IPT_SCTP_H_
528+
529+#define IPT_SCTP_SRC_PORTS 0x01
530+#define IPT_SCTP_DEST_PORTS 0x02
531+#define IPT_SCTP_CHUNK_TYPES 0x04
532+
533+#define IPT_SCTP_VALID_FLAGS 0x07
534+
535+#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
536+
537+
538+struct ipt_sctp_flag_info {
539+ u_int8_t chunktype;
540+ u_int8_t flag;
541+ u_int8_t flag_mask;
542+};
543+
544+#define IPT_NUM_SCTP_FLAGS 4
545+
546+struct ipt_sctp_info {
547+ u_int16_t dpts[2]; /* Min, Max */
548+ u_int16_t spts[2]; /* Min, Max */
549+
550+ u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */
551+
552+#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */
553+#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */
554+#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */
555+
556+ u_int32_t chunk_match_type;
557+ struct ipt_sctp_flag_info flag_info[IPT_NUM_SCTP_FLAGS];
558+ int flag_count;
559+
560+ u_int32_t flags;
561+ u_int32_t invflags;
562+};
563+
564+#define bytes(type) (sizeof(type) * 8)
565+
566+#define SCTP_CHUNKMAP_SET(chunkmap, type) \
567+ do { \
568+ chunkmap[type / bytes(u_int32_t)] |= \
569+ 1 << (type % bytes(u_int32_t)); \
570+ } while (0)
571+
572+#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
573+ do { \
574+ chunkmap[type / bytes(u_int32_t)] &= \
575+ ~(1 << (type % bytes(u_int32_t))); \
576+ } while (0)
577+
578+#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
579+({ \
580+ (chunkmap[type / bytes (u_int32_t)] & \
581+ (1 << (type % bytes (u_int32_t)))) ? 1: 0; \
582+})
583+
584+#define SCTP_CHUNKMAP_RESET(chunkmap) \
585+ do { \
586+ int i; \
587+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
588+ chunkmap[i] = 0; \
589+ } while (0)
590+
591+#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
592+ do { \
593+ int i; \
594+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
595+ chunkmap[i] = ~0; \
596+ } while (0)
597+
598+#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
599+ do { \
600+ int i; \
601+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
602+ destmap[i] = srcmap[i]; \
603+ } while (0)
604+
605+#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
606+({ \
607+ int i; \
608+ int flag = 1; \
609+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
610+ if (chunkmap[i]) { \
611+ flag = 0; \
612+ break; \
613+ } \
614+ } \
615+ flag; \
616+})
617+
618+#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
619+({ \
620+ int i; \
621+ int flag = 1; \
622+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
623+ if (chunkmap[i] != ~0) { \
624+ flag = 0; \
625+ break; \
626+ } \
627+ } \
628+ flag; \
629+})
630+
631+#endif /* _IPT_SCTP_H_ */
632+
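
A usage sketch for the chunkmap helpers above, not part of the patch: match SCTP packets to destination port 80 that carry an INIT chunk (chunk type 1 in RFC 2960). How a real rule generator fills the structure is an assumption here.

/* Illustrative only: dst port 80, any packet containing an INIT chunk. */
static void example_fill_sctp_match(struct ipt_sctp_info *info)
{
	memset(info, 0, sizeof(*info));
	info->flags   = IPT_SCTP_DEST_PORTS | IPT_SCTP_CHUNK_TYPES;
	info->dpts[0] = 80;			/* destination port, min */
	info->dpts[1] = 80;			/* destination port, max */
	SCTP_CHUNKMAP_RESET(info->chunkmap);
	SCTP_CHUNKMAP_SET(info->chunkmap, 1);	/* INIT */
	info->chunk_match_type = SCTP_CHUNK_MATCH_ANY;
}
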
633diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_time.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_time.h
634--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_time.h 1970-01-01 01:00:00.000000000 +0100
635+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_time.h 2004-04-19 10:08:41.000000000 +0200
636@@ -0,0 +1,13 @@
637+#ifndef __ipt_time_h_included__
638+#define __ipt_time_h_included__
639+
640+
641+struct ipt_time_info {
642+ u_int8_t days_match; /* 1 bit per day. -SMTWTFS */
643+ u_int16_t time_start; /* 0 < time_start < 23*60+59 = 1439 */
644+ u_int16_t time_stop; /* 0:0 < time_stop < 23:59 */
645+ u_int8_t kerneltime; /* ignore skb time (and use kerneltime) or not. */
646+};
647+
648+
649+#endif /* __ipt_time_h_included__ */
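
A filled-in sketch, not from the patch: time_start and time_stop are minutes since midnight (0-1439). The day encoding is an assumption read off the "-SMTWTFS" comment, taking bit 0 as Sunday through bit 6 as Saturday.

/* Illustrative only: Monday-Friday, 08:00 to 17:30, using the kernel clock. */
struct ipt_time_info working_hours = {
	.days_match = 0x3e,		/* Mon..Fri = bits 1-5 (assumed ordering) */
	.time_start = 8 * 60,		/* 480  = 08:00 */
	.time_stop  = 17 * 60 + 30,	/* 1050 = 17:30 */
	.kerneltime = 1,		/* use kernel time, ignore skb time */
};
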
650diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_u32.h linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_u32.h
651--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv4/ipt_u32.h 1970-01-01 01:00:00.000000000 +0100
652+++ linux-2.6.6-rc1/include/linux/netfilter_ipv4/ipt_u32.h 2004-04-19 10:08:46.000000000 +0200
653@@ -0,0 +1,40 @@
654+#ifndef _IPT_U32_H
655+#define _IPT_U32_H
656+#include <linux/netfilter_ipv4/ip_tables.h>
657+
658+enum ipt_u32_ops
659+{
660+ IPT_U32_AND,
661+ IPT_U32_LEFTSH,
662+ IPT_U32_RIGHTSH,
663+ IPT_U32_AT
664+};
665+
666+struct ipt_u32_location_element
667+{
668+ u_int32_t number;
669+ u_int8_t nextop;
670+};
671+struct ipt_u32_value_element
672+{
673+ u_int32_t min;
674+ u_int32_t max;
675+};
676+/* *** any way to allow for an arbitrary number of elements?
677+ for now I settle for a limit of 10 of each */
678+#define U32MAXSIZE 10
679+struct ipt_u32_test
680+{
681+ u_int8_t nnums;
682+ struct ipt_u32_location_element location[U32MAXSIZE+1];
683+ u_int8_t nvalues;
684+ struct ipt_u32_value_element value[U32MAXSIZE+1];
685+};
686+
687+struct ipt_u32
688+{
689+ u_int8_t ntests;
690+ struct ipt_u32_test tests[U32MAXSIZE+1];
691+};
692+
693+#endif /*_IPT_U32_H*/
694diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_HL.h linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_HL.h
695--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_HL.h 1970-01-01 01:00:00.000000000 +0100
696+++ linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_HL.h 2004-04-19 10:08:26.000000000 +0200
697@@ -0,0 +1,22 @@
698+/* Hop Limit modification module for ip6tables
699+ * Maciej Soltysiak <solt@dns.toxicfilms.tv>
700+ * Based on HW's TTL module */
701+
702+#ifndef _IP6T_HL_H
703+#define _IP6T_HL_H
704+
705+enum {
706+ IP6T_HL_SET = 0,
707+ IP6T_HL_INC,
708+ IP6T_HL_DEC
709+};
710+
711+#define IP6T_HL_MAXMODE IP6T_HL_DEC
712+
713+struct ip6t_HL_info {
714+ u_int8_t mode;
715+ u_int8_t hop_limit;
716+};
717+
718+
719+#endif
720diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_REJECT.h linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_REJECT.h
721--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_REJECT.h 2004-04-15 03:33:49.000000000 +0200
722+++ linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_REJECT.h 2004-04-19 10:08:28.000000000 +0200
723@@ -2,15 +2,17 @@
724 #define _IP6T_REJECT_H
725
726 enum ip6t_reject_with {
727- IP6T_ICMP_NET_UNREACHABLE,
728- IP6T_ICMP_HOST_UNREACHABLE,
729- IP6T_ICMP_PROT_UNREACHABLE,
730- IP6T_ICMP_PORT_UNREACHABLE,
731- IP6T_ICMP_ECHOREPLY
732+ IP6T_ICMP6_NO_ROUTE,
733+ IP6T_ICMP6_ADM_PROHIBITED,
734+ IP6T_ICMP6_NOT_NEIGHBOUR,
735+ IP6T_ICMP6_ADDR_UNREACH,
736+ IP6T_ICMP6_PORT_UNREACH,
737+ IP6T_ICMP6_ECHOREPLY,
738+ IP6T_TCP_RESET
739 };
740
741 struct ip6t_reject_info {
742 enum ip6t_reject_with with; /* reject type */
743 };
744
745-#endif /*_IPT_REJECT_H*/
746+#endif /*_IP6T_REJECT_H*/
747diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_fuzzy.h
748--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h 1970-01-01 01:00:00.000000000 +0100
749+++ linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_fuzzy.h 2004-04-19 10:08:31.000000000 +0200
750@@ -0,0 +1,21 @@
751+#ifndef _IP6T_FUZZY_H
752+#define _IP6T_FUZZY_H
753+
754+#include <linux/param.h>
755+#include <linux/types.h>
756+
757+#define MAXFUZZYRATE 10000000
758+#define MINFUZZYRATE 3
759+
760+struct ip6t_fuzzy_info {
761+ u_int32_t minimum_rate;
762+ u_int32_t maximum_rate;
763+ u_int32_t packets_total;
764+ u_int32_t bytes_total;
765+ u_int32_t previous_time;
766+ u_int32_t present_time;
767+ u_int32_t mean_rate;
768+ u_int8_t acceptance_rate;
769+};
770+
771+#endif /*_IP6T_FUZZY_H*/
772diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_nth.h linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_nth.h
773--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_nth.h 1970-01-01 01:00:00.000000000 +0100
774+++ linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_nth.h 2004-04-19 10:08:34.000000000 +0200
775@@ -0,0 +1,19 @@
776+#ifndef _IP6T_NTH_H
777+#define _IP6T_NTH_H
778+
779+#include <linux/param.h>
780+#include <linux/types.h>
781+
782+#ifndef IP6T_NTH_NUM_COUNTERS
783+#define IP6T_NTH_NUM_COUNTERS 16
784+#endif
785+
786+struct ip6t_nth_info {
787+ u_int8_t every;
788+ u_int8_t not;
789+ u_int8_t startat;
790+ u_int8_t counter;
791+ u_int8_t packet;
792+};
793+
794+#endif /*_IP6T_NTH_H*/
795diff -Nur linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_random.h linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_random.h
796--- linux-2.6.6-rc1.org/include/linux/netfilter_ipv6/ip6t_random.h 1970-01-01 01:00:00.000000000 +0100
797+++ linux-2.6.6-rc1/include/linux/netfilter_ipv6/ip6t_random.h 2004-04-19 10:08:38.000000000 +0200
798@@ -0,0 +1,11 @@
799+#ifndef _IP6T_RAND_H
800+#define _IP6T_RAND_H
801+
802+#include <linux/param.h>
803+#include <linux/types.h>
804+
805+struct ip6t_rand_info {
806+ u_int8_t average;
807+};
808+
809+#endif /*_IP6T_RAND_H*/
810diff -Nur linux-2.6.6-rc1.org/include/linux/skbuff.h linux-2.6.6-rc1/include/linux/skbuff.h
811--- linux-2.6.6-rc1.org/include/linux/skbuff.h 2004-04-15 03:35:04.000000000 +0200
812+++ linux-2.6.6-rc1/include/linux/skbuff.h 2004-04-19 10:08:24.000000000 +0200
813@@ -1201,6 +1201,14 @@
814 if (nfct)
815 atomic_inc(&nfct->master->use);
816 }
817+static inline void nf_reset(struct sk_buff *skb)
818+{
819+ nf_conntrack_put(skb->nfct);
820+ skb->nfct = NULL;
821+#ifdef CONFIG_NETFILTER_DEBUG
822+ skb->nf_debug = 0;
823+#endif
824+}
825
826 #ifdef CONFIG_BRIDGE_NETFILTER
827 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
828@@ -1213,9 +1221,10 @@
829 if (nf_bridge)
830 atomic_inc(&nf_bridge->use);
831 }
832-#endif
833-
834-#endif
835+#endif /* CONFIG_BRIDGE_NETFILTER */
836+#else /* CONFIG_NETFILTER */
837+static inline void nf_reset(struct sk_buff *skb) {}
838+#endif /* CONFIG_NETFILTER */
839
840 #endif /* __KERNEL__ */
841 #endif /* _LINUX_SKBUFF_H */
842diff -Nur linux-2.6.6-rc1.org/net/ipv4/ip_gre.c linux-2.6.6-rc1/net/ipv4/ip_gre.c
843--- linux-2.6.6-rc1.org/net/ipv4/ip_gre.c 2004-04-15 03:35:20.000000000 +0200
844+++ linux-2.6.6-rc1/net/ipv4/ip_gre.c 2004-04-19 10:08:24.000000000 +0200
845@@ -643,13 +643,7 @@
846 skb->dev = tunnel->dev;
847 dst_release(skb->dst);
848 skb->dst = NULL;
849-#ifdef CONFIG_NETFILTER
850- nf_conntrack_put(skb->nfct);
851- skb->nfct = NULL;
852-#ifdef CONFIG_NETFILTER_DEBUG
853- skb->nf_debug = 0;
854-#endif
855-#endif
856+ nf_reset(skb);
857 ipgre_ecn_decapsulate(iph, skb);
858 netif_rx(skb);
859 read_unlock(&ipgre_lock);
860@@ -877,13 +871,7 @@
861 }
862 }
863
864-#ifdef CONFIG_NETFILTER
865- nf_conntrack_put(skb->nfct);
866- skb->nfct = NULL;
867-#ifdef CONFIG_NETFILTER_DEBUG
868- skb->nf_debug = 0;
869-#endif
870-#endif
871+ nf_reset(skb);
872
873 IPTUNNEL_XMIT();
874 tunnel->recursion--;
875diff -Nur linux-2.6.6-rc1.org/net/ipv4/ip_input.c linux-2.6.6-rc1/net/ipv4/ip_input.c
876--- linux-2.6.6-rc1.org/net/ipv4/ip_input.c 2004-04-15 03:33:53.000000000 +0200
877+++ linux-2.6.6-rc1/net/ipv4/ip_input.c 2004-04-19 10:08:24.000000000 +0200
878@@ -202,17 +202,13 @@
879
880 #ifdef CONFIG_NETFILTER_DEBUG
881 nf_debug_ip_local_deliver(skb);
882- skb->nf_debug = 0;
883 #endif /*CONFIG_NETFILTER_DEBUG*/
884
885 __skb_pull(skb, ihl);
886
887-#ifdef CONFIG_NETFILTER
888 /* Free reference early: we don't need it any more, and it may
889 hold ip_conntrack module loaded indefinitely. */
890- nf_conntrack_put(skb->nfct);
891- skb->nfct = NULL;
892-#endif /*CONFIG_NETFILTER*/
893+ nf_reset(skb);
894
895 /* Point into the IP datagram, just past the header. */
896 skb->h.raw = skb->data;
897diff -Nur linux-2.6.6-rc1.org/net/ipv4/ipip.c linux-2.6.6-rc1/net/ipv4/ipip.c
898--- linux-2.6.6-rc1.org/net/ipv4/ipip.c 2004-04-15 03:36:03.000000000 +0200
899+++ linux-2.6.6-rc1/net/ipv4/ipip.c 2004-04-19 10:08:24.000000000 +0200
900@@ -496,13 +496,7 @@
901 skb->dev = tunnel->dev;
902 dst_release(skb->dst);
903 skb->dst = NULL;
904-#ifdef CONFIG_NETFILTER
905- nf_conntrack_put(skb->nfct);
906- skb->nfct = NULL;
907-#ifdef CONFIG_NETFILTER_DEBUG
908- skb->nf_debug = 0;
909-#endif
910-#endif
911+ nf_reset(skb);
912 ipip_ecn_decapsulate(iph, skb);
913 netif_rx(skb);
914 read_unlock(&ipip_lock);
915@@ -647,13 +641,7 @@
916 if ((iph->ttl = tiph->ttl) == 0)
917 iph->ttl = old_iph->ttl;
918
919-#ifdef CONFIG_NETFILTER
920- nf_conntrack_put(skb->nfct);
921- skb->nfct = NULL;
922-#ifdef CONFIG_NETFILTER_DEBUG
923- skb->nf_debug = 0;
924-#endif
925-#endif
926+ nf_reset(skb);
927
928 IPTUNNEL_XMIT();
929 tunnel->recursion--;
930diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/Kconfig linux-2.6.6-rc1/net/ipv4/netfilter/Kconfig
931--- linux-2.6.6-rc1.org/net/ipv4/netfilter/Kconfig 2004-04-19 09:59:32.000000000 +0200
932+++ linux-2.6.6-rc1/net/ipv4/netfilter/Kconfig 2004-04-19 10:08:46.000000000 +0200
933@@ -603,5 +603,94 @@
934 <file:Documentation/modules.txt>. If unsure, say `N'.
935 help
936
937+config IP_NF_TARGET_IPV4OPTSSTRIP
938+ tristate 'IPV4OPTSSTRIP target support'
939+ depends on IP_NF_MANGLE
940+ help
941+
942+config IP_NF_TARGET_TTL
943+ tristate 'TTL target support'
944+ depends on IP_NF_MANGLE
945+ help
946+
947+config IP_NF_MATCH_CONNLIMIT
948+ tristate 'Connections/IP limit match support'
949+ depends on IP_NF_IPTABLES
950+ help
951+
952+config IP_NF_MATCH_DSTLIMIT
953+ tristate 'dstlimit match support'
954+ depends on IP_NF_IPTABLES
955+ help
956+
957+config IP_NF_MATCH_FUZZY
958+ tristate 'fuzzy match support'
959+ depends on IP_NF_IPTABLES
960+ help
961+
962+config IP_NF_MATCH_IPV4OPTIONS
963+ tristate 'IPV4OPTIONS match support'
964+ depends on IP_NF_IPTABLES
965+ help
966+
967+config IP_NF_MATCH_MPORT
968+ tristate 'Multiple port with ranges match support'
969+ depends on IP_NF_IPTABLES
970+ help
971+
972+config IP_NF_MATCH_NTH
973+ tristate 'Nth match support'
974+ depends on IP_NF_IPTABLES
975+ help
976+
977+config IP_NF_MATCH_OSF
978+ tristate 'OSF match support'
979+ depends on IP_NF_IPTABLES
980+ help
981+
982+config IP_POOL_STATISTICS
983+ bool 'enable statistics on pool usage'
984+ depends on IP_NF_POOL!=n
985+
986+config IP_NF_POOL
987+ tristate 'IP address pool support'
988+ depends on IP_NF_IPTABLES
989+ help
990+
991+config IP_NF_MATCH_PSD
992+ tristate 'psd match support'
993+ depends on IP_NF_IPTABLES
994+ help
995+
996+config IP_NF_MATCH_QUOTA
997+ tristate 'quota match support'
998+ depends on IP_NF_IPTABLES
999+ help
1000+
1001+config IP_NF_MATCH_RANDOM
1002+ tristate 'random match support'
1003+ depends on IP_NF_IPTABLES
1004+ help
1005+
1006+config IP_NF_MATCH_REALM
1007+ tristate 'realm match support'
1008+ depends on IP_NF_IPTABLES && NET_CLS_ROUTE
1009+ help
1010+
1011+config IP_NF_MATCH_SCTP
1012+ tristate 'SCTP protocol match support'
1013+ depends on IP_NF_IPTABLES
1014+ help
1015+
1016+config IP_NF_MATCH_TIME
1017+ tristate 'TIME match support'
1018+ depends on IP_NF_IPTABLES
1019+ help
1020+
1021+config IP_NF_MATCH_U32
1022+ tristate 'U32 match support'
1023+ depends on IP_NF_IPTABLES
1024+ help
1025+
1026 endmenu
1027
1028diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/Makefile linux-2.6.6-rc1/net/ipv4/netfilter/Makefile
1029--- linux-2.6.6-rc1.org/net/ipv4/netfilter/Makefile 2004-04-19 09:59:32.000000000 +0200
1030+++ linux-2.6.6-rc1/net/ipv4/netfilter/Makefile 2004-04-19 10:08:46.000000000 +0200
1031@@ -43,15 +43,39 @@
1032 # matches
1033 obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
1034 obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
1035+obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
1036+obj-$(CONFIG_IP_NF_MATCH_QUOTA) += ipt_quota.o
1037+obj-$(CONFIG_IP_NF_MATCH_DSTLIMIT) += ipt_dstlimit.o
1038 obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
1039+obj-$(CONFIG_IP_NF_POOL) += ipt_pool.o ipt_POOL.o ip_pool.o
1040 obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
1041 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
1042
1043 obj-$(CONFIG_IP_NF_MATCH_PKTTYPE) += ipt_pkttype.o
1044 obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
1045+
1046+obj-$(CONFIG_IP_NF_MATCH_MPORT) += ipt_mport.o
1047+
1048 obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
1049 obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
1050
1051+obj-$(CONFIG_IP_NF_MATCH_TIME) += ipt_time.o
1052+
1053+
1054+obj-$(CONFIG_IP_NF_MATCH_RANDOM) += ipt_random.o
1055+
1056+obj-$(CONFIG_IP_NF_MATCH_PSD) += ipt_psd.o
1057+
1058+obj-$(CONFIG_IP_NF_MATCH_OSF) += ipt_osf.o
1059+
1060+
1061+obj-$(CONFIG_IP_NF_MATCH_NTH) += ipt_nth.o
1062+
1063+obj-$(CONFIG_IP_NF_MATCH_IPV4OPTIONS) += ipt_ipv4options.o
1064+
1065+
1066+obj-$(CONFIG_IP_NF_MATCH_FUZZY) += ipt_fuzzy.o
1067+
1068 obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
1069
1070 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
1071@@ -60,10 +84,15 @@
1072
1073 obj-$(CONFIG_IP_NF_MATCH_LENGTH) += ipt_length.o
1074
1075+obj-$(CONFIG_IP_NF_MATCH_U32) += ipt_u32.o
1076+
1077+
1078 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
1079 obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
1080+obj-$(CONFIG_IP_NF_MATCH_CONNLIMIT) += ipt_connlimit.o
1081 obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
1082 obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
1083+obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
1084
1085 obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
1086
1087@@ -80,6 +109,8 @@
1088 obj-$(CONFIG_IP_NF_TARGET_CLASSIFY) += ipt_CLASSIFY.o
1089 obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
1090 obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
1091+obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
1092+obj-$(CONFIG_IP_NF_TARGET_IPV4OPTSSTRIP) += ipt_IPV4OPTSSTRIP.o
1093 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
1094 obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
1095 obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
1096diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.6-rc1/net/ipv4/netfilter/ip_conntrack_core.c
1097--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_conntrack_core.c 2004-04-19 09:59:32.000000000 +0200
1098+++ linux-2.6.6-rc1/net/ipv4/netfilter/ip_conntrack_core.c 2004-04-19 10:08:22.000000000 +0200
1099@@ -692,42 +692,50 @@
1100 struct ip_conntrack_expect *, tuple);
1101 READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
1102
1103- /* If master is not in hash table yet (ie. packet hasn't left
1104- this machine yet), how can other end know about expected?
1105- Hence these are not the droids you are looking for (if
1106- master ct never got confirmed, we'd hold a reference to it
1107- and weird things would happen to future packets). */
1108- if (expected && !is_confirmed(expected->expectant))
1109- expected = NULL;
1110-
1111- /* Look up the conntrack helper for master connections only */
1112- if (!expected)
1113- conntrack->helper = ip_ct_find_helper(&repl_tuple);
1114-
1115- /* If the expectation is dying, then this is a loser. */
1116- if (expected
1117- && expected->expectant->helper->timeout
1118- && ! del_timer(&expected->timeout))
1119- expected = NULL;
1120-
1121 if (expected) {
1122- DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
1123- conntrack, expected);
1124- /* Welcome, Mr. Bond. We've been expecting you... */
1125- IP_NF_ASSERT(master_ct(conntrack));
1126- __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
1127- conntrack->master = expected;
1128- expected->sibling = conntrack;
1129- LIST_DELETE(&ip_conntrack_expect_list, expected);
1130- expected->expectant->expecting--;
1131- nf_conntrack_get(&master_ct(conntrack)->infos[0]);
1132- }
1133- atomic_inc(&ip_conntrack_count);
1134+ /* If master is not in hash table yet (ie. packet hasn't left
1135+ this machine yet), how can other end know about expected?
1136+ Hence these are not the droids you are looking for (if
1137+ master ct never got confirmed, we'd hold a reference to it
1138+ and weird things would happen to future packets). */
1139+ if (!is_confirmed(expected->expectant)) {
1140+
1141+ conntrack->helper = ip_ct_find_helper(&repl_tuple);
1142+ goto end;
1143+ }
1144+
1145+ /* Expectation is dying... */
1146+ if (expected->expectant->helper->timeout
1147+ && ! del_timer(&expected->timeout)) {
1148+ goto end;
1149+ }
1150+
1151+ DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
1152+ conntrack, expected);
1153+ /* Welcome, Mr. Bond. We've been expecting you... */
1154+ IP_NF_ASSERT(master_ct(conntrack));
1155+ __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
1156+ conntrack->master = expected;
1157+ expected->sibling = conntrack;
1158+ LIST_DELETE(&ip_conntrack_expect_list, expected);
1159+ expected->expectant->expecting--;
1160+ nf_conntrack_get(&master_ct(conntrack)->infos[0]);
1161+
1162+ /* this is a braindead... --pablo */
1163+ atomic_inc(&ip_conntrack_count);
1164+ WRITE_UNLOCK(&ip_conntrack_lock);
1165+
1166+ if (expected->expectfn)
1167+ expected->expectfn(conntrack);
1168+
1169+ goto ret;
1170+ } else
1171+ conntrack->helper = ip_ct_find_helper(&repl_tuple);
1172+
1173+end: atomic_inc(&ip_conntrack_count);
1174 WRITE_UNLOCK(&ip_conntrack_lock);
1175
1176- if (expected && expected->expectfn)
1177- expected->expectfn(conntrack);
1178- return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
1179+ret: return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
1180 }
1181
1182 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1183diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_conntrack_core.c.orig linux-2.6.6-rc1/net/ipv4/netfilter/ip_conntrack_core.c.orig
1184--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_conntrack_core.c.orig 1970-01-01 01:00:00.000000000 +0100
1185+++ linux-2.6.6-rc1/net/ipv4/netfilter/ip_conntrack_core.c.orig 2004-04-19 10:02:28.000000000 +0200
1186@@ -0,0 +1,1467 @@
1187+/* Connection state tracking for netfilter. This is separated from,
1188+ but required by, the NAT layer; it can also be used by an iptables
1189+ extension. */
1190+
1191+/* (C) 1999-2001 Paul `Rusty' Russell
1192+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
1193+ *
1194+ * This program is free software; you can redistribute it and/or modify
1195+ * it under the terms of the GNU General Public License version 2 as
1196+ * published by the Free Software Foundation.
1197+ *
1198+ * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
1199+ * - new API and handling of conntrack/nat helpers
1200+ * - now capable of multiple expectations for one master
1201+ * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
1202+ * - add usage/reference counts to ip_conntrack_expect
1203+ * - export ip_conntrack[_expect]_{find_get,put} functions
1204+ * */
1205+
1206+#include <linux/config.h>
1207+#include <linux/types.h>
1208+#include <linux/icmp.h>
1209+#include <linux/ip.h>
1210+#include <linux/netfilter.h>
1211+#include <linux/netfilter_ipv4.h>
1212+#include <linux/module.h>
1213+#include <linux/skbuff.h>
1214+#include <linux/proc_fs.h>
1215+#include <linux/vmalloc.h>
1216+#include <net/checksum.h>
1217+#include <linux/stddef.h>
1218+#include <linux/sysctl.h>
1219+#include <linux/slab.h>
1220+#include <linux/random.h>
1221+#include <linux/jhash.h>
1222+/* For ERR_PTR(). Yeah, I know... --RR */
1223+#include <linux/fs.h>
1224+
1225+/* This rwlock protects the main hash table, protocol/helper/expected
1226+ registrations, conntrack timers*/
1227+#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
1228+#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
1229+
1230+#include <linux/netfilter_ipv4/ip_conntrack.h>
1231+#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
1232+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
1233+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
1234+#include <linux/netfilter_ipv4/listhelp.h>
1235+
1236+#define IP_CONNTRACK_VERSION "2.1"
1237+
1238+#if 0
1239+#define DEBUGP printk
1240+#else
1241+#define DEBUGP(format, args...)
1242+#endif
1243+
1244+DECLARE_RWLOCK(ip_conntrack_lock);
1245+DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock);
1246+
1247+void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
1248+LIST_HEAD(ip_conntrack_expect_list);
1249+LIST_HEAD(protocol_list);
1250+static LIST_HEAD(helpers);
1251+unsigned int ip_conntrack_htable_size = 0;
1252+int ip_conntrack_max;
1253+static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
1254+struct list_head *ip_conntrack_hash;
1255+static kmem_cache_t *ip_conntrack_cachep;
1256+struct ip_conntrack ip_conntrack_untracked;
1257+
1258+extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
1259+
1260+static inline int proto_cmpfn(const struct ip_conntrack_protocol *curr,
1261+ u_int8_t protocol)
1262+{
1263+ return protocol == curr->proto;
1264+}
1265+
1266+struct ip_conntrack_protocol *__ip_ct_find_proto(u_int8_t protocol)
1267+{
1268+ struct ip_conntrack_protocol *p;
1269+
1270+ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
1271+ p = LIST_FIND(&protocol_list, proto_cmpfn,
1272+ struct ip_conntrack_protocol *, protocol);
1273+ if (!p)
1274+ p = &ip_conntrack_generic_protocol;
1275+
1276+ return p;
1277+}
1278+
1279+struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol)
1280+{
1281+ struct ip_conntrack_protocol *p;
1282+
1283+ READ_LOCK(&ip_conntrack_lock);
1284+ p = __ip_ct_find_proto(protocol);
1285+ READ_UNLOCK(&ip_conntrack_lock);
1286+ return p;
1287+}
1288+
1289+inline void
1290+ip_conntrack_put(struct ip_conntrack *ct)
1291+{
1292+ IP_NF_ASSERT(ct);
1293+ IP_NF_ASSERT(ct->infos[0].master);
1294+ /* nf_conntrack_put wants to go via an info struct, so feed it
1295+ one at random. */
1296+ nf_conntrack_put(&ct->infos[0]);
1297+}
1298+
1299+static int ip_conntrack_hash_rnd_initted;
1300+static unsigned int ip_conntrack_hash_rnd;
1301+
1302+static u_int32_t
1303+hash_conntrack(const struct ip_conntrack_tuple *tuple)
1304+{
1305+#if 0
1306+ dump_tuple(tuple);
1307+#endif
1308+ return (jhash_3words(tuple->src.ip,
1309+ (tuple->dst.ip ^ tuple->dst.protonum),
1310+ (tuple->src.u.all | (tuple->dst.u.all << 16)),
1311+ ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
1312+}
1313+
1314+int
1315+get_tuple(const struct iphdr *iph,
1316+ const struct sk_buff *skb,
1317+ unsigned int dataoff,
1318+ struct ip_conntrack_tuple *tuple,
1319+ const struct ip_conntrack_protocol *protocol)
1320+{
1321+ /* Never happen */
1322+ if (iph->frag_off & htons(IP_OFFSET)) {
1323+ printk("ip_conntrack_core: Frag of proto %u.\n",
1324+ iph->protocol);
1325+ return 0;
1326+ }
1327+
1328+ tuple->src.ip = iph->saddr;
1329+ tuple->dst.ip = iph->daddr;
1330+ tuple->dst.protonum = iph->protocol;
1331+
1332+ return protocol->pkt_to_tuple(skb, dataoff, tuple);
1333+}
1334+
1335+static int
1336+invert_tuple(struct ip_conntrack_tuple *inverse,
1337+ const struct ip_conntrack_tuple *orig,
1338+ const struct ip_conntrack_protocol *protocol)
1339+{
1340+ inverse->src.ip = orig->dst.ip;
1341+ inverse->dst.ip = orig->src.ip;
1342+ inverse->dst.protonum = orig->dst.protonum;
1343+
1344+ return protocol->invert_tuple(inverse, orig);
1345+}
1346+
1347+
1348+/* ip_conntrack_expect helper functions */
1349+
1350+/* Compare tuple parts depending on mask. */
1351+static inline int expect_cmp(const struct ip_conntrack_expect *i,
1352+ const struct ip_conntrack_tuple *tuple)
1353+{
1354+ MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
1355+ return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask);
1356+}
1357+
1358+static void
1359+destroy_expect(struct ip_conntrack_expect *exp)
1360+{
1361+ DEBUGP("destroy_expect(%p) use=%d\n", exp, atomic_read(&exp->use));
1362+ IP_NF_ASSERT(atomic_read(&exp->use));
1363+ IP_NF_ASSERT(!timer_pending(&exp->timeout));
1364+
1365+ kfree(exp);
1366+}
1367+
1368+
1369+inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
1370+{
1371+ IP_NF_ASSERT(exp);
1372+
1373+ if (atomic_dec_and_test(&exp->use)) {
1374+ /* usage count dropped to zero */
1375+ destroy_expect(exp);
1376+ }
1377+}
1378+
1379+static inline struct ip_conntrack_expect *
1380+__ip_ct_expect_find(const struct ip_conntrack_tuple *tuple)
1381+{
1382+ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
1383+ MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
1384+ return LIST_FIND(&ip_conntrack_expect_list, expect_cmp,
1385+ struct ip_conntrack_expect *, tuple);
1386+}
1387+
1388+/* Find an expectation corresponding to a tuple. */
1389+struct ip_conntrack_expect *
1390+ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
1391+{
1392+ struct ip_conntrack_expect *exp;
1393+
1394+ READ_LOCK(&ip_conntrack_lock);
1395+ READ_LOCK(&ip_conntrack_expect_tuple_lock);
1396+ exp = __ip_ct_expect_find(tuple);
1397+ if (exp)
1398+ atomic_inc(&exp->use);
1399+ READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
1400+ READ_UNLOCK(&ip_conntrack_lock);
1401+
1402+ return exp;
1403+}
1404+
1405+/* remove one specific expectation from all lists and drop refcount,
1406+ * does _NOT_ delete the timer. */
1407+static void __unexpect_related(struct ip_conntrack_expect *expect)
1408+{
1409+ DEBUGP("unexpect_related(%p)\n", expect);
1410+ MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
1411+
1412+ /* we're not allowed to unexpect a confirmed expectation! */
1413+ IP_NF_ASSERT(!expect->sibling);
1414+
1415+ /* delete from global and local lists */
1416+ list_del(&expect->list);
1417+ list_del(&expect->expected_list);
1418+
1419+ /* decrement expect-count of master conntrack */
1420+ if (expect->expectant)
1421+ expect->expectant->expecting--;
1422+
1423+ ip_conntrack_expect_put(expect);
1424+}
1425+
1426+/* remove one specific expectation from all lists, drop refcount
1427+ * and expire timer.
1428+ * This function can _NOT_ be called for confirmed expects! */
1429+static void unexpect_related(struct ip_conntrack_expect *expect)
1430+{
1431+ IP_NF_ASSERT(expect->expectant);
1432+ IP_NF_ASSERT(expect->expectant->helper);
1433+ /* if we are supposed to have a timer, but we can't delete
1434+ * it: race condition. __unexpect_related will
1435+ * be called by the timeout function */
1436+ if (expect->expectant->helper->timeout
1437+ && !del_timer(&expect->timeout))
1438+ return;
1439+
1440+ __unexpect_related(expect);
1441+}
1442+
1443+/* delete all unconfirmed expectations for this conntrack */
1444+static void remove_expectations(struct ip_conntrack *ct, int drop_refcount)
1445+{
1446+ struct list_head *exp_entry, *next;
1447+ struct ip_conntrack_expect *exp;
1448+
1449+ DEBUGP("remove_expectations(%p)\n", ct);
1450+
1451+ list_for_each_safe(exp_entry, next, &ct->sibling_list) {
1452+ exp = list_entry(exp_entry, struct ip_conntrack_expect,
1453+ expected_list);
1454+
1455+ /* we skip established expectations, as we want to delete
1456+ * the un-established ones only */
1457+ if (exp->sibling) {
1458+ DEBUGP("remove_expectations: skipping established %p of %p\n", exp->sibling, ct);
1459+ if (drop_refcount) {
1460+ /* Indicate that this expectations parent is dead */
1461+ ip_conntrack_put(exp->expectant);
1462+ exp->expectant = NULL;
1463+ }
1464+ continue;
1465+ }
1466+
1467+ IP_NF_ASSERT(list_inlist(&ip_conntrack_expect_list, exp));
1468+ IP_NF_ASSERT(exp->expectant == ct);
1469+
1470+ /* delete expectation from global and private lists */
1471+ unexpect_related(exp);
1472+ }
1473+}
1474+
1475+static void
1476+clean_from_lists(struct ip_conntrack *ct)
1477+{
1478+ unsigned int ho, hr;
1479+
1480+ DEBUGP("clean_from_lists(%p)\n", ct);
1481+ MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
1482+
1483+ ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1484+ hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
1485+ LIST_DELETE(&ip_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
1486+ LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
1487+
1488+ /* Destroy all un-established, pending expectations */
1489+ remove_expectations(ct, 1);
1490+}
1491+
1492+static void
1493+destroy_conntrack(struct nf_conntrack *nfct)
1494+{
1495+ struct ip_conntrack *ct = (struct ip_conntrack *)nfct, *master = NULL;
1496+ struct ip_conntrack_protocol *proto;
1497+
1498+ DEBUGP("destroy_conntrack(%p)\n", ct);
1499+ IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
1500+ IP_NF_ASSERT(!timer_pending(&ct->timeout));
1501+
1502+ /* To make sure we don't get any weird locking issues here:
1503+ * destroy_conntrack() MUST NOT be called with a write lock
1504+ * to ip_conntrack_lock!!! -HW */
1505+ proto = ip_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
1506+ if (proto && proto->destroy)
1507+ proto->destroy(ct);
1508+
1509+ if (ip_conntrack_destroyed)
1510+ ip_conntrack_destroyed(ct);
1511+
1512+ WRITE_LOCK(&ip_conntrack_lock);
1513+ /* Delete us from our own list to prevent corruption later */
1514+ list_del(&ct->sibling_list);
1515+
1516+ /* Delete our master expectation */
1517+ if (ct->master) {
1518+ if (ct->master->expectant) {
1519+ /* can't call __unexpect_related here,
1520+ * since it would screw up expect_list */
1521+ list_del(&ct->master->expected_list);
1522+ master = ct->master->expectant;
1523+ }
1524+ kfree(ct->master);
1525+ }
1526+ WRITE_UNLOCK(&ip_conntrack_lock);
1527+
1528+ if (master)
1529+ ip_conntrack_put(master);
1530+
1531+ DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
1532+ kmem_cache_free(ip_conntrack_cachep, ct);
1533+ atomic_dec(&ip_conntrack_count);
1534+}
1535+
1536+static void death_by_timeout(unsigned long ul_conntrack)
1537+{
1538+ struct ip_conntrack *ct = (void *)ul_conntrack;
1539+
1540+ WRITE_LOCK(&ip_conntrack_lock);
1541+ clean_from_lists(ct);
1542+ WRITE_UNLOCK(&ip_conntrack_lock);
1543+ ip_conntrack_put(ct);
1544+}
1545+
1546+static inline int
1547+conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
1548+ const struct ip_conntrack_tuple *tuple,
1549+ const struct ip_conntrack *ignored_conntrack)
1550+{
1551+ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
1552+ return i->ctrack != ignored_conntrack
1553+ && ip_ct_tuple_equal(tuple, &i->tuple);
1554+}
1555+
1556+static struct ip_conntrack_tuple_hash *
1557+__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
1558+ const struct ip_conntrack *ignored_conntrack)
1559+{
1560+ struct ip_conntrack_tuple_hash *h;
1561+ unsigned int hash = hash_conntrack(tuple);
1562+
1563+ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
1564+ h = LIST_FIND(&ip_conntrack_hash[hash],
1565+ conntrack_tuple_cmp,
1566+ struct ip_conntrack_tuple_hash *,
1567+ tuple, ignored_conntrack);
1568+ return h;
1569+}
1570+
1571+/* Find a connection corresponding to a tuple. */
1572+struct ip_conntrack_tuple_hash *
1573+ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
1574+ const struct ip_conntrack *ignored_conntrack)
1575+{
1576+ struct ip_conntrack_tuple_hash *h;
1577+
1578+ READ_LOCK(&ip_conntrack_lock);
1579+ h = __ip_conntrack_find(tuple, ignored_conntrack);
1580+ if (h)
1581+ atomic_inc(&h->ctrack->ct_general.use);
1582+ READ_UNLOCK(&ip_conntrack_lock);
1583+
1584+ return h;
1585+}
1586+
1587+static inline struct ip_conntrack *
1588+__ip_conntrack_get(struct nf_ct_info *nfct, enum ip_conntrack_info *ctinfo)
1589+{
1590+ struct ip_conntrack *ct
1591+ = (struct ip_conntrack *)nfct->master;
1592+
1593+ /* ctinfo is the index of the nfct inside the conntrack */
1594+ *ctinfo = nfct - ct->infos;
1595+ IP_NF_ASSERT(*ctinfo >= 0 && *ctinfo < IP_CT_NUMBER);
1596+ return ct;
1597+}
1598+
1599+/* Return conntrack and conntrack_info given skb->nfct->master */
1600+struct ip_conntrack *
1601+ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
1602+{
1603+ if (skb->nfct)
1604+ return __ip_conntrack_get(skb->nfct, ctinfo);
1605+ return NULL;
1606+}
1607+
1608+/* Confirm a connection given skb->nfct; places it in hash table */
1609+int
1610+__ip_conntrack_confirm(struct nf_ct_info *nfct)
1611+{
1612+ unsigned int hash, repl_hash;
1613+ struct ip_conntrack *ct;
1614+ enum ip_conntrack_info ctinfo;
1615+
1616+ ct = __ip_conntrack_get(nfct, &ctinfo);
1617+
1618+ /* ipt_REJECT uses ip_conntrack_attach to attach related
1619+ ICMP/TCP RST packets in other direction. Actual packet
1620+ which created connection will be IP_CT_NEW or for an
1621+ expected connection, IP_CT_RELATED. */
1622+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
1623+ return NF_ACCEPT;
1624+
1625+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1626+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
1627+
1628+ /* We're not in hash table, and we refuse to set up related
1629+ connections for unconfirmed conns. But packet copies and
1630+ REJECT will give spurious warnings here. */
1631+ /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
1632+
1633+ /* No external references means noone else could have
1634+ confirmed us. */
1635+ IP_NF_ASSERT(!is_confirmed(ct));
1636+ DEBUGP("Confirming conntrack %p\n", ct);
1637+
1638+ WRITE_LOCK(&ip_conntrack_lock);
1639+ /* See if there's one in the list already, including reverse:
1640+ NAT could have grabbed it without realizing, since we're
1641+ not in the hash. If there is, we lost race. */
1642+ if (!LIST_FIND(&ip_conntrack_hash[hash],
1643+ conntrack_tuple_cmp,
1644+ struct ip_conntrack_tuple_hash *,
1645+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
1646+ && !LIST_FIND(&ip_conntrack_hash[repl_hash],
1647+ conntrack_tuple_cmp,
1648+ struct ip_conntrack_tuple_hash *,
1649+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
1650+ list_prepend(&ip_conntrack_hash[hash],
1651+ &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
1652+ list_prepend(&ip_conntrack_hash[repl_hash],
1653+ &ct->tuplehash[IP_CT_DIR_REPLY]);
1654+ /* Timer relative to confirmation time, not original
1655+ setting time, otherwise we'd get timer wrap in
1656+ weird delay cases. */
1657+ ct->timeout.expires += jiffies;
1658+ add_timer(&ct->timeout);
1659+ atomic_inc(&ct->ct_general.use);
1660+ set_bit(IPS_CONFIRMED_BIT, &ct->status);
1661+ WRITE_UNLOCK(&ip_conntrack_lock);
1662+ return NF_ACCEPT;
1663+ }
1664+
1665+ WRITE_UNLOCK(&ip_conntrack_lock);
1666+ return NF_DROP;
1667+}
1668+
1669+/* Returns true if a connection corresponds to the tuple (required
1670+ for NAT). */
1671+int
1672+ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
1673+ const struct ip_conntrack *ignored_conntrack)
1674+{
1675+ struct ip_conntrack_tuple_hash *h;
1676+
1677+ READ_LOCK(&ip_conntrack_lock);
1678+ h = __ip_conntrack_find(tuple, ignored_conntrack);
1679+ READ_UNLOCK(&ip_conntrack_lock);
1680+
1681+ return h != NULL;
1682+}
1683+
1684+/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
1685+struct ip_conntrack *
1686+icmp_error_track(struct sk_buff *skb,
1687+ enum ip_conntrack_info *ctinfo,
1688+ unsigned int hooknum)
1689+{
1690+ struct ip_conntrack_tuple innertuple, origtuple;
1691+ struct {
1692+ struct icmphdr icmp;
1693+ struct iphdr ip;
1694+ } inside;
1695+ struct ip_conntrack_protocol *innerproto;
1696+ struct ip_conntrack_tuple_hash *h;
1697+ int dataoff;
1698+
1699+ IP_NF_ASSERT(skb->nfct == NULL);
1700+
1701+ /* Not enough header? */
1702+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &inside, sizeof(inside))!=0)
1703+ return NULL;
1704+
1705+ if (inside.icmp.type != ICMP_DEST_UNREACH
1706+ && inside.icmp.type != ICMP_SOURCE_QUENCH
1707+ && inside.icmp.type != ICMP_TIME_EXCEEDED
1708+ && inside.icmp.type != ICMP_PARAMETERPROB
1709+ && inside.icmp.type != ICMP_REDIRECT)
1710+ return NULL;
1711+
1712+	/* Ignore ICMPs containing fragments (shouldn't happen) */
1713+ if (inside.ip.frag_off & htons(IP_OFFSET)) {
1714+ DEBUGP("icmp_error_track: fragment of proto %u\n",
1715+ inside.ip.protocol);
1716+ return NULL;
1717+ }
1718+
1719+ innerproto = ip_ct_find_proto(inside.ip.protocol);
1720+ dataoff = skb->nh.iph->ihl*4 + sizeof(inside.icmp) + inside.ip.ihl*4;
1721+ /* Are they talking about one of our connections? */
1722+ if (!get_tuple(&inside.ip, skb, dataoff, &origtuple, innerproto)) {
1723+ DEBUGP("icmp_error: ! get_tuple p=%u", inside.ip.protocol);
1724+ return NULL;
1725+ }
1726+
1727+ /* Ordinarily, we'd expect the inverted tupleproto, but it's
1728+ been preserved inside the ICMP. */
1729+ if (!invert_tuple(&innertuple, &origtuple, innerproto)) {
1730+ DEBUGP("icmp_error_track: Can't invert tuple\n");
1731+ return NULL;
1732+ }
1733+
1734+ *ctinfo = IP_CT_RELATED;
1735+
1736+ h = ip_conntrack_find_get(&innertuple, NULL);
1737+ if (!h) {
1738+ /* Locally generated ICMPs will match inverted if they
1739+ haven't been SNAT'ed yet */
1740+ /* FIXME: NAT code has to handle half-done double NAT --RR */
1741+ if (hooknum == NF_IP_LOCAL_OUT)
1742+ h = ip_conntrack_find_get(&origtuple, NULL);
1743+
1744+ if (!h) {
1745+ DEBUGP("icmp_error_track: no match\n");
1746+ return NULL;
1747+ }
1748+ /* Reverse direction from that found */
1749+ if (DIRECTION(h) != IP_CT_DIR_REPLY)
1750+ *ctinfo += IP_CT_IS_REPLY;
1751+ } else {
1752+ if (DIRECTION(h) == IP_CT_DIR_REPLY)
1753+ *ctinfo += IP_CT_IS_REPLY;
1754+ }
1755+
1756+ /* Update skb to refer to this connection */
1757+ skb->nfct = &h->ctrack->infos[*ctinfo];
1758+ return h->ctrack;
1759+}
1760+
1761+/* There's a small race here where we may free a just-assured
1762+ connection. Too bad: we're in trouble anyway. */
1763+static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
1764+{
1765+ return !(test_bit(IPS_ASSURED_BIT, &i->ctrack->status));
1766+}
1767+
1768+static int early_drop(struct list_head *chain)
1769+{
1770+ /* Traverse backwards: gives us oldest, which is roughly LRU */
1771+ struct ip_conntrack_tuple_hash *h;
1772+ int dropped = 0;
1773+
1774+ READ_LOCK(&ip_conntrack_lock);
1775+ h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
1776+ if (h)
1777+ atomic_inc(&h->ctrack->ct_general.use);
1778+ READ_UNLOCK(&ip_conntrack_lock);
1779+
1780+ if (!h)
1781+ return dropped;
1782+
1783+ if (del_timer(&h->ctrack->timeout)) {
1784+ death_by_timeout((unsigned long)h->ctrack);
1785+ dropped = 1;
1786+ }
1787+ ip_conntrack_put(h->ctrack);
1788+ return dropped;
1789+}
1790+
1791+static inline int helper_cmp(const struct ip_conntrack_helper *i,
1792+ const struct ip_conntrack_tuple *rtuple)
1793+{
1794+ return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
1795+}
1796+
1797+struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple)
1798+{
1799+ return LIST_FIND(&helpers, helper_cmp,
1800+ struct ip_conntrack_helper *,
1801+ tuple);
1802+}
1803+
1804+/* Allocate a new conntrack: we return -ENOMEM if classification
1805+ failed due to stress. Otherwise it really is unclassifiable. */
1806+static struct ip_conntrack_tuple_hash *
1807+init_conntrack(const struct ip_conntrack_tuple *tuple,
1808+ struct ip_conntrack_protocol *protocol,
1809+ struct sk_buff *skb)
1810+{
1811+ struct ip_conntrack *conntrack;
1812+ struct ip_conntrack_tuple repl_tuple;
1813+ size_t hash;
1814+ struct ip_conntrack_expect *expected;
1815+ int i;
1816+ static unsigned int drop_next;
1817+
1818+ if (!ip_conntrack_hash_rnd_initted) {
1819+ get_random_bytes(&ip_conntrack_hash_rnd, 4);
1820+ ip_conntrack_hash_rnd_initted = 1;
1821+ }
1822+
1823+ hash = hash_conntrack(tuple);
1824+
1825+ if (ip_conntrack_max &&
1826+ atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
1827+ /* Try dropping from random chain, or else from the
1828+		   chain we are about to put it into (in case they're trying to
1829+ bomb one hash chain). */
1830+ unsigned int next = (drop_next++)%ip_conntrack_htable_size;
1831+
1832+ if (!early_drop(&ip_conntrack_hash[next])
1833+ && !early_drop(&ip_conntrack_hash[hash])) {
1834+ if (net_ratelimit())
1835+ printk(KERN_WARNING
1836+ "ip_conntrack: table full, dropping"
1837+ " packet.\n");
1838+ return ERR_PTR(-ENOMEM);
1839+ }
1840+ }
1841+
1842+ if (!invert_tuple(&repl_tuple, tuple, protocol)) {
1843+ DEBUGP("Can't invert tuple.\n");
1844+ return NULL;
1845+ }
1846+
1847+ conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
1848+ if (!conntrack) {
1849+ DEBUGP("Can't allocate conntrack.\n");
1850+ return ERR_PTR(-ENOMEM);
1851+ }
1852+
1853+ memset(conntrack, 0, sizeof(*conntrack));
1854+ atomic_set(&conntrack->ct_general.use, 1);
1855+ conntrack->ct_general.destroy = destroy_conntrack;
1856+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
1857+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
1858+ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
1859+ conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
1860+ for (i=0; i < IP_CT_NUMBER; i++)
1861+ conntrack->infos[i].master = &conntrack->ct_general;
1862+
1863+ if (!protocol->new(conntrack, skb)) {
1864+ kmem_cache_free(ip_conntrack_cachep, conntrack);
1865+ return NULL;
1866+ }
1867+ /* Don't set timer yet: wait for confirmation */
1868+ init_timer(&conntrack->timeout);
1869+ conntrack->timeout.data = (unsigned long)conntrack;
1870+ conntrack->timeout.function = death_by_timeout;
1871+
1872+ INIT_LIST_HEAD(&conntrack->sibling_list);
1873+
1874+ WRITE_LOCK(&ip_conntrack_lock);
1875+ /* Need finding and deleting of expected ONLY if we win race */
1876+ READ_LOCK(&ip_conntrack_expect_tuple_lock);
1877+ expected = LIST_FIND(&ip_conntrack_expect_list, expect_cmp,
1878+ struct ip_conntrack_expect *, tuple);
1879+ READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
1880+
1881+ /* If master is not in hash table yet (ie. packet hasn't left
1882+ this machine yet), how can other end know about expected?
1883+ Hence these are not the droids you are looking for (if
1884+ master ct never got confirmed, we'd hold a reference to it
1885+ and weird things would happen to future packets). */
1886+ if (expected && !is_confirmed(expected->expectant))
1887+ expected = NULL;
1888+
1889+ /* Look up the conntrack helper for master connections only */
1890+ if (!expected)
1891+ conntrack->helper = ip_ct_find_helper(&repl_tuple);
1892+
1893+ /* If the expectation is dying, then this is a loser. */
1894+ if (expected
1895+ && expected->expectant->helper->timeout
1896+ && ! del_timer(&expected->timeout))
1897+ expected = NULL;
1898+
1899+ if (expected) {
1900+ DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
1901+ conntrack, expected);
1902+ /* Welcome, Mr. Bond. We've been expecting you... */
1903+ IP_NF_ASSERT(master_ct(conntrack));
1904+ __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
1905+ conntrack->master = expected;
1906+ expected->sibling = conntrack;
1907+ LIST_DELETE(&ip_conntrack_expect_list, expected);
1908+ expected->expectant->expecting--;
1909+ nf_conntrack_get(&master_ct(conntrack)->infos[0]);
1910+ }
1911+ atomic_inc(&ip_conntrack_count);
1912+ WRITE_UNLOCK(&ip_conntrack_lock);
1913+
1914+ if (expected && expected->expectfn)
1915+ expected->expectfn(conntrack);
1916+ return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
1917+}
1918+
1919+/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1920+static inline struct ip_conntrack *
1921+resolve_normal_ct(struct sk_buff *skb,
1922+ struct ip_conntrack_protocol *proto,
1923+ int *set_reply,
1924+ unsigned int hooknum,
1925+ enum ip_conntrack_info *ctinfo)
1926+{
1927+ struct ip_conntrack_tuple tuple;
1928+ struct ip_conntrack_tuple_hash *h;
1929+
1930+ IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
1931+
1932+ if (!get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, &tuple, proto))
1933+ return NULL;
1934+
1935+ /* look for tuple match */
1936+ h = ip_conntrack_find_get(&tuple, NULL);
1937+ if (!h) {
1938+ h = init_conntrack(&tuple, proto, skb);
1939+ if (!h)
1940+ return NULL;
1941+ if (IS_ERR(h))
1942+ return (void *)h;
1943+ }
1944+
1945+ /* It exists; we have (non-exclusive) reference. */
1946+ if (DIRECTION(h) == IP_CT_DIR_REPLY) {
1947+ *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
1948+ /* Please set reply bit if this packet OK */
1949+ *set_reply = 1;
1950+ } else {
1951+ /* Once we've had two way comms, always ESTABLISHED. */
1952+ if (test_bit(IPS_SEEN_REPLY_BIT, &h->ctrack->status)) {
1953+ DEBUGP("ip_conntrack_in: normal packet for %p\n",
1954+ h->ctrack);
1955+ *ctinfo = IP_CT_ESTABLISHED;
1956+ } else if (test_bit(IPS_EXPECTED_BIT, &h->ctrack->status)) {
1957+ DEBUGP("ip_conntrack_in: related packet for %p\n",
1958+ h->ctrack);
1959+ *ctinfo = IP_CT_RELATED;
1960+ } else {
1961+ DEBUGP("ip_conntrack_in: new packet for %p\n",
1962+ h->ctrack);
1963+ *ctinfo = IP_CT_NEW;
1964+ }
1965+ *set_reply = 0;
1966+ }
1967+ skb->nfct = &h->ctrack->infos[*ctinfo];
1968+ return h->ctrack;
1969+}
1970+
1971+/* Netfilter hook itself. */
1972+unsigned int ip_conntrack_in(unsigned int hooknum,
1973+ struct sk_buff **pskb,
1974+ const struct net_device *in,
1975+ const struct net_device *out,
1976+ int (*okfn)(struct sk_buff *))
1977+{
1978+ struct ip_conntrack *ct;
1979+ enum ip_conntrack_info ctinfo;
1980+ struct ip_conntrack_protocol *proto;
1981+ int set_reply;
1982+ int ret;
1983+
1984+ /* Never happen */
1985+ if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
1986+ if (net_ratelimit()) {
1987+ printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n",
1988+ (*pskb)->nh.iph->protocol, hooknum);
1989+ }
1990+ return NF_DROP;
1991+ }
1992+
1993+ /* FIXME: Do this right please. --RR */
1994+ (*pskb)->nfcache |= NFC_UNKNOWN;
1995+
1996+/* Doesn't cover locally-generated broadcast, so not worth it. */
1997+#if 0
1998+ /* Ignore broadcast: no `connection'. */
1999+ if ((*pskb)->pkt_type == PACKET_BROADCAST) {
2000+ printk("Broadcast packet!\n");
2001+ return NF_ACCEPT;
2002+ } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
2003+ == htonl(0x000000FF)) {
2004+ printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
2005+ NIPQUAD((*pskb)->nh.iph->saddr),
2006+ NIPQUAD((*pskb)->nh.iph->daddr),
2007+ (*pskb)->sk, (*pskb)->pkt_type);
2008+ }
2009+#endif
2010+
2011+ /* Previously seen (loopback or untracked)? Ignore. */
2012+ if ((*pskb)->nfct)
2013+ return NF_ACCEPT;
2014+
2015+ proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);
2016+
2017+ /* It may be an icmp error... */
2018+ if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
2019+ && icmp_error_track(*pskb, &ctinfo, hooknum))
2020+ return NF_ACCEPT;
2021+
2022+ if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo)))
2023+ /* Not valid part of a connection */
2024+ return NF_ACCEPT;
2025+
2026+ if (IS_ERR(ct))
2027+ /* Too stressed to deal. */
2028+ return NF_DROP;
2029+
2030+ IP_NF_ASSERT((*pskb)->nfct);
2031+
2032+ ret = proto->packet(ct, *pskb, ctinfo);
2033+ if (ret == -1) {
2034+ /* Invalid */
2035+ nf_conntrack_put((*pskb)->nfct);
2036+ (*pskb)->nfct = NULL;
2037+ return NF_ACCEPT;
2038+ }
2039+
2040+ if (ret != NF_DROP && ct->helper) {
2041+ ret = ct->helper->help(*pskb, ct, ctinfo);
2042+ if (ret == -1) {
2043+ /* Invalid */
2044+ nf_conntrack_put((*pskb)->nfct);
2045+ (*pskb)->nfct = NULL;
2046+ return NF_ACCEPT;
2047+ }
2048+ }
2049+ if (set_reply)
2050+ set_bit(IPS_SEEN_REPLY_BIT, &ct->status);
2051+
2052+ return ret;
2053+}
2054+
2055+int invert_tuplepr(struct ip_conntrack_tuple *inverse,
2056+ const struct ip_conntrack_tuple *orig)
2057+{
2058+ return invert_tuple(inverse, orig, ip_ct_find_proto(orig->dst.protonum));
2059+}
2060+
2061+static inline int resent_expect(const struct ip_conntrack_expect *i,
2062+ const struct ip_conntrack_tuple *tuple,
2063+ const struct ip_conntrack_tuple *mask)
2064+{
2065+ DEBUGP("resent_expect\n");
2066+ DEBUGP(" tuple: "); DUMP_TUPLE(&i->tuple);
2067+ DEBUGP("ct_tuple: "); DUMP_TUPLE(&i->ct_tuple);
2068+ DEBUGP("test tuple: "); DUMP_TUPLE(tuple);
2069+ return (((i->ct_tuple.dst.protonum == 0 && ip_ct_tuple_equal(&i->tuple, tuple))
2070+ || (i->ct_tuple.dst.protonum && ip_ct_tuple_equal(&i->ct_tuple, tuple)))
2071+ && ip_ct_tuple_equal(&i->mask, mask));
2072+}
2073+
2074+/* Would two expected things clash? */
2075+static inline int expect_clash(const struct ip_conntrack_expect *i,
2076+ const struct ip_conntrack_tuple *tuple,
2077+ const struct ip_conntrack_tuple *mask)
2078+{
2079+ /* Part covered by intersection of masks must be unequal,
2080+ otherwise they clash */
2081+ struct ip_conntrack_tuple intersect_mask
2082+ = { { i->mask.src.ip & mask->src.ip,
2083+ { i->mask.src.u.all & mask->src.u.all } },
2084+ { i->mask.dst.ip & mask->dst.ip,
2085+ { i->mask.dst.u.all & mask->dst.u.all },
2086+ i->mask.dst.protonum & mask->dst.protonum } };
2087+
2088+ return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask);
2089+}
2090+
2091+inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect)
2092+{
2093+ WRITE_LOCK(&ip_conntrack_lock);
2094+ unexpect_related(expect);
2095+ WRITE_UNLOCK(&ip_conntrack_lock);
2096+}
2097+
2098+static void expectation_timed_out(unsigned long ul_expect)
2099+{
2100+ struct ip_conntrack_expect *expect = (void *) ul_expect;
2101+
2102+ DEBUGP("expectation %p timed out\n", expect);
2103+ WRITE_LOCK(&ip_conntrack_lock);
2104+ __unexpect_related(expect);
2105+ WRITE_UNLOCK(&ip_conntrack_lock);
2106+}
2107+
2108+struct ip_conntrack_expect *
2109+ip_conntrack_expect_alloc()
2110+{
2111+ struct ip_conntrack_expect *new;
2112+
2113+ new = (struct ip_conntrack_expect *)
2114+ kmalloc(sizeof(struct ip_conntrack_expect), GFP_ATOMIC);
2115+ if (!new) {
2116+ DEBUGP("expect_related: OOM allocating expect\n");
2117+ return NULL;
2118+ }
2119+
2120+	/* tuple_cmp compares the whole union, so we have to initialize it cleanly */
2121+ memset(new, 0, sizeof(struct ip_conntrack_expect));
2122+
2123+ return new;
2124+}
2125+
2126+static void
2127+ip_conntrack_expect_insert(struct ip_conntrack_expect *new,
2128+ struct ip_conntrack *related_to)
2129+{
2130+ DEBUGP("new expectation %p of conntrack %p\n", new, related_to);
2131+ new->expectant = related_to;
2132+ new->sibling = NULL;
2133+ atomic_set(&new->use, 1);
2134+
2135+ /* add to expected list for this connection */
2136+ list_add(&new->expected_list, &related_to->sibling_list);
2137+ /* add to global list of expectations */
2138+
2139+ list_prepend(&ip_conntrack_expect_list, &new->list);
2140+ /* add and start timer if required */
2141+ if (related_to->helper->timeout) {
2142+ init_timer(&new->timeout);
2143+ new->timeout.data = (unsigned long)new;
2144+ new->timeout.function = expectation_timed_out;
2145+ new->timeout.expires = jiffies +
2146+ related_to->helper->timeout * HZ;
2147+ add_timer(&new->timeout);
2148+ }
2149+ related_to->expecting++;
2150+}
2151+
2152+/* Add a related connection. */
2153+int ip_conntrack_expect_related(struct ip_conntrack_expect *expect,
2154+ struct ip_conntrack *related_to)
2155+{
2156+ struct ip_conntrack_expect *old;
2157+ int ret = 0;
2158+
2159+ WRITE_LOCK(&ip_conntrack_lock);
2160+ /* Because of the write lock, no reader can walk the lists,
2161+ * so there is no need to use the tuple lock too */
2162+
2163+ DEBUGP("ip_conntrack_expect_related %p\n", related_to);
2164+ DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
2165+ DEBUGP("mask: "); DUMP_TUPLE(&expect->mask);
2166+
2167+ old = LIST_FIND(&ip_conntrack_expect_list, resent_expect,
2168+ struct ip_conntrack_expect *, &expect->tuple,
2169+ &expect->mask);
2170+ if (old) {
2171+ /* Helper private data may contain offsets but no pointers
2172+		   pointing into the payload - otherwise we would have to copy
2173+ the data filled out by the helper over the old one */
2174+ DEBUGP("expect_related: resent packet\n");
2175+ if (related_to->helper->timeout) {
2176+ if (!del_timer(&old->timeout)) {
2177+ /* expectation is dying. Fall through */
2178+ goto out;
2179+ } else {
2180+ old->timeout.expires = jiffies +
2181+ related_to->helper->timeout * HZ;
2182+ add_timer(&old->timeout);
2183+ }
2184+ }
2185+
2186+ WRITE_UNLOCK(&ip_conntrack_lock);
2187+ kfree(expect);
2188+ return -EEXIST;
2189+
2190+ } else if (related_to->helper->max_expected &&
2191+ related_to->expecting >= related_to->helper->max_expected) {
2192+ struct list_head *cur_item;
2193+ /* old == NULL */
2194+ if (!(related_to->helper->flags &
2195+ IP_CT_HELPER_F_REUSE_EXPECT)) {
2196+ WRITE_UNLOCK(&ip_conntrack_lock);
2197+ if (net_ratelimit())
2198+ printk(KERN_WARNING
2199+ "ip_conntrack: max number of expected "
2200+ "connections %i of %s reached for "
2201+ "%u.%u.%u.%u->%u.%u.%u.%u\n",
2202+ related_to->helper->max_expected,
2203+ related_to->helper->name,
2204+ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
2205+ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
2206+ kfree(expect);
2207+ return -EPERM;
2208+ }
2209+ DEBUGP("ip_conntrack: max number of expected "
2210+ "connections %i of %s reached for "
2211+ "%u.%u.%u.%u->%u.%u.%u.%u, reusing\n",
2212+ related_to->helper->max_expected,
2213+ related_to->helper->name,
2214+ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
2215+ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
2216+
2217+		/* choose the oldest expectation to evict */
2218+ list_for_each(cur_item, &related_to->sibling_list) {
2219+ struct ip_conntrack_expect *cur;
2220+
2221+ cur = list_entry(cur_item,
2222+ struct ip_conntrack_expect,
2223+ expected_list);
2224+ if (cur->sibling == NULL) {
2225+ old = cur;
2226+ break;
2227+ }
2228+ }
2229+
2230+ /* (!old) cannot happen, since related_to->expecting is the
2231+ * number of unconfirmed expects */
2232+ IP_NF_ASSERT(old);
2233+
2234+ /* newnat14 does not reuse the real allocated memory
2235+ * structures but rather unexpects the old and
2236+ * allocates a new. unexpect_related will decrement
2237+		 * allocates a new one. unexpect_related will decrement
2238+ */
2239+ unexpect_related(old);
2240+ ret = -EPERM;
2241+ } else if (LIST_FIND(&ip_conntrack_expect_list, expect_clash,
2242+ struct ip_conntrack_expect *, &expect->tuple,
2243+ &expect->mask)) {
2244+ WRITE_UNLOCK(&ip_conntrack_lock);
2245+ DEBUGP("expect_related: busy!\n");
2246+
2247+ kfree(expect);
2248+ return -EBUSY;
2249+ }
2250+
2251+out: ip_conntrack_expect_insert(expect, related_to);
2252+
2253+ WRITE_UNLOCK(&ip_conntrack_lock);
2254+
2255+ return ret;
2256+}
2257+
2258+/* Change tuple in an existing expectation */
2259+int ip_conntrack_change_expect(struct ip_conntrack_expect *expect,
2260+ struct ip_conntrack_tuple *newtuple)
2261+{
2262+ int ret;
2263+
2264+ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
2265+ WRITE_LOCK(&ip_conntrack_expect_tuple_lock);
2266+
2267+ DEBUGP("change_expect:\n");
2268+ DEBUGP("exp tuple: "); DUMP_TUPLE(&expect->tuple);
2269+ DEBUGP("exp mask: "); DUMP_TUPLE(&expect->mask);
2270+ DEBUGP("newtuple: "); DUMP_TUPLE(newtuple);
2271+ if (expect->ct_tuple.dst.protonum == 0) {
2272+ /* Never seen before */
2273+ DEBUGP("change expect: never seen before\n");
2274+ if (!ip_ct_tuple_equal(&expect->tuple, newtuple)
2275+ && LIST_FIND(&ip_conntrack_expect_list, expect_clash,
2276+ struct ip_conntrack_expect *, newtuple, &expect->mask)) {
2277+ /* Force NAT to find an unused tuple */
2278+ ret = -1;
2279+ } else {
2280+ memcpy(&expect->ct_tuple, &expect->tuple, sizeof(expect->tuple));
2281+ memcpy(&expect->tuple, newtuple, sizeof(expect->tuple));
2282+ ret = 0;
2283+ }
2284+ } else {
2285+ /* Resent packet */
2286+ DEBUGP("change expect: resent packet\n");
2287+ if (ip_ct_tuple_equal(&expect->tuple, newtuple)) {
2288+ ret = 0;
2289+ } else {
2290+ /* Force NAT to choose again the same port */
2291+ ret = -1;
2292+ }
2293+ }
2294+ WRITE_UNLOCK(&ip_conntrack_expect_tuple_lock);
2295+
2296+ return ret;
2297+}
2298+
2299+/* Alter reply tuple (maybe alter helper). If it's already taken,
2300+ return 0 and don't do alteration. */
2301+int ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
2302+ const struct ip_conntrack_tuple *newreply)
2303+{
2304+ WRITE_LOCK(&ip_conntrack_lock);
2305+ if (__ip_conntrack_find(newreply, conntrack)) {
2306+ WRITE_UNLOCK(&ip_conntrack_lock);
2307+ return 0;
2308+ }
2309+ /* Should be unconfirmed, so not in hash table yet */
2310+ IP_NF_ASSERT(!is_confirmed(conntrack));
2311+
2312+ DEBUGP("Altering reply tuple of %p to ", conntrack);
2313+ DUMP_TUPLE(newreply);
2314+
2315+ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
2316+ if (!conntrack->master)
2317+ conntrack->helper = LIST_FIND(&helpers, helper_cmp,
2318+ struct ip_conntrack_helper *,
2319+ newreply);
2320+ WRITE_UNLOCK(&ip_conntrack_lock);
2321+
2322+ return 1;
2323+}
2324+
2325+int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
2326+{
2327+ WRITE_LOCK(&ip_conntrack_lock);
2328+ list_prepend(&helpers, me);
2329+ WRITE_UNLOCK(&ip_conntrack_lock);
2330+
2331+ return 0;
2332+}
2333+
2334+static inline int unhelp(struct ip_conntrack_tuple_hash *i,
2335+ const struct ip_conntrack_helper *me)
2336+{
2337+ if (i->ctrack->helper == me) {
2338+ /* Get rid of any expected. */
2339+ remove_expectations(i->ctrack, 0);
2340+ /* And *then* set helper to NULL */
2341+ i->ctrack->helper = NULL;
2342+ }
2343+ return 0;
2344+}
2345+
2346+void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
2347+{
2348+ unsigned int i;
2349+
2350+ /* Need write lock here, to delete helper. */
2351+ WRITE_LOCK(&ip_conntrack_lock);
2352+ LIST_DELETE(&helpers, me);
2353+
2354+ /* Get rid of expecteds, set helpers to NULL. */
2355+ for (i = 0; i < ip_conntrack_htable_size; i++)
2356+ LIST_FIND_W(&ip_conntrack_hash[i], unhelp,
2357+ struct ip_conntrack_tuple_hash *, me);
2358+ WRITE_UNLOCK(&ip_conntrack_lock);
2359+
2360+	/* Someone could still be looking at the helper in a bh. */
2361+ synchronize_net();
2362+}
2363+
2364+/* Refresh conntrack for this many jiffies. */
2365+void ip_ct_refresh(struct ip_conntrack *ct, unsigned long extra_jiffies)
2366+{
2367+ IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
2368+
2369+ /* If not in hash table, timer will not be active yet */
2370+ if (!is_confirmed(ct))
2371+ ct->timeout.expires = extra_jiffies;
2372+ else {
2373+ WRITE_LOCK(&ip_conntrack_lock);
2374+ /* Need del_timer for race avoidance (may already be dying). */
2375+ if (del_timer(&ct->timeout)) {
2376+ ct->timeout.expires = jiffies + extra_jiffies;
2377+ add_timer(&ct->timeout);
2378+ }
2379+ WRITE_UNLOCK(&ip_conntrack_lock);
2380+ }
2381+}
2382+
2383+/* Returns new sk_buff, or NULL */
2384+struct sk_buff *
2385+ip_ct_gather_frags(struct sk_buff *skb)
2386+{
2387+ struct sock *sk = skb->sk;
2388+#ifdef CONFIG_NETFILTER_DEBUG
2389+ unsigned int olddebug = skb->nf_debug;
2390+#endif
2391+ if (sk) {
2392+ sock_hold(sk);
2393+ skb_orphan(skb);
2394+ }
2395+
2396+ local_bh_disable();
2397+ skb = ip_defrag(skb);
2398+ local_bh_enable();
2399+
2400+ if (!skb) {
2401+ if (sk)
2402+ sock_put(sk);
2403+ return skb;
2404+ }
2405+
2406+ if (sk) {
2407+ skb_set_owner_w(skb, sk);
2408+ sock_put(sk);
2409+ }
2410+
2411+ ip_send_check(skb->nh.iph);
2412+ skb->nfcache |= NFC_ALTERED;
2413+#ifdef CONFIG_NETFILTER_DEBUG
2414+ /* Packet path as if nothing had happened. */
2415+ skb->nf_debug = olddebug;
2416+#endif
2417+ return skb;
2418+}
2419+
2420+/* Used by ipt_REJECT. */
2421+static void ip_conntrack_attach(struct sk_buff *nskb, struct nf_ct_info *nfct)
2422+{
2423+ struct ip_conntrack *ct;
2424+ enum ip_conntrack_info ctinfo;
2425+
2426+ ct = __ip_conntrack_get(nfct, &ctinfo);
2427+
2428+ /* This ICMP is in reverse direction to the packet which
2429+ caused it */
2430+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
2431+ ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
2432+ else
2433+ ctinfo = IP_CT_RELATED;
2434+
2435+ /* Attach new skbuff, and increment count */
2436+ nskb->nfct = &ct->infos[ctinfo];
2437+ atomic_inc(&ct->ct_general.use);
2438+}
2439+
2440+static inline int
2441+do_kill(const struct ip_conntrack_tuple_hash *i,
2442+ int (*kill)(const struct ip_conntrack *i, void *data),
2443+ void *data)
2444+{
2445+ return kill(i->ctrack, data);
2446+}
2447+
2448+/* Bring out ya dead! */
2449+static struct ip_conntrack_tuple_hash *
2450+get_next_corpse(int (*kill)(const struct ip_conntrack *i, void *data),
2451+ void *data, unsigned int *bucket)
2452+{
2453+ struct ip_conntrack_tuple_hash *h = NULL;
2454+
2455+ READ_LOCK(&ip_conntrack_lock);
2456+ for (; !h && *bucket < ip_conntrack_htable_size; (*bucket)++) {
2457+ h = LIST_FIND(&ip_conntrack_hash[*bucket], do_kill,
2458+ struct ip_conntrack_tuple_hash *, kill, data);
2459+ }
2460+ if (h)
2461+ atomic_inc(&h->ctrack->ct_general.use);
2462+ READ_UNLOCK(&ip_conntrack_lock);
2463+
2464+ return h;
2465+}
2466+
2467+void
2468+ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data),
2469+ void *data)
2470+{
2471+ struct ip_conntrack_tuple_hash *h;
2472+ unsigned int bucket = 0;
2473+
2474+ while ((h = get_next_corpse(kill, data, &bucket)) != NULL) {
2475+		/* Time to push up daisies... */
2476+ if (del_timer(&h->ctrack->timeout))
2477+ death_by_timeout((unsigned long)h->ctrack);
2478+ /* ... else the timer will get him soon. */
2479+
2480+ ip_conntrack_put(h->ctrack);
2481+ }
2482+}
2483+
2484+/* Fast function for those who don't want to parse /proc (and I don't
2485+ blame them). */
2486+/* Reversing the socket's dst/src point of view gives us the reply
2487+ mapping. */
2488+static int
2489+getorigdst(struct sock *sk, int optval, void *user, int *len)
2490+{
2491+ struct inet_opt *inet = inet_sk(sk);
2492+ struct ip_conntrack_tuple_hash *h;
2493+ struct ip_conntrack_tuple tuple;
2494+
2495+ IP_CT_TUPLE_U_BLANK(&tuple);
2496+ tuple.src.ip = inet->rcv_saddr;
2497+ tuple.src.u.tcp.port = inet->sport;
2498+ tuple.dst.ip = inet->daddr;
2499+ tuple.dst.u.tcp.port = inet->dport;
2500+ tuple.dst.protonum = IPPROTO_TCP;
2501+
2502+ /* We only do TCP at the moment: is there a better way? */
2503+ if (strcmp(sk->sk_prot->name, "TCP")) {
2504+ DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
2505+ return -ENOPROTOOPT;
2506+ }
2507+
2508+ if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
2509+ DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
2510+ *len, sizeof(struct sockaddr_in));
2511+ return -EINVAL;
2512+ }
2513+
2514+ h = ip_conntrack_find_get(&tuple, NULL);
2515+ if (h) {
2516+ struct sockaddr_in sin;
2517+
2518+ sin.sin_family = AF_INET;
2519+ sin.sin_port = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
2520+ .tuple.dst.u.tcp.port;
2521+ sin.sin_addr.s_addr = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
2522+ .tuple.dst.ip;
2523+
2524+ DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
2525+ NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
2526+ ip_conntrack_put(h->ctrack);
2527+ if (copy_to_user(user, &sin, sizeof(sin)) != 0)
2528+ return -EFAULT;
2529+ else
2530+ return 0;
2531+ }
2532+ DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
2533+ NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
2534+ NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
2535+ return -ENOENT;
2536+}
2537+
2538+static struct nf_sockopt_ops so_getorigdst = {
2539+ .pf = PF_INET,
2540+ .get_optmin = SO_ORIGINAL_DST,
2541+ .get_optmax = SO_ORIGINAL_DST+1,
2542+ .get = &getorigdst,
2543+};
2544+
2545+static int kill_all(const struct ip_conntrack *i, void *data)
2546+{
2547+ return 1;
2548+}
2549+
2550+/* Mishearing the voices in his head, our hero wonders how he's
2551+ supposed to kill the mall. */
2552+void ip_conntrack_cleanup(void)
2553+{
2554+ ip_ct_attach = NULL;
2555+ /* This makes sure all current packets have passed through
2556+ netfilter framework. Roll on, two-stage module
2557+ delete... */
2558+ synchronize_net();
2559+
2560+ i_see_dead_people:
2561+ ip_ct_selective_cleanup(kill_all, NULL);
2562+ if (atomic_read(&ip_conntrack_count) != 0) {
2563+ schedule();
2564+ goto i_see_dead_people;
2565+ }
2566+
2567+ kmem_cache_destroy(ip_conntrack_cachep);
2568+ vfree(ip_conntrack_hash);
2569+ nf_unregister_sockopt(&so_getorigdst);
2570+}
2571+
2572+static int hashsize;
2573+MODULE_PARM(hashsize, "i");
2574+
2575+int __init ip_conntrack_init(void)
2576+{
2577+ unsigned int i;
2578+ int ret;
2579+
2580+ /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
2581+ * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
2582+ if (hashsize) {
2583+ ip_conntrack_htable_size = hashsize;
2584+ } else {
2585+ ip_conntrack_htable_size
2586+ = (((num_physpages << PAGE_SHIFT) / 16384)
2587+ / sizeof(struct list_head));
2588+ if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
2589+ ip_conntrack_htable_size = 8192;
2590+ if (ip_conntrack_htable_size < 16)
2591+ ip_conntrack_htable_size = 16;
2592+ }
2593+ ip_conntrack_max = 8 * ip_conntrack_htable_size;
2594+
2595+ printk("ip_conntrack version %s (%u buckets, %d max)"
2596+ " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
2597+ ip_conntrack_htable_size, ip_conntrack_max,
2598+ sizeof(struct ip_conntrack));
2599+
2600+ ret = nf_register_sockopt(&so_getorigdst);
2601+ if (ret != 0) {
2602+ printk(KERN_ERR "Unable to register netfilter socket option\n");
2603+ return ret;
2604+ }
2605+
2606+ ip_conntrack_hash = vmalloc(sizeof(struct list_head)
2607+ * ip_conntrack_htable_size);
2608+ if (!ip_conntrack_hash) {
2609+ printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
2610+ goto err_unreg_sockopt;
2611+ }
2612+
2613+ ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
2614+ sizeof(struct ip_conntrack), 0,
2615+ SLAB_HWCACHE_ALIGN, NULL, NULL);
2616+ if (!ip_conntrack_cachep) {
2617+ printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
2618+ goto err_free_hash;
2619+ }
2620+ /* Don't NEED lock here, but good form anyway. */
2621+ WRITE_LOCK(&ip_conntrack_lock);
2622+ /* Sew in builtin protocols. */
2623+ list_append(&protocol_list, &ip_conntrack_protocol_tcp);
2624+ list_append(&protocol_list, &ip_conntrack_protocol_udp);
2625+ list_append(&protocol_list, &ip_conntrack_protocol_icmp);
2626+ WRITE_UNLOCK(&ip_conntrack_lock);
2627+
2628+ for (i = 0; i < ip_conntrack_htable_size; i++)
2629+ INIT_LIST_HEAD(&ip_conntrack_hash[i]);
2630+
2631+ /* For use by ipt_REJECT */
2632+ ip_ct_attach = ip_conntrack_attach;
2633+
2634+ /* Set up fake conntrack:
2635+ - to never be deleted, not in any hashes */
2636+ atomic_set(&ip_conntrack_untracked.ct_general.use, 1);
2637+	/* - and make it look like a confirmed connection */
2638+ set_bit(IPS_CONFIRMED_BIT, &ip_conntrack_untracked.status);
2639+ /* - and prepare the ctinfo field for REJECT & NAT. */
2640+ ip_conntrack_untracked.infos[IP_CT_NEW].master =
2641+ ip_conntrack_untracked.infos[IP_CT_RELATED].master =
2642+ ip_conntrack_untracked.infos[IP_CT_RELATED + IP_CT_IS_REPLY].master =
2643+ &ip_conntrack_untracked.ct_general;
2644+
2645+ return ret;
2646+
2647+err_free_hash:
2648+ vfree(ip_conntrack_hash);
2649+err_unreg_sockopt:
2650+ nf_unregister_sockopt(&so_getorigdst);
2651+
2652+ return -ENOMEM;
2653+}
2654diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_pool.c linux-2.6.6-rc1/net/ipv4/netfilter/ip_pool.c
2655--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_pool.c 1970-01-01 01:00:00.000000000 +0100
2656+++ linux-2.6.6-rc1/net/ipv4/netfilter/ip_pool.c 2004-04-19 10:08:36.000000000 +0200
2657@@ -0,0 +1,332 @@
2658+/* Kernel module for IP pool management */
2659+
2660+#include <linux/module.h>
2661+#include <linux/ip.h>
2662+#include <linux/skbuff.h>
2663+#include <linux/netfilter_ipv4/ip_tables.h>
2664+#include <linux/netfilter_ipv4/ip_pool.h>
2665+#include <linux/errno.h>
2666+#include <asm/uaccess.h>
2667+#include <asm/bitops.h>
2668+#include <linux/interrupt.h>
2669+#include <linux/spinlock.h>
2670+
2671+#if 0
2672+#define DP printk
2673+#else
2674+#define DP(format, args...)
2675+#endif
2676+
2677+MODULE_LICENSE("GPL");
2678+
2679+#define NR_POOL 16
2680+static int nr_pool = NR_POOL; /* overwrite this when loading module */
2681+
2682+struct ip_pool {
2683+ u_int32_t first_ip; /* host byte order, included in range */
2684+ u_int32_t last_ip; /* host byte order, included in range */
2685+ void *members; /* the bitmap proper */
2686+ int nr_use; /* total nr. of tests through this */
2687+ int nr_match; /* total nr. of matches through this */
2688+ rwlock_t lock;
2689+};
2690+
2691+static struct ip_pool *POOL;
2692+
2693+static inline struct ip_pool *lookup(ip_pool_t index)
2694+{
2695+ if (index < 0 || index >= nr_pool) {
2696+ DP("ip_pool:lookup: bad index %d\n", index);
2697+ return 0;
2698+ }
2699+ return POOL+index;
2700+}
2701+
2702+int ip_pool_match(ip_pool_t index, u_int32_t addr)
2703+{
2704+ struct ip_pool *pool = lookup(index);
2705+ int res = 0;
2706+
2707+ if (!pool || !pool->members)
2708+ return 0;
2709+ read_lock_bh(&pool->lock);
2710+ if (pool->members) {
2711+ if (addr >= pool->first_ip && addr <= pool->last_ip) {
2712+ addr -= pool->first_ip;
2713+ if (test_bit(addr, pool->members)) {
2714+ res = 1;
2715+#ifdef CONFIG_IP_POOL_STATISTICS
2716+ pool->nr_match++;
2717+#endif
2718+ }
2719+ }
2720+#ifdef CONFIG_IP_POOL_STATISTICS
2721+ pool->nr_use++;
2722+#endif
2723+ }
2724+ read_unlock_bh(&pool->lock);
2725+ return res;
2726+}
2727+
2728+static int pool_change(ip_pool_t index, u_int32_t addr, int isdel)
2729+{
2730+ struct ip_pool *pool;
2731+ int res = -1;
2732+
2733+ pool = lookup(index);
2734+ if ( !pool || !pool->members
2735+ || addr < pool->first_ip || addr > pool->last_ip)
2736+ return -1;
2737+ read_lock_bh(&pool->lock);
2738+ if (pool->members && addr >= pool->first_ip && addr <= pool->last_ip) {
2739+ addr -= pool->first_ip;
2740+ res = isdel
2741+ ? (0 != test_and_clear_bit(addr, pool->members))
2742+ : (0 != test_and_set_bit(addr, pool->members));
2743+ }
2744+ read_unlock_bh(&pool->lock);
2745+ return res;
2746+}
2747+
2748+int ip_pool_mod(ip_pool_t index, u_int32_t addr, int isdel)
2749+{
2750+ int res = pool_change(index,addr,isdel);
2751+
2752+ if (!isdel) res = !res;
2753+ return res;
2754+}
2755+
2756+static inline int bitmap_bytes(u_int32_t a, u_int32_t b)
2757+{
2758+ return 4*((((b-a+8)/8)+3)/4);
2759+}
2760+
2761+static inline int poolbytes(ip_pool_t index)
2762+{
2763+ struct ip_pool *pool = lookup(index);
2764+
2765+ return pool ? bitmap_bytes(pool->first_ip, pool->last_ip) : 0;
2766+}
2767+
2768+static int setpool(
2769+ struct sock *sk,
2770+ int optval,
2771+ void *user,
2772+ unsigned int len
2773+) {
2774+ struct ip_pool_request req;
2775+
2776+ DP("ip_pool:setpool: optval=%d, user=%p, len=%d\n", optval, user, len);
2777+ if (!capable(CAP_NET_ADMIN))
2778+ return -EPERM;
2779+ if (optval != SO_IP_POOL)
2780+ return -EBADF;
2781+ if (len != sizeof(req))
2782+ return -EINVAL;
2783+ if (copy_from_user(&req, user, sizeof(req)) != 0)
2784+ return -EFAULT;
2785+ printk("obsolete op - upgrade your ippool(8) utility.\n");
2786+ return -EINVAL;
2787+}
2788+
2789+static int getpool(
2790+ struct sock *sk,
2791+ int optval,
2792+ void *user,
2793+ int *len
2794+) {
2795+ struct ip_pool_request req;
2796+ struct ip_pool *pool;
2797+ ip_pool_t i;
2798+ int newbytes;
2799+ void *newmembers;
2800+ int res;
2801+
2802+ DP("ip_pool:getpool: optval=%d, user=%p\n", optval, user);
2803+ if (!capable(CAP_NET_ADMIN))
2804+ return -EINVAL;
2805+ if (optval != SO_IP_POOL)
2806+ return -EINVAL;
2807+ if (*len != sizeof(req)) {
2808+ return -EFAULT;
2809+ }
2810+ if (copy_from_user(&req, user, sizeof(req)) != 0)
2811+ return -EFAULT;
2812+ DP("ip_pool:getpool op=%d, index=%d\n", req.op, req.index);
2813+ if (req.op < IP_POOL_BAD001) {
2814+ printk("obsolete op - upgrade your ippool(8) utility.\n");
2815+ return -EFAULT;
2816+ }
2817+ switch(req.op) {
2818+ case IP_POOL_HIGH_NR:
2819+ DP("ip_pool HIGH_NR\n");
2820+ req.index = IP_POOL_NONE;
2821+ for (i=0; i<nr_pool; i++)
2822+ if (POOL[i].members)
2823+ req.index = i;
2824+ return copy_to_user(user, &req, sizeof(req));
2825+ case IP_POOL_LOOKUP:
2826+ DP("ip_pool LOOKUP\n");
2827+ pool = lookup(req.index);
2828+ if (!pool)
2829+ return -EINVAL;
2830+ if (!pool->members)
2831+ return -EBADF;
2832+ req.addr = htonl(pool->first_ip);
2833+ req.addr2 = htonl(pool->last_ip);
2834+ return copy_to_user(user, &req, sizeof(req));
2835+ case IP_POOL_USAGE:
2836+ DP("ip_pool USE\n");
2837+ pool = lookup(req.index);
2838+ if (!pool)
2839+ return -EINVAL;
2840+ if (!pool->members)
2841+ return -EBADF;
2842+ req.addr = pool->nr_use;
2843+ req.addr2 = pool->nr_match;
2844+ return copy_to_user(user, &req, sizeof(req));
2845+ case IP_POOL_TEST_ADDR:
2846+ DP("ip_pool TEST 0x%08x\n", req.addr);
2847+ pool = lookup(req.index);
2848+ if (!pool)
2849+ return -EINVAL;
2850+ res = 0;
2851+ read_lock_bh(&pool->lock);
2852+ if (!pool->members) {
2853+ DP("ip_pool TEST_ADDR no members in pool\n");
2854+ res = -EBADF;
2855+ goto unlock_and_return_res;
2856+ }
2857+ req.addr = ntohl(req.addr);
2858+ if (req.addr < pool->first_ip) {
2859+ DP("ip_pool TEST_ADDR address < pool bounds\n");
2860+ res = -ERANGE;
2861+ goto unlock_and_return_res;
2862+ }
2863+ if (req.addr > pool->last_ip) {
2864+ DP("ip_pool TEST_ADDR address > pool bounds\n");
2865+ res = -ERANGE;
2866+ goto unlock_and_return_res;
2867+ }
2868+ req.addr = (0 != test_bit((req.addr - pool->first_ip),
2869+ pool->members));
2870+ read_unlock_bh(&pool->lock);
2871+ return copy_to_user(user, &req, sizeof(req));
2872+ case IP_POOL_FLUSH:
2873+ DP("ip_pool FLUSH not yet implemented.\n");
2874+ return -EBUSY;
2875+ case IP_POOL_DESTROY:
2876+ DP("ip_pool DESTROY not yet implemented.\n");
2877+ return -EBUSY;
2878+ case IP_POOL_INIT:
2879+ DP("ip_pool INIT 0x%08x-0x%08x\n", req.addr, req.addr2);
2880+ pool = lookup(req.index);
2881+ if (!pool)
2882+ return -EINVAL;
2883+ req.addr = ntohl(req.addr);
2884+ req.addr2 = ntohl(req.addr2);
2885+ if (req.addr > req.addr2) {
2886+ DP("ip_pool INIT bad ip range\n");
2887+ return -EINVAL;
2888+ }
2889+ newbytes = bitmap_bytes(req.addr, req.addr2);
2890+ newmembers = kmalloc(newbytes, GFP_KERNEL);
2891+ if (!newmembers) {
2892+ DP("ip_pool INIT out of mem for %d bytes\n", newbytes);
2893+ return -ENOMEM;
2894+ }
2895+ memset(newmembers, 0, newbytes);
2896+ write_lock_bh(&pool->lock);
2897+ if (pool->members) {
2898+ DP("ip_pool INIT pool %d exists\n", req.index);
2899+ kfree(newmembers);
2900+ res = -EBUSY;
2901+ goto unlock_and_return_res;
2902+ }
2903+ pool->first_ip = req.addr;
2904+ pool->last_ip = req.addr2;
2905+ pool->nr_use = 0;
2906+ pool->nr_match = 0;
2907+ pool->members = newmembers;
2908+ write_unlock_bh(&pool->lock);
2909+ return 0;
2910+ case IP_POOL_ADD_ADDR:
2911+ DP("ip_pool ADD_ADDR 0x%08x\n", req.addr);
2912+ req.addr = pool_change(req.index, ntohl(req.addr), 0);
2913+ return copy_to_user(user, &req, sizeof(req));
2914+ case IP_POOL_DEL_ADDR:
2915+ DP("ip_pool DEL_ADDR 0x%08x\n", req.addr);
2916+ req.addr = pool_change(req.index, ntohl(req.addr), 1);
2917+ return copy_to_user(user, &req, sizeof(req));
2918+ default:
2919+ DP("ip_pool:getpool bad op %d\n", req.op);
2920+ return -EINVAL;
2921+ }
2922+ return -EINVAL;
2923+
2924+unlock_and_return_res:
2925+ if (pool)
2926+ read_unlock_bh(&pool->lock);
2927+ return res;
2928+}
2929+
2930+static struct nf_sockopt_ops so_pool
2931+= { { NULL, NULL }, PF_INET,
2932+ SO_IP_POOL, SO_IP_POOL+1, &setpool,
2933+ SO_IP_POOL, SO_IP_POOL+1, &getpool,
2934+ 0, NULL };
2935+
2936+MODULE_PARM(nr_pool, "i");
2937+
2938+static int __init init(void)
2939+{
2940+ ip_pool_t i;
2941+ int res;
2942+
2943+ if (nr_pool < 1) {
2944+ printk("ip_pool module init: bad nr_pool %d\n", nr_pool);
2945+ return -EINVAL;
2946+ }
2947+ POOL = kmalloc(nr_pool * sizeof(*POOL), GFP_KERNEL);
2948+ if (!POOL) {
2949+ printk("ip_pool module init: out of memory for nr_pool %d\n",
2950+ nr_pool);
2951+ return -ENOMEM;
2952+ }
2953+ for (i=0; i<nr_pool; i++) {
2954+ POOL[i].first_ip = 0;
2955+ POOL[i].last_ip = 0;
2956+ POOL[i].members = 0;
2957+ POOL[i].nr_use = 0;
2958+ POOL[i].nr_match = 0;
2959+ POOL[i].lock = RW_LOCK_UNLOCKED;
2960+ }
2961+ res = nf_register_sockopt(&so_pool);
2962+ DP("ip_pool:init %d pools, result %d\n", nr_pool, res);
2963+ if (res != 0) {
2964+ kfree(POOL);
2965+ POOL = 0;
2966+ }
2967+ return res;
2968+}
2969+
2970+static void __exit fini(void)
2971+{
2972+ ip_pool_t i;
2973+
2974+ DP("ip_pool:fini BYEBYE\n");
2975+ nf_unregister_sockopt(&so_pool);
2976+ for (i=0; i<nr_pool; i++) {
2977+ if (POOL[i].members) {
2978+ kfree(POOL[i].members);
2979+ POOL[i].members = 0;
2980+ }
2981+ }
2982+ kfree(POOL);
2983+ POOL = 0;
2984+ DP("ip_pool:fini these are the famous last words\n");
2985+ return;
2986+}
2987+
2988+module_init(init);
2989+module_exit(fini);
2990diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_tables.c linux-2.6.6-rc1/net/ipv4/netfilter/ip_tables.c
2991--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ip_tables.c 2004-04-15 03:34:03.000000000 +0200
2992+++ linux-2.6.6-rc1/net/ipv4/netfilter/ip_tables.c 2004-04-19 10:08:25.000000000 +0200
2993@@ -1716,9 +1716,9 @@
2994 };
2995
2996 #ifdef CONFIG_PROC_FS
2997-static inline int print_name(const char *i,
2998- off_t start_offset, char *buffer, int length,
2999- off_t *pos, unsigned int *count)
3000+static int print_name(const char *i,
3001+ off_t start_offset, char *buffer, int length,
3002+ off_t *pos, unsigned int *count)
3003 {
3004 if ((*count)++ >= start_offset) {
3005 unsigned int namelen;
3006@@ -1752,6 +1752,15 @@
3007 return pos;
3008 }
3009
3010+static inline int print_target(const struct ipt_target *t,
3011+ off_t start_offset, char *buffer, int length,
3012+ off_t *pos, unsigned int *count)
3013+{
3014+ if (t != &ipt_standard_target && t != &ipt_error_target)
3015+ return 0;
3016+ return print_name((char *)t, start_offset, buffer, length, pos, count);
3017+}
3018+
3019 static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
3020 {
3021 off_t pos = 0;
3022@@ -1760,7 +1769,7 @@
3023 if (down_interruptible(&ipt_mutex) != 0)
3024 return 0;
3025
3026- LIST_FIND(&ipt_target, print_name, void *,
3027+ LIST_FIND(&ipt_target, print_target, struct ipt_target *,
3028 offset, buffer, length, &pos, &count);
3029
3030 up(&ipt_mutex);
3031diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c linux-2.6.6-rc1/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c
3032--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c 1970-01-01 01:00:00.000000000 +0100
3033+++ linux-2.6.6-rc1/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c 2004-04-19 10:08:26.000000000 +0200
3034@@ -0,0 +1,89 @@
3035+/**
3036+ * Strip all IP options in the IP packet header.
3037+ *
3038+ * (C) 2001 by Fabrice MARIE <fabrice@netfilter.org>
3039+ * This software is distributed under GNU GPL v2, 1991
3040+ */
3041+
3042+#include <linux/module.h>
3043+#include <linux/skbuff.h>
3044+#include <linux/ip.h>
3045+#include <net/checksum.h>
3046+
3047+#include <linux/netfilter_ipv4/ip_tables.h>
3048+
3049+MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
3050+MODULE_DESCRIPTION("Strip all options in IPv4 packets");
3051+MODULE_LICENSE("GPL");
3052+
3053+static unsigned int
3054+target(struct sk_buff **pskb,
3055+ const struct net_device *in,
3056+ const struct net_device *out,
3057+ unsigned int hooknum,
3058+ const void *targinfo,
3059+ void *userinfo)
3060+{
3061+ struct iphdr *iph;
3062+ struct sk_buff *skb;
3063+ struct ip_options *opt;
3064+ unsigned char *optiph;
3065+ int l;
3066+
3067+ if (!skb_ip_make_writable(pskb, (*pskb)->len))
3068+ return NF_DROP;
3069+
3070+ skb = (*pskb);
3071+ iph = (*pskb)->nh.iph;
3072+ optiph = skb->nh.raw;
3073+ l = ((struct ip_options *)(&(IPCB(skb)->opt)))->optlen;
3074+
3075+ /* if no options in packet then nothing to clear. */
3076+ if (iph->ihl * 4 == sizeof(struct iphdr))
3077+ return IPT_CONTINUE;
3078+
3079+ /* else clear all options */
3080+ memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
3081+ memset(optiph+sizeof(struct iphdr), IPOPT_NOOP, l);
3082+ opt = &(IPCB(skb)->opt);
3083+ opt->is_data = 0;
3084+ opt->optlen = l;
3085+
3086+ skb->nfcache |= NFC_ALTERED;
3087+
3088+ return IPT_CONTINUE;
3089+}
3090+
3091+static int
3092+checkentry(const char *tablename,
3093+ const struct ipt_entry *e,
3094+ void *targinfo,
3095+ unsigned int targinfosize,
3096+ unsigned int hook_mask)
3097+{
3098+ if (strcmp(tablename, "mangle")) {
3099+ printk(KERN_WARNING "IPV4OPTSSTRIP: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
3100+ return 0;
3101+ }
3102+ /* nothing else to check because no parameters */
3103+ return 1;
3104+}
3105+
3106+static struct ipt_target ipt_ipv4optsstrip_reg = {
3107+ .name = "IPV4OPTSSTRIP",
3108+ .target = target,
3109+ .checkentry = checkentry,
3110+ .me = THIS_MODULE };
3111+
3112+static int __init init(void)
3113+{
3114+ return ipt_register_target(&ipt_ipv4optsstrip_reg);
3115+}
3116+
3117+static void __exit fini(void)
3118+{
3119+ ipt_unregister_target(&ipt_ipv4optsstrip_reg);
3120+}
3121+
3122+module_init(init);
3123+module_exit(fini);
3124diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_POOL.c linux-2.6.6-rc1/net/ipv4/netfilter/ipt_POOL.c
3125--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_POOL.c 1970-01-01 01:00:00.000000000 +0100
3126+++ linux-2.6.6-rc1/net/ipv4/netfilter/ipt_POOL.c 2004-04-19 10:08:36.000000000 +0200
3127@@ -0,0 +1,116 @@
3128+/* ipt_POOL.c - netfilter target to manipulate IP pools
3129+ *
3130+ * This target can be used almost everywhere. It acts on some specified
3131+ * IP pool, adding or deleting some IP address in the pool. The address
3132+ * can be either the source (--addsrc, --delsrc), or destination (--add/deldst)
3133+ * of the packet under inspection.
3134+ *
3135+ * The target normally returns IPT_CONTINUE.
3136+ */
3137+
3138+#include <linux/types.h>
3139+#include <linux/ip.h>
3140+#include <linux/timer.h>
3141+#include <linux/module.h>
3142+#include <linux/netfilter.h>
3143+#include <linux/netdevice.h>
3144+#include <linux/if.h>
3145+#include <linux/inetdevice.h>
3146+#include <net/protocol.h>
3147+#include <net/checksum.h>
3148+#include <linux/netfilter_ipv4.h>
3149+#include <linux/netfilter_ipv4/ip_nat_rule.h>
3150+#include <linux/netfilter_ipv4/ipt_pool.h>
3151+
3152+#if 0
3153+#define DEBUGP printk
3154+#else
3155+#define DEBUGP(format, args...)
3156+#endif
3157+
3158+/*** NOTE NOTE NOTE NOTE ***
3159+**
3160+** By sheer luck, I get away with using the "struct ipt_pool_info", as defined
3161+** in <linux/netfilter_ipv4/ipt_pool.h>, both as the match and target info.
3162+** Here, in the target implementation, ipt_pool_info.src, if not IP_POOL_NONE,
3163+** is modified for the source IP address of the packet under inspection.
3164+** The same way, the ipt_pool_info.dst pool is modified for the destination.
3165+**
3166+** The address is added to the pool normally. However, if IPT_POOL_DEL_dir
3167+** flag is set in ipt_pool_info.flags, the address is deleted from the pool.
3168+**
3169+** If a modification was done to the pool, we possibly return ACCEPT or DROP,
3170+** if the right IPT_POOL_MOD_dir_ACCEPT or _MOD_dir_DROP flags are set.
3171+** The IPT_POOL_INV_MOD_dir flag inverts the sense of the check (i.e. the
3172+** ACCEPT and DROP flags are evaluated when the pool was not modified.)
3173+*/
3174+
3175+static int
3176+do_check(const char *tablename,
3177+ const struct ipt_entry *e,
3178+ void *targinfo,
3179+ unsigned int targinfosize,
3180+ unsigned int hook_mask)
3181+{
3182+ const struct ipt_pool_info *ipi = targinfo;
3183+
3184+ if (targinfosize != IPT_ALIGN(sizeof(*ipi))) {
3185+ DEBUGP("POOL_check: size %u.\n", targinfosize);
3186+ return 0;
3187+ }
3188+ DEBUGP("ipt_POOL:do_check(%d,%d,%d)\n",ipi->src,ipi->dst,ipi->flags);
3189+ return 1;
3190+}
3191+
3192+static unsigned int
3193+do_target(struct sk_buff **pskb,
3194+ unsigned int hooknum,
3195+ const struct net_device *in,
3196+ const struct net_device *out,
3197+ const void *targinfo,
3198+ void *userinfo)
3199+{
3200+ const struct ipt_pool_info *ipi = targinfo;
3201+ int modified;
3202+ unsigned int verdict = IPT_CONTINUE;
3203+
3204+ if (ipi->src != IP_POOL_NONE) {
3205+ modified = ip_pool_mod(ipi->src, ntohl((*pskb)->nh.iph->saddr),
3206+ ipi->flags & IPT_POOL_DEL_SRC);
3207+ if (!!modified ^ !!(ipi->flags & IPT_POOL_INV_MOD_SRC)) {
3208+ if (ipi->flags & IPT_POOL_MOD_SRC_ACCEPT)
3209+ verdict = NF_ACCEPT;
3210+ else if (ipi->flags & IPT_POOL_MOD_SRC_DROP)
3211+ verdict = NF_DROP;
3212+ }
3213+ }
3214+ if (verdict == IPT_CONTINUE && ipi->dst != IP_POOL_NONE) {
3215+ modified = ip_pool_mod(ipi->dst, ntohl((*pskb)->nh.iph->daddr),
3216+ ipi->flags & IPT_POOL_DEL_DST);
3217+ if (!!modified ^ !!(ipi->flags & IPT_POOL_INV_MOD_DST)) {
3218+ if (ipi->flags & IPT_POOL_MOD_DST_ACCEPT)
3219+ verdict = NF_ACCEPT;
3220+ else if (ipi->flags & IPT_POOL_MOD_DST_DROP)
3221+ verdict = NF_DROP;
3222+ }
3223+ }
3224+ return verdict;
3225+}
3226+
3227+static struct ipt_target pool_reg
3228+= { { NULL, NULL }, "POOL", do_target, do_check, NULL, THIS_MODULE };
3229+
3230+static int __init init(void)
3231+{
3232+ DEBUGP("init ipt_POOL\n");
3233+ return ipt_register_target(&pool_reg);
3234+}
3235+
3236+static void __exit fini(void)
3237+{
3238+ DEBUGP("fini ipt_POOL\n");
3239+ ipt_unregister_target(&pool_reg);
3240+}
3241+
3242+module_init(init);
3243+module_exit(fini);
3244diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_TTL.c linux-2.6.6-rc1/net/ipv4/netfilter/ipt_TTL.c
3245--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_TTL.c 1970-01-01 01:00:00.000000000 +0100
3246+++ linux-2.6.6-rc1/net/ipv4/netfilter/ipt_TTL.c 2004-04-19 10:08:28.000000000 +0200
3247@@ -0,0 +1,120 @@
3248+/* TTL modification target for IP tables
3249+ * (C) 2000 by Harald Welte <laforge@gnumonks.org>
3250+ *
3251+ * Version: $Revision$
3252+ *
3253+ * This software is distributed under the terms of GNU GPL
3254+ */
3255+
3256+#include <linux/module.h>
3257+#include <linux/skbuff.h>
3258+#include <linux/ip.h>
3259+#include <net/checksum.h>
3260+
3261+#include <linux/netfilter_ipv4/ip_tables.h>
3262+#include <linux/netfilter_ipv4/ipt_TTL.h>
3263+
3264+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
3265+MODULE_DESCRIPTION("IP tables TTL modification module");
3266+MODULE_LICENSE("GPL");
3267+
3268+static unsigned int
3269+ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in,
3270+ const struct net_device *out, unsigned int hooknum,
3271+ const void *targinfo, void *userinfo)
3272+{
3273+ struct iphdr *iph;
3274+ const struct ipt_TTL_info *info = targinfo;
3275+ u_int16_t diffs[2];
3276+ int new_ttl;
3277+
3278+ if (!skb_ip_make_writable(pskb, (*pskb)->len))
3279+ return NF_DROP;
3280+
3281+ iph = (*pskb)->nh.iph;
3282+
3283+ switch (info->mode) {
3284+ case IPT_TTL_SET:
3285+ new_ttl = info->ttl;
3286+ break;
3287+ case IPT_TTL_INC:
3288+ new_ttl = iph->ttl + info->ttl;
3289+ if (new_ttl > 255)
3290+ new_ttl = 255;
3291+ break;
3292+ case IPT_TTL_DEC:
3293+ new_ttl = iph->ttl + info->ttl;
3294+ if (new_ttl < 0)
3295+ new_ttl = 0;
3296+ break;
3297+ default:
3298+ new_ttl = iph->ttl;
3299+ break;
3300+ }
3301+
3302+ if (new_ttl != iph->ttl) {
3303+ diffs[0] = htons(((unsigned)iph->ttl) << 8) ^ 0xFFFF;
3304+ iph->ttl = new_ttl;
3305+ diffs[1] = htons(((unsigned)iph->ttl) << 8);
3306+ iph->check = csum_fold(csum_partial((char *)diffs,
3307+ sizeof(diffs),
3308+ iph->check^0xFFFF));
3309+ (*pskb)->nfcache |= NFC_ALTERED;
3310+ }
3311+
3312+ return IPT_CONTINUE;
3313+}
3314+
3315+static int ipt_ttl_checkentry(const char *tablename,
3316+ const struct ipt_entry *e,
3317+ void *targinfo,
3318+ unsigned int targinfosize,
3319+ unsigned int hook_mask)
3320+{
3321+ struct ipt_TTL_info *info = targinfo;
3322+
3323+ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_TTL_info))) {
3324+ printk(KERN_WARNING "TTL: targinfosize %u != %Zu\n",
3325+ targinfosize,
3326+ IPT_ALIGN(sizeof(struct ipt_TTL_info)));
3327+ return 0;
3328+ }
3329+
3330+ if (strcmp(tablename, "mangle")) {
3331+ printk(KERN_WARNING "TTL: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
3332+ return 0;
3333+ }
3334+
3335+ if (info->mode > IPT_TTL_MAXMODE) {
3336+ printk(KERN_WARNING "TTL: invalid or unknown Mode %u\n",
3337+ info->mode);
3338+ return 0;
3339+ }
3340+
3341+ if ((info->mode != IPT_TTL_SET) && (info->ttl == 0)) {
3342+ printk(KERN_WARNING "TTL: increment/decrement doesn't make sense with value 0\n");
3343+ return 0;
3344+ }
3345+
3346+ return 1;
3347+}
3348+
3349+static struct ipt_target ipt_TTL = {
3350+ .name = "TTL",
3351+ .target = ipt_ttl_target,
3352+ .checkentry = ipt_ttl_checkentry,
3353+ .me = THIS_MODULE
3354+};
3355+
3356+static int __init init(void)
3357+{
3358+ return ipt_register_target(&ipt_TTL);
3359+}
3360+
3361+static void __exit fini(void)
3362+{
3363+ ipt_unregister_target(&ipt_TTL);
3364+}
3365+
3366+module_init(init);
3367+module_exit(fini);
3368diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_connlimit.c linux-2.6.6-rc1/net/ipv4/netfilter/ipt_connlimit.c
3369--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_connlimit.c 1970-01-01 01:00:00.000000000 +0100
3370+++ linux-2.6.6-rc1/net/ipv4/netfilter/ipt_connlimit.c 2004-04-19 10:08:29.000000000 +0200
3371@@ -0,0 +1,230 @@
3372+/*
3373+ * netfilter module to limit the number of parallel tcp
3374+ * connections per IP address.
3375+ * (c) 2000 Gerd Knorr <kraxel@bytesex.org>
3376+ * Nov 2002: Martin Bene <martin.bene@icomedias.com>:
3377+ * only ignore TIME_WAIT or gone connections
3378+ *
3379+ * based on ...
3380+ *
3381+ * Kernel module to match connection tracking information.
3382+ * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
3383+ */
3384+#include <linux/module.h>
3385+#include <linux/skbuff.h>
3386+#include <linux/list.h>
3387+#include <linux/netfilter_ipv4/ip_conntrack.h>
3388+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
3389+#include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
3390+#include <linux/netfilter_ipv4/ip_tables.h>
3391+#include <linux/netfilter_ipv4/ipt_connlimit.h>
3392+
3393+#define DEBUG 0
3394+
3395+MODULE_LICENSE("GPL");
3396+
3397+/* we'll save the tuples of all connections we care about */
3398+struct ipt_connlimit_conn
3399+{
3400+ struct list_head list;
3401+ struct ip_conntrack_tuple tuple;
3402+};
3403+
3404+struct ipt_connlimit_data {
3405+ spinlock_t lock;
3406+ struct list_head iphash[256];
3407+};
3408+
3409+static int ipt_iphash(u_int32_t addr)
3410+{
3411+ int hash;
3412+
3413+ hash = addr & 0xff;
3414+ hash ^= (addr >> 8) & 0xff;
3415+ hash ^= (addr >> 16) & 0xff;
3416+ hash ^= (addr >> 24) & 0xff;
3417+ return hash;
3418+}
3419+
3420+static int count_them(struct ipt_connlimit_data *data,
3421+ u_int32_t addr, u_int32_t mask,
3422+ struct ip_conntrack *ct)
3423+{
3424+#if DEBUG
3425+ const static char *tcp[] = { "none", "established", "syn_sent", "syn_recv",
3426+ "fin_wait", "time_wait", "close", "close_wait",
3427+ "last_ack", "listen" };
3428+#endif
3429+ int addit = 1, matches = 0;
3430+ struct ip_conntrack_tuple tuple;
3431+ struct ip_conntrack_tuple_hash *found;
3432+ struct ipt_connlimit_conn *conn;
3433+ struct list_head *hash,*lh;
3434+
3435+ spin_lock(&data->lock);
3436+ tuple = ct->tuplehash[0].tuple;
3437+ hash = &data->iphash[ipt_iphash(addr & mask)];
3438+
3439+ /* check the saved connections */
3440+ for (lh = hash->next; lh != hash; lh = lh->next) {
3441+ conn = list_entry(lh,struct ipt_connlimit_conn,list);
3442+ found = ip_conntrack_find_get(&conn->tuple,ct);
3443+ if (0 == memcmp(&conn->tuple,&tuple,sizeof(tuple)) &&
3444+ found != NULL &&
3445+ found->ctrack->proto.tcp.state != TCP_CONNTRACK_TIME_WAIT) {
3446+ /* Just to be sure we have it only once in the list.
3447+		   We shouldn't see tuples twice unless someone hooks this
3448+ into a table without "-p tcp --syn" */
3449+ addit = 0;
3450+ }
3451+#if DEBUG
3452+ printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d %s\n",
3453+ ipt_iphash(addr & mask),
3454+ NIPQUAD(conn->tuple.src.ip), ntohs(conn->tuple.src.u.tcp.port),
3455+ NIPQUAD(conn->tuple.dst.ip), ntohs(conn->tuple.dst.u.tcp.port),
3456+ (NULL != found) ? tcp[found->ctrack->proto.tcp.state] : "gone");
3457+#endif
3458+ if (NULL == found) {
3459+ /* this one is gone */
3460+ lh = lh->prev;
3461+ list_del(lh->next);
3462+ kfree(conn);
3463+ continue;
3464+ }
3465+ if (found->ctrack->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT) {
3466+			/* we don't care about connections that are
3467+			   already closed -> ditch it */
3468+ lh = lh->prev;
3469+ list_del(lh->next);
3470+ kfree(conn);
3471+ nf_conntrack_put(&found->ctrack->infos[0]);
3472+ continue;
3473+ }
3474+ if ((addr & mask) == (conn->tuple.src.ip & mask)) {
3475+ /* same source IP address -> be counted! */
3476+ matches++;
3477+ }
3478+ nf_conntrack_put(&found->ctrack->infos[0]);
3479+ }
3480+ if (addit) {
3481+ /* save the new connection in our list */
3482+#if DEBUG
3483+ printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d new\n",
3484+ ipt_iphash(addr & mask),
3485+ NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
3486+ NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
3487+#endif
3488+		conn = kmalloc(sizeof(*conn),GFP_ATOMIC);
3489+		if (NULL == conn) {
+			/* don't leak the hash lock on the allocation-failure path */
+			spin_unlock(&data->lock);
3490+			return -1;
+		}
3491+		memset(conn,0,sizeof(*conn));
3492+ INIT_LIST_HEAD(&conn->list);
3493+ conn->tuple = tuple;
3494+ list_add(&conn->list,hash);
3495+ matches++;
3496+ }
3497+ spin_unlock(&data->lock);
3498+ return matches;
3499+}
3500+
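+/* match callback: look up the conntrack entry for this packet, count live
+ * connections from the same masked source address via count_them(), and
+ * match when the count exceeds info->limit (or, with info->inverse set,
+ * when it does not). */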
3501+static int
3502+match(const struct sk_buff *skb,
3503+ const struct net_device *in,
3504+ const struct net_device *out,
3505+ const void *matchinfo,
3506+ int offset,
3507+ int *hotdrop)
3508+{
3509+ const struct ipt_connlimit_info *info = matchinfo;
3510+ int connections, match;
3511+ struct ip_conntrack *ct;
3512+ enum ip_conntrack_info ctinfo;
3513+
3514+ ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
3515+ if (NULL == ct) {
3516+ printk("ipt_connlimit: Oops: invalid ct state ?\n");
3517+ *hotdrop = 1;
3518+ return 0;
3519+ }
3520+ connections = count_them(info->data,skb->nh.iph->saddr,info->mask,ct);
3521+ if (-1 == connections) {
3522+ printk("ipt_connlimit: Hmm, kmalloc failed :-(\n");
3523+ *hotdrop = 1; /* let's free some memory :-) */
3524+ return 0;
3525+ }
3526+ match = (info->inverse) ? (connections <= info->limit) : (connections > info->limit);
3527+#if DEBUG
3528+ printk("ipt_connlimit: src=%u.%u.%u.%u mask=%u.%u.%u.%u "
3529+ "connections=%d limit=%d match=%s\n",
3530+ NIPQUAD(skb->nh.iph->saddr), NIPQUAD(info->mask),
3531+ connections, info->limit, match ? "yes" : "no");
3532+#endif
3533+
3534+ return match;
3535+}
3536+
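+/* checkentry: reject wrong matchinfo sizes and non-TCP rules, then allocate
+ * and initialise the per-rule hash table and its lock. */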
3537+static int check(const char *tablename,
3538+ const struct ipt_ip *ip,
3539+ void *matchinfo,
3540+ unsigned int matchsize,
3541+ unsigned int hook_mask)
3542+{
3543+ struct ipt_connlimit_info *info = matchinfo;
3544+ int i;
3545+
3546+ /* verify size */
3547+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_connlimit_info)))
3548+ return 0;
3549+
3550+ /* refuse anything but tcp */
3551+ if (ip->proto != IPPROTO_TCP)
3552+ return 0;
3553+
3554+	/* init private data */
3555+	info->data = kmalloc(sizeof(struct ipt_connlimit_data),GFP_KERNEL);
+	if (info->data == NULL)
+		return 0;
3556+	spin_lock_init(&(info->data->lock));
3557+ for (i = 0; i < 256; i++)
3558+ INIT_LIST_HEAD(&(info->data->iphash[i]));
3559+
3560+ return 1;
3561+}
3562+
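+/* destroy: free any connections still remembered in the hash, then the
+ * per-rule data itself. */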
3563+static void destroy(void *matchinfo, unsigned int matchinfosize)
3564+{
3565+ struct ipt_connlimit_info *info = matchinfo;
3566+ struct ipt_connlimit_conn *conn;
3567+ struct list_head *hash;
3568+ int i;
3569+
3570+ /* cleanup */
3571+ for (i = 0; i < 256; i++) {
3572+ hash = &(info->data->iphash[i]);
3573+ while (hash != hash->next) {
3574+ conn = list_entry(hash->next,struct ipt_connlimit_conn,list);
3575+ list_del(hash->next);
3576+ kfree(conn);
3577+ }
3578+ }
3579+ kfree(info->data);
3580+}
3581+
3582+static struct ipt_match connlimit_match = {
3583+ .name = "connlimit",
3584+ .match = &match,
3585+ .checkentry = &check,
3586+ .destroy = &destroy,
3587+ .me = THIS_MODULE
3588+};
3589+
3590+static int __init init(void)
3591+{
3592+ return ipt_register_match(&connlimit_match);
3593+}
3594+
3595+static void __exit fini(void)
3596+{
3597+ ipt_unregister_match(&connlimit_match);
3598+}
3599+
3600+module_init(init);
3601+module_exit(fini);
3602diff -Nur linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_dstlimit.c linux-2.6.6-rc1/net/ipv4/netfilter/ipt_dstlimit.c
3603--- linux-2.6.6-rc1.org/net/ipv4/netfilter/ipt_dstlimit.c 1970-01-01 01:00:00.000000000 +0100
3604+++ linux-2.6.6-rc1/net/ipv4/netfilter/ipt_dstlimit.c 2004-04-19 10:08:30.000000000 +0200
3605@@ -0,0 +1,690 @@
3606+/* iptables match extension to limit the number of packets per second
3607+ * separately for each destination.
3608+ *
3609+ * (C) 2003 by Harald Welte <laforge@netfilter.org>
3610+ *
3611+ * $Id$
3612+ *
3613+ * Development of this code was funded by Astaro AG, http://www.astaro.com/
3614+ *
3615+ * based on ipt_limit.c by:
3616