]> git.pld-linux.org Git - packages/kernel.git/blob - 2.6.x-patch-o-matic-ng-base-20040305.patch
- obsolete
[packages/kernel.git] / 2.6.x-patch-o-matic-ng-base-20040305.patch
1 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_TTL.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_TTL.h
2 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_TTL.h  1970-01-01 00:00:00.000000000 +0000
3 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_TTL.h      2004-03-05 07:40:01.000000000 +0000
4 @@ -0,0 +1,21 @@
5 +/* TTL modification module for IP tables
6 + * (C) 2000 by Harald Welte <laforge@gnumonks.org> */
7 +
8 +#ifndef _IPT_TTL_H
9 +#define _IPT_TTL_H
10 +
11 +enum {
12 +       IPT_TTL_SET = 0,
13 +       IPT_TTL_INC,
14 +       IPT_TTL_DEC
15 +};
16 +
17 +#define IPT_TTL_MAXMODE        IPT_TTL_DEC
18 +
19 +struct ipt_TTL_info {
20 +       u_int8_t        mode;
21 +       u_int8_t        ttl;
22 +};
23 +
24 +
25 +#endif
26 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_connlimit.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_connlimit.h
27 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_connlimit.h    1970-01-01 00:00:00.000000000 +0000
28 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_connlimit.h        2004-03-05 07:40:04.000000000 +0000
29 @@ -0,0 +1,12 @@
30 +#ifndef _IPT_CONNLIMIT_H
31 +#define _IPT_CONNLIMIT_H
32 +
33 +struct ipt_connlimit_data;
34 +
35 +struct ipt_connlimit_info {
36 +       int limit;
37 +       int inverse;
38 +       u_int32_t mask;
39 +       struct ipt_connlimit_data *data;
40 +};
41 +#endif /* _IPT_CONNLIMIT_H */
42 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_dstlimit.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_dstlimit.h
43 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_dstlimit.h     1970-01-01 00:00:00.000000000 +0000
44 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_dstlimit.h 2004-03-05 07:40:06.000000000 +0000
45 @@ -0,0 +1,39 @@
46 +#ifndef _IPT_DSTLIMIT_H
47 +#define _IPT_DSTLIMIT_H
48 +
49 +/* timings are in milliseconds. */
50 +#define IPT_DSTLIMIT_SCALE 10000
51 +/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
52 +   seconds, or one every 59 hours. */
53 +
54 +/* details of this structure hidden by the implementation */
55 +struct ipt_dstlimit_htable;
56 +
57 +#define IPT_DSTLIMIT_HASH_DIP  0x0001
58 +#define IPT_DSTLIMIT_HASH_DPT  0x0002
59 +#define IPT_DSTLIMIT_HASH_SIP  0x0004
60 +
61 +struct dstlimit_cfg {
62 +       u_int32_t mode;   /* bitmask of IPT_DSTLIMIT_HASH_* */
63 +       u_int32_t avg;    /* Average secs between packets * scale */
64 +       u_int32_t burst;  /* Period multiplier for upper limit. */
65 +
66 +       /* user specified */
67 +       u_int32_t size;         /* how many buckets */
68 +       u_int32_t max;          /* max number of entries */
69 +       u_int32_t gc_interval;  /* gc interval */
70 +       u_int32_t expire;       /* when do entries expire? */
71 +};
72 +
73 +struct ipt_dstlimit_info {
74 +       char name [IFNAMSIZ];           /* name */
75 +       struct dstlimit_cfg cfg;
76 +       struct ipt_dstlimit_htable *hinfo;
77 +
78 +       /* Used internally by the kernel */
79 +       union {
80 +               void *ptr;
81 +               struct ipt_dstlimit_info *master;
82 +       } u;
83 +};
84 +#endif /*_IPT_DSTLIMIT_H*/
85 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_fuzzy.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_fuzzy.h
86 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_fuzzy.h        1970-01-01 00:00:00.000000000 +0000
87 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_fuzzy.h    2004-03-05 07:40:08.000000000 +0000
88 @@ -0,0 +1,21 @@
89 +#ifndef _IPT_FUZZY_H
90 +#define _IPT_FUZZY_H
91 +
92 +#include <linux/param.h>
93 +#include <linux/types.h>
94 +
95 +#define MAXFUZZYRATE 10000000
96 +#define MINFUZZYRATE 3
97 +
98 +struct ipt_fuzzy_info {
99 +       u_int32_t minimum_rate;
100 +       u_int32_t maximum_rate;
101 +       u_int32_t packets_total;
102 +       u_int32_t bytes_total;
103 +       u_int32_t previous_time;
104 +       u_int32_t present_time;
105 +       u_int32_t mean_rate;
106 +       u_int8_t acceptance_rate;
107 +};
108 +
109 +#endif /*_IPT_FUZZY_H*/
110 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_ipv4options.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_ipv4options.h
111 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_ipv4options.h  1970-01-01 00:00:00.000000000 +0000
112 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_ipv4options.h      2004-03-05 07:40:09.000000000 +0000
113 @@ -0,0 +1,21 @@
114 +#ifndef __ipt_ipv4options_h_included__
115 +#define __ipt_ipv4options_h_included__
116 +
117 +#define IPT_IPV4OPTION_MATCH_SSRR              0x01  /* For strict source routing */
118 +#define IPT_IPV4OPTION_MATCH_LSRR              0x02  /* For loose source routing */
119 +#define IPT_IPV4OPTION_DONT_MATCH_SRR          0x04  /* any source routing */
120 +#define IPT_IPV4OPTION_MATCH_RR                        0x08  /* For Record route */
121 +#define IPT_IPV4OPTION_DONT_MATCH_RR           0x10
122 +#define IPT_IPV4OPTION_MATCH_TIMESTAMP         0x20  /* For timestamp request */
123 +#define IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP    0x40
124 +#define IPT_IPV4OPTION_MATCH_ROUTER_ALERT      0x80  /* For router-alert */
125 +#define IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT 0x100
126 +#define IPT_IPV4OPTION_MATCH_ANY_OPT           0x200 /* match packet with any option */
127 +#define IPT_IPV4OPTION_DONT_MATCH_ANY_OPT      0x400 /* match packet with no option */
128 +
129 +struct ipt_ipv4options_info {
130 +       u_int16_t options;
131 +};
132 +
133 +
134 +#endif /* __ipt_ipv4options_h_included__ */
135 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_mport.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_mport.h
136 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_mport.h        1970-01-01 00:00:00.000000000 +0000
137 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_mport.h    2004-03-05 07:40:11.000000000 +0000
138 @@ -0,0 +1,24 @@
139 +#ifndef _IPT_MPORT_H
140 +#define _IPT_MPORT_H
141 +#include <linux/netfilter_ipv4/ip_tables.h>
142 +
143 +#define IPT_MPORT_SOURCE (1<<0)
144 +#define IPT_MPORT_DESTINATION (1<<1)
145 +#define IPT_MPORT_EITHER (IPT_MPORT_SOURCE|IPT_MPORT_DESTINATION)
146 +
147 +#define IPT_MULTI_PORTS        15
148 +
149 +/* Must fit inside union ipt_matchinfo: 32 bytes */
150 +/* every entry in ports[] except for the last one has one bit in pflags
151 + * associated with it. If this bit is set, the port is the first port of
152 + * a portrange, with the next entry being the last.
153 + * End of list is marked with pflags bit set and port=65535.
154 + * If 14 ports are used (last one does not have a pflag), the last port
155 + * is repeated to fill the last entry in ports[] */
156 +struct ipt_mport
157 +{
158 +       u_int8_t flags:2;                       /* Type of comparison */
159 +       u_int16_t pflags:14;                    /* Port flags */
160 +       u_int16_t ports[IPT_MULTI_PORTS];       /* Ports */
161 +};
162 +#endif /*_IPT_MPORT_H*/
163 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_nth.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_nth.h
164 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_nth.h  1970-01-01 00:00:00.000000000 +0000
165 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_nth.h      2004-03-05 07:40:13.000000000 +0000
166 @@ -0,0 +1,19 @@
167 +#ifndef _IPT_NTH_H
168 +#define _IPT_NTH_H
169 +
170 +#include <linux/param.h>
171 +#include <linux/types.h>
172 +
173 +#ifndef IPT_NTH_NUM_COUNTERS
174 +#define IPT_NTH_NUM_COUNTERS 16
175 +#endif
176 +
177 +struct ipt_nth_info {
178 +       u_int8_t every;
179 +       u_int8_t not;
180 +       u_int8_t startat;
181 +       u_int8_t counter;
182 +       u_int8_t packet;
183 +};
184 +
185 +#endif /*_IPT_NTH_H*/
186 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_quota.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_quota.h
187 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_quota.h        1970-01-01 00:00:00.000000000 +0000
188 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_quota.h    2004-03-05 07:40:14.000000000 +0000
189 @@ -0,0 +1,11 @@
190 +#ifndef _IPT_QUOTA_H
191 +#define _IPT_QUOTA_H
192 +
193 +/* print debug info in both kernel/netfilter module & iptable library */
194 +//#define DEBUG_IPT_QUOTA
195 +
196 +struct ipt_quota_info {
197 +        u_int64_t quota;
198 +};
199 +
200 +#endif /*_IPT_QUOTA_H*/
201 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_realm.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_realm.h
202 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_realm.h        1970-01-01 00:00:00.000000000 +0000
203 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_realm.h    2004-03-05 07:40:22.000000000 +0000
204 @@ -0,0 +1,9 @@
205 +#ifndef _IPT_REALM_H
206 +#define _IPT_REALM_H
207 +
208 +struct ipt_realm_info {
209 +       u_int32_t id;
210 +       u_int32_t mask;
211 +       u_int8_t invert;
212 +};
213 +#endif /*_IPT_REALM_H*/
214 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_sctp.h linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_sctp.h
215 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv4/ipt_sctp.h 1970-01-01 00:00:00.000000000 +0000
216 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv4/ipt_sctp.h     2004-03-05 07:40:24.000000000 +0000
217 @@ -0,0 +1,107 @@
218 +#ifndef _IPT_SCTP_H_
219 +#define _IPT_SCTP_H_
220 +
221 +#define IPT_SCTP_SRC_PORTS             0x01
222 +#define IPT_SCTP_DEST_PORTS            0x02
223 +#define IPT_SCTP_CHUNK_TYPES           0x04
224 +
225 +#define IPT_SCTP_VALID_FLAGS           0x07
226 +
227 +#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
228 +
229 +
230 +struct ipt_sctp_flag_info {
231 +       u_int8_t chunktype;
232 +       u_int8_t flag;
233 +       u_int8_t flag_mask;
234 +};
235 +
236 +#define IPT_NUM_SCTP_FLAGS     4
237 +
238 +struct ipt_sctp_info {
239 +       u_int16_t dpts[2];  /* Min, Max */
240 +       u_int16_t spts[2];  /* Min, Max */
241 +
242 +       u_int32_t chunkmap[256 / sizeof (u_int32_t)];  /* Bit mask of chunks to be matched according to RFC 2960 */
243 +
244 +#define SCTP_CHUNK_MATCH_ANY   0x01  /* Match if any of the chunk types are present */
245 +#define SCTP_CHUNK_MATCH_ALL   0x02  /* Match if all of the chunk types are present */
246 +#define SCTP_CHUNK_MATCH_ONLY  0x04  /* Match if these are the only chunk types present */
247 +
248 +       u_int32_t chunk_match_type;
249 +       struct ipt_sctp_flag_info flag_info[IPT_NUM_SCTP_FLAGS];
250 +       int flag_count;
251 +
252 +       u_int32_t flags;
253 +       u_int32_t invflags;
254 +};
255 +
256 +#define bytes(type) (sizeof(type) * 8)
257 +
258 +#define SCTP_CHUNKMAP_SET(chunkmap, type)              \
259 +       do {                                            \
260 +               chunkmap[type / bytes(u_int32_t)] |=    \
261 +                       1 << (type % bytes(u_int32_t)); \
262 +       } while (0)
263 +
264 +#define SCTP_CHUNKMAP_CLEAR(chunkmap, type)                    \
265 +       do {                                                    \
266 +               chunkmap[type / bytes(u_int32_t)] &=            \
267 +                       ~(1 << (type % bytes(u_int32_t)));      \
268 +       } while (0)
269 +
270 +#define SCTP_CHUNKMAP_IS_SET(chunkmap, type)                   \
271 +({                                                             \
272 +       (chunkmap[type / bytes (u_int32_t)] &                   \
273 +               (1 << (type % bytes (u_int32_t)))) ? 1: 0;      \
274 +})
275 +
276 +#define SCTP_CHUNKMAP_RESET(chunkmap)                          \
277 +       do {                                                    \
278 +               int i;                                          \
279 +               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
280 +                       chunkmap[i] = 0;                        \
281 +       } while (0)
282 +
283 +#define SCTP_CHUNKMAP_SET_ALL(chunkmap)                        \
284 +       do {                                                    \
285 +               int i;                                          \
286 +               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
287 +                       chunkmap[i] = ~0;                       \
288 +       } while (0)
289 +
290 +#define SCTP_CHUNKMAP_COPY(destmap, srcmap)                    \
291 +       do {                                                    \
292 +               int i;                                          \
293 +               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
294 +                       destmap[i] = srcmap[i];                 \
295 +       } while (0)
296 +
297 +#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap)               \
298 +({                                                     \
299 +       int i;                                          \
300 +       int flag = 1;                                   \
301 +       for (i = 0; i < ELEMCOUNT(chunkmap); i++) {     \
302 +               if (chunkmap[i]) {                      \
303 +                       flag = 0;                       \
304 +                       break;                          \
305 +               }                                       \
306 +       }                                               \
307 +        flag;                                          \
308 +})
309 +
310 +#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap)             \
311 +({                                                     \
312 +       int i;                                          \
313 +       int flag = 1;                                   \
314 +       for (i = 0; i < ELEMCOUNT(chunkmap); i++) {     \
315 +               if (chunkmap[i] != ~0) {                \
316 +                       flag = 0;                       \
317 +                               break;                  \
318 +               }                                       \
319 +       }                                               \
320 +        flag;                                          \
321 +})
322 +
323 +#endif /* _IPT_SCTP_H_ */
324 +
325 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_HL.h linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_HL.h
326 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_HL.h  1970-01-01 00:00:00.000000000 +0000
327 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_HL.h      2004-03-05 07:39:53.000000000 +0000
328 @@ -0,0 +1,22 @@
329 +/* Hop Limit modification module for ip6tables
330 + * Maciej Soltysiak <solt@dns.toxicfilms.tv>
331 + * Based on HW's TTL module */
332 +
333 +#ifndef _IP6T_HOPLIMIT_H
334 +#define _IP6T_HOPLIMIT_H
335 +
336 +enum {
337 +       IP6T_HOPLIMIT_SET = 0,
338 +       IP6T_HOPLIMIT_INC,
339 +       IP6T_HOPLIMIT_DEC
340 +};
341 +
342 +#define IP6T_HOPLIMIT_MAXMODE  IP6T_HOPLIMIT_DEC
343 +
344 +struct ip6t_HOPLIMIT_info {
345 +       u_int8_t        mode;
346 +       u_int8_t        hop_limit;
347 +};
348 +
349 +
350 +#endif
351 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_REJECT.h linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_REJECT.h
352 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_REJECT.h      2004-03-04 06:16:34.000000000 +0000
353 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_REJECT.h  2004-03-05 07:39:59.000000000 +0000
354 @@ -2,15 +2,17 @@
355  #define _IP6T_REJECT_H
356  
357  enum ip6t_reject_with {
358 -       IP6T_ICMP_NET_UNREACHABLE,
359 -       IP6T_ICMP_HOST_UNREACHABLE,
360 -       IP6T_ICMP_PROT_UNREACHABLE,
361 -       IP6T_ICMP_PORT_UNREACHABLE,
362 -       IP6T_ICMP_ECHOREPLY
363 +       IP6T_ICMP6_NO_ROUTE,
364 +       IP6T_ICMP6_ADM_PROHIBITED,
365 +       IP6T_ICMP6_NOT_NEIGHBOUR,
366 +       IP6T_ICMP6_ADDR_UNREACH,
367 +       IP6T_ICMP6_PORT_UNREACH,
368 +       IP6T_ICMP6_ECHOREPLY,
369 +       IP6T_TCP_RESET
370  };
371  
372  struct ip6t_reject_info {
373         enum ip6t_reject_with with;      /* reject type */
374  };
375  
376 -#endif /*_IPT_REJECT_H*/
377 +#endif /*_IP6T_REJECT_H*/
378 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_fuzzy.h
379 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h       1970-01-01 00:00:00.000000000 +0000
380 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_fuzzy.h   2004-03-05 07:40:08.000000000 +0000
381 @@ -0,0 +1,21 @@
382 +#ifndef _IP6T_FUZZY_H
383 +#define _IP6T_FUZZY_H
384 +
385 +#include <linux/param.h>
386 +#include <linux/types.h>
387 +
388 +#define MAXFUZZYRATE 10000000
389 +#define MINFUZZYRATE 3
390 +
391 +struct ip6t_fuzzy_info {
392 +       u_int32_t minimum_rate;
393 +       u_int32_t maximum_rate;
394 +       u_int32_t packets_total;
395 +       u_int32_t bytes_total;
396 +       u_int32_t previous_time;
397 +       u_int32_t present_time;
398 +       u_int32_t mean_rate;
399 +       u_int8_t acceptance_rate;
400 +};
401 +
402 +#endif /*_IP6T_FUZZY_H*/
403 diff -Nur linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_nth.h linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_nth.h
404 --- linux-2.6.4-rc2.org/include/linux/netfilter_ipv6/ip6t_nth.h 1970-01-01 00:00:00.000000000 +0000
405 +++ linux-2.6.4-rc2/include/linux/netfilter_ipv6/ip6t_nth.h     2004-03-05 07:40:13.000000000 +0000
406 @@ -0,0 +1,19 @@
407 +#ifndef _IP6T_NTH_H
408 +#define _IP6T_NTH_H
409 +
410 +#include <linux/param.h>
411 +#include <linux/types.h>
412 +
413 +#ifndef IP6T_NTH_NUM_COUNTERS
414 +#define IP6T_NTH_NUM_COUNTERS 16
415 +#endif
416 +
417 +struct ip6t_nth_info {
418 +       u_int8_t every;
419 +       u_int8_t not;
420 +       u_int8_t startat;
421 +       u_int8_t counter;
422 +       u_int8_t packet;
423 +};
424 +
425 +#endif /*_IP6T_NTH_H*/
426 diff -Nur linux-2.6.4-rc2.org/net/core/netfilter.c linux-2.6.4-rc2/net/core/netfilter.c
427 --- linux-2.6.4-rc2.org/net/core/netfilter.c    2004-03-04 06:16:45.000000000 +0000
428 +++ linux-2.6.4-rc2/net/core/netfilter.c        2004-03-05 07:39:43.000000000 +0000
429 @@ -58,6 +58,10 @@
430  } queue_handler[NPROTO];
431  static rwlock_t queue_handler_lock = RW_LOCK_UNLOCKED;
432  
433 +/**
434 + * nf_register_hook - Register with a netfilter hook
435 + * @reg: Hook operations to be registered
436 + */
437  int nf_register_hook(struct nf_hook_ops *reg)
438  {
439         struct list_head *i;
440 @@ -74,6 +78,10 @@
441         return 0;
442  }
443  
444 +/**
445 + * nf_unregister_hook - Unregister from a netfilter hook
446 + * @reg: hook operations to be unregistered
447 + */
448  void nf_unregister_hook(struct nf_hook_ops *reg)
449  {
450         spin_lock_bh(&nf_hook_lock);
451 @@ -386,6 +394,18 @@
452         return NF_ACCEPT;
453  }
454  
455 +/**
456 + * nf_register_queue_handler - Register a queue handler with netfilter
457 + * @pf: protocol family
458 + * @outfn: function called by core to enqueue a packet
459 + * @data: opaque parameter, passed through
460 + *
461 + * This function registers a queue handler with netfilter.  There can only
462 + * be one queue handler for every protocol family.
463 + *
464 + * A queue handler _must_ reinject every packet via nf_reinject, no
465 + * matter what.
466 + */
467  int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
468  {      
469         int ret;
470 @@ -403,7 +423,12 @@
471         return ret;
472  }
473  
474 -/* The caller must flush their queue before this */
475 +/**
476 + * nf_unregister_queue_handler - Unregister queue handler from netfilter
477 + * @pf: protocol family
478 + *
479 + * The caller must flush their queue before unregistering
480 + */
481  int nf_unregister_queue_handler(int pf)
482  {
483         write_lock_bh(&queue_handler_lock);
484 @@ -546,6 +571,15 @@
485         return ret;
486  }
487  
488 +/**
489 + * nf_reinject - Reinject a packet from a queue handler
490 + * @skb: the packet to be reinjected
491 + * @info: info which was passed to the outfn() of the queue handler
492 + * @verdict: verdict (NF_ACCEPT, ...) for this packet
493 + *
494 + * This is the function called by a queue handler to reinject a
495 + * packet.
496 + */
497  void nf_reinject(struct sk_buff *skb, struct nf_info *info,
498                  unsigned int verdict)
499  {
500 diff -Nur linux-2.6.4-rc2.org/net/core/netfilter.c.orig linux-2.6.4-rc2/net/core/netfilter.c.orig
501 --- linux-2.6.4-rc2.org/net/core/netfilter.c.orig       1970-01-01 00:00:00.000000000 +0000
502 +++ linux-2.6.4-rc2/net/core/netfilter.c.orig   2004-03-04 06:16:45.000000000 +0000
503 @@ -0,0 +1,772 @@
504 +/* netfilter.c: look after the filters for various protocols. 
505 + * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
506 + *
507 + * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
508 + * way.
509 + *
510 + * Rusty Russell (C)2000 -- This code is GPL.
511 + *
512 + * February 2000: Modified by James Morris to have 1 queue per protocol.
513 + * 15-Mar-2000:   Added NF_REPEAT --RR.
514 + */
515 +#include <linux/config.h>
516 +#include <linux/netfilter.h>
517 +#include <net/protocol.h>
518 +#include <linux/init.h>
519 +#include <linux/skbuff.h>
520 +#include <linux/wait.h>
521 +#include <linux/module.h>
522 +#include <linux/interrupt.h>
523 +#include <linux/if.h>
524 +#include <linux/netdevice.h>
525 +#include <linux/inetdevice.h>
526 +#include <linux/tcp.h>
527 +#include <linux/udp.h>
528 +#include <linux/icmp.h>
529 +#include <net/sock.h>
530 +#include <net/route.h>
531 +#include <linux/ip.h>
532 +
533 +/* In this code, we can be waiting indefinitely for userspace to
534 + * service a packet if a hook returns NF_QUEUE.  We could keep a count
535 + * of skbuffs queued for userspace, and not deregister a hook unless
536 + * this is zero, but that sucks.  Now, we simply check when the
537 + * packets come back: if the hook is gone, the packet is discarded. */
538 +#ifdef CONFIG_NETFILTER_DEBUG
539 +#define NFDEBUG(format, args...)  printk(format , ## args)
540 +#else
541 +#define NFDEBUG(format, args...)
542 +#endif
543 +
544 +/* Sockopts only registered and called from user context, so
545 +   net locking would be overkill.  Also, [gs]etsockopt calls may
546 +   sleep. */
547 +static DECLARE_MUTEX(nf_sockopt_mutex);
548 +
549 +struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
550 +static LIST_HEAD(nf_sockopts);
551 +static spinlock_t nf_hook_lock = SPIN_LOCK_UNLOCKED;
552 +
553 +/* 
554 + * A queue handler may be registered for each protocol.  Each is protected by
555 + * long term mutex.  The handler must provide an outfn() to accept packets
556 + * for queueing and must reinject all packets it receives, no matter what.
557 + */
558 +static struct nf_queue_handler_t {
559 +       nf_queue_outfn_t outfn;
560 +       void *data;
561 +} queue_handler[NPROTO];
562 +static rwlock_t queue_handler_lock = RW_LOCK_UNLOCKED;
563 +
564 +int nf_register_hook(struct nf_hook_ops *reg)
565 +{
566 +       struct list_head *i;
567 +
568 +       spin_lock_bh(&nf_hook_lock);
569 +       list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
570 +               if (reg->priority < ((struct nf_hook_ops *)i)->priority)
571 +                       break;
572 +       }
573 +       list_add_rcu(&reg->list, i->prev);
574 +       spin_unlock_bh(&nf_hook_lock);
575 +
576 +       synchronize_net();
577 +       return 0;
578 +}
579 +
580 +void nf_unregister_hook(struct nf_hook_ops *reg)
581 +{
582 +       spin_lock_bh(&nf_hook_lock);
583 +       list_del_rcu(&reg->list);
584 +       spin_unlock_bh(&nf_hook_lock);
585 +
586 +       synchronize_net();
587 +}
588 +
589 +/* Do exclusive ranges overlap? */
590 +static inline int overlap(int min1, int max1, int min2, int max2)
591 +{
592 +       return max1 > min2 && min1 < max2;
593 +}
594 +
595 +/* Functions to register sockopt ranges (exclusive). */
596 +int nf_register_sockopt(struct nf_sockopt_ops *reg)
597 +{
598 +       struct list_head *i;
599 +       int ret = 0;
600 +
601 +       if (down_interruptible(&nf_sockopt_mutex) != 0)
602 +               return -EINTR;
603 +
604 +       list_for_each(i, &nf_sockopts) {
605 +               struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
606 +               if (ops->pf == reg->pf
607 +                   && (overlap(ops->set_optmin, ops->set_optmax, 
608 +                               reg->set_optmin, reg->set_optmax)
609 +                       || overlap(ops->get_optmin, ops->get_optmax, 
610 +                                  reg->get_optmin, reg->get_optmax))) {
611 +                       NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
612 +                               ops->set_optmin, ops->set_optmax, 
613 +                               ops->get_optmin, ops->get_optmax, 
614 +                               reg->set_optmin, reg->set_optmax,
615 +                               reg->get_optmin, reg->get_optmax);
616 +                       ret = -EBUSY;
617 +                       goto out;
618 +               }
619 +       }
620 +
621 +       list_add(&reg->list, &nf_sockopts);
622 +out:
623 +       up(&nf_sockopt_mutex);
624 +       return ret;
625 +}
626 +
627 +void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
628 +{
629 +       /* No point being interruptible: we're probably in cleanup_module() */
630 + restart:
631 +       down(&nf_sockopt_mutex);
632 +       if (reg->use != 0) {
633 +               /* To be woken by nf_sockopt call... */
634 +               /* FIXME: Stuart Young's name appears gratuitously. */
635 +               set_current_state(TASK_UNINTERRUPTIBLE);
636 +               reg->cleanup_task = current;
637 +               up(&nf_sockopt_mutex);
638 +               schedule();
639 +               goto restart;
640 +       }
641 +       list_del(&reg->list);
642 +       up(&nf_sockopt_mutex);
643 +}
644 +
645 +#ifdef CONFIG_NETFILTER_DEBUG
646 +#include <net/ip.h>
647 +#include <net/tcp.h>
648 +#include <linux/netfilter_ipv4.h>
649 +
650 +static void debug_print_hooks_ip(unsigned int nf_debug)
651 +{
652 +       if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
653 +               printk("PRE_ROUTING ");
654 +               nf_debug ^= (1 << NF_IP_PRE_ROUTING);
655 +       }
656 +       if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
657 +               printk("LOCAL_IN ");
658 +               nf_debug ^= (1 << NF_IP_LOCAL_IN);
659 +       }
660 +       if (nf_debug & (1 << NF_IP_FORWARD)) {
661 +               printk("FORWARD ");
662 +               nf_debug ^= (1 << NF_IP_FORWARD);
663 +       }
664 +       if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
665 +               printk("LOCAL_OUT ");
666 +               nf_debug ^= (1 << NF_IP_LOCAL_OUT);
667 +       }
668 +       if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
669 +               printk("POST_ROUTING ");
670 +               nf_debug ^= (1 << NF_IP_POST_ROUTING);
671 +       }
672 +       if (nf_debug)
673 +               printk("Crap bits: 0x%04X", nf_debug);
674 +       printk("\n");
675 +}
676 +
677 +void nf_dump_skb(int pf, struct sk_buff *skb)
678 +{
679 +       printk("skb: pf=%i %s dev=%s len=%u\n", 
680 +              pf,
681 +              skb->sk ? "(owned)" : "(unowned)",
682 +              skb->dev ? skb->dev->name : "(no dev)",
683 +              skb->len);
684 +       switch (pf) {
685 +       case PF_INET: {
686 +               const struct iphdr *ip = skb->nh.iph;
687 +               __u32 *opt = (__u32 *) (ip + 1);
688 +               int opti;
689 +               __u16 src_port = 0, dst_port = 0;
690 +
691 +               if (ip->protocol == IPPROTO_TCP
692 +                   || ip->protocol == IPPROTO_UDP) {
693 +                       struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
694 +                       src_port = ntohs(tcp->source);
695 +                       dst_port = ntohs(tcp->dest);
696 +               }
697 +       
698 +               printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
699 +                      " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
700 +                      ip->protocol, NIPQUAD(ip->saddr),
701 +                      src_port, NIPQUAD(ip->daddr),
702 +                      dst_port,
703 +                      ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
704 +                      ntohs(ip->frag_off), ip->ttl);
705 +
706 +               for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
707 +                       printk(" O=0x%8.8X", *opt++);
708 +               printk("\n");
709 +       }
710 +       }
711 +}
712 +
713 +void nf_debug_ip_local_deliver(struct sk_buff *skb)
714 +{
715 +       /* If it's a loopback packet, it must have come through
716 +        * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and
717 +        * NF_IP_LOCAL_IN.  Otherwise, must have gone through
718 +        * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING.  */
719 +       if (!skb->dev) {
720 +               printk("ip_local_deliver: skb->dev is NULL.\n");
721 +       }
722 +       else if (strcmp(skb->dev->name, "lo") == 0) {
723 +               if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
724 +                                     | (1 << NF_IP_POST_ROUTING)
725 +                                     | (1 << NF_IP_PRE_ROUTING)
726 +                                     | (1 << NF_IP_LOCAL_IN))) {
727 +                       printk("ip_local_deliver: bad loopback skb: ");
728 +                       debug_print_hooks_ip(skb->nf_debug);
729 +                       nf_dump_skb(PF_INET, skb);
730 +               }
731 +       }
732 +       else {
733 +               if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
734 +                                     | (1<<NF_IP_LOCAL_IN))) {
735 +                       printk("ip_local_deliver: bad non-lo skb: ");
736 +                       debug_print_hooks_ip(skb->nf_debug);
737 +                       nf_dump_skb(PF_INET, skb);
738 +               }
739 +       }
740 +}
741 +
742 +void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
743 +{
744 +       if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
745 +                                | (1 << NF_IP_POST_ROUTING))) {
746 +               printk("ip_dev_loopback_xmit: bad owned skb = %p: ", 
747 +                      newskb);
748 +               debug_print_hooks_ip(newskb->nf_debug);
749 +               nf_dump_skb(PF_INET, newskb);
750 +       }
751 +       /* Clear to avoid confusing input check */
752 +       newskb->nf_debug = 0;
753 +}
754 +
755 +void nf_debug_ip_finish_output2(struct sk_buff *skb)
756 +{
757 +       /* If it's owned, it must have gone through the
758 +        * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING.
759 +        * Otherwise, must have gone through
760 +        * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING.
761 +        */
762 +       if (skb->sk) {
763 +               if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
764 +                                     | (1 << NF_IP_POST_ROUTING))) {
765 +                       printk("ip_finish_output: bad owned skb = %p: ", skb);
766 +                       debug_print_hooks_ip(skb->nf_debug);
767 +                       nf_dump_skb(PF_INET, skb);
768 +               }
769 +       } else {
770 +               if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
771 +                                     | (1 << NF_IP_FORWARD)
772 +                                     | (1 << NF_IP_POST_ROUTING))) {
773 +                       /* Fragments, entunnelled packets, TCP RSTs
774 +                           generated by ipt_REJECT will have no
775 +                           owners, but still may be local */
776 +                       if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
777 +                                             | (1 << NF_IP_POST_ROUTING))){
778 +                               printk("ip_finish_output:"
779 +                                      " bad unowned skb = %p: ",skb);
780 +                               debug_print_hooks_ip(skb->nf_debug);
781 +                               nf_dump_skb(PF_INET, skb);
782 +                       }
783 +               }
784 +       }
785 +}
786 +#endif /*CONFIG_NETFILTER_DEBUG*/
787 +
788 +/* Call get/setsockopt() */
789 +static int nf_sockopt(struct sock *sk, int pf, int val, 
790 +                     char *opt, int *len, int get)
791 +{
792 +       struct list_head *i;
793 +       struct nf_sockopt_ops *ops;
794 +       int ret;
795 +
796 +       if (down_interruptible(&nf_sockopt_mutex) != 0)
797 +               return -EINTR;
798 +
799 +       list_for_each(i, &nf_sockopts) {
800 +               ops = (struct nf_sockopt_ops *)i;
801 +               if (ops->pf == pf) {
802 +                       if (get) {
803 +                               if (val >= ops->get_optmin
804 +                                   && val < ops->get_optmax) {
805 +                                       ops->use++;
806 +                                       up(&nf_sockopt_mutex);
807 +                                       ret = ops->get(sk, val, opt, len);
808 +                                       goto out;
809 +                               }
810 +                       } else {
811 +                               if (val >= ops->set_optmin
812 +                                   && val < ops->set_optmax) {
813 +                                       ops->use++;
814 +                                       up(&nf_sockopt_mutex);
815 +                                       ret = ops->set(sk, val, opt, *len);
816 +                                       goto out;
817 +                               }
818 +                       }
819 +               }
820 +       }
821 +       up(&nf_sockopt_mutex);
822 +       return -ENOPROTOOPT;
823 +       
824 + out:
825 +       down(&nf_sockopt_mutex);
826 +       ops->use--;
827 +       if (ops->cleanup_task)
828 +               wake_up_process(ops->cleanup_task);
829 +       up(&nf_sockopt_mutex);
830 +       return ret;
831 +}
832 +
833 +int nf_setsockopt(struct sock *sk, int pf, int val, char *opt,
834 +                 int len)
835 +{
836 +       return nf_sockopt(sk, pf, val, opt, &len, 0);
837 +}
838 +
839 +int nf_getsockopt(struct sock *sk, int pf, int val, char *opt, int *len)
840 +{
841 +       return nf_sockopt(sk, pf, val, opt, len, 1);
842 +}
843 +
844 +static unsigned int nf_iterate(struct list_head *head,
845 +                              struct sk_buff **skb,
846 +                              int hook,
847 +                              const struct net_device *indev,
848 +                              const struct net_device *outdev,
849 +                              struct list_head **i,
850 +                              int (*okfn)(struct sk_buff *),
851 +                              int hook_thresh)
852 +{
853 +       /*
854 +        * The caller must not block between calls to this
855 +        * function because of risk of continuing from deleted element.
856 +        */
857 +       list_for_each_continue_rcu(*i, head) {
858 +               struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
859 +
860 +               if (hook_thresh > elem->priority)
861 +                       continue;
862 +
863 +               /* Optimization: we don't need to hold module
864 +                   reference here, since function can't sleep. --RR */
865 +               switch (elem->hook(hook, skb, indev, outdev, okfn)) {
866 +               case NF_QUEUE:
867 +                       return NF_QUEUE;
868 +
869 +               case NF_STOLEN:
870 +                       return NF_STOLEN;
871 +
872 +               case NF_DROP:
873 +                       return NF_DROP;
874 +
875 +               case NF_REPEAT:
876 +                       *i = (*i)->prev;
877 +                       break;
878 +
879 +#ifdef CONFIG_NETFILTER_DEBUG
880 +               case NF_ACCEPT:
881 +                       break;
882 +
883 +               default:
884 +                       NFDEBUG("Evil return from %p(%u).\n", 
885 +                               elem->hook, hook);
886 +#endif
887 +               }
888 +       }
889 +       return NF_ACCEPT;
890 +}
891 +
892 +int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
893 +{      
894 +       int ret;
895 +
896 +       write_lock_bh(&queue_handler_lock);
897 +       if (queue_handler[pf].outfn)
898 +               ret = -EBUSY;
899 +       else {
900 +               queue_handler[pf].outfn = outfn;
901 +               queue_handler[pf].data = data;
902 +               ret = 0;
903 +       }
904 +       write_unlock_bh(&queue_handler_lock);
905 +
906 +       return ret;
907 +}
908 +
909 +/* The caller must flush their queue before this */
910 +int nf_unregister_queue_handler(int pf)
911 +{
912 +       write_lock_bh(&queue_handler_lock);
913 +       queue_handler[pf].outfn = NULL;
914 +       queue_handler[pf].data = NULL;
915 +       write_unlock_bh(&queue_handler_lock);
916 +       
917 +       return 0;
918 +}
919 +
920 +/* 
921 + * Any packet that leaves via this function must come back 
922 + * through nf_reinject().
923 + */
924 +static int nf_queue(struct sk_buff *skb, 
925 +                   struct list_head *elem, 
926 +                   int pf, unsigned int hook,
927 +                   struct net_device *indev,
928 +                   struct net_device *outdev,
929 +                   int (*okfn)(struct sk_buff *))
930 +{
931 +       int status;
932 +       struct nf_info *info;
933 +#ifdef CONFIG_BRIDGE_NETFILTER
934 +       struct net_device *physindev = NULL;
935 +       struct net_device *physoutdev = NULL;
936 +#endif
937 +
938 +       /* QUEUE == DROP if noone is waiting, to be safe. */
939 +       read_lock(&queue_handler_lock);
940 +       if (!queue_handler[pf].outfn) {
941 +               read_unlock(&queue_handler_lock);
942 +               kfree_skb(skb);
943 +               return 1;
944 +       }
945 +
946 +       info = kmalloc(sizeof(*info), GFP_ATOMIC);
947 +       if (!info) {
948 +               if (net_ratelimit())
949 +                       printk(KERN_ERR "OOM queueing packet %p\n",
950 +                              skb);
951 +               read_unlock(&queue_handler_lock);
952 +               kfree_skb(skb);
953 +               return 1;
954 +       }
955 +
956 +       *info = (struct nf_info) { 
957 +               (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
958 +
959 +       /* If it's going away, ignore hook. */
960 +       if (!try_module_get(info->elem->owner)) {
961 +               read_unlock(&queue_handler_lock);
962 +               kfree(info);
963 +               return 0;
964 +       }
965 +
966 +       /* Bump dev refs so they don't vanish while packet is out */
967 +       if (indev) dev_hold(indev);
968 +       if (outdev) dev_hold(outdev);
969 +
970 +#ifdef CONFIG_BRIDGE_NETFILTER
971 +       if (skb->nf_bridge) {
972 +               physindev = skb->nf_bridge->physindev;
973 +               if (physindev) dev_hold(physindev);
974 +               physoutdev = skb->nf_bridge->physoutdev;
975 +               if (physoutdev) dev_hold(physoutdev);
976 +       }
977 +#endif
978 +
979 +       status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
980 +       read_unlock(&queue_handler_lock);
981 +
982 +       if (status < 0) {
983 +               /* James M doesn't say fuck enough. */
984 +               if (indev) dev_put(indev);
985 +               if (outdev) dev_put(outdev);
986 +#ifdef CONFIG_BRIDGE_NETFILTER
987 +               if (physindev) dev_put(physindev);
988 +               if (physoutdev) dev_put(physoutdev);
989 +#endif
990 +               module_put(info->elem->owner);
991 +               kfree(info);
992 +               kfree_skb(skb);
993 +               return 1;
994 +       }
995 +       return 1;
996 +}
997 +
998 +int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
999 +                struct net_device *indev,
1000 +                struct net_device *outdev,
1001 +                int (*okfn)(struct sk_buff *),
1002 +                int hook_thresh)
1003 +{
1004 +       struct list_head *elem;
1005 +       unsigned int verdict;
1006 +       int ret = 0;
1007 +
1008 +       if (skb->ip_summed == CHECKSUM_HW) {
1009 +               if (outdev == NULL) {
1010 +                       skb->ip_summed = CHECKSUM_NONE;
1011 +               } else {
1012 +                       skb_checksum_help(skb);
1013 +               }
1014 +       }
1015 +
1016 +       /* We may already have this, but read-locks nest anyway */
1017 +       rcu_read_lock();
1018 +
1019 +#ifdef CONFIG_NETFILTER_DEBUG
1020 +       if (skb->nf_debug & (1 << hook)) {
1021 +               printk("nf_hook: hook %i already set.\n", hook);
1022 +               nf_dump_skb(pf, skb);
1023 +       }
1024 +       skb->nf_debug |= (1 << hook);
1025 +#endif
1026 +
1027 +       elem = &nf_hooks[pf][hook];
1028 + next_hook:
1029 +       verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
1030 +                            outdev, &elem, okfn, hook_thresh);
1031 +       if (verdict == NF_QUEUE) {
1032 +               NFDEBUG("nf_hook: Verdict = QUEUE.\n");
1033 +               if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn))
1034 +                       goto next_hook;
1035 +       }
1036 +
1037 +       switch (verdict) {
1038 +       case NF_ACCEPT:
1039 +               ret = okfn(skb);
1040 +               break;
1041 +
1042 +       case NF_DROP:
1043 +               kfree_skb(skb);
1044 +               ret = -EPERM;
1045 +               break;
1046 +       }
1047 +
1048 +       rcu_read_unlock();
1049 +       return ret;
1050 +}
1051 +
1052 +void nf_reinject(struct sk_buff *skb, struct nf_info *info,
1053 +                unsigned int verdict)
1054 +{
1055 +       struct list_head *elem = &info->elem->list;
1056 +       struct list_head *i;
1057 +
1058 +       rcu_read_lock();
1059 +
1060 +       /* Release those devices we held, or Alexey will kill me. */
1061 +       if (info->indev) dev_put(info->indev);
1062 +       if (info->outdev) dev_put(info->outdev);
1063 +#ifdef CONFIG_BRIDGE_NETFILTER
1064 +       if (skb->nf_bridge) {
1065 +               if (skb->nf_bridge->physindev)
1066 +                       dev_put(skb->nf_bridge->physindev);
1067 +               if (skb->nf_bridge->physoutdev)
1068 +                       dev_put(skb->nf_bridge->physoutdev);
1069 +       }
1070 +#endif
1071 +
1072 +       /* Drop reference to owner of hook which queued us. */
1073 +       module_put(info->elem->owner);
1074 +
1075 +       list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
1076 +               if (i == elem) 
1077 +                       break;
1078 +       }
1079 +  
1080 +       if (elem == &nf_hooks[info->pf][info->hook]) {
1081 +               /* The module which sent it to userspace is gone. */
1082 +               NFDEBUG("%s: module disappeared, dropping packet.\n",
1083 +                       __FUNCTION__);
1084 +               verdict = NF_DROP;
1085 +       }
1086 +
1087 +       /* Continue traversal iff userspace said ok... */
1088 +       if (verdict == NF_REPEAT) {
1089 +               elem = elem->prev;
1090 +               verdict = NF_ACCEPT;
1091 +       }
1092 +
1093 +       if (verdict == NF_ACCEPT) {
1094 +       next_hook:
1095 +               verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
1096 +                                    &skb, info->hook, 
1097 +                                    info->indev, info->outdev, &elem,
1098 +                                    info->okfn, INT_MIN);
1099 +       }
1100 +
1101 +       switch (verdict) {
1102 +       case NF_ACCEPT:
1103 +               info->okfn(skb);
1104 +               break;
1105 +
1106 +       case NF_QUEUE:
1107 +               if (!nf_queue(skb, elem, info->pf, info->hook, 
1108 +                             info->indev, info->outdev, info->okfn))
1109 +                       goto next_hook;
1110 +               break;
1111 +       }
1112 +       rcu_read_unlock();
1113 +
1114 +       if (verdict == NF_DROP)
1115 +               kfree_skb(skb);
1116 +
1117 +       kfree(info);
1118 +       return;
1119 +}
1120 +
1121 +#ifdef CONFIG_INET
1122 +/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
1123 +int ip_route_me_harder(struct sk_buff **pskb)
1124 +{
1125 +       struct iphdr *iph = (*pskb)->nh.iph;
1126 +       struct rtable *rt;
1127 +       struct flowi fl = {};
1128 +       struct dst_entry *odst;
1129 +       unsigned int hh_len;
1130 +
1131 +       /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
1132 +        * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
1133 +        */
1134 +       if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
1135 +               fl.nl_u.ip4_u.daddr = iph->daddr;
1136 +               fl.nl_u.ip4_u.saddr = iph->saddr;
1137 +               fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
1138 +               fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
1139 +#ifdef CONFIG_IP_ROUTE_FWMARK
1140 +               fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
1141 +#endif
1142 +               if (ip_route_output_key(&rt, &fl) != 0)
1143 +                       return -1;
1144 +
1145 +               /* Drop old route. */
1146 +               dst_release((*pskb)->dst);
1147 +               (*pskb)->dst = &rt->u.dst;
1148 +       } else {
1149 +               /* non-local src, find valid iif to satisfy
1150 +                * rp-filter when calling ip_route_input. */
1151 +               fl.nl_u.ip4_u.daddr = iph->saddr;
1152 +               if (ip_route_output_key(&rt, &fl) != 0)
1153 +                       return -1;
1154 +
1155 +               odst = (*pskb)->dst;
1156 +               if (ip_route_input(*pskb, iph->daddr, iph->saddr,
1157 +                                  RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
1158 +                       dst_release(&rt->u.dst);
1159 +                       return -1;
1160 +               }
1161 +               dst_release(&rt->u.dst);
1162 +               dst_release(odst);
1163 +       }
1164 +       
1165 +       if ((*pskb)->dst->error)
1166 +               return -1;
1167 +
1168 +       /* Change in oif may mean change in hh_len. */
1169 +       hh_len = (*pskb)->dst->dev->hard_header_len;
1170 +       if (skb_headroom(*pskb) < hh_len) {
1171 +               struct sk_buff *nskb;
1172 +
1173 +               nskb = skb_realloc_headroom(*pskb, hh_len);
1174 +               if (!nskb) 
1175 +                       return -1;
1176 +               if ((*pskb)->sk)
1177 +                       skb_set_owner_w(nskb, (*pskb)->sk);
1178 +               kfree_skb(*pskb);
1179 +               *pskb = nskb;
1180 +       }
1181 +
1182 +       return 0;
1183 +}
1184 +
1185 +int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
1186 +{
1187 +       struct sk_buff *nskb;
1188 +       unsigned int iplen;
1189 +
1190 +       if (writable_len > (*pskb)->len)
1191 +               return 0;
1192 +
1193 +       /* Not exclusive use of packet?  Must copy. */
1194 +       if (skb_shared(*pskb) || skb_cloned(*pskb))
1195 +               goto copy_skb;
1196 +
1197 +       /* Alexey says IP hdr is always modifiable and linear, so ok. */
1198 +       if (writable_len <= (*pskb)->nh.iph->ihl*4)
1199 +               return 1;
1200 +
1201 +       iplen = writable_len - (*pskb)->nh.iph->ihl*4;
1202 +
1203 +       /* DaveM says protocol headers are also modifiable. */
1204 +       switch ((*pskb)->nh.iph->protocol) {
1205 +       case IPPROTO_TCP: {
1206 +               struct tcphdr hdr;
1207 +               if (skb_copy_bits(*pskb, (*pskb)->nh.iph->ihl*4,
1208 +                                 &hdr, sizeof(hdr)) != 0)
1209 +                       goto copy_skb;
1210 +               if (writable_len <= (*pskb)->nh.iph->ihl*4 + hdr.doff*4)
1211 +                       goto pull_skb;
1212 +               goto copy_skb;
1213 +       }
1214 +       case IPPROTO_UDP:
1215 +               if (writable_len<=(*pskb)->nh.iph->ihl*4+sizeof(struct udphdr))
1216 +                       goto pull_skb;
1217 +               goto copy_skb;
1218 +       case IPPROTO_ICMP:
1219 +               if (writable_len
1220 +                   <= (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr))
1221 +                       goto pull_skb;
1222 +               goto copy_skb;
1223 +       /* Insert other cases here as desired */
1224 +       }
1225 +
1226 +copy_skb:
1227 +       nskb = skb_copy(*pskb, GFP_ATOMIC);
1228 +       if (!nskb)
1229 +               return 0;
1230 +       BUG_ON(skb_is_nonlinear(nskb));
1231 +
1232 +       /* Rest of kernel will get very unhappy if we pass it a
1233 +          suddenly-orphaned skbuff */
1234 +       if ((*pskb)->sk)
1235 +               skb_set_owner_w(nskb, (*pskb)->sk);
1236 +       kfree_skb(*pskb);
1237 +       *pskb = nskb;
1238 +       return 1;
1239 +
1240 +pull_skb:
1241 +       return pskb_may_pull(*pskb, writable_len);
1242 +}
1243 +EXPORT_SYMBOL(skb_ip_make_writable);
1244 +#endif /*CONFIG_INET*/
1245 +
1246 +
1247 +/* This does not belong here, but ipt_REJECT needs it if connection
1248 +   tracking in use: without this, connection may not be in hash table,
1249 +   and hence manufactured ICMP or RST packets will not be associated
1250 +   with it. */
1251 +void (*ip_ct_attach)(struct sk_buff *, struct nf_ct_info *);
1252 +
1253 +void __init netfilter_init(void)
1254 +{
1255 +       int i, h;
1256 +
1257 +       for (i = 0; i < NPROTO; i++) {
1258 +               for (h = 0; h < NF_MAX_HOOKS; h++)
1259 +                       INIT_LIST_HEAD(&nf_hooks[i][h]);
1260 +       }
1261 +}
1262 +
1263 +EXPORT_SYMBOL(ip_ct_attach);
1264 +EXPORT_SYMBOL(ip_route_me_harder);
1265 +EXPORT_SYMBOL(nf_getsockopt);
1266 +EXPORT_SYMBOL(nf_hook_slow);
1267 +EXPORT_SYMBOL(nf_hooks);
1268 +EXPORT_SYMBOL(nf_register_hook);
1269 +EXPORT_SYMBOL(nf_register_queue_handler);
1270 +EXPORT_SYMBOL(nf_register_sockopt);
1271 +EXPORT_SYMBOL(nf_reinject);
1272 +EXPORT_SYMBOL(nf_setsockopt);
1273 +EXPORT_SYMBOL(nf_unregister_hook);
1274 +EXPORT_SYMBOL(nf_unregister_queue_handler);
1275 +EXPORT_SYMBOL(nf_unregister_sockopt);
1276 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/Kconfig linux-2.6.4-rc2/net/ipv4/netfilter/Kconfig
1277 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/Kconfig      2004-03-04 06:16:58.000000000 +0000
1278 +++ linux-2.6.4-rc2/net/ipv4/netfilter/Kconfig  2004-03-05 07:40:24.000000000 +0000
1279 @@ -579,5 +579,60 @@
1280  
1281           To compile it as a module, choose M here.  If unsure, say N.
1282  
1283 +config IP_NF_TARGET_IPV4OPTSSTRIP
1284 +       tristate  'IPV4OPTSSTRIP target support'
1285 +       depends on IP_NF_MANGLE
1286 +         help
1287 +
1288 +config IP_NF_TARGET_TTL
1289 +       tristate  'TTL target support'
1290 +       depends on IP_NF_MANGLE
1291 +         help
1292 +
1293 +config IP_NF_MATCH_CONNLIMIT
1294 +       tristate  'Connections/IP limit match support'
1295 +       depends on IP_NF_IPTABLES
1296 +         help
1297 +
1298 +config IP_NF_MATCH_DSTLIMIT
1299 +       tristate  'dstlimit match support'
1300 +       depends on IP_NF_IPTABLES
1301 +         help
1302 +
1303 +config IP_NF_MATCH_FUZZY
1304 +       tristate  'fuzzy match support'
1305 +       depends on IP_NF_IPTABLES
1306 +         help
1307 +
1308 +config IP_NF_MATCH_IPV4OPTIONS
1309 +       tristate  'IPV4OPTIONS match support'
1310 +       depends on IP_NF_IPTABLES
1311 +         help
1312 +
1313 +config IP_NF_MATCH_MPORT
1314 +       tristate  'Multiple port with ranges match support'
1315 +       depends on IP_NF_IPTABLES
1316 +         help
1317 +
1318 +config IP_NF_MATCH_NTH
1319 +       tristate  'Nth match support'
1320 +       depends on IP_NF_IPTABLES
1321 +         help
1322 +
1323 +config IP_NF_MATCH_QUOTA
1324 +       tristate  'quota match support'
1325 +       depends on IP_NF_IPTABLES
1326 +         help
1327 +
1328 +config IP_NF_MATCH_REALM
1329 +       tristate  'realm match support'
1330 +       depends on IP_NF_IPTABLES && NET_CLS_ROUTE
1331 +         help
1332 +
1333 +config IP_NF_MATCH_SCTP
1334 +       tristate  'SCTP protocol match support'
1335 +       depends on IP_NF_IPTABLES
1336 +         help
1337 +
1338  endmenu
1339  
1340 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/Makefile linux-2.6.4-rc2/net/ipv4/netfilter/Makefile
1341 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/Makefile     2004-03-04 06:16:38.000000000 +0000
1342 +++ linux-2.6.4-rc2/net/ipv4/netfilter/Makefile 2004-03-05 07:40:24.000000000 +0000
1343 @@ -42,15 +42,28 @@
1344  # matches
1345  obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
1346  obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
1347 +obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
1348 +obj-$(CONFIG_IP_NF_MATCH_QUOTA) += ipt_quota.o
1349 +obj-$(CONFIG_IP_NF_MATCH_DSTLIMIT) += ipt_dstlimit.o
1350  obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
1351  obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
1352  obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
1353  
1354  obj-$(CONFIG_IP_NF_MATCH_PKTTYPE) += ipt_pkttype.o
1355  obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
1356 +
1357 +obj-$(CONFIG_IP_NF_MATCH_MPORT) += ipt_mport.o
1358 +
1359  obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
1360  obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
1361  
1362 +obj-$(CONFIG_IP_NF_MATCH_NTH) += ipt_nth.o
1363 +
1364 +obj-$(CONFIG_IP_NF_MATCH_IPV4OPTIONS) += ipt_ipv4options.o
1365 +
1366 +
1367 +obj-$(CONFIG_IP_NF_MATCH_FUZZY) += ipt_fuzzy.o
1368 +
1369  obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
1370  
1371  obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
1372 @@ -61,8 +74,10 @@
1373  
1374  obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
1375  obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
1376 +obj-$(CONFIG_IP_NF_MATCH_CONNLIMIT) += ipt_connlimit.o
1377  obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
1378  obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
1379 +obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
1380  
1381  obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
1382  
1383 @@ -79,6 +94,8 @@
1384  obj-$(CONFIG_IP_NF_TARGET_CLASSIFY) += ipt_CLASSIFY.o
1385  obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
1386  obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
1387 +obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
1388 +obj-$(CONFIG_IP_NF_TARGET_IPV4OPTSSTRIP) += ipt_IPV4OPTSSTRIP.o
1389  obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
1390  obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
1391  
1392 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_core.c
1393 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_core.c  2004-03-04 06:16:34.000000000 +0000
1394 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_core.c      2004-03-05 07:39:43.000000000 +0000
1395 @@ -15,6 +15,8 @@
1396   * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
1397   *     - add usage/reference counts to ip_conntrack_expect
1398   *     - export ip_conntrack[_expect]_{find_get,put} functions
1399 + * 05 Aug 2002: Harald Welte <laforge@gnumonks.org>
1400 + *     - added DocBook-style comments for public API
1401   * */
1402  
1403  #include <linux/config.h>
1404 @@ -89,6 +91,10 @@
1405         return p;
1406  }
1407  
1408 +/**
1409 + * ip_ct_find_proto - Find layer 4 protocol helper for given protocol number
1410 + * @protocol: protocol number
1411 + */
1412  struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol)
1413  {
1414         struct ip_conntrack_protocol *p;
1415 @@ -112,6 +118,11 @@
1416  static int ip_conntrack_hash_rnd_initted;
1417  static unsigned int ip_conntrack_hash_rnd;
1418  
1419 +/**
1420 + * hash_conntrack - Calculate the position of an entry in the connection 
1421 + * tracking table.
1422 + * @tuple: conntrack tuple which we want to calculate the hash position
1423 + */
1424  static u_int32_t
1425  hash_conntrack(const struct ip_conntrack_tuple *tuple)
1426  {
1427 @@ -124,6 +135,19 @@
1428                              ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
1429  }
1430  
1431 +/**
1432 + * get_tuple - set all the fields of a tuple which is passed as parameter
1433 + * given a network buffer.
1434 + * @iph:pointer an IP header. 
1435 + * @skb:network buffer for which we want to generate the tuple
1436 + * @dataoff: FIXME: Deprecated?
1437 + * @tuple: tuple which will be generate. Used as return parameter.
1438 + * @protocol: structure which contains pointer to protocol specific functions.
1439 + *
1440 + * Note: This function doesn't allocate space for the tuple passed as 
1441 + * parameter. The function pkt_to_packet which set all the protocol specific
1442 + * fields of a given tuple.
1443 + */
1444  int
1445  get_tuple(const struct iphdr *iph,
1446           const struct sk_buff *skb,
1447 @@ -145,6 +169,15 @@
1448         return protocol->pkt_to_tuple(skb, dataoff, tuple);
1449  }
1450  
1451 +/**
1452 + * invert_tuple - Returns the inverse of a given tuple. It is used to 
1453 + * calculate the tuple which represents the other sense of the flow
1454 + * of a connection.
1455 + * @inverse: the inverted tuple. Use as return value.
1456 + * @orig: the original tuple which will be inverted.
1457 + * @protocol: a pointer to the protocol structure which contains all the
1458 + * specifical functions available for this tuple.
1459 + */
1460  static int
1461  invert_tuple(struct ip_conntrack_tuple *inverse,
1462              const struct ip_conntrack_tuple *orig,
1463 @@ -160,7 +193,15 @@
1464  
1465  /* ip_conntrack_expect helper functions */
1466  
1467 -/* Compare tuple parts depending on mask. */
1468 +/**
1469 + * expect_cmp - compare a tuple with a expectation depending on a mask
1470 + * @i: pointer to an expectation.
1471 + * @tuple: tuple which will be compared with the expectation tuple.
1472 + *
1473 + * Actually the tuple field of an expectation is compared with a tuple
1474 + * This function is used by LIST_FIND to find a expectation which match a te
1475 + * given tuple.
1476 + */
1477  static inline int expect_cmp(const struct ip_conntrack_expect *i,
1478                              const struct ip_conntrack_tuple *tuple)
1479  {
1480 @@ -168,6 +209,10 @@
1481         return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask);
1482  }
1483  
1484 +/**
1485 + * destroy_expect - Release all the resources allocated by an expectation.
1486 + * @exp: pointer to the expectation which we want to release.
1487 + */
1488  static void
1489  destroy_expect(struct ip_conntrack_expect *exp)
1490  {
1491 @@ -178,7 +223,11 @@
1492         kfree(exp);
1493  }
1494  
1495 -
1496 +/**
1497 + * ip_conntrack_expect_put - it decrements the counter of use related 
1498 + * associated to an expectation and it calls destroy_expect.
1499 + * @exp: pointer to the expectation which we want to release.
1500 + */
1501  inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
1502  {
1503         IP_NF_ASSERT(exp);
1504 @@ -198,7 +247,14 @@
1505                          struct ip_conntrack_expect *, tuple);
1506  }
1507  
1508 -/* Find a expectation corresponding to a tuple. */
1509 +/**
1510 + * ip_conntrack_find_get - find conntrack according to tuple
1511 + * @tuple: conntrack tuple for which we search conntrack
1512 + * @ignored_conntrack: ignore this conntrack during search
1513 + *
1514 + * This function increments the reference count of the found
1515 + * conntrack (if any).
1516 + */
1517  struct ip_conntrack_expect *
1518  ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
1519  {
1520 @@ -381,7 +437,14 @@
1521         return h;
1522  }
1523  
1524 -/* Find a connection corresponding to a tuple. */
1525 +/**
1526 + * ip_conntrack_find_get - find conntrack according to tuple
1527 + * @tuple: conntrack tuple for which we search conntrack
1528 + * @ignored_conntrack: ignore this conntrack during search
1529 + *
1530 + * This function increments the reference count of the found
1531 + * conntrack (if any).
1532 + */
1533  struct ip_conntrack_tuple_hash *
1534  ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
1535                       const struct ip_conntrack *ignored_conntrack)
1536 @@ -409,7 +472,14 @@
1537         return ct;
1538  }
1539  
1540 -/* Return conntrack and conntrack_info given skb->nfct->master */
1541 +/**
1542 + * ip_conntrack_get - Return conntrack and conntrack_info for given skb
1543 + * @skb: skb for which we want to find conntrack and conntrack_info
1544 + * @ctinfo: pointer to ctinfo, used as return value
1545 + *
1546 + * This function resolves the respective conntrack and conntrack_info
1547 + * structures for the connection this packet (skb) is part of.
1548 + */
1549  struct ip_conntrack *
1550  ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
1551  {
1552 @@ -479,8 +549,14 @@
1553         return NF_DROP;
1554  }
1555  
1556 -/* Returns true if a connection correspondings to the tuple (required
1557 -   for NAT). */
1558 +/**
1559 + * ip_conntrack_tuple_taken - Find out if tuple is already in use
1560 + * @tuple: tuple to be used for this test
1561 + * @ignored_conntrack: conntrack which is excluded from result
1562 + *
1563 + * This function is called by the NAT code in order to find out if
1564 + * a particular tuple is already in use by some connection.
1565 + */
1566  int
1567  ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
1568                          const struct ip_conntrack *ignored_conntrack)
1569 @@ -606,7 +682,13 @@
1570  {
1571         return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
1572  }
1573 -
1574 +/**
1575 + * ip_ct_find_helper - Find application helper according to tuple
1576 + * @tuple: tuple for which helper needs to be found
1577 + *
1578 + * This function is used to determine if any registered conntrack helper
1579 + * is to be used for the given tuple.
1580 + */
1581  struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple)
1582  {
1583         return LIST_FIND(&helpers, helper_cmp,
1584 @@ -691,42 +773,50 @@
1585                              struct ip_conntrack_expect *, tuple);
1586         READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
1587  
1588 -       /* If master is not in hash table yet (ie. packet hasn't left
1589 -          this machine yet), how can other end know about expected?
1590 -          Hence these are not the droids you are looking for (if
1591 -          master ct never got confirmed, we'd hold a reference to it
1592 -          and weird things would happen to future packets). */
1593 -       if (expected && !is_confirmed(expected->expectant))
1594 -               expected = NULL;
1595 -
1596 -       /* Look up the conntrack helper for master connections only */
1597 -       if (!expected)
1598 -               conntrack->helper = ip_ct_find_helper(&repl_tuple);
1599 -
1600 -       /* If the expectation is dying, then this is a loser. */
1601 -       if (expected
1602 -           && expected->expectant->helper->timeout
1603 -           && ! del_timer(&expected->timeout))
1604 -               expected = NULL;
1605 -
1606         if (expected) {
1607 -               DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
1608 -                       conntrack, expected);
1609 -               /* Welcome, Mr. Bond.  We've been expecting you... */
1610 -               IP_NF_ASSERT(master_ct(conntrack));
1611 -               __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
1612 -               conntrack->master = expected;
1613 -               expected->sibling = conntrack;
1614 -               LIST_DELETE(&ip_conntrack_expect_list, expected);
1615 -               expected->expectant->expecting--;
1616 -               nf_conntrack_get(&master_ct(conntrack)->infos[0]);
1617 -       }
1618 -       atomic_inc(&ip_conntrack_count);
1619 +               /* If master is not in hash table yet (ie. packet hasn't left
1620 +                  this machine yet), how can other end know about expected?
1621 +                  Hence these are not the droids you are looking for (if
1622 +                  master ct never got confirmed, we'd hold a reference to it
1623 +                  and weird things would happen to future packets). */
1624 +               if (!is_confirmed(expected->expectant)) {
1625 +                       
1626 +                       conntrack->helper = ip_ct_find_helper(&repl_tuple);
1627 +                       goto end;
1628 +               }
1629 +
1630 +               /* Expectation is dying... */
1631 +               if (expected->expectant->helper->timeout
1632 +                   && ! del_timer(&expected->timeout)) {
1633 +                       goto end;       
1634 +               }
1635 +
1636 +                DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
1637 +                       conntrack, expected);
1638 +                /* Welcome, Mr. Bond.  We've been expecting you... */
1639 +                IP_NF_ASSERT(master_ct(conntrack));
1640 +                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
1641 +                conntrack->master = expected;
1642 +                expected->sibling = conntrack;
1643 +                LIST_DELETE(&ip_conntrack_expect_list, expected);
1644 +                expected->expectant->expecting--;
1645 +                nf_conntrack_get(&master_ct(conntrack)->infos[0]);
1646 +
1647 +               /* this is a braindead... --pablo */
1648 +               atomic_inc(&ip_conntrack_count);
1649 +               WRITE_UNLOCK(&ip_conntrack_lock);
1650 +
1651 +               if (expected->expectfn)
1652 +                       expected->expectfn(conntrack);
1653 +
1654 +               goto ret;
1655 +        } else 
1656 +                conntrack->helper = ip_ct_find_helper(&repl_tuple);
1657 +
1658 +end:   atomic_inc(&ip_conntrack_count);
1659         WRITE_UNLOCK(&ip_conntrack_lock);
1660  
1661 -       if (expected && expected->expectfn)
1662 -               expected->expectfn(conntrack);
1663 -       return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
1664 +ret:   return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
1665  }
1666  
1667  /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1668 @@ -900,6 +990,14 @@
1669         return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask);
1670  }
1671  
1672 +/**
1673 + * ip_conntrack_unexpect_related - Unexpect a related connection
1674 + * @expect: expecattin to be removed
1675 + *
1676 + * This function removes an existing expectation, that has not yet been
1677 + * confirmed (i.e. expectation was issued, but expected connection didn't
1678 + * arrive yet)
1679 + */
1680  inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect)
1681  {
1682         WRITE_LOCK(&ip_conntrack_lock);
1683 @@ -917,7 +1015,20 @@
1684         WRITE_UNLOCK(&ip_conntrack_lock);
1685  }
1686  
1687 -/* Add a related connection. */
1688 +/**
1689 + * ip_conntrack_expect_related - Expect a related connection
1690 + * @related_to: master conntrack
1691 + * @expect: expectation with all values filled in
1692 + *
1693 + * This function is called by conntrack application helpers who
1694 + * have detected that the control (master) connection is just about
1695 + * to negotiate a related slave connection. 
1696 + *
1697 + * Note: This function allocates it's own struct ip_conntrack_expect,
1698 + * copying the values from the 'expect' parameter.  Thus, 'expect' can
1699 + * be allocated on the stack and does not need to be valid after this
1700 + * function returns.
1701 + */
1702  int ip_conntrack_expect_related(struct ip_conntrack *related_to,
1703                                 struct ip_conntrack_expect *expect)
1704  {
1705 @@ -1047,7 +1158,15 @@
1706         return ret;
1707  }
1708  
1709 -/* Change tuple in an existing expectation */
1710 +/**
1711 + * ip_conntrack_change_expect - Change tuple in existing expectation
1712 + * @expect: expectation which is to be changed
1713 + * @newtuple: new tuple for expect
1714 + *
1715 + * This function is mostly called by NAT application helpers, who want to
1716 + * change an expectation issued by their respective conntrack application
1717 + * helper counterpart.
1718 + */
1719  int ip_conntrack_change_expect(struct ip_conntrack_expect *expect,
1720                                struct ip_conntrack_tuple *newtuple)
1721  {
1722 @@ -1088,8 +1207,15 @@
1723         return ret;
1724  }
1725  
1726 -/* Alter reply tuple (maybe alter helper).  If it's already taken,
1727 -   return 0 and don't do alteration. */
1728 +/**
1729 + * ip_conntrack_alter_reply - Alter reply tuple of conntrack
1730 + * @conntrack: conntrack whose reply tuple we want to alter
1731 + * @newreply: designated reply tuple for this conntrack
1732 + *
1733 + * This function alters the reply tuple of a conntrack to the given
1734 + * newreply tuple.  If this newreply tuple is already taken, return 0
1735 + * and don't do alteration
1736 + */
1737  int ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
1738                              const struct ip_conntrack_tuple *newreply)
1739  {
1740 @@ -1114,6 +1240,13 @@
1741         return 1;
1742  }
1743  
1744 +/**
1745 + * ip_conntrack_helper_register - Register a conntrack application helper
1746 + * @me: structure describing the helper
1747 + *
1748 + * This function is called by conntrack application helpers to register
1749 + * themselves with the conntrack core.
1750 + */
1751  int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
1752  {
1753         WRITE_LOCK(&ip_conntrack_lock);
1754 @@ -1135,6 +1268,13 @@
1755         return 0;
1756  }
1757  
1758 +/**
1759 + * ip_conntrack_helper_unregister - Unregister a conntrack application helper
1760 + * @me: structure describing the helper
1761 + *
1762 + * This function is called by conntrack application helpers to unregister
1763 + * themselvers from the conntrack core.
1764 + */
1765  void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
1766  {
1767         unsigned int i;
1768 @@ -1153,7 +1293,14 @@
1769         synchronize_net();
1770  }
1771  
1772 -/* Refresh conntrack for this many jiffies. */
1773 +/**
1774 + * ip_ct_refresh - Refresh conntrack timer for given conntrack
1775 + * @ct: conntrack which we want to refresh
1776 + * @extra_jiffies: number of jiffies to add
1777 + *
1778 + * This function is called by protocol helpers and application helpers in
1779 + * order to change the expiration timer of a conntrack entry.
1780 + */
1781  void ip_ct_refresh(struct ip_conntrack *ct, unsigned long extra_jiffies)
1782  {
1783         IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
1784 @@ -1172,7 +1319,16 @@
1785         WRITE_UNLOCK(&ip_conntrack_lock);
1786  }
1787  
1788 -/* Returns new sk_buff, or NULL */
1789 +
1790 +/**
1791 + * ip_ct_gather_frags - Gather fragments of a particular skb
1792 + * @skb: pointer to sk_buff of fragmented IP packet
1793 + *
1794 + * This code is just a wrapper around the defragmentation code in the core IPv4
1795 + * stack.  It also takes care of nonlinear skb's.
1796 + *
1797 + * Returns new sk_buff, or NULL
1798 + */
1799  struct sk_buff *
1800  ip_ct_gather_frags(struct sk_buff *skb)
1801  {
1802 @@ -1256,6 +1412,16 @@
1803         return h;
1804  }
1805  
1806 +/**
1807 + * ip_ct_selective_cleanup - Selectively delete a set of conntrack entries
1808 + * @kill: callback function selecting which entries to delete
1809 + * @data: opaque data pointer, becomes 2nd argument for kill function
1810 + *
1811 + * This function can be used to selectively delete elements of the conntrack
1812 + * hashtable.  The function iterates over the list of conntrack entries and
1813 + * calls the 'kill' function for every entry.  If the return value is true,
1814 + * the connection is deleted (death_by_timeout).
1815 + */
1816  void
1817  ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data),
1818                         void *data)
1819 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_core.c.orig linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_core.c.orig
1820 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_core.c.orig     1970-01-01 00:00:00.000000000 +0000
1821 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_core.c.orig 2004-03-05 07:39:41.000000000 +0000
1822 @@ -0,0 +1,1441 @@
1823 +/* Connection state tracking for netfilter.  This is separated from,
1824 +   but required by, the NAT layer; it can also be used by an iptables
1825 +   extension. */
1826 +
1827 +/* (C) 1999-2001 Paul `Rusty' Russell  
1828 + * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
1829 + *
1830 + * This program is free software; you can redistribute it and/or modify
1831 + * it under the terms of the GNU General Public License version 2 as
1832 + * published by the Free Software Foundation.
1833 + *
1834 + * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
1835 + *     - new API and handling of conntrack/nat helpers
1836 + *     - now capable of multiple expectations for one master
1837 + * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
1838 + *     - add usage/reference counts to ip_conntrack_expect
1839 + *     - export ip_conntrack[_expect]_{find_get,put} functions
1840 + * */
1841 +
1842 +#include <linux/config.h>
1843 +#include <linux/types.h>
1844 +#include <linux/icmp.h>
1845 +#include <linux/ip.h>
1846 +#include <linux/netfilter.h>
1847 +#include <linux/netfilter_ipv4.h>
1848 +#include <linux/module.h>
1849 +#include <linux/skbuff.h>
1850 +#include <linux/proc_fs.h>
1851 +#include <linux/vmalloc.h>
1852 +#include <net/checksum.h>
1853 +#include <linux/stddef.h>
1854 +#include <linux/sysctl.h>
1855 +#include <linux/slab.h>
1856 +#include <linux/random.h>
1857 +#include <linux/jhash.h>
1858 +/* For ERR_PTR().  Yeah, I know... --RR */
1859 +#include <linux/fs.h>
1860 +
1861 +/* This rwlock protects the main hash table, protocol/helper/expected
1862 +   registrations, conntrack timers*/
1863 +#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
1864 +#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
1865 +
1866 +#include <linux/netfilter_ipv4/ip_conntrack.h>
1867 +#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
1868 +#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
1869 +#include <linux/netfilter_ipv4/ip_conntrack_core.h>
1870 +#include <linux/netfilter_ipv4/listhelp.h>
1871 +
1872 +#define IP_CONNTRACK_VERSION   "2.1"
1873 +
1874 +#if 0
1875 +#define DEBUGP printk
1876 +#else
1877 +#define DEBUGP(format, args...)
1878 +#endif
1879 +
1880 +DECLARE_RWLOCK(ip_conntrack_lock);
1881 +DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock);
1882 +
1883 +void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
1884 +LIST_HEAD(ip_conntrack_expect_list);
1885 +LIST_HEAD(protocol_list);
1886 +static LIST_HEAD(helpers);
1887 +unsigned int ip_conntrack_htable_size = 0;
1888 +int ip_conntrack_max;
1889 +static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
1890 +struct list_head *ip_conntrack_hash;
1891 +static kmem_cache_t *ip_conntrack_cachep;
1892 +
1893 +extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
1894 +
1895 +static inline int proto_cmpfn(const struct ip_conntrack_protocol *curr,
1896 +                             u_int8_t protocol)
1897 +{
1898 +       return protocol == curr->proto;
1899 +}
1900 +
1901 +struct ip_conntrack_protocol *__ip_ct_find_proto(u_int8_t protocol)
1902 +{
1903 +       struct ip_conntrack_protocol *p;
1904 +
1905 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
1906 +       p = LIST_FIND(&protocol_list, proto_cmpfn,
1907 +                     struct ip_conntrack_protocol *, protocol);
1908 +       if (!p)
1909 +               p = &ip_conntrack_generic_protocol;
1910 +
1911 +       return p;
1912 +}
1913 +
1914 +struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol)
1915 +{
1916 +       struct ip_conntrack_protocol *p;
1917 +
1918 +       READ_LOCK(&ip_conntrack_lock);
1919 +       p = __ip_ct_find_proto(protocol);
1920 +       READ_UNLOCK(&ip_conntrack_lock);
1921 +       return p;
1922 +}
1923 +
1924 +inline void 
1925 +ip_conntrack_put(struct ip_conntrack *ct)
1926 +{
1927 +       IP_NF_ASSERT(ct);
1928 +       IP_NF_ASSERT(ct->infos[0].master);
1929 +       /* nf_conntrack_put wants to go via an info struct, so feed it
1930 +           one at random. */
1931 +       nf_conntrack_put(&ct->infos[0]);
1932 +}
1933 +
1934 +static int ip_conntrack_hash_rnd_initted;
1935 +static unsigned int ip_conntrack_hash_rnd;
1936 +
1937 +static u_int32_t
1938 +hash_conntrack(const struct ip_conntrack_tuple *tuple)
1939 +{
1940 +#if 0
1941 +       dump_tuple(tuple);
1942 +#endif
1943 +       return (jhash_3words(tuple->src.ip,
1944 +                            (tuple->dst.ip ^ tuple->dst.protonum),
1945 +                            (tuple->src.u.all | (tuple->dst.u.all << 16)),
1946 +                            ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
1947 +}
1948 +
1949 +int
1950 +get_tuple(const struct iphdr *iph,
1951 +         const struct sk_buff *skb,
1952 +         unsigned int dataoff,
1953 +         struct ip_conntrack_tuple *tuple,
1954 +         const struct ip_conntrack_protocol *protocol)
1955 +{
1956 +       /* Never happen */
1957 +       if (iph->frag_off & htons(IP_OFFSET)) {
1958 +               printk("ip_conntrack_core: Frag of proto %u.\n",
1959 +                      iph->protocol);
1960 +               return 0;
1961 +       }
1962 +
1963 +       tuple->src.ip = iph->saddr;
1964 +       tuple->dst.ip = iph->daddr;
1965 +       tuple->dst.protonum = iph->protocol;
1966 +
1967 +       return protocol->pkt_to_tuple(skb, dataoff, tuple);
1968 +}
1969 +
1970 +static int
1971 +invert_tuple(struct ip_conntrack_tuple *inverse,
1972 +            const struct ip_conntrack_tuple *orig,
1973 +            const struct ip_conntrack_protocol *protocol)
1974 +{
1975 +       inverse->src.ip = orig->dst.ip;
1976 +       inverse->dst.ip = orig->src.ip;
1977 +       inverse->dst.protonum = orig->dst.protonum;
1978 +
1979 +       return protocol->invert_tuple(inverse, orig);
1980 +}
1981 +
1982 +
1983 +/* ip_conntrack_expect helper functions */
1984 +
1985 +/* Compare tuple parts depending on mask. */
1986 +static inline int expect_cmp(const struct ip_conntrack_expect *i,
1987 +                            const struct ip_conntrack_tuple *tuple)
1988 +{
1989 +       MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
1990 +       return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask);
1991 +}
1992 +
1993 +static void
1994 +destroy_expect(struct ip_conntrack_expect *exp)
1995 +{
1996 +       DEBUGP("destroy_expect(%p) use=%d\n", exp, atomic_read(&exp->use));
1997 +       IP_NF_ASSERT(atomic_read(&exp->use));
1998 +       IP_NF_ASSERT(!timer_pending(&exp->timeout));
1999 +
2000 +       kfree(exp);
2001 +}
2002 +
2003 +
2004 +inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
2005 +{
2006 +       IP_NF_ASSERT(exp);
2007 +
2008 +       if (atomic_dec_and_test(&exp->use)) {
2009 +               /* usage count dropped to zero */
2010 +               destroy_expect(exp);
2011 +       }
2012 +}
2013 +
2014 +static inline struct ip_conntrack_expect *
2015 +__ip_ct_expect_find(const struct ip_conntrack_tuple *tuple)
2016 +{
2017 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
2018 +       MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
2019 +       return LIST_FIND(&ip_conntrack_expect_list, expect_cmp, 
2020 +                        struct ip_conntrack_expect *, tuple);
2021 +}
2022 +
2023 +/* Find a expectation corresponding to a tuple. */
2024 +struct ip_conntrack_expect *
2025 +ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
2026 +{
2027 +       struct ip_conntrack_expect *exp;
2028 +
2029 +       READ_LOCK(&ip_conntrack_lock);
2030 +       READ_LOCK(&ip_conntrack_expect_tuple_lock);
2031 +       exp = __ip_ct_expect_find(tuple);
2032 +       if (exp)
2033 +               atomic_inc(&exp->use);
2034 +       READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
2035 +       READ_UNLOCK(&ip_conntrack_lock);
2036 +
2037 +       return exp;
2038 +}
2039 +
2040 +/* remove one specific expectation from all lists and drop refcount,
2041 + * does _NOT_ delete the timer. */
2042 +static void __unexpect_related(struct ip_conntrack_expect *expect)
2043 +{
2044 +       DEBUGP("unexpect_related(%p)\n", expect);
2045 +       MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
2046 +
2047 +       /* we're not allowed to unexpect a confirmed expectation! */
2048 +       IP_NF_ASSERT(!expect->sibling);
2049 +
2050 +       /* delete from global and local lists */
2051 +       list_del(&expect->list);
2052 +       list_del(&expect->expected_list);
2053 +
2054 +       /* decrement expect-count of master conntrack */
2055 +       if (expect->expectant)
2056 +               expect->expectant->expecting--;
2057 +
2058 +       ip_conntrack_expect_put(expect);
2059 +}
2060 +
2061 +/* remove one specific expecatation from all lists, drop refcount
2062 + * and expire timer. 
2063 + * This function can _NOT_ be called for confirmed expects! */
2064 +static void unexpect_related(struct ip_conntrack_expect *expect)
2065 +{
2066 +       IP_NF_ASSERT(expect->expectant);
2067 +       IP_NF_ASSERT(expect->expectant->helper);
2068 +       /* if we are supposed to have a timer, but we can't delete
2069 +        * it: race condition.  __unexpect_related will
2070 +        * be calledd by timeout function */
2071 +       if (expect->expectant->helper->timeout
2072 +           && !del_timer(&expect->timeout))
2073 +               return;
2074 +
2075 +       __unexpect_related(expect);
2076 +}
2077 +
2078 +/* delete all unconfirmed expectations for this conntrack */
2079 +static void remove_expectations(struct ip_conntrack *ct, int drop_refcount)
2080 +{
2081 +       struct list_head *exp_entry, *next;
2082 +       struct ip_conntrack_expect *exp;
2083 +
2084 +       DEBUGP("remove_expectations(%p)\n", ct);
2085 +
2086 +       list_for_each_safe(exp_entry, next, &ct->sibling_list) {
2087 +               exp = list_entry(exp_entry, struct ip_conntrack_expect,
2088 +                                expected_list);
2089 +
2090 +               /* we skip established expectations, as we want to delete
2091 +                * the un-established ones only */
2092 +               if (exp->sibling) {
2093 +                       DEBUGP("remove_expectations: skipping established %p of %p\n", exp->sibling, ct);
2094 +                       if (drop_refcount) {
2095 +                               /* Indicate that this expectations parent is dead */
2096 +                               ip_conntrack_put(exp->expectant);
2097 +                               exp->expectant = NULL;
2098 +                       }
2099 +                       continue;
2100 +               }
2101 +
2102 +               IP_NF_ASSERT(list_inlist(&ip_conntrack_expect_list, exp));
2103 +               IP_NF_ASSERT(exp->expectant == ct);
2104 +
2105 +               /* delete expectation from global and private lists */
2106 +               unexpect_related(exp);
2107 +       }
2108 +}
2109 +
2110 +static void
2111 +clean_from_lists(struct ip_conntrack *ct)
2112 +{
2113 +       unsigned int ho, hr;
2114 +       
2115 +       DEBUGP("clean_from_lists(%p)\n", ct);
2116 +       MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
2117 +
2118 +       ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
2119 +       hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2120 +       LIST_DELETE(&ip_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
2121 +       LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
2122 +
2123 +       /* Destroy all un-established, pending expectations */
2124 +       remove_expectations(ct, 1);
2125 +}
2126 +
2127 +static void
2128 +destroy_conntrack(struct nf_conntrack *nfct)
2129 +{
2130 +       struct ip_conntrack *ct = (struct ip_conntrack *)nfct, *master = NULL;
2131 +       struct ip_conntrack_protocol *proto;
2132 +
2133 +       DEBUGP("destroy_conntrack(%p)\n", ct);
2134 +       IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
2135 +       IP_NF_ASSERT(!timer_pending(&ct->timeout));
2136 +
2137 +       /* To make sure we don't get any weird locking issues here:
2138 +        * destroy_conntrack() MUST NOT be called with a write lock
2139 +        * to ip_conntrack_lock!!! -HW */
2140 +       proto = ip_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
2141 +       if (proto && proto->destroy)
2142 +               proto->destroy(ct);
2143 +
2144 +       if (ip_conntrack_destroyed)
2145 +               ip_conntrack_destroyed(ct);
2146 +
2147 +       WRITE_LOCK(&ip_conntrack_lock);
2148 +       /* Delete us from our own list to prevent corruption later */
2149 +       list_del(&ct->sibling_list);
2150 +
2151 +       /* Delete our master expectation */
2152 +       if (ct->master) {
2153 +               if (ct->master->expectant) {
2154 +                       /* can't call __unexpect_related here,
2155 +                        * since it would screw up expect_list */
2156 +                       list_del(&ct->master->expected_list);
2157 +                       master = ct->master->expectant;
2158 +               }
2159 +               kfree(ct->master);
2160 +       }
2161 +       WRITE_UNLOCK(&ip_conntrack_lock);
2162 +
2163 +       if (master)
2164 +               ip_conntrack_put(master);
2165 +
2166 +       DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
2167 +       kmem_cache_free(ip_conntrack_cachep, ct);
2168 +       atomic_dec(&ip_conntrack_count);
2169 +}
2170 +
2171 +static void death_by_timeout(unsigned long ul_conntrack)
2172 +{
2173 +       struct ip_conntrack *ct = (void *)ul_conntrack;
2174 +
2175 +       WRITE_LOCK(&ip_conntrack_lock);
2176 +       clean_from_lists(ct);
2177 +       WRITE_UNLOCK(&ip_conntrack_lock);
2178 +       ip_conntrack_put(ct);
2179 +}
2180 +
2181 +static inline int
2182 +conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
2183 +                   const struct ip_conntrack_tuple *tuple,
2184 +                   const struct ip_conntrack *ignored_conntrack)
2185 +{
2186 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
2187 +       return i->ctrack != ignored_conntrack
2188 +               && ip_ct_tuple_equal(tuple, &i->tuple);
2189 +}
2190 +
2191 +static struct ip_conntrack_tuple_hash *
2192 +__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
2193 +                   const struct ip_conntrack *ignored_conntrack)
2194 +{
2195 +       struct ip_conntrack_tuple_hash *h;
2196 +       unsigned int hash = hash_conntrack(tuple);
2197 +
2198 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
2199 +       h = LIST_FIND(&ip_conntrack_hash[hash],
2200 +                     conntrack_tuple_cmp,
2201 +                     struct ip_conntrack_tuple_hash *,
2202 +                     tuple, ignored_conntrack);
2203 +       return h;
2204 +}
2205 +
2206 +/* Find a connection corresponding to a tuple. */
2207 +struct ip_conntrack_tuple_hash *
2208 +ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
2209 +                     const struct ip_conntrack *ignored_conntrack)
2210 +{
2211 +       struct ip_conntrack_tuple_hash *h;
2212 +
2213 +       READ_LOCK(&ip_conntrack_lock);
2214 +       h = __ip_conntrack_find(tuple, ignored_conntrack);
2215 +       if (h)
2216 +               atomic_inc(&h->ctrack->ct_general.use);
2217 +       READ_UNLOCK(&ip_conntrack_lock);
2218 +
2219 +       return h;
2220 +}
2221 +
2222 +static inline struct ip_conntrack *
2223 +__ip_conntrack_get(struct nf_ct_info *nfct, enum ip_conntrack_info *ctinfo)
2224 +{
2225 +       struct ip_conntrack *ct
2226 +               = (struct ip_conntrack *)nfct->master;
2227 +
2228 +       /* ctinfo is the index of the nfct inside the conntrack */
2229 +       *ctinfo = nfct - ct->infos;
2230 +       IP_NF_ASSERT(*ctinfo >= 0 && *ctinfo < IP_CT_NUMBER);
2231 +       return ct;
2232 +}
2233 +
2234 +/* Return conntrack and conntrack_info given skb->nfct->master */
2235 +struct ip_conntrack *
2236 +ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
2237 +{
2238 +       if (skb->nfct) 
2239 +               return __ip_conntrack_get(skb->nfct, ctinfo);
2240 +       return NULL;
2241 +}
2242 +
2243 +/* Confirm a connection given skb->nfct; places it in hash table */
2244 +int
2245 +__ip_conntrack_confirm(struct nf_ct_info *nfct)
2246 +{
2247 +       unsigned int hash, repl_hash;
2248 +       struct ip_conntrack *ct;
2249 +       enum ip_conntrack_info ctinfo;
2250 +
2251 +       ct = __ip_conntrack_get(nfct, &ctinfo);
2252 +
2253 +       /* ipt_REJECT uses ip_conntrack_attach to attach related
2254 +          ICMP/TCP RST packets in other direction.  Actual packet
2255 +          which created connection will be IP_CT_NEW or for an
2256 +          expected connection, IP_CT_RELATED. */
2257 +       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
2258 +               return NF_ACCEPT;
2259 +
2260 +       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
2261 +       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2262 +
2263 +       /* We're not in hash table, and we refuse to set up related
2264 +          connections for unconfirmed conns.  But packet copies and
2265 +          REJECT will give spurious warnings here. */
2266 +       /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
2267 +
2268 +       /* No external references means noone else could have
2269 +           confirmed us. */
2270 +       IP_NF_ASSERT(!is_confirmed(ct));
2271 +       DEBUGP("Confirming conntrack %p\n", ct);
2272 +
2273 +       WRITE_LOCK(&ip_conntrack_lock);
2274 +       /* See if there's one in the list already, including reverse:
2275 +           NAT could have grabbed it without realizing, since we're
2276 +           not in the hash.  If there is, we lost race. */
2277 +       if (!LIST_FIND(&ip_conntrack_hash[hash],
2278 +                      conntrack_tuple_cmp,
2279 +                      struct ip_conntrack_tuple_hash *,
2280 +                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
2281 +           && !LIST_FIND(&ip_conntrack_hash[repl_hash],
2282 +                         conntrack_tuple_cmp,
2283 +                         struct ip_conntrack_tuple_hash *,
2284 +                         &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
2285 +               list_prepend(&ip_conntrack_hash[hash],
2286 +                            &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
2287 +               list_prepend(&ip_conntrack_hash[repl_hash],
2288 +                            &ct->tuplehash[IP_CT_DIR_REPLY]);
2289 +               /* Timer relative to confirmation time, not original
2290 +                  setting time, otherwise we'd get timer wrap in
2291 +                  weird delay cases. */
2292 +               ct->timeout.expires += jiffies;
2293 +               add_timer(&ct->timeout);
2294 +               atomic_inc(&ct->ct_general.use);
2295 +               set_bit(IPS_CONFIRMED_BIT, &ct->status);
2296 +               WRITE_UNLOCK(&ip_conntrack_lock);
2297 +               return NF_ACCEPT;
2298 +       }
2299 +
2300 +       WRITE_UNLOCK(&ip_conntrack_lock);
2301 +       return NF_DROP;
2302 +}
2303 +
2304 +/* Returns true if a connection correspondings to the tuple (required
2305 +   for NAT). */
2306 +int
2307 +ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
2308 +                        const struct ip_conntrack *ignored_conntrack)
2309 +{
2310 +       struct ip_conntrack_tuple_hash *h;
2311 +
2312 +       READ_LOCK(&ip_conntrack_lock);
2313 +       h = __ip_conntrack_find(tuple, ignored_conntrack);
2314 +       READ_UNLOCK(&ip_conntrack_lock);
2315 +
2316 +       return h != NULL;
2317 +}
2318 +
2319 +/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
2320 +struct ip_conntrack *
2321 +icmp_error_track(struct sk_buff *skb,
2322 +                enum ip_conntrack_info *ctinfo,
2323 +                unsigned int hooknum)
2324 +{
2325 +       struct ip_conntrack_tuple innertuple, origtuple;
2326 +       struct {
2327 +               struct icmphdr icmp;
2328 +               struct iphdr ip;
2329 +       } inside;
2330 +       struct ip_conntrack_protocol *innerproto;
2331 +       struct ip_conntrack_tuple_hash *h;
2332 +       int dataoff;
2333 +
2334 +       IP_NF_ASSERT(skb->nfct == NULL);
2335 +
2336 +       /* Not enough header? */
2337 +       if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &inside, sizeof(inside))!=0)
2338 +               return NULL;
2339 +
2340 +       if (inside.icmp.type != ICMP_DEST_UNREACH
2341 +           && inside.icmp.type != ICMP_SOURCE_QUENCH
2342 +           && inside.icmp.type != ICMP_TIME_EXCEEDED
2343 +           && inside.icmp.type != ICMP_PARAMETERPROB
2344 +           && inside.icmp.type != ICMP_REDIRECT)
2345 +               return NULL;
2346 +
2347 +       /* Ignore ICMP's containing fragments (shouldn't happen) */
2348 +       if (inside.ip.frag_off & htons(IP_OFFSET)) {
2349 +               DEBUGP("icmp_error_track: fragment of proto %u\n",
2350 +                      inside.ip.protocol);
2351 +               return NULL;
2352 +       }
2353 +
2354 +       innerproto = ip_ct_find_proto(inside.ip.protocol);
2355 +       dataoff = skb->nh.iph->ihl*4 + sizeof(inside.icmp) + inside.ip.ihl*4;
2356 +       /* Are they talking about one of our connections? */
2357 +       if (!get_tuple(&inside.ip, skb, dataoff, &origtuple, innerproto)) {
2358 +               DEBUGP("icmp_error: ! get_tuple p=%u", inside.ip.protocol);
2359 +               return NULL;
2360 +       }
2361 +
2362 +       /* Ordinarily, we'd expect the inverted tupleproto, but it's
2363 +          been preserved inside the ICMP. */
2364 +       if (!invert_tuple(&innertuple, &origtuple, innerproto)) {
2365 +               DEBUGP("icmp_error_track: Can't invert tuple\n");
2366 +               return NULL;
2367 +       }
2368 +
2369 +       *ctinfo = IP_CT_RELATED;
2370 +
2371 +       h = ip_conntrack_find_get(&innertuple, NULL);
2372 +       if (!h) {
2373 +               /* Locally generated ICMPs will match inverted if they
2374 +                  haven't been SNAT'ed yet */
2375 +               /* FIXME: NAT code has to handle half-done double NAT --RR */
2376 +               if (hooknum == NF_IP_LOCAL_OUT)
2377 +                       h = ip_conntrack_find_get(&origtuple, NULL);
2378 +
2379 +               if (!h) {
2380 +                       DEBUGP("icmp_error_track: no match\n");
2381 +                       return NULL;
2382 +               }
2383 +               /* Reverse direction from that found */
2384 +               if (DIRECTION(h) != IP_CT_DIR_REPLY)
2385 +                       *ctinfo += IP_CT_IS_REPLY;
2386 +       } else {
2387 +               if (DIRECTION(h) == IP_CT_DIR_REPLY)
2388 +                       *ctinfo += IP_CT_IS_REPLY;
2389 +       }
2390 +
2391 +       /* Update skb to refer to this connection */
2392 +       skb->nfct = &h->ctrack->infos[*ctinfo];
2393 +       return h->ctrack;
2394 +}
2395 +
2396 +/* There's a small race here where we may free a just-assured
2397 +   connection.  Too bad: we're in trouble anyway. */
2398 +static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
2399 +{
2400 +       return !(test_bit(IPS_ASSURED_BIT, &i->ctrack->status));
2401 +}
2402 +
2403 +static int early_drop(struct list_head *chain)
2404 +{
2405 +       /* Traverse backwards: gives us oldest, which is roughly LRU */
2406 +       struct ip_conntrack_tuple_hash *h;
2407 +       int dropped = 0;
2408 +
2409 +       READ_LOCK(&ip_conntrack_lock);
2410 +       h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
2411 +       if (h)
2412 +               atomic_inc(&h->ctrack->ct_general.use);
2413 +       READ_UNLOCK(&ip_conntrack_lock);
2414 +
2415 +       if (!h)
2416 +               return dropped;
2417 +
2418 +       if (del_timer(&h->ctrack->timeout)) {
2419 +               death_by_timeout((unsigned long)h->ctrack);
2420 +               dropped = 1;
2421 +       }
2422 +       ip_conntrack_put(h->ctrack);
2423 +       return dropped;
2424 +}
2425 +
2426 +static inline int helper_cmp(const struct ip_conntrack_helper *i,
2427 +                            const struct ip_conntrack_tuple *rtuple)
2428 +{
2429 +       return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
2430 +}
2431 +
2432 +struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple)
2433 +{
2434 +       return LIST_FIND(&helpers, helper_cmp,
2435 +                        struct ip_conntrack_helper *,
2436 +                        tuple);
2437 +}
2438 +
2439 +/* Allocate a new conntrack: we return -ENOMEM if classification
2440 +   failed due to stress.  Otherwise it really is unclassifiable. */
2441 +static struct ip_conntrack_tuple_hash *
2442 +init_conntrack(const struct ip_conntrack_tuple *tuple,
2443 +              struct ip_conntrack_protocol *protocol,
2444 +              struct sk_buff *skb)
2445 +{
2446 +       struct ip_conntrack *conntrack;
2447 +       struct ip_conntrack_tuple repl_tuple;
2448 +       size_t hash;
2449 +       struct ip_conntrack_expect *expected;
2450 +       int i;
2451 +       static unsigned int drop_next;
2452 +
2453 +       if (!ip_conntrack_hash_rnd_initted) {
2454 +               get_random_bytes(&ip_conntrack_hash_rnd, 4);
2455 +               ip_conntrack_hash_rnd_initted = 1;
2456 +       }
2457 +
2458 +       hash = hash_conntrack(tuple);
2459 +
2460 +       if (ip_conntrack_max &&
2461 +           atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
2462 +               /* Try dropping from random chain, or else from the
2463 +                   chain we're about to put into (in case they're trying to
2464 +                   bomb one hash chain). */
2465 +               unsigned int next = (drop_next++)%ip_conntrack_htable_size;
2466 +
2467 +               if (!early_drop(&ip_conntrack_hash[next])
2468 +                   && !early_drop(&ip_conntrack_hash[hash])) {
2469 +                       if (net_ratelimit())
2470 +                               printk(KERN_WARNING
2471 +                                      "ip_conntrack: table full, dropping"
2472 +                                      " packet.\n");
2473 +                       return ERR_PTR(-ENOMEM);
2474 +               }
2475 +       }
2476 +
2477 +       if (!invert_tuple(&repl_tuple, tuple, protocol)) {
2478 +               DEBUGP("Can't invert tuple.\n");
2479 +               return NULL;
2480 +       }
2481 +
2482 +       conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
2483 +       if (!conntrack) {
2484 +               DEBUGP("Can't allocate conntrack.\n");
2485 +               return ERR_PTR(-ENOMEM);
2486 +       }
2487 +
2488 +       memset(conntrack, 0, sizeof(*conntrack));
2489 +       atomic_set(&conntrack->ct_general.use, 1);
2490 +       conntrack->ct_general.destroy = destroy_conntrack;
2491 +       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
2492 +       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
2493 +       conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
2494 +       conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
2495 +       for (i=0; i < IP_CT_NUMBER; i++)
2496 +               conntrack->infos[i].master = &conntrack->ct_general;
2497 +
2498 +       if (!protocol->new(conntrack, skb)) {
2499 +               kmem_cache_free(ip_conntrack_cachep, conntrack);
2500 +               return NULL;
2501 +       }
2502 +       /* Don't set timer yet: wait for confirmation */
2503 +       init_timer(&conntrack->timeout);
2504 +       conntrack->timeout.data = (unsigned long)conntrack;
2505 +       conntrack->timeout.function = death_by_timeout;
2506 +
2507 +       INIT_LIST_HEAD(&conntrack->sibling_list);
2508 +
2509 +       WRITE_LOCK(&ip_conntrack_lock);
2510 +       /* Need finding and deleting of expected ONLY if we win race */
2511 +       READ_LOCK(&ip_conntrack_expect_tuple_lock);
2512 +       expected = LIST_FIND(&ip_conntrack_expect_list, expect_cmp,
2513 +                            struct ip_conntrack_expect *, tuple);
2514 +       READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
2515 +
2516 +       if (expected) {
2517 +               /* If master is not in hash table yet (ie. packet hasn't left
2518 +                  this machine yet), how can other end know about expected?
2519 +                  Hence these are not the droids you are looking for (if
2520 +                  master ct never got confirmed, we'd hold a reference to it
2521 +                  and weird things would happen to future packets). */
2522 +               if (!is_confirmed(expected->expectant)) {
2523 +                       
2524 +                       conntrack->helper = ip_ct_find_helper(&repl_tuple);
2525 +                       goto end;
2526 +               }
2527 +
2528 +               /* Expectation is dying... */
2529 +               if (expected->expectant->helper->timeout
2530 +                   && ! del_timer(&expected->timeout)) {
2531 +                       goto end;       
2532 +               }
2533 +
2534 +                DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
2535 +                       conntrack, expected);
2536 +                /* Welcome, Mr. Bond.  We've been expecting you... */
2537 +                IP_NF_ASSERT(master_ct(conntrack));
2538 +                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
2539 +                conntrack->master = expected;
2540 +                expected->sibling = conntrack;
2541 +                LIST_DELETE(&ip_conntrack_expect_list, expected);
2542 +                expected->expectant->expecting--;
2543 +                nf_conntrack_get(&master_ct(conntrack)->infos[0]);
2544 +
2545 +               /* this is a braindead... --pablo */
2546 +               atomic_inc(&ip_conntrack_count);
2547 +               WRITE_UNLOCK(&ip_conntrack_lock);
2548 +
2549 +               if (expected->expectfn)
2550 +                       expected->expectfn(conntrack);
2551 +
2552 +               goto ret;
2553 +        } else 
2554 +                conntrack->helper = ip_ct_find_helper(&repl_tuple);
2555 +
2556 +end:   atomic_inc(&ip_conntrack_count);
2557 +       WRITE_UNLOCK(&ip_conntrack_lock);
2558 +
2559 +ret:   return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
2560 +}
2561 +
2562 +/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
2563 +static inline struct ip_conntrack *
2564 +resolve_normal_ct(struct sk_buff *skb,
2565 +                 struct ip_conntrack_protocol *proto,
2566 +                 int *set_reply,
2567 +                 unsigned int hooknum,
2568 +                 enum ip_conntrack_info *ctinfo)
2569 +{
2570 +       struct ip_conntrack_tuple tuple;
2571 +       struct ip_conntrack_tuple_hash *h;
2572 +
2573 +       IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
2574 +
2575 +       if (!get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, &tuple, proto))
2576 +               return NULL;
2577 +
2578 +       /* look for tuple match */
2579 +       h = ip_conntrack_find_get(&tuple, NULL);
2580 +       if (!h) {
2581 +               h = init_conntrack(&tuple, proto, skb);
2582 +               if (!h)
2583 +                       return NULL;
2584 +               if (IS_ERR(h))
2585 +                       return (void *)h;
2586 +       }
2587 +
2588 +       /* It exists; we have (non-exclusive) reference. */
2589 +       if (DIRECTION(h) == IP_CT_DIR_REPLY) {
2590 +               *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
2591 +               /* Please set reply bit if this packet OK */
2592 +               *set_reply = 1;
2593 +       } else {
2594 +               /* Once we've had two way comms, always ESTABLISHED. */
2595 +               if (test_bit(IPS_SEEN_REPLY_BIT, &h->ctrack->status)) {
2596 +                       DEBUGP("ip_conntrack_in: normal packet for %p\n",
2597 +                              h->ctrack);
2598 +                       *ctinfo = IP_CT_ESTABLISHED;
2599 +               } else if (test_bit(IPS_EXPECTED_BIT, &h->ctrack->status)) {
2600 +                       DEBUGP("ip_conntrack_in: related packet for %p\n",
2601 +                              h->ctrack);
2602 +                       *ctinfo = IP_CT_RELATED;
2603 +               } else {
2604 +                       DEBUGP("ip_conntrack_in: new packet for %p\n",
2605 +                              h->ctrack);
2606 +                       *ctinfo = IP_CT_NEW;
2607 +               }
2608 +               *set_reply = 0;
2609 +       }
2610 +       skb->nfct = &h->ctrack->infos[*ctinfo];
2611 +       return h->ctrack;
2612 +}
2613 +
2614 +/* Netfilter hook itself. */
2615 +unsigned int ip_conntrack_in(unsigned int hooknum,
2616 +                            struct sk_buff **pskb,
2617 +                            const struct net_device *in,
2618 +                            const struct net_device *out,
2619 +                            int (*okfn)(struct sk_buff *))
2620 +{
2621 +       struct ip_conntrack *ct;
2622 +       enum ip_conntrack_info ctinfo;
2623 +       struct ip_conntrack_protocol *proto;
2624 +       int set_reply;
2625 +       int ret;
2626 +
2627 +       /* FIXME: Do this right please. --RR */
2628 +       (*pskb)->nfcache |= NFC_UNKNOWN;
2629 +
2630 +/* Doesn't cover locally-generated broadcast, so not worth it. */
2631 +#if 0
2632 +       /* Ignore broadcast: no `connection'. */
2633 +       if ((*pskb)->pkt_type == PACKET_BROADCAST) {
2634 +               printk("Broadcast packet!\n");
2635 +               return NF_ACCEPT;
2636 +       } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) 
2637 +                  == htonl(0x000000FF)) {
2638 +               printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
2639 +                      NIPQUAD((*pskb)->nh.iph->saddr),
2640 +                      NIPQUAD((*pskb)->nh.iph->daddr),
2641 +                      (*pskb)->sk, (*pskb)->pkt_type);
2642 +       }
2643 +#endif
2644 +
2645 +       /* Previously seen (loopback)?  Ignore.  Do this before
2646 +           fragment check. */
2647 +       if ((*pskb)->nfct)
2648 +               return NF_ACCEPT;
2649 +
2650 +       /* Gather fragments. */
2651 +       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
2652 +               *pskb = ip_ct_gather_frags(*pskb);
2653 +               if (!*pskb)
2654 +                       return NF_STOLEN;
2655 +       }
2656 +
2657 +       proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);
2658 +
2659 +       /* It may be an icmp error... */
2660 +       if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP 
2661 +           && icmp_error_track(*pskb, &ctinfo, hooknum))
2662 +               return NF_ACCEPT;
2663 +
2664 +       if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo)))
2665 +               /* Not valid part of a connection */
2666 +               return NF_ACCEPT;
2667 +
2668 +       if (IS_ERR(ct))
2669 +               /* Too stressed to deal. */
2670 +               return NF_DROP;
2671 +
2672 +       IP_NF_ASSERT((*pskb)->nfct);
2673 +
2674 +       ret = proto->packet(ct, *pskb, ctinfo);
2675 +       if (ret == -1) {
2676 +               /* Invalid */
2677 +               nf_conntrack_put((*pskb)->nfct);
2678 +               (*pskb)->nfct = NULL;
2679 +               return NF_ACCEPT;
2680 +       }
2681 +
2682 +       if (ret != NF_DROP && ct->helper) {
2683 +               ret = ct->helper->help(*pskb, ct, ctinfo);
2684 +               if (ret == -1) {
2685 +                       /* Invalid */
2686 +                       nf_conntrack_put((*pskb)->nfct);
2687 +                       (*pskb)->nfct = NULL;
2688 +                       return NF_ACCEPT;
2689 +               }
2690 +       }
2691 +       if (set_reply)
2692 +               set_bit(IPS_SEEN_REPLY_BIT, &ct->status);
2693 +
2694 +       return ret;
2695 +}
2696 +
2697 +int invert_tuplepr(struct ip_conntrack_tuple *inverse,
2698 +                  const struct ip_conntrack_tuple *orig)
2699 +{
2700 +       return invert_tuple(inverse, orig, ip_ct_find_proto(orig->dst.protonum));
2701 +}
2702 +
2703 +static inline int resent_expect(const struct ip_conntrack_expect *i,
2704 +                               const struct ip_conntrack_tuple *tuple,
2705 +                               const struct ip_conntrack_tuple *mask)
2706 +{
2707 +       DEBUGP("resent_expect\n");
2708 +       DEBUGP("   tuple:   "); DUMP_TUPLE(&i->tuple);
2709 +       DEBUGP("ct_tuple:   "); DUMP_TUPLE(&i->ct_tuple);
2710 +       DEBUGP("test tuple: "); DUMP_TUPLE(tuple);
2711 +       return (((i->ct_tuple.dst.protonum == 0 && ip_ct_tuple_equal(&i->tuple, tuple))
2712 +                || (i->ct_tuple.dst.protonum && ip_ct_tuple_equal(&i->ct_tuple, tuple)))
2713 +               && ip_ct_tuple_equal(&i->mask, mask));
2714 +}
2715 +
2716 +/* Would two expected things clash? */
2717 +static inline int expect_clash(const struct ip_conntrack_expect *i,
2718 +                              const struct ip_conntrack_tuple *tuple,
2719 +                              const struct ip_conntrack_tuple *mask)
2720 +{
2721 +       /* Part covered by intersection of masks must be unequal,
2722 +           otherwise they clash */
2723 +       struct ip_conntrack_tuple intersect_mask
2724 +               = { { i->mask.src.ip & mask->src.ip,
2725 +                     { i->mask.src.u.all & mask->src.u.all } },
2726 +                   { i->mask.dst.ip & mask->dst.ip,
2727 +                     { i->mask.dst.u.all & mask->dst.u.all },
2728 +                     i->mask.dst.protonum & mask->dst.protonum } };
2729 +
2730 +       return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask);
2731 +}
2732 +
2733 +inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect)
2734 +{
2735 +       WRITE_LOCK(&ip_conntrack_lock);
2736 +       unexpect_related(expect);
2737 +       WRITE_UNLOCK(&ip_conntrack_lock);
2738 +}
2739 +       
2740 +static void expectation_timed_out(unsigned long ul_expect)
2741 +{
2742 +       struct ip_conntrack_expect *expect = (void *) ul_expect;
2743 +
2744 +       DEBUGP("expectation %p timed out\n", expect);   
2745 +       WRITE_LOCK(&ip_conntrack_lock);
2746 +       __unexpect_related(expect);
2747 +       WRITE_UNLOCK(&ip_conntrack_lock);
2748 +}
2749 +
2750 +/* Add a related connection. */
2751 +int ip_conntrack_expect_related(struct ip_conntrack *related_to,
2752 +                               struct ip_conntrack_expect *expect)
2753 +{
2754 +       struct ip_conntrack_expect *old, *new;
2755 +       int ret = 0;
2756 +
2757 +       WRITE_LOCK(&ip_conntrack_lock);
2758 +       /* Because of the write lock, no reader can walk the lists,
2759 +        * so there is no need to use the tuple lock too */
2760 +
2761 +       DEBUGP("ip_conntrack_expect_related %p\n", related_to);
2762 +       DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
2763 +       DEBUGP("mask:  "); DUMP_TUPLE(&expect->mask);
2764 +
2765 +       old = LIST_FIND(&ip_conntrack_expect_list, resent_expect,
2766 +                       struct ip_conntrack_expect *, &expect->tuple, 
2767 +                       &expect->mask);
2768 +       if (old) {
2769 +               /* Helper private data may contain offsets but no pointers
2770 +                  pointing into the payload - otherwise we would have to copy 
2771 +                  the data filled out by the helper over the old one */
2772 +               DEBUGP("expect_related: resent packet\n");
2773 +               if (related_to->helper->timeout) {
2774 +                       if (!del_timer(&old->timeout)) {
2775 +                               /* expectation is dying. Fall through */
2776 +                               old = NULL;
2777 +                       } else {
2778 +                               old->timeout.expires = jiffies + 
2779 +                                       related_to->helper->timeout * HZ;
2780 +                               add_timer(&old->timeout);
2781 +                       }
2782 +               }
2783 +
2784 +               if (old) {
2785 +                       WRITE_UNLOCK(&ip_conntrack_lock);
2786 +                       return -EEXIST;
2787 +               }
2788 +       } else if (related_to->helper->max_expected && 
2789 +                  related_to->expecting >= related_to->helper->max_expected) {
2790 +               struct list_head *cur_item;
2791 +               /* old == NULL */
2792 +               if (!(related_to->helper->flags & 
2793 +                     IP_CT_HELPER_F_REUSE_EXPECT)) {
2794 +                       WRITE_UNLOCK(&ip_conntrack_lock);
2795 +                       if (net_ratelimit())
2796 +                               printk(KERN_WARNING
2797 +                                      "ip_conntrack: max number of expected "
2798 +                                      "connections %i of %s reached for "
2799 +                                      "%u.%u.%u.%u->%u.%u.%u.%u\n",
2800 +                                      related_to->helper->max_expected,
2801 +                                      related_to->helper->name,
2802 +                                      NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
2803 +                                      NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
2804 +                       return -EPERM;
2805 +               }
2806 +               DEBUGP("ip_conntrack: max number of expected "
2807 +                      "connections %i of %s reached for "
2808 +                      "%u.%u.%u.%u->%u.%u.%u.%u, reusing\n",
2809 +                      related_to->helper->max_expected,
2810 +                      related_to->helper->name,
2811 +                      NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
2812 +                      NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
2813
2814 +               /* choose the oldest expectation to evict */
2815 +               list_for_each(cur_item, &related_to->sibling_list) { 
2816 +                       struct ip_conntrack_expect *cur;
2817 +
2818 +                       cur = list_entry(cur_item, 
2819 +                                        struct ip_conntrack_expect,
2820 +                                        expected_list);
2821 +                       if (cur->sibling == NULL) {
2822 +                               old = cur;
2823 +                               break;
2824 +                       }
2825 +               }
2826 +
2827 +               /* (!old) cannot happen, since related_to->expecting is the
2828 +                * number of unconfirmed expects */
2829 +               IP_NF_ASSERT(old);
2830 +
2831 +               /* newnat14 does not reuse the real allocated memory
2832 +                * structures but rather unexpects the old and
2833 +                * allocates a new.  unexpect_related will decrement
2834 +                * related_to->expecting. 
2835 +                */
2836 +               unexpect_related(old);
2837 +               ret = -EPERM;
2838 +       } else if (LIST_FIND(&ip_conntrack_expect_list, expect_clash,
2839 +                            struct ip_conntrack_expect *, &expect->tuple, 
2840 +                            &expect->mask)) {
2841 +               WRITE_UNLOCK(&ip_conntrack_lock);
2842 +               DEBUGP("expect_related: busy!\n");
2843 +               return -EBUSY;
2844 +       }
2845 +       
2846 +       new = (struct ip_conntrack_expect *) 
2847 +             kmalloc(sizeof(struct ip_conntrack_expect), GFP_ATOMIC);
2848 +       if (!new) {
2849 +               WRITE_UNLOCK(&ip_conntrack_lock);
2850 +               DEBUGP("expect_relaed: OOM allocating expect\n");
2851 +               return -ENOMEM;
2852 +       }
2853 +       
2854 +       DEBUGP("new expectation %p of conntrack %p\n", new, related_to);
2855 +       memcpy(new, expect, sizeof(*expect));
2856 +       new->expectant = related_to;
2857 +       new->sibling = NULL;
2858 +       atomic_set(&new->use, 1);
2859 +       
2860 +       /* add to expected list for this connection */  
2861 +       list_add(&new->expected_list, &related_to->sibling_list);
2862 +       /* add to global list of expectations */
2863 +       list_prepend(&ip_conntrack_expect_list, &new->list);
2864 +       /* add and start timer if required */
2865 +       if (related_to->helper->timeout) {
2866 +               init_timer(&new->timeout);
2867 +               new->timeout.data = (unsigned long)new;
2868 +               new->timeout.function = expectation_timed_out;
2869 +               new->timeout.expires = jiffies + 
2870 +                                       related_to->helper->timeout * HZ;
2871 +               add_timer(&new->timeout);
2872 +       }
2873 +       related_to->expecting++;
2874 +
2875 +       WRITE_UNLOCK(&ip_conntrack_lock);
2876 +
2877 +       return ret;
2878 +}
2879 +
2880 +/* Change tuple in an existing expectation */
2881 +int ip_conntrack_change_expect(struct ip_conntrack_expect *expect,
2882 +                              struct ip_conntrack_tuple *newtuple)
2883 +{
2884 +       int ret;
2885 +
2886 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
2887 +       WRITE_LOCK(&ip_conntrack_expect_tuple_lock);
2888 +
2889 +       DEBUGP("change_expect:\n");
2890 +       DEBUGP("exp tuple: "); DUMP_TUPLE(&expect->tuple);
2891 +       DEBUGP("exp mask:  "); DUMP_TUPLE(&expect->mask);
2892 +       DEBUGP("newtuple:  "); DUMP_TUPLE(newtuple);
2893 +       if (expect->ct_tuple.dst.protonum == 0) {
2894 +               /* Never seen before */
2895 +               DEBUGP("change expect: never seen before\n");
2896 +               if (!ip_ct_tuple_equal(&expect->tuple, newtuple) 
2897 +                   && LIST_FIND(&ip_conntrack_expect_list, expect_clash,
2898 +                                struct ip_conntrack_expect *, newtuple, &expect->mask)) {
2899 +                       /* Force NAT to find an unused tuple */
2900 +                       ret = -1;
2901 +               } else {
2902 +                       memcpy(&expect->ct_tuple, &expect->tuple, sizeof(expect->tuple));
2903 +                       memcpy(&expect->tuple, newtuple, sizeof(expect->tuple));
2904 +                       ret = 0;
2905 +               }
2906 +       } else {
2907 +               /* Resent packet */
2908 +               DEBUGP("change expect: resent packet\n");
2909 +               if (ip_ct_tuple_equal(&expect->tuple, newtuple)) {
2910 +                       ret = 0;
2911 +               } else {
2912 +                       /* Force NAT to choose again the same port */
2913 +                       ret = -1;
2914 +               }
2915 +       }
2916 +       WRITE_UNLOCK(&ip_conntrack_expect_tuple_lock);
2917 +       
2918 +       return ret;
2919 +}
2920 +
2921 +/* Alter reply tuple (maybe alter helper).  If it's already taken,
2922 +   return 0 and don't do alteration. */
2923 +int ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
2924 +                            const struct ip_conntrack_tuple *newreply)
2925 +{
2926 +       WRITE_LOCK(&ip_conntrack_lock);
2927 +       if (__ip_conntrack_find(newreply, conntrack)) {
2928 +               WRITE_UNLOCK(&ip_conntrack_lock);
2929 +               return 0;
2930 +       }
2931 +       /* Should be unconfirmed, so not in hash table yet */
2932 +       IP_NF_ASSERT(!is_confirmed(conntrack));
2933 +
2934 +       DEBUGP("Altering reply tuple of %p to ", conntrack);
2935 +       DUMP_TUPLE(newreply);
2936 +
2937 +       conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
2938 +       if (!conntrack->master)
2939 +               conntrack->helper = LIST_FIND(&helpers, helper_cmp,
2940 +                                             struct ip_conntrack_helper *,
2941 +                                             newreply);
2942 +       WRITE_UNLOCK(&ip_conntrack_lock);
2943 +
2944 +       return 1;
2945 +}
2946 +
2947 +int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
2948 +{
2949 +       WRITE_LOCK(&ip_conntrack_lock);
2950 +       list_prepend(&helpers, me);
2951 +       WRITE_UNLOCK(&ip_conntrack_lock);
2952 +
2953 +       return 0;
2954 +}
2955 +
2956 +static inline int unhelp(struct ip_conntrack_tuple_hash *i,
2957 +                        const struct ip_conntrack_helper *me)
2958 +{
2959 +       if (i->ctrack->helper == me) {
2960 +               /* Get rid of any expected. */
2961 +               remove_expectations(i->ctrack, 0);
2962 +               /* And *then* set helper to NULL */
2963 +               i->ctrack->helper = NULL;
2964 +       }
2965 +       return 0;
2966 +}
2967 +
2968 +void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
2969 +{
2970 +       unsigned int i;
2971 +
2972 +       /* Need write lock here, to delete helper. */
2973 +       WRITE_LOCK(&ip_conntrack_lock);
2974 +       LIST_DELETE(&helpers, me);
2975 +
2976 +       /* Get rid of expecteds, set helpers to NULL. */
2977 +       for (i = 0; i < ip_conntrack_htable_size; i++)
2978 +               LIST_FIND_W(&ip_conntrack_hash[i], unhelp,
2979 +                           struct ip_conntrack_tuple_hash *, me);
2980 +       WRITE_UNLOCK(&ip_conntrack_lock);
2981 +
2982 +       /* Someone could be still looking at the helper in a bh. */
2983 +       synchronize_net();
2984 +}
2985 +
2986 +/* Refresh conntrack for this many jiffies. */
2987 +void ip_ct_refresh(struct ip_conntrack *ct, unsigned long extra_jiffies)
2988 +{
2989 +       IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
2990 +
2991 +       WRITE_LOCK(&ip_conntrack_lock);
2992 +       /* If not in hash table, timer will not be active yet */
2993 +       if (!is_confirmed(ct))
2994 +               ct->timeout.expires = extra_jiffies;
2995 +       else {
2996 +               /* Need del_timer for race avoidance (may already be dying). */
2997 +               if (del_timer(&ct->timeout)) {
2998 +                       ct->timeout.expires = jiffies + extra_jiffies;
2999 +                       add_timer(&ct->timeout);
3000 +               }
3001 +       }
3002 +       WRITE_UNLOCK(&ip_conntrack_lock);
3003 +}
3004 +
3005 +/* Returns new sk_buff, or NULL */
3006 +struct sk_buff *
3007 +ip_ct_gather_frags(struct sk_buff *skb)
3008 +{
3009 +       struct sock *sk = skb->sk;
3010 +#ifdef CONFIG_NETFILTER_DEBUG
3011 +       unsigned int olddebug = skb->nf_debug;
3012 +#endif
3013 +       if (sk) {
3014 +               sock_hold(sk);
3015 +               skb_orphan(skb);
3016 +       }
3017 +
3018 +       local_bh_disable(); 
3019 +       skb = ip_defrag(skb);
3020 +       local_bh_enable();
3021 +
3022 +       if (!skb) {
3023 +               if (sk)
3024 +                       sock_put(sk);
3025 +               return skb;
3026 +       }
3027 +
3028 +       if (sk) {
3029 +               skb_set_owner_w(skb, sk);
3030 +               sock_put(sk);
3031 +       }
3032 +
3033 +       ip_send_check(skb->nh.iph);
3034 +       skb->nfcache |= NFC_ALTERED;
3035 +#ifdef CONFIG_NETFILTER_DEBUG
3036 +       /* Packet path as if nothing had happened. */
3037 +       skb->nf_debug = olddebug;
3038 +#endif
3039 +       return skb;
3040 +}
3041 +
3042 +/* Used by ipt_REJECT. */
3043 +static void ip_conntrack_attach(struct sk_buff *nskb, struct nf_ct_info *nfct)
3044 +{
3045 +       struct ip_conntrack *ct;
3046 +       enum ip_conntrack_info ctinfo;
3047 +
3048 +       ct = __ip_conntrack_get(nfct, &ctinfo);
3049 +
3050 +       /* This ICMP is in reverse direction to the packet which
3051 +           caused it */
3052 +       if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
3053 +               ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
3054 +       else
3055 +               ctinfo = IP_CT_RELATED;
3056 +
3057 +       /* Attach new skbuff, and increment count */
3058 +       nskb->nfct = &ct->infos[ctinfo];
3059 +       atomic_inc(&ct->ct_general.use);
3060 +}
3061 +
3062 +static inline int
3063 +do_kill(const struct ip_conntrack_tuple_hash *i,
3064 +       int (*kill)(const struct ip_conntrack *i, void *data),
3065 +       void *data)
3066 +{
3067 +       return kill(i->ctrack, data);
3068 +}
3069 +
3070 +/* Bring out ya dead! */
3071 +static struct ip_conntrack_tuple_hash *
3072 +get_next_corpse(int (*kill)(const struct ip_conntrack *i, void *data),
3073 +               void *data, unsigned int *bucket)
3074 +{
3075 +       struct ip_conntrack_tuple_hash *h = NULL;
3076 +
3077 +       READ_LOCK(&ip_conntrack_lock);
3078 +       for (; !h && *bucket < ip_conntrack_htable_size; (*bucket)++) {
3079 +               h = LIST_FIND(&ip_conntrack_hash[*bucket], do_kill,
3080 +                             struct ip_conntrack_tuple_hash *, kill, data);
3081 +       }
3082 +       if (h)
3083 +               atomic_inc(&h->ctrack->ct_general.use);
3084 +       READ_UNLOCK(&ip_conntrack_lock);
3085 +
3086 +       return h;
3087 +}
3088 +
3089 +void
3090 +ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data),
3091 +                       void *data)
3092 +{
3093 +       struct ip_conntrack_tuple_hash *h;
3094 +       unsigned int bucket = 0;
3095 +
3096 +       while ((h = get_next_corpse(kill, data, &bucket)) != NULL) {
3097 +               /* Time to push up daisies... */
3098 +               if (del_timer(&h->ctrack->timeout))
3099 +                       death_by_timeout((unsigned long)h->ctrack);
3100 +               /* ... else the timer will get him soon. */
3101 +
3102 +               ip_conntrack_put(h->ctrack);
3103 +       }
3104 +}
3105 +
3106 +/* Fast function for those who don't want to parse /proc (and I don't
3107 +   blame them). */
3108 +/* Reversing the socket's dst/src point of view gives us the reply
3109 +   mapping. */
3110 +static int
3111 +getorigdst(struct sock *sk, int optval, void *user, int *len)
3112 +{
3113 +       struct inet_opt *inet = inet_sk(sk);
3114 +       struct ip_conntrack_tuple_hash *h;
3115 +       struct ip_conntrack_tuple tuple;
3116 +       
3117 +       IP_CT_TUPLE_U_BLANK(&tuple);
3118 +       tuple.src.ip = inet->rcv_saddr;
3119 +       tuple.src.u.tcp.port = inet->sport;
3120 +       tuple.dst.ip = inet->daddr;
3121 +       tuple.dst.u.tcp.port = inet->dport;
3122 +       tuple.dst.protonum = IPPROTO_TCP;
3123 +
3124 +       /* We only do TCP at the moment: is there a better way? */
3125 +       if (strcmp(sk->sk_prot->name, "TCP")) {
3126 +               DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
3127 +               return -ENOPROTOOPT;
3128 +       }
3129 +
3130 +       if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
3131 +               DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
3132 +                      *len, sizeof(struct sockaddr_in));
3133 +               return -EINVAL;
3134 +       }
3135 +
3136 +       h = ip_conntrack_find_get(&tuple, NULL);
3137 +       if (h) {
3138 +               struct sockaddr_in sin;
3139 +
3140 +               sin.sin_family = AF_INET;
3141 +               sin.sin_port = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
3142 +                       .tuple.dst.u.tcp.port;
3143 +               sin.sin_addr.s_addr = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
3144 +                       .tuple.dst.ip;
3145 +
3146 +               DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
3147 +                      NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
3148 +               ip_conntrack_put(h->ctrack);
3149 +               if (copy_to_user(user, &sin, sizeof(sin)) != 0)
3150 +                       return -EFAULT;
3151 +               else
3152 +                       return 0;
3153 +       }
3154 +       DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
3155 +              NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
3156 +              NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
3157 +       return -ENOENT;
3158 +}
3159 +
3160 +static struct nf_sockopt_ops so_getorigdst = {
3161 +       .pf             = PF_INET,
3162 +       .get_optmin     = SO_ORIGINAL_DST,
3163 +       .get_optmax     = SO_ORIGINAL_DST+1,
3164 +       .get            = &getorigdst,
3165 +};
3166 +
3167 +static int kill_all(const struct ip_conntrack *i, void *data)
3168 +{
3169 +       return 1;
3170 +}
3171 +
3172 +/* Mishearing the voices in his head, our hero wonders how he's
3173 +   supposed to kill the mall. */
3174 +void ip_conntrack_cleanup(void)
3175 +{
3176 +       ip_ct_attach = NULL;
3177 +       /* This makes sure all current packets have passed through
3178 +           netfilter framework.  Roll on, two-stage module
3179 +           delete... */
3180 +       synchronize_net();
3181
3182 + i_see_dead_people:
3183 +       ip_ct_selective_cleanup(kill_all, NULL);
3184 +       if (atomic_read(&ip_conntrack_count) != 0) {
3185 +               schedule();
3186 +               goto i_see_dead_people;
3187 +       }
3188 +
3189 +       kmem_cache_destroy(ip_conntrack_cachep);
3190 +       vfree(ip_conntrack_hash);
3191 +       nf_unregister_sockopt(&so_getorigdst);
3192 +}
3193 +
3194 +static int hashsize;
3195 +MODULE_PARM(hashsize, "i");
3196 +
3197 +int __init ip_conntrack_init(void)
3198 +{
3199 +       unsigned int i;
3200 +       int ret;
3201 +
3202 +       /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
3203 +        * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
3204 +       if (hashsize) {
3205 +               ip_conntrack_htable_size = hashsize;
3206 +       } else {
3207 +               ip_conntrack_htable_size
3208 +                       = (((num_physpages << PAGE_SHIFT) / 16384)
3209 +                          / sizeof(struct list_head));
3210 +               if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
3211 +                       ip_conntrack_htable_size = 8192;
3212 +               if (ip_conntrack_htable_size < 16)
3213 +                       ip_conntrack_htable_size = 16;
3214 +       }
3215 +       ip_conntrack_max = 8 * ip_conntrack_htable_size;
3216 +
3217 +       printk("ip_conntrack version %s (%u buckets, %d max)"
3218 +              " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
3219 +              ip_conntrack_htable_size, ip_conntrack_max,
3220 +              sizeof(struct ip_conntrack));
3221 +
3222 +       ret = nf_register_sockopt(&so_getorigdst);
3223 +       if (ret != 0) {
3224 +               printk(KERN_ERR "Unable to register netfilter socket option\n");
3225 +               return ret;
3226 +       }
3227 +
3228 +       ip_conntrack_hash = vmalloc(sizeof(struct list_head)
3229 +                                   * ip_conntrack_htable_size);
3230 +       if (!ip_conntrack_hash) {
3231 +               printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
3232 +               goto err_unreg_sockopt;
3233 +       }
3234 +
3235 +       ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
3236 +                                               sizeof(struct ip_conntrack), 0,
3237 +                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
3238 +       if (!ip_conntrack_cachep) {
3239 +               printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
3240 +               goto err_free_hash;
3241 +       }
3242 +       /* Don't NEED lock here, but good form anyway. */
3243 +       WRITE_LOCK(&ip_conntrack_lock);
3244 +       /* Sew in builtin protocols. */
3245 +       list_append(&protocol_list, &ip_conntrack_protocol_tcp);
3246 +       list_append(&protocol_list, &ip_conntrack_protocol_udp);
3247 +       list_append(&protocol_list, &ip_conntrack_protocol_icmp);
3248 +       WRITE_UNLOCK(&ip_conntrack_lock);
3249 +
3250 +       for (i = 0; i < ip_conntrack_htable_size; i++)
3251 +               INIT_LIST_HEAD(&ip_conntrack_hash[i]);
3252 +
3253 +       /* For use by ipt_REJECT */
3254 +       ip_ct_attach = ip_conntrack_attach;
3255 +       return ret;
3256 +
3257 +err_free_hash:
3258 +       vfree(ip_conntrack_hash);
3259 +err_unreg_sockopt:
3260 +       nf_unregister_sockopt(&so_getorigdst);
3261 +
3262 +       return -ENOMEM;
3263 +}
3264 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_standalone.c
3265 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_standalone.c    2004-03-04 06:16:44.000000000 +0000
3266 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_standalone.c        2004-03-05 07:39:43.000000000 +0000
3267 @@ -519,13 +519,20 @@
3268         return ret;
3269  }
3270  
3271 -/* FIXME: Allow NULL functions and sub in pointers to generic for
3272 -   them. --RR */
3273 +/**
3274 + * ip_conntrack_protocol_register - Register layer 4 protocol helper
3275 + * @proto: structure describing this layer 4 protocol helper
3276 + *
3277 + * This function is called by layer 4 protocol helpers to register 
3278 + * themselves with the conntrack core.
3279 + */
3280  int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
3281  {
3282         int ret = 0;
3283         struct list_head *i;
3284  
3285 +       /* FIXME: Allow NULL functions and sub in pointers to generic for
3286 +          them. --RR */
3287         WRITE_LOCK(&ip_conntrack_lock);
3288         list_for_each(i, &protocol_list) {
3289                 if (((struct ip_conntrack_protocol *)i)->proto
3290 @@ -542,12 +549,20 @@
3291         return ret;
3292  }
3293  
3294 +/**
3295 + * ip_conntrack_protocol_unregister - Unregister layer 4 protocol helper
3296 + * @proto: structure describing this layer 4 protocol helper
3297 + *
3298 + * This function is called by layer 4 protocol helpers to unregister
3299 + * themselves from the conntrack core.  Please note that all conntrack
3300 + * entries for this protocol are deleted from the conntrack hash table.
3301 + */
3302  void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
3303  {
3304         WRITE_LOCK(&ip_conntrack_lock);
3305  
3306 -       /* ip_ct_find_proto() returns proto_generic in case there is no protocol 
3307 -        * helper. So this should be enough - HW */
3308 +       /* ip_ct_find_proto() returns proto_generic in case there is no
3309 +        * protocol helper. So this should be enough - HW */
3310         LIST_DELETE(&protocol_list, proto);
3311         WRITE_UNLOCK(&ip_conntrack_lock);
3312         
3313 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_standalone.c.orig linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_standalone.c.orig
3314 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_conntrack_standalone.c.orig       1970-01-01 00:00:00.000000000 +0000
3315 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_conntrack_standalone.c.orig   2004-03-04 06:16:44.000000000 +0000
3316 @@ -0,0 +1,606 @@
3317 +/* This file contains all the functions required for the standalone
3318 +   ip_conntrack module.
3319 +
3320 +   These are not required by the compatibility layer.
3321 +*/
3322 +
3323 +/* (C) 1999-2001 Paul `Rusty' Russell
3324 + * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
3325 + *
3326 + * This program is free software; you can redistribute it and/or modify
3327 + * it under the terms of the GNU General Public License version 2 as
3328 + * published by the Free Software Foundation.
3329 + */
3330 +
3331 +#include <linux/config.h>
3332 +#include <linux/types.h>
3333 +#include <linux/ip.h>
3334 +#include <linux/netfilter.h>
3335 +#include <linux/netfilter_ipv4.h>
3336 +#include <linux/module.h>
3337 +#include <linux/skbuff.h>
3338 +#include <linux/proc_fs.h>
3339 +#ifdef CONFIG_SYSCTL
3340 +#include <linux/sysctl.h>
3341 +#endif
3342 +#include <net/checksum.h>
3343 +
3344 +#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
3345 +#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
3346 +
3347 +#include <linux/netfilter_ipv4/ip_conntrack.h>
3348 +#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
3349 +#include <linux/netfilter_ipv4/ip_conntrack_core.h>
3350 +#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
3351 +#include <linux/netfilter_ipv4/listhelp.h>
3352 +
3353 +#if 0
3354 +#define DEBUGP printk
3355 +#else
3356 +#define DEBUGP(format, args...)
3357 +#endif
3358 +
3359 +MODULE_LICENSE("GPL");
3360 +
3361 +static int kill_proto(const struct ip_conntrack *i, void *data)
3362 +{
3363 +       return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 
3364 +                       *((u_int8_t *) data));
3365 +}
3366 +
3367 +static unsigned int
3368 +print_tuple(char *buffer, const struct ip_conntrack_tuple *tuple,
3369 +           struct ip_conntrack_protocol *proto)
3370 +{
3371 +       int len;
3372 +
3373 +       len = sprintf(buffer, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
3374 +                     NIPQUAD(tuple->src.ip), NIPQUAD(tuple->dst.ip));
3375 +
3376 +       len += proto->print_tuple(buffer + len, tuple);
3377 +
3378 +       return len;
3379 +}
3380 +
3381 +/* FIXME: Don't print source proto part. --RR */
3382 +static unsigned int
3383 +print_expect(char *buffer, const struct ip_conntrack_expect *expect)
3384 +{
3385 +       unsigned int len;
3386 +
3387 +       if (expect->expectant->helper->timeout)
3388 +               len = sprintf(buffer, "EXPECTING: %lu ",
3389 +                             timer_pending(&expect->timeout)
3390 +                             ? (expect->timeout.expires - jiffies)/HZ : 0);
3391 +       else
3392 +               len = sprintf(buffer, "EXPECTING: - ");
3393 +       len += sprintf(buffer + len, "use=%u proto=%u ",
3394 +                     atomic_read(&expect->use), expect->tuple.dst.protonum);
3395 +       len += print_tuple(buffer + len, &expect->tuple,
3396 +                          __ip_ct_find_proto(expect->tuple.dst.protonum));
3397 +       len += sprintf(buffer + len, "\n");
3398 +       return len;
3399 +}
3400 +
3401 +static unsigned int
3402 +print_conntrack(char *buffer, struct ip_conntrack *conntrack)
3403 +{
3404 +       unsigned int len;
3405 +       struct ip_conntrack_protocol *proto
3406 +               = __ip_ct_find_proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
3407 +                              .tuple.dst.protonum);
3408 +
3409 +       len = sprintf(buffer, "%-8s %u %lu ",
3410 +                     proto->name,
3411 +                     conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
3412 +                     .tuple.dst.protonum,
3413 +                     timer_pending(&conntrack->timeout)
3414 +                     ? (conntrack->timeout.expires - jiffies)/HZ : 0);
3415 +
3416 +       len += proto->print_conntrack(buffer + len, conntrack);
3417 +       len += print_tuple(buffer + len,
3418 +                          &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3419 +                          proto);
3420 +       if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
3421 +               len += sprintf(buffer + len, "[UNREPLIED] ");
3422 +       len += print_tuple(buffer + len,
3423 +                          &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
3424 +                          proto);
3425 +       if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
3426 +               len += sprintf(buffer + len, "[ASSURED] ");
3427 +       len += sprintf(buffer + len, "use=%u ",
3428 +                      atomic_read(&conntrack->ct_general.use));
3429 +       len += sprintf(buffer + len, "\n");
3430 +
3431 +       return len;
3432 +}
3433 +
3434 +/* Returns true when finished. */
3435 +static inline int
3436 +conntrack_iterate(const struct ip_conntrack_tuple_hash *hash,
3437 +                 char *buffer, off_t offset, off_t *upto,
3438 +                 unsigned int *len, unsigned int maxlen)
3439 +{
3440 +       unsigned int newlen;
3441 +       IP_NF_ASSERT(hash->ctrack);
3442 +
3443 +       MUST_BE_READ_LOCKED(&ip_conntrack_lock);
3444 +
3445 +       /* Only count originals */
3446 +       if (DIRECTION(hash))
3447 +               return 0;
3448 +
3449 +       if ((*upto)++ < offset)
3450 +               return 0;
3451 +
3452 +       newlen = print_conntrack(buffer + *len, hash->ctrack);
3453 +       if (*len + newlen > maxlen)
3454 +               return 1;
3455 +       else *len += newlen;
3456 +
3457 +       return 0;
3458 +}
3459 +
3460 +static int
3461 +list_conntracks(char *buffer, char **start, off_t offset, int length)
3462 +{
3463 +       unsigned int i;
3464 +       unsigned int len = 0;
3465 +       off_t upto = 0;
3466 +       struct list_head *e;
3467 +
3468 +       READ_LOCK(&ip_conntrack_lock);
3469 +       /* Traverse hash; print originals then reply. */
3470 +       for (i = 0; i < ip_conntrack_htable_size; i++) {
3471 +               if (LIST_FIND(&ip_conntrack_hash[i], conntrack_iterate,
3472 +                             struct ip_conntrack_tuple_hash *,
3473 +                             buffer, offset, &upto, &len, length))
3474 +                       goto finished;
3475 +       }
3476 +
3477 +       /* Now iterate through expecteds. */
3478 +       READ_LOCK(&ip_conntrack_expect_tuple_lock);
3479 +       list_for_each(e, &ip_conntrack_expect_list) {
3480 +               unsigned int last_len;
3481 +               struct ip_conntrack_expect *expect
3482 +                       = (struct ip_conntrack_expect *)e;
3483 +               if (upto++ < offset) continue;
3484 +
3485 +               last_len = len;
3486 +               len += print_expect(buffer + len, expect);
3487 +               if (len > length) {
3488 +                       len = last_len;
3489 +                       goto finished_expects;
3490 +               }
3491 +       }
3492 +
3493 + finished_expects:
3494 +       READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
3495 + finished:
3496 +       READ_UNLOCK(&ip_conntrack_lock);
3497 +
3498 +       /* `start' hack - see fs/proc/generic.c line ~165 */
3499 +       *start = (char *)((unsigned int)upto - offset);
3500 +       return len;
3501 +}
3502 +
3503 +static unsigned int ip_confirm(unsigned int hooknum,
3504 +                              struct sk_buff **pskb,
3505 +                              const struct net_device *in,
3506 +                              const struct net_device *out,
3507 +                              int (*okfn)(struct sk_buff *))
3508 +{
3509 +       /* We've seen it coming out the other side: confirm it */
3510 +       return ip_conntrack_confirm(*pskb);
3511 +}
3512 +
3513 +static unsigned int ip_refrag(unsigned int hooknum,
3514 +                             struct sk_buff **pskb,
3515 +                             const struct net_device *in,
3516 +                             const struct net_device *out,
3517 +                             int (*okfn)(struct sk_buff *))
3518 +{
3519 +       struct rtable *rt = (struct rtable *)(*pskb)->dst;
3520 +
3521 +       /* We've seen it coming out the other side: confirm */
3522 +       if (ip_confirm(hooknum, pskb, in, out, okfn) != NF_ACCEPT)
3523 +               return NF_DROP;
3524 +
3525 +       /* Local packets are never produced too large for their
3526 +          interface.  We degfragment them at LOCAL_OUT, however,
3527 +          so we have to refragment them here. */
3528 +       if ((*pskb)->len > dst_pmtu(&rt->u.dst) &&
3529 +           !skb_shinfo(*pskb)->tso_size) {
3530 +               /* No hook can be after us, so this should be OK. */
3531 +               ip_fragment(*pskb, okfn);
3532 +               return NF_STOLEN;
3533 +       }
3534 +       return NF_ACCEPT;
3535 +}
3536 +
3537 +static unsigned int ip_conntrack_local(unsigned int hooknum,
3538 +                                      struct sk_buff **pskb,
3539 +                                      const struct net_device *in,
3540 +                                      const struct net_device *out,
3541 +                                      int (*okfn)(struct sk_buff *))
3542 +{
3543 +       /* root is playing with raw sockets. */
3544 +       if ((*pskb)->len < sizeof(struct iphdr)
3545 +           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
3546 +               if (net_ratelimit())
3547 +                       printk("ipt_hook: happy cracking.\n");
3548 +               return NF_ACCEPT;
3549 +       }
3550 +       return ip_conntrack_in(hooknum, pskb, in, out, okfn);
3551 +}
3552 +
3553 +/* Connection tracking may drop packets, but never alters them, so
3554 +   make it the first hook. */
3555 +static struct nf_hook_ops ip_conntrack_in_ops = {
3556 +       .hook           = ip_conntrack_in,
3557 +       .owner          = THIS_MODULE,
3558 +       .pf             = PF_INET,
3559 +       .hooknum        = NF_IP_PRE_ROUTING,
3560 +       .priority       = NF_IP_PRI_CONNTRACK,
3561 +};
3562 +
3563 +static struct nf_hook_ops ip_conntrack_local_out_ops = {
3564 +       .hook           = ip_conntrack_local,
3565 +       .owner          = THIS_MODULE,
3566 +       .pf             = PF_INET,
3567 +       .hooknum        = NF_IP_LOCAL_OUT,
3568 +       .priority       = NF_IP_PRI_CONNTRACK,
3569 +};
3570 +
3571 +/* Refragmenter; last chance. */
3572 +static struct nf_hook_ops ip_conntrack_out_ops = {
3573 +       .hook           = ip_refrag,
3574 +       .owner          = THIS_MODULE,
3575 +       .pf             = PF_INET,
3576 +       .hooknum        = NF_IP_POST_ROUTING,
3577 +       .priority       = NF_IP_PRI_LAST,
3578 +};
3579 +
3580 +static struct nf_hook_ops ip_conntrack_local_in_ops = {
3581 +       .hook           = ip_confirm,
3582 +       .owner          = THIS_MODULE,
3583 +       .pf             = PF_INET,
3584 +       .hooknum        = NF_IP_LOCAL_IN,
3585 +       .priority       = NF_IP_PRI_LAST-1,
3586 +};
3587 +
3588 +/* Sysctl support */
3589 +
3590 +#ifdef CONFIG_SYSCTL
3591 +
3592 +/* From ip_conntrack_core.c */
3593 +extern int ip_conntrack_max;
3594 +extern unsigned int ip_conntrack_htable_size;
3595 +
3596 +/* From ip_conntrack_proto_tcp.c */
3597 +extern unsigned long ip_ct_tcp_timeout_syn_sent;
3598 +extern unsigned long ip_ct_tcp_timeout_syn_recv;
3599 +extern unsigned long ip_ct_tcp_timeout_established;
3600 +extern unsigned long ip_ct_tcp_timeout_fin_wait;
3601 +extern unsigned long ip_ct_tcp_timeout_close_wait;
3602 +extern unsigned long ip_ct_tcp_timeout_last_ack;
3603 +extern unsigned long ip_ct_tcp_timeout_time_wait;
3604 +extern unsigned long ip_ct_tcp_timeout_close;
3605 +
3606 +/* From ip_conntrack_proto_udp.c */
3607 +extern unsigned long ip_ct_udp_timeout;
3608 +extern unsigned long ip_ct_udp_timeout_stream;
3609 +
3610 +/* From ip_conntrack_proto_icmp.c */
3611 +extern unsigned long ip_ct_icmp_timeout;
3612 +
3613 +/* From ip_conntrack_proto_icmp.c */
3614 +extern unsigned long ip_ct_generic_timeout;
3615 +
3616 +static struct ctl_table_header *ip_ct_sysctl_header;
3617 +
3618 +static ctl_table ip_ct_sysctl_table[] = {
3619 +       {
3620 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_MAX,
3621 +               .procname       = "ip_conntrack_max",
3622 +               .data           = &ip_conntrack_max,
3623 +               .maxlen         = sizeof(int),
3624 +               .mode           = 0644,
3625 +               .proc_handler   = &proc_dointvec,
3626 +       },
3627 +       {
3628 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_BUCKETS,
3629 +               .procname       = "ip_conntrack_buckets",
3630 +               .data           = &ip_conntrack_htable_size,
3631 +               .maxlen         = sizeof(unsigned int),
3632 +               .mode           = 0444,
3633 +               .proc_handler   = &proc_dointvec,
3634 +       },
3635 +       {
3636 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
3637 +               .procname       = "ip_conntrack_tcp_timeout_syn_sent",
3638 +               .data           = &ip_ct_tcp_timeout_syn_sent,
3639 +               .maxlen         = sizeof(unsigned int),
3640 +               .mode           = 0644,
3641 +               .proc_handler   = &proc_dointvec_jiffies,
3642 +       },
3643 +       {
3644 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
3645 +               .procname       = "ip_conntrack_tcp_timeout_syn_recv",
3646 +               .data           = &ip_ct_tcp_timeout_syn_recv,
3647 +               .maxlen         = sizeof(unsigned int),
3648 +               .mode           = 0644,
3649 +               .proc_handler   = &proc_dointvec_jiffies,
3650 +       },
3651 +       {
3652 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
3653 +               .procname       = "ip_conntrack_tcp_timeout_established",
3654 +               .data           = &ip_ct_tcp_timeout_established,
3655 +               .maxlen         = sizeof(unsigned int),
3656 +               .mode           = 0644,
3657 +               .proc_handler   = &proc_dointvec_jiffies,
3658 +       },
3659 +       {
3660 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
3661 +               .procname       = "ip_conntrack_tcp_timeout_fin_wait",
3662 +               .data           = &ip_ct_tcp_timeout_fin_wait,
3663 +               .maxlen         = sizeof(unsigned int),
3664 +               .mode           = 0644,
3665 +               .proc_handler   = &proc_dointvec_jiffies,
3666 +       },
3667 +       {
3668 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
3669 +               .procname       = "ip_conntrack_tcp_timeout_close_wait",
3670 +               .data           = &ip_ct_tcp_timeout_close_wait,
3671 +               .maxlen         = sizeof(unsigned int),
3672 +               .mode           = 0644,
3673 +               .proc_handler   = &proc_dointvec_jiffies,
3674 +       },
3675 +       {
3676 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
3677 +               .procname       = "ip_conntrack_tcp_timeout_last_ack",
3678 +               .data           = &ip_ct_tcp_timeout_last_ack,
3679 +               .maxlen         = sizeof(unsigned int),
3680 +               .mode           = 0644,
3681 +               .proc_handler   = &proc_dointvec_jiffies,
3682 +       },
3683 +       {
3684 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
3685 +               .procname       = "ip_conntrack_tcp_timeout_time_wait",
3686 +               .data           = &ip_ct_tcp_timeout_time_wait,
3687 +               .maxlen         = sizeof(unsigned int),
3688 +               .mode           = 0644,
3689 +               .proc_handler   = &proc_dointvec_jiffies,
3690 +       },
3691 +       {
3692 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
3693 +               .procname       = "ip_conntrack_tcp_timeout_close",
3694 +               .data           = &ip_ct_tcp_timeout_close,
3695 +               .maxlen         = sizeof(unsigned int),
3696 +               .mode           = 0644,
3697 +               .proc_handler   = &proc_dointvec_jiffies,
3698 +       },
3699 +       {
3700 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT,
3701 +               .procname       = "ip_conntrack_udp_timeout",
3702 +               .data           = &ip_ct_udp_timeout,
3703 +               .maxlen         = sizeof(unsigned int),
3704 +               .mode           = 0644,
3705 +               .proc_handler   = &proc_dointvec_jiffies,
3706 +       },
3707 +       {
3708 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
3709 +               .procname       = "ip_conntrack_udp_timeout_stream",
3710 +               .data           = &ip_ct_udp_timeout_stream,
3711 +               .maxlen         = sizeof(unsigned int),
3712 +               .mode           = 0644,
3713 +               .proc_handler   = &proc_dointvec_jiffies,
3714 +       },
3715 +       {
3716 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT,
3717 +               .procname       = "ip_conntrack_icmp_timeout",
3718 +               .data           = &ip_ct_icmp_timeout,
3719 +               .maxlen         = sizeof(unsigned int),
3720 +               .mode           = 0644,
3721 +               .proc_handler   = &proc_dointvec_jiffies,
3722 +       },
3723 +       {
3724 +               .ctl_name       = NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT,
3725 +               .procname       = "ip_conntrack_generic_timeout",
3726 +               .data           = &ip_ct_generic_timeout,
3727 +               .maxlen         = sizeof(unsigned int),
3728 +               .mode           = 0644,
3729 +               .proc_handler   = &proc_dointvec_jiffies,
3730 +       },
3731 +       { .ctl_name = 0 }
3732 +};
3733 +
3734 +#define NET_IP_CONNTRACK_MAX 2089
3735 +
3736 +static ctl_table ip_ct_netfilter_table[] = {
3737 +       {
3738 +               .ctl_name       = NET_IPV4_NETFILTER,
3739 +               .procname       = "netfilter",
3740 +               .mode           = 0555,
3741 +               .child          = ip_ct_sysctl_table,
3742 +       },
3743 +       {
3744 +               .ctl_name       = NET_IP_CONNTRACK_MAX,
3745 +               .procname       = "ip_conntrack_max",
3746 +               .data           = &ip_conntrack_max,
3747 +               .maxlen         = sizeof(int),
3748 +               .mode           = 0644,
3749 +               .proc_handler   = &proc_dointvec
3750 +       },
3751 +       { .ctl_name = 0 }
3752 +};
3753 +
3754 +static ctl_table ip_ct_ipv4_table[] = {
3755 +       {
3756 +               .ctl_name       = NET_IPV4,
3757 +               .procname       = "ipv4",
3758 +               .mode           = 0555,
3759 +               .child          = ip_ct_netfilter_table,
3760 +       },
3761 +       { .ctl_name = 0 }
3762 +};
3763 +
3764 +static ctl_table ip_ct_net_table[] = {
3765 +       {
3766 +               .ctl_name       = CTL_NET,
3767 +               .procname       = "net",
3768 +               .mode           = 0555, 
3769 +               .child          = ip_ct_ipv4_table,
3770 +       },
3771 +       { .ctl_name = 0 }
3772 +};
3773 +#endif
3774 +static int init_or_cleanup(int init)
3775 +{
3776 +       struct proc_dir_entry *proc;
3777 +       int ret = 0;
3778 +
3779 +       if (!init) goto cleanup;
3780 +
3781 +       ret = ip_conntrack_init();
3782 +       if (ret < 0)
3783 +               goto cleanup_nothing;
3784 +
3785 +       proc = proc_net_create("ip_conntrack",0,list_conntracks);
3786 +       if (!proc) goto cleanup_init;
3787 +       proc->owner = THIS_MODULE;
3788 +
3789 +       ret = nf_register_hook(&ip_conntrack_in_ops);
3790 +       if (ret < 0) {
3791 +               printk("ip_conntrack: can't register pre-routing hook.\n");
3792 +               goto cleanup_proc;
3793 +       }
3794 +       ret = nf_register_hook(&ip_conntrack_local_out_ops);
3795 +       if (ret < 0) {
3796 +               printk("ip_conntrack: can't register local out hook.\n");
3797 +               goto cleanup_inops;
3798 +       }
3799 +       ret = nf_register_hook(&ip_conntrack_out_ops);
3800 +       if (ret < 0) {
3801 +               printk("ip_conntrack: can't register post-routing hook.\n");
3802 +               goto cleanup_inandlocalops;
3803 +       }
3804 +       ret = nf_register_hook(&ip_conntrack_local_in_ops);
3805 +       if (ret < 0) {
3806 +               printk("ip_conntrack: can't register local in hook.\n");
3807 +               goto cleanup_inoutandlocalops;
3808 +       }
3809 +#ifdef CONFIG_SYSCTL
3810 +       ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
3811 +       if (ip_ct_sysctl_header == NULL) {
3812 +               printk("ip_conntrack: can't register to sysctl.\n");
3813 +               goto cleanup;
3814 +       }
3815 +#endif
3816 +
3817 +       return ret;
3818 +
3819 + cleanup:
3820 +#ifdef CONFIG_SYSCTL
3821 +       unregister_sysctl_table(ip_ct_sysctl_header);
3822 +#endif
3823 +       nf_unregister_hook(&ip_conntrack_local_in_ops);
3824 + cleanup_inoutandlocalops:
3825 +       nf_unregister_hook(&ip_conntrack_out_ops);
3826 + cleanup_inandlocalops:
3827 +       nf_unregister_hook(&ip_conntrack_local_out_ops);
3828 + cleanup_inops:
3829 +       nf_unregister_hook(&ip_conntrack_in_ops);
3830 + cleanup_proc:
3831 +       proc_net_remove("ip_conntrack");
3832 + cleanup_init:
3833 +       ip_conntrack_cleanup();
3834 + cleanup_nothing:
3835 +       return ret;
3836 +}
3837 +
3838 +/* FIXME: Allow NULL functions and sub in pointers to generic for
3839 +   them. --RR */
3840 +int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
3841 +{
3842 +       int ret = 0;
3843 +       struct list_head *i;
3844 +
3845 +       WRITE_LOCK(&ip_conntrack_lock);
3846 +       list_for_each(i, &protocol_list) {
3847 +               if (((struct ip_conntrack_protocol *)i)->proto
3848 +                   == proto->proto) {
3849 +                       ret = -EBUSY;
3850 +                       goto out;
3851 +               }
3852 +       }
3853 +
3854 +       list_prepend(&protocol_list, proto);
3855 +
3856 + out:
3857 +       WRITE_UNLOCK(&ip_conntrack_lock);
3858 +       return ret;
3859 +}
3860 +
3861 +void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
3862 +{
3863 +       WRITE_LOCK(&ip_conntrack_lock);
3864 +
3865 +       /* ip_ct_find_proto() returns proto_generic in case there is no protocol 
3866 +        * helper. So this should be enough - HW */
3867 +       LIST_DELETE(&protocol_list, proto);
3868 +       WRITE_UNLOCK(&ip_conntrack_lock);
3869 +       
3870 +       /* Somebody could be still looking at the proto in bh. */
3871 +       synchronize_net();
3872 +
3873 +       /* Remove all contrack entries for this protocol */
3874 +       ip_ct_selective_cleanup(kill_proto, &proto->proto);
3875 +}
3876 +
3877 +static int __init init(void)
3878 +{
3879 +       return init_or_cleanup(1);
3880 +}
3881 +
3882 +static void __exit fini(void)
3883 +{
3884 +       init_or_cleanup(0);
3885 +}
3886 +
3887 +module_init(init);
3888 +module_exit(fini);
3889 +
3890 +/* Some modules need us, but don't depend directly on any symbol.
3891 +   They should call this. */
3892 +void need_ip_conntrack(void)
3893 +{
3894 +}
3895 +
3896 +EXPORT_SYMBOL(ip_conntrack_protocol_register);
3897 +EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
3898 +EXPORT_SYMBOL(invert_tuplepr);
3899 +EXPORT_SYMBOL(ip_conntrack_alter_reply);
3900 +EXPORT_SYMBOL(ip_conntrack_destroyed);
3901 +EXPORT_SYMBOL(ip_conntrack_get);
3902 +EXPORT_SYMBOL(need_ip_conntrack);
3903 +EXPORT_SYMBOL(ip_conntrack_helper_register);
3904 +EXPORT_SYMBOL(ip_conntrack_helper_unregister);
3905 +EXPORT_SYMBOL(ip_ct_selective_cleanup);
3906 +EXPORT_SYMBOL(ip_ct_refresh);
3907 +EXPORT_SYMBOL(ip_ct_find_proto);
3908 +EXPORT_SYMBOL(__ip_ct_find_proto);
3909 +EXPORT_SYMBOL(ip_ct_find_helper);
3910 +EXPORT_SYMBOL(ip_conntrack_expect_related);
3911 +EXPORT_SYMBOL(ip_conntrack_change_expect);
3912 +EXPORT_SYMBOL(ip_conntrack_unexpect_related);
3913 +EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
3914 +EXPORT_SYMBOL_GPL(ip_conntrack_expect_put);
3915 +EXPORT_SYMBOL(ip_conntrack_tuple_taken);
3916 +EXPORT_SYMBOL(ip_ct_gather_frags);
3917 +EXPORT_SYMBOL(ip_conntrack_htable_size);
3918 +EXPORT_SYMBOL(ip_conntrack_expect_list);
3919 +EXPORT_SYMBOL(ip_conntrack_lock);
3920 +EXPORT_SYMBOL(ip_conntrack_hash);
3921 +EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
3922 +EXPORT_SYMBOL_GPL(ip_conntrack_put);
3923 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_core.c linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_core.c
3924 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_core.c        2004-03-04 06:16:37.000000000 +0000
3925 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_core.c    2004-03-05 07:39:43.000000000 +0000
3926 @@ -96,9 +96,16 @@
3927         WRITE_UNLOCK(&ip_nat_lock);
3928  }
3929  
3930 -/* We do checksum mangling, so if they were wrong before they're still
3931 - * wrong.  Also works for incomplete packets (eg. ICMP dest
3932 - * unreachables.) */
3933 +/**
3934 + * ip_nat_cheat_check - Incremental checksum change for IP/TCP checksum
3935 + * @oldvalinv: bit-inverted old value of 32bit word
3936 + * @newval: new value of 32bit word
3937 + * @oldcheck: old checksum value
3938 + *
3939 + * This function implements incremental checksum mangling, so if a checksum
3940 + * was wrong it will still be wrong after mangling.  Also works for incomplete
3941 + * packets (eg. ICMP dest unreachables).  Return value is the new checksum.
3942 + */
3943  u_int16_t
3944  ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
3945  {
3946 @@ -124,7 +131,14 @@
3947         return i;
3948  }
3949  
3950 -/* Is this tuple already taken? (not by us) */
3951 +/**
3952 + * ip_nat_used_tuple - Is this tuple already in use?
3953 + * @tuple: tuple to be used for this check
3954 + * @ignored_conntrack: conntrack excluded from this check
3955 + *
3956 + * This function checks for the reply (inverted) tuple in the conntrack
3957 + * hash.  This is necessary with NAT, since there is no fixed mapping.
3958 + */
3959  int
3960  ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
3961                   const struct ip_conntrack *ignored_conntrack)
3962 @@ -515,6 +529,19 @@
3963  #endif
3964  };
3965  
3966 +/**
3967 + * ip_nat_setup_info - Set up NAT mappings for NEW packet
3968 + * @conntrack: conntrack on which we operate
3969 + * @mr: address/port range which is valid for this NAT mapping
3970 + * @hooknum: hook at which this NAT mapping applies
3971 + *
3972 + * This function is called by NAT targets (SNAT,DNAT,...) and by
3973 + * the NAT application helper modules.  It is called for the NEW packet
3974 + * of a connection in order to specify which NAT mappings shall apply to
3975 + * this connection at a given hook.
3976 + *
3977 + * Note: The reply mappings are created automagically by this function. 
3978 + */
3979  unsigned int
3980  ip_nat_setup_info(struct ip_conntrack *conntrack,
3981                   const struct ip_nat_multi_range *mr,
3982 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_helper.c linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_helper.c
3983 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_helper.c      2004-03-04 06:16:38.000000000 +0000
3984 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_helper.c  2004-03-05 07:39:43.000000000 +0000
3985 @@ -150,9 +150,19 @@
3986         return 1;
3987  }
3988  
3989 -/* Generic function for mangling variable-length address changes inside
3990 - * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
3991 - * command in FTP).
3992 +/**
3993 + * ip_nat_mangle_tcp_packet - Mangle and potentially resize payload packet
3994 + * @skb: pointer to skb of packet on which we operate
3995 + * @ct: conntrack of the connection to which this packet belongs
3996 + * @ctinfo: conntrack_info of the connection to which this packet belongs
3997 + * @match_offset: offset in bytes where to-be-manipulated part starts
3998 + * @match_len: length of the to-be-manipulated part
3999 + * @rep_buffer: pointer to buffer containing replacement
4000 + * @rep_len: length of replacement
4001 + *
4002 + * Generic function for mangling fixed and variable-length changes inside
4003 + * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX command 
4004 + * in FTP).
4005   *
4006   * Takes care about all the nasty sequence number changes, checksumming,
4007   * skb enlargement, ...
4008 @@ -198,16 +208,27 @@
4009         return 1;
4010  }
4011                         
4012 -/* Generic function for mangling variable-length address changes inside
4013 - * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
4014 - * command in the Amanda protocol)
4015 +/**
4016 + * ip_nat_mangle_udp_packet - Mangle and potentially resize payload packet
4017 + * @skb: pointer to skb of packet on which we operate
4018 + * @ct: conntrack of the connection to which this packet belongs
4019 + * @ctinfo: conntrack_info of the connection to which this packet belongs
4020 + * @match_offset: offset in bytes where to-be-manipulated part starts
4021 + * @match_len: length of the to-be-manipulated part
4022 + * @rep_buffer: pointer to buffer containing replacement
4023 + * @rep_len: length of replacement
4024 + *
4025 + * Generic function for mangling fixed and variable-length changes inside
4026 + * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
4027 + * command in the Amanda protocol)
4028   *
4029   * Takes care about all the nasty sequence number changes, checksumming,
4030   * skb enlargement, ...
4031   *
4032 - * XXX - This function could be merged with ip_nat_mangle_tcp_packet which
4033 - *       should be fairly easy to do.
4034 - */
4035 + * FIXME: should be unified with ip_nat_mangle_tcp_packet!!
4036 + *
4037 + * */
4038 +
4039  int 
4040  ip_nat_mangle_udp_packet(struct sk_buff **pskb,
4041                          struct ip_conntrack *ct,
4042 @@ -405,6 +426,13 @@
4043         return ip_ct_tuple_mask_cmp(tuple, &helper->tuple, &helper->mask);
4044  }
4045  
4046 +/**
4047 + * ip_nat_helper_register - Register NAT application helper
4048 + * @me: structure describing the helper
4049 + *
4050 + * This function is called by NAT application helpers to register
4051 + * themselves with the NAT core.
4052 + */
4053  int ip_nat_helper_register(struct ip_nat_helper *me)
4054  {
4055         int ret = 0;
4056 @@ -431,6 +459,13 @@
4057         return ret;
4058  }
4059  
4060 +/**
4061 + * ip_nat_helper_unregister - Unregister NAT application helper
4062 + * @me: structure describing the helper
4063 + *
4064 + * This function is called by NAT application helpers to unregister
4065 + * themselves from the NAT core.
4066 + */
4067  void ip_nat_helper_unregister(struct ip_nat_helper *me)
4068  {
4069         WRITE_LOCK(&ip_nat_lock);
4070 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_standalone.c linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_standalone.c
4071 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ip_nat_standalone.c  2004-03-04 06:16:55.000000000 +0000
4072 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ip_nat_standalone.c      2004-03-05 07:39:43.000000000 +0000
4073 @@ -266,7 +266,13 @@
4074  };
4075  #endif
4076  
4077 -/* Protocol registration. */
4078 +/**
4079 + * ip_nat_protocol_register - Register a layer 4 protocol helper
4080 + * @proto: structure describing this helper
4081 + * 
4082 + * This function is called by NAT layer 4 protocol helpers to register
4083 + * themselves with the NAT core.
4084 + */
4085  int ip_nat_protocol_register(struct ip_nat_protocol *proto)
4086  {
4087         int ret = 0;
4088 @@ -287,9 +293,16 @@
4089         return ret;
4090  }
4091  
4092 -/* Noone stores the protocol anywhere; simply delete it. */
4093 +/**
4094 + * ip_nat_protocol_unregister - Unregister a layer 4 protocol helper
4095 + * @proto: structure describing the helper
4096 + *
4097 + * This function is called by NAT layer 4 protocol helpers to
4098 + * unregister themselves from the NAT core.
4099 + */
4100  void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
4101  {
4102 +       /* No one stores the protocol anywhere; simply delete it. */
4103         WRITE_LOCK(&ip_nat_lock);
4104         LIST_DELETE(&protos, proto);
4105         WRITE_UNLOCK(&ip_nat_lock);
4106 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c
4107 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c  1970-01-01 00:00:00.000000000 +0000
4108 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c      2004-03-05 07:39:55.000000000 +0000
4109 @@ -0,0 +1,89 @@
4110 +/**
4111 + * Strip all IP options in the IP packet header.
4112 + *
4113 + * (C) 2001 by Fabrice MARIE <fabrice@netfilter.org>
4114 + * This software is distributed under GNU GPL v2, 1991
4115 + */
4116 +
4117 +#include <linux/module.h>
4118 +#include <linux/skbuff.h>
4119 +#include <linux/ip.h>
4120 +#include <net/checksum.h>
4121 +
4122 +#include <linux/netfilter_ipv4/ip_tables.h>
4123 +
4124 +MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
4125 +MODULE_DESCRIPTION("Strip all options in IPv4 packets");
4126 +MODULE_LICENSE("GPL");
4127 +
4128 +static unsigned int
4129 +target(struct sk_buff **pskb,
4130 +       const struct net_device *in,
4131 +       const struct net_device *out,
4132 +       unsigned int hooknum,
4133 +       const void *targinfo,
4134 +       void *userinfo)
4135 +{
4136 +       struct iphdr *iph;
4137 +       struct sk_buff *skb;
4138 +       struct ip_options *opt;
4139 +       unsigned char *optiph;
4140 +       int l;
4141 +       
4142 +       if (!skb_ip_make_writable(pskb, (*pskb)->len))
4143 +               return NF_DROP;
4144
4145 +       skb = (*pskb);
4146 +       iph = (*pskb)->nh.iph;
4147 +       optiph = skb->nh.raw;
4148 +       l = ((struct ip_options *)(&(IPCB(skb)->opt)))->optlen;
4149 +
4150 +       /* if no options in packet then nothing to clear. */
4151 +       if (iph->ihl * 4 == sizeof(struct iphdr))
4152 +               return IPT_CONTINUE;
4153 +
4154 +       /* else clear all options */
4155 +       memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
4156 +       memset(optiph+sizeof(struct iphdr), IPOPT_NOOP, l);
4157 +       opt = &(IPCB(skb)->opt);
4158 +       opt->is_data = 0;
4159 +       opt->optlen = l;
4160 +
4161 +       skb->nfcache |= NFC_ALTERED;
4162 +
4163 +        return IPT_CONTINUE;
4164 +}
4165 +
4166 +static int
4167 +checkentry(const char *tablename,
4168 +          const struct ipt_entry *e,
4169 +           void *targinfo,
4170 +           unsigned int targinfosize,
4171 +           unsigned int hook_mask)
4172 +{
4173 +       if (strcmp(tablename, "mangle")) {
4174 +               printk(KERN_WARNING "IPV4OPTSSTRIP: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
4175 +               return 0;
4176 +       }
4177 +       /* nothing else to check because no parameters */
4178 +       return 1;
4179 +}
4180 +
4181 +static struct ipt_target ipt_ipv4optsstrip_reg = { 
4182 +       .name = "IPV4OPTSSTRIP",
4183 +       .target = target,
4184 +       .checkentry = checkentry,
4185 +       .me = THIS_MODULE };
4186 +
4187 +static int __init init(void)
4188 +{
4189 +       return ipt_register_target(&ipt_ipv4optsstrip_reg);
4190 +}
4191 +
4192 +static void __exit fini(void)
4193 +{
4194 +       ipt_unregister_target(&ipt_ipv4optsstrip_reg);
4195 +}
4196 +
4197 +module_init(init);
4198 +module_exit(fini);
4199 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_TTL.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_TTL.c
4200 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_TTL.c    1970-01-01 00:00:00.000000000 +0000
4201 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_TTL.c        2004-03-05 07:40:01.000000000 +0000
4202 @@ -0,0 +1,120 @@
4203 +/* TTL modification target for IP tables
4204 + * (C) 2000 by Harald Welte <laforge@gnumonks.org>
4205 + *
4206 + * Version: $Revision$
4207 + *
4208 + * This software is distributed under the terms of GNU GPL
4209 + */
4210 +
4211 +#include <linux/module.h>
4212 +#include <linux/skbuff.h>
4213 +#include <linux/ip.h>
4214 +#include <net/checksum.h>
4215 +
4216 +#include <linux/netfilter_ipv4/ip_tables.h>
4217 +#include <linux/netfilter_ipv4/ipt_TTL.h>
4218 +
4219 +MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
4220 +MODULE_DESCRIPTION("IP tables TTL modification module");
4221 +MODULE_LICENSE("GPL");
4222 +
4223 +static unsigned int 
4224 +ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in, 
4225 +               const struct net_device *out, unsigned int hooknum, 
4226 +               const void *targinfo, void *userinfo)
4227 +{
4228 +       struct iphdr *iph;
4229 +       const struct ipt_TTL_info *info = targinfo;
4230 +       u_int16_t diffs[2];
4231 +       int new_ttl;
4232 +
4233 +       if (!skb_ip_make_writable(pskb, (*pskb)->len))
4234 +               return NF_DROP;
4235 +
4236 +       iph = (*pskb)->nh.iph;
4237 +                        
4238 +       switch (info->mode) {
4239 +               case IPT_TTL_SET:
4240 +                       new_ttl = info->ttl;
4241 +                       break;
4242 +               case IPT_TTL_INC:
4243 +                       new_ttl = iph->ttl + info->ttl;
4244 +                       if (new_ttl > 255)
4245 +                               new_ttl = 255;
4246 +                       break;
4247 +               case IPT_TTL_DEC:
4248 +                       new_ttl = iph->ttl + info->ttl;
4249 +                       if (new_ttl < 0)
4250 +                               new_ttl = 0;
4251 +                       break;
4252 +               default:
4253 +                       new_ttl = iph->ttl;
4254 +                       break;
4255 +       }
4256 +
4257 +       if (new_ttl != iph->ttl) {
4258 +               diffs[0] = htons(((unsigned)iph->ttl) << 8) ^ 0xFFFF;
4259 +               iph->ttl = new_ttl;
4260 +               diffs[1] = htons(((unsigned)iph->ttl) << 8);
4261 +               iph->check = csum_fold(csum_partial((char *)diffs,
4262 +                                                   sizeof(diffs),
4263 +                                                   iph->check^0xFFFF));
4264 +                                                                                               (*pskb)->nfcache |= NFC_ALTERED;
4265 +       }
4266 +
4267 +       return IPT_CONTINUE;
4268 +}
4269 +
4270 +static int ipt_ttl_checkentry(const char *tablename,
4271 +               const struct ipt_entry *e,
4272 +               void *targinfo,
4273 +               unsigned int targinfosize,
4274 +               unsigned int hook_mask)
4275 +{
4276 +       struct ipt_TTL_info *info = targinfo;
4277 +
4278 +       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_TTL_info))) {
4279 +               printk(KERN_WARNING "TTL: targinfosize %u != %Zu\n",
4280 +                               targinfosize,
4281 +                               IPT_ALIGN(sizeof(struct ipt_TTL_info)));
4282 +               return 0;       
4283 +       }       
4284 +
4285 +       if (strcmp(tablename, "mangle")) {
4286 +               printk(KERN_WARNING "TTL: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
4287 +               return 0;
4288 +       }
4289 +
4290 +       if (info->mode > IPT_TTL_MAXMODE) {
4291 +               printk(KERN_WARNING "TTL: invalid or unknown Mode %u\n", 
4292 +                       info->mode);
4293 +               return 0;
4294 +       }
4295 +
4296 +       if ((info->mode != IPT_TTL_SET) && (info->ttl == 0)) {
4297 +               printk(KERN_WARNING "TTL: increment/decrement doesn't make sense with value 0\n");
4298 +               return 0;
4299 +       }
4300 +       
4301 +       return 1;
4302 +}
4303 +
4304 +static struct ipt_target ipt_TTL = { 
4305 +       .name = "TTL",
4306 +       .target = ipt_ttl_target, 
4307 +       .checkentry = ipt_ttl_checkentry, 
4308 +       .me = THIS_MODULE 
4309 +};
4310 +
4311 +static int __init init(void)
4312 +{
4313 +       return ipt_register_target(&ipt_TTL);
4314 +}
4315 +
4316 +static void __exit fini(void)
4317 +{
4318 +       ipt_unregister_target(&ipt_TTL);
4319 +}
4320 +
4321 +module_init(init);
4322 +module_exit(fini);
4323 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_connlimit.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_connlimit.c
4324 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_connlimit.c      1970-01-01 00:00:00.000000000 +0000
4325 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_connlimit.c  2004-03-05 07:40:04.000000000 +0000
4326 @@ -0,0 +1,230 @@
4327 +/*
4328 + * netfilter module to limit the number of parallel tcp
4329 + * connections per IP address.
4330 + *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
4331 + *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
4332 + *             only ignore TIME_WAIT or gone connections
4333 + *
4334 + * based on ...
4335 + *
4336 + * Kernel module to match connection tracking information.
4337 + * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
4338 + */
4339 +#include <linux/module.h>
4340 +#include <linux/skbuff.h>
4341 +#include <linux/list.h>
4342 +#include <linux/netfilter_ipv4/ip_conntrack.h>
4343 +#include <linux/netfilter_ipv4/ip_conntrack_core.h>
4344 +#include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
4345 +#include <linux/netfilter_ipv4/ip_tables.h>
4346 +#include <linux/netfilter_ipv4/ipt_connlimit.h>
4347 +
4348 +#define DEBUG 0
4349 +
4350 +MODULE_LICENSE("GPL");
4351 +
4352 +/* we'll save the tuples of all connections we care about */
4353 +struct ipt_connlimit_conn
4354 +{
4355 +        struct list_head list;
4356 +       struct ip_conntrack_tuple tuple;
4357 +};
4358 +
4359 +struct ipt_connlimit_data {
4360 +       spinlock_t lock;
4361 +       struct list_head iphash[256];
4362 +};
4363 +
4364 +static int ipt_iphash(u_int32_t addr)
4365 +{
4366 +       int hash;
4367 +
4368 +       hash  =  addr        & 0xff;
4369 +       hash ^= (addr >>  8) & 0xff;
4370 +       hash ^= (addr >> 16) & 0xff;
4371 +       hash ^= (addr >> 24) & 0xff;
4372 +       return hash;
4373 +}
4374 +
4375 +static int count_them(struct ipt_connlimit_data *data,
4376 +                     u_int32_t addr, u_int32_t mask,
4377 +                     struct ip_conntrack *ct)
4378 +{
4379 +#if DEBUG
4380 +       const static char *tcp[] = { "none", "established", "syn_sent", "syn_recv",
4381 +                                    "fin_wait", "time_wait", "close", "close_wait",
4382 +                                    "last_ack", "listen" };
4383 +#endif
4384 +       int addit = 1, matches = 0;
4385 +       struct ip_conntrack_tuple tuple;
4386 +       struct ip_conntrack_tuple_hash *found;
4387 +       struct ipt_connlimit_conn *conn;
4388 +       struct list_head *hash,*lh;
4389 +
4390 +       spin_lock(&data->lock);
4391 +       tuple = ct->tuplehash[0].tuple;
4392 +       hash = &data->iphash[ipt_iphash(addr & mask)];
4393 +
4394 +       /* check the saved connections */
4395 +       for (lh = hash->next; lh != hash; lh = lh->next) {
4396 +               conn = list_entry(lh,struct ipt_connlimit_conn,list);
4397 +               found = ip_conntrack_find_get(&conn->tuple,ct);
4398 +               if (0 == memcmp(&conn->tuple,&tuple,sizeof(tuple)) &&
4399 +                   found != NULL &&
4400 +                   found->ctrack->proto.tcp.state != TCP_CONNTRACK_TIME_WAIT) {
4401 +                       /* Just to be sure we have it only once in the list.
4402 +                          We shouldn't see tuples twice unless someone hooks this
4403 +                          into a table without "-p tcp --syn" */
4404 +                       addit = 0;
4405 +               }
4406 +#if DEBUG
4407 +               printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d %s\n",
4408 +                      ipt_iphash(addr & mask),
4409 +                      NIPQUAD(conn->tuple.src.ip), ntohs(conn->tuple.src.u.tcp.port),
4410 +                      NIPQUAD(conn->tuple.dst.ip), ntohs(conn->tuple.dst.u.tcp.port),
4411 +                      (NULL != found) ? tcp[found->ctrack->proto.tcp.state] : "gone");
4412 +#endif
4413 +               if (NULL == found) {
4414 +                       /* this one is gone */
4415 +                       lh = lh->prev;
4416 +                       list_del(lh->next);
4417 +                       kfree(conn);
4418 +                       continue;
4419 +               }
4420 +               if (found->ctrack->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT) {
4421 +                       /* we don't care about connections which are
4422 +                          closed already -> ditch it */
4423 +                       lh = lh->prev;
4424 +                       list_del(lh->next);
4425 +                       kfree(conn);
4426 +                       nf_conntrack_put(&found->ctrack->infos[0]);
4427 +                       continue;
4428 +               }
4429 +               if ((addr & mask) == (conn->tuple.src.ip & mask)) {
4430 +                       /* same source IP address -> be counted! */
4431 +                       matches++;
4432 +               }
4433 +               nf_conntrack_put(&found->ctrack->infos[0]);
4434 +       }
4435 +       if (addit) {
4436 +               /* save the new connection in our list */
4437 +#if DEBUG
4438 +               printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d new\n",
4439 +                      ipt_iphash(addr & mask),
4440 +                      NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
4441 +                      NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
4442 +#endif
4443 +               conn = kmalloc(sizeof(*conn),GFP_ATOMIC);
4444 +               if (NULL == conn)
4445 +                       return -1;
4446 +               memset(conn,0,sizeof(*conn));
4447 +               INIT_LIST_HEAD(&conn->list);
4448 +               conn->tuple = tuple;
4449 +               list_add(&conn->list,hash);
4450 +               matches++;
4451 +       }
4452 +       spin_unlock(&data->lock);
4453 +       return matches;
4454 +}
4455 +
4456 +static int
4457 +match(const struct sk_buff *skb,
4458 +      const struct net_device *in,
4459 +      const struct net_device *out,
4460 +      const void *matchinfo,
4461 +      int offset,
4462 +      int *hotdrop)
4463 +{
4464 +       const struct ipt_connlimit_info *info = matchinfo;
4465 +       int connections, match;
4466 +       struct ip_conntrack *ct;
4467 +       enum ip_conntrack_info ctinfo;
4468 +
4469 +       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
4470 +       if (NULL == ct) {
4471 +               printk("ipt_connlimit: Oops: invalid ct state ?\n");
4472 +               *hotdrop = 1;
4473 +               return 0;
4474 +       }
4475 +       connections = count_them(info->data,skb->nh.iph->saddr,info->mask,ct);
4476 +       if (-1 == connections) {
4477 +               printk("ipt_connlimit: Hmm, kmalloc failed :-(\n");
4478 +               *hotdrop = 1; /* let's free some memory :-) */
4479 +               return 0;
4480 +       }
4481 +        match = (info->inverse) ? (connections <= info->limit) : (connections > info->limit);
4482 +#if DEBUG
4483 +       printk("ipt_connlimit: src=%u.%u.%u.%u mask=%u.%u.%u.%u "
4484 +              "connections=%d limit=%d match=%s\n",
4485 +              NIPQUAD(skb->nh.iph->saddr), NIPQUAD(info->mask),
4486 +              connections, info->limit, match ? "yes" : "no");
4487 +#endif
4488 +
4489 +       return match;
4490 +}
4491 +
4492 +static int check(const char *tablename,
4493 +                const struct ipt_ip *ip,
4494 +                void *matchinfo,
4495 +                unsigned int matchsize,
4496 +                unsigned int hook_mask)
4497 +{
4498 +       struct ipt_connlimit_info *info = matchinfo;
4499 +       int i;
4500 +
4501 +       /* verify size */
4502 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_connlimit_info)))
4503 +               return 0;
4504 +
4505 +       /* refuse anything but tcp */
4506 +       if (ip->proto != IPPROTO_TCP)
4507 +               return 0;
4508 +
4509 +       /* init private data */
4510 +       info->data = kmalloc(sizeof(struct ipt_connlimit_data),GFP_KERNEL);
4511 +       spin_lock_init(&(info->data->lock));
4512 +       for (i = 0; i < 256; i++)
4513 +               INIT_LIST_HEAD(&(info->data->iphash[i]));
4514 +       
4515 +       return 1;
4516 +}
4517 +
4518 +static void destroy(void *matchinfo, unsigned int matchinfosize)
4519 +{
4520 +       struct ipt_connlimit_info *info = matchinfo;
4521 +       struct ipt_connlimit_conn *conn;
4522 +       struct list_head *hash;
4523 +       int i;
4524 +
4525 +       /* cleanup */
4526 +       for (i = 0; i < 256; i++) {
4527 +               hash = &(info->data->iphash[i]);
4528 +               while (hash != hash->next) {
4529 +                       conn = list_entry(hash->next,struct ipt_connlimit_conn,list);
4530 +                       list_del(hash->next);
4531 +                       kfree(conn);
4532 +               }
4533 +       }
4534 +       kfree(info->data);
4535 +}
4536 +
4537 +static struct ipt_match connlimit_match = { 
4538 +       .name = "connlimit",
4539 +       .match = &match,
4540 +       .checkentry = &check,
4541 +       .destroy = &destroy,
4542 +       .me = THIS_MODULE
4543 +};
4544 +
4545 +static int __init init(void)
4546 +{
4547 +       return ipt_register_match(&connlimit_match);
4548 +}
4549 +
4550 +static void __exit fini(void)
4551 +{
4552 +       ipt_unregister_match(&connlimit_match);
4553 +}
4554 +
4555 +module_init(init);
4556 +module_exit(fini);
4557 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_dstlimit.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_dstlimit.c
4558 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_dstlimit.c       1970-01-01 00:00:00.000000000 +0000
4559 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_dstlimit.c   2004-03-05 07:40:06.000000000 +0000
4560 @@ -0,0 +1,690 @@
4561 +/* iptables match extension to limit the number of packets per second
4562 + * separately for each destination.
4563 + *
4564 + * (C) 2003 by Harald Welte <laforge@netfilter.org>
4565 + *
4566 + * $Id$
4567 + *
4568 + * Development of this code was funded by Astaro AG, http://www.astaro.com/
4569 + *
4570 + * based on ipt_limit.c by:
4571 + * Jérôme de Vivie     <devivie@info.enserb.u-bordeaux.fr>
4572 + * Hervé Eychenne      <eychenne@info.enserb.u-bordeaux.fr>
4573 + * Rusty Russell       <rusty@rustcorp.com.au>
4574 + *
4575 + * The general idea is to create a hash table for every dstip and have a
4576 + * separate limit counter per tuple.  This way you can do something like 'limit
4577 + * the number of syn packets for each of my internal addresses'.
4578 + *
4579 + * Ideally this would just be implemented as a general 'hash' match, which would
4580 + * allow us to attach any iptables target to its hash buckets.  But this is
4581 + * not possible in the current iptables architecture.  As always, pkttables for
4582 + * 2.7.x will help ;)
4583 + */
4584 +#include <linux/module.h>
4585 +#include <linux/skbuff.h>
4586 +#include <linux/spinlock.h>
4587 +#include <linux/random.h>
4588 +#include <linux/jhash.h>
4589 +#include <linux/slab.h>
4590 +#include <linux/vmalloc.h>
4591 +#include <linux/tcp.h>
4592 +#include <linux/udp.h>
4593 +#include <linux/proc_fs.h>
4594 +#include <linux/seq_file.h>
4595 +
4596 +#define ASSERT_READ_LOCK(x) 
4597 +#define ASSERT_WRITE_LOCK(x) 
4598 +#include <linux/netfilter_ipv4/lockhelp.h>
4599 +#include <linux/netfilter_ipv4/listhelp.h>
4600 +
4601 +#include <linux/netfilter_ipv4/ip_tables.h>
4602 +#include <linux/netfilter_ipv4/ipt_dstlimit.h>
4603 +
4604 +/* FIXME: this is just for IP_NF_ASSERT */
4605 +#include <linux/netfilter_ipv4/ip_conntrack.h>
4606 +
4607 +#define MS2JIFFIES(x) ((x*HZ)/1000)
4608 +
4609 +MODULE_LICENSE("GPL");
4610 +MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
4611 +MODULE_DESCRIPTION("iptables match for limiting per destination");
4612 +
4613 +/* need to declare this at the top */
4614 +static struct proc_dir_entry *dstlimit_procdir;
4615 +static struct file_operations dl_file_ops;
4616 +
4617 +/* hash table crap */
4618 +
4619 +struct dsthash_dst {
4620 +       u_int32_t src_ip;
4621 +       u_int32_t dst_ip;
4622 +       u_int16_t port;
4623 +};
4624 +
4625 +struct dsthash_ent {
4626 +       /* static / read-only parts in the beginning */
4627 +       struct list_head list;
4628 +       struct dsthash_dst dst;
4629 +
4630 +       /* modified structure members in the end */
4631 +       unsigned long expires;          /* precalculated expiry time */
4632 +       struct {
4633 +               unsigned long prev;     /* last modification */
4634 +               u_int32_t credit;
4635 +               u_int32_t credit_cap, cost;
4636 +       } rateinfo;
4637 +};
4638 +
4639 +struct ipt_dstlimit_htable {
4640 +       struct list_head list;          /* global list of all htables */
4641 +       atomic_t use;
4642 +
4643 +       struct dstlimit_cfg cfg;        /* config */
4644 +
4645 +       /* used internally */
4646 +       spinlock_t lock;                /* lock for list_head */
4647 +       u_int32_t rnd;                  /* random seed for hash */
4648 +       struct timer_list timer;        /* timer for gc */
4649 +       atomic_t count;                 /* number entries in table */
4650 +
4651 +       /* seq_file stuff */
4652 +       struct proc_dir_entry *pde;
4653 +
4654 +       struct list_head hash[0];       /* hashtable itself */
4655 +};
4656 +
4657 +DECLARE_RWLOCK(dstlimit_lock);         /* protects htables list */
4658 +static LIST_HEAD(dstlimit_htables);
4659 +static kmem_cache_t *dstlimit_cachep;
4660 +
4661 +static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
4662 +{
4663 +       return (ent->dst.dst_ip == b->dst_ip 
4664 +               && ent->dst.port == b->port
4665 +               && ent->dst.src_ip == b->src_ip);
4666 +}
4667 +
4668 +static inline u_int32_t
4669 +hash_dst(const struct ipt_dstlimit_htable *ht, const struct dsthash_dst *dst)
4670 +{
4671 +       return (jhash_3words(dst->dst_ip, dst->port, 
4672 +                            dst->src_ip, ht->rnd) % ht->cfg.size);
4673 +}
4674 +
4675 +static inline struct dsthash_ent *
4676 +__dsthash_find(const struct ipt_dstlimit_htable *ht, struct dsthash_dst *dst)
4677 +{
4678 +       struct dsthash_ent *ent;
4679 +       u_int32_t hash = hash_dst(ht, dst);
4680 +       MUST_BE_LOCKED(&ht->lock);
4681 +       ent = LIST_FIND(&ht->hash[hash], dst_cmp, struct dsthash_ent *, dst);
4682 +       return ent;
4683 +}
4684 +
4685 +/* allocate dsthash_ent, initialize dst, put in htable and lock it */
4686 +static struct dsthash_ent *
4687 +__dsthash_alloc_init(struct ipt_dstlimit_htable *ht, struct dsthash_dst *dst)
4688 +{
4689 +       struct dsthash_ent *ent;
4690 +
4691 +       /* initialize hash with random val at the time we allocate
4692 +        * the first hashtable entry */
4693 +       if (!ht->rnd)
4694 +               get_random_bytes(&ht->rnd, 4);
4695 +
4696 +       if (ht->cfg.max &&
4697 +           atomic_read(&ht->count) >= ht->cfg.max) {
4698 +               /* FIXME: do something. question is what.. */
4699 +               if (net_ratelimit())
4700 +                       printk(KERN_WARNING 
4701 +                               "ipt_dstlimit: max count of %u reached\n", 
4702 +                               ht->cfg.max);
4703 +               return NULL;
4704 +       }
4705 +
4706 +       ent = kmem_cache_alloc(dstlimit_cachep, GFP_ATOMIC);
4707 +       if (!ent) {
4708 +               if (net_ratelimit())
4709 +                       printk(KERN_ERR 
4710 +                               "ipt_dstlimit: can't allocate dsthash_ent\n");
4711 +               return NULL;
4712 +       }
4713 +
4714 +       atomic_inc(&ht->count);
4715 +
4716 +       ent->dst.dst_ip = dst->dst_ip;
4717 +       ent->dst.port = dst->port;
4718 +       ent->dst.src_ip = dst->src_ip;
4719 +
4720 +       list_add(&ent->list, &ht->hash[hash_dst(ht, dst)]);
4721 +
4722 +       return ent;
4723 +}
4724 +
4725 +static inline void 
4726 +__dsthash_free(struct ipt_dstlimit_htable *ht, struct dsthash_ent *ent)
4727 +{
4728 +       MUST_BE_LOCKED(&ht->lock);
4729 +
4730 +       list_del(&ent->list);
4731 +       kmem_cache_free(dstlimit_cachep, ent);
4732 +       atomic_dec(&ht->count);
4733 +}
4734 +static void htable_gc(unsigned long htlong);
4735 +
4736 +static int htable_create(struct ipt_dstlimit_info *minfo)
4737 +{
4738 +       int i;
4739 +       unsigned int size;
4740 +       struct ipt_dstlimit_htable *hinfo;
4741 +
4742 +       if (minfo->cfg.size)
4743 +               size = minfo->cfg.size;
4744 +       else {
4745 +               size = (((num_physpages << PAGE_SHIFT) / 16384)
4746 +                        / sizeof(struct list_head));
4747 +               if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
4748 +                       size = 8192;
4749 +               if (size < 16)
4750 +                       size = 16;
4751 +       }
4752 +       /* FIXME: don't use vmalloc() here or anywhere else -HW */
4753 +       hinfo = vmalloc(sizeof(struct ipt_dstlimit_htable)
4754 +                       + (sizeof(struct list_head) * size));
4755 +       if (!hinfo) {
4756 +               printk(KERN_ERR "ipt_dstlimit: Unable to create hashtable\n");
4757 +               return -1;
4758 +       }
4759 +       minfo->hinfo = hinfo;
4760 +
4761 +       /* copy match config into hashtable config */
4762 +       memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
4763 +       hinfo->cfg.size = size;
4764 +       if (!hinfo->cfg.max)
4765 +               hinfo->cfg.max = 8 * hinfo->cfg.size;
4766 +       else if (hinfo->cfg.max < hinfo->cfg.size)
4767 +               hinfo->cfg.max = hinfo->cfg.size;
4768 +
4769 +       for (i = 0; i < hinfo->cfg.size; i++)
4770 +               INIT_LIST_HEAD(&hinfo->hash[i]);
4771 +
4772 +       atomic_set(&hinfo->count, 0);
4773 +       atomic_set(&hinfo->use, 1);
4774 +       hinfo->rnd = 0;
4775 +       hinfo->lock = SPIN_LOCK_UNLOCKED;
4776 +       hinfo->pde = create_proc_entry(minfo->name, 0, dstlimit_procdir);
4777 +       if (!hinfo->pde) {
4778 +               vfree(hinfo);
4779 +               return -1;
4780 +       }
4781 +       hinfo->pde->proc_fops = &dl_file_ops;
4782 +       hinfo->pde->data = hinfo;
4783 +
4784 +       init_timer(&hinfo->timer);
4785 +       hinfo->timer.expires = jiffies + MS2JIFFIES(hinfo->cfg.gc_interval);
4786 +       hinfo->timer.data = (unsigned long )hinfo;
4787 +       hinfo->timer.function = htable_gc;
4788 +       add_timer(&hinfo->timer);
4789 +
4790 +       WRITE_LOCK(&dstlimit_lock);
4791 +       list_add(&hinfo->list, &dstlimit_htables);
4792 +       WRITE_UNLOCK(&dstlimit_lock);
4793 +
4794 +       return 0;
4795 +}
4796 +
4797 +static int select_all(struct ipt_dstlimit_htable *ht, struct dsthash_ent *he)
4798 +{
4799 +       return 1;
4800 +}
4801 +
4802 +static int select_gc(struct ipt_dstlimit_htable *ht, struct dsthash_ent *he)
4803 +{
4804 +       return (jiffies >= he->expires);
4805 +}
4806 +
4807 +static void htable_selective_cleanup(struct ipt_dstlimit_htable *ht,
4808 +                               int (*select)(struct ipt_dstlimit_htable *ht, 
4809 +                                             struct dsthash_ent *he))
4810 +{
4811 +       int i;
4812 +
4813 +       IP_NF_ASSERT(ht->cfg.size && ht->cfg.max);
4814 +
4815 +       /* lock hash table and iterate over it */
4816 +       LOCK_BH(&ht->lock);
4817 +       for (i = 0; i < ht->cfg.size; i++) {
4818 +               struct dsthash_ent *dh, *n;
4819 +               list_for_each_entry_safe(dh, n, &ht->hash[i], list) {
4820 +                       if ((*select)(ht, dh))
4821 +                               __dsthash_free(ht, dh);
4822 +               }
4823 +       }
4824 +       UNLOCK_BH(&ht->lock);
4825 +}
4826 +
4827 +/* hash table garbage collector, run by timer */
4828 +static void htable_gc(unsigned long htlong)
4829 +{
4830 +       struct ipt_dstlimit_htable *ht = (struct ipt_dstlimit_htable *)htlong;
4831 +
4832 +       htable_selective_cleanup(ht, select_gc);
4833 +
4834 +       /* re-add the timer accordingly */
4835 +       ht->timer.expires = jiffies + MS2JIFFIES(ht->cfg.gc_interval);
4836 +       add_timer(&ht->timer);
4837 +}
4838 +
4839 +static void htable_destroy(struct ipt_dstlimit_htable *hinfo)
4840 +{
4841 +       /* remove timer, if it is pending */
4842 +       if (timer_pending(&hinfo->timer))
4843 +               del_timer(&hinfo->timer);
4844 +
4845 +       /* remove proc entry */
4846 +       remove_proc_entry(hinfo->pde->name, dstlimit_procdir);
4847 +
4848 +       htable_selective_cleanup(hinfo, select_all);
4849 +       vfree(hinfo);
4850 +}
4851 +
4852 +static struct ipt_dstlimit_htable *htable_find_get(char *name)
4853 +{
4854 +       struct ipt_dstlimit_htable *hinfo;
4855 +
4856 +       READ_LOCK(&dstlimit_lock);
4857 +       list_for_each_entry(hinfo, &dstlimit_htables, list) {
4858 +               if (!strcmp(name, hinfo->pde->name)) {
4859 +                       atomic_inc(&hinfo->use);
4860 +                       READ_UNLOCK(&dstlimit_lock);
4861 +                       return hinfo;
4862 +               }
4863 +       }
4864 +       READ_UNLOCK(&dstlimit_lock);
4865 +
4866 +       return NULL;
4867 +}
4868 +
4869 +static void htable_put(struct ipt_dstlimit_htable *hinfo)
4870 +{
4871 +       if (atomic_dec_and_test(&hinfo->use)) {
4872 +               WRITE_LOCK(&dstlimit_lock);
4873 +               list_del(&hinfo->list);
4874 +               WRITE_UNLOCK(&dstlimit_lock);
4875 +               htable_destroy(hinfo);
4876 +       }
4877 +}
4878 +
4879 +
4880 +/* The algorithm used is the Simple Token Bucket Filter (TBF)
4881 + * see net/sched/sch_tbf.c in the linux source tree
4882 + */
4883 +
4884 +/* Rusty: This is my (non-mathematically-inclined) understanding of
4885 +   this algorithm.  The `average rate' in jiffies becomes your initial
4886 +   amount of credit `credit' and the most credit you can ever have
4887 +   `credit_cap'.  The `peak rate' becomes the cost of passing the
4888 +   test, `cost'.
4889 +
4890 +   `prev' tracks the last packet hit: you gain one credit per jiffy.
4891 +   If you get credit balance more than this, the extra credit is
4892 +   discarded.  Every time the match passes, you lose `cost' credits;
4893 +   if you don't have that many, the test fails.
4894 +
4895 +   See Alexey's formal explanation in net/sched/sch_tbf.c.
4896 +
4897 +   To get the maximum range, we multiply by this factor (ie. you get N
4898 +   credits per jiffy).  We want to allow a rate as low as 1 per day
4899 +   (slowest userspace tool allows), which means
4900 +   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
4901 +*/
4902 +#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
4903 +
4904 +/* Repeated shift and or gives us all 1s, final shift and add 1 gives
4905 + * us the power of 2 below the theoretical max, so GCC simply does a
4906 + * shift. */
4907 +#define _POW2_BELOW2(x) ((x)|((x)>>1))
4908 +#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
4909 +#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
4910 +#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
4911 +#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
4912 +#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
4913 +
4914 +#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
4915 +
4916 +/* Precision saver. */
4917 +static inline u_int32_t
4918 +user2credits(u_int32_t user)
4919 +{
4920 +       /* If multiplying would overflow... */
4921 +       if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
4922 +               /* Divide first. */
4923 +               return (user / IPT_DSTLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
4924 +
4925 +       return (user * HZ * CREDITS_PER_JIFFY) / IPT_DSTLIMIT_SCALE;
4926 +}
4927 +
4928 +static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
4929 +{
4930 +       dh->rateinfo.credit += (now - xchg(&dh->rateinfo.prev, now)) 
4931 +                                       * CREDITS_PER_JIFFY;
4932 +       if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
4933 +               dh->rateinfo.credit = dh->rateinfo.credit_cap;
4934 +}
4935 +
4936 +static int
4937 +dstlimit_match(const struct sk_buff *skb,
4938 +               const struct net_device *in,
4939 +               const struct net_device *out,
4940 +               const void *matchinfo,
4941 +               int offset,
4942 +               int *hotdrop)
4943 +{
4944 +       struct ipt_dstlimit_info *r = 
4945 +               ((struct ipt_dstlimit_info *)matchinfo)->u.master;
4946 +       struct ipt_dstlimit_htable *hinfo = r->hinfo;
4947 +       unsigned long now = jiffies;
4948 +       struct dsthash_ent *dh;
4949 +       struct dsthash_dst dst;
4950 +
4951 +       memset(&dst, 0, sizeof(dst));
4952 +
4953 +       /* dest ip is always in hash */
4954 +       dst.dst_ip = skb->nh.iph->daddr;
4955 +
4956 +       /* source ip only if respective hashmode, otherwise set to
4957 +        * zero */
4958 +       if (hinfo->cfg.mode & IPT_DSTLIMIT_HASH_SIP)
4959 +               dst.src_ip = skb->nh.iph->saddr;
4960 +
4961 +       /* dest port only if respective mode */
4962 +       if (hinfo->cfg.mode & IPT_DSTLIMIT_HASH_DPT) {
4963 +               u16 ports[2];
4964 +
4965 +               /* Must not be a fragment. */
4966 +               if (offset)
4967 +                       return 0;
4968 +
4969 +               /* Must be big enough to read ports (both UDP and TCP have
4970 +                  them at the start). */
4971 +               if (skb_copy_bits(skb, skb->nh.iph->ihl*4, ports, sizeof(ports)) < 0) {
4972 +                       /* We've been asked to examine this packet, and we
4973 +                          can't.  Hence, no choice but to drop. */
4974 +                       *hotdrop = 1;
4975 +                       return 0;
4976 +               }
4977 +
4978 +               switch (skb->nh.iph->protocol) {
4979 +                       struct tcphdr *th;
4980 +                       struct udphdr *uh;
4981 +               case IPPROTO_TCP:
4982 +                       th = (void *)skb->nh.iph+skb->nh.iph->ihl*4;
4983 +                       dst.port = th->dest;
4984 +                       break;
4985 +               case IPPROTO_UDP:
4986 +                       uh = (void *)skb->nh.iph+skb->nh.iph->ihl*4;
4987 +                       dst.port = uh->dest;
4988 +                       break;
4989 +               default:
4990 +                       break;
4991 +               }
4992 +       } 
4993 +
4994 +       LOCK_BH(&hinfo->lock);
4995 +       dh = __dsthash_find(hinfo, &dst);
4996 +       if (!dh) {
4997 +               dh = __dsthash_alloc_init(hinfo, &dst);
4998 +
4999 +               if (!dh) {
5000 +                       /* enomem... don't match == DROP */
5001 +                       if (net_ratelimit())
5002 +                               printk(KERN_ERR "%s: ENOMEM\n", __FUNCTION__);
5003 +                       UNLOCK_BH(&hinfo->lock);
5004 +                       return 0;
5005 +               }
5006 +
5007 +               dh->expires = jiffies + MS2JIFFIES(hinfo->cfg.expire);
5008 +
5009 +               dh->rateinfo.prev = jiffies;
5010 +               dh->rateinfo.credit = user2credits(hinfo->cfg.avg * 
5011 +                                                       hinfo->cfg.burst);
5012 +               dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * 
5013 +                                                       hinfo->cfg.burst);
5014 +               dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
5015 +
5016 +               UNLOCK_BH(&hinfo->lock);
5017 +               return 1;
5018 +       }
5019 +
5020 +       /* update expiration timeout */
5021 +       dh->expires = now + MS2JIFFIES(hinfo->cfg.expire);
5022 +
5023 +       rateinfo_recalc(dh, now);
5024 +       if (dh->rateinfo.credit >= dh->rateinfo.cost) {
5025 +               /* We're underlimit. */
5026 +               dh->rateinfo.credit -= dh->rateinfo.cost;
5027 +               UNLOCK_BH(&hinfo->lock);
5028 +               return 1;
5029 +       }
5030 +
5031 +               UNLOCK_BH(&hinfo->lock);
5032 +
5033 +       /* default case: we're overlimit, thus don't match */
5034 +       return 0;
5035 +}
5036 +
5037 +static int
5038 +dstlimit_checkentry(const char *tablename,
5039 +                    const struct ipt_ip *ip,
5040 +                    void *matchinfo,
5041 +                    unsigned int matchsize,
5042 +                    unsigned int hook_mask)
5043 +{
5044 +       struct ipt_dstlimit_info *r = matchinfo;
5045 +
5046 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_dstlimit_info)))
5047 +               return 0;
5048 +
5049 +       /* Check for overflow. */
5050 +       if (r->cfg.burst == 0
5051 +           || user2credits(r->cfg.avg * r->cfg.burst) < 
5052 +                                       user2credits(r->cfg.avg)) {
5053 +               printk(KERN_ERR "ipt_dstlimit: Overflow, try lower: %u/%u\n",
5054 +                      r->cfg.avg, r->cfg.burst);
5055 +               return 0;
5056 +       }
5057 +
5058 +       if (r->cfg.mode == 0 
5059 +           || r->cfg.mode > (IPT_DSTLIMIT_HASH_DPT
5060 +                         |IPT_DSTLIMIT_HASH_DIP
5061 +                         |IPT_DSTLIMIT_HASH_SIP))
5062 +               return 0;
5063 +
5064 +       if (!r->cfg.gc_interval)
5065 +               return 0;
5066 +       
5067 +       if (!r->cfg.expire)
5068 +               return 0;
5069 +
5070 +       r->hinfo = htable_find_get(r->name);
5071 +       if (!r->hinfo && (htable_create(r) != 0)) {
5072 +               return 0;
5073 +       }
5074 +
5075 +       /* Ugly hack: For SMP, we only want to use one set */
5076 +       r->u.master = r;
5077 +
5078 +       return 1;
5079 +}
5080 +
5081 +static void
5082 +dstlimit_destroy(void *matchinfo, unsigned int matchsize)
5083 +{
5084 +       struct ipt_dstlimit_info *r = (struct ipt_dstlimit_info *) matchinfo;
5085 +
5086 +       htable_put(r->hinfo);
5087 +}
5088 +
5089 +static struct ipt_match ipt_dstlimit = { 
5090 +       .list = { .prev = NULL, .next = NULL }, 
5091 +       .name = "dstlimit", 
5092 +       .match = dstlimit_match, 
5093 +       .checkentry = dstlimit_checkentry, 
5094 +       .destroy = dstlimit_destroy,
5095 +       .me = THIS_MODULE 
5096 +};
5097 +
5098 +/* PROC stuff */
5099 +
5100 +static void *dl_seq_start(struct seq_file *s, loff_t *pos)
5101 +{
5102 +       struct proc_dir_entry *pde = s->private;
5103 +       struct ipt_dstlimit_htable *htable = pde->data;
5104 +       unsigned int *bucket;
5105 +
5106 +       LOCK_BH(&htable->lock);
5107 +       if (*pos >= htable->cfg.size)
5108 +               return NULL;
5109 +
5110 +       bucket = kmalloc(sizeof(unsigned int), GFP_KERNEL);
5111 +       if (!bucket)
5112 +               return ERR_PTR(-ENOMEM);
5113 +
5114 +       *bucket = *pos;
5115 +       return bucket;
5116 +}
5117 +
5118 +static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
5119 +{
5120 +       struct proc_dir_entry *pde = s->private;
5121 +       struct ipt_dstlimit_htable *htable = pde->data;
5122 +       unsigned int *bucket = (unsigned int *)v;
5123 +
5124 +       *pos = ++(*bucket);
5125 +       if (*pos >= htable->cfg.size) {
5126 +               kfree(v);
5127 +               return NULL;
5128 +       }
5129 +       return bucket;
5130 +}
5131 +
5132 +static void dl_seq_stop(struct seq_file *s, void *v)
5133 +{
5134 +       struct proc_dir_entry *pde = s->private;
5135 +       struct ipt_dstlimit_htable *htable = pde->data;
5136 +       unsigned int *bucket = (unsigned int *)v;
5137 +
5138 +       kfree(bucket);
5139 +
5140 +       UNLOCK_BH(&htable->lock);
5141 +}
5142 +
5143 +static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s)
5144 +{
5145 +       /* recalculate to show accurate numbers */
5146 +       rateinfo_recalc(ent, jiffies);
5147 +
5148 +       return seq_printf(s, "%ld %u.%u.%u.%u->%u.%u.%u.%u:%u %u %u %u\n",
5149 +                       (ent->expires - jiffies)/HZ,
5150 +                       NIPQUAD(ent->dst.src_ip),
5151 +                       NIPQUAD(ent->dst.dst_ip), ntohs(ent->dst.port),
5152 +                       ent->rateinfo.credit, ent->rateinfo.credit_cap,
5153 +                       ent->rateinfo.cost);
5154 +}
5155 +
5156 +static int dl_seq_show(struct seq_file *s, void *v)
5157 +{
5158 +       struct proc_dir_entry *pde = s->private;
5159 +       struct ipt_dstlimit_htable *htable = pde->data;
5160 +       unsigned int *bucket = (unsigned int *)v;
5161 +
5162 +       if (LIST_FIND_W(&htable->hash[*bucket], dl_seq_real_show,
5163 +                     struct dsthash_ent *, s)) {
5164 +               /* buffer was filled and unable to print that tuple */
5165 +               return 1;
5166 +       }
5167 +       return 0;
5168 +}
5169 +
5170 +static struct seq_operations dl_seq_ops = {
5171 +       .start = dl_seq_start,
5172 +       .next  = dl_seq_next,
5173 +       .stop  = dl_seq_stop,
5174 +       .show  = dl_seq_show
5175 +};
5176 +
5177 +static int dl_proc_open(struct inode *inode, struct file *file)
5178 +{
5179 +       int ret = seq_open(file, &dl_seq_ops);
5180 +
5181 +       if (!ret) {
5182 +               struct seq_file *sf = file->private_data;
5183 +               sf->private = PDE(inode);
5184 +       }
5185 +       return ret;
5186 +}
5187 +
5188 +static struct file_operations dl_file_ops = {
5189 +       .owner   = THIS_MODULE,
5190 +       .open    = dl_proc_open,
5191 +       .read    = seq_read,
5192 +       .llseek  = seq_lseek,
5193 +       .release = seq_release
5194 +};
5195 +
5196 +static int init_or_fini(int fini)
5197 +{
5198 +       int ret = 0;
5199 +
5200 +       if (fini)
5201 +               goto cleanup;
5202 +
5203 +       if (ipt_register_match(&ipt_dstlimit)) {
5204 +               ret = -EINVAL;
5205 +               goto cleanup_nothing;
5206 +       }
5207 +
5208 +       /* FIXME: do we really want HWCACHE_ALIGN since our objects are
5209 +        * quite small ? */
5210 +       dstlimit_cachep = kmem_cache_create("ipt_dstlimit",
5211 +                                           sizeof(struct dsthash_ent), 0,
5212 +                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
5213 +       if (!dstlimit_cachep) {
5214 +               printk(KERN_ERR "Unable to create ipt_dstlimit slab cache\n");
5215 +               ret = -ENOMEM;
5216 +               goto cleanup_unreg_match;
5217 +       }
5218 +
5219 +       dstlimit_procdir = proc_mkdir("ipt_dstlimit", proc_net);
5220 +       if (!dstlimit_procdir) {
5221 +               printk(KERN_ERR "Unable to create proc dir entry\n");
5222 +               ret = -ENOMEM;
5223 +               goto cleanup_free_slab;
5224 +       }
5225 +
5226 +       return ret;
5227 +
5228 +cleanup:
5229 +       remove_proc_entry("ipt_dstlimit", proc_net);
5230 +cleanup_free_slab:
5231 +       kmem_cache_destroy(dstlimit_cachep);
5232 +cleanup_unreg_match:
5233 +       ipt_unregister_match(&ipt_dstlimit);
5234 +cleanup_nothing:
5235 +       return ret;
5236 +       
5237 +}
5238 +
5239 +static int __init init(void)
5240 +{
5241 +       return init_or_fini(0);
5242 +}
5243 +
5244 +static void __exit fini(void)
5245 +{
5246 +       init_or_fini(1);
5247 +}
5248 +
5249 +module_init(init);
5250 +module_exit(fini);
5251 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_fuzzy.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_fuzzy.c
5252 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_fuzzy.c  1970-01-01 00:00:00.000000000 +0000
5253 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_fuzzy.c      2004-03-05 07:40:08.000000000 +0000
5254 @@ -0,0 +1,185 @@
5255 +/*
5256 + *  This module implements a simple TSK FLC 
5257 + * (Takagi-Sugeno-Kang Fuzzy Logic Controller) that aims
5258 + * to limit , in an adaptive and flexible way , the packet rate crossing 
5259 + * a given stream . It serves as an initial and very simple (but effective)
5260 + * example of how Fuzzy Logic techniques can be applied to defeat DoS attacks.
5261 + *  As a matter of fact , Fuzzy Logic can help us to insert any "behavior"  
5262 + * into our code in a precise , adaptive and efficient manner. 
5263 + *  The goal is very similar to that of "limit" match , but using techniques of
5264 + * Fuzzy Control , that allow us to shape the transfer functions precisely ,
5265 + * avoiding over and undershoots - and stuff like that .
5266 + *
5267 + *
5268 + * 2002-08-10  Hime Aguiar e Oliveira Jr. <hime@engineer.com> : Initial version.
5269 + * 2002-08-17  : Changed to eliminate floating point operations .
5270 + * 2002-08-23  : Coding style changes .
5271 +*/
5272 +
5273 +#include <linux/module.h>
5274 +#include <linux/skbuff.h>
5275 +#include <linux/ip.h>
5276 +#include <linux/random.h>
5277 +#include <net/tcp.h>
5278 +#include <linux/spinlock.h>
5279 +#include <linux/netfilter_ipv4/ip_tables.h>
5280 +#include <linux/netfilter_ipv4/ipt_fuzzy.h>
5281 +
5282 +/*
5283 + Packet Acceptance Rate - LOW and Packet Acceptance Rate - HIGH
5284 + Expressed in percentage
5285 +*/
5286 +
5287 +#define PAR_LOW                1/100
5288 +#define PAR_HIGH       1
5289 +
5290 +static spinlock_t fuzzy_lock = SPIN_LOCK_UNLOCKED ;
5291 +
5292 +MODULE_AUTHOR("Hime Aguiar e Oliveira Junior <hime@engineer.com>");
5293 +MODULE_DESCRIPTION("IP tables Fuzzy Logic Controller match module");
5294 +MODULE_LICENSE("GPL");
5295 +
5296 +static  u_int8_t mf_high(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
5297 +{
5298 +       if (tx >= maxi)
5299 +               return 100;
5300 +
5301 +       if (tx <= mini)
5302 +               return 0;
5303 +
5304 +       return ( (100*(tx-mini)) / (maxi-mini) );
5305 +}
5306 +
5307 +static u_int8_t mf_low(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
5308 +{
5309 +       if (tx <= mini)
5310 +               return 100;
5311 +
5312 +       if (tx >= maxi)
5313 +               return 0;
5314 +
5315 +       return ( (100*( maxi - tx ))  / ( maxi - mini ) );
5316 +}
5317 +
5318 +static int
5319 +ipt_fuzzy_match(const struct sk_buff *pskb,
5320 +              const struct net_device *in,
5321 +              const struct net_device *out,
5322 +              const void *matchinfo,
5323 +              int offset,
5324 +              int *hotdrop)
5325 +{
5326 +       /* From userspace */
5327 +       
5328 +       struct ipt_fuzzy_info *info = (struct ipt_fuzzy_info *) matchinfo;
5329 +
5330 +       u_int8_t random_number;
5331 +       unsigned long amount;
5332 +       u_int8_t howhigh, howlow;
5333 +       
5334 +
5335 +       spin_lock_bh(&fuzzy_lock); /* Acquire the lock */
5336 +
5337 +       info->bytes_total += pskb->len;
5338 +       info->packets_total++;
5339 +
5340 +       info->present_time = jiffies;
5341 +       
5342 +       if (info->present_time >= info->previous_time)
5343 +               amount = info->present_time - info->previous_time;
5344 +       else { 
5345 +               /* There was a transition : I choose to re-sample 
5346 +                  and keep the old acceptance rate...
5347 +               */
5348 +
5349 +               amount = 0;
5350 +               info->previous_time = info->present_time;
5351 +               info->bytes_total = info->packets_total = 0;
5352 +       };
5353 +       
5354 +       if (amount > HZ/10) /* More than 100 ms elapsed ... */
5355 +       {
5356 +
5357 +               info->mean_rate = (u_int32_t) ((HZ*info->packets_total)  \
5358 +                                       / amount );
5359 +
5360 +               info->previous_time = info->present_time;
5361 +               info->bytes_total = info->packets_total = 0;
5362 +
5363 +               howhigh = mf_high(info->mean_rate,info->minimum_rate,info->maximum_rate);
5364 +               howlow  = mf_low(info->mean_rate,info->minimum_rate,info->maximum_rate);
5365 +
5366 +               info->acceptance_rate = (u_int8_t) \
5367 +                          (howhigh*PAR_LOW + PAR_HIGH*howlow);
5368 +
5369 +               /* In fact , the above defuzzification would require a denominator
5370 +                  proportional to (howhigh+howlow) but , in this particular case ,
5371 +                  that expression is constant .
5372 +                  An immediate consequence is that it isn't necessary to call 
5373 +                  both mf_high and mf_low - but to keep things understandable ,
5374 +                  I did so .  */ 
5375 +
5376 +       }
5377 +       
5378 +       spin_unlock_bh(&fuzzy_lock); /* Release the lock */
5379 +
5380 +
5381 +       if ( info->acceptance_rate < 100 )
5382 +       {                
5383 +               get_random_bytes((void *)(&random_number), 1);
5384 +
5385 +               /*  If within the acceptance , it can pass => don't match */
5386 +               if (random_number <= (255 * info->acceptance_rate) / 100)
5387 +                       return 0;
5388 +               else
5389 +                       return 1; /* It can't pass ( It matches ) */
5390 +       } ;
5391 +
5392 +       return 0; /* acceptance_rate == 100 % => Everything passes ... */
5393 +       
5394 +}
5395 +
5396 +static int
5397 +ipt_fuzzy_checkentry(const char *tablename,
5398 +                  const struct ipt_ip *e,
5399 +                  void *matchinfo,
5400 +                  unsigned int matchsize,
5401 +                  unsigned int hook_mask)
5402 +{
5403 +       
5404 +       const struct ipt_fuzzy_info *info = matchinfo;
5405 +
5406 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_fuzzy_info))) {
5407 +               printk("ipt_fuzzy: matchsize %u != %u\n", matchsize,
5408 +                      IPT_ALIGN(sizeof(struct ipt_fuzzy_info)));
5409 +               return 0;
5410 +       }
5411 +
5412 +       if ((info->minimum_rate < MINFUZZYRATE ) || (info->maximum_rate > MAXFUZZYRATE)
5413 +           || (info->minimum_rate >= info->maximum_rate )) {
5414 +               printk("ipt_fuzzy: BAD limits , please verify !!!\n");
5415 +               return 0;
5416 +       }
5417 +
5418 +       return 1;
5419 +}
5420 +
5421 +static struct ipt_match ipt_fuzzy_reg = { 
5422 +       .name = "fuzzy",
5423 +       .match = ipt_fuzzy_match,
5424 +       .checkentry = ipt_fuzzy_checkentry,
5425 +       .me = THIS_MODULE
5426 +};
5427 +
5428 +static int __init init(void)
5429 +{
5430 +       return ipt_register_match(&ipt_fuzzy_reg);
5431 +}
5432 +
5433 +static void __exit fini(void)
5434 +{
5435 +       ipt_unregister_match(&ipt_fuzzy_reg);
5436 +}
5437 +
5438 +module_init(init);
5439 +module_exit(fini);
5440 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_ipv4options.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_ipv4options.c
5441 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_ipv4options.c    1970-01-01 00:00:00.000000000 +0000
5442 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_ipv4options.c        2004-03-05 07:40:09.000000000 +0000
5443 @@ -0,0 +1,172 @@
5444 +/*
5445 +  This is a module which is used to match ipv4 options.
5446 +  This file is distributed under the terms of the GNU General Public
5447 +  License (GPL). Copies of the GPL can be obtained from:
5448 +  ftp://prep.ai.mit.edu/pub/gnu/GPL
5449 +
5450 +  11-mars-2001 Fabrice MARIE <fabrice@netfilter.org> : initial development.
5451 +  12-july-2001 Fabrice MARIE <fabrice@netfilter.org> : added router-alert otions matching. Fixed a bug with no-srr
5452 +  12-august-2001 Imran Patel <ipatel@crosswinds.net> : optimization of the match.
5453 +  18-november-2001 Fabrice MARIE <fabrice@netfilter.org> : added [!] 'any' option match.
5454 +  19-february-2004 Harald Welte <laforge@netfilter.org> : merge with 2.6.x
5455 +*/
5456 +
5457 +#include <linux/module.h>
5458 +#include <linux/skbuff.h>
5459 +#include <net/ip.h>
5460 +
5461 +#include <linux/netfilter_ipv4/ip_tables.h>
5462 +#include <linux/netfilter_ipv4/ipt_ipv4options.h>
5463 +
5464 +MODULE_LICENSE("GPL");
5465 +MODULE_AUTHOR("Fabrice Marie <fabrice@netfilter.org>");
5466 +
5467 +static int
5468 +match(const struct sk_buff *skb,
5469 +      const struct net_device *in,
5470 +      const struct net_device *out,
5471 +      const void *matchinfo,
5472 +      int offset,
5473 +      int *hotdrop)
5474 +{
5475 +       const struct ipt_ipv4options_info *info = matchinfo;   /* match info for rule */
5476 +       const struct iphdr *iph = skb->nh.iph;
5477 +       const struct ip_options *opt;
5478 +
5479 +       if (iph->ihl * 4 == sizeof(struct iphdr)) {
5480 +               /* No options, so we match only the "DONTs" and the "IGNOREs" */
5481 +
5482 +               if (((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT) ||
5483 +                   ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
5484 +                   ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) ||
5485 +                   ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) ||
5486 +                   ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) ||
5487 +                    ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT))
5488 +                       return 0;
5489 +               return 1;
5490 +       }
5491 +       else {
5492 +               if ((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT)
5493 +                       /* there are options, and we don't need to care which one */
5494 +                       return 1;
5495 +               else {
5496 +                       if ((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT)
5497 +                               /* there are options but we don't want any ! */
5498 +                               return 0;
5499 +               }
5500 +       }
5501 +
5502 +       opt = &(IPCB(skb)->opt);
5503 +
5504 +       /* source routing */
5505 +       if ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) {
5506 +               if (!((opt->srr) & (opt->is_strictroute)))
5507 +                       return 0;
5508 +       }
5509 +       else if ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) {
5510 +               if (!((opt->srr) & (!opt->is_strictroute)))
5511 +                       return 0;
5512 +       }
5513 +       else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR) {
5514 +               if (opt->srr)
5515 +                       return 0;
5516 +       }
5517 +       /* record route */
5518 +       if ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) {
5519 +               if (!opt->rr)
5520 +                       return 0;
5521 +       }
5522 +       else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR) {
5523 +               if (opt->rr)
5524 +                       return 0;
5525 +       }
5526 +       /* timestamp */
5527 +       if ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) {
5528 +               if (!opt->ts)
5529 +                       return 0;
5530 +       }
5531 +       else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) {
5532 +               if (opt->ts)
5533 +                       return 0;
5534 +       }
5535 +       /* router-alert option  */
5536 +       if ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) {
5537 +               if (!opt->router_alert)
5538 +                       return 0;
5539 +       }
5540 +       else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) {
5541 +               if (opt->router_alert)
5542 +                       return 0;
5543 +       }
5544 +
5545 +       /* we match ! */
5546 +       return 1;
5547 +}
5548 +
5549 +static int
5550 +checkentry(const char *tablename,
5551 +          const struct ipt_ip *ip,
5552 +          void *matchinfo,
5553 +          unsigned int matchsize,
5554 +          unsigned int hook_mask)
5555 +{
5556 +       const struct ipt_ipv4options_info *info = matchinfo;   /* match info for rule */
5557 +       /* Check the size */
5558 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_ipv4options_info)))
5559 +               return 0;
5560 +       /* Now check the coherence of the data ... */
5561 +       if (((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT) &&
5562 +           (((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR) ||
5563 +            ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR) ||
5564 +            ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) ||
5565 +            ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) ||
5566 +            ((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT)))
5567 +               return 0; /* opposites */
5568 +       if (((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) &&
5569 +           (((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) ||
5570 +            ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
5571 +            ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) ||
5572 +            ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) ||
5573 +            ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) ||
5574 +            ((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT)))
5575 +               return 0; /* opposites */
5576 +       if (((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) &&
5577 +           ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR))
5578 +               return 0; /* cannot match in the same time loose and strict source routing */
5579 +       if ((((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
5580 +            ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR)) &&
5581 +           ((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR))
5582 +               return 0; /* opposites */
5583 +       if (((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) &&
5584 +           ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR))
5585 +               return 0; /* opposites */
5586 +       if (((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) &&
5587 +           ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP))
5588 +               return 0; /* opposites */
5589 +       if (((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) &&
5590 +           ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT))
5591 +               return 0; /* opposites */
5592 +
5593 +       /* everything looks ok. */
5594 +       return 1;
5595 +}
5596 +
5597 +static struct ipt_match ipv4options_match = { 
5598 +       .name = "ipv4options",
5599 +       .match = match,
5600 +       .checkentry = checkentry,
5601 +       .me = THIS_MODULE
5602 +};
5603 +
5604 +static int __init init(void)
5605 +{
5606 +       return ipt_register_match(&ipv4options_match);
5607 +}
5608 +
5609 +static void __exit fini(void)
5610 +{
5611 +       ipt_unregister_match(&ipv4options_match);
5612 +}
5613 +
5614 +module_init(init);
5615 +module_exit(fini);
5616 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_mport.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_mport.c
5617 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_mport.c  1970-01-01 00:00:00.000000000 +0000
5618 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_mport.c      2004-03-05 07:40:11.000000000 +0000
5619 @@ -0,0 +1,116 @@
5620 +/* Kernel module to match one of a list of TCP/UDP ports: ports are in
5621 +   the same place so we can treat them as equal. */
5622 +#include <linux/module.h>
5623 +#include <linux/types.h>
5624 +#include <linux/udp.h>
5625 +#include <linux/skbuff.h>
5626 +
5627 +#include <linux/netfilter_ipv4/ipt_mport.h>
5628 +#include <linux/netfilter_ipv4/ip_tables.h>
5629 +
5630 +MODULE_LICENSE("GPL");
5631 +
5632 +#if 0
5633 +#define duprintf(format, args...) printk(format , ## args)
5634 +#else
5635 +#define duprintf(format, args...)
5636 +#endif
5637 +
5638 +/* Returns 1 if the port is matched by the test, 0 otherwise. */
5639 +static inline int
5640 +ports_match(const struct ipt_mport *minfo, u_int16_t src, u_int16_t dst)
5641 +{
5642 +       unsigned int i;
5643 +        unsigned int m;
5644 +        u_int16_t pflags = minfo->pflags;
5645 +       for (i=0, m=1; i<IPT_MULTI_PORTS; i++, m<<=1) {
5646 +                u_int16_t s, e;
5647 +
5648 +                if (pflags & m
5649 +                    && minfo->ports[i] == 65535)
5650 +                        return 0;
5651 +
5652 +                s = minfo->ports[i];
5653 +
5654 +                if (pflags & m) {
5655 +                        e = minfo->ports[++i];
5656 +                        m <<= 1;
5657 +                } else
5658 +                        e = s;
5659 +
5660 +                if (minfo->flags & IPT_MPORT_SOURCE
5661 +                    && src >= s && src <= e)
5662 +                        return 1;
5663 +
5664 +               if (minfo->flags & IPT_MPORT_DESTINATION
5665 +                   && dst >= s && dst <= e)
5666 +                       return 1;
5667 +       }
5668 +
5669 +       return 0;
5670 +}
5671 +
5672 +static int
5673 +match(const struct sk_buff *skb,
5674 +      const struct net_device *in,
5675 +      const struct net_device *out,
5676 +      const void *matchinfo,
5677 +      int offset,
5678 +      int *hotdrop)
5679 +{
5680 +       u16 ports[2];
5681 +       const struct ipt_mport *minfo = matchinfo;
5682 +
5683 +       if (offset)
5684 +               return 0;
5685 +
5686 +       /* Must be big enough to read ports (both UDP and TCP have
5687 +           them at the start). */
5688 +       if (skb_copy_bits(skb, skb->nh.iph->ihl*4, ports, sizeof(ports)) < 0) {
5689 +               /* We've been asked to examine this packet, and we
5690 +                  can't.  Hence, no choice but to drop. */
5691 +                       duprintf("ipt_multiport:"
5692 +                                " Dropping evil offset=0 tinygram.\n");
5693 +                       *hotdrop = 1;
5694 +                       return 0;
5695 +       }
5696 +
5697 +       return ports_match(minfo, ntohs(ports[0]), ntohs(ports[1]));
5698 +}
5699 +
5700 +/* Called when user tries to insert an entry of this type. */
5701 +static int
5702 +checkentry(const char *tablename,
5703 +          const struct ipt_ip *ip,
5704 +          void *matchinfo,
5705 +          unsigned int matchsize,
5706 +          unsigned int hook_mask)
5707 +{
5708 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_mport)))
5709 +               return 0;
5710 +
5711 +       /* Must specify proto == TCP/UDP, no unknown flags or bad count */
5712 +       return (ip->proto == IPPROTO_TCP || ip->proto == IPPROTO_UDP)
5713 +               && !(ip->invflags & IPT_INV_PROTO)
5714 +               && matchsize == IPT_ALIGN(sizeof(struct ipt_mport));
5715 +}
5716 +
5717 +static struct ipt_match mport_match = { 
5718 +       .name = "mport",
5719 +       .match = &match,
5720 +       .checkentry = &checkentry,
5721 +       .me = THIS_MODULE
5722 +};
5723 +
5724 +static int __init init(void)
5725 +{
5726 +       return ipt_register_match(&mport_match);
5727 +}
5728 +
5729 +static void __exit fini(void)
5730 +{
5731 +       ipt_unregister_match(&mport_match);
5732 +}
5733 +
5734 +module_init(init);
5735 +module_exit(fini);
5736 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_nth.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_nth.c
5737 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_nth.c    1970-01-01 00:00:00.000000000 +0000
5738 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_nth.c        2004-03-05 07:40:13.000000000 +0000
5739 @@ -0,0 +1,166 @@
5740 +/*
5741 +  This is a module which is used for match support for every Nth packet
5742 +  This file is distributed under the terms of the GNU General Public
5743 +  License (GPL). Copies of the GPL can be obtained from:
5744 +     ftp://prep.ai.mit.edu/pub/gnu/GPL
5745 +
5746 +  2001-07-18 Fabrice MARIE <fabrice@netfilter.org> : initial implementation.
5747 +  2001-09-20 Richard Wagner (rwagner@cloudnet.com)
5748 +        * added support for multiple counters
5749 +        * added support for matching on individual packets
5750 +          in the counter cycle
5751 +  2004-02-19 Harald Welte <laforge@netfilter.org>
5752 +       * port to 2.6.x
5753 +
5754 +*/
5755 +
5756 +#include <linux/module.h>
5757 +#include <linux/skbuff.h>
5758 +#include <linux/ip.h>
5759 +#include <net/tcp.h>
5760 +#include <linux/spinlock.h>
5761 +#include <linux/netfilter_ipv4/ip_tables.h>
5762 +#include <linux/netfilter_ipv4/ipt_nth.h>
5763 +
5764 +MODULE_LICENSE("GPL");
5765 +MODULE_AUTHOR("Fabrice Marie <fabrice@netfilter.org>");
5766 +
5767 +/*
5768 + * State information.
5769 + */
5770 +struct state {
5771 +       spinlock_t lock;
5772 +       u_int16_t number;
5773 +};
5774 +
5775 +static struct state states[IPT_NTH_NUM_COUNTERS];
5776 +
5777 +static int
5778 +ipt_nth_match(const struct sk_buff *pskb,
5779 +             const struct net_device *in,
5780 +             const struct net_device *out,
5781 +             const void *matchinfo,
5782 +             int offset,
5783 +             int *hotdrop)
5784 +{
5785 +       /* Parameters from userspace */
5786 +       const struct ipt_nth_info *info = matchinfo;
5787 +        unsigned counter = info->counter;
5788 +               if((counter < 0) || (counter >= IPT_NTH_NUM_COUNTERS)) 
5789 +       {
5790 +                       printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IPT_NTH_NUM_COUNTERS-1);
5791 +               return 0;
5792 +        };
5793 +
5794 +        spin_lock(&states[counter].lock);
5795 +
5796 +        /* Are we matching every nth packet?*/
5797 +        if (info->packet == 0xFF)
5798 +        {
5799 +               /* We're matching every nth packet and only every nth packet*/
5800 +               /* Do we match or invert match? */
5801 +               if (info->not == 0)
5802 +               {
5803 +                       if (states[counter].number == 0)
5804 +                       {
5805 +                               ++states[counter].number;
5806 +                               goto match;
5807 +                       }
5808 +                       if (states[counter].number >= info->every)
5809 +                               states[counter].number = 0; /* reset the counter */
5810 +                       else
5811 +                               ++states[counter].number;
5812 +                       goto dontmatch;
5813 +               }
5814 +               else
5815 +               {
5816 +                       if (states[counter].number == 0)
5817 +                       {
5818 +                               ++states[counter].number;
5819 +                               goto dontmatch;
5820 +                       }
5821 +                       if (states[counter].number >= info->every)
5822 +                               states[counter].number = 0;
5823 +                       else
5824 +                               ++states[counter].number;
5825 +                       goto match;
5826 +               }
5827 +        }
5828 +        else
5829 +        {
5830 +               /* We're using the --packet, so there must be a rule for every value */
5831 +               if (states[counter].number == info->packet)
5832 +               {
5833 +                       /* only increment the counter when a match happens */
5834 +                       if (states[counter].number >= info->every)
5835 +                               states[counter].number = 0; /* reset the counter */
5836 +                       else
5837 +                               ++states[counter].number;
5838 +                       goto match;
5839 +               }
5840 +               else
5841 +                       goto dontmatch;
5842 +       }
5843 +
5844 + dontmatch:
5845 +       /* don't match */
5846 +       spin_unlock(&states[counter].lock);
5847 +       return 0;
5848 +
5849 + match:
5850 +       spin_unlock(&states[counter].lock);
5851 +       return 1;
5852 +}
5853 +
5854 +static int
5855 +ipt_nth_checkentry(const char *tablename,
5856 +                  const struct ipt_ip *e,
5857 +                  void *matchinfo,
5858 +                  unsigned int matchsize,
5859 +                  unsigned int hook_mask)
5860 +{
5861 +       /* Parameters from userspace */
5862 +       const struct ipt_nth_info *info = matchinfo;
5863 +        unsigned counter = info->counter;
5864 +        if((counter < 0) || (counter >= IPT_NTH_NUM_COUNTERS)) 
5865 +       {
5866 +               printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IPT_NTH_NUM_COUNTERS-1);
5867 +                       return 0;
5868 +               };
5869 +
5870 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_nth_info))) {
5871 +               printk("nth: matchsize %u != %u\n", matchsize,
5872 +                      IPT_ALIGN(sizeof(struct ipt_nth_info)));
5873 +               return 0;
5874 +       }
5875 +
5876 +       states[counter].number = info->startat;
5877 +
5878 +       return 1;
5879 +}
5880 +
5881 +static struct ipt_match ipt_nth_reg = { 
5882 +       .name = "nth",
5883 +       .match = ipt_nth_match,
5884 +       .checkentry = ipt_nth_checkentry,
5885 +       .me = THIS_MODULE
5886 +};
5887 +
5888 +static int __init init(void)
5889 +{
5890 +       unsigned counter;
5891 +
5892 +       memset(&states, 0, sizeof(states));
5893 +        for (counter = 0; counter < IPT_NTH_NUM_COUNTERS; counter++) 
5894 +               spin_lock_init(&(states[counter].lock));
5895 +
5896 +       return ipt_register_match(&ipt_nth_reg);
5897 +}
5898 +
5899 +static void __exit fini(void)
5900 +{
5901 +       ipt_unregister_match(&ipt_nth_reg);
5902 +}
5903 +
5904 +module_init(init);
5905 +module_exit(fini);
5906 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_quota.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_quota.c
5907 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_quota.c  1970-01-01 00:00:00.000000000 +0000
5908 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_quota.c      2004-03-05 07:40:14.000000000 +0000
5909 @@ -0,0 +1,91 @@
5910 +/* 
5911 + * netfilter module to enforce network quotas
5912 + *
5913 + * Sam Johnston <samj@samj.net>
5914 + */
5915 +#include <linux/module.h>
5916 +#include <linux/skbuff.h>
5917 +#include <linux/spinlock.h>
5918 +#include <linux/interrupt.h>
5919 +
5920 +#include <linux/netfilter_ipv4/ip_tables.h>
5921 +#include <linux/netfilter_ipv4/ipt_quota.h>
5922 +
5923 +MODULE_LICENSE("GPL");
5924 +MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
5925 +
5926 +static spinlock_t quota_lock = SPIN_LOCK_UNLOCKED;
5927 +
5928 +static int
5929 +match(const struct sk_buff *skb,
5930 +      const struct net_device *in,
5931 +      const struct net_device *out,
5932 +      const void *matchinfo,
5933 +      int offset, int *hotdrop)
5934 +{
5935 +        struct ipt_quota_info *q = (struct ipt_quota_info *) matchinfo;
5936 +       unsigned int datalen;
5937 +
5938 +       if (skb->len < sizeof(struct iphdr))
5939 +               return NF_ACCEPT;
5940 +       
5941 +       datalen = skb->len - skb->nh.iph->ihl*4;
5942 +
5943 +        spin_lock_bh(&quota_lock);
5944 +
5945 +        if (q->quota >= datalen) {
5946 +                /* we can afford this one */
5947 +                q->quota -= datalen;
5948 +                spin_unlock_bh(&quota_lock);
5949 +
5950 +#ifdef DEBUG_IPT_QUOTA
5951 +                printk("IPT Quota OK: %llu datlen %d \n", q->quota, datalen);
5952 +#endif
5953 +                return 1;
5954 +        }
5955 +
5956 +        /* so we do not allow even small packets from now on */
5957 +        q->quota = 0;
5958 +
5959 +#ifdef DEBUG_IPT_QUOTA
5960 +        printk("IPT Quota Failed: %llu datlen %d \n", q->quota, datalen);
5961 +#endif
5962 +
5963 +        spin_unlock_bh(&quota_lock);
5964 +        return 0;
5965 +}
5966 +
5967 +static int
5968 +checkentry(const char *tablename,
5969 +           const struct ipt_ip *ip,
5970 +           void *matchinfo, unsigned int matchsize, unsigned int hook_mask)
5971 +{
5972 +        /* TODO: spinlocks? sanity checks? */
5973 +        if (matchsize != IPT_ALIGN(sizeof (struct ipt_quota_info)))
5974 +                return 0;
5975 +
5976 +        return 1;
5977 +}
5978 +
5979 +static struct ipt_match quota_match = {
5980 +       .name = "quota",
5981 +       .match = match,
5982 +       .checkentry = checkentry,
5983 +       .me = THIS_MODULE
5984 +};
5985 +
5986 +static int __init
5987 +init(void)
5988 +{
5989 +        return ipt_register_match(&quota_match);
5990 +}
5991 +
5992 +static void __exit
5993 +fini(void)
5994 +{
5995 +        ipt_unregister_match(&quota_match);
5996 +}
5997 +
5998 +module_init(init);
5999 +module_exit(fini);
6000 +
6001 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_realm.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_realm.c
6002 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_realm.c  1970-01-01 00:00:00.000000000 +0000
6003 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_realm.c      2004-03-05 07:40:22.000000000 +0000
6004 @@ -0,0 +1,70 @@
6005 +/* Kernel module to match realm from routing. */
6006 +#include <linux/module.h>
6007 +#include <linux/skbuff.h>
6008 +#include <linux/netdevice.h>
6009 +#include <net/route.h>
6010 +
6011 +#include <linux/netfilter_ipv4/ipt_realm.h>
6012 +#include <linux/netfilter_ipv4/ip_tables.h>
6013 +
6014 +MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
6015 +MODULE_LICENSE("GPL");
6016 +
6017 +static int
6018 +match(const struct sk_buff *skb,
6019 +      const struct net_device *in,
6020 +      const struct net_device *out,
6021 +      const void *matchinfo,
6022 +      int offset,
6023 +      int *hotdrop)
6024 +{
6025 +       const struct ipt_realm_info *info = matchinfo;
6026 +       struct dst_entry *dst = skb->dst;
6027 +       u32 id;
6028 +    
6029 +       if(dst == NULL)
6030 +               return 0;
6031 +       id = dst->tclassid;
6032 +
6033 +       return (info->id == (id & info->mask)) ^ info->invert;
6034 +}
6035 +
6036 +static int check(const char *tablename,
6037 +                 const struct ipt_ip *ip,
6038 +                 void *matchinfo,
6039 +                 unsigned int matchsize,
6040 +                 unsigned int hook_mask)
6041 +{
6042 +       if (hook_mask
6043 +           & ~((1 << NF_IP_POST_ROUTING) | (1 << NF_IP_FORWARD) |
6044 +               (1 << NF_IP_LOCAL_OUT)| (1 << NF_IP_LOCAL_IN))) {
6045 +               printk("ipt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
6046 +                      "LOCAL_IN or FORWARD.\n");
6047 +               return 0;
6048 +       }
6049 +
6050 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_realm_info)))
6051 +               return 0;
6052 +
6053 +       return 1;
6054 +}
6055 +
6056 +static struct ipt_match realm_match = {
6057 +       .name = "realm",
6058 +       .match = match, 
6059 +       .checkentry = check,
6060 +       .me = THIS_MODULE
6061 +};
6062 +
6063 +static int __init init(void)
6064 +{
6065 +       return ipt_register_match(&realm_match);
6066 +}
6067 +
6068 +static void __exit fini(void)
6069 +{
6070 +       ipt_unregister_match(&realm_match);
6071 +}
6072 +
6073 +module_init(init);
6074 +module_exit(fini);
6075 diff -Nur linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_sctp.c linux-2.6.4-rc2/net/ipv4/netfilter/ipt_sctp.c
6076 --- linux-2.6.4-rc2.org/net/ipv4/netfilter/ipt_sctp.c   1970-01-01 00:00:00.000000000 +0000
6077 +++ linux-2.6.4-rc2/net/ipv4/netfilter/ipt_sctp.c       2004-03-05 07:40:24.000000000 +0000
6078 @@ -0,0 +1,198 @@
6079 +#include <linux/module.h>
6080 +#include <linux/skbuff.h>
6081 +#include <net/ip.h>
6082 +#include <linux/sctp.h>
6083 +
6084 +#include <linux/netfilter_ipv4/ip_tables.h>
6085 +#include <linux/netfilter_ipv4/ipt_sctp.h>
6086 +
6087 +#if 1
6088 +#define duprintf(format, args...) printk(format , ## args)
6089 +#else
6090 +#define duprintf(format, args...)
6091 +#endif
6092 +
6093 +#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
6094 +                                             || (!!((invflag) & (option)) ^ (cond)))
6095 +
6096 +static int
6097 +match_flags(const struct ipt_sctp_flag_info *flag_info,
6098 +           const int flag_count,
6099 +           u_int8_t chunktype,
6100 +           u_int8_t chunkflags)
6101 +{
6102 +       int i;
6103 +
6104 +       for (i = 0; i < flag_count; i++) {
6105 +               if (flag_info[i].chunktype == chunktype) {
6106 +                       return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag;
6107 +               }
6108 +       }
6109 +
6110 +       return 1;
6111 +}
6112 +
6113 +static int
6114 +match_packet(const struct sk_buff *skb,
6115 +            const u_int32_t *chunkmap,
6116 +            int chunk_match_type,
6117 +            const struct ipt_sctp_flag_info *flag_info,
6118 +            const int flag_count,
6119 +            int *hotdrop)
6120 +{
6121 +       int offset;
6122 +       u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
6123 +       sctp_chunkhdr_t sch;
6124 +
6125 +       if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) {
6126 +               SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
6127 +       }
6128 +
6129 +       offset = skb->nh.iph->ihl * 4 + sizeof (sctp_sctphdr_t);
6130 +       do {
6131 +               if (skb_copy_bits(skb, offset, &sch, sizeof(sch)) < 0) {
6132 +                       duprintf("Dropping invalid SCTP packet.\n");
6133 +                       *hotdrop = 1;
6134 +                       return 0;
6135 +               }
6136 +
6137 +//             ERROR needed fix !!!!!!
6138 +//             duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", 
6139 +//                             ++i, offset, sch.type, htons(sch.length), sch.flags);
6140 +
6141 +               offset += (htons(sch.length) + 3) & ~3;
6142 +
6143 +               duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
6144 +
6145 +               if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch.type)) {
6146 +                       switch (chunk_match_type) {
6147 +                       case SCTP_CHUNK_MATCH_ANY:
6148 +                               if (match_flags(flag_info, flag_count, 
6149 +                                       sch.type, sch.flags)) {
6150 +                                       return 1;
6151 +                               }
6152 +                               break;
6153 +
6154 +                       case SCTP_CHUNK_MATCH_ALL:
6155 +                               if (match_flags(flag_info, flag_count, 
6156 +                                       sch.type, sch.flags)) {
6157 +                                       SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch.type);
6158 +                               }
6159 +                               break;
6160 +
6161 +                       case SCTP_CHUNK_MATCH_ONLY:
6162 +                               if (!match_flags(flag_info, flag_count, 
6163 +                                       sch.type, sch.flags)) {
6164 +                                       return 0;
6165 +                               }
6166 +                               break;
6167 +                       }
6168 +               } else {
6169 +                       switch (chunk_match_type) {
6170 +                       case SCTP_CHUNK_MATCH_ONLY:
6171 +                               return 0;
6172 +                       }
6173 +               }
6174 +       } while (offset < skb->len);
6175 +
6176 +       switch (chunk_match_type) {
6177 +       case SCTP_CHUNK_MATCH_ALL:
6178 +               return SCTP_CHUNKMAP_IS_CLEAR(chunkmap);
6179 +       case SCTP_CHUNK_MATCH_ANY:
6180 +               return 0;
6181 +       case SCTP_CHUNK_MATCH_ONLY:
6182 +               return 1;
6183 +       }
6184 +
6185 +       /* This will never be reached, but required to stop compiler whine */
6186 +       return 0;
6187 +}
6188 +
6189 +static int
6190 +match(const struct sk_buff *skb,
6191 +      const struct net_device *in,
6192 +      const struct net_device *out,
6193 +      const void *matchinfo,
6194 +      int offset,
6195 +      int *hotdrop)
6196 +{
6197 +       const struct ipt_sctp_info *info;
6198 +       sctp_sctphdr_t sh;
6199 +
6200 +       info = (const struct ipt_sctp_info *)matchinfo;
6201 +
6202 +       if (offset) {
6203 +               duprintf("Dropping non-first fragment.. FIXME\n");
6204 +               return 0;
6205 +       }
6206 +       
6207 +       if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &sh, sizeof(sh)) < 0) {
6208 +               duprintf("Dropping evil TCP offset=0 tinygram.\n");
6209 +               *hotdrop = 1;
6210 +               return 0;
6211 +               }
6212 +       duprintf("spt: %d\tdpt: %d\n", ntohs(sh.source), ntohs(sh.dest));
6213 +
6214 +       return  SCCHECK(((ntohs(sh.source) >= info->spts[0]) 
6215 +                       && (ntohs(sh.source) <= info->spts[1])), 
6216 +                       IPT_SCTP_SRC_PORTS, info->flags, info->invflags)
6217 +               && SCCHECK(((ntohs(sh.dest) >= info->dpts[0]) 
6218 +                       && (ntohs(sh.dest) <= info->dpts[1])), 
6219 +                       IPT_SCTP_DEST_PORTS, info->flags, info->invflags)
6220 +               && SCCHECK(match_packet(skb, info->chunkmap, info->chunk_match_type,
6221 +                                       info->flag_info, info->flag_count, 
6222 +                                       hotdrop),
6223 +                          IPT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
6224 +}
6225 +
6226 +static int
6227 +checkentry(const char *tablename,
6228 +          const struct ipt_ip *ip,
6229 +          void *matchinfo,
6230 +          unsigned int matchsize,
6231 +          unsigned int hook_mask)
6232 +{
6233 +       const struct ipt_sctp_info *info;
6234 +
6235 +       info = (const struct ipt_sctp_info *)matchinfo;
6236 +
6237 +       return ip->proto == IPPROTO_SCTP
6238 +               && !(ip->invflags & IPT_INV_PROTO)
6239 +               && matchsize == IPT_ALIGN(sizeof(struct ipt_sctp_info))
6240 +               && !(info->flags & ~IPT_SCTP_VALID_FLAGS)
6241 +               && !(info->invflags & ~IPT_SCTP_VALID_FLAGS)
6242 +               && !(info->invflags & ~info->flags)
6243 +               && ((!(info->flags & IPT_SCTP_CHUNK_TYPES)) || 
6244 +                       (info->chunk_match_type &
6245 +                               (SCTP_CHUNK_MATCH_ALL 
6246 +                               | SCTP_CHUNK_MATCH_ANY
6247 +                               | SCTP_CHUNK_MATCH_ONLY)));
6248 +}
6249 +
6250 +static struct ipt_match sctp_match = 
6251 +{ 
6252 +       .list = { NULL, NULL},
6253 +       .name = "sctp",
6254 +       .match = &match,
6255 +       .checkentry = &checkentry,
6256 +       .destroy = NULL,
6257 +       .me = THIS_MODULE
6258 +};
6259 +
6260 +static int __init init(void)
6261 +{
6262 +       return ipt_register_match(&sctp_match);
6263 +}
6264 +
6265 +static void __exit fini(void)
6266 +{
6267 +       ipt_unregister_match(&sctp_match);
6268 +}
6269 +
6270 +module_init(init);
6271 +module_exit(fini);
6272 +
6273 +MODULE_LICENSE("GPL");
6274 +MODULE_AUTHOR("Kiran Kumar Immidi");
6275 +MODULE_DESCRIPTION("Match for SCTP protocol packets");
6276 +
6277 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/Kconfig linux-2.6.4-rc2/net/ipv6/netfilter/Kconfig
6278 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/Kconfig      2004-03-04 06:17:03.000000000 +0000
6279 +++ linux-2.6.4-rc2/net/ipv6/netfilter/Kconfig  2004-03-05 07:40:13.000000000 +0000
6280 @@ -218,5 +218,25 @@
6281           To compile it as a module, choose M here.  If unsure, say N.
6282  
6283  #dep_tristate '  LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_IPTABLES
6284 +config IP6_NF_TARGET_HOPLIMIT
6285 +       tristate  'HOPLIMIT target support'
6286 +       depends on IP6_NF_MANGLE
6287 +         help
6288 +
6289 +config IP6_NF_TARGET_REJECT
6290 +       tristate  'REJECT target support'
6291 +       depends on IP6_NF_FILTER
6292 +         help
6293 +
6294 +config IP6_NF_MATCH_FUZZY
6295 +       tristate  'Fuzzy match support'
6296 +       depends on IP6_NF_FILTER
6297 +         help
6298 +
6299 +config IP6_NF_MATCH_NTH
6300 +       tristate  'Nth match support'
6301 +       depends on IP6_NF_IPTABLES
6302 +         help
6303 +
6304  endmenu
6305  
6306 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/Makefile linux-2.6.4-rc2/net/ipv6/netfilter/Makefile
6307 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/Makefile     2004-03-04 06:16:48.000000000 +0000
6308 +++ linux-2.6.4-rc2/net/ipv6/netfilter/Makefile 2004-03-05 07:40:13.000000000 +0000
6309 @@ -8,6 +8,7 @@
6310  obj-$(CONFIG_IP6_NF_MATCH_MARK) += ip6t_mark.o
6311  obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
6312  obj-$(CONFIG_IP6_NF_MATCH_MAC) += ip6t_mac.o
6313 +obj-$(CONFIG_IP6_NF_MATCH_FUZZY) += ip6t_fuzzy.o
6314  obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
6315  obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
6316  obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
6317 @@ -19,6 +20,10 @@
6318  obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
6319  obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
6320  obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
6321 +obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
6322  obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
6323  obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
6324 +
6325 +obj-$(CONFIG_IP6_NF_MATCH_NTH) += ip6t_nth.o
6326 +obj-$(CONFIG_IP6_NF_TARGET_HOPLIMIT) += ip6t_HL.o
6327  obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
6328 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_HL.c linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_HL.c
6329 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_HL.c    1970-01-01 00:00:00.000000000 +0000
6330 +++ linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_HL.c        2004-03-05 07:39:53.000000000 +0000
6331 @@ -0,0 +1,105 @@
6332 +/* 
6333 + * Hop Limit modification target for ip6tables
6334 + * Maciej Soltysiak <solt@dns.toxicfilms.tv>
6335 + * Based on HW's TTL module
6336 + *
6337 + * This software is distributed under the terms of GNU GPL
6338 + */
6339 +
6340 +#include <linux/module.h>
6341 +#include <linux/skbuff.h>
6342 +#include <linux/ip.h>
6343 +
6344 +#include <linux/netfilter_ipv6/ip6_tables.h>
6345 +#include <linux/netfilter_ipv6/ip6t_HL.h>
6346 +
6347 +MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
6348 +MODULE_DESCRIPTION("IP tables Hop Limit modification module");
6349 +MODULE_LICENSE("GPL");
6350 +
6351 +static unsigned int ip6t_hl_target(struct sk_buff **pskb, unsigned int hooknum,
6352 +               const struct net_device *in, const struct net_device *out,
6353 +               const void *targinfo, void *userinfo)
6354 +{
6355 +       struct ipv6hdr *ip6h = (*pskb)->nh.ipv6h;
6356 +       const struct ip6t_HOPLIMIT_info *info = targinfo;
6357 +       u_int16_t diffs[2];
6358 +       int new_hl;
6359 +                        
6360 +       switch (info->mode) {
6361 +               case IP6T_HOPLIMIT_SET:
6362 +                       new_hl = info->hop_limit;
6363 +                       break;
6364 +               case IP6T_HOPLIMIT_INC:
6365 +                       new_hl = ip6h->hop_limit + info->hop_limit;
6366 +                       if (new_hl > 255)
6367 +                               new_hl = 255;
6368 +                       break;
6369 +               case IP6T_HOPLIMIT_DEC:
6370 +                       new_hl = ip6h->hop_limit + info->hop_limit;
6371 +                       if (new_hl < 0)
6372 +                               new_hl = 0;
6373 +                       break;
6374 +               default:
6375 +                       new_hl = ip6h->hop_limit;
6376 +                       break;
6377 +       }
6378 +
6379 +       if (new_hl != ip6h->hop_limit) {
6380 +               diffs[0] = htons(((unsigned)ip6h->hop_limit) << 8) ^ 0xFFFF;
6381 +               ip6h->hop_limit = new_hl;
6382 +               diffs[1] = htons(((unsigned)ip6h->hop_limit) << 8);
6383 +       }
6384 +
6385 +       return IP6T_CONTINUE;
6386 +}
6387 +
6388 +static int ip6t_hl_checkentry(const char *tablename,
6389 +               const struct ip6t_entry *e,
6390 +               void *targinfo,
6391 +               unsigned int targinfosize,
6392 +               unsigned int hook_mask)
6393 +{
6394 +       struct ip6t_HOPLIMIT_info *info = targinfo;
6395 +
6396 +       if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_HOPLIMIT_info))) {
6397 +               printk(KERN_WARNING "HOPLIMIT: targinfosize %u != %Zu\n",
6398 +                               targinfosize,
6399 +                               IP6T_ALIGN(sizeof(struct ip6t_HOPLIMIT_info)));
6400 +               return 0;       
6401 +       }       
6402 +
6403 +       if (strcmp(tablename, "mangle")) {
6404 +               printk(KERN_WARNING "HOPLIMIT: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
6405 +               return 0;
6406 +       }
6407 +
6408 +       if (info->mode > IP6T_HOPLIMIT_MAXMODE) {
6409 +               printk(KERN_WARNING "HOPLIMIT: invalid or unknown Mode %u\n", 
6410 +                       info->mode);
6411 +               return 0;
6412 +       }
6413 +
6414 +       if ((info->mode != IP6T_HOPLIMIT_SET) && (info->hop_limit == 0)) {
6415 +               printk(KERN_WARNING "HOPLIMIT: increment/decrement doesn't make sense with value 0\n");
6416 +               return 0;
6417 +       }
6418 +       
6419 +       return 1;
6420 +}
6421 +
6422 +static struct ip6t_target ip6t_HOPLIMIT = { { NULL, NULL }, "HL", 
6423 +       ip6t_hl_target, ip6t_hl_checkentry, NULL, THIS_MODULE };
6424 +
6425 +static int __init init(void)
6426 +{
6427 +       return ip6t_register_target(&ip6t_HOPLIMIT);
6428 +}
6429 +
6430 +static void __exit fini(void)
6431 +{
6432 +       ip6t_unregister_target(&ip6t_HOPLIMIT);
6433 +}
6434 +
6435 +module_init(init);
6436 +module_exit(fini);
6437 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_REJECT.c linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_REJECT.c
6438 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_REJECT.c        1970-01-01 00:00:00.000000000 +0000
6439 +++ linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_REJECT.c    2004-03-05 07:39:59.000000000 +0000
6440 @@ -0,0 +1,274 @@
6441 +/*
6442 + * This is a module which is used for rejecting packets.
6443 + *     Added support for customized reject packets (Jozsef Kadlecsik).
6444 + * Sun 12 Nov 2000
6445 + *     Port to IPv6 / ip6tables (Harald Welte <laforge@gnumonks.org>)
6446 + */
6447 +#include <linux/config.h>
6448 +#include <linux/module.h>
6449 +#include <linux/skbuff.h>
6450 +#include <linux/icmpv6.h>
6451 +#include <net/tcp.h>
6452 +#include <linux/netfilter_ipv6/ip6_tables.h>
6453 +#include <linux/netfilter_ipv6/ip6t_REJECT.h>
6454 +
6455 +#if 1
6456 +#define DEBUGP printk
6457 +#else
6458 +#define DEBUGP(format, args...)
6459 +#endif
6460 +
6461 +#if 0
6462 +/* Send RST reply */
6463 +static void send_reset(struct sk_buff *oldskb)
6464 +{
6465 +       struct sk_buff *nskb;
6466 +       struct tcphdr *otcph, *tcph;
6467 +       struct rtable *rt;
6468 +       unsigned int otcplen;
6469 +       int needs_ack;
6470 +
6471 +       /* IP header checks: fragment, too short. */
6472 +       if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)
6473 +           || oldskb->len < (oldskb->nh.iph->ihl<<2) + sizeof(struct tcphdr))
6474 +               return;
6475 +
6476 +       otcph = (struct tcphdr *)((u_int32_t*)oldskb->nh.iph + oldskb->nh.iph->ihl);
6477 +       otcplen = oldskb->len - oldskb->nh.iph->ihl*4;
6478 +
6479 +       /* No RST for RST. */
6480 +       if (otcph->rst)
6481 +               return;
6482 +
6483 +       /* Check checksum. */
6484 +       if (tcp_v4_check(otcph, otcplen, oldskb->nh.iph->saddr,
6485 +                        oldskb->nh.iph->daddr,
6486 +                        csum_partial((char *)otcph, otcplen, 0)) != 0)
6487 +               return;
6488 +
6489 +       /* Copy skb (even if skb is about to be dropped, we can't just
6490 +           clone it because there may be other things, such as tcpdump,
6491 +           interested in it) */
6492 +       nskb = skb_copy(oldskb, GFP_ATOMIC);
6493 +       if (!nskb)
6494 +               return;
6495 +
6496 +       /* This packet will not be the same as the other: clear nf fields */
6497 +       nf_conntrack_put(nskb->nfct);
6498 +       nskb->nfct = NULL;
6499 +       nskb->nfcache = 0;
6500 +#ifdef CONFIG_NETFILTER_DEBUG
6501 +       nskb->nf_debug = 0;
6502 +#endif
6503 +
6504 +       tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
6505 +
6506 +       nskb->nh.iph->daddr = xchg(&nskb->nh.iph->saddr, nskb->nh.iph->daddr);
6507 +       tcph->source = xchg(&tcph->dest, tcph->source);
6508 +
6509 +       /* Truncate to length (no data) */
6510 +       tcph->doff = sizeof(struct tcphdr)/4;
6511 +       skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
6512 +       nskb->nh.iph->tot_len = htons(nskb->len);
6513 +
6514 +       if (tcph->ack) {
6515 +               needs_ack = 0;
6516 +               tcph->seq = otcph->ack_seq;
6517 +               tcph->ack_seq = 0;
6518 +       } else {
6519 +               needs_ack = 1;
6520 +               tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn + otcph->fin
6521 +                                     + otcplen - (otcph->doff<<2));
6522 +               tcph->seq = 0;
6523 +       }
6524 +
6525 +       /* Reset flags */
6526 +       ((u_int8_t *)tcph)[13] = 0;
6527 +       tcph->rst = 1;
6528 +       tcph->ack = needs_ack;
6529 +
6530 +       tcph->window = 0;
6531 +       tcph->urg_ptr = 0;
6532 +
6533 +       /* Adjust TCP checksum */
6534 +       tcph->check = 0;
6535 +       tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
6536 +                                  nskb->nh.iph->saddr,
6537 +                                  nskb->nh.iph->daddr,
6538 +                                  csum_partial((char *)tcph,
6539 +                                               sizeof(struct tcphdr), 0));
6540 +
6541 +       /* Adjust IP TTL, DF */
6542 +       nskb->nh.iph->ttl = MAXTTL;
6543 +       /* Set DF, id = 0 */
6544 +       nskb->nh.iph->frag_off = htons(IP_DF);
6545 +       nskb->nh.iph->id = 0;
6546 +
6547 +       /* Adjust IP checksum */
6548 +       nskb->nh.iph->check = 0;
6549 +       nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
6550 +                                          nskb->nh.iph->ihl);
6551 +
6552 +       /* Routing */
6553 +       if (ip_route_output(&rt, nskb->nh.iph->daddr, nskb->nh.iph->saddr,
6554 +                           RT_TOS(nskb->nh.iph->tos) | RTO_CONN,
6555 +                           0) != 0)
6556 +               goto free_nskb;
6557 +
6558 +       dst_release(nskb->dst);
6559 +       nskb->dst = &rt->u.dst;
6560 +
6561 +       /* "Never happens" */
6562 +       if (nskb->len > nskb->dst->pmtu)
6563 +               goto free_nskb;
6564 +
6565 +       NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
6566 +               ip_finish_output);
6567 +       return;
6568 +
6569 + free_nskb:
6570 +       kfree_skb(nskb);
6571 +}
6572 +#endif
6573 +
6574 +static unsigned int reject6_target(struct sk_buff **pskb,
6575 +                          unsigned int hooknum,
6576 +                          const struct net_device *in,
6577 +                          const struct net_device *out,
6578 +                          const void *targinfo,
6579 +                          void *userinfo)
6580 +{
6581 +       const struct ip6t_reject_info *reject = targinfo;
6582 +
6583 +       /* WARNING: This code causes reentry within ip6tables.
6584 +          This means that the ip6tables jump stack is now crap.  We
6585 +          must return an absolute verdict. --RR */
6586 +       DEBUGP("REJECTv6: calling icmpv6_send\n");
6587 +       switch (reject->with) {
6588 +       case IP6T_ICMP6_NO_ROUTE:
6589 +               icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, out);
6590 +               break;
6591 +       case IP6T_ICMP6_ADM_PROHIBITED:
6592 +               icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0, out);
6593 +               break;
6594 +       case IP6T_ICMP6_NOT_NEIGHBOUR:
6595 +               icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, out);
6596 +               break;
6597 +       case IP6T_ICMP6_ADDR_UNREACH:
6598 +               icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, out);
6599 +               break;
6600 +       case IP6T_ICMP6_PORT_UNREACH:
6601 +               icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, out);
6602 +               break;
6603 +#if 0
6604 +       case IPT_ICMP_ECHOREPLY: {
6605 +               struct icmp6hdr *icmph  = (struct icmphdr *)
6606 +                       ((u_int32_t *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl);
6607 +               unsigned int datalen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
6608 +
6609 +               /* Not non-head frags, or truncated */
6610 +               if (((ntohs((*pskb)->nh.iph->frag_off) & IP_OFFSET) == 0)
6611 +                   && datalen >= 4) {
6612 +                       /* Usually I don't like cut & pasting code,
6613 +                           but dammit, my party is starting in 45
6614 +                           mins! --RR */
6615 +                       struct icmp_bxm icmp_param;
6616 +
6617 +                       icmp_param.icmph=*icmph;
6618 +                       icmp_param.icmph.type=ICMP_ECHOREPLY;
6619 +                       icmp_param.data_ptr=(icmph+1);
6620 +                       icmp_param.data_len=datalen;
6621 +                       icmp_reply(&icmp_param, *pskb);
6622 +               }
6623 +       }
6624 +       break;
6625 +       case IPT_TCP_RESET:
6626 +               send_reset(*pskb);
6627 +               break;
6628 +#endif
6629 +       default:
6630 +               printk(KERN_WARNING "REJECTv6: case %u not handled yet\n", reject->with);
6631 +               break;
6632 +       }
6633 +
6634 +       return NF_DROP;
6635 +}
6636 +
6637 +static inline int find_ping_match(const struct ip6t_entry_match *m)
6638 +{
6639 +       const struct ip6t_icmp *icmpinfo = (const struct ip6t_icmp *)m->data;
6640 +
6641 +       if (strcmp(m->u.kernel.match->name, "icmp6") == 0
6642 +           && icmpinfo->type == ICMPV6_ECHO_REQUEST
6643 +           && !(icmpinfo->invflags & IP6T_ICMP_INV))
6644 +               return 1;
6645 +
6646 +       return 0;
6647 +}
6648 +
6649 +static int check(const char *tablename,
6650 +                const struct ip6t_entry *e,
6651 +                void *targinfo,
6652 +                unsigned int targinfosize,
6653 +                unsigned int hook_mask)
6654 +{
6655 +       const struct ip6t_reject_info *rejinfo = targinfo;
6656 +
6657 +       if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
6658 +               DEBUGP("REJECTv6: targinfosize %u != 0\n", targinfosize);
6659 +               return 0;
6660 +       }
6661 +
6662 +       /* Only allow these for packet filtering. */
6663 +       if (strcmp(tablename, "filter") != 0) {
6664 +               DEBUGP("REJECTv6: bad table `%s'.\n", tablename);
6665 +               return 0;
6666 +       }
6667 +       if ((hook_mask & ~((1 << NF_IP6_LOCAL_IN)
6668 +                          | (1 << NF_IP6_FORWARD)
6669 +                          | (1 << NF_IP6_LOCAL_OUT))) != 0) {
6670 +               DEBUGP("REJECTv6: bad hook mask %X\n", hook_mask);
6671 +               return 0;
6672 +       }
6673 +
6674 +       if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
6675 +               /* Must specify that it's an ICMP ping packet. */
6676 +               if (e->ipv6.proto != IPPROTO_ICMPV6
6677 +                   || (e->ipv6.invflags & IP6T_INV_PROTO)) {
6678 +                       DEBUGP("REJECTv6: ECHOREPLY illegal for non-icmp\n");
6679 +                       return 0;
6680 +               }
6681 +               /* Must contain ICMP match. */
6682 +               if (IP6T_MATCH_ITERATE(e, find_ping_match) == 0) {
6683 +                       DEBUGP("REJECTv6: ECHOREPLY illegal for non-ping\n");
6684 +                       return 0;
6685 +               }
6686 +       } else if (rejinfo->with == IP6T_TCP_RESET) {
6687 +               /* Must specify that it's a TCP packet */
6688 +               if (e->ipv6.proto != IPPROTO_TCP
6689 +                   || (e->ipv6.invflags & IP6T_INV_PROTO)) {
6690 +                       DEBUGP("REJECTv6: TCP_RESET illegal for non-tcp\n");
6691 +                       return 0;
6692 +               }
6693 +       }
6694 +
6695 +       return 1;
6696 +}
6697 +
6698 +static struct ip6t_target ip6t_reject_reg
6699 += { { NULL, NULL }, "REJECT", reject6_target, check, NULL, THIS_MODULE };
6700 +
6701 +static int __init init(void)
6702 +{
6703 +       if (ip6t_register_target(&ip6t_reject_reg))
6704 +               return -EINVAL;
6705 +       return 0;
6706 +}
6707 +
6708 +static void __exit fini(void)
6709 +{
6710 +       ip6t_unregister_target(&ip6t_reject_reg);
6711 +}
6712 +
6713 +module_init(init);
6714 +module_exit(fini);
6715 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_fuzzy.c linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_fuzzy.c
6716 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_fuzzy.c 1970-01-01 00:00:00.000000000 +0000
6717 +++ linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_fuzzy.c     2004-03-05 07:40:08.000000000 +0000
6718 @@ -0,0 +1,189 @@
6719 +/*
6720 + * This module implements a simple TSK FLC
6721 + * (Takagi-Sugeno-Kang Fuzzy Logic Controller) that aims
6722 + * to limit , in an adaptive and flexible way , the packet rate crossing
6723 + * a given stream . It serves as an initial and very simple (but effective)
6724 + * example of how Fuzzy Logic techniques can be applied to defeat DoS attacks.
6725 + *  As a matter of fact , Fuzzy Logic can help us to insert any "behavior"
6726 + * into our code in a precise , adaptive and efficient manner.
6727 + *  The goal is very similar to that of "limit" match , but using techniques of
6728 + * Fuzzy Control , that allow us to shape the transfer functions precisely ,
6729 + * avoiding over and undershoots - and stuff like that .
6730 + *
6731 + *
6732 + * 2002-08-10  Hime Aguiar e Oliveira Jr. <hime@engineer.com> : Initial version.
6733 + * 2002-08-17  : Changed to eliminate floating point operations .
6734 + * 2002-08-23  : Coding style changes .
6735 + * 2003-04-08  Maciej Soltysiak <solt@dns.toxicfilms.tv> : IPv6 Port
6736 + */
6737 +
6738 +#include <linux/module.h>
6739 +#include <linux/skbuff.h>
6740 +#include <linux/ipv6.h>
6741 +#include <linux/random.h>
6742 +#include <net/tcp.h>
6743 +#include <linux/spinlock.h>
6744 +#include <linux/netfilter_ipv6/ip6_tables.h>
6745 +#include <linux/netfilter_ipv6/ip6t_fuzzy.h>
6746 +
6747 +/*
6748 + Packet Acceptance Rate - LOW and Packet Acceptance Rate - HIGH
6749 + Expressed in percentage
6750 +*/
6751 +
6752 +#define PAR_LOW                1/100
6753 +#define PAR_HIGH       1
6754 +
6755 +static spinlock_t fuzzy_lock = SPIN_LOCK_UNLOCKED;
6756 +
6757 +MODULE_AUTHOR("Hime Aguiar e Oliveira Junior <hime@engineer.com>");
6758 +MODULE_DESCRIPTION("IP tables Fuzzy Logic Controller match module");
6759 +MODULE_LICENSE("GPL");
6760 +
6761 +static  u_int8_t mf_high(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
6762 +{
6763 +       if (tx >= maxi) return 100;
6764 +
6765 +       if (tx <= mini) return 0;
6766 +
6767 +       return ((100 * (tx-mini)) / (maxi-mini));
6768 +}
6769 +
6770 +static u_int8_t mf_low(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
6771 +{
6772 +       if (tx <= mini) return 100;
6773 +
6774 +       if (tx >= maxi) return 0;
6775 +
6776 +       return ((100 * (maxi - tx)) / (maxi - mini));
6777 +
6778 +}
6779 +
6780 +static int
6781 +ip6t_fuzzy_match(const struct sk_buff *pskb,
6782 +              const struct net_device *in,
6783 +              const struct net_device *out,
6784 +              const void *matchinfo,
6785 +              int offset,
6786 +              const void *hdr,
6787 +              u_int16_t datalen,
6788 +              int *hotdrop)
6789 +{
6790 +       /* From userspace */
6791 +
6792 +       struct ip6t_fuzzy_info *info = (struct ip6t_fuzzy_info *) matchinfo;
6793 +
6794 +       u_int8_t random_number;
6795 +       unsigned long amount;
6796 +       u_int8_t howhigh, howlow;
6797 +
6798 +
6799 +       spin_lock_bh(&fuzzy_lock); /* Acquire the lock */
6800 +
6801 +       info->bytes_total += pskb->len;
6802 +       info->packets_total++;
6803 +
6804 +       info->present_time = jiffies;
6805 +
6806 +       if (info->present_time >= info->previous_time)
6807 +               amount = info->present_time - info->previous_time;
6808 +       else {
6809 +               /* There was a transition : I choose to re-sample
6810 +                  and keep the old acceptance rate...
6811 +               */
6812 +
6813 +               amount = 0;
6814 +               info->previous_time = info->present_time;
6815 +               info->bytes_total = info->packets_total = 0;
6816 +            };
6817 +
6818 +       if ( amount > HZ/10) {/* More than 100 ms elapsed ... */
6819 +
6820 +               info->mean_rate = (u_int32_t) ((HZ * info->packets_total) \
6821 +                                       / amount);
6822 +
6823 +               info->previous_time = info->present_time;
6824 +               info->bytes_total = info->packets_total = 0;
6825 +
6826 +               howhigh = mf_high(info->mean_rate,info->minimum_rate,info->maximum_rate);
6827 +               howlow  = mf_low(info->mean_rate,info->minimum_rate,info->maximum_rate);
6828 +
6829 +               info->acceptance_rate = (u_int8_t) \
6830 +                               (howhigh * PAR_LOW + PAR_HIGH * howlow);
6831 +
6832 +       /* In fact, the above defuzzification would require a denominator
6833 +        * proportional to (howhigh+howlow) but, in this particular case,
6834 +        * that expression is constant.
6835 +        * An immediate consequence is that it is not necessary to call
6836 +        * both mf_high and mf_low - but to keep things understandable,
6837 +        * I did so.
6838 +        */
6839 +
6840 +       }
6841 +
6842 +       spin_unlock_bh(&fuzzy_lock); /* Release the lock */
6843 +
6844 +
6845 +       if (info->acceptance_rate < 100)
6846 +       {
6847 +               get_random_bytes((void *)(&random_number), 1);
6848 +
6849 +               /*  If within the acceptance , it can pass => don't match */
6850 +               if (random_number <= (255 * info->acceptance_rate) / 100)
6851 +                       return 0;
6852 +               else
6853 +                       return 1; /* It can't pass (It matches) */
6854 +       };
6855 +
6856 +       return 0; /* acceptance_rate == 100 % => Everything passes ... */
6857 +
6858 +}
6859 +
6860 +static int
6861 +ip6t_fuzzy_checkentry(const char *tablename,
6862 +                  const struct ip6t_ip6 *ip,
6863 +                  void *matchinfo,
6864 +                  unsigned int matchsize,
6865 +                  unsigned int hook_mask)
6866 +{
6867 +
6868 +       const struct ip6t_fuzzy_info *info = matchinfo;
6869 +
6870 +       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_fuzzy_info))) {
6871 +               printk("ip6t_fuzzy: matchsize %u != %u\n", matchsize,
6872 +                      IP6T_ALIGN(sizeof(struct ip6t_fuzzy_info)));
6873 +               return 0;
6874 +       }
6875 +
6876 +       if ((info->minimum_rate < MINFUZZYRATE) || (info->maximum_rate > MAXFUZZYRATE)
6877 +        || (info->minimum_rate >= info->maximum_rate)) {
6878 +               printk("ip6t_fuzzy: BAD limits , please verify !!!\n");
6879 +               return 0;
6880 +       }
6881 +
6882 +       return 1;
6883 +}
6884 +
6885 +static struct ip6t_match ip6t_fuzzy_reg = {
6886 +       {NULL, NULL},
6887 +       "fuzzy",
6888 +       ip6t_fuzzy_match,
6889 +       ip6t_fuzzy_checkentry,
6890 +       NULL,
6891 +       THIS_MODULE };
6892 +
6893 +static int __init init(void)
6894 +{
6895 +       if (ip6t_register_match(&ip6t_fuzzy_reg))
6896 +               return -EINVAL;
6897 +
6898 +       return 0;
6899 +}
6900 +
6901 +static void __exit fini(void)
6902 +{
6903 +       ip6t_unregister_match(&ip6t_fuzzy_reg);
6904 +}
6905 +
6906 +module_init(init);
6907 +module_exit(fini);
6908 diff -Nur linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_nth.c linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_nth.c
6909 --- linux-2.6.4-rc2.org/net/ipv6/netfilter/ip6t_nth.c   1970-01-01 00:00:00.000000000 +0000
6910 +++ linux-2.6.4-rc2/net/ipv6/netfilter/ip6t_nth.c       2004-03-05 07:40:13.000000000 +0000
6911 @@ -0,0 +1,173 @@
6912 +/*
6913 +  This is a module which is used for match support for every Nth packet
6914 +  This file is distributed under the terms of the GNU General Public
6915 +  License (GPL). Copies of the GPL can be obtained from:
6916 +     ftp://prep.ai.mit.edu/pub/gnu/GPL
6917 +
6918 +  2001-07-18 Fabrice MARIE <fabrice@netfilter.org> : initial implementation.
6919 +  2001-09-20 Richard Wagner (rwagner@cloudnet.com)
6920 +        * added support for multiple counters
6921 +        * added support for matching on individual packets
6922 +          in the counter cycle
6923 +  2003-04-30 Maciej Soltysiak <solt@dns.toxicfilms.tv> : IPv6 Port
6924 +
6925 +*/
6926 +
6927 +#include <linux/module.h>
6928 +#include <linux/skbuff.h>
6929 +#include <linux/ip.h>
6930 +#include <net/tcp.h>
6931 +#include <linux/spinlock.h>
6932 +#include <linux/netfilter_ipv6/ip6_tables.h>
6933 +#include <linux/netfilter_ipv6/ip6t_nth.h>
6934 +
6935 +MODULE_LICENSE("GPL");
6936 +
6937 +/*
6938 + * State information.
6939 + */
6940 +struct state {
6941 +       spinlock_t lock;
6942 +       u_int16_t number;
6943 +};
6944 +
6945 +static struct state states[IP6T_NTH_NUM_COUNTERS];
6946 +
6947 +static int
6948 +ip6t_nth_match(const struct sk_buff *pskb,
6949 +             const struct net_device *in,
6950 +             const struct net_device *out,
6951 +             const void *matchinfo,
6952 +             int offset,
6953 +             const void *hdr,
6954 +             u_int16_t datalen,
6955 +             int *hotdrop)
6956 +{
6957 +       /* Parameters from userspace */
6958 +       const struct ip6t_nth_info *info = matchinfo;
6959 +        unsigned counter = info->counter;
6960 +               if((counter < 0) || (counter >= IP6T_NTH_NUM_COUNTERS)) 
6961 +       {
6962 +                       printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IP6T_NTH_NUM_COUNTERS-1);
6963 +               return 0;
6964 +        };
6965 +
6966 +        spin_lock(&states[counter].lock);
6967 +
6968 +        /* Are we matching every nth packet?*/
6969 +        if (info->packet == 0xFF)
6970 +        {
6971 +               /* We're matching every nth packet and only every nth packet*/
6972 +               /* Do we match or invert match? */
6973 +               if (info->not == 0)
6974 +               {
6975 +                       if (states[counter].number == 0)
6976 +                       {
6977 +                               ++states[counter].number;
6978 +                               goto match;
6979 +                       }
6980 +                       if (states[counter].number >= info->every)
6981 +                               states[counter].number = 0; /* reset the counter */
6982 +                       else
6983 +                               ++states[counter].number;
6984 +                       goto dontmatch;
6985 +               }
6986 +               else
6987 +               {
6988 +                       if (states[counter].number == 0)
6989 +                       {
6990 +                               ++states[counter].number;
6991 +                               goto dontmatch;
6992 +                       }
6993 +                       if (states[counter].number >= info->every)
6994 +                               states[counter].number = 0;
6995 +                       else
6996 +                               ++states[counter].number;
6997 +                       goto match;
6998 +               }
6999 +        }
7000 +        else
7001 +        {
7002 +               /* We're using the --packet, so there must be a rule for every value */
7003 +               if (states[counter].number == info->packet)
7004 +               {
7005 +                       /* only increment the counter when a match happens */
7006 +                       if (states[counter].number >= info->every)
7007 +                               states[counter].number = 0; /* reset the counter */
7008 +                       else
7009 +                               ++states[counter].number;
7010 +                       goto match;
7011 +               }
7012 +               else
7013 +                       goto dontmatch;
7014 +       }
7015 +
7016 + dontmatch:
7017 +       /* don't match */
7018 +       spin_unlock(&states[counter].lock);
7019 +       return 0;
7020 +
7021 + match:
7022 +       spin_unlock(&states[counter].lock);
7023 +       return 1;
7024 +}
7025 +
7026 +static int
7027 +ip6t_nth_checkentry(const char *tablename,
7028 +                  const struct ip6t_ip6 *e,
7029 +                  void *matchinfo,
7030 +                  unsigned int matchsize,
7031 +                  unsigned int hook_mask)
7032 +{
7033 +       /* Parameters from userspace */
7034 +       const struct ip6t_nth_info *info = matchinfo;
7035 +        unsigned counter = info->counter;
7036 +        if((counter < 0) || (counter >= IP6T_NTH_NUM_COUNTERS)) 
7037 +       {
7038 +               printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IP6T_NTH_NUM_COUNTERS-1);
7039 +                       return 0;
7040 +               };
7041 +
7042 +       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_nth_info))) {
7043 +               printk("nth: matchsize %u != %u\n", matchsize,
7044 +                      IP6T_ALIGN(sizeof(struct ip6t_nth_info)));
7045 +               return 0;
7046 +       }
7047 +
7048 +       states[counter].number = info->startat;
7049 +
7050 +       return 1;
7051 +}
7052 +
7053 +static struct ip6t_match ip6t_nth_reg = { 
7054 +       {NULL, NULL},
7055 +       "nth",
7056 +       ip6t_nth_match,
7057 +       ip6t_nth_checkentry,
7058 +       NULL,
7059 +       THIS_MODULE };
7060 +
7061 +static int __init init(void)
7062 +{
7063 +       unsigned counter;
7064 +        memset(&states, 0, sizeof(states));
7065 +       if (ip6t_register_match(&ip6t_nth_reg))
7066 +               return -EINVAL;
7067 +
7068 +        for(counter = 0; counter < IP6T_NTH_NUM_COUNTERS; counter++) 
7069 +       {
7070 +               spin_lock_init(&(states[counter].lock));
7071 +        };
7072 +
7073 +       printk("ip6t_nth match loaded\n");
7074 +       return 0;
7075 +}
7076 +
7077 +static void __exit fini(void)
7078 +{
7079 +       ip6t_unregister_match(&ip6t_nth_reg);
7080 +       printk("ip6t_nth match unloaded\n");
7081 +}
7082 +
7083 +module_init(init);
7084 +module_exit(fini);
This page took 0.624752 seconds and 3 git commands to generate.