--- /dev/null
+diff -Nur linux-2.6.3.org/include/linux/netfilter.h linux-2.6.3/include/linux/netfilter.h
+--- linux-2.6.3.org/include/linux/netfilter.h 2004-02-18 04:57:59.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter.h 2004-02-27 00:03:00.000228144 +0100
+@@ -99,6 +99,24 @@
+
+ extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
+
++typedef void nf_logfn(unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *prefix);
++
++/* Function to register/unregister log function. */
++int nf_log_register(int pf, nf_logfn *logfn);
++void nf_log_unregister(int pf, nf_logfn *logfn);
++
++/* Calls the registered backend logging function */
++void nf_log_packet(int pf,
++ unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *fmt, ...);
++
+ /* Activate hook; either okfn or kfree_skb called, unless a hook
+ returns NF_STOLEN (in which case, it's up to the hook to deal with
+ the consequences).
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.6.3/include/linux/netfilter_ipv4/ip_conntrack.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ip_conntrack.h 2004-02-18 04:59:30.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ip_conntrack.h 2004-02-27 00:03:14.480026880 +0100
+@@ -251,6 +251,9 @@
+ /* Call me when a conntrack is destroyed. */
+ extern void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack);
+
++/* Fake conntrack entry for untracked connections */
++extern struct ip_conntrack ip_conntrack_untracked;
++
+ /* Returns new sk_buff, or NULL */
+ struct sk_buff *
+ ip_ct_gather_frags(struct sk_buff *skb);
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_connlimit.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_connlimit.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_connlimit.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_connlimit.h 2004-02-27 00:03:07.981014880 +0100
+@@ -0,0 +1,12 @@
++#ifndef _IPT_CONNLIMIT_H
++#define _IPT_CONNLIMIT_H
++
++struct ipt_connlimit_data;
++
++struct ipt_connlimit_info {
++ int limit;
++ int inverse;
++ u_int32_t mask;
++ struct ipt_connlimit_data *data;
++};
++#endif /* _IPT_CONNLIMIT_H */
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_conntrack.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_conntrack.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_conntrack.h 2004-02-18 04:59:05.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_conntrack.h 2004-02-27 00:03:14.480026880 +0100
+@@ -10,6 +10,7 @@
+
+ #define IPT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1))
+ #define IPT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2))
++#define IPT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3))
+
+ /* flags, invflags: */
+ #define IPT_CONNTRACK_STATE 0x01
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_dstlimit.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_dstlimit.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_dstlimit.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_dstlimit.h 2004-02-27 00:03:08.651912888 +0100
+@@ -0,0 +1,39 @@
++#ifndef _IPT_DSTLIMIT_H
++#define _IPT_DSTLIMIT_H
++
++/* timings are in milliseconds. */
++#define IPT_DSTLIMIT_SCALE 10000
++/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
++ seconds, or one every 59 hours. */
++
++/* details of this structure hidden by the implementation */
++struct ipt_dstlimit_htable;
++
++#define IPT_DSTLIMIT_HASH_DIP 0x0001
++#define IPT_DSTLIMIT_HASH_DPT 0x0002
++#define IPT_DSTLIMIT_HASH_SIP 0x0004
++
++struct dstlimit_cfg {
++ u_int32_t mode; /* bitmask of IPT_DSTLIMIT_HASH_* */
++ u_int32_t avg; /* Average secs between packets * scale */
++ u_int32_t burst; /* Period multiplier for upper limit. */
++
++ /* user specified */
++ u_int32_t size; /* how many buckets */
++ u_int32_t max; /* max number of entries */
++ u_int32_t gc_interval; /* gc interval */
++ u_int32_t expire; /* when do entries expire? */
++};
++
++struct ipt_dstlimit_info {
++ char name [IFNAMSIZ]; /* name */
++ struct dstlimit_cfg cfg;
++ struct ipt_dstlimit_htable *hinfo;
++
++ /* Used internally by the kernel */
++ union {
++ void *ptr;
++ struct ipt_dstlimit_info *master;
++ } u;
++};
++#endif /*_IPT_DSTLIMIT_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_fuzzy.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_fuzzy.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_fuzzy.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_fuzzy.h 2004-02-27 00:03:09.359805272 +0100
+@@ -0,0 +1,21 @@
++#ifndef _IPT_FUZZY_H
++#define _IPT_FUZZY_H
++
++#include <linux/param.h>
++#include <linux/types.h>
++
++#define MAXFUZZYRATE 10000000
++#define MINFUZZYRATE 3
++
++struct ipt_fuzzy_info {
++ u_int32_t minimum_rate;
++ u_int32_t maximum_rate;
++ u_int32_t packets_total;
++ u_int32_t bytes_total;
++ u_int32_t previous_time;
++ u_int32_t present_time;
++ u_int32_t mean_rate;
++ u_int8_t acceptance_rate;
++};
++
++#endif /*_IPT_FUZZY_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_ipv4options.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_ipv4options.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_ipv4options.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_ipv4options.h 2004-02-27 00:03:10.065697960 +0100
+@@ -0,0 +1,21 @@
++#ifndef __ipt_ipv4options_h_included__
++#define __ipt_ipv4options_h_included__
++
++#define IPT_IPV4OPTION_MATCH_SSRR 0x01 /* For strict source routing */
++#define IPT_IPV4OPTION_MATCH_LSRR 0x02 /* For loose source routing */
++#define IPT_IPV4OPTION_DONT_MATCH_SRR 0x04 /* any source routing */
++#define IPT_IPV4OPTION_MATCH_RR 0x08 /* For Record route */
++#define IPT_IPV4OPTION_DONT_MATCH_RR 0x10
++#define IPT_IPV4OPTION_MATCH_TIMESTAMP 0x20 /* For timestamp request */
++#define IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP 0x40
++#define IPT_IPV4OPTION_MATCH_ROUTER_ALERT 0x80 /* For router-alert */
++#define IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT 0x100
++#define IPT_IPV4OPTION_MATCH_ANY_OPT 0x200 /* match packet with any option */
++#define IPT_IPV4OPTION_DONT_MATCH_ANY_OPT 0x400 /* match packet with no option */
++
++struct ipt_ipv4options_info {
++ u_int16_t options;
++};
++
++
++#endif /* __ipt_ipv4options_h_included__ */
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_mport.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_mport.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_mport.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_mport.h 2004-02-27 00:03:10.772590496 +0100
+@@ -0,0 +1,24 @@
++#ifndef _IPT_MPORT_H
++#define _IPT_MPORT_H
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++#define IPT_MPORT_SOURCE (1<<0)
++#define IPT_MPORT_DESTINATION (1<<1)
++#define IPT_MPORT_EITHER (IPT_MPORT_SOURCE|IPT_MPORT_DESTINATION)
++
++#define IPT_MULTI_PORTS 15
++
++/* Must fit inside union ipt_matchinfo: 32 bytes */
++/* every entry in ports[] except for the last one has one bit in pflags
++ * associated with it. If this bit is set, the port is the first port of
++ * a portrange, with the next entry being the last.
++ * End of list is marked with pflags bit set and port=65535.
++ * If 14 ports are used (last one does not have a pflag), the last port
++ * is repeated to fill the last entry in ports[] */
++struct ipt_mport
++{
++ u_int8_t flags:2; /* Type of comparison */
++ u_int16_t pflags:14; /* Port flags */
++ u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
++};
++#endif /*_IPT_MPORT_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_nth.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_nth.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_nth.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_nth.h 2004-02-27 00:03:12.719294552 +0100
+@@ -0,0 +1,19 @@
++#ifndef _IPT_NTH_H
++#define _IPT_NTH_H
++
++#include <linux/param.h>
++#include <linux/types.h>
++
++#ifndef IPT_NTH_NUM_COUNTERS
++#define IPT_NTH_NUM_COUNTERS 16
++#endif
++
++struct ipt_nth_info {
++ u_int8_t every;
++ u_int8_t not;
++ u_int8_t startat;
++ u_int8_t counter;
++ u_int8_t packet;
++};
++
++#endif /*_IPT_NTH_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_quota.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_quota.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_quota.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_quota.h 2004-02-27 00:03:13.672149696 +0100
+@@ -0,0 +1,11 @@
++#ifndef _IPT_QUOTA_H
++#define _IPT_QUOTA_H
++
++/* print debug info in both kernel/netfilter module & iptable library */
++//#define DEBUG_IPT_QUOTA
++
++struct ipt_quota_info {
++ u_int64_t quota;
++};
++
++#endif /*_IPT_QUOTA_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_realm.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_realm.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_realm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_realm.h 2004-02-27 00:03:15.261908016 +0100
+@@ -0,0 +1,9 @@
++#ifndef _IPT_REALM_H
++#define _IPT_REALM_H
++
++struct ipt_realm_info {
++ u_int32_t id;
++ u_int32_t mask;
++ u_int8_t invert;
++};
++#endif /*_IPT_REALM_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_sctp.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_sctp.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_sctp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_sctp.h 2004-02-27 00:03:16.145773648 +0100
+@@ -0,0 +1,96 @@
++#ifndef _IPT_SCTP_H_
++#define _IPT_SCTP_H_
++
++#define IPT_SCTP_SRC_PORTS 0x01
++#define IPT_SCTP_DEST_PORTS 0x02
++#define IPT_SCTP_CHUNK_TYPES 0x04
++
++#define IPT_SCTP_VALID_FLAGS 0x07
++
++#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
++
++struct ipt_sctp_info {
++ u_int16_t dpts[2]; /* Min, Max */
++ u_int16_t spts[2]; /* Min, Max */
++
++ u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */
++
++#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */
++#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */
++#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */
++
++ u_int32_t chunk_match_type;
++
++ u_int32_t flags;
++ u_int32_t invflags;
++};
++
++#define bytes(type) (sizeof(type) * 8)
++
++#define SCTP_CHUNKMAP_SET(chunkmap, type) \
++ do { \
++ chunkmap[type / bytes(u_int32_t)] |= \
++ 1 << (type % bytes(u_int32_t)); \
++ } while (0)
++
++#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
++ do { \
++ chunkmap[type / bytes(u_int32_t)] &= \
++ ~(1 << (type % bytes(u_int32_t))); \
++ } while (0)
++
++#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
++({ \
++ (chunkmap[type / bytes (u_int32_t)] & \
++ (1 << (type % bytes (u_int32_t)))) ? 1: 0; \
++})
++
++#define SCTP_CHUNKMAP_RESET(chunkmap) \
++ do { \
++ int i; \
++ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
++ chunkmap[i] = 0; \
++ } while (0)
++
++#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
++ do { \
++ int i; \
++ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
++ chunkmap[i] = ~0; \
++ } while (0)
++
++#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
++ do { \
++ int i; \
++		for (i = 0; i < ELEMCOUNT(destmap); i++) \
++ destmap[i] = srcmap[i]; \
++ } while (0)
++
++#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
++({ \
++ int i; \
++ int flag = 1; \
++ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
++ if (chunkmap[i]) { \
++ flag = 0; \
++ break; \
++ } \
++ } \
++ flag; \
++})
++
++#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
++({ \
++ int i; \
++ int flag = 1; \
++ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
++ if (chunkmap[i] != ~0) { \
++ flag = 0; \
++ break; \
++ } \
++ } \
++ flag; \
++})
++
++#endif /* _IPT_SCTP_H_ */
++
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_state.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_state.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_state.h 2004-02-18 04:59:18.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_state.h 2004-02-27 00:03:14.480026880 +0100
+@@ -4,6 +4,8 @@
+ #define IPT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
+ #define IPT_STATE_INVALID (1 << 0)
+
++#define IPT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
++
+ struct ipt_state_info
+ {
+ unsigned int statemask;
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_TTL.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_TTL.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_TTL.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_TTL.h 2004-02-27 00:03:07.345111552 +0100
+@@ -0,0 +1,21 @@
++/* TTL modification module for IP tables
++ * (C) 2000 by Harald Welte <laforge@gnumonks.org> */
++
++#ifndef _IPT_TTL_H
++#define _IPT_TTL_H
++
++enum {
++ IPT_TTL_SET = 0,
++ IPT_TTL_INC,
++ IPT_TTL_DEC
++};
++
++#define IPT_TTL_MAXMODE IPT_TTL_DEC
++
++struct ipt_TTL_info {
++ u_int8_t mode;
++ u_int8_t ttl;
++};
++
++
++#endif
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_ULOG.h linux-2.6.3/include/linux/netfilter_ipv4/ipt_ULOG.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4/ipt_ULOG.h 2004-02-18 04:57:31.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4/ipt_ULOG.h 2004-02-27 00:03:00.000228144 +0100
+@@ -11,6 +11,9 @@
+ #define NETLINK_NFLOG 5
+ #endif
+
++#define ULOG_DEFAULT_NLGROUP 1
++#define ULOG_DEFAULT_QTHRESHOLD 1
++
+ #define ULOG_MAC_LEN 80
+ #define ULOG_PREFIX_LEN 32
+
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv4.h linux-2.6.3/include/linux/netfilter_ipv4.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv4.h 2004-02-18 04:59:16.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv4.h 2004-02-27 00:03:14.480026880 +0100
+@@ -51,6 +51,8 @@
+
+ enum nf_ip_hook_priorities {
+ NF_IP_PRI_FIRST = INT_MIN,
++ NF_IP_PRI_CONNTRACK_DEFRAG = -400,
++ NF_IP_PRI_RAW = -300,
+ NF_IP_PRI_SELINUX_FIRST = -225,
+ NF_IP_PRI_CONNTRACK = -200,
+ NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h linux-2.6.3/include/linux/netfilter_ipv6/ip6t_fuzzy.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_fuzzy.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv6/ip6t_fuzzy.h 2004-02-27 00:03:09.360805120 +0100
+@@ -0,0 +1,21 @@
++#ifndef _IP6T_FUZZY_H
++#define _IP6T_FUZZY_H
++
++#include <linux/param.h>
++#include <linux/types.h>
++
++#define MAXFUZZYRATE 10000000
++#define MINFUZZYRATE 3
++
++struct ip6t_fuzzy_info {
++ u_int32_t minimum_rate;
++ u_int32_t maximum_rate;
++ u_int32_t packets_total;
++ u_int32_t bytes_total;
++ u_int32_t previous_time;
++ u_int32_t present_time;
++ u_int32_t mean_rate;
++ u_int8_t acceptance_rate;
++};
++
++#endif /*_IP6T_FUZZY_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_HL.h linux-2.6.3/include/linux/netfilter_ipv6/ip6t_HL.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_HL.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv6/ip6t_HL.h 2004-02-27 00:03:05.118450056 +0100
+@@ -0,0 +1,22 @@
++/* Hop Limit modification module for ip6tables
++ * Maciej Soltysiak <solt@dns.toxicfilms.tv>
++ * Based on HW's TTL module */
++
++#ifndef _IP6T_HL_H
++#define _IP6T_HL_H
++
++enum {
++ IP6T_HL_SET = 0,
++ IP6T_HL_INC,
++ IP6T_HL_DEC
++};
++
++#define IP6T_HL_MAXMODE IP6T_HL_DEC
++
++struct ip6t_HL_info {
++ u_int8_t mode;
++ u_int8_t hop_limit;
++};
++
++
++#endif
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_nth.h linux-2.6.3/include/linux/netfilter_ipv6/ip6t_nth.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_nth.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv6/ip6t_nth.h 2004-02-27 00:03:12.719294552 +0100
+@@ -0,0 +1,19 @@
++#ifndef _IP6T_NTH_H
++#define _IP6T_NTH_H
++
++#include <linux/param.h>
++#include <linux/types.h>
++
++#ifndef IP6T_NTH_NUM_COUNTERS
++#define IP6T_NTH_NUM_COUNTERS 16
++#endif
++
++struct ip6t_nth_info {
++ u_int8_t every;
++ u_int8_t not;
++ u_int8_t startat;
++ u_int8_t counter;
++ u_int8_t packet;
++};
++
++#endif /*_IP6T_NTH_H*/
+diff -Nur linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_REJECT.h linux-2.6.3/include/linux/netfilter_ipv6/ip6t_REJECT.h
+--- linux-2.6.3.org/include/linux/netfilter_ipv6/ip6t_REJECT.h 2004-02-18 04:57:12.000000000 +0100
++++ linux-2.6.3/include/linux/netfilter_ipv6/ip6t_REJECT.h 2004-02-27 00:03:06.649217344 +0100
+@@ -2,15 +2,17 @@
+ #define _IP6T_REJECT_H
+
+ enum ip6t_reject_with {
+- IP6T_ICMP_NET_UNREACHABLE,
+- IP6T_ICMP_HOST_UNREACHABLE,
+- IP6T_ICMP_PROT_UNREACHABLE,
+- IP6T_ICMP_PORT_UNREACHABLE,
+- IP6T_ICMP_ECHOREPLY
++ IP6T_ICMP6_NO_ROUTE,
++ IP6T_ICMP6_ADM_PROHIBITED,
++ IP6T_ICMP6_NOT_NEIGHBOUR,
++ IP6T_ICMP6_ADDR_UNREACH,
++ IP6T_ICMP6_PORT_UNREACH,
++ IP6T_ICMP6_ECHOREPLY,
++ IP6T_TCP_RESET
+ };
+
+ struct ip6t_reject_info {
+ enum ip6t_reject_with with; /* reject type */
+ };
+
+-#endif /*_IPT_REJECT_H*/
++#endif /*_IP6T_REJECT_H*/
+diff -Nur linux-2.6.3.org/net/core/netfilter.c linux-2.6.3/net/core/netfilter.c
+--- linux-2.6.3.org/net/core/netfilter.c 2004-02-26 23:36:59.000000000 +0100
++++ linux-2.6.3/net/core/netfilter.c 2004-02-27 00:03:00.001227992 +0100
+@@ -8,8 +8,10 @@
+ *
+ * February 2000: Modified by James Morris to have 1 queue per protocol.
+ * 15-Mar-2000: Added NF_REPEAT --RR.
++ * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
+ */
+ #include <linux/config.h>
++#include <linux/kernel.h>
+ #include <linux/netfilter.h>
+ #include <net/protocol.h>
+ #include <linux/init.h>
+@@ -740,6 +742,72 @@
+ EXPORT_SYMBOL(skb_ip_make_writable);
+ #endif /*CONFIG_INET*/
+
++/* Internal logging interface, which relies on the real
++ LOG target modules */
++
++#define NF_LOG_PREFIXLEN 128
++
++static nf_logfn *nf_logging[NPROTO]; /* = NULL */
++static int reported = 0;
++static spinlock_t nf_log_lock = SPIN_LOCK_UNLOCKED;
++
++int nf_log_register(int pf, nf_logfn *logfn)
++{
++ int ret = -EBUSY;
++
++ /* Any setup of logging members must be done before
++ * substituting pointer. */
++ smp_wmb();
++ spin_lock(&nf_log_lock);
++ if (!nf_logging[pf]) {
++ nf_logging[pf] = logfn;
++ ret = 0;
++ }
++ spin_unlock(&nf_log_lock);
++ return ret;
++}
++
++void nf_log_unregister(int pf, nf_logfn *logfn)
++{
++ spin_lock(&nf_log_lock);
++ if (nf_logging[pf] == logfn)
++ nf_logging[pf] = NULL;
++ spin_unlock(&nf_log_lock);
++
++ /* Give time to concurrent readers. */
++ synchronize_net();
++}
++
++void nf_log_packet(int pf,
++ unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *fmt, ...)
++{
++ va_list args;
++ char prefix[NF_LOG_PREFIXLEN];
++ nf_logfn *logfn;
++
++ rcu_read_lock();
++ logfn = nf_logging[pf];
++ if (logfn) {
++ va_start(args, fmt);
++ vsnprintf(prefix, sizeof(prefix), fmt, args);
++ va_end(args);
++ /* We must read logging before nf_logfn[pf] */
++ smp_read_barrier_depends();
++ logfn(hooknum, skb, in, out, prefix);
++ } else if (!reported) {
++ printk(KERN_WARNING "nf_log_packet: can\'t log yet, "
++ "no backend logging module loaded in!\n");
++ reported++;
++ }
++ rcu_read_unlock();
++}
++EXPORT_SYMBOL(nf_log_register);
++EXPORT_SYMBOL(nf_log_unregister);
++EXPORT_SYMBOL(nf_log_packet);
+
+ /* This does not belong here, but ipt_REJECT needs it if connection
+ tracking in use: without this, connection may not be in hash table,
+diff -Nur linux-2.6.3.org/net/core/netfilter.c.orig linux-2.6.3/net/core/netfilter.c.orig
+--- linux-2.6.3.org/net/core/netfilter.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/core/netfilter.c.orig 2004-02-27 00:02:49.299854848 +0100
+@@ -0,0 +1,772 @@
++/* netfilter.c: look after the filters for various protocols.
++ * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
++ *
++ * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
++ * way.
++ *
++ * Rusty Russell (C)2000 -- This code is GPL.
++ *
++ * February 2000: Modified by James Morris to have 1 queue per protocol.
++ * 15-Mar-2000: Added NF_REPEAT --RR.
++ */
++#include <linux/config.h>
++#include <linux/netfilter.h>
++#include <net/protocol.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/wait.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/if.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/icmp.h>
++#include <net/sock.h>
++#include <net/route.h>
++#include <linux/ip.h>
++
++/* In this code, we can be waiting indefinitely for userspace to
++ * service a packet if a hook returns NF_QUEUE. We could keep a count
++ * of skbuffs queued for userspace, and not deregister a hook unless
++ * this is zero, but that sucks. Now, we simply check when the
++ * packets come back: if the hook is gone, the packet is discarded. */
++#ifdef CONFIG_NETFILTER_DEBUG
++#define NFDEBUG(format, args...) printk(format , ## args)
++#else
++#define NFDEBUG(format, args...)
++#endif
++
++/* Sockopts only registered and called from user context, so
++ net locking would be overkill. Also, [gs]etsockopt calls may
++ sleep. */
++static DECLARE_MUTEX(nf_sockopt_mutex);
++
++struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
++static LIST_HEAD(nf_sockopts);
++static spinlock_t nf_hook_lock = SPIN_LOCK_UNLOCKED;
++
++/*
++ * A queue handler may be registered for each protocol. Each is protected by
++ * long term mutex. The handler must provide an an outfn() to accept packets
++ * for queueing and must reinject all packets it receives, no matter what.
++ */
++static struct nf_queue_handler_t {
++ nf_queue_outfn_t outfn;
++ void *data;
++} queue_handler[NPROTO];
++static rwlock_t queue_handler_lock = RW_LOCK_UNLOCKED;
++
++int nf_register_hook(struct nf_hook_ops *reg)
++{
++ struct list_head *i;
++
++ spin_lock_bh(&nf_hook_lock);
++ list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
++ if (reg->priority < ((struct nf_hook_ops *)i)->priority)
++ break;
++ }
++ list_add_rcu(®->list, i->prev);
++ spin_unlock_bh(&nf_hook_lock);
++
++ synchronize_net();
++ return 0;
++}
++
++void nf_unregister_hook(struct nf_hook_ops *reg)
++{
++ spin_lock_bh(&nf_hook_lock);
++ list_del_rcu(®->list);
++ spin_unlock_bh(&nf_hook_lock);
++
++ synchronize_net();
++}
++
++/* Do exclusive ranges overlap? */
++static inline int overlap(int min1, int max1, int min2, int max2)
++{
++ return max1 > min2 && min1 < max2;
++}
++
++/* Functions to register sockopt ranges (exclusive). */
++int nf_register_sockopt(struct nf_sockopt_ops *reg)
++{
++ struct list_head *i;
++ int ret = 0;
++
++ if (down_interruptible(&nf_sockopt_mutex) != 0)
++ return -EINTR;
++
++ list_for_each(i, &nf_sockopts) {
++ struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
++ if (ops->pf == reg->pf
++ && (overlap(ops->set_optmin, ops->set_optmax,
++ reg->set_optmin, reg->set_optmax)
++ || overlap(ops->get_optmin, ops->get_optmax,
++ reg->get_optmin, reg->get_optmax))) {
++ NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
++ ops->set_optmin, ops->set_optmax,
++ ops->get_optmin, ops->get_optmax,
++ reg->set_optmin, reg->set_optmax,
++ reg->get_optmin, reg->get_optmax);
++ ret = -EBUSY;
++ goto out;
++ }
++ }
++
++ list_add(®->list, &nf_sockopts);
++out:
++ up(&nf_sockopt_mutex);
++ return ret;
++}
++
++void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
++{
++ /* No point being interruptible: we're probably in cleanup_module() */
++ restart:
++ down(&nf_sockopt_mutex);
++ if (reg->use != 0) {
++ /* To be woken by nf_sockopt call... */
++ /* FIXME: Stuart Young's name appears gratuitously. */
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ reg->cleanup_task = current;
++ up(&nf_sockopt_mutex);
++ schedule();
++ goto restart;
++ }
++ list_del(®->list);
++ up(&nf_sockopt_mutex);
++}
++
++#ifdef CONFIG_NETFILTER_DEBUG
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/netfilter_ipv4.h>
++
++static void debug_print_hooks_ip(unsigned int nf_debug)
++{
++ if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
++ printk("PRE_ROUTING ");
++ nf_debug ^= (1 << NF_IP_PRE_ROUTING);
++ }
++ if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
++ printk("LOCAL_IN ");
++ nf_debug ^= (1 << NF_IP_LOCAL_IN);
++ }
++ if (nf_debug & (1 << NF_IP_FORWARD)) {
++ printk("FORWARD ");
++ nf_debug ^= (1 << NF_IP_FORWARD);
++ }
++ if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
++ printk("LOCAL_OUT ");
++ nf_debug ^= (1 << NF_IP_LOCAL_OUT);
++ }
++ if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
++ printk("POST_ROUTING ");
++ nf_debug ^= (1 << NF_IP_POST_ROUTING);
++ }
++ if (nf_debug)
++ printk("Crap bits: 0x%04X", nf_debug);
++ printk("\n");
++}
++
++void nf_dump_skb(int pf, struct sk_buff *skb)
++{
++ printk("skb: pf=%i %s dev=%s len=%u\n",
++ pf,
++ skb->sk ? "(owned)" : "(unowned)",
++ skb->dev ? skb->dev->name : "(no dev)",
++ skb->len);
++ switch (pf) {
++ case PF_INET: {
++ const struct iphdr *ip = skb->nh.iph;
++ __u32 *opt = (__u32 *) (ip + 1);
++ int opti;
++ __u16 src_port = 0, dst_port = 0;
++
++ if (ip->protocol == IPPROTO_TCP
++ || ip->protocol == IPPROTO_UDP) {
++ struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
++ src_port = ntohs(tcp->source);
++ dst_port = ntohs(tcp->dest);
++ }
++
++ printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
++ " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
++ ip->protocol, NIPQUAD(ip->saddr),
++ src_port, NIPQUAD(ip->daddr),
++ dst_port,
++ ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
++ ntohs(ip->frag_off), ip->ttl);
++
++ for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
++ printk(" O=0x%8.8X", *opt++);
++ printk("\n");
++ }
++ }
++}
++
++void nf_debug_ip_local_deliver(struct sk_buff *skb)
++{
++ /* If it's a loopback packet, it must have come through
++ * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and
++ * NF_IP_LOCAL_IN. Otherwise, must have gone through
++ * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */
++ if (!skb->dev) {
++ printk("ip_local_deliver: skb->dev is NULL.\n");
++ }
++ else if (strcmp(skb->dev->name, "lo") == 0) {
++ if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
++ | (1 << NF_IP_POST_ROUTING)
++ | (1 << NF_IP_PRE_ROUTING)
++ | (1 << NF_IP_LOCAL_IN))) {
++ printk("ip_local_deliver: bad loopback skb: ");
++ debug_print_hooks_ip(skb->nf_debug);
++ nf_dump_skb(PF_INET, skb);
++ }
++ }
++ else {
++ if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
++ | (1<<NF_IP_LOCAL_IN))) {
++ printk("ip_local_deliver: bad non-lo skb: ");
++ debug_print_hooks_ip(skb->nf_debug);
++ nf_dump_skb(PF_INET, skb);
++ }
++ }
++}
++
++void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
++{
++ if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
++ | (1 << NF_IP_POST_ROUTING))) {
++ printk("ip_dev_loopback_xmit: bad owned skb = %p: ",
++ newskb);
++ debug_print_hooks_ip(newskb->nf_debug);
++ nf_dump_skb(PF_INET, newskb);
++ }
++ /* Clear to avoid confusing input check */
++ newskb->nf_debug = 0;
++}
++
++void nf_debug_ip_finish_output2(struct sk_buff *skb)
++{
++ /* If it's owned, it must have gone through the
++ * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING.
++ * Otherwise, must have gone through
++ * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING.
++ */
++ if (skb->sk) {
++ if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
++ | (1 << NF_IP_POST_ROUTING))) {
++ printk("ip_finish_output: bad owned skb = %p: ", skb);
++ debug_print_hooks_ip(skb->nf_debug);
++ nf_dump_skb(PF_INET, skb);
++ }
++ } else {
++ if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
++ | (1 << NF_IP_FORWARD)
++ | (1 << NF_IP_POST_ROUTING))) {
++ /* Fragments, entunnelled packets, TCP RSTs
++ generated by ipt_REJECT will have no
++ owners, but still may be local */
++ if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
++ | (1 << NF_IP_POST_ROUTING))){
++ printk("ip_finish_output:"
++ " bad unowned skb = %p: ",skb);
++ debug_print_hooks_ip(skb->nf_debug);
++ nf_dump_skb(PF_INET, skb);
++ }
++ }
++ }
++}
++#endif /*CONFIG_NETFILTER_DEBUG*/
++
++/* Call get/setsockopt() */
++static int nf_sockopt(struct sock *sk, int pf, int val,
++ char *opt, int *len, int get)
++{
++ struct list_head *i;
++ struct nf_sockopt_ops *ops;
++ int ret;
++
++ if (down_interruptible(&nf_sockopt_mutex) != 0)
++ return -EINTR;
++
++ list_for_each(i, &nf_sockopts) {
++ ops = (struct nf_sockopt_ops *)i;
++ if (ops->pf == pf) {
++ if (get) {
++ if (val >= ops->get_optmin
++ && val < ops->get_optmax) {
++ ops->use++;
++ up(&nf_sockopt_mutex);
++ ret = ops->get(sk, val, opt, len);
++ goto out;
++ }
++ } else {
++ if (val >= ops->set_optmin
++ && val < ops->set_optmax) {
++ ops->use++;
++ up(&nf_sockopt_mutex);
++ ret = ops->set(sk, val, opt, *len);
++ goto out;
++ }
++ }
++ }
++ }
++ up(&nf_sockopt_mutex);
++ return -ENOPROTOOPT;
++
++ out:
++ down(&nf_sockopt_mutex);
++ ops->use--;
++ if (ops->cleanup_task)
++ wake_up_process(ops->cleanup_task);
++ up(&nf_sockopt_mutex);
++ return ret;
++}
++
++int nf_setsockopt(struct sock *sk, int pf, int val, char *opt,
++ int len)
++{
++ return nf_sockopt(sk, pf, val, opt, &len, 0);
++}
++
++int nf_getsockopt(struct sock *sk, int pf, int val, char *opt, int *len)
++{
++ return nf_sockopt(sk, pf, val, opt, len, 1);
++}
++
++static unsigned int nf_iterate(struct list_head *head,
++ struct sk_buff **skb,
++ int hook,
++ const struct net_device *indev,
++ const struct net_device *outdev,
++ struct list_head **i,
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
++{
++ /*
++ * The caller must not block between calls to this
++ * function because of risk of continuing from deleted element.
++ */
++ list_for_each_continue_rcu(*i, head) {
++ struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
++
++ if (hook_thresh > elem->priority)
++ continue;
++
++ /* Optimization: we don't need to hold module
++ reference here, since function can't sleep. --RR */
++ switch (elem->hook(hook, skb, indev, outdev, okfn)) {
++ case NF_QUEUE:
++ return NF_QUEUE;
++
++ case NF_STOLEN:
++ return NF_STOLEN;
++
++ case NF_DROP:
++ return NF_DROP;
++
++ case NF_REPEAT:
++ *i = (*i)->prev;
++ break;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ case NF_ACCEPT:
++ break;
++
++ default:
++ NFDEBUG("Evil return from %p(%u).\n",
++ elem->hook, hook);
++#endif
++ }
++ }
++ return NF_ACCEPT;
++}
++
++int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
++{
++ int ret;
++
++ write_lock_bh(&queue_handler_lock);
++ if (queue_handler[pf].outfn)
++ ret = -EBUSY;
++ else {
++ queue_handler[pf].outfn = outfn;
++ queue_handler[pf].data = data;
++ ret = 0;
++ }
++ write_unlock_bh(&queue_handler_lock);
++
++ return ret;
++}
++
++/* The caller must flush their queue before this */
++int nf_unregister_queue_handler(int pf)
++{
++ write_lock_bh(&queue_handler_lock);
++ queue_handler[pf].outfn = NULL;
++ queue_handler[pf].data = NULL;
++ write_unlock_bh(&queue_handler_lock);
++
++ return 0;
++}
++
++/*
++ * Any packet that leaves via this function must come back
++ * through nf_reinject().
++ */
++static int nf_queue(struct sk_buff *skb,
++ struct list_head *elem,
++ int pf, unsigned int hook,
++ struct net_device *indev,
++ struct net_device *outdev,
++ int (*okfn)(struct sk_buff *))
++{
++ int status;
++ struct nf_info *info;
++#ifdef CONFIG_BRIDGE_NETFILTER
++ struct net_device *physindev = NULL;
++ struct net_device *physoutdev = NULL;
++#endif
++
++ /* QUEUE == DROP if noone is waiting, to be safe. */
++ read_lock(&queue_handler_lock);
++ if (!queue_handler[pf].outfn) {
++ read_unlock(&queue_handler_lock);
++ kfree_skb(skb);
++ return 1;
++ }
++
++ info = kmalloc(sizeof(*info), GFP_ATOMIC);
++ if (!info) {
++ if (net_ratelimit())
++ printk(KERN_ERR "OOM queueing packet %p\n",
++ skb);
++ read_unlock(&queue_handler_lock);
++ kfree_skb(skb);
++ return 1;
++ }
++
++ *info = (struct nf_info) {
++ (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
++
++ /* If it's going away, ignore hook. */
++ if (!try_module_get(info->elem->owner)) {
++ read_unlock(&queue_handler_lock);
++ kfree(info);
++ return 0;
++ }
++
++ /* Bump dev refs so they don't vanish while packet is out */
++ if (indev) dev_hold(indev);
++ if (outdev) dev_hold(outdev);
++
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if (skb->nf_bridge) {
++ physindev = skb->nf_bridge->physindev;
++ if (physindev) dev_hold(physindev);
++ physoutdev = skb->nf_bridge->physoutdev;
++ if (physoutdev) dev_hold(physoutdev);
++ }
++#endif
++
++ status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
++ read_unlock(&queue_handler_lock);
++
++ if (status < 0) {
++ /* James M doesn't say fuck enough. */
++ if (indev) dev_put(indev);
++ if (outdev) dev_put(outdev);
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if (physindev) dev_put(physindev);
++ if (physoutdev) dev_put(physoutdev);
++#endif
++ module_put(info->elem->owner);
++ kfree(info);
++ kfree_skb(skb);
++ return 1;
++ }
++ return 1;
++}
++
++int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
++ struct net_device *indev,
++ struct net_device *outdev,
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
++{
++ struct list_head *elem;
++ unsigned int verdict;
++ int ret = 0;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ if (outdev == NULL) {
++ skb->ip_summed = CHECKSUM_NONE;
++ } else {
++ skb_checksum_help(skb);
++ }
++ }
++
++ /* We may already have this, but read-locks nest anyway */
++ rcu_read_lock();
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ if (skb->nf_debug & (1 << hook)) {
++ printk("nf_hook: hook %i already set.\n", hook);
++ nf_dump_skb(pf, skb);
++ }
++ skb->nf_debug |= (1 << hook);
++#endif
++
++ elem = &nf_hooks[pf][hook];
++ next_hook:
++ verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
++ outdev, &elem, okfn, hook_thresh);
++ if (verdict == NF_QUEUE) {
++ NFDEBUG("nf_hook: Verdict = QUEUE.\n");
++ if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn))
++ goto next_hook;
++ }
++
++ switch (verdict) {
++ case NF_ACCEPT:
++ ret = okfn(skb);
++ break;
++
++ case NF_DROP:
++ kfree_skb(skb);
++ ret = -EPERM;
++ break;
++ }
++
++ rcu_read_unlock();
++ return ret;
++}
++
++void nf_reinject(struct sk_buff *skb, struct nf_info *info,
++ unsigned int verdict)
++{
++ struct list_head *elem = &info->elem->list;
++ struct list_head *i;
++
++ rcu_read_lock();
++
++ /* Release those devices we held, or Alexey will kill me. */
++ if (info->indev) dev_put(info->indev);
++ if (info->outdev) dev_put(info->outdev);
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if (skb->nf_bridge) {
++ if (skb->nf_bridge->physindev)
++ dev_put(skb->nf_bridge->physindev);
++ if (skb->nf_bridge->physoutdev)
++ dev_put(skb->nf_bridge->physoutdev);
++ }
++#endif
++
++ /* Drop reference to owner of hook which queued us. */
++ module_put(info->elem->owner);
++
++ list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
++ if (i == elem)
++ break;
++ }
++
++ if (elem == &nf_hooks[info->pf][info->hook]) {
++ /* The module which sent it to userspace is gone. */
++ NFDEBUG("%s: module disappeared, dropping packet.\n",
++ __FUNCTION__);
++ verdict = NF_DROP;
++ }
++
++ /* Continue traversal iff userspace said ok... */
++ if (verdict == NF_REPEAT) {
++ elem = elem->prev;
++ verdict = NF_ACCEPT;
++ }
++
++ if (verdict == NF_ACCEPT) {
++ next_hook:
++ verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
++ &skb, info->hook,
++ info->indev, info->outdev, &elem,
++ info->okfn, INT_MIN);
++ }
++
++ switch (verdict) {
++ case NF_ACCEPT:
++ info->okfn(skb);
++ break;
++
++ case NF_QUEUE:
++ if (!nf_queue(skb, elem, info->pf, info->hook,
++ info->indev, info->outdev, info->okfn))
++ goto next_hook;
++ break;
++ }
++ rcu_read_unlock();
++
++ if (verdict == NF_DROP)
++ kfree_skb(skb);
++
++ kfree(info);
++ return;
++}
++
++#ifdef CONFIG_INET
++/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
++int ip_route_me_harder(struct sk_buff **pskb)
++{
++ struct iphdr *iph = (*pskb)->nh.iph;
++ struct rtable *rt;
++ struct flowi fl = {};
++ struct dst_entry *odst;
++ unsigned int hh_len;
++
++ /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
++ * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
++ */
++ if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
++ fl.nl_u.ip4_u.daddr = iph->daddr;
++ fl.nl_u.ip4_u.saddr = iph->saddr;
++ fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
++ fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
++#ifdef CONFIG_IP_ROUTE_FWMARK
++ fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
++#endif
++ if (ip_route_output_key(&rt, &fl) != 0)
++ return -1;
++
++ /* Drop old route. */
++ dst_release((*pskb)->dst);
++ (*pskb)->dst = &rt->u.dst;
++ } else {
++ /* non-local src, find valid iif to satisfy
++ * rp-filter when calling ip_route_input. */
++ fl.nl_u.ip4_u.daddr = iph->saddr;
++ if (ip_route_output_key(&rt, &fl) != 0)
++ return -1;
++
++ odst = (*pskb)->dst;
++ if (ip_route_input(*pskb, iph->daddr, iph->saddr,
++ RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
++ dst_release(&rt->u.dst);
++ return -1;
++ }
++ dst_release(&rt->u.dst);
++ dst_release(odst);
++ }
++
++ if ((*pskb)->dst->error)
++ return -1;
++
++ /* Change in oif may mean change in hh_len. */
++ hh_len = (*pskb)->dst->dev->hard_header_len;
++ if (skb_headroom(*pskb) < hh_len) {
++ struct sk_buff *nskb;
++
++ nskb = skb_realloc_headroom(*pskb, hh_len);
++ if (!nskb)
++ return -1;
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ }
++
++ return 0;
++}
++
++int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
++{
++ struct sk_buff *nskb;
++ unsigned int iplen;
++
++ if (writable_len > (*pskb)->len)
++ return 0;
++
++ /* Not exclusive use of packet? Must copy. */
++ if (skb_shared(*pskb) || skb_cloned(*pskb))
++ goto copy_skb;
++
++ /* Alexey says IP hdr is always modifiable and linear, so ok. */
++ if (writable_len <= (*pskb)->nh.iph->ihl*4)
++ return 1;
++
++ iplen = writable_len - (*pskb)->nh.iph->ihl*4;
++
++ /* DaveM says protocol headers are also modifiable. */
++ switch ((*pskb)->nh.iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr hdr;
++ if (skb_copy_bits(*pskb, (*pskb)->nh.iph->ihl*4,
++ &hdr, sizeof(hdr)) != 0)
++ goto copy_skb;
++ if (writable_len <= (*pskb)->nh.iph->ihl*4 + hdr.doff*4)
++ goto pull_skb;
++ goto copy_skb;
++ }
++ case IPPROTO_UDP:
++ if (writable_len<=(*pskb)->nh.iph->ihl*4+sizeof(struct udphdr))
++ goto pull_skb;
++ goto copy_skb;
++ case IPPROTO_ICMP:
++ if (writable_len
++ <= (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr))
++ goto pull_skb;
++ goto copy_skb;
++ /* Insert other cases here as desired */
++ }
++
++copy_skb:
++ nskb = skb_copy(*pskb, GFP_ATOMIC);
++ if (!nskb)
++ return 0;
++ BUG_ON(skb_is_nonlinear(nskb));
++
++ /* Rest of kernel will get very unhappy if we pass it a
++ suddenly-orphaned skbuff */
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ return 1;
++
++pull_skb:
++ return pskb_may_pull(*pskb, writable_len);
++}
++EXPORT_SYMBOL(skb_ip_make_writable);
++#endif /*CONFIG_INET*/
++
++
++/* This does not belong here, but ipt_REJECT needs it if connection
++ tracking in use: without this, connection may not be in hash table,
++ and hence manufactured ICMP or RST packets will not be associated
++ with it. */
++void (*ip_ct_attach)(struct sk_buff *, struct nf_ct_info *);
++
++void __init netfilter_init(void)
++{
++ int i, h;
++
++ for (i = 0; i < NPROTO; i++) {
++ for (h = 0; h < NF_MAX_HOOKS; h++)
++ INIT_LIST_HEAD(&nf_hooks[i][h]);
++ }
++}
++
++EXPORT_SYMBOL(ip_ct_attach);
++EXPORT_SYMBOL(ip_route_me_harder);
++EXPORT_SYMBOL(nf_getsockopt);
++EXPORT_SYMBOL(nf_hook_slow);
++EXPORT_SYMBOL(nf_hooks);
++EXPORT_SYMBOL(nf_register_hook);
++EXPORT_SYMBOL(nf_register_queue_handler);
++EXPORT_SYMBOL(nf_register_sockopt);
++EXPORT_SYMBOL(nf_reinject);
++EXPORT_SYMBOL(nf_setsockopt);
++EXPORT_SYMBOL(nf_unregister_hook);
++EXPORT_SYMBOL(nf_unregister_queue_handler);
++EXPORT_SYMBOL(nf_unregister_sockopt);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.3/net/ipv4/netfilter/ip_conntrack_core.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_core.c 2004-02-26 23:36:59.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_conntrack_core.c 2004-02-27 00:03:14.481026728 +0100
+@@ -67,6 +67,7 @@
+ static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
+ struct list_head *ip_conntrack_hash;
+ static kmem_cache_t *ip_conntrack_cachep;
++struct ip_conntrack ip_conntrack_untracked;
+
+ extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
+
+@@ -794,6 +795,15 @@
+ int set_reply;
+ int ret;
+
++ /* Never happen */
++ if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
++ if (net_ratelimit()) {
++ printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n",
++ (*pskb)->nh.iph->protocol, hooknum);
++ }
++ return NF_DROP;
++ }
++
+ /* FIXME: Do this right please. --RR */
+ (*pskb)->nfcache |= NFC_UNKNOWN;
+
+@@ -812,18 +822,10 @@
+ }
+ #endif
+
+- /* Previously seen (loopback)? Ignore. Do this before
+- fragment check. */
++ /* Previously seen (loopback or untracked)? Ignore. */
+ if ((*pskb)->nfct)
+ return NF_ACCEPT;
+
+- /* Gather fragments. */
+- if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+- *pskb = ip_ct_gather_frags(*pskb);
+- if (!*pskb)
+- return NF_STOLEN;
+- }
+-
+ proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);
+
+ /* It may be an icmp error... */
+@@ -1422,6 +1424,18 @@
+
+ /* For use by ipt_REJECT */
+ ip_ct_attach = ip_conntrack_attach;
++
++ /* Set up fake conntrack:
++ - to never be deleted, not in any hashes */
++ atomic_set(&ip_conntrack_untracked.ct_general.use, 1);
++ /* - and look it like as a confirmed connection */
++ set_bit(IPS_CONFIRMED_BIT, &ip_conntrack_untracked.status);
++ /* - and prepare the ctinfo field for REJECT & NAT. */
++ ip_conntrack_untracked.infos[IP_CT_NEW].master =
++ ip_conntrack_untracked.infos[IP_CT_RELATED].master =
++ ip_conntrack_untracked.infos[IP_CT_RELATED + IP_CT_IS_REPLY].master =
++ &ip_conntrack_untracked.ct_general;
++
+ return ret;
+
+ err_free_hash:
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_core.c.orig linux-2.6.3/net/ipv4/netfilter/ip_conntrack_core.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_core.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_conntrack_core.c.orig 2004-02-27 00:02:49.320851656 +0100
+@@ -0,0 +1,1433 @@
++/* Connection state tracking for netfilter. This is separated from,
++ but required by, the NAT layer; it can also be used by an iptables
++ extension. */
++
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
++ * - new API and handling of conntrack/nat helpers
++ * - now capable of multiple expectations for one master
++ * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
++ * - add usage/reference counts to ip_conntrack_expect
++ * - export ip_conntrack[_expect]_{find_get,put} functions
++ * */
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/icmp.h>
++#include <linux/ip.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/proc_fs.h>
++#include <linux/vmalloc.h>
++#include <net/checksum.h>
++#include <linux/stddef.h>
++#include <linux/sysctl.h>
++#include <linux/slab.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++/* For ERR_PTR(). Yeah, I know... --RR */
++#include <linux/fs.h>
++
++/* This rwlock protects the main hash table, protocol/helper/expected
++ registrations, conntrack timers*/
++#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
++#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
++
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
++#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
++#include <linux/netfilter_ipv4/ip_conntrack_core.h>
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#define IP_CONNTRACK_VERSION "2.1"
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++DECLARE_RWLOCK(ip_conntrack_lock);
++DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock);
++
++void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
++LIST_HEAD(ip_conntrack_expect_list);
++LIST_HEAD(protocol_list);
++static LIST_HEAD(helpers);
++unsigned int ip_conntrack_htable_size = 0;
++int ip_conntrack_max;
++static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
++struct list_head *ip_conntrack_hash;
++static kmem_cache_t *ip_conntrack_cachep;
++
++extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
++
++static inline int proto_cmpfn(const struct ip_conntrack_protocol *curr,
++ u_int8_t protocol)
++{
++ return protocol == curr->proto;
++}
++
++struct ip_conntrack_protocol *__ip_ct_find_proto(u_int8_t protocol)
++{
++ struct ip_conntrack_protocol *p;
++
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ p = LIST_FIND(&protocol_list, proto_cmpfn,
++ struct ip_conntrack_protocol *, protocol);
++ if (!p)
++ p = &ip_conntrack_generic_protocol;
++
++ return p;
++}
++
++struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol)
++{
++ struct ip_conntrack_protocol *p;
++
++ READ_LOCK(&ip_conntrack_lock);
++ p = __ip_ct_find_proto(protocol);
++ READ_UNLOCK(&ip_conntrack_lock);
++ return p;
++}
++
++inline void
++ip_conntrack_put(struct ip_conntrack *ct)
++{
++ IP_NF_ASSERT(ct);
++ IP_NF_ASSERT(ct->infos[0].master);
++ /* nf_conntrack_put wants to go via an info struct, so feed it
++ one at random. */
++ nf_conntrack_put(&ct->infos[0]);
++}
++
++static int ip_conntrack_hash_rnd_initted;
++static unsigned int ip_conntrack_hash_rnd;
++
++static u_int32_t
++hash_conntrack(const struct ip_conntrack_tuple *tuple)
++{
++#if 0
++ dump_tuple(tuple);
++#endif
++ return (jhash_3words(tuple->src.ip,
++ (tuple->dst.ip ^ tuple->dst.protonum),
++ (tuple->src.u.all | (tuple->dst.u.all << 16)),
++ ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
++}
++
++int
++get_tuple(const struct iphdr *iph,
++ const struct sk_buff *skb,
++ unsigned int dataoff,
++ struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack_protocol *protocol)
++{
++ /* Never happen */
++ if (iph->frag_off & htons(IP_OFFSET)) {
++ printk("ip_conntrack_core: Frag of proto %u.\n",
++ iph->protocol);
++ return 0;
++ }
++
++ tuple->src.ip = iph->saddr;
++ tuple->dst.ip = iph->daddr;
++ tuple->dst.protonum = iph->protocol;
++
++ return protocol->pkt_to_tuple(skb, dataoff, tuple);
++}
++
++static int
++invert_tuple(struct ip_conntrack_tuple *inverse,
++ const struct ip_conntrack_tuple *orig,
++ const struct ip_conntrack_protocol *protocol)
++{
++ inverse->src.ip = orig->dst.ip;
++ inverse->dst.ip = orig->src.ip;
++ inverse->dst.protonum = orig->dst.protonum;
++
++ return protocol->invert_tuple(inverse, orig);
++}
++
++
++/* ip_conntrack_expect helper functions */
++
++/* Compare tuple parts depending on mask. */
++static inline int expect_cmp(const struct ip_conntrack_expect *i,
++ const struct ip_conntrack_tuple *tuple)
++{
++ MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
++ return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask);
++}
++
++static void
++destroy_expect(struct ip_conntrack_expect *exp)
++{
++ DEBUGP("destroy_expect(%p) use=%d\n", exp, atomic_read(&exp->use));
++ IP_NF_ASSERT(atomic_read(&exp->use));
++ IP_NF_ASSERT(!timer_pending(&exp->timeout));
++
++ kfree(exp);
++}
++
++
++inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
++{
++ IP_NF_ASSERT(exp);
++
++ if (atomic_dec_and_test(&exp->use)) {
++ /* usage count dropped to zero */
++ destroy_expect(exp);
++ }
++}
++
++static inline struct ip_conntrack_expect *
++__ip_ct_expect_find(const struct ip_conntrack_tuple *tuple)
++{
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock);
++ return LIST_FIND(&ip_conntrack_expect_list, expect_cmp,
++ struct ip_conntrack_expect *, tuple);
++}
++
++/* Find a expectation corresponding to a tuple. */
++struct ip_conntrack_expect *
++ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
++{
++ struct ip_conntrack_expect *exp;
++
++ READ_LOCK(&ip_conntrack_lock);
++ READ_LOCK(&ip_conntrack_expect_tuple_lock);
++ exp = __ip_ct_expect_find(tuple);
++ if (exp)
++ atomic_inc(&exp->use);
++ READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ return exp;
++}
++
++/* remove one specific expectation from all lists and drop refcount,
++ * does _NOT_ delete the timer. */
++static void __unexpect_related(struct ip_conntrack_expect *expect)
++{
++ DEBUGP("unexpect_related(%p)\n", expect);
++ MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
++
++ /* we're not allowed to unexpect a confirmed expectation! */
++ IP_NF_ASSERT(!expect->sibling);
++
++ /* delete from global and local lists */
++ list_del(&expect->list);
++ list_del(&expect->expected_list);
++
++ /* decrement expect-count of master conntrack */
++ if (expect->expectant)
++ expect->expectant->expecting--;
++
++ ip_conntrack_expect_put(expect);
++}
++
++/* remove one specific expectation from all lists, drop refcount
++ * and expire timer.
++ * This function can _NOT_ be called for confirmed expects! */
++static void unexpect_related(struct ip_conntrack_expect *expect)
++{
++ IP_NF_ASSERT(expect->expectant);
++ IP_NF_ASSERT(expect->expectant->helper);
++ /* if we are supposed to have a timer, but we can't delete
++ * it: race condition. __unexpect_related will
++ * be called by timeout function */
++ if (expect->expectant->helper->timeout
++ && !del_timer(&expect->timeout))
++ return;
++
++ __unexpect_related(expect);
++}
++
++/* delete all unconfirmed expectations for this conntrack */
++static void remove_expectations(struct ip_conntrack *ct, int drop_refcount)
++{
++ struct list_head *exp_entry, *next;
++ struct ip_conntrack_expect *exp;
++
++ DEBUGP("remove_expectations(%p)\n", ct);
++
++ list_for_each_safe(exp_entry, next, &ct->sibling_list) {
++ exp = list_entry(exp_entry, struct ip_conntrack_expect,
++ expected_list);
++
++ /* we skip established expectations, as we want to delete
++ * the un-established ones only */
++ if (exp->sibling) {
++ DEBUGP("remove_expectations: skipping established %p of %p\n", exp->sibling, ct);
++ if (drop_refcount) {
++ /* Indicate that this expectations parent is dead */
++ ip_conntrack_put(exp->expectant);
++ exp->expectant = NULL;
++ }
++ continue;
++ }
++
++ IP_NF_ASSERT(list_inlist(&ip_conntrack_expect_list, exp));
++ IP_NF_ASSERT(exp->expectant == ct);
++
++ /* delete expectation from global and private lists */
++ unexpect_related(exp);
++ }
++}
++
++static void
++clean_from_lists(struct ip_conntrack *ct)
++{
++ unsigned int ho, hr;
++
++ DEBUGP("clean_from_lists(%p)\n", ct);
++ MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
++
++ ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++ hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++ LIST_DELETE(&ip_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
++ LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
++
++ /* Destroy all un-established, pending expectations */
++ remove_expectations(ct, 1);
++}
++
++static void
++destroy_conntrack(struct nf_conntrack *nfct)
++{
++ struct ip_conntrack *ct = (struct ip_conntrack *)nfct, *master = NULL;
++ struct ip_conntrack_protocol *proto;
++
++ DEBUGP("destroy_conntrack(%p)\n", ct);
++ IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
++ IP_NF_ASSERT(!timer_pending(&ct->timeout));
++
++ /* To make sure we don't get any weird locking issues here:
++ * destroy_conntrack() MUST NOT be called with a write lock
++ * to ip_conntrack_lock!!! -HW */
++ proto = ip_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
++ if (proto && proto->destroy)
++ proto->destroy(ct);
++
++ if (ip_conntrack_destroyed)
++ ip_conntrack_destroyed(ct);
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* Delete us from our own list to prevent corruption later */
++ list_del(&ct->sibling_list);
++
++ /* Delete our master expectation */
++ if (ct->master) {
++ if (ct->master->expectant) {
++ /* can't call __unexpect_related here,
++ * since it would screw up expect_list */
++ list_del(&ct->master->expected_list);
++ master = ct->master->expectant;
++ }
++ kfree(ct->master);
++ }
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ if (master)
++ ip_conntrack_put(master);
++
++ DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
++ kmem_cache_free(ip_conntrack_cachep, ct);
++ atomic_dec(&ip_conntrack_count);
++}
++
++static void death_by_timeout(unsigned long ul_conntrack)
++{
++ struct ip_conntrack *ct = (void *)ul_conntrack;
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ clean_from_lists(ct);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ ip_conntrack_put(ct);
++}
++
++static inline int
++conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
++ const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack *ignored_conntrack)
++{
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ return i->ctrack != ignored_conntrack
++ && ip_ct_tuple_equal(tuple, &i->tuple);
++}
++
++static struct ip_conntrack_tuple_hash *
++__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack *ignored_conntrack)
++{
++ struct ip_conntrack_tuple_hash *h;
++ unsigned int hash = hash_conntrack(tuple);
++
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ h = LIST_FIND(&ip_conntrack_hash[hash],
++ conntrack_tuple_cmp,
++ struct ip_conntrack_tuple_hash *,
++ tuple, ignored_conntrack);
++ return h;
++}
++
++/* Find a connection corresponding to a tuple. */
++struct ip_conntrack_tuple_hash *
++ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack *ignored_conntrack)
++{
++ struct ip_conntrack_tuple_hash *h;
++
++ READ_LOCK(&ip_conntrack_lock);
++ h = __ip_conntrack_find(tuple, ignored_conntrack);
++ if (h)
++ atomic_inc(&h->ctrack->ct_general.use);
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ return h;
++}
++
++static inline struct ip_conntrack *
++__ip_conntrack_get(struct nf_ct_info *nfct, enum ip_conntrack_info *ctinfo)
++{
++ struct ip_conntrack *ct
++ = (struct ip_conntrack *)nfct->master;
++
++ /* ctinfo is the index of the nfct inside the conntrack */
++ *ctinfo = nfct - ct->infos;
++ IP_NF_ASSERT(*ctinfo >= 0 && *ctinfo < IP_CT_NUMBER);
++ return ct;
++}
++
++/* Return conntrack and conntrack_info given skb->nfct->master */
++struct ip_conntrack *
++ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
++{
++ if (skb->nfct)
++ return __ip_conntrack_get(skb->nfct, ctinfo);
++ return NULL;
++}
++
++/* Confirm a connection given skb->nfct; places it in hash table */
++int
++__ip_conntrack_confirm(struct nf_ct_info *nfct)
++{
++ unsigned int hash, repl_hash;
++ struct ip_conntrack *ct;
++ enum ip_conntrack_info ctinfo;
++
++ ct = __ip_conntrack_get(nfct, &ctinfo);
++
++ /* ipt_REJECT uses ip_conntrack_attach to attach related
++ ICMP/TCP RST packets in other direction. Actual packet
++ which created connection will be IP_CT_NEW or for an
++ expected connection, IP_CT_RELATED. */
++ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
++ return NF_ACCEPT;
++
++ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++
++ /* We're not in hash table, and we refuse to set up related
++ connections for unconfirmed conns. But packet copies and
++ REJECT will give spurious warnings here. */
++ /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
++
++ /* No external references means no one else could have
++ confirmed us. */
++ IP_NF_ASSERT(!is_confirmed(ct));
++ DEBUGP("Confirming conntrack %p\n", ct);
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* See if there's one in the list already, including reverse:
++ NAT could have grabbed it without realizing, since we're
++ not in the hash. If there is, we lost race. */
++ if (!LIST_FIND(&ip_conntrack_hash[hash],
++ conntrack_tuple_cmp,
++ struct ip_conntrack_tuple_hash *,
++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
++ && !LIST_FIND(&ip_conntrack_hash[repl_hash],
++ conntrack_tuple_cmp,
++ struct ip_conntrack_tuple_hash *,
++ &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
++ list_prepend(&ip_conntrack_hash[hash],
++ &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
++ list_prepend(&ip_conntrack_hash[repl_hash],
++ &ct->tuplehash[IP_CT_DIR_REPLY]);
++ /* Timer relative to confirmation time, not original
++ setting time, otherwise we'd get timer wrap in
++ weird delay cases. */
++ ct->timeout.expires += jiffies;
++ add_timer(&ct->timeout);
++ atomic_inc(&ct->ct_general.use);
++ set_bit(IPS_CONFIRMED_BIT, &ct->status);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ return NF_ACCEPT;
++ }
++
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ return NF_DROP;
++}
++
++/* Returns true if a connection corresponds to the tuple (required
++ for NAT). */
++int
++ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack *ignored_conntrack)
++{
++ struct ip_conntrack_tuple_hash *h;
++
++ READ_LOCK(&ip_conntrack_lock);
++ h = __ip_conntrack_find(tuple, ignored_conntrack);
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ return h != NULL;
++}
++
++/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
++struct ip_conntrack *
++icmp_error_track(struct sk_buff *skb,
++ enum ip_conntrack_info *ctinfo,
++ unsigned int hooknum)
++{
++ struct ip_conntrack_tuple innertuple, origtuple;
++ struct {
++ struct icmphdr icmp;
++ struct iphdr ip;
++ } inside;
++ struct ip_conntrack_protocol *innerproto;
++ struct ip_conntrack_tuple_hash *h;
++ int dataoff;
++
++ IP_NF_ASSERT(skb->nfct == NULL);
++
++ /* Not enough header? */
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &inside, sizeof(inside))!=0)
++ return NULL;
++
++ if (inside.icmp.type != ICMP_DEST_UNREACH
++ && inside.icmp.type != ICMP_SOURCE_QUENCH
++ && inside.icmp.type != ICMP_TIME_EXCEEDED
++ && inside.icmp.type != ICMP_PARAMETERPROB
++ && inside.icmp.type != ICMP_REDIRECT)
++ return NULL;
++
++ /* Ignore ICMP's containing fragments (shouldn't happen) */
++ if (inside.ip.frag_off & htons(IP_OFFSET)) {
++ DEBUGP("icmp_error_track: fragment of proto %u\n",
++ inside.ip.protocol);
++ return NULL;
++ }
++
++ innerproto = ip_ct_find_proto(inside.ip.protocol);
++ dataoff = skb->nh.iph->ihl*4 + sizeof(inside.icmp) + inside.ip.ihl*4;
++ /* Are they talking about one of our connections? */
++ if (!get_tuple(&inside.ip, skb, dataoff, &origtuple, innerproto)) {
++ DEBUGP("icmp_error: ! get_tuple p=%u", inside.ip.protocol);
++ return NULL;
++ }
++
++ /* Ordinarily, we'd expect the inverted tupleproto, but it's
++ been preserved inside the ICMP. */
++ if (!invert_tuple(&innertuple, &origtuple, innerproto)) {
++ DEBUGP("icmp_error_track: Can't invert tuple\n");
++ return NULL;
++ }
++
++ *ctinfo = IP_CT_RELATED;
++
++ h = ip_conntrack_find_get(&innertuple, NULL);
++ if (!h) {
++ /* Locally generated ICMPs will match inverted if they
++ haven't been SNAT'ed yet */
++ /* FIXME: NAT code has to handle half-done double NAT --RR */
++ if (hooknum == NF_IP_LOCAL_OUT)
++ h = ip_conntrack_find_get(&origtuple, NULL);
++
++ if (!h) {
++ DEBUGP("icmp_error_track: no match\n");
++ return NULL;
++ }
++ /* Reverse direction from that found */
++ if (DIRECTION(h) != IP_CT_DIR_REPLY)
++ *ctinfo += IP_CT_IS_REPLY;
++ } else {
++ if (DIRECTION(h) == IP_CT_DIR_REPLY)
++ *ctinfo += IP_CT_IS_REPLY;
++ }
++
++ /* Update skb to refer to this connection */
++ skb->nfct = &h->ctrack->infos[*ctinfo];
++ return h->ctrack;
++}
++
++/* There's a small race here where we may free a just-assured
++ connection. Too bad: we're in trouble anyway. */
++static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
++{
++ return !(test_bit(IPS_ASSURED_BIT, &i->ctrack->status));
++}
++
++static int early_drop(struct list_head *chain)
++{
++ /* Traverse backwards: gives us oldest, which is roughly LRU */
++ struct ip_conntrack_tuple_hash *h;
++ int dropped = 0;
++
++ READ_LOCK(&ip_conntrack_lock);
++ h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
++ if (h)
++ atomic_inc(&h->ctrack->ct_general.use);
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ if (!h)
++ return dropped;
++
++ if (del_timer(&h->ctrack->timeout)) {
++ death_by_timeout((unsigned long)h->ctrack);
++ dropped = 1;
++ }
++ ip_conntrack_put(h->ctrack);
++ return dropped;
++}
++
++static inline int helper_cmp(const struct ip_conntrack_helper *i,
++ const struct ip_conntrack_tuple *rtuple)
++{
++ return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
++}
++
++struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple)
++{
++ return LIST_FIND(&helpers, helper_cmp,
++ struct ip_conntrack_helper *,
++ tuple);
++}
++
++/* Allocate a new conntrack: we return -ENOMEM if classification
++ failed due to stress. Otherwise it really is unclassifiable. */
++static struct ip_conntrack_tuple_hash *
++init_conntrack(const struct ip_conntrack_tuple *tuple,
++ struct ip_conntrack_protocol *protocol,
++ struct sk_buff *skb)
++{
++ struct ip_conntrack *conntrack;
++ struct ip_conntrack_tuple repl_tuple;
++ size_t hash;
++ struct ip_conntrack_expect *expected;
++ int i;
++ static unsigned int drop_next;
++
++ if (!ip_conntrack_hash_rnd_initted) {
++ get_random_bytes(&ip_conntrack_hash_rnd, 4);
++ ip_conntrack_hash_rnd_initted = 1;
++ }
++
++ hash = hash_conntrack(tuple);
++
++ if (ip_conntrack_max &&
++ atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
++ /* Try dropping from random chain, or else from the
++ chain about to put into (in case they're trying to
++ bomb one hash chain). */
++ unsigned int next = (drop_next++)%ip_conntrack_htable_size;
++
++ if (!early_drop(&ip_conntrack_hash[next])
++ && !early_drop(&ip_conntrack_hash[hash])) {
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "ip_conntrack: table full, dropping"
++ " packet.\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ if (!invert_tuple(&repl_tuple, tuple, protocol)) {
++ DEBUGP("Can't invert tuple.\n");
++ return NULL;
++ }
++
++ conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
++ if (!conntrack) {
++ DEBUGP("Can't allocate conntrack.\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ memset(conntrack, 0, sizeof(*conntrack));
++ atomic_set(&conntrack->ct_general.use, 1);
++ conntrack->ct_general.destroy = destroy_conntrack;
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
++ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
++ conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
++ for (i=0; i < IP_CT_NUMBER; i++)
++ conntrack->infos[i].master = &conntrack->ct_general;
++
++ if (!protocol->new(conntrack, skb)) {
++ kmem_cache_free(ip_conntrack_cachep, conntrack);
++ return NULL;
++ }
++ /* Don't set timer yet: wait for confirmation */
++ init_timer(&conntrack->timeout);
++ conntrack->timeout.data = (unsigned long)conntrack;
++ conntrack->timeout.function = death_by_timeout;
++
++ INIT_LIST_HEAD(&conntrack->sibling_list);
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* Need finding and deleting of expected ONLY if we win race */
++ READ_LOCK(&ip_conntrack_expect_tuple_lock);
++ expected = LIST_FIND(&ip_conntrack_expect_list, expect_cmp,
++ struct ip_conntrack_expect *, tuple);
++ READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
++
++ /* If master is not in hash table yet (ie. packet hasn't left
++ this machine yet), how can other end know about expected?
++ Hence these are not the droids you are looking for (if
++ master ct never got confirmed, we'd hold a reference to it
++ and weird things would happen to future packets). */
++ if (expected && !is_confirmed(expected->expectant))
++ expected = NULL;
++
++ /* Look up the conntrack helper for master connections only */
++ if (!expected)
++ conntrack->helper = ip_ct_find_helper(&repl_tuple);
++
++ /* If the expectation is dying, then this is a loser. */
++ if (expected
++ && expected->expectant->helper->timeout
++ && ! del_timer(&expected->timeout))
++ expected = NULL;
++
++ if (expected) {
++ DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
++ conntrack, expected);
++ /* Welcome, Mr. Bond. We've been expecting you... */
++ IP_NF_ASSERT(master_ct(conntrack));
++ __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
++ conntrack->master = expected;
++ expected->sibling = conntrack;
++ LIST_DELETE(&ip_conntrack_expect_list, expected);
++ expected->expectant->expecting--;
++ nf_conntrack_get(&master_ct(conntrack)->infos[0]);
++ }
++ atomic_inc(&ip_conntrack_count);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ if (expected && expected->expectfn)
++ expected->expectfn(conntrack);
++ return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
++}
++
++/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
++static inline struct ip_conntrack *
++resolve_normal_ct(struct sk_buff *skb,
++ struct ip_conntrack_protocol *proto,
++ int *set_reply,
++ unsigned int hooknum,
++ enum ip_conntrack_info *ctinfo)
++{
++ struct ip_conntrack_tuple tuple;
++ struct ip_conntrack_tuple_hash *h;
++
++ IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
++
++ if (!get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, &tuple, proto))
++ return NULL;
++
++ /* look for tuple match */
++ h = ip_conntrack_find_get(&tuple, NULL);
++ if (!h) {
++ h = init_conntrack(&tuple, proto, skb);
++ if (!h)
++ return NULL;
++ if (IS_ERR(h))
++ return (void *)h;
++ }
++
++ /* It exists; we have (non-exclusive) reference. */
++ if (DIRECTION(h) == IP_CT_DIR_REPLY) {
++ *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
++ /* Please set reply bit if this packet OK */
++ *set_reply = 1;
++ } else {
++ /* Once we've had two way comms, always ESTABLISHED. */
++ if (test_bit(IPS_SEEN_REPLY_BIT, &h->ctrack->status)) {
++ DEBUGP("ip_conntrack_in: normal packet for %p\n",
++ h->ctrack);
++ *ctinfo = IP_CT_ESTABLISHED;
++ } else if (test_bit(IPS_EXPECTED_BIT, &h->ctrack->status)) {
++ DEBUGP("ip_conntrack_in: related packet for %p\n",
++ h->ctrack);
++ *ctinfo = IP_CT_RELATED;
++ } else {
++ DEBUGP("ip_conntrack_in: new packet for %p\n",
++ h->ctrack);
++ *ctinfo = IP_CT_NEW;
++ }
++ *set_reply = 0;
++ }
++ skb->nfct = &h->ctrack->infos[*ctinfo];
++ return h->ctrack;
++}
++
++/* Netfilter hook itself. */
++unsigned int ip_conntrack_in(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct ip_conntrack *ct;
++ enum ip_conntrack_info ctinfo;
++ struct ip_conntrack_protocol *proto;
++ int set_reply;
++ int ret;
++
++ /* FIXME: Do this right please. --RR */
++ (*pskb)->nfcache |= NFC_UNKNOWN;
++
++/* Doesn't cover locally-generated broadcast, so not worth it. */
++#if 0
++ /* Ignore broadcast: no `connection'. */
++ if ((*pskb)->pkt_type == PACKET_BROADCAST) {
++ printk("Broadcast packet!\n");
++ return NF_ACCEPT;
++ } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
++ == htonl(0x000000FF)) {
++ printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
++ NIPQUAD((*pskb)->nh.iph->saddr),
++ NIPQUAD((*pskb)->nh.iph->daddr),
++ (*pskb)->sk, (*pskb)->pkt_type);
++ }
++#endif
++
++ /* Previously seen (loopback)? Ignore. Do this before
++ fragment check. */
++ if ((*pskb)->nfct)
++ return NF_ACCEPT;
++
++ /* Gather fragments. */
++ if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
++ *pskb = ip_ct_gather_frags(*pskb);
++ if (!*pskb)
++ return NF_STOLEN;
++ }
++
++ proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);
++
++ /* It may be an icmp error... */
++ if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
++ && icmp_error_track(*pskb, &ctinfo, hooknum))
++ return NF_ACCEPT;
++
++ if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo)))
++ /* Not valid part of a connection */
++ return NF_ACCEPT;
++
++ if (IS_ERR(ct))
++ /* Too stressed to deal. */
++ return NF_DROP;
++
++ IP_NF_ASSERT((*pskb)->nfct);
++
++ ret = proto->packet(ct, *pskb, ctinfo);
++ if (ret == -1) {
++ /* Invalid */
++ nf_conntrack_put((*pskb)->nfct);
++ (*pskb)->nfct = NULL;
++ return NF_ACCEPT;
++ }
++
++ if (ret != NF_DROP && ct->helper) {
++ ret = ct->helper->help(*pskb, ct, ctinfo);
++ if (ret == -1) {
++ /* Invalid */
++ nf_conntrack_put((*pskb)->nfct);
++ (*pskb)->nfct = NULL;
++ return NF_ACCEPT;
++ }
++ }
++ if (set_reply)
++ set_bit(IPS_SEEN_REPLY_BIT, &ct->status);
++
++ return ret;
++}
++
++int invert_tuplepr(struct ip_conntrack_tuple *inverse,
++ const struct ip_conntrack_tuple *orig)
++{
++ return invert_tuple(inverse, orig, ip_ct_find_proto(orig->dst.protonum));
++}
++
++static inline int resent_expect(const struct ip_conntrack_expect *i,
++ const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack_tuple *mask)
++{
++ DEBUGP("resent_expect\n");
++ DEBUGP(" tuple: "); DUMP_TUPLE(&i->tuple);
++ DEBUGP("ct_tuple: "); DUMP_TUPLE(&i->ct_tuple);
++ DEBUGP("test tuple: "); DUMP_TUPLE(tuple);
++ return (((i->ct_tuple.dst.protonum == 0 && ip_ct_tuple_equal(&i->tuple, tuple))
++ || (i->ct_tuple.dst.protonum && ip_ct_tuple_equal(&i->ct_tuple, tuple)))
++ && ip_ct_tuple_equal(&i->mask, mask));
++}
++
++/* Would two expected things clash? */
++static inline int expect_clash(const struct ip_conntrack_expect *i,
++ const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack_tuple *mask)
++{
++ /* Part covered by intersection of masks must be unequal,
++ otherwise they clash */
++ struct ip_conntrack_tuple intersect_mask
++ = { { i->mask.src.ip & mask->src.ip,
++ { i->mask.src.u.all & mask->src.u.all } },
++ { i->mask.dst.ip & mask->dst.ip,
++ { i->mask.dst.u.all & mask->dst.u.all },
++ i->mask.dst.protonum & mask->dst.protonum } };
++
++ return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask);
++}
++
++inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect)
++{
++ WRITE_LOCK(&ip_conntrack_lock);
++ unexpect_related(expect);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++}
++
++static void expectation_timed_out(unsigned long ul_expect)
++{
++ struct ip_conntrack_expect *expect = (void *) ul_expect;
++
++ DEBUGP("expectation %p timed out\n", expect);
++ WRITE_LOCK(&ip_conntrack_lock);
++ __unexpect_related(expect);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++}
++
++/* Add a related connection. */
++int ip_conntrack_expect_related(struct ip_conntrack *related_to,
++ struct ip_conntrack_expect *expect)
++{
++ struct ip_conntrack_expect *old, *new;
++ int ret = 0;
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* Because of the write lock, no reader can walk the lists,
++ * so there is no need to use the tuple lock too */
++
++ DEBUGP("ip_conntrack_expect_related %p\n", related_to);
++ DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
++ DEBUGP("mask: "); DUMP_TUPLE(&expect->mask);
++
++ old = LIST_FIND(&ip_conntrack_expect_list, resent_expect,
++ struct ip_conntrack_expect *, &expect->tuple,
++ &expect->mask);
++ if (old) {
++ /* Helper private data may contain offsets but no pointers
++ pointing into the payload - otherwise we should have to copy
++ the data filled out by the helper over the old one */
++ DEBUGP("expect_related: resent packet\n");
++ if (related_to->helper->timeout) {
++ if (!del_timer(&old->timeout)) {
++ /* expectation is dying. Fall through */
++ old = NULL;
++ } else {
++ old->timeout.expires = jiffies +
++ related_to->helper->timeout * HZ;
++ add_timer(&old->timeout);
++ }
++ }
++
++ if (old) {
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ return -EEXIST;
++ }
++ } else if (related_to->helper->max_expected &&
++ related_to->expecting >= related_to->helper->max_expected) {
++ struct list_head *cur_item;
++ /* old == NULL */
++ if (!(related_to->helper->flags &
++ IP_CT_HELPER_F_REUSE_EXPECT)) {
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "ip_conntrack: max number of expected "
++ "connections %i of %s reached for "
++ "%u.%u.%u.%u->%u.%u.%u.%u\n",
++ related_to->helper->max_expected,
++ related_to->helper->name,
++ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
++ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
++ return -EPERM;
++ }
++ DEBUGP("ip_conntrack: max number of expected "
++ "connections %i of %s reached for "
++ "%u.%u.%u.%u->%u.%u.%u.%u, reusing\n",
++ related_to->helper->max_expected,
++ related_to->helper->name,
++ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
++ NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));
++
++ /* choose the oldest expectation to evict */
++ list_for_each(cur_item, &related_to->sibling_list) {
++ struct ip_conntrack_expect *cur;
++
++ cur = list_entry(cur_item,
++ struct ip_conntrack_expect,
++ expected_list);
++ if (cur->sibling == NULL) {
++ old = cur;
++ break;
++ }
++ }
++
++ /* (!old) cannot happen, since related_to->expecting is the
++ * number of unconfirmed expects */
++ IP_NF_ASSERT(old);
++
++ /* newnat14 does not reuse the real allocated memory
++ * structures but rather unexpects the old and
++ * allocates a new. unexpect_related will decrement
++ * related_to->expecting.
++ */
++ unexpect_related(old);
++ ret = -EPERM;
++ } else if (LIST_FIND(&ip_conntrack_expect_list, expect_clash,
++ struct ip_conntrack_expect *, &expect->tuple,
++ &expect->mask)) {
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ DEBUGP("expect_related: busy!\n");
++ return -EBUSY;
++ }
++
++ new = (struct ip_conntrack_expect *)
++ kmalloc(sizeof(struct ip_conntrack_expect), GFP_ATOMIC);
++ if (!new) {
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ DEBUGP("expect_related: OOM allocating expect\n");
++ return -ENOMEM;
++ }
++
++ DEBUGP("new expectation %p of conntrack %p\n", new, related_to);
++ memcpy(new, expect, sizeof(*expect));
++ new->expectant = related_to;
++ new->sibling = NULL;
++ atomic_set(&new->use, 1);
++
++ /* add to expected list for this connection */
++ list_add(&new->expected_list, &related_to->sibling_list);
++ /* add to global list of expectations */
++ list_prepend(&ip_conntrack_expect_list, &new->list);
++ /* add and start timer if required */
++ if (related_to->helper->timeout) {
++ init_timer(&new->timeout);
++ new->timeout.data = (unsigned long)new;
++ new->timeout.function = expectation_timed_out;
++ new->timeout.expires = jiffies +
++ related_to->helper->timeout * HZ;
++ add_timer(&new->timeout);
++ }
++ related_to->expecting++;
++
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ return ret;
++}
++
++/* Change tuple in an existing expectation */
++int ip_conntrack_change_expect(struct ip_conntrack_expect *expect,
++ struct ip_conntrack_tuple *newtuple)
++{
++ int ret;
++
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ WRITE_LOCK(&ip_conntrack_expect_tuple_lock);
++
++ DEBUGP("change_expect:\n");
++ DEBUGP("exp tuple: "); DUMP_TUPLE(&expect->tuple);
++ DEBUGP("exp mask: "); DUMP_TUPLE(&expect->mask);
++ DEBUGP("newtuple: "); DUMP_TUPLE(newtuple);
++ if (expect->ct_tuple.dst.protonum == 0) {
++ /* Never seen before */
++ DEBUGP("change expect: never seen before\n");
++ if (!ip_ct_tuple_equal(&expect->tuple, newtuple)
++ && LIST_FIND(&ip_conntrack_expect_list, expect_clash,
++ struct ip_conntrack_expect *, newtuple, &expect->mask)) {
++ /* Force NAT to find an unused tuple */
++ ret = -1;
++ } else {
++ memcpy(&expect->ct_tuple, &expect->tuple, sizeof(expect->tuple));
++ memcpy(&expect->tuple, newtuple, sizeof(expect->tuple));
++ ret = 0;
++ }
++ } else {
++ /* Resent packet */
++ DEBUGP("change expect: resent packet\n");
++ if (ip_ct_tuple_equal(&expect->tuple, newtuple)) {
++ ret = 0;
++ } else {
++ /* Force NAT to choose again the same port */
++ ret = -1;
++ }
++ }
++ WRITE_UNLOCK(&ip_conntrack_expect_tuple_lock);
++
++ return ret;
++}
++
++/* Alter reply tuple (maybe alter helper). If it's already taken,
++ return 0 and don't do alteration. */
++int ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
++ const struct ip_conntrack_tuple *newreply)
++{
++ WRITE_LOCK(&ip_conntrack_lock);
++ if (__ip_conntrack_find(newreply, conntrack)) {
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ return 0;
++ }
++ /* Should be unconfirmed, so not in hash table yet */
++ IP_NF_ASSERT(!is_confirmed(conntrack));
++
++ DEBUGP("Altering reply tuple of %p to ", conntrack);
++ DUMP_TUPLE(newreply);
++
++ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
++ if (!conntrack->master)
++ conntrack->helper = LIST_FIND(&helpers, helper_cmp,
++ struct ip_conntrack_helper *,
++ newreply);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ return 1;
++}
++
++int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
++{
++ WRITE_LOCK(&ip_conntrack_lock);
++ list_prepend(&helpers, me);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ return 0;
++}
++
++static inline int unhelp(struct ip_conntrack_tuple_hash *i,
++ const struct ip_conntrack_helper *me)
++{
++ if (i->ctrack->helper == me) {
++ /* Get rid of any expected. */
++ remove_expectations(i->ctrack, 0);
++ /* And *then* set helper to NULL */
++ i->ctrack->helper = NULL;
++ }
++ return 0;
++}
++
++void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
++{
++ unsigned int i;
++
++ /* Need write lock here, to delete helper. */
++ WRITE_LOCK(&ip_conntrack_lock);
++ LIST_DELETE(&helpers, me);
++
++ /* Get rid of expecteds, set helpers to NULL. */
++ for (i = 0; i < ip_conntrack_htable_size; i++)
++ LIST_FIND_W(&ip_conntrack_hash[i], unhelp,
++ struct ip_conntrack_tuple_hash *, me);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ /* Someone could be still looking at the helper in a bh. */
++ synchronize_net();
++}
++
++/* Refresh conntrack for this many jiffies. */
++void ip_ct_refresh(struct ip_conntrack *ct, unsigned long extra_jiffies)
++{
++ IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* If not in hash table, timer will not be active yet */
++ if (!is_confirmed(ct))
++ ct->timeout.expires = extra_jiffies;
++ else {
++ /* Need del_timer for race avoidance (may already be dying). */
++ if (del_timer(&ct->timeout)) {
++ ct->timeout.expires = jiffies + extra_jiffies;
++ add_timer(&ct->timeout);
++ }
++ }
++ WRITE_UNLOCK(&ip_conntrack_lock);
++}
++
++/* Returns new sk_buff, or NULL */
++struct sk_buff *
++ip_ct_gather_frags(struct sk_buff *skb)
++{
++ struct sock *sk = skb->sk;
++#ifdef CONFIG_NETFILTER_DEBUG
++ unsigned int olddebug = skb->nf_debug;
++#endif
++ if (sk) {
++ sock_hold(sk);
++ skb_orphan(skb);
++ }
++
++ local_bh_disable();
++ skb = ip_defrag(skb);
++ local_bh_enable();
++
++ if (!skb) {
++ if (sk)
++ sock_put(sk);
++ return skb;
++ }
++
++ if (sk) {
++ skb_set_owner_w(skb, sk);
++ sock_put(sk);
++ }
++
++ ip_send_check(skb->nh.iph);
++ skb->nfcache |= NFC_ALTERED;
++#ifdef CONFIG_NETFILTER_DEBUG
++ /* Packet path as if nothing had happened. */
++ skb->nf_debug = olddebug;
++#endif
++ return skb;
++}
++
++/* Used by ipt_REJECT. */
++static void ip_conntrack_attach(struct sk_buff *nskb, struct nf_ct_info *nfct)
++{
++ struct ip_conntrack *ct;
++ enum ip_conntrack_info ctinfo;
++
++ ct = __ip_conntrack_get(nfct, &ctinfo);
++
++ /* This ICMP is in reverse direction to the packet which
++ caused it */
++ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
++ ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
++ else
++ ctinfo = IP_CT_RELATED;
++
++ /* Attach new skbuff, and increment count */
++ nskb->nfct = &ct->infos[ctinfo];
++ atomic_inc(&ct->ct_general.use);
++}
++
++static inline int
++do_kill(const struct ip_conntrack_tuple_hash *i,
++ int (*kill)(const struct ip_conntrack *i, void *data),
++ void *data)
++{
++ return kill(i->ctrack, data);
++}
++
++/* Bring out ya dead! */
++static struct ip_conntrack_tuple_hash *
++get_next_corpse(int (*kill)(const struct ip_conntrack *i, void *data),
++ void *data, unsigned int *bucket)
++{
++ struct ip_conntrack_tuple_hash *h = NULL;
++
++ READ_LOCK(&ip_conntrack_lock);
++ for (; !h && *bucket < ip_conntrack_htable_size; (*bucket)++) {
++ h = LIST_FIND(&ip_conntrack_hash[*bucket], do_kill,
++ struct ip_conntrack_tuple_hash *, kill, data);
++ }
++ if (h)
++ atomic_inc(&h->ctrack->ct_general.use);
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ return h;
++}
++
++void
++ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data),
++ void *data)
++{
++ struct ip_conntrack_tuple_hash *h;
++ unsigned int bucket = 0;
++
++ while ((h = get_next_corpse(kill, data, &bucket)) != NULL) {
++ /* Time to push up daises... */
++ if (del_timer(&h->ctrack->timeout))
++ death_by_timeout((unsigned long)h->ctrack);
++ /* ... else the timer will get him soon. */
++
++ ip_conntrack_put(h->ctrack);
++ }
++}
++
++/* Fast function for those who don't want to parse /proc (and I don't
++ blame them). */
++/* Reversing the socket's dst/src point of view gives us the reply
++ mapping. */
++static int
++getorigdst(struct sock *sk, int optval, void *user, int *len)
++{
++ struct inet_opt *inet = inet_sk(sk);
++ struct ip_conntrack_tuple_hash *h;
++ struct ip_conntrack_tuple tuple;
++
++ IP_CT_TUPLE_U_BLANK(&tuple);
++ tuple.src.ip = inet->rcv_saddr;
++ tuple.src.u.tcp.port = inet->sport;
++ tuple.dst.ip = inet->daddr;
++ tuple.dst.u.tcp.port = inet->dport;
++ tuple.dst.protonum = IPPROTO_TCP;
++
++ /* We only do TCP at the moment: is there a better way? */
++ if (strcmp(sk->sk_prot->name, "TCP")) {
++ DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
++ return -ENOPROTOOPT;
++ }
++
++ if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
++ DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
++ *len, sizeof(struct sockaddr_in));
++ return -EINVAL;
++ }
++
++ h = ip_conntrack_find_get(&tuple, NULL);
++ if (h) {
++ struct sockaddr_in sin;
++
++ sin.sin_family = AF_INET;
++ sin.sin_port = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.u.tcp.port;
++ sin.sin_addr.s_addr = h->ctrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.ip;
++
++ DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
++ NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
++ ip_conntrack_put(h->ctrack);
++ if (copy_to_user(user, &sin, sizeof(sin)) != 0)
++ return -EFAULT;
++ else
++ return 0;
++ }
++ DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
++ NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
++ NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
++ return -ENOENT;
++}
++
++static struct nf_sockopt_ops so_getorigdst = {
++ .pf = PF_INET,
++ .get_optmin = SO_ORIGINAL_DST,
++ .get_optmax = SO_ORIGINAL_DST+1,
++ .get = &getorigdst,
++};
++
++static int kill_all(const struct ip_conntrack *i, void *data)
++{
++ return 1;
++}
++
++/* Mishearing the voices in his head, our hero wonders how he's
++ supposed to kill the mall. */
++void ip_conntrack_cleanup(void)
++{
++ ip_ct_attach = NULL;
++ /* This makes sure all current packets have passed through
++ netfilter framework. Roll on, two-stage module
++ delete... */
++ synchronize_net();
++
++ i_see_dead_people:
++ ip_ct_selective_cleanup(kill_all, NULL);
++ if (atomic_read(&ip_conntrack_count) != 0) {
++ schedule();
++ goto i_see_dead_people;
++ }
++
++ kmem_cache_destroy(ip_conntrack_cachep);
++ vfree(ip_conntrack_hash);
++ nf_unregister_sockopt(&so_getorigdst);
++}
++
++static int hashsize;
++MODULE_PARM(hashsize, "i");
++
++int __init ip_conntrack_init(void)
++{
++ unsigned int i;
++ int ret;
++
++ /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
++ * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
++ if (hashsize) {
++ ip_conntrack_htable_size = hashsize;
++ } else {
++ ip_conntrack_htable_size
++ = (((num_physpages << PAGE_SHIFT) / 16384)
++ / sizeof(struct list_head));
++ if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
++ ip_conntrack_htable_size = 8192;
++ if (ip_conntrack_htable_size < 16)
++ ip_conntrack_htable_size = 16;
++ }
++ ip_conntrack_max = 8 * ip_conntrack_htable_size;
++
++ printk("ip_conntrack version %s (%u buckets, %d max)"
++ " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
++ ip_conntrack_htable_size, ip_conntrack_max,
++ sizeof(struct ip_conntrack));
++
++ ret = nf_register_sockopt(&so_getorigdst);
++ if (ret != 0) {
++ printk(KERN_ERR "Unable to register netfilter socket option\n");
++ return ret;
++ }
++
++ ip_conntrack_hash = vmalloc(sizeof(struct list_head)
++ * ip_conntrack_htable_size);
++ if (!ip_conntrack_hash) {
++ printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
++ goto err_unreg_sockopt;
++ }
++
++ ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
++ sizeof(struct ip_conntrack), 0,
++ SLAB_HWCACHE_ALIGN, NULL, NULL);
++ if (!ip_conntrack_cachep) {
++ printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
++ goto err_free_hash;
++ }
++ /* Don't NEED lock here, but good form anyway. */
++ WRITE_LOCK(&ip_conntrack_lock);
++ /* Sew in builtin protocols. */
++ list_append(&protocol_list, &ip_conntrack_protocol_tcp);
++ list_append(&protocol_list, &ip_conntrack_protocol_udp);
++ list_append(&protocol_list, &ip_conntrack_protocol_icmp);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ for (i = 0; i < ip_conntrack_htable_size; i++)
++ INIT_LIST_HEAD(&ip_conntrack_hash[i]);
++
++ /* For use by ipt_REJECT */
++ ip_ct_attach = ip_conntrack_attach;
++ return ret;
++
++err_free_hash:
++ vfree(ip_conntrack_hash);
++err_unreg_sockopt:
++ nf_unregister_sockopt(&so_getorigdst);
++
++ return -ENOMEM;
++}
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.3/net/ipv4/netfilter/ip_conntrack_standalone.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_standalone.c 2004-02-26 23:36:59.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_conntrack_standalone.c 2004-02-27 00:03:14.482026576 +0100
+@@ -194,6 +194,26 @@
+ return ip_conntrack_confirm(*pskb);
+ }
+
++static unsigned int ip_conntrack_defrag(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ /* Previously seen (loopback)? Ignore. Do this before
++ fragment check. */
++ if ((*pskb)->nfct)
++ return NF_ACCEPT;
++
++ /* Gather fragments. */
++ if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
++ *pskb = ip_ct_gather_frags(*pskb);
++ if (!*pskb)
++ return NF_STOLEN;
++ }
++ return NF_ACCEPT;
++}
++
+ static unsigned int ip_refrag(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+@@ -236,6 +256,14 @@
+
+ /* Connection tracking may drop packets, but never alters them, so
+ make it the first hook. */
++static struct nf_hook_ops ip_conntrack_defrag_ops = {
++ .hook = ip_conntrack_defrag,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_PRE_ROUTING,
++ .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
++};
++
+ static struct nf_hook_ops ip_conntrack_in_ops = {
+ .hook = ip_conntrack_in,
+ .owner = THIS_MODULE,
+@@ -244,6 +272,14 @@
+ .priority = NF_IP_PRI_CONNTRACK,
+ };
+
++static struct nf_hook_ops ip_conntrack_defrag_local_out_ops = {
++ .hook = ip_conntrack_defrag,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_LOCAL_OUT,
++ .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
++};
++
+ static struct nf_hook_ops ip_conntrack_local_out_ops = {
+ .hook = ip_conntrack_local,
+ .owner = THIS_MODULE,
+@@ -470,10 +506,20 @@
+ if (!proc) goto cleanup_init;
+ proc->owner = THIS_MODULE;
+
++ ret = nf_register_hook(&ip_conntrack_defrag_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register pre-routing defrag hook.\n");
++ goto cleanup_proc;
++ }
++ ret = nf_register_hook(&ip_conntrack_defrag_local_out_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register local_out defrag hook.\n");
++ goto cleanup_defragops;
++ }
+ ret = nf_register_hook(&ip_conntrack_in_ops);
+ if (ret < 0) {
+ printk("ip_conntrack: can't register pre-routing hook.\n");
+- goto cleanup_proc;
++ goto cleanup_defraglocalops;
+ }
+ ret = nf_register_hook(&ip_conntrack_local_out_ops);
+ if (ret < 0) {
+@@ -511,6 +557,10 @@
+ nf_unregister_hook(&ip_conntrack_local_out_ops);
+ cleanup_inops:
+ nf_unregister_hook(&ip_conntrack_in_ops);
++ cleanup_defraglocalops:
++ nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
++ cleanup_defragops:
++ nf_unregister_hook(&ip_conntrack_defrag_ops);
+ cleanup_proc:
+ proc_net_remove("ip_conntrack");
+ cleanup_init:
+@@ -602,5 +652,6 @@
+ EXPORT_SYMBOL(ip_conntrack_expect_list);
+ EXPORT_SYMBOL(ip_conntrack_lock);
+ EXPORT_SYMBOL(ip_conntrack_hash);
++EXPORT_SYMBOL(ip_conntrack_untracked);
+ EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
+ EXPORT_SYMBOL_GPL(ip_conntrack_put);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_standalone.c.orig linux-2.6.3/net/ipv4/netfilter/ip_conntrack_standalone.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_conntrack_standalone.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_conntrack_standalone.c.orig 2004-02-27 00:02:49.321851504 +0100
+@@ -0,0 +1,606 @@
++/* This file contains all the functions required for the standalone
++ ip_conntrack module.
++
++ These are not required by the compatibility layer.
++*/
++
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/ip.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/proc_fs.h>
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif
++#include <net/checksum.h>
++
++#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
++#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
++
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
++#include <linux/netfilter_ipv4/ip_conntrack_core.h>
++#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++MODULE_LICENSE("GPL");
++
++static int kill_proto(const struct ip_conntrack *i, void *data)
++{
++ return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
++ *((u_int8_t *) data));
++}
++
++static unsigned int
++print_tuple(char *buffer, const struct ip_conntrack_tuple *tuple,
++ struct ip_conntrack_protocol *proto)
++{
++ int len;
++
++ len = sprintf(buffer, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
++ NIPQUAD(tuple->src.ip), NIPQUAD(tuple->dst.ip));
++
++ len += proto->print_tuple(buffer + len, tuple);
++
++ return len;
++}
++
++/* FIXME: Don't print source proto part. --RR */
++static unsigned int
++print_expect(char *buffer, const struct ip_conntrack_expect *expect)
++{
++ unsigned int len;
++
++ if (expect->expectant->helper->timeout)
++ len = sprintf(buffer, "EXPECTING: %lu ",
++ timer_pending(&expect->timeout)
++ ? (expect->timeout.expires - jiffies)/HZ : 0);
++ else
++ len = sprintf(buffer, "EXPECTING: - ");
++ len += sprintf(buffer + len, "use=%u proto=%u ",
++ atomic_read(&expect->use), expect->tuple.dst.protonum);
++ len += print_tuple(buffer + len, &expect->tuple,
++ __ip_ct_find_proto(expect->tuple.dst.protonum));
++ len += sprintf(buffer + len, "\n");
++ return len;
++}
++
++static unsigned int
++print_conntrack(char *buffer, struct ip_conntrack *conntrack)
++{
++ unsigned int len;
++ struct ip_conntrack_protocol *proto
++ = __ip_ct_find_proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.protonum);
++
++ len = sprintf(buffer, "%-8s %u %lu ",
++ proto->name,
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.protonum,
++ timer_pending(&conntrack->timeout)
++ ? (conntrack->timeout.expires - jiffies)/HZ : 0);
++
++ len += proto->print_conntrack(buffer + len, conntrack);
++ len += print_tuple(buffer + len,
++ &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
++ proto);
++ if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
++ len += sprintf(buffer + len, "[UNREPLIED] ");
++ len += print_tuple(buffer + len,
++ &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
++ proto);
++ if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
++ len += sprintf(buffer + len, "[ASSURED] ");
++ len += sprintf(buffer + len, "use=%u ",
++ atomic_read(&conntrack->ct_general.use));
++ len += sprintf(buffer + len, "\n");
++
++ return len;
++}
++
++/* Returns true when finished. */
++static inline int
++conntrack_iterate(const struct ip_conntrack_tuple_hash *hash,
++ char *buffer, off_t offset, off_t *upto,
++ unsigned int *len, unsigned int maxlen)
++{
++ unsigned int newlen;
++ IP_NF_ASSERT(hash->ctrack);
++
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++
++ /* Only count originals */
++ if (DIRECTION(hash))
++ return 0;
++
++ if ((*upto)++ < offset)
++ return 0;
++
++ newlen = print_conntrack(buffer + *len, hash->ctrack);
++ if (*len + newlen > maxlen)
++ return 1;
++ else *len += newlen;
++
++ return 0;
++}
++
++static int
++list_conntracks(char *buffer, char **start, off_t offset, int length)
++{
++ unsigned int i;
++ unsigned int len = 0;
++ off_t upto = 0;
++ struct list_head *e;
++
++ READ_LOCK(&ip_conntrack_lock);
++ /* Traverse hash; print originals then reply. */
++ for (i = 0; i < ip_conntrack_htable_size; i++) {
++ if (LIST_FIND(&ip_conntrack_hash[i], conntrack_iterate,
++ struct ip_conntrack_tuple_hash *,
++ buffer, offset, &upto, &len, length))
++ goto finished;
++ }
++
++ /* Now iterate through expecteds. */
++ READ_LOCK(&ip_conntrack_expect_tuple_lock);
++ list_for_each(e, &ip_conntrack_expect_list) {
++ unsigned int last_len;
++ struct ip_conntrack_expect *expect
++ = (struct ip_conntrack_expect *)e;
++ if (upto++ < offset) continue;
++
++ last_len = len;
++ len += print_expect(buffer + len, expect);
++ if (len > length) {
++ len = last_len;
++ goto finished_expects;
++ }
++ }
++
++ finished_expects:
++ READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
++ finished:
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ /* `start' hack - see fs/proc/generic.c line ~165 */
++ *start = (char *)((unsigned int)upto - offset);
++ return len;
++}
++
++static unsigned int ip_confirm(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ /* We've seen it coming out the other side: confirm it */
++ return ip_conntrack_confirm(*pskb);
++}
++
++static unsigned int ip_refrag(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct rtable *rt = (struct rtable *)(*pskb)->dst;
++
++ /* We've seen it coming out the other side: confirm */
++ if (ip_confirm(hooknum, pskb, in, out, okfn) != NF_ACCEPT)
++ return NF_DROP;
++
++ /* Local packets are never produced too large for their
++ interface. We degfragment them at LOCAL_OUT, however,
++ so we have to refragment them here. */
++ if ((*pskb)->len > dst_pmtu(&rt->u.dst) &&
++ !skb_shinfo(*pskb)->tso_size) {
++ /* No hook can be after us, so this should be OK. */
++ ip_fragment(*pskb, okfn);
++ return NF_STOLEN;
++ }
++ return NF_ACCEPT;
++}
++
++static unsigned int ip_conntrack_local(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ /* root is playing with raw sockets. */
++ if ((*pskb)->len < sizeof(struct iphdr)
++ || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
++ if (net_ratelimit())
++ printk("ipt_hook: happy cracking.\n");
++ return NF_ACCEPT;
++ }
++ return ip_conntrack_in(hooknum, pskb, in, out, okfn);
++}
++
++/* Connection tracking may drop packets, but never alters them, so
++ make it the first hook. */
++static struct nf_hook_ops ip_conntrack_in_ops = {
++ .hook = ip_conntrack_in,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_PRE_ROUTING,
++ .priority = NF_IP_PRI_CONNTRACK,
++};
++
++static struct nf_hook_ops ip_conntrack_local_out_ops = {
++ .hook = ip_conntrack_local,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_LOCAL_OUT,
++ .priority = NF_IP_PRI_CONNTRACK,
++};
++
++/* Refragmenter; last chance. */
++static struct nf_hook_ops ip_conntrack_out_ops = {
++ .hook = ip_refrag,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_POST_ROUTING,
++ .priority = NF_IP_PRI_LAST,
++};
++
++static struct nf_hook_ops ip_conntrack_local_in_ops = {
++ .hook = ip_confirm,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_LOCAL_IN,
++ .priority = NF_IP_PRI_LAST-1,
++};
++
++/* Sysctl support */
++
++#ifdef CONFIG_SYSCTL
++
++/* From ip_conntrack_core.c */
++extern int ip_conntrack_max;
++extern unsigned int ip_conntrack_htable_size;
++
++/* From ip_conntrack_proto_tcp.c */
++extern unsigned long ip_ct_tcp_timeout_syn_sent;
++extern unsigned long ip_ct_tcp_timeout_syn_recv;
++extern unsigned long ip_ct_tcp_timeout_established;
++extern unsigned long ip_ct_tcp_timeout_fin_wait;
++extern unsigned long ip_ct_tcp_timeout_close_wait;
++extern unsigned long ip_ct_tcp_timeout_last_ack;
++extern unsigned long ip_ct_tcp_timeout_time_wait;
++extern unsigned long ip_ct_tcp_timeout_close;
++
++/* From ip_conntrack_proto_udp.c */
++extern unsigned long ip_ct_udp_timeout;
++extern unsigned long ip_ct_udp_timeout_stream;
++
++/* From ip_conntrack_proto_icmp.c */
++extern unsigned long ip_ct_icmp_timeout;
++
++/* From ip_conntrack_proto_generic.c */
++extern unsigned long ip_ct_generic_timeout;
++
++static struct ctl_table_header *ip_ct_sysctl_header;
++
++static ctl_table ip_ct_sysctl_table[] = {
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_MAX,
++ .procname = "ip_conntrack_max",
++ .data = &ip_conntrack_max,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS,
++ .procname = "ip_conntrack_buckets",
++ .data = &ip_conntrack_htable_size,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0444,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
++ .procname = "ip_conntrack_tcp_timeout_syn_sent",
++ .data = &ip_ct_tcp_timeout_syn_sent,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
++ .procname = "ip_conntrack_tcp_timeout_syn_recv",
++ .data = &ip_ct_tcp_timeout_syn_recv,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
++ .procname = "ip_conntrack_tcp_timeout_established",
++ .data = &ip_ct_tcp_timeout_established,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
++ .procname = "ip_conntrack_tcp_timeout_fin_wait",
++ .data = &ip_ct_tcp_timeout_fin_wait,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
++ .procname = "ip_conntrack_tcp_timeout_close_wait",
++ .data = &ip_ct_tcp_timeout_close_wait,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
++ .procname = "ip_conntrack_tcp_timeout_last_ack",
++ .data = &ip_ct_tcp_timeout_last_ack,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
++ .procname = "ip_conntrack_tcp_timeout_time_wait",
++ .data = &ip_ct_tcp_timeout_time_wait,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
++ .procname = "ip_conntrack_tcp_timeout_close",
++ .data = &ip_ct_tcp_timeout_close,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT,
++ .procname = "ip_conntrack_udp_timeout",
++ .data = &ip_ct_udp_timeout,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
++ .procname = "ip_conntrack_udp_timeout_stream",
++ .data = &ip_ct_udp_timeout_stream,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT,
++ .procname = "ip_conntrack_icmp_timeout",
++ .data = &ip_ct_icmp_timeout,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ {
++ .ctl_name = NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT,
++ .procname = "ip_conntrack_generic_timeout",
++ .data = &ip_ct_generic_timeout,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_jiffies,
++ },
++ { .ctl_name = 0 }
++};
++
++#define NET_IP_CONNTRACK_MAX 2089
++
++static ctl_table ip_ct_netfilter_table[] = {
++ {
++ .ctl_name = NET_IPV4_NETFILTER,
++ .procname = "netfilter",
++ .mode = 0555,
++ .child = ip_ct_sysctl_table,
++ },
++ {
++ .ctl_name = NET_IP_CONNTRACK_MAX,
++ .procname = "ip_conntrack_max",
++ .data = &ip_conntrack_max,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ { .ctl_name = 0 }
++};
++
++static ctl_table ip_ct_ipv4_table[] = {
++ {
++ .ctl_name = NET_IPV4,
++ .procname = "ipv4",
++ .mode = 0555,
++ .child = ip_ct_netfilter_table,
++ },
++ { .ctl_name = 0 }
++};
++
++static ctl_table ip_ct_net_table[] = {
++ {
++ .ctl_name = CTL_NET,
++ .procname = "net",
++ .mode = 0555,
++ .child = ip_ct_ipv4_table,
++ },
++ { .ctl_name = 0 }
++};
++#endif
++static int init_or_cleanup(int init)
++{
++ struct proc_dir_entry *proc;
++ int ret = 0;
++
++ if (!init) goto cleanup;
++
++ ret = ip_conntrack_init();
++ if (ret < 0)
++ goto cleanup_nothing;
++
++ proc = proc_net_create("ip_conntrack",0,list_conntracks);
++ if (!proc) goto cleanup_init;
++ proc->owner = THIS_MODULE;
++
++ ret = nf_register_hook(&ip_conntrack_in_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register pre-routing hook.\n");
++ goto cleanup_proc;
++ }
++ ret = nf_register_hook(&ip_conntrack_local_out_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register local out hook.\n");
++ goto cleanup_inops;
++ }
++ ret = nf_register_hook(&ip_conntrack_out_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register post-routing hook.\n");
++ goto cleanup_inandlocalops;
++ }
++ ret = nf_register_hook(&ip_conntrack_local_in_ops);
++ if (ret < 0) {
++ printk("ip_conntrack: can't register local in hook.\n");
++ goto cleanup_inoutandlocalops;
++ }
++#ifdef CONFIG_SYSCTL
++ ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
++ if (ip_ct_sysctl_header == NULL) {
++ printk("ip_conntrack: can't register to sysctl.\n");
++ goto cleanup;
++ }
++#endif
++
++ return ret;
++
++ cleanup:
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(ip_ct_sysctl_header);
++#endif
++ nf_unregister_hook(&ip_conntrack_local_in_ops);
++ cleanup_inoutandlocalops:
++ nf_unregister_hook(&ip_conntrack_out_ops);
++ cleanup_inandlocalops:
++ nf_unregister_hook(&ip_conntrack_local_out_ops);
++ cleanup_inops:
++ nf_unregister_hook(&ip_conntrack_in_ops);
++ cleanup_proc:
++ proc_net_remove("ip_conntrack");
++ cleanup_init:
++ ip_conntrack_cleanup();
++ cleanup_nothing:
++ return ret;
++}
++
++/* FIXME: Allow NULL functions and sub in pointers to generic for
++ them. --RR */
++int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
++{
++ int ret = 0;
++ struct list_head *i;
++
++ WRITE_LOCK(&ip_conntrack_lock);
++ list_for_each(i, &protocol_list) {
++ if (((struct ip_conntrack_protocol *)i)->proto
++ == proto->proto) {
++ ret = -EBUSY;
++ goto out;
++ }
++ }
++
++ list_prepend(&protocol_list, proto);
++
++ out:
++ WRITE_UNLOCK(&ip_conntrack_lock);
++ return ret;
++}
++
++void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
++{
++ WRITE_LOCK(&ip_conntrack_lock);
++
++ /* ip_ct_find_proto() returns proto_generic in case there is no protocol
++ * helper. So this should be enough - HW */
++ LIST_DELETE(&protocol_list, proto);
++ WRITE_UNLOCK(&ip_conntrack_lock);
++
++ /* Somebody could be still looking at the proto in bh. */
++ synchronize_net();
++
++ /* Remove all conntrack entries for this protocol */
++ ip_ct_selective_cleanup(kill_proto, &proto->proto);
++}
++
++static int __init init(void)
++{
++ return init_or_cleanup(1);
++}
++
++static void __exit fini(void)
++{
++ init_or_cleanup(0);
++}
++
++module_init(init);
++module_exit(fini);
++
++/* Some modules need us, but don't depend directly on any symbol.
++ They should call this. */
++void need_ip_conntrack(void)
++{
++}
++
++EXPORT_SYMBOL(ip_conntrack_protocol_register);
++EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
++EXPORT_SYMBOL(invert_tuplepr);
++EXPORT_SYMBOL(ip_conntrack_alter_reply);
++EXPORT_SYMBOL(ip_conntrack_destroyed);
++EXPORT_SYMBOL(ip_conntrack_get);
++EXPORT_SYMBOL(need_ip_conntrack);
++EXPORT_SYMBOL(ip_conntrack_helper_register);
++EXPORT_SYMBOL(ip_conntrack_helper_unregister);
++EXPORT_SYMBOL(ip_ct_selective_cleanup);
++EXPORT_SYMBOL(ip_ct_refresh);
++EXPORT_SYMBOL(ip_ct_find_proto);
++EXPORT_SYMBOL(__ip_ct_find_proto);
++EXPORT_SYMBOL(ip_ct_find_helper);
++EXPORT_SYMBOL(ip_conntrack_expect_related);
++EXPORT_SYMBOL(ip_conntrack_change_expect);
++EXPORT_SYMBOL(ip_conntrack_unexpect_related);
++EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
++EXPORT_SYMBOL_GPL(ip_conntrack_expect_put);
++EXPORT_SYMBOL(ip_conntrack_tuple_taken);
++EXPORT_SYMBOL(ip_ct_gather_frags);
++EXPORT_SYMBOL(ip_conntrack_htable_size);
++EXPORT_SYMBOL(ip_conntrack_expect_list);
++EXPORT_SYMBOL(ip_conntrack_lock);
++EXPORT_SYMBOL(ip_conntrack_hash);
++EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
++EXPORT_SYMBOL_GPL(ip_conntrack_put);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_nat_core.c linux-2.6.3/net/ipv4/netfilter/ip_nat_core.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_nat_core.c 2004-02-18 04:57:16.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_nat_core.c 2004-02-27 00:03:14.483026424 +0100
+@@ -1016,6 +1016,10 @@
+ /* FIXME: Man, this is a hack. <SIGH> */
+ IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
+ ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
++
++ /* Initialize fake conntrack so that NAT will skip it */
++ ip_conntrack_untracked.nat.info.initialized |=
++ (1 << IP_NAT_MANIP_SRC) | (1 << IP_NAT_MANIP_DST);
+
+ return 0;
+ }
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ip_nat_core.c.orig linux-2.6.3/net/ipv4/netfilter/ip_nat_core.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ip_nat_core.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ip_nat_core.c.orig 2004-02-18 04:57:16.000000000 +0100
+@@ -0,0 +1,1036 @@
++/* NAT for netfilter; shared with compatibility layer. */
++
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/timer.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/vmalloc.h>
++#include <net/checksum.h>
++#include <net/icmp.h>
++#include <net/ip.h>
++#include <net/tcp.h> /* For tcp_prot in getorigdst */
++#include <linux/icmp.h>
++#include <linux/udp.h>
++
++#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock)
++#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock)
++
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_conntrack_core.h>
++#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
++#include <linux/netfilter_ipv4/ip_nat.h>
++#include <linux/netfilter_ipv4/ip_nat_protocol.h>
++#include <linux/netfilter_ipv4/ip_nat_core.h>
++#include <linux/netfilter_ipv4/ip_nat_helper.h>
++#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++DECLARE_RWLOCK(ip_nat_lock);
++DECLARE_RWLOCK_EXTERN(ip_conntrack_lock);
++
++/* Calculated at init based on memory size */
++static unsigned int ip_nat_htable_size;
++
++static struct list_head *bysource;
++static struct list_head *byipsproto;
++LIST_HEAD(protos);
++LIST_HEAD(helpers);
++
++extern struct ip_nat_protocol unknown_nat_protocol;
++
++/* We keep extra hashes for each conntrack, for fast searching. */
++static inline size_t
++hash_by_ipsproto(u_int32_t src, u_int32_t dst, u_int16_t proto)
++{
++ /* Modified src and dst, to ensure we don't create two
++ identical streams. */
++ return (src + dst + proto) % ip_nat_htable_size;
++}
++
++static inline size_t
++hash_by_src(const struct ip_conntrack_manip *manip, u_int16_t proto)
++{
++ /* Original src, to ensure we map it consistently if poss. */
++ return (manip->ip + manip->u.all + proto) % ip_nat_htable_size;
++}
++
++/* No one is using the conntrack by the time this is called. */
++static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn)
++{
++ struct ip_nat_info *info = &conn->nat.info;
++ unsigned int hs, hp;
++
++ if (!info->initialized)
++ return;
++
++ IP_NF_ASSERT(info->bysource.conntrack);
++ IP_NF_ASSERT(info->byipsproto.conntrack);
++
++ hs = hash_by_src(&conn->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src,
++ conn->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.protonum);
++
++ hp = hash_by_ipsproto(conn->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip,
++ conn->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip,
++ conn->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.dst.protonum);
++
++ WRITE_LOCK(&ip_nat_lock);
++ LIST_DELETE(&bysource[hs], &info->bysource);
++ LIST_DELETE(&byipsproto[hp], &info->byipsproto);
++ WRITE_UNLOCK(&ip_nat_lock);
++}
++
++/* We do checksum mangling, so if they were wrong before they're still
++ * wrong. Also works for incomplete packets (eg. ICMP dest
++ * unreachables.) */
++u_int16_t
++ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
++{
++ u_int32_t diffs[] = { oldvalinv, newval };
++ return csum_fold(csum_partial((char *)diffs, sizeof(diffs),
++ oldcheck^0xFFFF));
++}
++
++static inline int cmp_proto(const struct ip_nat_protocol *i, int proto)
++{
++ return i->protonum == proto;
++}
++
++struct ip_nat_protocol *
++find_nat_proto(u_int16_t protonum)
++{
++ struct ip_nat_protocol *i;
++
++ MUST_BE_READ_LOCKED(&ip_nat_lock);
++ i = LIST_FIND(&protos, cmp_proto, struct ip_nat_protocol *, protonum);
++ if (!i)
++ i = &unknown_nat_protocol;
++ return i;
++}
++
++/* Is this tuple already taken? (not by us) */
++int
++ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack *ignored_conntrack)
++{
++ /* Conntrack tracking doesn't keep track of outgoing tuples; only
++ incoming ones. NAT means they don't have a fixed mapping,
++ so we invert the tuple and look for the incoming reply.
++
++ We could keep a separate hash if this proves too slow. */
++ struct ip_conntrack_tuple reply;
++
++ invert_tuplepr(&reply, tuple);
++ return ip_conntrack_tuple_taken(&reply, ignored_conntrack);
++}
++
++/* Does tuple + the source manip come within the range mr */
++static int
++in_range(const struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack_manip *manip,
++ const struct ip_nat_multi_range *mr)
++{
++ struct ip_nat_protocol *proto = find_nat_proto(tuple->dst.protonum);
++ unsigned int i;
++ struct ip_conntrack_tuple newtuple = { *manip, tuple->dst };
++
++ for (i = 0; i < mr->rangesize; i++) {
++ /* If we are allowed to map IPs, then we must be in the
++ range specified, otherwise we must be unchanged. */
++ if (mr->range[i].flags & IP_NAT_RANGE_MAP_IPS) {
++ if (ntohl(newtuple.src.ip) < ntohl(mr->range[i].min_ip)
++ || (ntohl(newtuple.src.ip)
++ > ntohl(mr->range[i].max_ip)))
++ continue;
++ } else {
++ if (newtuple.src.ip != tuple->src.ip)
++ continue;
++ }
++
++ if (!(mr->range[i].flags & IP_NAT_RANGE_PROTO_SPECIFIED)
++ || proto->in_range(&newtuple, IP_NAT_MANIP_SRC,
++ &mr->range[i].min, &mr->range[i].max))
++ return 1;
++ }
++ return 0;
++}
++
++static inline int
++src_cmp(const struct ip_nat_hash *i,
++ const struct ip_conntrack_tuple *tuple,
++ const struct ip_nat_multi_range *mr)
++{
++ return (i->conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum
++ == tuple->dst.protonum
++ && i->conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip
++ == tuple->src.ip
++ && i->conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all
++ == tuple->src.u.all
++ && in_range(tuple,
++ &i->conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.src,
++ mr));
++}
++
++/* Only called for SRC manip */
++static struct ip_conntrack_manip *
++find_appropriate_src(const struct ip_conntrack_tuple *tuple,
++ const struct ip_nat_multi_range *mr)
++{
++ unsigned int h = hash_by_src(&tuple->src, tuple->dst.protonum);
++ struct ip_nat_hash *i;
++
++ MUST_BE_READ_LOCKED(&ip_nat_lock);
++ i = LIST_FIND(&bysource[h], src_cmp, struct ip_nat_hash *, tuple, mr);
++ if (i)
++ return &i->conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src;
++ else
++ return NULL;
++}
++
++#ifdef CONFIG_IP_NF_NAT_LOCAL
++/* If it's really a local destination manip, it may need to do a
++ source manip too. */
++static int
++do_extra_mangle(u_int32_t var_ip, u_int32_t *other_ipp)
++{
++ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = var_ip } } };
++ struct rtable *rt;
++
++ /* FIXME: IPTOS_TOS(iph->tos) --RR */
++ if (ip_route_output_key(&rt, &fl) != 0) {
++ DEBUGP("do_extra_mangle: Can't get route to %u.%u.%u.%u\n",
++ NIPQUAD(var_ip));
++ return 0;
++ }
++
++ *other_ipp = rt->rt_src;
++ ip_rt_put(rt);
++ return 1;
++}
++#endif
++
++/* Simple way to iterate through all. */
++static inline int fake_cmp(const struct ip_nat_hash *i,
++ u_int32_t src, u_int32_t dst, u_int16_t protonum,
++ unsigned int *score,
++ const struct ip_conntrack *conntrack)
++{
++ /* Compare backwards: we're dealing with OUTGOING tuples, and
++ inside the conntrack is the REPLY tuple. Don't count this
++ conntrack. */
++ if (i->conntrack != conntrack
++ && i->conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip == dst
++ && i->conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip == src
++ && (i->conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum
++ == protonum))
++ (*score)++;
++ return 0;
++}
++
++static inline unsigned int
++count_maps(u_int32_t src, u_int32_t dst, u_int16_t protonum,
++ const struct ip_conntrack *conntrack)
++{
++ unsigned int score = 0;
++ unsigned int h;
++
++ MUST_BE_READ_LOCKED(&ip_nat_lock);
++ h = hash_by_ipsproto(src, dst, protonum);
++ LIST_FIND(&byipsproto[h], fake_cmp, struct ip_nat_hash *,
++ src, dst, protonum, &score, conntrack);
++
++ return score;
++}
++
++/* For [FUTURE] fragmentation handling, we want the least-used
++ src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
++ if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
++ 1-65535, we don't do pro-rata allocation based on ports; we choose
++ the ip with the lowest src-ip/dst-ip/proto usage.
++
++ If an allocation then fails (eg. all 6 ports used in the 1.2.3.4
++ range), we eliminate that and try again. This is not the most
++ efficient approach, but if you're worried about that, don't hand us
++ ranges you don't really have. */
++static struct ip_nat_range *
++find_best_ips_proto(struct ip_conntrack_tuple *tuple,
++ const struct ip_nat_multi_range *mr,
++ const struct ip_conntrack *conntrack,
++ unsigned int hooknum)
++{
++ unsigned int i;
++ struct {
++ const struct ip_nat_range *range;
++ unsigned int score;
++ struct ip_conntrack_tuple tuple;
++ } best = { NULL, 0xFFFFFFFF };
++ u_int32_t *var_ipp, *other_ipp, saved_ip, orig_dstip;
++ static unsigned int randomness;
++
++ if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC) {
++ var_ipp = &tuple->src.ip;
++ saved_ip = tuple->dst.ip;
++ other_ipp = &tuple->dst.ip;
++ } else {
++ var_ipp = &tuple->dst.ip;
++ saved_ip = tuple->src.ip;
++ other_ipp = &tuple->src.ip;
++ }
++ /* Don't do do_extra_mangle unless necessary (overrides
++ explicit socket bindings, for example) */
++ orig_dstip = tuple->dst.ip;
++
++ IP_NF_ASSERT(mr->rangesize >= 1);
++ for (i = 0; i < mr->rangesize; i++) {
++ /* Host order */
++ u_int32_t minip, maxip, j;
++
++ /* Don't do ranges which are already eliminated. */
++ if (mr->range[i].flags & IP_NAT_RANGE_FULL) {
++ continue;
++ }
++
++ if (mr->range[i].flags & IP_NAT_RANGE_MAP_IPS) {
++ minip = ntohl(mr->range[i].min_ip);
++ maxip = ntohl(mr->range[i].max_ip);
++ } else
++ minip = maxip = ntohl(*var_ipp);
++
++ randomness++;
++ for (j = 0; j < maxip - minip + 1; j++) {
++ unsigned int score;
++
++ *var_ipp = htonl(minip + (randomness + j)
++ % (maxip - minip + 1));
++
++ /* Reset the other ip in case it was mangled by
++ * do_extra_mangle last time. */
++ *other_ipp = saved_ip;
++
++#ifdef CONFIG_IP_NF_NAT_LOCAL
++ if (hooknum == NF_IP_LOCAL_OUT
++ && *var_ipp != orig_dstip
++ && !do_extra_mangle(*var_ipp, other_ipp)) {
++ DEBUGP("Range %u %u.%u.%u.%u rt failed!\n",
++ i, NIPQUAD(*var_ipp));
++ /* Can't route? This whole range part is
++ * probably screwed, but keep trying
++ * anyway. */
++ continue;
++ }
++#endif
++
++ /* Count how many others map onto this. */
++ score = count_maps(tuple->src.ip, tuple->dst.ip,
++ tuple->dst.protonum, conntrack);
++ if (score < best.score) {
++ /* Optimization: doesn't get any better than
++ this. */
++ if (score == 0)
++ return (struct ip_nat_range *)
++ &mr->range[i];
++
++ best.score = score;
++ best.tuple = *tuple;
++ best.range = &mr->range[i];
++ }
++ }
++ }
++ *tuple = best.tuple;
++
++ /* Discard const. */
++ return (struct ip_nat_range *)best.range;
++}
++
++/* Fast version doesn't iterate through hash chains, but only handles
++ common case of single IP address (null NAT, masquerade) */
++static struct ip_nat_range *
++find_best_ips_proto_fast(struct ip_conntrack_tuple *tuple,
++ const struct ip_nat_multi_range *mr,
++ const struct ip_conntrack *conntrack,
++ unsigned int hooknum)
++{
++ if (mr->rangesize != 1
++ || (mr->range[0].flags & IP_NAT_RANGE_FULL)
++ || ((mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
++ && mr->range[0].min_ip != mr->range[0].max_ip))
++ return find_best_ips_proto(tuple, mr, conntrack, hooknum);
++
++ if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
++ if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC)
++ tuple->src.ip = mr->range[0].min_ip;
++ else {
++ /* Only do extra mangle when required (breaks
++ socket binding) */
++#ifdef CONFIG_IP_NF_NAT_LOCAL
++ if (tuple->dst.ip != mr->range[0].min_ip
++ && hooknum == NF_IP_LOCAL_OUT
++ && !do_extra_mangle(mr->range[0].min_ip,
++ &tuple->src.ip))
++ return NULL;
++#endif
++ tuple->dst.ip = mr->range[0].min_ip;
++ }
++ }
++
++ /* Discard const. */
++ return (struct ip_nat_range *)&mr->range[0];
++}
++
++static int
++get_unique_tuple(struct ip_conntrack_tuple *tuple,
++ const struct ip_conntrack_tuple *orig_tuple,
++ const struct ip_nat_multi_range *mrr,
++ struct ip_conntrack *conntrack,
++ unsigned int hooknum)
++{
++ struct ip_nat_protocol *proto
++ = find_nat_proto(orig_tuple->dst.protonum);
++ struct ip_nat_range *rptr;
++ unsigned int i;
++ int ret;
++
++ /* We temporarily use flags for marking full parts, but we
++ always clean up afterwards */
++ struct ip_nat_multi_range *mr = (void *)mrr;
++
++ /* 1) If this srcip/proto/src-proto-part is currently mapped,
++ and that same mapping gives a unique tuple within the given
++ range, use that.
++
++ This is only required for source (ie. NAT/masq) mappings.
++ So far, we don't do local source mappings, so multiple
++ manips not an issue. */
++ if (hooknum == NF_IP_POST_ROUTING) {
++ struct ip_conntrack_manip *manip;
++
++ manip = find_appropriate_src(orig_tuple, mr);
++ if (manip) {
++ /* Apply same source manipulation. */
++ *tuple = ((struct ip_conntrack_tuple)
++ { *manip, orig_tuple->dst });
++ DEBUGP("get_unique_tuple: Found current src map\n");
++ if (!ip_nat_used_tuple(tuple, conntrack))
++ return 1;
++ }
++ }
++
++ /* 2) Select the least-used IP/proto combination in the given
++ range.
++ */
++ *tuple = *orig_tuple;
++ while ((rptr = find_best_ips_proto_fast(tuple, mr, conntrack, hooknum))
++ != NULL) {
++ DEBUGP("Found best for "); DUMP_TUPLE(tuple);
++ /* 3) The per-protocol part of the manip is made to
++ map into the range to make a unique tuple. */
++
++ /* Only bother mapping if it's not already in range
++ and unique */
++ if ((!(rptr->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
++ || proto->in_range(tuple, HOOK2MANIP(hooknum),
++ &rptr->min, &rptr->max))
++ && !ip_nat_used_tuple(tuple, conntrack)) {
++ ret = 1;
++ goto clear_fulls;
++ } else {
++ if (proto->unique_tuple(tuple, rptr,
++ HOOK2MANIP(hooknum),
++ conntrack)) {
++ /* Must be unique. */
++ IP_NF_ASSERT(!ip_nat_used_tuple(tuple,
++ conntrack));
++ ret = 1;
++ goto clear_fulls;
++ } else if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) {
++ /* Try implicit source NAT; protocol
++ may be able to play with ports to
++ make it unique. */
++ struct ip_nat_range r
++ = { IP_NAT_RANGE_MAP_IPS,
++ tuple->src.ip, tuple->src.ip,
++ { 0 }, { 0 } };
++ DEBUGP("Trying implicit mapping\n");
++ if (proto->unique_tuple(tuple, &r,
++ IP_NAT_MANIP_SRC,
++ conntrack)) {
++ /* Must be unique. */
++ IP_NF_ASSERT(!ip_nat_used_tuple
++ (tuple, conntrack));
++ ret = 1;
++ goto clear_fulls;
++ }
++ }
++ DEBUGP("Protocol can't get unique tuple %u.\n",
++ hooknum);
++ }
++
++ /* Eliminate that from range, and try again. */
++ rptr->flags |= IP_NAT_RANGE_FULL;
++ *tuple = *orig_tuple;
++ }
++
++ ret = 0;
++
++ clear_fulls:
++ /* Clear full flags. */
++ IP_NF_ASSERT(mr->rangesize >= 1);
++ for (i = 0; i < mr->rangesize; i++)
++ mr->range[i].flags &= ~IP_NAT_RANGE_FULL;
++
++ return ret;
++}
++
++static inline int
++helper_cmp(const struct ip_nat_helper *helper,
++ const struct ip_conntrack_tuple *tuple)
++{
++ return ip_ct_tuple_mask_cmp(tuple, &helper->tuple, &helper->mask);
++}
++
++/* Where to manip the reply packets (will be reverse manip). */
++static unsigned int opposite_hook[NF_IP_NUMHOOKS]
++= { [NF_IP_PRE_ROUTING] = NF_IP_POST_ROUTING,
++ [NF_IP_POST_ROUTING] = NF_IP_PRE_ROUTING,
++#ifdef CONFIG_IP_NF_NAT_LOCAL
++ [NF_IP_LOCAL_OUT] = NF_IP_LOCAL_IN,
++ [NF_IP_LOCAL_IN] = NF_IP_LOCAL_OUT,
++#endif
++};
++
++unsigned int
++ip_nat_setup_info(struct ip_conntrack *conntrack,
++ const struct ip_nat_multi_range *mr,
++ unsigned int hooknum)
++{
++ struct ip_conntrack_tuple new_tuple, inv_tuple, reply;
++ struct ip_conntrack_tuple orig_tp;
++ struct ip_nat_info *info = &conntrack->nat.info;
++ int in_hashes = info->initialized;
++
++ MUST_BE_WRITE_LOCKED(&ip_nat_lock);
++ IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
++ || hooknum == NF_IP_POST_ROUTING
++ || hooknum == NF_IP_LOCAL_OUT);
++ IP_NF_ASSERT(info->num_manips < IP_NAT_MAX_MANIPS);
++ IP_NF_ASSERT(!(info->initialized & (1 << HOOK2MANIP(hooknum))));
++
++ /* What we've got will look like inverse of reply. Normally
++ this is what is in the conntrack, except for prior
++ manipulations (future optimization: if num_manips == 0,
++ orig_tp =
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
++ invert_tuplepr(&orig_tp,
++ &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple);
++
++#if 0
++ {
++ unsigned int i;
++
++ DEBUGP("Hook %u (%s), ", hooknum,
++ HOOK2MANIP(hooknum)==IP_NAT_MANIP_SRC ? "SRC" : "DST");
++ DUMP_TUPLE(&orig_tp);
++ DEBUGP("Range %p: ", mr);
++ for (i = 0; i < mr->rangesize; i++) {
++ DEBUGP("%u:%s%s%s %u.%u.%u.%u - %u.%u.%u.%u %u - %u\n",
++ i,
++ (mr->range[i].flags & IP_NAT_RANGE_MAP_IPS)
++ ? " MAP_IPS" : "",
++ (mr->range[i].flags
++ & IP_NAT_RANGE_PROTO_SPECIFIED)
++ ? " PROTO_SPECIFIED" : "",
++ (mr->range[i].flags & IP_NAT_RANGE_FULL)
++ ? " FULL" : "",
++ NIPQUAD(mr->range[i].min_ip),
++ NIPQUAD(mr->range[i].max_ip),
++ mr->range[i].min.all,
++ mr->range[i].max.all);
++ }
++ }
++#endif
++
++ do {
++ if (!get_unique_tuple(&new_tuple, &orig_tp, mr, conntrack,
++ hooknum)) {
++ DEBUGP("ip_nat_setup_info: Can't get unique for %p.\n",
++ conntrack);
++ return NF_DROP;
++ }
++
++#if 0
++ DEBUGP("Hook %u (%s) %p\n", hooknum,
++ HOOK2MANIP(hooknum)==IP_NAT_MANIP_SRC ? "SRC" : "DST",
++ conntrack);
++ DEBUGP("Original: ");
++ DUMP_TUPLE(&orig_tp);
++ DEBUGP("New: ");
++ DUMP_TUPLE(&new_tuple);
++#endif
++
++ /* We now have two tuples (SRCIP/SRCPT/DSTIP/DSTPT):
++ the original (A/B/C/D') and the mangled one (E/F/G/H').
++
++ We're only allowed to work with the SRC per-proto
++ part, so we create inverses of both to start, then
++ derive the other fields we need. */
++
++ /* Reply connection: simply invert the new tuple
++ (G/H/E/F') */
++ invert_tuplepr(&reply, &new_tuple);
++
++ /* Alter conntrack table so it recognizes replies.
++ If fail this race (reply tuple now used), repeat. */
++ } while (!ip_conntrack_alter_reply(conntrack, &reply));
++
++ /* FIXME: We can simply used existing conntrack reply tuple
++ here --RR */
++ /* Create inverse of original: C/D/A/B' */
++ invert_tuplepr(&inv_tuple, &orig_tp);
++
++ /* Has source changed?. */
++ if (!ip_ct_tuple_src_equal(&new_tuple, &orig_tp)) {
++ /* In this direction, a source manip. */
++ info->manips[info->num_manips++] =
++ ((struct ip_nat_info_manip)
++ { IP_CT_DIR_ORIGINAL, hooknum,
++ IP_NAT_MANIP_SRC, new_tuple.src });
++
++ IP_NF_ASSERT(info->num_manips < IP_NAT_MAX_MANIPS);
++
++ /* In the reverse direction, a destination manip. */
++ info->manips[info->num_manips++] =
++ ((struct ip_nat_info_manip)
++ { IP_CT_DIR_REPLY, opposite_hook[hooknum],
++ IP_NAT_MANIP_DST, orig_tp.src });
++ IP_NF_ASSERT(info->num_manips <= IP_NAT_MAX_MANIPS);
++ }
++
++ /* Has destination changed? */
++ if (!ip_ct_tuple_dst_equal(&new_tuple, &orig_tp)) {
++ /* In this direction, a destination manip */
++ info->manips[info->num_manips++] =
++ ((struct ip_nat_info_manip)
++ { IP_CT_DIR_ORIGINAL, hooknum,
++ IP_NAT_MANIP_DST, reply.src });
++
++ IP_NF_ASSERT(info->num_manips < IP_NAT_MAX_MANIPS);
++
++ /* In the reverse direction, a source manip. */
++ info->manips[info->num_manips++] =
++ ((struct ip_nat_info_manip)
++ { IP_CT_DIR_REPLY, opposite_hook[hooknum],
++ IP_NAT_MANIP_SRC, inv_tuple.src });
++ IP_NF_ASSERT(info->num_manips <= IP_NAT_MAX_MANIPS);
++ }
++
++ /* If there's a helper, assign it; based on new tuple. */
++ if (!conntrack->master)
++ info->helper = LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *,
++ &reply);
++
++ /* It's done. */
++ info->initialized |= (1 << HOOK2MANIP(hooknum));
++
++ if (in_hashes) {
++ IP_NF_ASSERT(info->bysource.conntrack);
++ replace_in_hashes(conntrack, info);
++ } else {
++ place_in_hashes(conntrack, info);
++ }
++
++ return NF_ACCEPT;
++}
++
++void replace_in_hashes(struct ip_conntrack *conntrack,
++ struct ip_nat_info *info)
++{
++ /* Source has changed, so replace in hashes. */
++ unsigned int srchash
++ = hash_by_src(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.src,
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.protonum);
++ /* We place packet as seen OUTGOING in byips_proto hash
++ (ie. reverse dst and src of reply packet). */
++ unsigned int ipsprotohash
++ = hash_by_ipsproto(conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.dst.ip,
++ conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.src.ip,
++ conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.dst.protonum);
++
++ IP_NF_ASSERT(info->bysource.conntrack == conntrack);
++ MUST_BE_WRITE_LOCKED(&ip_nat_lock);
++
++ list_del(&info->bysource.list);
++ list_del(&info->byipsproto.list);
++
++ list_prepend(&bysource[srchash], &info->bysource);
++ list_prepend(&byipsproto[ipsprotohash], &info->byipsproto);
++}
++
++void place_in_hashes(struct ip_conntrack *conntrack,
++ struct ip_nat_info *info)
++{
++ unsigned int srchash
++ = hash_by_src(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.src,
++ conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
++ .tuple.dst.protonum);
++ /* We place packet as seen OUTGOING in byips_proto hash
++ (ie. reverse dst and src of reply packet). */
++ unsigned int ipsprotohash
++ = hash_by_ipsproto(conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.dst.ip,
++ conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.src.ip,
++ conntrack->tuplehash[IP_CT_DIR_REPLY]
++ .tuple.dst.protonum);
++
++ IP_NF_ASSERT(!info->bysource.conntrack);
++
++ MUST_BE_WRITE_LOCKED(&ip_nat_lock);
++ info->byipsproto.conntrack = conntrack;
++ info->bysource.conntrack = conntrack;
++
++ list_prepend(&bysource[srchash], &info->bysource);
++ list_prepend(&byipsproto[ipsprotohash], &info->byipsproto);
++}
++
++/* Returns true if succeeded. */
++static int
++manip_pkt(u_int16_t proto,
++ struct sk_buff **pskb,
++ unsigned int iphdroff,
++ const struct ip_conntrack_manip *manip,
++ enum ip_nat_manip_type maniptype)
++{
++ struct iphdr *iph;
++
++ (*pskb)->nfcache |= NFC_ALTERED;
++ if (!skb_ip_make_writable(pskb, iphdroff+sizeof(iph)))
++ return 0;
++
++ iph = (void *)(*pskb)->data + iphdroff;
++
++ /* Manipulate protocol part. */
++ if (!find_nat_proto(proto)->manip_pkt(pskb,
++ iphdroff + iph->ihl*4,
++ manip, maniptype))
++ return 0;
++
++ iph = (void *)(*pskb)->data + iphdroff;
++
++ if (maniptype == IP_NAT_MANIP_SRC) {
++ iph->check = ip_nat_cheat_check(~iph->saddr, manip->ip,
++ iph->check);
++ iph->saddr = manip->ip;
++ } else {
++ iph->check = ip_nat_cheat_check(~iph->daddr, manip->ip,
++ iph->check);
++ iph->daddr = manip->ip;
++ }
++ return 1;
++}
++
++static inline int exp_for_packet(struct ip_conntrack_expect *exp,
++ struct sk_buff *skb)
++{
++ struct ip_conntrack_protocol *proto;
++ int ret = 1;
++
++ MUST_BE_READ_LOCKED(&ip_conntrack_lock);
++ proto = __ip_ct_find_proto(skb->nh.iph->protocol);
++ if (proto->exp_matches_pkt)
++ ret = proto->exp_matches_pkt(exp, skb);
++
++ return ret;
++}
++
++/* Do packet manipulations according to binding. */
++unsigned int
++do_bindings(struct ip_conntrack *ct,
++ enum ip_conntrack_info ctinfo,
++ struct ip_nat_info *info,
++ unsigned int hooknum,
++ struct sk_buff **pskb)
++{
++ unsigned int i;
++ struct ip_nat_helper *helper;
++ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ int proto = (*pskb)->nh.iph->protocol;
++
++ /* Need nat lock to protect against modification, but neither
++ conntrack (referenced) and helper (deleted with
++ synchronize_bh()) can vanish. */
++ READ_LOCK(&ip_nat_lock);
++ for (i = 0; i < info->num_manips; i++) {
++ if (info->manips[i].direction == dir
++ && info->manips[i].hooknum == hooknum) {
++ DEBUGP("Mangling %p: %s to %u.%u.%u.%u %u\n",
++ *pskb,
++ info->manips[i].maniptype == IP_NAT_MANIP_SRC
++ ? "SRC" : "DST",
++ NIPQUAD(info->manips[i].manip.ip),
++ htons(info->manips[i].manip.u.all));
++ if (!manip_pkt(proto, pskb, 0,
++ &info->manips[i].manip,
++ info->manips[i].maniptype)) {
++ READ_UNLOCK(&ip_nat_lock);
++ return NF_DROP;
++ }
++ }
++ }
++ helper = info->helper;
++ READ_UNLOCK(&ip_nat_lock);
++
++ if (helper) {
++ struct ip_conntrack_expect *exp = NULL;
++ struct list_head *cur_item;
++ int ret = NF_ACCEPT;
++ int helper_called = 0;
++
++ DEBUGP("do_bindings: helper existing for (%p)\n", ct);
++
++ /* Always defragged for helpers */
++ IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
++ & htons(IP_MF|IP_OFFSET)));
++
++ /* Have to grab read lock before sibling_list traversal */
++ READ_LOCK(&ip_conntrack_lock);
++ list_for_each(cur_item, &ct->sibling_list) {
++ exp = list_entry(cur_item, struct ip_conntrack_expect,
++ expected_list);
++
++ /* if this expectation is already established, skip */
++ if (exp->sibling)
++ continue;
++
++ if (exp_for_packet(exp, *pskb)) {
++ /* FIXME: May be true multiple times in the
++ * case of UDP!! */
++ DEBUGP("calling nat helper (exp=%p) for packet\n", exp);
++ ret = helper->help(ct, exp, info, ctinfo,
++ hooknum, pskb);
++ if (ret != NF_ACCEPT) {
++ READ_UNLOCK(&ip_conntrack_lock);
++ return ret;
++ }
++ helper_called = 1;
++ }
++ }
++ /* Helper might want to manip the packet even when there is no
++ * matching expectation for this packet */
++ if (!helper_called && helper->flags & IP_NAT_HELPER_F_ALWAYS) {
++ DEBUGP("calling nat helper for packet without expectation\n");
++ ret = helper->help(ct, NULL, info, ctinfo,
++ hooknum, pskb);
++ if (ret != NF_ACCEPT) {
++ READ_UNLOCK(&ip_conntrack_lock);
++ return ret;
++ }
++ }
++ READ_UNLOCK(&ip_conntrack_lock);
++
++ /* Adjust sequence number only once per packet
++ * (helper is called at all hooks) */
++ if (proto == IPPROTO_TCP
++ && (hooknum == NF_IP_POST_ROUTING
++ || hooknum == NF_IP_LOCAL_IN)) {
++ DEBUGP("ip_nat_core: adjusting sequence number\n");
++ /* future: put this in a l4-proto specific function,
++ * and call this function here. */
++ if (!ip_nat_seq_adjust(pskb, ct, ctinfo))
++ ret = NF_DROP;
++ }
++
++ return ret;
++
++ } else
++ return NF_ACCEPT;
++
++ /* not reached */
++}
++
++int
++icmp_reply_translation(struct sk_buff **pskb,
++ struct ip_conntrack *conntrack,
++ unsigned int hooknum,
++ int dir)
++{
++ struct {
++ struct icmphdr icmp;
++ struct iphdr ip;
++ } *inside;
++ unsigned int i;
++ struct ip_nat_info *info = &conntrack->nat.info;
++ int hdrlen;
++
++ if (!skb_ip_make_writable(pskb,(*pskb)->nh.iph->ihl*4+sizeof(*inside)))
++ return 0;
++ inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
++
++ /* We're actually going to mangle it beyond trivial checksum
++ adjustment, so make sure the current checksum is correct. */
++ if ((*pskb)->ip_summed != CHECKSUM_UNNECESSARY) {
++ hdrlen = (*pskb)->nh.iph->ihl * 4;
++ if ((u16)csum_fold(skb_checksum(*pskb, hdrlen,
++ (*pskb)->len - hdrlen, 0)))
++ return 0;
++ }
++
++ /* Must be RELATED */
++ IP_NF_ASSERT((*pskb)->nfct
++ - (struct ip_conntrack *)(*pskb)->nfct->master
++ == IP_CT_RELATED
++ || (*pskb)->nfct
++ - (struct ip_conntrack *)(*pskb)->nfct->master
++ == IP_CT_RELATED+IP_CT_IS_REPLY);
++
++ /* Redirects on non-null nats must be dropped, else they'll
++ start talking to each other without our translation, and be
++ confused... --RR */
++ if (inside->icmp.type == ICMP_REDIRECT) {
++ /* Don't care about races here. */
++ if (info->initialized
++ != ((1 << IP_NAT_MANIP_SRC) | (1 << IP_NAT_MANIP_DST))
++ || info->num_manips != 0)
++ return 0;
++ }
++
++ DEBUGP("icmp_reply_translation: translating error %p hook %u dir %s\n",
++ *pskb, hooknum, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
++ /* Note: May not be from a NAT'd host, but probably safest to
++ do translation always as if it came from the host itself
++ (even though a "host unreachable" coming from the host
++ itself is a bit weird).
++
++ More explanation: some people use NAT for anonymizing.
++ Also, CERT recommends dropping all packets from private IP
++ addresses (although ICMP errors from internal links with
++ such addresses are not too uncommon, as Alan Cox points
++ out) */
++
++ READ_LOCK(&ip_nat_lock);
++ for (i = 0; i < info->num_manips; i++) {
++ DEBUGP("icmp_reply: manip %u dir %s hook %u\n",
++ i, info->manips[i].direction == IP_CT_DIR_ORIGINAL ?
++ "ORIG" : "REPLY", info->manips[i].hooknum);
++
++ if (info->manips[i].direction != dir)
++ continue;
++
++ /* Mapping the inner packet is just like a normal
++ packet, except it was never src/dst reversed, so
++ where we would normally apply a dst manip, we apply
++ a src, and vice versa. */
++ if (info->manips[i].hooknum == hooknum) {
++ DEBUGP("icmp_reply: inner %s -> %u.%u.%u.%u %u\n",
++ info->manips[i].maniptype == IP_NAT_MANIP_SRC
++ ? "DST" : "SRC",
++ NIPQUAD(info->manips[i].manip.ip),
++ ntohs(info->manips[i].manip.u.udp.port));
++ if (!manip_pkt(inside->ip.protocol, pskb,
++ (*pskb)->nh.iph->ihl*4
++ + sizeof(inside->icmp),
++ &info->manips[i].manip,
++ !info->manips[i].maniptype))
++ goto unlock_fail;
++
++ /* Outer packet needs to have IP header NATed like
++ it's a reply. */
++
++ /* Use mapping to map outer packet: 0 give no
++ per-proto mapping */
++ DEBUGP("icmp_reply: outer %s -> %u.%u.%u.%u\n",
++ info->manips[i].maniptype == IP_NAT_MANIP_SRC
++ ? "SRC" : "DST",
++ NIPQUAD(info->manips[i].manip.ip));
++ if (!manip_pkt(0, pskb, 0,
++ &info->manips[i].manip,
++ info->manips[i].maniptype))
++ goto unlock_fail;
++ }
++ }
++ READ_UNLOCK(&ip_nat_lock);
++
++ hdrlen = (*pskb)->nh.iph->ihl * 4;
++
++ inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
++
++ inside->icmp.checksum = 0;
++ inside->icmp.checksum = csum_fold(skb_checksum(*pskb, hdrlen,
++ (*pskb)->len - hdrlen,
++ 0));
++ return 1;
++
++ unlock_fail:
++ READ_UNLOCK(&ip_nat_lock);
++ return 0;
++}
++
++int __init ip_nat_init(void)
++{
++ size_t i;
++
++ /* Leave them the same for the moment. */
++ ip_nat_htable_size = ip_conntrack_htable_size;
++
++ /* One vmalloc for both hash tables */
++ bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size*2);
++ if (!bysource) {
++ return -ENOMEM;
++ }
++ byipsproto = bysource + ip_nat_htable_size;
++
++ /* Sew in builtin protocols. */
++ WRITE_LOCK(&ip_nat_lock);
++ list_append(&protos, &ip_nat_protocol_tcp);
++ list_append(&protos, &ip_nat_protocol_udp);
++ list_append(&protos, &ip_nat_protocol_icmp);
++ WRITE_UNLOCK(&ip_nat_lock);
++
++ for (i = 0; i < ip_nat_htable_size; i++) {
++ INIT_LIST_HEAD(&bysource[i]);
++ INIT_LIST_HEAD(&byipsproto[i]);
++ }
++
++ /* FIXME: Man, this is a hack. <SIGH> */
++ IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
++ ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
++
++ return 0;
++}
++
++/* Clear NAT section of all conntracks, in case we're loaded again. */
++static int clean_nat(const struct ip_conntrack *i, void *data)
++{
++ memset((void *)&i->nat, 0, sizeof(i->nat));
++ return 0;
++}
++
++/* Not __exit: called from ip_nat_standalone.c:init_or_cleanup() --RR */
++void ip_nat_cleanup(void)
++{
++ ip_ct_selective_cleanup(&clean_nat, NULL);
++ ip_conntrack_destroyed = NULL;
++ vfree(bysource);
++}
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/iptable_raw.c linux-2.6.3/net/ipv4/netfilter/iptable_raw.c
+--- linux-2.6.3.org/net/ipv4/netfilter/iptable_raw.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/iptable_raw.c 2004-02-27 00:03:14.470028400 +0100
+@@ -0,0 +1,149 @@
++/*
++ * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT .
++ *
++ * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ */
++#include <linux/module.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++#define RAW_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT))
++
++/* Standard entry. */
++struct ipt_standard
++{
++ struct ipt_entry entry;
++ struct ipt_standard_target target;
++};
++
++struct ipt_error_target
++{
++ struct ipt_entry_target target;
++ char errorname[IPT_FUNCTION_MAXNAMELEN];
++};
++
++struct ipt_error
++{
++ struct ipt_entry entry;
++ struct ipt_error_target target;
++};
++
++static struct
++{
++ struct ipt_replace repl;
++ struct ipt_standard entries[2];
++ struct ipt_error term;
++} initial_table __initdata
++= { { "raw", RAW_VALID_HOOKS, 3,
++ sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
++ { [NF_IP_PRE_ROUTING] 0,
++ [NF_IP_LOCAL_OUT] sizeof(struct ipt_standard) },
++ { [NF_IP_PRE_ROUTING] 0,
++ [NF_IP_LOCAL_OUT] sizeof(struct ipt_standard) },
++ 0, NULL, { } },
++ {
++ /* PRE_ROUTING */
++ { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ipt_entry),
++ sizeof(struct ipt_standard),
++ 0, { 0, 0 }, { } },
++ { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
++ -NF_ACCEPT - 1 } },
++ /* LOCAL_OUT */
++ { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ipt_entry),
++ sizeof(struct ipt_standard),
++ 0, { 0, 0 }, { } },
++ { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
++ -NF_ACCEPT - 1 } }
++ },
++ /* ERROR */
++ { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ipt_entry),
++ sizeof(struct ipt_error),
++ 0, { 0, 0 }, { } },
++ { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
++ { } },
++ "ERROR"
++ }
++ }
++};
++
++static struct ipt_table packet_raw = {
++ .name = "raw",
++ .table = &initial_table.repl,
++ .valid_hooks = RAW_VALID_HOOKS,
++ .lock = RW_LOCK_UNLOCKED,
++ .me = THIS_MODULE
++};
++
++/* The work comes in here from netfilter.c. */
++static unsigned int
++ipt_hook(unsigned int hook,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ipt_do_table(pskb, hook, in, out, &packet_raw, NULL);
++}
++
++/* 'raw' is the very first table. */
++static struct nf_hook_ops ipt_ops[] = {
++ {
++ .hook = ipt_hook,
++ .pf = PF_INET,
++ .hooknum = NF_IP_PRE_ROUTING,
++ .priority = NF_IP_PRI_RAW
++ },
++ {
++ .hook = ipt_hook,
++ .pf = PF_INET,
++ .hooknum = NF_IP_LOCAL_OUT,
++ .priority = NF_IP_PRI_RAW
++ },
++};
++
++static int __init init(void)
++{
++ int ret;
++
++ /* Register table */
++ ret = ipt_register_table(&packet_raw);
++ if (ret < 0)
++ return ret;
++
++ /* Register hooks */
++ ret = nf_register_hook(&ipt_ops[0]);
++ if (ret < 0)
++ goto cleanup_table;
++
++ ret = nf_register_hook(&ipt_ops[1]);
++ if (ret < 0)
++ goto cleanup_hook0;
++
++ return ret;
++
++ cleanup_hook0:
++ nf_unregister_hook(&ipt_ops[0]);
++ cleanup_table:
++ ipt_unregister_table(&packet_raw);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ unsigned int i;
++
++ for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++)
++ nf_unregister_hook(&ipt_ops[i]);
++
++ ipt_unregister_table(&packet_raw);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_connlimit.c linux-2.6.3/net/ipv4/netfilter/ipt_connlimit.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_connlimit.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_connlimit.c 2004-02-27 00:03:07.981014880 +0100
+@@ -0,0 +1,230 @@
++/*
++ * netfilter module to limit the number of parallel tcp
++ * connections per IP address.
++ * (c) 2000 Gerd Knorr <kraxel@bytesex.org>
++ * Nov 2002: Martin Bene <martin.bene@icomedias.com>:
++ * only ignore TIME_WAIT or gone connections
++ *
++ * based on ...
++ *
++ * Kernel module to match connection tracking information.
++ * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_conntrack_core.h>
++#include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_connlimit.h>
++
++#define DEBUG 0
++
++MODULE_LICENSE("GPL");
++
++/* we'll save the tuples of all connections we care about */
++struct ipt_connlimit_conn
++{
++ struct list_head list;
++ struct ip_conntrack_tuple tuple;
++};
++
++struct ipt_connlimit_data {
++ spinlock_t lock;
++ struct list_head iphash[256];
++};
++
++static int ipt_iphash(u_int32_t addr)
++{
++ int hash;
++
++ hash = addr & 0xff;
++ hash ^= (addr >> 8) & 0xff;
++ hash ^= (addr >> 16) & 0xff;
++ hash ^= (addr >> 24) & 0xff;
++ return hash;
++}
++
++static int count_them(struct ipt_connlimit_data *data,
++ u_int32_t addr, u_int32_t mask,
++ struct ip_conntrack *ct)
++{
++#if DEBUG
++ const static char *tcp[] = { "none", "established", "syn_sent", "syn_recv",
++ "fin_wait", "time_wait", "close", "close_wait",
++ "last_ack", "listen" };
++#endif
++ int addit = 1, matches = 0;
++ struct ip_conntrack_tuple tuple;
++ struct ip_conntrack_tuple_hash *found;
++ struct ipt_connlimit_conn *conn;
++ struct list_head *hash,*lh;
++
++ spin_lock(&data->lock);
++ tuple = ct->tuplehash[0].tuple;
++ hash = &data->iphash[ipt_iphash(addr & mask)];
++
++ /* check the saved connections */
++ for (lh = hash->next; lh != hash; lh = lh->next) {
++ conn = list_entry(lh,struct ipt_connlimit_conn,list);
++ found = ip_conntrack_find_get(&conn->tuple,ct);
++ if (0 == memcmp(&conn->tuple,&tuple,sizeof(tuple)) &&
++ found != NULL &&
++ found->ctrack->proto.tcp.state != TCP_CONNTRACK_TIME_WAIT) {
++ /* Just to be sure we have it only once in the list.
++ We shouldn't see tuples twice unless someone hooks this
++ into a table without "-p tcp --syn" */
++ addit = 0;
++ }
++#if DEBUG
++ printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d %s\n",
++ ipt_iphash(addr & mask),
++ NIPQUAD(conn->tuple.src.ip), ntohs(conn->tuple.src.u.tcp.port),
++ NIPQUAD(conn->tuple.dst.ip), ntohs(conn->tuple.dst.u.tcp.port),
++ (NULL != found) ? tcp[found->ctrack->proto.tcp.state] : "gone");
++#endif
++ if (NULL == found) {
++ /* this one is gone */
++ lh = lh->prev;
++ list_del(lh->next);
++ kfree(conn);
++ continue;
++ }
++ if (found->ctrack->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT) {
++ /* we don't care about connections which are
++ closed already -> ditch it */
++ lh = lh->prev;
++ list_del(lh->next);
++ kfree(conn);
++ nf_conntrack_put(&found->ctrack->infos[0]);
++ continue;
++ }
++ if ((addr & mask) == (conn->tuple.src.ip & mask)) {
++ /* same source IP address -> be counted! */
++ matches++;
++ }
++ nf_conntrack_put(&found->ctrack->infos[0]);
++ }
++ if (addit) {
++ /* save the new connection in our list */
++#if DEBUG
++ printk("ipt_connlimit [%d]: src=%u.%u.%u.%u:%d dst=%u.%u.%u.%u:%d new\n",
++ ipt_iphash(addr & mask),
++ NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
++ NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
++#endif
++ conn = kmalloc(sizeof(*conn),GFP_ATOMIC);
++ if (NULL == conn)
++ return -1;
++ memset(conn,0,sizeof(*conn));
++ INIT_LIST_HEAD(&conn->list);
++ conn->tuple = tuple;
++ list_add(&conn->list,hash);
++ matches++;
++ }
++ spin_unlock(&data->lock);
++ return matches;
++}
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_connlimit_info *info = matchinfo;
++ int connections, match;
++ struct ip_conntrack *ct;
++ enum ip_conntrack_info ctinfo;
++
++ ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
++ if (NULL == ct) {
++ printk("ipt_connlimit: Oops: invalid ct state ?\n");
++ *hotdrop = 1;
++ return 0;
++ }
++ connections = count_them(info->data,skb->nh.iph->saddr,info->mask,ct);
++ if (-1 == connections) {
++ printk("ipt_connlimit: Hmm, kmalloc failed :-(\n");
++ *hotdrop = 1; /* let's free some memory :-) */
++ return 0;
++ }
++ match = (info->inverse) ? (connections <= info->limit) : (connections > info->limit);
++#if DEBUG
++ printk("ipt_connlimit: src=%u.%u.%u.%u mask=%u.%u.%u.%u "
++ "connections=%d limit=%d match=%s\n",
++ NIPQUAD(skb->nh.iph->saddr), NIPQUAD(info->mask),
++ connections, info->limit, match ? "yes" : "no");
++#endif
++
++ return match;
++}
++
++static int check(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ struct ipt_connlimit_info *info = matchinfo;
++ int i;
++
++ /* verify size */
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_connlimit_info)))
++ return 0;
++
++ /* refuse anything but tcp */
++ if (ip->proto != IPPROTO_TCP)
++ return 0;
++
++ /* init private data */
++ info->data = kmalloc(sizeof(struct ipt_connlimit_data),GFP_KERNEL);
++ spin_lock_init(&(info->data->lock));
++ for (i = 0; i < 256; i++)
++ INIT_LIST_HEAD(&(info->data->iphash[i]));
++
++ return 1;
++}
++
++static void destroy(void *matchinfo, unsigned int matchinfosize)
++{
++ struct ipt_connlimit_info *info = matchinfo;
++ struct ipt_connlimit_conn *conn;
++ struct list_head *hash;
++ int i;
++
++ /* cleanup */
++ for (i = 0; i < 256; i++) {
++ hash = &(info->data->iphash[i]);
++ while (hash != hash->next) {
++ conn = list_entry(hash->next,struct ipt_connlimit_conn,list);
++ list_del(hash->next);
++ kfree(conn);
++ }
++ }
++ kfree(info->data);
++}
++
++static struct ipt_match connlimit_match = {
++ .name = "connlimit",
++ .match = &match,
++ .checkentry = &check,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&connlimit_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&connlimit_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_conntrack.c linux-2.6.3/net/ipv4/netfilter/ipt_conntrack.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_conntrack.c 2004-02-18 04:59:26.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_conntrack.c 2004-02-27 00:03:14.483026424 +0100
+@@ -35,11 +35,13 @@
+
+ #define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
+
+- if (ct)
+- statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
+- else
+- statebit = IPT_CONNTRACK_STATE_INVALID;
+-
++ if (skb->nfct == &ip_conntrack_untracked.infos[IP_CT_NEW])
++ statebit = IPT_CONNTRACK_STATE_UNTRACKED;
++ else if (ct)
++ statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
++ else
++ statebit = IPT_CONNTRACK_STATE_INVALID;
++
+ if(sinfo->flags & IPT_CONNTRACK_STATE) {
+ if (ct) {
+ if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip !=
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_conntrack.c.orig linux-2.6.3/net/ipv4/netfilter/ipt_conntrack.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_conntrack.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_conntrack.c.orig 2004-02-18 04:59:26.000000000 +0100
+@@ -0,0 +1,134 @@
++/* Kernel module to match connection tracking information.
++ * Superset of Rusty's minimalistic state match.
++ *
++ * (C) 2001 Marc Boucher (marc@mbsi.ca).
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_conntrack.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
++MODULE_DESCRIPTION("iptables connection tracking match module");
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_conntrack_info *sinfo = matchinfo;
++ struct ip_conntrack *ct;
++ enum ip_conntrack_info ctinfo;
++ unsigned int statebit;
++
++ ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
++
++#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
++
++ if (ct)
++ statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
++ else
++ statebit = IPT_CONNTRACK_STATE_INVALID;
++
++ if(sinfo->flags & IPT_CONNTRACK_STATE) {
++ if (ct) {
++ if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip !=
++ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip)
++ statebit |= IPT_CONNTRACK_STATE_SNAT;
++
++ if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip !=
++ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip)
++ statebit |= IPT_CONNTRACK_STATE_DNAT;
++ }
++
++ if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_PROTO) {
++ if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
++ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
++ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
++ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
++ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_STATUS) {
++ if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
++ return 0;
++ }
++
++ if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
++ unsigned long expires;
++
++ if(!ct)
++ return 0;
++
++ expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
++
++ if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
++ return 0;
++ }
++
++ return 1;
++}
++
++static int check(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_conntrack_info)))
++ return 0;
++
++ return 1;
++}
++
++static struct ipt_match conntrack_match = {
++ .name = "conntrack",
++ .match = &match,
++ .checkentry = &check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ need_ip_conntrack();
++ return ipt_register_match(&conntrack_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&conntrack_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_dstlimit.c linux-2.6.3/net/ipv4/netfilter/ipt_dstlimit.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_dstlimit.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_dstlimit.c 2004-02-27 00:03:08.652912736 +0100
+@@ -0,0 +1,690 @@
++/* iptables match extension to limit the number of packets per second
++ * separately for each destination.
++ *
++ * (C) 2003 by Harald Welte <laforge@netfilter.org>
++ *
++ * ipt_dstlimit.c,v 1.3 2004/02/23 00:15:45 laforge Exp
++ *
++ * Development of this code was funded by Astaro AG, http://www.astaro.com/
++ *
++ * based on ipt_limit.c by:
++ * Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
++ * Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
++ * Rusty Russell <rusty@rustcorp.com.au>
++ *
++ * The general idea is to create a hash table for every dstip and have a
++ * separate limit counter per tuple. This way you can do something like 'limit
++ * the number of syn packets for each of my internal addresses'.
++ *
++ * Ideally this would just be implemented as a general 'hash' match, which would
++ * allow us to attach any iptables target to its hash buckets. But this is
++ * not possible in the current iptables architecture. As always, pkttables for
++ * 2.7.x will help ;)
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/lockhelp.h>
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_dstlimit.h>
++
++/* FIXME: this is just for IP_NF_ASSERT */
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++
++#define MS2JIFFIES(x) ((x*HZ)/1000)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
++MODULE_DESCRIPTION("iptables match for limiting per destination");
++
++/* need to declare this at the top */
++static struct proc_dir_entry *dstlimit_procdir;
++static struct file_operations dl_file_ops;
++
++/* hash table crap */
++
++struct dsthash_dst {
++ u_int32_t src_ip;
++ u_int32_t dst_ip;
++ u_int16_t port;
++};
++
++struct dsthash_ent {
++ /* static / read-only parts in the beginning */
++ struct list_head list;
++ struct dsthash_dst dst;
++
++ /* modified structure members in the end */
++ unsigned long expires; /* precalculated expiry time */
++ struct {
++ unsigned long prev; /* last modification */
++ u_int32_t credit;
++ u_int32_t credit_cap, cost;
++ } rateinfo;
++};
++
++struct ipt_dstlimit_htable {
++ struct list_head list; /* global list of all htables */
++ atomic_t use;
++
++ struct dstlimit_cfg cfg; /* config */
++
++ /* used internally */
++ spinlock_t lock; /* lock for list_head */
++ u_int32_t rnd; /* random seed for hash */
++ struct timer_list timer; /* timer for gc */
++ atomic_t count; /* number entries in table */
++
++ /* seq_file stuff */
++ struct proc_dir_entry *pde;
++
++ struct list_head hash[0]; /* hashtable itself */
++};
++
++DECLARE_RWLOCK(dstlimit_lock); /* protects htables list */
++static LIST_HEAD(dstlimit_htables);
++static kmem_cache_t *dstlimit_cachep;
++
++static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
++{
++ return (ent->dst.dst_ip == b->dst_ip
++ && ent->dst.port == b->port
++ && ent->dst.src_ip == b->src_ip);
++}
++
++static inline u_int32_t
++hash_dst(const struct ipt_dstlimit_htable *ht, const struct dsthash_dst *dst)
++{
++ return (jhash_3words(dst->dst_ip, dst->port,
++ dst->src_ip, ht->rnd) % ht->cfg.size);
++}
++
++static inline struct dsthash_ent *
++__dsthash_find(const struct ipt_dstlimit_htable *ht, struct dsthash_dst *dst)
++{
++ struct dsthash_ent *ent;
++ u_int32_t hash = hash_dst(ht, dst);
++ MUST_BE_LOCKED(&ht->lock);
++ ent = LIST_FIND(&ht->hash[hash], dst_cmp, struct dsthash_ent *, dst);
++ return ent;
++}
++
++/* allocate dsthash_ent, initialize dst, put in htable and lock it */
++static struct dsthash_ent *
++__dsthash_alloc_init(struct ipt_dstlimit_htable *ht, struct dsthash_dst *dst)
++{
++ struct dsthash_ent *ent;
++
++ /* initialize hash with random val at the time we allocate
++ * the first hashtable entry */
++ if (!ht->rnd)
++ get_random_bytes(&ht->rnd, 4);
++
++ if (ht->cfg.max &&
++ atomic_read(&ht->count) >= ht->cfg.max) {
++ /* FIXME: do something. question is what.. */
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "ipt_dstlimit: max count of %u reached\n",
++ ht->cfg.max);
++ return NULL;
++ }
++
++ ent = kmem_cache_alloc(dstlimit_cachep, GFP_ATOMIC);
++ if (!ent) {
++ if (net_ratelimit())
++ printk(KERN_ERR
++ "ipt_dstlimit: can't allocate dsthash_ent\n");
++ return NULL;
++ }
++
++ atomic_inc(&ht->count);
++
++ ent->dst.dst_ip = dst->dst_ip;
++ ent->dst.port = dst->port;
++ ent->dst.src_ip = dst->src_ip;
++
++ list_add(&ent->list, &ht->hash[hash_dst(ht, dst)]);
++
++ return ent;
++}
++
++static inline void
++__dsthash_free(struct ipt_dstlimit_htable *ht, struct dsthash_ent *ent)
++{
++ MUST_BE_LOCKED(&ht->lock);
++
++ list_del(&ent->list);
++ kmem_cache_free(dstlimit_cachep, ent);
++ atomic_dec(&ht->count);
++}
++static void htable_gc(unsigned long htlong);
++
++/* Create a per-rule hashtable: size it (from config or from available
++ * memory), allocate it, register its /proc entry and start the periodic
++ * garbage-collection timer, then link it into the global list.
++ * Returns 0 on success, -1 on failure.
++ * NOTE(review): on the proc-entry failure path minfo->hinfo is left
++ * pointing at the vfree()d table; harmless today because checkentry
++ * then rejects the rule, but worth confirming. */
++static int htable_create(struct ipt_dstlimit_info *minfo)
++{
++ int i;
++ unsigned int size;
++ struct ipt_dstlimit_htable *hinfo;
++
++ if (minfo->cfg.size)
++ size = minfo->cfg.size;
++ else {
++ /* default: scale bucket count with system memory,
++ * clamped to [16, 8192] */
++ size = (((num_physpages << PAGE_SHIFT) / 16384)
++ / sizeof(struct list_head));
++ if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
++ size = 8192;
++ if (size < 16)
++ size = 16;
++ }
++ /* FIXME: don't use vmalloc() here or anywhere else -HW */
++ hinfo = vmalloc(sizeof(struct ipt_dstlimit_htable)
++ + (sizeof(struct list_head) * size));
++ if (!hinfo) {
++ printk(KERN_ERR "ipt_dstlimit: Unable to create hashtable\n");
++ return -1;
++ }
++ minfo->hinfo = hinfo;
++
++ /* copy match config into hashtable config */
++ memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
++ hinfo->cfg.size = size;
++ if (!hinfo->cfg.max)
++ hinfo->cfg.max = 8 * hinfo->cfg.size;
++ else if (hinfo->cfg.max < hinfo->cfg.size)
++ hinfo->cfg.max = hinfo->cfg.size;
++
++ for (i = 0; i < hinfo->cfg.size; i++)
++ INIT_LIST_HEAD(&hinfo->hash[i]);
++
++ atomic_set(&hinfo->count, 0);
++ atomic_set(&hinfo->use, 1);
++ hinfo->rnd = 0;
++ hinfo->lock = SPIN_LOCK_UNLOCKED;
++ hinfo->pde = create_proc_entry(minfo->name, 0, dstlimit_procdir);
++ if (!hinfo->pde) {
++ vfree(hinfo);
++ return -1;
++ }
++ hinfo->pde->proc_fops = &dl_file_ops;
++ hinfo->pde->data = hinfo;
++
++ /* arm the periodic GC timer; htable_gc() re-arms itself */
++ init_timer(&hinfo->timer);
++ hinfo->timer.expires = jiffies + MS2JIFFIES(hinfo->cfg.gc_interval);
++ hinfo->timer.data = (unsigned long )hinfo;
++ hinfo->timer.function = htable_gc;
++ add_timer(&hinfo->timer);
++
++ WRITE_LOCK(&dstlimit_lock);
++ list_add(&hinfo->list, &dstlimit_htables);
++ WRITE_UNLOCK(&dstlimit_lock);
++
++ return 0;
++}
++
++/* Selector for htable_selective_cleanup(): match every entry
++ * (used on destroy to flush the whole table). */
++static int select_all(struct ipt_dstlimit_htable *ht, struct dsthash_ent *he)
++{
++ return 1;
++}
++
++/* Selector for htable_selective_cleanup(): true once the entry's
++ * timeout has passed. Use time_after_eq() rather than a raw ">="
++ * comparison so the test stays correct across jiffies wrap-around
++ * (a raw compare misbehaves when jiffies wraps the 32-bit range). */
++static int select_gc(struct ipt_dstlimit_htable *ht, struct dsthash_ent *he)
++{
++ return time_after_eq(jiffies, he->expires);
++}
++
++/* Walk every bucket under ht->lock and free each entry for which the
++ * given selector returns true (select_gc for expiry, select_all for
++ * teardown). Uses the _safe list iterator because entries are deleted
++ * while iterating. */
++static void htable_selective_cleanup(struct ipt_dstlimit_htable *ht,
++ int (*select)(struct ipt_dstlimit_htable *ht,
++ struct dsthash_ent *he))
++{
++ int i;
++
++ IP_NF_ASSERT(ht->cfg.size && ht->cfg.max);
++
++ /* lock hash table and iterate over it */
++ LOCK_BH(&ht->lock);
++ for (i = 0; i < ht->cfg.size; i++) {
++ struct dsthash_ent *dh, *n;
++ list_for_each_entry_safe(dh, n, &ht->hash[i], list) {
++ if ((*select)(ht, dh))
++ __dsthash_free(ht, dh);
++ }
++ }
++ UNLOCK_BH(&ht->lock);
++}
++
++/* hash table garbage collector, run by timer */
++/* Frees expired entries, then re-arms itself for the next interval.
++ * Re-arming via expires + add_timer is safe here because a timer is
++ * detached while its handler runs. */
++static void htable_gc(unsigned long htlong)
++{
++ struct ipt_dstlimit_htable *ht = (struct ipt_dstlimit_htable *)htlong;
++
++ htable_selective_cleanup(ht, select_gc);
++
++ /* re-add the timer accordingly */
++ ht->timer.expires = jiffies + MS2JIFFIES(ht->cfg.gc_interval);
++ add_timer(&ht->timer);
++}
++
++/* Tear down a hashtable: stop the GC timer, remove the /proc entry,
++ * free all entries and finally the table itself.
++ * Use del_timer_sync() instead of a timer_pending()/del_timer() pair:
++ * on SMP, htable_gc() may be running on another CPU (and will re-arm
++ * the timer), so plain del_timer() leaves a window where the handler
++ * touches hinfo after we vfree() it. del_timer_sync() waits for a
++ * running handler to finish before returning. */
++static void htable_destroy(struct ipt_dstlimit_htable *hinfo)
++{
++ /* remove timer, waiting for a concurrently running handler */
++ del_timer_sync(&hinfo->timer);
++
++ /* remove proc entry */
++ remove_proc_entry(hinfo->pde->name, dstlimit_procdir);
++
++ /* flush all remaining entries, then release the table */
++ htable_selective_cleanup(hinfo, select_all);
++ vfree(hinfo);
++}
++
++/* Look up a hashtable by its /proc name and take a reference on it.
++ * Returns the table with its use count bumped, or NULL if no table of
++ * that name exists. Pair with htable_put(). */
++static struct ipt_dstlimit_htable *htable_find_get(char *name)
++{
++ struct ipt_dstlimit_htable *hinfo;
++
++ READ_LOCK(&dstlimit_lock);
++ list_for_each_entry(hinfo, &dstlimit_htables, list) {
++ if (!strcmp(name, hinfo->pde->name)) {
++ /* ref taken under the lock, so the table cannot
++ * be destroyed between find and inc */
++ atomic_inc(&hinfo->use);
++ READ_UNLOCK(&dstlimit_lock);
++ return hinfo;
++ }
++ }
++ READ_UNLOCK(&dstlimit_lock);
++
++ return NULL;
++}
++
++/* Drop a reference taken by htable_find_get()/htable_create(); the
++ * last reference unlinks the table from the global list and destroys
++ * it. */
++static void htable_put(struct ipt_dstlimit_htable *hinfo)
++{
++ if (atomic_dec_and_test(&hinfo->use)) {
++ WRITE_LOCK(&dstlimit_lock);
++ list_del(&hinfo->list);
++ WRITE_UNLOCK(&dstlimit_lock);
++ htable_destroy(hinfo);
++ }
++}
++
++
++/* The algorithm used is the Simple Token Bucket Filter (TBF)
++ * see net/sched/sch_tbf.c in the linux source tree
++ */
++
++/* Rusty: This is my (non-mathematically-inclined) understanding of
++ this algorithm. The `average rate' in jiffies becomes your initial
++ amount of credit `credit' and the most credit you can ever have
++ `credit_cap'. The `peak rate' becomes the cost of passing the
++ test, `cost'.
++
++ `prev' tracks the last packet hit: you gain one credit per jiffy.
++ If you get credit balance more than this, the extra credit is
++ discarded. Every time the match passes, you lose `cost' credits;
++ if you don't have that many, the test fails.
++
++ See Alexey's formal explanation in net/sched/sch_tbf.c.
++
++ To get the maximum range, we multiply by this factor (ie. you get N
++ credits per jiffy). We want to allow a rate as low as 1 per day
++ (slowest userspace tool allows), which means
++ CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
++*/
++#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
++
++/* Repeated shift and or gives us all 1s, final shift and add 1 gives
++ * us the power of 2 below the theoretical max, so GCC simply does a
++ * shift. */
++#define _POW2_BELOW2(x) ((x)|((x)>>1))
++#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
++#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
++#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
++#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
++#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
++
++#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
++
++/* Precision saver. */
++/* Convert a userspace rate value (scaled by IPT_DSTLIMIT_SCALE) into
++ * internal credits. Divides first when multiplying would overflow
++ * 32 bits, at the cost of some precision. */
++static inline u_int32_t
++user2credits(u_int32_t user)
++{
++ /* If multiplying would overflow... */
++ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
++ /* Divide first. */
++ return (user / IPT_DSTLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
++
++ return (user * HZ * CREDITS_PER_JIFFY) / IPT_DSTLIMIT_SCALE;
++}
++
++/* Refill the token bucket: add CREDITS_PER_JIFFY for every jiffy since
++ * the last refill (xchg atomically advances .prev), capped at
++ * credit_cap. Caller holds the table lock. */
++static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
++{
++ dh->rateinfo.credit += (now - xchg(&dh->rateinfo.prev, now))
++ * CREDITS_PER_JIFFY;
++ if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
++ dh->rateinfo.credit = dh->rateinfo.credit_cap;
++}
++
++/* Main match function: build the hash key from the packet (dst ip
++ * always; src ip and dst port depending on cfg.mode), look up or
++ * create the per-destination entry and apply the token-bucket test.
++ * Returns 1 (match) while the flow is under its limit. */
++static int
++dstlimit_match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ struct ipt_dstlimit_info *r =
++ ((struct ipt_dstlimit_info *)matchinfo)->u.master;
++ struct ipt_dstlimit_htable *hinfo = r->hinfo;
++ unsigned long now = jiffies;
++ struct dsthash_ent *dh;
++ struct dsthash_dst dst;
++
++ memset(&dst, 0, sizeof(dst));
++
++ /* dest ip is always in hash */
++ dst.dst_ip = skb->nh.iph->daddr;
++
++ /* source ip only if respective hashmode, otherwise set to
++ * zero */
++ if (hinfo->cfg.mode & IPT_DSTLIMIT_HASH_SIP)
++ dst.src_ip = skb->nh.iph->saddr;
++
++ /* dest port only if respective mode */
++ if (hinfo->cfg.mode & IPT_DSTLIMIT_HASH_DPT) {
++ u16 ports[2];
++
++ /* Must not be a fragment. */
++ if (offset)
++ return 0;
++
++ /* Must be big enough to read ports (both UDP and TCP have
++ them at the start). */
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, ports, sizeof(ports)) < 0) {
++ /* We've been asked to examine this packet, and we
++ can't. Hence, no choice but to drop. */
++ *hotdrop = 1;
++ return 0;
++ }
++
++ switch (skb->nh.iph->protocol) {
++ case IPPROTO_TCP:
++ case IPPROTO_UDP:
++ /* Use the data copied by skb_copy_bits() above
++ * instead of dereferencing the transport header in
++ * place: the header may live in a non-linear part
++ * of the skb. The destination port is the second
++ * 16bit word for both TCP and UDP, already in
++ * network byte order as the hash expects. */
++ dst.port = ports[1];
++ break;
++ default:
++ break;
++ }
++ }
++
++ LOCK_BH(&hinfo->lock);
++ dh = __dsthash_find(hinfo, &dst);
++ if (!dh) {
++ dh = __dsthash_alloc_init(hinfo, &dst);
++
++ if (!dh) {
++ /* enomem... don't match == DROP */
++ if (net_ratelimit())
++ printk(KERN_ERR "%s: ENOMEM\n", __FUNCTION__);
++ UNLOCK_BH(&hinfo->lock);
++ return 0;
++ }
++
++ dh->expires = jiffies + MS2JIFFIES(hinfo->cfg.expire);
++
++ /* a new entry starts with a full bucket, so the first
++ * packet of a flow always matches */
++ dh->rateinfo.prev = jiffies;
++ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
++ hinfo->cfg.burst);
++ dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
++ hinfo->cfg.burst);
++ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
++
++ UNLOCK_BH(&hinfo->lock);
++ return 1;
++ }
++
++ /* update expiration timeout */
++ dh->expires = now + MS2JIFFIES(hinfo->cfg.expire);
++
++ rateinfo_recalc(dh, now);
++ if (dh->rateinfo.credit >= dh->rateinfo.cost) {
++ /* We're underlimit. */
++ dh->rateinfo.credit -= dh->rateinfo.cost;
++ UNLOCK_BH(&hinfo->lock);
++ return 1;
++ }
++
++ UNLOCK_BH(&hinfo->lock);
++
++ /* default case: we're overlimit, thus don't match */
++ return 0;
++}
++
++/* Rule-insertion validation: check matchinfo size, sane rate/burst
++ * (no credit overflow), a valid hash mode and non-zero timers, then
++ * attach (or create) the named hashtable. Returns 1 to accept the
++ * rule, 0 to reject it. */
++static int
++dstlimit_checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ struct ipt_dstlimit_info *r = matchinfo;
++
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_dstlimit_info)))
++ return 0;
++
++ /* Check for overflow. */
++ if (r->cfg.burst == 0
++ || user2credits(r->cfg.avg * r->cfg.burst) <
++ user2credits(r->cfg.avg)) {
++ printk(KERN_ERR "ipt_dstlimit: Overflow, try lower: %u/%u\n",
++ r->cfg.avg, r->cfg.burst);
++ return 0;
++ }
++
++ /* mode must be a non-empty subset of the three hash flags */
++ if (r->cfg.mode == 0
++ || r->cfg.mode > (IPT_DSTLIMIT_HASH_DPT
++ |IPT_DSTLIMIT_HASH_DIP
++ |IPT_DSTLIMIT_HASH_SIP))
++ return 0;
++
++ if (!r->cfg.gc_interval)
++ return 0;
++
++ if (!r->cfg.expire)
++ return 0;
++
++ /* reuse an existing table of the same name, else create one */
++ r->hinfo = htable_find_get(r->name);
++ if (!r->hinfo && (htable_create(r) != 0)) {
++ return 0;
++ }
++
++ /* Ugly hack: For SMP, we only want to use one set */
++ r->u.master = r;
++
++ return 1;
++}
++
++/* Rule removal: drop this rule's reference on its hashtable (the last
++ * reference tears the table down). */
++static void
++dstlimit_destroy(void *matchinfo, unsigned int matchsize)
++{
++ struct ipt_dstlimit_info *r = (struct ipt_dstlimit_info *) matchinfo;
++
++ htable_put(r->hinfo);
++}
++
++/* iptables match registration record for "-m dstlimit" */
++static struct ipt_match ipt_dstlimit = {
++ .list = { .prev = NULL, .next = NULL },
++ .name = "dstlimit",
++ .match = dstlimit_match,
++ .checkentry = dstlimit_checkentry,
++ .destroy = dstlimit_destroy,
++ .me = THIS_MODULE
++};
++
++/* PROC stuff */
++
++/* seq_file start: take the table lock (released in dl_seq_stop(),
++ * which the seq_file core always calls) and return a heap-allocated
++ * bucket index as the iterator cursor. */
++static void *dl_seq_start(struct seq_file *s, loff_t *pos)
++{
++ struct proc_dir_entry *pde = s->private;
++ struct ipt_dstlimit_htable *htable = pde->data;
++ unsigned int *bucket;
++
++ LOCK_BH(&htable->lock);
++ if (*pos >= htable->cfg.size)
++ return NULL;
++
++ /* GFP_ATOMIC, not GFP_KERNEL: we already hold a spinlock with
++ * bottom halves disabled, so sleeping here would be a bug
++ * (sleeping allocation in atomic context). */
++ bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
++ if (!bucket)
++ return ERR_PTR(-ENOMEM);
++
++ *bucket = *pos;
++ return bucket;
++}
++
++/* seq_file next: advance the bucket cursor; free it and return NULL
++ * once every bucket has been shown (dl_seq_stop's kfree(NULL) is then
++ * a no-op). */
++static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++ struct proc_dir_entry *pde = s->private;
++ struct ipt_dstlimit_htable *htable = pde->data;
++ unsigned int *bucket = (unsigned int *)v;
++
++ *pos = ++(*bucket);
++ if (*pos >= htable->cfg.size) {
++ kfree(v);
++ return NULL;
++ }
++ return bucket;
++}
++
++/* seq_file stop: release the cursor (may be NULL after a completed
++ * walk) and drop the lock taken in dl_seq_start. */
++static void dl_seq_stop(struct seq_file *s, void *v)
++{
++ struct proc_dir_entry *pde = s->private;
++ struct ipt_dstlimit_htable *htable = pde->data;
++ unsigned int *bucket = (unsigned int *)v;
++
++ kfree(bucket);
++
++ UNLOCK_BH(&htable->lock);
++}
++
++/* Print one hash entry: remaining lifetime, src->dst:port tuple and
++ * the current token-bucket state. Refills credits first so the shown
++ * numbers are current. Returns non-zero if the seq buffer overflowed. */
++static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s)
++{
++ /* recalculate to show accurate numbers */
++ rateinfo_recalc(ent, jiffies);
++
++ return seq_printf(s, "%ld %u.%u.%u.%u->%u.%u.%u.%u:%u %u %u %u\n",
++ (ent->expires - jiffies)/HZ,
++ NIPQUAD(ent->dst.src_ip),
++ NIPQUAD(ent->dst.dst_ip), ntohs(ent->dst.port),
++ ent->rateinfo.credit, ent->rateinfo.credit_cap,
++ ent->rateinfo.cost);
++}
++
++/* seq_file show: print every entry in the current bucket; a non-zero
++ * return from the per-entry printer means the output buffer filled up. */
++static int dl_seq_show(struct seq_file *s, void *v)
++{
++ struct proc_dir_entry *pde = s->private;
++ struct ipt_dstlimit_htable *htable = pde->data;
++ unsigned int *bucket = (unsigned int *)v;
++
++ if (LIST_FIND_W(&htable->hash[*bucket], dl_seq_real_show,
++ struct dsthash_ent *, s)) {
++ /* buffer was filled and unable to print that tuple */
++ return 1;
++ }
++ return 0;
++}
++
++/* seq_file iterator over the hashtable buckets (one bucket per step) */
++static struct seq_operations dl_seq_ops = {
++ .start = dl_seq_start,
++ .next = dl_seq_next,
++ .stop = dl_seq_stop,
++ .show = dl_seq_show
++};
++
++/* /proc open: set up the seq_file and stash the proc entry so the
++ * iterator callbacks can reach their hashtable via pde->data. */
++static int dl_proc_open(struct inode *inode, struct file *file)
++{
++ int ret = seq_open(file, &dl_seq_ops);
++
++ if (!ret) {
++ struct seq_file *sf = file->private_data;
++ sf->private = PDE(inode);
++ }
++ return ret;
++}
++
++/* file_operations for the per-table /proc/net/ipt_dstlimit/<name> file */
++static struct file_operations dl_file_ops = {
++ .owner = THIS_MODULE,
++ .open = dl_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++
++/* Combined module init/exit: with fini == 0 this registers the match,
++ * creates the slab cache and the /proc directory (unwinding on error
++ * via the fall-through labels); with fini != 0 it jumps straight to
++ * the cleanup sequence and tears everything down in reverse order. */
++static int init_or_fini(int fini)
++{
++ int ret = 0;
++
++ if (fini)
++ goto cleanup;
++
++ if (ipt_register_match(&ipt_dstlimit)) {
++ ret = -EINVAL;
++ goto cleanup_nothing;
++ }
++
++ /* FIXME: do we really want HWCACHE_ALIGN since our objects are
++ * quite small ? */
++ dstlimit_cachep = kmem_cache_create("ipt_dstlimit",
++ sizeof(struct dsthash_ent), 0,
++ SLAB_HWCACHE_ALIGN, NULL, NULL);
++ if (!dstlimit_cachep) {
++ printk(KERN_ERR "Unable to create ipt_dstlimit slab cache\n");
++ ret = -ENOMEM;
++ goto cleanup_unreg_match;
++ }
++
++ dstlimit_procdir = proc_mkdir("ipt_dstlimit", proc_net);
++ if (!dstlimit_procdir) {
++ printk(KERN_ERR "Unable to create proc dir entry\n");
++ ret = -ENOMEM;
++ goto cleanup_free_slab;
++ }
++
++ return ret;
++
++/* cleanup labels double as error-unwind path (note the fall-through) */
++cleanup:
++ remove_proc_entry("ipt_dstlimit", proc_net);
++cleanup_free_slab:
++ kmem_cache_destroy(dstlimit_cachep);
++cleanup_unreg_match:
++ ipt_unregister_match(&ipt_dstlimit);
++cleanup_nothing:
++ return ret;
++
++}
++
++/* Thin module entry/exit wrappers around init_or_fini() */
++static int __init init(void)
++{
++ return init_or_fini(0);
++}
++
++static void __exit fini(void)
++{
++ init_or_fini(1);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_fuzzy.c linux-2.6.3/net/ipv4/netfilter/ipt_fuzzy.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_fuzzy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_fuzzy.c 2004-02-27 00:03:09.360805120 +0100
+@@ -0,0 +1,185 @@
++/*
++ * This module implements a simple TSK FLC
++ * (Takagi-Sugeno-Kang Fuzzy Logic Controller) that aims
++ * to limit , in an adaptive and flexible way , the packet rate crossing
++ * a given stream . It serves as an initial and very simple (but effective)
++ * example of how Fuzzy Logic techniques can be applied to defeat DoS attacks.
++ * As a matter of fact , Fuzzy Logic can help us to insert any "behavior"
++ * into our code in a precise , adaptive and efficient manner.
++ * The goal is very similar to that of "limit" match , but using techniques of
++ * Fuzzy Control , that allow us to shape the transfer functions precisely ,
++ * avoiding over and undershoots - and stuff like that .
++ *
++ *
++ * 2002-08-10 Hime Aguiar e Oliveira Jr. <hime@engineer.com> : Initial version.
++ * 2002-08-17 : Changed to eliminate floating point operations .
++ * 2002-08-23 : Coding style changes .
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/random.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_fuzzy.h>
++
++/*
++ Packet Acceptance Rate - LOW and Packet Acceptance Rate - HIGH
++ Expressed in percentage
++*/
++
++#define PAR_LOW 1/100
++#define PAR_HIGH 1
++
++static spinlock_t fuzzy_lock = SPIN_LOCK_UNLOCKED ;
++
++MODULE_AUTHOR("Hime Aguiar e Oliveira Junior <hime@engineer.com>");
++MODULE_DESCRIPTION("IP tables Fuzzy Logic Controller match module");
++MODULE_LICENSE("GPL");
++
++/* "HIGH" fuzzy membership function: 0 at/below mini, 100 at/above
++ * maxi, linear in between (percentage, integer arithmetic only). */
++static u_int8_t mf_high(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
++{
++ if (tx >= maxi)
++ return 100;
++
++ if (tx <= mini)
++ return 0;
++
++ return ( (100*(tx-mini)) / (maxi-mini) );
++}
++
++/* "LOW" fuzzy membership function: mirror image of mf_high -- 100
++ * at/below mini, 0 at/above maxi, linear in between. */
++static u_int8_t mf_low(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
++{
++ if (tx <= mini)
++ return 100;
++
++ if (tx >= maxi)
++ return 0;
++
++ return ( (100*( maxi - tx )) / ( maxi - mini ) );
++}
++
++/* Fuzzy-controller match: sample the packet rate over >=100ms windows,
++ * derive an acceptance percentage from the LOW/HIGH membership
++ * functions, then accept packets probabilistically at that rate.
++ * NOTE(review): matchinfo is cast away from const and mutated here,
++ * i.e. rule data doubles as runtime state shared by all CPUs; the
++ * global fuzzy_lock serializes the updates, but acceptance_rate is
++ * read again after the unlock -- confirm that stale reads are
++ * acceptable by design. */
++static int
++ipt_fuzzy_match(const struct sk_buff *pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ /* From userspace */
++
++ struct ipt_fuzzy_info *info = (struct ipt_fuzzy_info *) matchinfo;
++
++ u_int8_t random_number;
++ unsigned long amount;
++ u_int8_t howhigh, howlow;
++
++
++ spin_lock_bh(&fuzzy_lock); /* Rise the lock */
++
++ info->bytes_total += pskb->len;
++ info->packets_total++;
++
++ info->present_time = jiffies;
++
++ if (info->present_time >= info->previous_time)
++ amount = info->present_time - info->previous_time;
++ else {
++ /* There was a transition : I choose to re-sample
++ and keep the old acceptance rate...
++ */
++
++ amount = 0;
++ info->previous_time = info->present_time;
++ info->bytes_total = info->packets_total = 0;
++ };
++
++ if (amount > HZ/10) /* More than 100 ms elapsed ... */
++ {
++
++ /* mean packet rate in packets/second over the window */
++ info->mean_rate = (u_int32_t) ((HZ*info->packets_total) \
++ / amount );
++
++ info->previous_time = info->present_time;
++ info->bytes_total = info->packets_total = 0;
++
++ howhigh = mf_high(info->mean_rate,info->minimum_rate,info->maximum_rate);
++ howlow = mf_low(info->mean_rate,info->minimum_rate,info->maximum_rate);
++
++ info->acceptance_rate = (u_int8_t) \
++ (howhigh*PAR_LOW + PAR_HIGH*howlow);
++
++ /* In fact , the above defuzzification would require a denominator
++ proportional to (howhigh+howlow) but , in this particular case ,
++ that expression is constant .
++ An imediate consequence is that it isn't necessary to call
++ both mf_high and mf_low - but to keep things understandable ,
++ I did so . */
++
++ }
++
++ spin_unlock_bh(&fuzzy_lock); /* Release the lock */
++
++
++ if ( info->acceptance_rate < 100 )
++ {
++ get_random_bytes((void *)(&random_number), 1);
++
++ /* If within the acceptance , it can pass => don't match */
++ if (random_number <= (255 * info->acceptance_rate) / 100)
++ return 0;
++ else
++ return 1; /* It can't pass ( It matches ) */
++ } ;
++
++ return 0; /* acceptance_rate == 100 % => Everything passes ... */
++
++}
++
++/* Rule validation: check the matchinfo size and that the configured
++ * rate band is sane (within [MINFUZZYRATE, MAXFUZZYRATE] and
++ * min < max). Returns 1 to accept the rule. */
++static int
++ipt_fuzzy_checkentry(const char *tablename,
++ const struct ipt_ip *e,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++
++ const struct ipt_fuzzy_info *info = matchinfo;
++
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_fuzzy_info))) {
++ printk("ipt_fuzzy: matchsize %u != %u\n", matchsize,
++ IPT_ALIGN(sizeof(struct ipt_fuzzy_info)));
++ return 0;
++ }
++
++ if ((info->minimum_rate < MINFUZZYRATE ) || (info->maximum_rate > MAXFUZZYRATE)
++ || (info->minimum_rate >= info->maximum_rate )) {
++ printk("ipt_fuzzy: BAD limits , please verify !!!\n");
++ return 0;
++ }
++
++ return 1;
++}
++
++/* Match registration and standard module entry/exit boilerplate */
++static struct ipt_match ipt_fuzzy_reg = {
++ .name = "fuzzy",
++ .match = ipt_fuzzy_match,
++ .checkentry = ipt_fuzzy_checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&ipt_fuzzy_reg);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&ipt_fuzzy_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_ipv4options.c linux-2.6.3/net/ipv4/netfilter/ipt_ipv4options.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_ipv4options.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_ipv4options.c 2004-02-27 00:03:10.065697960 +0100
+@@ -0,0 +1,172 @@
++/*
++ This is a module which is used to match ipv4 options.
++ This file is distributed under the terms of the GNU General Public
++ License (GPL). Copies of the GPL can be obtained from:
++ ftp://prep.ai.mit.edu/pub/gnu/GPL
++
++ 11-mars-2001 Fabrice MARIE <fabrice@netfilter.org> : initial development.
++ 12-july-2001 Fabrice MARIE <fabrice@netfilter.org> : added router-alert otions matching. Fixed a bug with no-srr
++ 12-august-2001 Imran Patel <ipatel@crosswinds.net> : optimization of the match.
++ 18-november-2001 Fabrice MARIE <fabrice@netfilter.org> : added [!] 'any' option match.
++ 19-february-2004 Harald Welte <laforge@netfilter.org> : merge with 2.6.x
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_ipv4options.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Fabrice Marie <fabrice@netfilter.org>");
++
++/* Match on the presence/absence of specific IPv4 options. A header
++ * with no options satisfies only the DONT_MATCH/any-absent rules;
++ * otherwise each configured MATCH_* flag requires the option to be
++ * present and each DONT_MATCH_* flag requires it to be absent.
++ * Returns 1 on match.
++ * Changed the source-routing tests to use logical '&&' rather than
++ * bitwise '&': with 1-bit bitfields the result happens to be the
++ * same, but '&&' states the intent and stays correct should the
++ * ip_options fields ever become wider. */
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_ipv4options_info *info = matchinfo; /* match info for rule */
++ const struct iphdr *iph = skb->nh.iph;
++ const struct ip_options *opt;
++
++ if (iph->ihl * 4 == sizeof(struct iphdr)) {
++ /* No options, so we match only the "DONTs" and the "IGNOREs" */
++
++ if (((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT))
++ return 0;
++ return 1;
++ }
++ else {
++ if ((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT)
++ /* there are options, and we don't need to care which one */
++ return 1;
++ else {
++ if ((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT)
++ /* there are options but we don't want any ! */
++ return 0;
++ }
++ }
++
++ opt = &(IPCB(skb)->opt);
++
++ /* source routing: SSRR = srr option flagged strict,
++ * LSRR = srr option not strict */
++ if ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) {
++ if (!(opt->srr && opt->is_strictroute))
++ return 0;
++ }
++ else if ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) {
++ if (!(opt->srr && !opt->is_strictroute))
++ return 0;
++ }
++ else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR) {
++ if (opt->srr)
++ return 0;
++ }
++ /* record route */
++ if ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) {
++ if (!opt->rr)
++ return 0;
++ }
++ else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR) {
++ if (opt->rr)
++ return 0;
++ }
++ /* timestamp */
++ if ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) {
++ if (!opt->ts)
++ return 0;
++ }
++ else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) {
++ if (opt->ts)
++ return 0;
++ }
++ /* router-alert option */
++ if ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) {
++ if (!opt->router_alert)
++ return 0;
++ }
++ else if ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) {
++ if (opt->router_alert)
++ return 0;
++ }
++
++ /* we match ! */
++ return 1;
++}
++
++/* Rule validation: check matchinfo size, then reject any combination
++ * of mutually-exclusive flags (MATCH_x together with DONT_MATCH_x,
++ * ANY vs specific negations, SSRR together with LSRR). Returns 1 to
++ * accept the rule. */
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ const struct ipt_ipv4options_info *info = matchinfo; /* match info for rule */
++ /* Check the size */
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_ipv4options_info)))
++ return 0;
++ /* Now check the coherence of the data ... */
++ if (((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT) &&
++ (((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR) ||
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR) ||
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) ||
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) ||
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT)))
++ return 0; /* opposites */
++ if (((info->options & IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) == IPT_IPV4OPTION_DONT_MATCH_ANY_OPT) &&
++ (((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_ANY_OPT) == IPT_IPV4OPTION_MATCH_ANY_OPT)))
++ return 0; /* opposites */
++ if (((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) &&
++ ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR))
++ return 0; /* cannot match in the same time loose and strict source routing */
++ if ((((info->options & IPT_IPV4OPTION_MATCH_SSRR) == IPT_IPV4OPTION_MATCH_SSRR) ||
++ ((info->options & IPT_IPV4OPTION_MATCH_LSRR) == IPT_IPV4OPTION_MATCH_LSRR)) &&
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_SRR) == IPT_IPV4OPTION_DONT_MATCH_SRR))
++ return 0; /* opposites */
++ if (((info->options & IPT_IPV4OPTION_MATCH_RR) == IPT_IPV4OPTION_MATCH_RR) &&
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_RR) == IPT_IPV4OPTION_DONT_MATCH_RR))
++ return 0; /* opposites */
++ if (((info->options & IPT_IPV4OPTION_MATCH_TIMESTAMP) == IPT_IPV4OPTION_MATCH_TIMESTAMP) &&
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP) == IPT_IPV4OPTION_DONT_MATCH_TIMESTAMP))
++ return 0; /* opposites */
++ if (((info->options & IPT_IPV4OPTION_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_MATCH_ROUTER_ALERT) &&
++ ((info->options & IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT) == IPT_IPV4OPTION_DONT_MATCH_ROUTER_ALERT))
++ return 0; /* opposites */
++
++ /* everything looks ok. */
++ return 1;
++}
++
++/* Match registration and standard module entry/exit boilerplate */
++static struct ipt_match ipv4options_match = {
++ .name = "ipv4options",
++ .match = match,
++ .checkentry = checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&ipv4options_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&ipv4options_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c linux-2.6.3/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_IPV4OPTSSTRIP.c 2004-02-27 00:03:05.931326480 +0100
+@@ -0,0 +1,89 @@
++/**
++ * Strip all IP options in the IP packet header.
++ *
++ * (C) 2001 by Fabrice MARIE <fabrice@netfilter.org>
++ * This software is distributed under GNU GPL v2, 1991
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <net/checksum.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
++MODULE_DESCRIPTION("Strip all options in IPv4 packets");
++MODULE_LICENSE("GPL");
++
++/* IPV4OPTSSTRIP target: overwrite every IP option in the header with
++ * IPOPT_NOOP bytes (header length is unchanged, so no checksum or
++ * length fixup is needed) and clear the parsed option state in the
++ * skb control block.
++ * NOTE(review): the "no options" early-exit happens after the packet
++ * has already been made writable; harmless, but the check could come
++ * first. opt->optlen is restored to the old length on purpose --
++ * the NOOP bytes still occupy that space. Confirm intent. */
++static unsigned int
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++{
++ struct iphdr *iph;
++ struct sk_buff *skb;
++ struct ip_options *opt;
++ unsigned char *optiph;
++ int l;
++
++ if (!skb_ip_make_writable(pskb, (*pskb)->len))
++ return NF_DROP;
++
++ skb = (*pskb);
++ iph = (*pskb)->nh.iph;
++ optiph = skb->nh.raw;
++ l = ((struct ip_options *)(&(IPCB(skb)->opt)))->optlen;
++
++ /* if no options in packet then nothing to clear. */
++ if (iph->ihl * 4 == sizeof(struct iphdr))
++ return IPT_CONTINUE;
++
++ /* else clear all options */
++ memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
++ memset(optiph+sizeof(struct iphdr), IPOPT_NOOP, l);
++ opt = &(IPCB(skb)->opt);
++ opt->is_data = 0;
++ opt->optlen = l;
++
++ /* tell conntrack/netfilter caching the packet was altered */
++ skb->nfcache |= NFC_ALTERED;
++
++ return IPT_CONTINUE;
++}
++
++/* Rule validation: this target modifies the header, so it is only
++ * allowed in the "mangle" table; it takes no parameters. */
++static int
++checkentry(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ if (strcmp(tablename, "mangle")) {
++ printk(KERN_WARNING "IPV4OPTSSTRIP: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
++ return 0;
++ }
++ /* nothing else to check because no parameters */
++ return 1;
++}
++
++/* Target registration and standard module entry/exit boilerplate */
++static struct ipt_target ipt_ipv4optsstrip_reg = {
++ .name = "IPV4OPTSSTRIP",
++ .target = target,
++ .checkentry = checkentry,
++ .me = THIS_MODULE };
++
++static int __init init(void)
++{
++ return ipt_register_target(&ipt_ipv4optsstrip_reg);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_target(&ipt_ipv4optsstrip_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_LOG.c linux-2.6.3/net/ipv4/netfilter/ipt_LOG.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_LOG.c 2004-02-18 04:59:20.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_LOG.c 2004-02-27 00:03:00.002227840 +0100
+@@ -19,6 +19,7 @@
+ #include <net/tcp.h>
+ #include <net/route.h>
+
++#include <linux/netfilter.h>
+ #include <linux/netfilter_ipv4/ip_tables.h>
+ #include <linux/netfilter_ipv4/ipt_LOG.h>
+
+@@ -26,6 +27,10 @@
+ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+ MODULE_DESCRIPTION("iptables syslog logging module");
+
++static unsigned int nflog = 1;
++MODULE_PARM(nflog, "i");
++MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
++
+ #if 0
+ #define DEBUGP printk
+ #else
+@@ -324,28 +329,25 @@
+ /* maxlen = 230+ 91 + 230 + 252 = 803 */
+ }
+
+-static unsigned int
+-ipt_log_target(struct sk_buff **pskb,
++static void
++ipt_log_packet(unsigned int hooknum,
++ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+- unsigned int hooknum,
+- const void *targinfo,
+- void *userinfo)
++ const struct ipt_log_info *loginfo,
++ const char *level_string,
++ const char *prefix)
+ {
+- const struct ipt_log_info *loginfo = targinfo;
+- char level_string[4] = "< >";
+-
+- level_string[1] = '0' + (loginfo->level % 8);
+ spin_lock_bh(&log_lock);
+ printk(level_string);
+ printk("%sIN=%s OUT=%s ",
+- loginfo->prefix,
++ prefix == NULL ? loginfo->prefix : prefix,
+ in ? in->name : "",
+ out ? out->name : "");
+ #ifdef CONFIG_BRIDGE_NETFILTER
+- if ((*pskb)->nf_bridge) {
+- struct net_device *physindev = (*pskb)->nf_bridge->physindev;
+- struct net_device *physoutdev = (*pskb)->nf_bridge->physoutdev;
++ if (skb->nf_bridge) {
++ struct net_device *physindev = skb->nf_bridge->physindev;
++ struct net_device *physoutdev = skb->nf_bridge->physoutdev;
+
+ if (physindev && in != physindev)
+ printk("PHYSIN=%s ", physindev->name);
+@@ -357,25 +359,56 @@
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
+- if ((*pskb)->dev && (*pskb)->dev->hard_header_len
+- && (*pskb)->mac.raw != (void*)(*pskb)->nh.iph) {
++ if (skb->dev && skb->dev->hard_header_len
++ && skb->mac.raw != (void*)skb->nh.iph) {
+ int i;
+- unsigned char *p = (*pskb)->mac.raw;
+- for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
++ unsigned char *p = skb->mac.raw;
++ for (i = 0; i < skb->dev->hard_header_len; i++,p++)
+ printk("%02x%c", *p,
+- i==(*pskb)->dev->hard_header_len - 1
++ i==skb->dev->hard_header_len - 1
+ ? ' ':':');
+ } else
+ printk(" ");
+ }
+
+- dump_packet(loginfo, *pskb, 0);
++ dump_packet(loginfo, skb, 0);
+ printk("\n");
+ spin_unlock_bh(&log_lock);
++}
++
++static unsigned int
++ipt_log_target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ipt_log_info *loginfo = targinfo;
++ char level_string[4] = "< >";
++
++ level_string[1] = '0' + (loginfo->level % 8);
++ ipt_log_packet(hooknum, *pskb, in, out, loginfo, level_string, NULL);
+
+ return IPT_CONTINUE;
+ }
+
++static void
++ipt_logfn(unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *prefix)
++{
++ struct ipt_log_info loginfo = {
++ .level = 0,
++ .logflags = IPT_LOG_MASK,
++ .prefix = ""
++ };
++
++ ipt_log_packet(hooknum, skb, in, out, &loginfo, KERN_WARNING, prefix);
++}
++
+ static int ipt_log_checkentry(const char *tablename,
+ const struct ipt_entry *e,
+ void *targinfo,
+@@ -413,11 +446,16 @@
+
+ static int __init init(void)
+ {
++ if (nflog)
++ nf_log_register(PF_INET, &ipt_logfn);
+ return ipt_register_target(&ipt_log_reg);
+ }
+
+ static void __exit fini(void)
+ {
++ if (nflog)
++ nf_log_unregister(PF_INET, &ipt_logfn);
++
+ ipt_unregister_target(&ipt_log_reg);
+ }
+
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_LOG.c.orig linux-2.6.3/net/ipv4/netfilter/ipt_LOG.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_LOG.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_LOG.c.orig 2004-02-18 04:59:20.000000000 +0100
+@@ -0,0 +1,425 @@
++/*
++ * This is a module which is used for logging packets.
++ */
++
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <net/icmp.h>
++#include <net/udp.h>
++#include <net/tcp.h>
++#include <net/route.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_LOG.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
++MODULE_DESCRIPTION("iptables syslog logging module");
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++/* Use lock to serialize, so printks don't overlap */
++static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
++
++/* One level of recursion won't kill us */
++static void dump_packet(const struct ipt_log_info *info,
++ const struct sk_buff *skb,
++ unsigned int iphoff)
++{
++ struct iphdr iph;
++
++ if (skb_copy_bits(skb, iphoff, &iph, sizeof(iph)) < 0) {
++ printk("TRUNCATED");
++ return;
++ }
++
++ /* Important fields:
++ * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
++ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
++ printk("SRC=%u.%u.%u.%u DST=%u.%u.%u.%u ",
++ NIPQUAD(iph.saddr), NIPQUAD(iph.daddr));
++
++ /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
++ printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
++ ntohs(iph.tot_len), iph.tos & IPTOS_TOS_MASK,
++ iph.tos & IPTOS_PREC_MASK, iph.ttl, ntohs(iph.id));
++
++ /* Max length: 6 "CE DF MF " */
++ if (ntohs(iph.frag_off) & IP_CE)
++ printk("CE ");
++ if (ntohs(iph.frag_off) & IP_DF)
++ printk("DF ");
++ if (ntohs(iph.frag_off) & IP_MF)
++ printk("MF ");
++
++ /* Max length: 11 "FRAG:65535 " */
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ printk("FRAG:%u ", ntohs(iph.frag_off) & IP_OFFSET);
++
++ if ((info->logflags & IPT_LOG_IPOPT)
++ && iph.ihl * 4 != sizeof(struct iphdr)) {
++ unsigned char opt[4 * 15 - sizeof(struct iphdr)];
++ unsigned int i, optsize;
++
++ optsize = iph.ihl * 4 - sizeof(struct iphdr);
++ if (skb_copy_bits(skb, iphoff+sizeof(iph), opt, optsize) < 0) {
++ printk("TRUNCATED");
++ return;
++ }
++
++ /* Max length: 127 "OPT (" 15*4*2chars ") " */
++ printk("OPT (");
++ for (i = 0; i < optsize; i++)
++ printk("%02X", opt[i]);
++ printk(") ");
++ }
++
++ switch (iph.protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* Max length: 10 "PROTO=TCP " */
++ printk("PROTO=TCP ");
++
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ break;
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4, &tcph, sizeof(tcph))
++ < 0) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ /* Max length: 20 "SPT=65535 DPT=65535 " */
++ printk("SPT=%u DPT=%u ",
++ ntohs(tcph.source), ntohs(tcph.dest));
++ /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
++ if (info->logflags & IPT_LOG_TCPSEQ)
++ printk("SEQ=%u ACK=%u ",
++ ntohl(tcph.seq), ntohl(tcph.ack_seq));
++ /* Max length: 13 "WINDOW=65535 " */
++ printk("WINDOW=%u ", ntohs(tcph.window));
++ /* Max length: 9 "RES=0x3F " */
++ printk("RES=0x%02x ", (u8)(ntohl(tcp_flag_word(&tcph) & TCP_RESERVED_BITS) >> 22));
++ /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
++ if (tcph.cwr)
++ printk("CWR ");
++ if (tcph.ece)
++ printk("ECE ");
++ if (tcph.urg)
++ printk("URG ");
++ if (tcph.ack)
++ printk("ACK ");
++ if (tcph.psh)
++ printk("PSH ");
++ if (tcph.rst)
++ printk("RST ");
++ if (tcph.syn)
++ printk("SYN ");
++ if (tcph.fin)
++ printk("FIN ");
++ /* Max length: 11 "URGP=65535 " */
++ printk("URGP=%u ", ntohs(tcph.urg_ptr));
++
++ if ((info->logflags & IPT_LOG_TCPOPT)
++ && tcph.doff * 4 != sizeof(struct tcphdr)) {
++ unsigned char opt[4 * 15 - sizeof(struct tcphdr)];
++ unsigned int i, optsize;
++
++ optsize = tcph.doff * 4 - sizeof(struct tcphdr);
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4 + sizeof(tcph),
++ opt, optsize) < 0) {
++ printk("TRUNCATED");
++ return;
++ }
++
++ /* Max length: 127 "OPT (" 15*4*2chars ") " */
++ printk("OPT (");
++ for (i = 0; i < optsize; i++)
++ printk("%02X", opt[i]);
++ printk(") ");
++ }
++ break;
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ /* Max length: 10 "PROTO=UDP " */
++ printk("PROTO=UDP ");
++
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ break;
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4, &udph, sizeof(udph))
++ < 0) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ /* Max length: 20 "SPT=65535 DPT=65535 " */
++ printk("SPT=%u DPT=%u LEN=%u ",
++ ntohs(udph.source), ntohs(udph.dest),
++ ntohs(udph.len));
++ break;
++ }
++ case IPPROTO_ICMP: {
++ struct icmphdr icmph;
++ static size_t required_len[NR_ICMP_TYPES+1]
++ = { [ICMP_ECHOREPLY] = 4,
++ [ICMP_DEST_UNREACH]
++ = 8 + sizeof(struct iphdr) + 8,
++ [ICMP_SOURCE_QUENCH]
++ = 8 + sizeof(struct iphdr) + 8,
++ [ICMP_REDIRECT]
++ = 8 + sizeof(struct iphdr) + 8,
++ [ICMP_ECHO] = 4,
++ [ICMP_TIME_EXCEEDED]
++ = 8 + sizeof(struct iphdr) + 8,
++ [ICMP_PARAMETERPROB]
++ = 8 + sizeof(struct iphdr) + 8,
++ [ICMP_TIMESTAMP] = 20,
++ [ICMP_TIMESTAMPREPLY] = 20,
++ [ICMP_ADDRESS] = 12,
++ [ICMP_ADDRESSREPLY] = 12 };
++
++ /* Max length: 11 "PROTO=ICMP " */
++ printk("PROTO=ICMP ");
++
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ break;
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4, &icmph, sizeof(icmph))
++ < 0) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ /* Max length: 18 "TYPE=255 CODE=255 " */
++ printk("TYPE=%u CODE=%u ", icmph.type, icmph.code);
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (icmph.type <= NR_ICMP_TYPES
++ && required_len[icmph.type]
++ && skb->len-iphoff-iph.ihl*4 < required_len[icmph.type]) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ switch (icmph.type) {
++ case ICMP_ECHOREPLY:
++ case ICMP_ECHO:
++ /* Max length: 19 "ID=65535 SEQ=65535 " */
++ printk("ID=%u SEQ=%u ",
++ ntohs(icmph.un.echo.id),
++ ntohs(icmph.un.echo.sequence));
++ break;
++
++ case ICMP_PARAMETERPROB:
++ /* Max length: 14 "PARAMETER=255 " */
++ printk("PARAMETER=%u ",
++ ntohl(icmph.un.gateway) >> 24);
++ break;
++ case ICMP_REDIRECT:
++ /* Max length: 24 "GATEWAY=255.255.255.255 " */
++ printk("GATEWAY=%u.%u.%u.%u ",
++ NIPQUAD(icmph.un.gateway));
++ /* Fall through */
++ case ICMP_DEST_UNREACH:
++ case ICMP_SOURCE_QUENCH:
++ case ICMP_TIME_EXCEEDED:
++ /* Max length: 3+maxlen */
++ if (!iphoff) { /* Only recurse once. */
++ printk("[");
++ dump_packet(info, skb,
++ iphoff + iph.ihl*4+sizeof(icmph));
++ printk("] ");
++ }
++
++ /* Max length: 10 "MTU=65535 " */
++ if (icmph.type == ICMP_DEST_UNREACH
++ && icmph.code == ICMP_FRAG_NEEDED)
++ printk("MTU=%u ", ntohs(icmph.un.frag.mtu));
++ }
++ break;
++ }
++ /* Max Length */
++ case IPPROTO_AH: {
++ struct ip_auth_hdr ah;
++
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ break;
++
++ /* Max length: 9 "PROTO=AH " */
++ printk("PROTO=AH ");
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4, &ah, sizeof(ah)) < 0) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ /* Length: 15 "SPI=0xF1234567 " */
++ printk("SPI=0x%x ", ntohl(ah.spi));
++ break;
++ }
++ case IPPROTO_ESP: {
++ struct ip_esp_hdr esph;
++
++ /* Max length: 10 "PROTO=ESP " */
++ printk("PROTO=ESP ");
++
++ if (ntohs(iph.frag_off) & IP_OFFSET)
++ break;
++
++ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
++ if (skb_copy_bits(skb, iphoff+iph.ihl*4, &esph, sizeof(esph))
++ < 0) {
++ printk("INCOMPLETE [%u bytes] ",
++ skb->len - iphoff - iph.ihl*4);
++ break;
++ }
++
++ /* Length: 15 "SPI=0xF1234567 " */
++ printk("SPI=0x%x ", ntohl(esph.spi));
++ break;
++ }
++ /* Max length: 10 "PROTO 255 " */
++ default:
++ printk("PROTO=%u ", iph.protocol);
++ }
++
++ /* Proto Max log string length */
++ /* IP: 40+46+6+11+127 = 230 */
++ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
++ /* UDP: 10+max(25,20) = 35 */
++ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
++ /* ESP: 10+max(25)+15 = 50 */
++ /* AH: 9+max(25)+15 = 49 */
++ /* unknown: 10 */
++
++ /* (ICMP allows recursion one level deep) */
++ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
++ /* maxlen = 230+ 91 + 230 + 252 = 803 */
++}
++
++static unsigned int
++ipt_log_target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ipt_log_info *loginfo = targinfo;
++ char level_string[4] = "< >";
++
++ level_string[1] = '0' + (loginfo->level % 8);
++ spin_lock_bh(&log_lock);
++ printk(level_string);
++ printk("%sIN=%s OUT=%s ",
++ loginfo->prefix,
++ in ? in->name : "",
++ out ? out->name : "");
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if ((*pskb)->nf_bridge) {
++ struct net_device *physindev = (*pskb)->nf_bridge->physindev;
++ struct net_device *physoutdev = (*pskb)->nf_bridge->physoutdev;
++
++ if (physindev && in != physindev)
++ printk("PHYSIN=%s ", physindev->name);
++ if (physoutdev && out != physoutdev)
++ printk("PHYSOUT=%s ", physoutdev->name);
++ }
++#endif
++
++ if (in && !out) {
++ /* MAC logging for input chain only. */
++ printk("MAC=");
++ if ((*pskb)->dev && (*pskb)->dev->hard_header_len
++ && (*pskb)->mac.raw != (void*)(*pskb)->nh.iph) {
++ int i;
++ unsigned char *p = (*pskb)->mac.raw;
++ for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
++ printk("%02x%c", *p,
++ i==(*pskb)->dev->hard_header_len - 1
++ ? ' ':':');
++ } else
++ printk(" ");
++ }
++
++ dump_packet(loginfo, *pskb, 0);
++ printk("\n");
++ spin_unlock_bh(&log_lock);
++
++ return IPT_CONTINUE;
++}
++
++static int ipt_log_checkentry(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ const struct ipt_log_info *loginfo = targinfo;
++
++ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_log_info))) {
++ DEBUGP("LOG: targinfosize %u != %u\n",
++ targinfosize, IPT_ALIGN(sizeof(struct ipt_log_info)));
++ return 0;
++ }
++
++ if (loginfo->level >= 8) {
++ DEBUGP("LOG: level %u >= 8\n", loginfo->level);
++ return 0;
++ }
++
++ if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
++ DEBUGP("LOG: prefix term %i\n",
++ loginfo->prefix[sizeof(loginfo->prefix)-1]);
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ipt_target ipt_log_reg = {
++ .name = "LOG",
++ .target = ipt_log_target,
++ .checkentry = ipt_log_checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ipt_register_target(&ipt_log_reg);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_target(&ipt_log_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_mport.c linux-2.6.3/net/ipv4/netfilter/ipt_mport.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_mport.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_mport.c 2004-02-27 00:03:10.772590496 +0100
+@@ -0,0 +1,116 @@
++/* Kernel module to match one of a list of TCP/UDP ports: ports are in
++ the same place so we can treat them as equal. */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++
++#include <linux/netfilter_ipv4/ipt_mport.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_LICENSE("GPL");
++
++#if 0
++#define duprintf(format, args...) printk(format , ## args)
++#else
++#define duprintf(format, args...)
++#endif
++
++/* Returns 1 if the port is matched by the test, 0 otherwise. */
++static inline int
++ports_match(const struct ipt_mport *minfo, u_int16_t src, u_int16_t dst)
++{
++ unsigned int i;
++ unsigned int m;
++ u_int16_t pflags = minfo->pflags;
++ for (i=0, m=1; i<IPT_MULTI_PORTS; i++, m<<=1) {
++ u_int16_t s, e;
++
++ if (pflags & m
++ && minfo->ports[i] == 65535)
++ return 0;
++
++ s = minfo->ports[i];
++
++ if (pflags & m) {
++ e = minfo->ports[++i];
++ m <<= 1;
++ } else
++ e = s;
++
++ if (minfo->flags & IPT_MPORT_SOURCE
++ && src >= s && src <= e)
++ return 1;
++
++ if (minfo->flags & IPT_MPORT_DESTINATION
++ && dst >= s && dst <= e)
++ return 1;
++ }
++
++ return 0;
++}
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ u16 ports[2];
++ const struct ipt_mport *minfo = matchinfo;
++
++ if (offset)
++ return 0;
++
++ /* Must be big enough to read ports (both UDP and TCP have
++ them at the start). */
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, ports, sizeof(ports)) < 0) {
++ /* We've been asked to examine this packet, and we
++ can't. Hence, no choice but to drop. */
++ duprintf("ipt_multiport:"
++ " Dropping evil offset=0 tinygram.\n");
++ *hotdrop = 1;
++ return 0;
++ }
++
++ return ports_match(minfo, ntohs(ports[0]), ntohs(ports[1]));
++}
++
++/* Called when user tries to insert an entry of this type. */
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_mport)))
++ return 0;
++
++ /* Must specify proto == TCP/UDP, no unknown flags or bad count */
++ return (ip->proto == IPPROTO_TCP || ip->proto == IPPROTO_UDP)
++ && !(ip->invflags & IPT_INV_PROTO)
++ && matchsize == IPT_ALIGN(sizeof(struct ipt_mport));
++}
++
++static struct ipt_match mport_match = {
++ .name = "mport",
++ .match = &match,
++ .checkentry = &checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&mport_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&mport_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_NOTRACK.c linux-2.6.3/net/ipv4/netfilter/ipt_NOTRACK.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_NOTRACK.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_NOTRACK.c 2004-02-27 00:03:14.469028552 +0100
+@@ -0,0 +1,75 @@
++/* This is a module which is used for setting up fake conntracks
++ * on packets so that they are not seen by the conntrack/NAT code.
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++
++static unsigned int
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++{
++ /* Previously seen (loopback)? Ignore. */
++ if ((*pskb)->nfct != NULL)
++ return IPT_CONTINUE;
++
++ /* Attach fake conntrack entry.
++ If there is a real ct entry corresponding to this packet,
++ it'll hang around till timing out. We don't deal with it
++ for performance reasons. JK */
++ (*pskb)->nfct = &ip_conntrack_untracked.infos[IP_CT_NEW];
++ nf_conntrack_get((*pskb)->nfct);
++
++ return IPT_CONTINUE;
++}
++
++static int
++checkentry(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ if (targinfosize != 0) {
++ printk(KERN_WARNING "NOTRACK: targinfosize %u != 0\n",
++ targinfosize);
++ return 0;
++ }
++
++ if (strcmp(tablename, "raw") != 0) {
++ printk(KERN_WARNING "NOTRACK: can only be called from \"raw\" table, not \"%s\"\n", tablename);
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ipt_target ipt_notrack_reg = {
++ .name = "NOTRACK",
++ .target = target,
++ .checkentry = checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ if (ipt_register_target(&ipt_notrack_reg))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_target(&ipt_notrack_reg);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_nth.c linux-2.6.3/net/ipv4/netfilter/ipt_nth.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_nth.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_nth.c 2004-02-27 00:03:12.719294552 +0100
+@@ -0,0 +1,166 @@
++/*
++ This is a module which is used for match support for every Nth packet
++ This file is distributed under the terms of the GNU General Public
++ License (GPL). Copies of the GPL can be obtained from:
++ ftp://prep.ai.mit.edu/pub/gnu/GPL
++
++ 2001-07-18 Fabrice MARIE <fabrice@netfilter.org> : initial implementation.
++ 2001-09-20 Richard Wagner (rwagner@cloudnet.com)
++ * added support for multiple counters
++ * added support for matching on individual packets
++ in the counter cycle
++ 2004-02-19 Harald Welte <laforge@netfilter.org>
++ * port to 2.6.x
++
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_nth.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Fabrice Marie <fabrice@netfilter.org>");
++
++/*
++ * State information.
++ */
++struct state {
++ spinlock_t lock;
++ u_int16_t number;
++};
++
++static struct state states[IPT_NTH_NUM_COUNTERS];
++
++static int
++ipt_nth_match(const struct sk_buff *pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ /* Parameters from userspace */
++ const struct ipt_nth_info *info = matchinfo;
++ unsigned counter = info->counter;
++ if((counter < 0) || (counter >= IPT_NTH_NUM_COUNTERS))
++ {
++ printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IPT_NTH_NUM_COUNTERS-1);
++ return 0;
++ };
++
++ spin_lock(&states[counter].lock);
++
++ /* Are we matching every nth packet?*/
++ if (info->packet == 0xFF)
++ {
++ /* We're matching every nth packet and only every nth packet*/
++ /* Do we match or invert match? */
++ if (info->not == 0)
++ {
++ if (states[counter].number == 0)
++ {
++ ++states[counter].number;
++ goto match;
++ }
++ if (states[counter].number >= info->every)
++ states[counter].number = 0; /* reset the counter */
++ else
++ ++states[counter].number;
++ goto dontmatch;
++ }
++ else
++ {
++ if (states[counter].number == 0)
++ {
++ ++states[counter].number;
++ goto dontmatch;
++ }
++ if (states[counter].number >= info->every)
++ states[counter].number = 0;
++ else
++ ++states[counter].number;
++ goto match;
++ }
++ }
++ else
++ {
++ /* We're using the --packet, so there must be a rule for every value */
++ if (states[counter].number == info->packet)
++ {
++ /* only increment the counter when a match happens */
++ if (states[counter].number >= info->every)
++ states[counter].number = 0; /* reset the counter */
++ else
++ ++states[counter].number;
++ goto match;
++ }
++ else
++ goto dontmatch;
++ }
++
++ dontmatch:
++ /* don't match */
++ spin_unlock(&states[counter].lock);
++ return 0;
++
++ match:
++ spin_unlock(&states[counter].lock);
++ return 1;
++}
++
++static int
++ipt_nth_checkentry(const char *tablename,
++ const struct ipt_ip *e,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ /* Parameters from userspace */
++ const struct ipt_nth_info *info = matchinfo;
++ unsigned counter = info->counter;
++ if((counter < 0) || (counter >= IPT_NTH_NUM_COUNTERS))
++ {
++ printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IPT_NTH_NUM_COUNTERS-1);
++ return 0;
++ };
++
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_nth_info))) {
++ printk("nth: matchsize %u != %u\n", matchsize,
++ IPT_ALIGN(sizeof(struct ipt_nth_info)));
++ return 0;
++ }
++
++ states[counter].number = info->startat;
++
++ return 1;
++}
++
++static struct ipt_match ipt_nth_reg = {
++ .name = "nth",
++ .match = ipt_nth_match,
++ .checkentry = ipt_nth_checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ unsigned counter;
++
++ memset(&states, 0, sizeof(states));
++ for (counter = 0; counter < IPT_NTH_NUM_COUNTERS; counter++)
++ spin_lock_init(&(states[counter].lock));
++
++ return ipt_register_match(&ipt_nth_reg);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&ipt_nth_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_quota.c linux-2.6.3/net/ipv4/netfilter/ipt_quota.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_quota.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_quota.c 2004-02-27 00:03:13.672149696 +0100
+@@ -0,0 +1,91 @@
++/*
++ * netfilter module to enforce network quotas
++ *
++ * Sam Johnston <samj@samj.net>
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_quota.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
++
++static spinlock_t quota_lock = SPIN_LOCK_UNLOCKED;
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset, int *hotdrop)
++{
++ struct ipt_quota_info *q = (struct ipt_quota_info *) matchinfo;
++ unsigned int datalen;
++
++ if (skb->len < sizeof(struct iphdr))
++ return NF_ACCEPT;
++
++ datalen = skb->len - skb->nh.iph->ihl*4;
++
++ spin_lock_bh("a_lock);
++
++ if (q->quota >= datalen) {
++ /* we can afford this one */
++ q->quota -= datalen;
++ spin_unlock_bh("a_lock);
++
++#ifdef DEBUG_IPT_QUOTA
++ printk("IPT Quota OK: %llu datlen %d \n", q->quota, datalen);
++#endif
++ return 1;
++ }
++
++ /* so we do not allow even small packets from now on */
++ q->quota = 0;
++
++#ifdef DEBUG_IPT_QUOTA
++ printk("IPT Quota Failed: %llu datlen %d \n", q->quota, datalen);
++#endif
++
++ spin_unlock_bh("a_lock);
++ return 0;
++}
++
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo, unsigned int matchsize, unsigned int hook_mask)
++{
++ /* TODO: spinlocks? sanity checks? */
++ if (matchsize != IPT_ALIGN(sizeof (struct ipt_quota_info)))
++ return 0;
++
++ return 1;
++}
++
++static struct ipt_match quota_match = {
++ .name = "quota",
++ .match = match,
++ .checkentry = checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init
++init(void)
++{
++ return ipt_register_match("a_match);
++}
++
++static void __exit
++fini(void)
++{
++ ipt_unregister_match("a_match);
++}
++
++module_init(init);
++module_exit(fini);
++
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_realm.c linux-2.6.3/net/ipv4/netfilter/ipt_realm.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_realm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_realm.c 2004-02-27 00:03:15.262907864 +0100
+@@ -0,0 +1,70 @@
++/* Kernel module to match realm from routing. */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <net/route.h>
++
++#include <linux/netfilter_ipv4/ipt_realm.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
++MODULE_LICENSE("GPL");
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_realm_info *info = matchinfo;
++ struct dst_entry *dst = skb->dst;
++ u32 id;
++
++ if(dst == NULL)
++ return 0;
++ id = dst->tclassid;
++
++ return (info->id == (id & info->mask)) ^ info->invert;
++}
++
++static int check(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (hook_mask
++ & ~((1 << NF_IP_POST_ROUTING) | (1 << NF_IP_FORWARD) |
++ (1 << NF_IP_LOCAL_OUT)| (1 << NF_IP_LOCAL_IN))) {
++ printk("ipt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
++ "LOCAL_IN or FORWARD.\n");
++ return 0;
++ }
++
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_realm_info)))
++ return 0;
++
++ return 1;
++}
++
++static struct ipt_match realm_match = {
++ .name = "realm",
++ .match = match,
++ .checkentry = check,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&realm_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&realm_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_sctp.c linux-2.6.3/net/ipv4/netfilter/ipt_sctp.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_sctp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_sctp.c 2004-02-27 00:03:16.145773648 +0100
+@@ -0,0 +1,161 @@
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <net/ip.h>
++#include <linux/sctp.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_sctp.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Kiran Kumar Immidi");
++MODULE_DESCRIPTION("Match for SCTP protocol packets");
++
++#if 0
++#define duprintf(format, args...) printk(format , ## args)
++#else
++#define duprintf(format, args...)
++#endif
++
++#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
++ || (!!((invflag) & (option)) ^ (cond)))
++static int
++match_packet(const struct sk_buff *skb,
++ const u_int32_t *chunkmap,
++ int chunk_match_type,
++ int *hotdrop)
++{
++ int offset;
++ u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
++ sctp_chunkhdr_t sch;
++
++ int i = 0;
++
++ if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) {
++ SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
++ }
++
++ offset = skb->nh.iph->ihl * 4 + sizeof (sctp_sctphdr_t);
++ do {
++ if (skb_copy_bits(skb, offset, &sch, sizeof(sch)) < 0) {
++ duprintf("Dropping invalid SCTP packet.\n");
++ *hotdrop = 1;
++ return 0;
++ }
++
++ duprintf("SCTP chunk num: %d\toffset: %d\ttype: %d\tlength: %d\n",
++ ++i, offset, sch.type, htons(sch.length));
++
++ offset += (htons(sch.length) + 3) & ~3;
++
++ duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
++
++ if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch.type)) {
++ switch (chunk_match_type) {
++ case SCTP_CHUNK_MATCH_ANY:
++ return 1;
++
++ case SCTP_CHUNK_MATCH_ALL:
++ SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch.type);
++ }
++ } else {
++ switch (chunk_match_type) {
++ case SCTP_CHUNK_MATCH_ONLY:
++ return 0;
++ }
++ }
++ } while (offset < skb->len);
++
++ switch (chunk_match_type) {
++ case SCTP_CHUNK_MATCH_ALL:
++ return SCTP_CHUNKMAP_IS_CLEAR(chunkmap);
++ case SCTP_CHUNK_MATCH_ANY:
++ return 0;
++ case SCTP_CHUNK_MATCH_ONLY:
++ return 1;
++ }
++
++ /* This will never be reached, but required to stop compiler whine */
++ return 0;
++}
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_sctp_info *info;
++ sctp_sctphdr_t sh;
++
++ info = (const struct ipt_sctp_info *)matchinfo;
++
++ if (offset) {
++ duprintf("Dropping non-first fragment.. FIXME\n");
++ return 0;
++ }
++
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &sh, sizeof(sh)) < 0) {
++ duprintf("Dropping evil TCP offset=0 tinygram.\n");
++ *hotdrop = 1;
++ return 0;
++ }
++ duprintf("spt: %d\tdpt: %d\n", ntohs(sh.source), ntohs(sh.dest));
++
++ return SCCHECK(((ntohs(sh.source) >= info->spts[0])
++ && (ntohs(sh.source) <= info->spts[1])),
++ IPT_SCTP_SRC_PORTS, info->flags, info->invflags)
++ && SCCHECK(((ntohs(sh.dest) >= info->dpts[0])
++ && (ntohs(sh.dest) <= info->dpts[1])),
++ IPT_SCTP_DEST_PORTS, info->flags, info->invflags)
++ && SCCHECK(match_packet(skb, info->chunkmap, info->chunk_match_type,
++ hotdrop),
++ IPT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
++}
++
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ const struct ipt_sctp_info *info;
++
++ info = (const struct ipt_sctp_info *)matchinfo;
++
++ return ip->proto == IPPROTO_SCTP
++ && !(ip->invflags & IPT_INV_PROTO)
++ && matchsize == IPT_ALIGN(sizeof(struct ipt_sctp_info))
++ && !(info->flags & ~IPT_SCTP_VALID_FLAGS)
++ && !(info->invflags & ~IPT_SCTP_VALID_FLAGS)
++ && !(info->invflags & ~info->flags)
++ && !(info->invflags
++ & (SCTP_CHUNK_MATCH_ALL
++ | SCTP_CHUNK_MATCH_ANY
++ | SCTP_CHUNK_MATCH_ONLY));
++}
++
++static struct ipt_match sctp_match =
++{
++ .list = { NULL, NULL},
++ .name = "sctp",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = NULL,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&sctp_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&sctp_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_state.c linux-2.6.3/net/ipv4/netfilter/ipt_state.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_state.c 2004-02-18 04:59:55.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_state.c 2004-02-27 00:03:14.484026272 +0100
+@@ -30,7 +30,9 @@
+ enum ip_conntrack_info ctinfo;
+ unsigned int statebit;
+
+- if (!ip_conntrack_get((struct sk_buff *)skb, &ctinfo))
++ if (skb->nfct == &ip_conntrack_untracked.infos[IP_CT_NEW])
++ statebit = IPT_STATE_UNTRACKED;
++ else if (!ip_conntrack_get((struct sk_buff *)skb, &ctinfo))
+ statebit = IPT_STATE_INVALID;
+ else
+ statebit = IPT_STATE_BIT(ctinfo);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_state.c.orig linux-2.6.3/net/ipv4/netfilter/ipt_state.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_state.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_state.c.orig 2004-02-18 04:59:55.000000000 +0100
+@@ -0,0 +1,72 @@
++/* Kernel module to match connection tracking information. */
++
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_state.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
++MODULE_DESCRIPTION("iptables connection tracking state match module");
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ const struct ipt_state_info *sinfo = matchinfo;
++ enum ip_conntrack_info ctinfo;
++ unsigned int statebit;
++
++ if (!ip_conntrack_get((struct sk_buff *)skb, &ctinfo))
++ statebit = IPT_STATE_INVALID;
++ else
++ statebit = IPT_STATE_BIT(ctinfo);
++
++ return (sinfo->statemask & statebit);
++}
++
++static int check(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_state_info)))
++ return 0;
++
++ return 1;
++}
++
++static struct ipt_match state_match = {
++ .name = "state",
++ .match = &match,
++ .checkentry = &check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ need_ip_conntrack();
++ return ipt_register_match(&state_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&state_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_TTL.c linux-2.6.3/net/ipv4/netfilter/ipt_TTL.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_TTL.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_TTL.c 2004-02-27 00:03:07.345111552 +0100
+@@ -0,0 +1,120 @@
++/* TTL modification target for IP tables
++ * (C) 2000 by Harald Welte <laforge@gnumonks.org>
++ *
++ * Version: 1.3
++ *
++ * This software is distributed under the terms of GNU GPL
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <net/checksum.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_TTL.h>
++
++MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
++MODULE_DESCRIPTION("IP tables TTL modification module");
++MODULE_LICENSE("GPL");
++
++static unsigned int
++ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in,
++ const struct net_device *out, unsigned int hooknum,
++ const void *targinfo, void *userinfo)
++{
++ struct iphdr *iph;
++ const struct ipt_TTL_info *info = targinfo;
++ u_int16_t diffs[2];
++ int new_ttl;
++
++ if (!skb_ip_make_writable(pskb, (*pskb)->len))
++ return NF_DROP;
++
++ iph = (*pskb)->nh.iph;
++
++ switch (info->mode) {
++ case IPT_TTL_SET:
++ new_ttl = info->ttl;
++ break;
++ case IPT_TTL_INC:
++ new_ttl = iph->ttl + info->ttl;
++ if (new_ttl > 255)
++ new_ttl = 255;
++ break;
++ case IPT_TTL_DEC:
++ new_ttl = iph->ttl + info->ttl;
++ if (new_ttl < 0)
++ new_ttl = 0;
++ break;
++ default:
++ new_ttl = iph->ttl;
++ break;
++ }
++
++ if (new_ttl != iph->ttl) {
++ diffs[0] = htons(((unsigned)iph->ttl) << 8) ^ 0xFFFF;
++ iph->ttl = new_ttl;
++ diffs[1] = htons(((unsigned)iph->ttl) << 8);
++ iph->check = csum_fold(csum_partial((char *)diffs,
++ sizeof(diffs),
++ iph->check^0xFFFF));
++ (*pskb)->nfcache |= NFC_ALTERED;
++ }
++
++ return IPT_CONTINUE;
++}
++
++static int ipt_ttl_checkentry(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ struct ipt_TTL_info *info = targinfo;
++
++ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_TTL_info))) {
++ printk(KERN_WARNING "TTL: targinfosize %u != %Zu\n",
++ targinfosize,
++ IPT_ALIGN(sizeof(struct ipt_TTL_info)));
++ return 0;
++ }
++
++ if (strcmp(tablename, "mangle")) {
++ printk(KERN_WARNING "TTL: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
++ return 0;
++ }
++
++ if (info->mode > IPT_TTL_MAXMODE) {
++ printk(KERN_WARNING "TTL: invalid or unknown Mode %u\n",
++ info->mode);
++ return 0;
++ }
++
++ if ((info->mode != IPT_TTL_SET) && (info->ttl == 0)) {
++ printk(KERN_WARNING "TTL: increment/decrement doesn't make sense with value 0\n");
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ipt_target ipt_TTL = {
++ .name = "TTL",
++ .target = ipt_ttl_target,
++ .checkentry = ipt_ttl_checkentry,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_target(&ipt_TTL);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_target(&ipt_TTL);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_ULOG.c linux-2.6.3/net/ipv4/netfilter/ipt_ULOG.c
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_ULOG.c 2004-02-18 04:57:24.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_ULOG.c 2004-02-27 00:03:00.002227840 +0100
+@@ -50,6 +50,7 @@
+ #include <linux/netlink.h>
+ #include <linux/netdevice.h>
+ #include <linux/mm.h>
++#include <linux/netfilter.h>
+ #include <linux/netfilter_ipv4/ip_tables.h>
+ #include <linux/netfilter_ipv4/ipt_ULOG.h>
+ #include <linux/netfilter_ipv4/lockhelp.h>
+@@ -80,6 +81,10 @@
+ MODULE_PARM(flushtimeout, "i");
+ MODULE_PARM_DESC(flushtimeout, "buffer flush timeout");
+
++static unsigned int nflog = 1;
++MODULE_PARM(nflog, "i");
++MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
++
+ /* global data structures */
+
+ typedef struct {
+@@ -157,17 +162,17 @@
+ return skb;
+ }
+
+-static unsigned int ipt_ulog_target(struct sk_buff **pskb,
+- const struct net_device *in,
+- const struct net_device *out,
+- unsigned int hooknum,
+- const void *targinfo, void *userinfo)
++static void ipt_ulog_packet(unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct ipt_ulog_info *loginfo,
++ const char *prefix)
+ {
+ ulog_buff_t *ub;
+ ulog_packet_msg_t *pm;
+ size_t size, copy_len;
+ struct nlmsghdr *nlh;
+- struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
+
+ /* ffs == find first bit set, necessary because userspace
+ * is already shifting groupnumber, but we need unshifted.
+@@ -176,8 +181,8 @@
+
+ /* calculate the size of the skb needed */
+ if ((loginfo->copy_range == 0) ||
+- (loginfo->copy_range > (*pskb)->len)) {
+- copy_len = (*pskb)->len;
++ (loginfo->copy_range > skb->len)) {
++ copy_len = skb->len;
+ } else {
+ copy_len = loginfo->copy_range;
+ }
+@@ -214,19 +219,21 @@
+
+ /* copy hook, prefix, timestamp, payload, etc. */
+ pm->data_len = copy_len;
+- pm->timestamp_sec = (*pskb)->stamp.tv_sec;
+- pm->timestamp_usec = (*pskb)->stamp.tv_usec;
+- pm->mark = (*pskb)->nfmark;
++ pm->timestamp_sec = skb->stamp.tv_sec;
++ pm->timestamp_usec = skb->stamp.tv_usec;
++ pm->mark = skb->nfmark;
+ pm->hook = hooknum;
+- if (loginfo->prefix[0] != '\0')
++ if (prefix != NULL)
++ strncpy(pm->prefix, prefix, sizeof(pm->prefix));
++ else if (loginfo->prefix[0] != '\0')
+ strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
+ else
+ *(pm->prefix) = '\0';
+
+ if (in && in->hard_header_len > 0
+- && (*pskb)->mac.raw != (void *) (*pskb)->nh.iph
++ && skb->mac.raw != (void *) skb->nh.iph
+ && in->hard_header_len <= ULOG_MAC_LEN) {
+- memcpy(pm->mac, (*pskb)->mac.raw, in->hard_header_len);
++ memcpy(pm->mac, skb->mac.raw, in->hard_header_len);
+ pm->mac_len = in->hard_header_len;
+ } else
+ pm->mac_len = 0;
+@@ -241,8 +248,8 @@
+ else
+ pm->outdev_name[0] = '\0';
+
+- /* copy_len <= (*pskb)->len, so can't fail. */
+- if (skb_copy_bits(*pskb, 0, pm->payload, copy_len) < 0)
++ /* copy_len <= skb->len, so can't fail. */
++ if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
+ BUG();
+
+ /* check if we are building multi-part messages */
+@@ -266,8 +273,7 @@
+
+ UNLOCK_BH(&ulog_lock);
+
+- return IPT_CONTINUE;
+-
++ return;
+
+ nlmsg_failure:
+ PRINTR("ipt_ULOG: error during NLMSG_PUT\n");
+@@ -276,8 +282,35 @@
+ PRINTR("ipt_ULOG: Error building netlink message\n");
+
+ UNLOCK_BH(&ulog_lock);
++}
++
++static unsigned int ipt_ulog_target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo, void *userinfo)
++{
++ struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
+
+- return IPT_CONTINUE;
++ ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
++
++ return IPT_CONTINUE;
++}
++
++static void ipt_logfn(unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *prefix)
++{
++ struct ipt_ulog_info loginfo = {
++ .nl_group = ULOG_DEFAULT_NLGROUP,
++ .copy_range = 0,
++ .qthreshold = ULOG_DEFAULT_QTHRESHOLD,
++ .prefix = ""
++ };
++
++ ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+ }
+
+ static int ipt_ulog_checkentry(const char *tablename,
+@@ -341,7 +374,9 @@
+ sock_release(nflognl->sk_socket);
+ return -EINVAL;
+ }
+-
++ if (nflog)
++ nf_log_register(PF_INET, &ipt_logfn);
++
+ return 0;
+ }
+
+@@ -352,6 +387,8 @@
+
+ DEBUGP("ipt_ULOG: cleanup_module\n");
+
++ if (nflog)
++ nf_log_unregister(PF_INET, &ipt_logfn);
+ ipt_unregister_target(&ipt_ulog_reg);
+ sock_release(nflognl->sk_socket);
+
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/ipt_ULOG.c.orig linux-2.6.3/net/ipv4/netfilter/ipt_ULOG.c.orig
+--- linux-2.6.3.org/net/ipv4/netfilter/ipt_ULOG.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/ipt_ULOG.c.orig 2004-02-18 04:57:24.000000000 +0100
+@@ -0,0 +1,375 @@
++/*
++ * netfilter module for userspace packet logging daemons
++ *
++ * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
++ *
++ * 2000/09/22 ulog-cprange feature added
++ * 2001/01/04 in-kernel queue as proposed by Sebastian Zander
++ * <zander@fokus.gmd.de>
++ * 2001/01/30 per-rule nlgroup conflicts with global queue.
++ * nlgroup now global (sysctl)
++ * 2001/04/19 ulog-queue reworked, now fixed buffer size specified at
++ * module loadtime -HW
++ * 2002/07/07 remove broken nflog_rcv() function -HW
++ * 2002/08/29 fix shifted/unshifted nlgroup bug -HW
++ * 2002/10/30 fix uninitialized mac_len field - <Anders K. Pedersen>
++ *
++ * (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This module accepts two parameters:
++ *
++ * nlbufsiz:
++ * The parameter specifies how big the buffer for each netlink multicast
++ * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
++ * get accumulated in the kernel until they are sent to userspace. It is
++ * NOT possible to allocate more than 128kB, and it is strongly discouraged,
++ * because atomically allocating 128kB inside the network rx softirq is not
++ * reliable. Please also keep in mind that this buffer size is allocated for
++ * each nlgroup you are using, so the total kernel memory usage increases
++ * by that factor.
++ *
++ * flushtimeout:
++ * Specify, after how many clock ticks (intel: 100 per second) the queue
++ * should be flushed even if it is not full yet.
++ *
++ * ipt_ULOG.c,v 1.22 2002/10/30 09:07:31 laforge Exp
++ */
++
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/spinlock.h>
++#include <linux/socket.h>
++#include <linux/skbuff.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/netlink.h>
++#include <linux/netdevice.h>
++#include <linux/mm.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_ULOG.h>
++#include <linux/netfilter_ipv4/lockhelp.h>
++#include <net/sock.h>
++#include <linux/bitops.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
++MODULE_DESCRIPTION("iptables userspace logging module");
++
++#define ULOG_NL_EVENT 111 /* Harald's favorite number */
++#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */
++
++#if 0
++#define DEBUGP(format, args...) printk(__FILE__ ":" __FUNCTION__ ":" \
++ format, ## args)
++#else
++#define DEBUGP(format, args...)
++#endif
++
++#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format, ## args); } while (0)
++
++static unsigned int nlbufsiz = 4096;
++MODULE_PARM(nlbufsiz, "i");
++MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
++
++static unsigned int flushtimeout = 10 * HZ;
++MODULE_PARM(flushtimeout, "i");
++MODULE_PARM_DESC(flushtimeout, "buffer flush timeout");
++
++/* global data structures */
++
++typedef struct {
++ unsigned int qlen; /* number of nlmsgs' in the skb */
++ struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
++ struct sk_buff *skb; /* the pre-allocated skb */
++ struct timer_list timer; /* the timer function */
++} ulog_buff_t;
++
++static ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS]; /* array of buffers */
++
++static struct sock *nflognl; /* our socket */
++static size_t qlen; /* current length of multipart-nlmsg */
++DECLARE_LOCK(ulog_lock); /* spinlock */
++
++/* send one ulog_buff_t to userspace */
++static void ulog_send(unsigned int nlgroupnum)
++{
++ ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
++
++ if (timer_pending(&ub->timer)) {
++ DEBUGP("ipt_ULOG: ulog_send: timer was pending, deleting\n");
++ del_timer(&ub->timer);
++ }
++
++ /* last nlmsg needs NLMSG_DONE */
++ if (ub->qlen > 1)
++ ub->lastnlh->nlmsg_type = NLMSG_DONE;
++
++ NETLINK_CB(ub->skb).dst_groups = (1 << nlgroupnum);
++ DEBUGP("ipt_ULOG: throwing %d packets to netlink mask %u\n",
++ ub->qlen, nlgroupnum);
++ netlink_broadcast(nflognl, ub->skb, 0, (1 << nlgroupnum), GFP_ATOMIC);
++
++ ub->qlen = 0;
++ ub->skb = NULL;
++ ub->lastnlh = NULL;
++
++}
++
++
++/* timer function to flush queue in ULOG_FLUSH_INTERVAL time */
++static void ulog_timer(unsigned long data)
++{
++ DEBUGP("ipt_ULOG: timer function called, calling ulog_send\n");
++
++ /* lock to protect against somebody modifying our structure
++ * from ipt_ulog_target at the same time */
++ LOCK_BH(&ulog_lock);
++ ulog_send(data);
++ UNLOCK_BH(&ulog_lock);
++}
++
++struct sk_buff *ulog_alloc_skb(unsigned int size)
++{
++ struct sk_buff *skb;
++
++ /* alloc skb which should be big enough for a whole
++ * multipart message. WARNING: has to be <= 131000
++ * due to slab allocator restrictions */
++
++ skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
++ if (!skb) {
++ PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n",
++ nlbufsiz);
++
++ /* try to allocate only as much as we need for
++ * current packet */
++
++ skb = alloc_skb(size, GFP_ATOMIC);
++ if (!skb)
++ PRINTR("ipt_ULOG: can't even allocate %ub\n", size);
++ }
++
++ return skb;
++}
++
++static unsigned int ipt_ulog_target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo, void *userinfo)
++{
++ ulog_buff_t *ub;
++ ulog_packet_msg_t *pm;
++ size_t size, copy_len;
++ struct nlmsghdr *nlh;
++ struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
++
++ /* ffs == find first bit set, necessary because userspace
++ * is already shifting groupnumber, but we need unshifted.
++ * ffs() returns [1..32], we need [0..31] */
++ unsigned int groupnum = ffs(loginfo->nl_group) - 1;
++
++ /* calculate the size of the skb needed */
++ if ((loginfo->copy_range == 0) ||
++ (loginfo->copy_range > (*pskb)->len)) {
++ copy_len = (*pskb)->len;
++ } else {
++ copy_len = loginfo->copy_range;
++ }
++
++ size = NLMSG_SPACE(sizeof(*pm) + copy_len);
++
++ ub = &ulog_buffers[groupnum];
++
++ LOCK_BH(&ulog_lock);
++
++ if (!ub->skb) {
++ if (!(ub->skb = ulog_alloc_skb(size)))
++ goto alloc_failure;
++ } else if (ub->qlen >= loginfo->qthreshold ||
++ size > skb_tailroom(ub->skb)) {
++ /* either the queue len is too high or we don't have
++ * enough room in nlskb left. send it to userspace. */
++
++ ulog_send(groupnum);
++
++ if (!(ub->skb = ulog_alloc_skb(size)))
++ goto alloc_failure;
++ }
++
++ DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen,
++ loginfo->qthreshold);
++
++ /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
++ nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
++ size - sizeof(*nlh));
++ ub->qlen++;
++
++ pm = NLMSG_DATA(nlh);
++
++ /* copy hook, prefix, timestamp, payload, etc. */
++ pm->data_len = copy_len;
++ pm->timestamp_sec = (*pskb)->stamp.tv_sec;
++ pm->timestamp_usec = (*pskb)->stamp.tv_usec;
++ pm->mark = (*pskb)->nfmark;
++ pm->hook = hooknum;
++ if (loginfo->prefix[0] != '\0')
++ strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
++ else
++ *(pm->prefix) = '\0';
++
++ if (in && in->hard_header_len > 0
++ && (*pskb)->mac.raw != (void *) (*pskb)->nh.iph
++ && in->hard_header_len <= ULOG_MAC_LEN) {
++ memcpy(pm->mac, (*pskb)->mac.raw, in->hard_header_len);
++ pm->mac_len = in->hard_header_len;
++ } else
++ pm->mac_len = 0;
++
++ if (in)
++ strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
++ else
++ pm->indev_name[0] = '\0';
++
++ if (out)
++ strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
++ else
++ pm->outdev_name[0] = '\0';
++
++ /* copy_len <= (*pskb)->len, so can't fail. */
++ if (skb_copy_bits(*pskb, 0, pm->payload, copy_len) < 0)
++ BUG();
++
++ /* check if we are building multi-part messages */
++ if (ub->qlen > 1) {
++ ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
++ }
++
++ /* if threshold is reached, send message to userspace */
++ if (qlen >= loginfo->qthreshold) {
++ if (loginfo->qthreshold > 1)
++ nlh->nlmsg_type = NLMSG_DONE;
++ }
++
++ ub->lastnlh = nlh;
++
++ /* if timer isn't already running, start it */
++ if (!timer_pending(&ub->timer)) {
++ ub->timer.expires = jiffies + flushtimeout;
++ add_timer(&ub->timer);
++ }
++
++ UNLOCK_BH(&ulog_lock);
++
++ return IPT_CONTINUE;
++
++
++nlmsg_failure:
++ PRINTR("ipt_ULOG: error during NLMSG_PUT\n");
++
++alloc_failure:
++ PRINTR("ipt_ULOG: Error building netlink message\n");
++
++ UNLOCK_BH(&ulog_lock);
++
++ return IPT_CONTINUE;
++}
++
++static int ipt_ulog_checkentry(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hookmask)
++{
++ struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
++
++ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_ulog_info))) {
++ DEBUGP("ipt_ULOG: targinfosize %u != 0\n", targinfosize);
++ return 0;
++ }
++
++ if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
++ DEBUGP("ipt_ULOG: prefix term %i\n",
++ loginfo->prefix[sizeof(loginfo->prefix) - 1]);
++ return 0;
++ }
++
++ if (loginfo->qthreshold > ULOG_MAX_QLEN) {
++ DEBUGP("ipt_ULOG: queue threshold %i > MAX_QLEN\n",
++ loginfo->qthreshold);
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ipt_target ipt_ulog_reg = {
++ .name = "ULOG",
++ .target = ipt_ulog_target,
++ .checkentry = ipt_ulog_checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ int i;
++
++ DEBUGP("ipt_ULOG: init module\n");
++
++ if (nlbufsiz >= 128*1024) {
++ printk("Netlink buffer has to be <= 128kB\n");
++ return -EINVAL;
++ }
++
++ /* initialize ulog_buffers */
++ for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
++ init_timer(&ulog_buffers[i].timer);
++ ulog_buffers[i].timer.function = ulog_timer;
++ ulog_buffers[i].timer.data = i;
++ }
++
++ nflognl = netlink_kernel_create(NETLINK_NFLOG, NULL);
++ if (!nflognl)
++ return -ENOMEM;
++
++ if (ipt_register_target(&ipt_ulog_reg) != 0) {
++ sock_release(nflognl->sk_socket);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ulog_buff_t *ub;
++ int i;
++
++ DEBUGP("ipt_ULOG: cleanup_module\n");
++
++ ipt_unregister_target(&ipt_ulog_reg);
++ sock_release(nflognl->sk_socket);
++
++ /* remove pending timers and free allocated skb's */
++ for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
++ ub = &ulog_buffers[i];
++ if (timer_pending(&ub->timer)) {
++ DEBUGP("timer was pending, deleting\n");
++ del_timer(&ub->timer);
++ }
++
++ if (ub->skb) {
++ kfree_skb(ub->skb);
++ ub->skb = NULL;
++ }
++ }
++
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/Kconfig linux-2.6.3/net/ipv4/netfilter/Kconfig
+--- linux-2.6.3.org/net/ipv4/netfilter/Kconfig 2004-02-18 04:59:13.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/Kconfig 2004-02-27 00:03:16.148773192 +0100
+@@ -579,5 +579,84 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_TARGET_IPV4OPTSSTRIP
++ tristate 'IPV4OPTSSTRIP target support'
++ depends on IP_NF_MANGLE
++ help
++
++config IP_NF_TARGET_TTL
++ tristate 'TTL target support'
++ depends on IP_NF_MANGLE
++ help
++
++config IP_NF_MATCH_CONNLIMIT
++ tristate 'Connections/IP limit match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_DSTLIMIT
++ tristate 'dstlimit match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_FUZZY
++ tristate 'fuzzy match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_IPV4OPTIONS
++ tristate 'IPV4OPTIONS match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_MPORT
++ tristate 'Multiple port with ranges match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_NTH
++ tristate 'Nth match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_MATCH_QUOTA
++ tristate 'quota match support'
++ depends on IP_NF_IPTABLES
++ help
++
++config IP_NF_TARGET_NOTRACK
++ tristate 'NOTRACK target support'
++ depends on IP_NF_RAW
++ help
++ The NOTRACK target allows a select rule to specify
++ which packets *not* to enter the conntrack/NAT
++ subsystem with all the consequences (no ICMP error tracking,
++ no protocol helpers for the selected packets).
++
++ If you want to compile it as a module, say M here and read
++ <file:Documentation/modules.txt>. If unsure, say `N'.
++
++config IP_NF_RAW
++ tristate 'raw table support (required for NOTRACK/TRACE)'
++ depends on IP_NF_IPTABLES
++ help
++ This option adds a `raw' table to iptables. This table is the very
++ first in the netfilter framework and hooks in at the PREROUTING
++ and OUTPUT chains.
++
++ If you want to compile it as a module, say M here and read
++ <file:Documentation/modules.txt>. If unsure, say `N'.
++
++
++config IP_NF_MATCH_REALM
++ tristate 'realm match support'
++ depends on IP_NF_IPTABLES && NET_CLS_ROUTE
++ help
++
++config IP_NF_MATCH_SCTP
++ tristate 'SCTP protocol match support'
++ depends on IP_NF_IPTABLES
++ help
++
+ endmenu
+
+diff -Nur linux-2.6.3.org/net/ipv4/netfilter/Makefile linux-2.6.3/net/ipv4/netfilter/Makefile
+--- linux-2.6.3.org/net/ipv4/netfilter/Makefile 2004-02-18 04:57:20.000000000 +0100
++++ linux-2.6.3/net/ipv4/netfilter/Makefile 2004-02-27 00:03:16.148773192 +0100
+@@ -38,19 +38,33 @@
+ obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
+ obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
+ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
++obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
+
+ # matches
+ obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
+ obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
++obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
++obj-$(CONFIG_IP_NF_MATCH_QUOTA) += ipt_quota.o
++obj-$(CONFIG_IP_NF_MATCH_DSTLIMIT) += ipt_dstlimit.o
+ obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
+ obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
+ obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
+
+ obj-$(CONFIG_IP_NF_MATCH_PKTTYPE) += ipt_pkttype.o
+ obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
++
++obj-$(CONFIG_IP_NF_MATCH_MPORT) += ipt_mport.o
++
+ obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
+ obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
+
++obj-$(CONFIG_IP_NF_MATCH_NTH) += ipt_nth.o
++
++obj-$(CONFIG_IP_NF_MATCH_IPV4OPTIONS) += ipt_ipv4options.o
++
++
++obj-$(CONFIG_IP_NF_MATCH_FUZZY) += ipt_fuzzy.o
++
+ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
+
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+@@ -61,8 +75,10 @@
+
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
+ obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
++obj-$(CONFIG_IP_NF_MATCH_CONNLIMIT) += ipt_connlimit.o
+ obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
+ obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
++obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
+
+ obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
+
+@@ -79,8 +95,11 @@
+ obj-$(CONFIG_IP_NF_TARGET_CLASSIFY) += ipt_CLASSIFY.o
+ obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
+ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
++obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
++obj-$(CONFIG_IP_NF_TARGET_IPV4OPTSSTRIP) += ipt_IPV4OPTSSTRIP.o
+ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
+ obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
++obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
+
+ # generic ARP tables
+ obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6table_raw.c linux-2.6.3/net/ipv6/netfilter/ip6table_raw.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6table_raw.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6table_raw.c 2004-02-27 00:03:14.469028552 +0100
+@@ -0,0 +1,154 @@
++/*
++ * IPv6 raw table, a port of the IPv4 raw table to IPv6
++ *
++ * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ */
++#include <linux/module.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++
++#define RAW_VALID_HOOKS ((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT))
++
++#if 0
++#define DEBUGP(x, args...) printk(KERN_DEBUG x, ## args)
++#else
++#define DEBUGP(x, args...)
++#endif
++
++/* Standard entry. */
++struct ip6t_standard
++{
++ struct ip6t_entry entry;
++ struct ip6t_standard_target target;
++};
++
++struct ip6t_error_target
++{
++ struct ip6t_entry_target target;
++ char errorname[IP6T_FUNCTION_MAXNAMELEN];
++};
++
++struct ip6t_error
++{
++ struct ip6t_entry entry;
++ struct ip6t_error_target target;
++};
++
++static struct
++{
++ struct ip6t_replace repl;
++ struct ip6t_standard entries[2];
++ struct ip6t_error term;
++} initial_table __initdata
++= { { "raw", RAW_VALID_HOOKS, 3,
++ sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
++ { [NF_IP6_PRE_ROUTING] 0,
++ [NF_IP6_LOCAL_OUT] sizeof(struct ip6t_standard) },
++ { [NF_IP6_PRE_ROUTING] 0,
++ [NF_IP6_LOCAL_OUT] sizeof(struct ip6t_standard) },
++ 0, NULL, { } },
++ {
++ /* PRE_ROUTING */
++ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ip6t_entry),
++ sizeof(struct ip6t_standard),
++ 0, { 0, 0 }, { } },
++ { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
++ -NF_ACCEPT - 1 } },
++ /* LOCAL_OUT */
++ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ip6t_entry),
++ sizeof(struct ip6t_standard),
++ 0, { 0, 0 }, { } },
++ { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
++ -NF_ACCEPT - 1 } },
++ },
++ /* ERROR */
++ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
++ 0,
++ sizeof(struct ip6t_entry),
++ sizeof(struct ip6t_error),
++ 0, { 0, 0 }, { } },
++ { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
++ { } },
++ "ERROR"
++ }
++ }
++};
++
++static struct ip6t_table packet_raw = {
++ .name = "raw",
++ .table = &initial_table.repl,
++ .valid_hooks = RAW_VALID_HOOKS,
++ .lock = RW_LOCK_UNLOCKED,
++ .me = THIS_MODULE
++};
++
++/* The work comes in here from netfilter.c. */
++static unsigned int
++ip6t_hook(unsigned int hook,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ip6t_do_table(pskb, hook, in, out, &packet_raw, NULL);
++}
++
++static struct nf_hook_ops ip6t_ops[] = {
++ {
++ .hook = ip6t_hook,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_PRE_ROUTING,
++ .priority = NF_IP6_PRI_FIRST
++ },
++ {
++ .hook = ip6t_hook,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_LOCAL_OUT,
++ .priority = NF_IP6_PRI_FIRST
++ },
++};
++
++static int __init init(void)
++{
++ int ret;
++
++ /* Register table */
++ ret = ip6t_register_table(&packet_raw);
++ if (ret < 0)
++ return ret;
++
++ /* Register hooks */
++ ret = nf_register_hook(&ip6t_ops[0]);
++ if (ret < 0)
++ goto cleanup_table;
++
++ ret = nf_register_hook(&ip6t_ops[1]);
++ if (ret < 0)
++ goto cleanup_hook0;
++
++ return ret;
++
++ cleanup_hook0:
++ nf_unregister_hook(&ip6t_ops[0]);
++ cleanup_table:
++ ip6t_unregister_table(&packet_raw);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ unsigned int i;
++
++ for (i = 0; i < sizeof(ip6t_ops)/sizeof(struct nf_hook_ops); i++)
++ nf_unregister_hook(&ip6t_ops[i]);
++
++ ip6t_unregister_table(&packet_raw);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_fuzzy.c linux-2.6.3/net/ipv6/netfilter/ip6t_fuzzy.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_fuzzy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_fuzzy.c 2004-02-27 00:03:09.360805120 +0100
+@@ -0,0 +1,189 @@
++/*
++ * This module implements a simple TSK FLC
++ * (Takagi-Sugeno-Kang Fuzzy Logic Controller) that aims
++ * to limit , in an adaptive and flexible way , the packet rate crossing
++ * a given stream . It serves as an initial and very simple (but effective)
++ * example of how Fuzzy Logic techniques can be applied to defeat DoS attacks.
++ * As a matter of fact , Fuzzy Logic can help us to insert any "behavior"
++ * into our code in a precise , adaptive and efficient manner.
++ * The goal is very similar to that of "limit" match , but using techniques of
++ * Fuzzy Control , that allow us to shape the transfer functions precisely ,
++ * avoiding over and undershoots - and stuff like that .
++ *
++ *
++ * 2002-08-10 Hime Aguiar e Oliveira Jr. <hime@engineer.com> : Initial version.
++ * 2002-08-17 : Changed to eliminate floating point operations .
++ * 2002-08-23 : Coding style changes .
++ * 2003-04-08 Maciej Soltysiak <solt@dns.toxicfilms.tv> : IPv6 Port
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ipv6.h>
++#include <linux/random.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_fuzzy.h>
++
++/*
++ Packet Acceptance Rate - LOW and Packet Acceptance Rate - HIGH
++ Expressed in percentage
++*/
++
++#define PAR_LOW 1/100
++#define PAR_HIGH 1
++
++static spinlock_t fuzzy_lock = SPIN_LOCK_UNLOCKED;
++
++MODULE_AUTHOR("Hime Aguiar e Oliveira Junior <hime@engineer.com>");
++MODULE_DESCRIPTION("IP tables Fuzzy Logic Controller match module");
++MODULE_LICENSE("GPL");
++
++static u_int8_t mf_high(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
++{
++ if (tx >= maxi) return 100;
++
++ if (tx <= mini) return 0;
++
++ return ((100 * (tx-mini)) / (maxi-mini));
++}
++
++static u_int8_t mf_low(u_int32_t tx,u_int32_t mini,u_int32_t maxi)
++{
++ if (tx <= mini) return 100;
++
++ if (tx >= maxi) return 0;
++
++ return ((100 * (maxi - tx)) / (maxi - mini));
++
++}
++
++static int
++ip6t_fuzzy_match(const struct sk_buff *pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++{
++ /* From userspace */
++
++ struct ip6t_fuzzy_info *info = (struct ip6t_fuzzy_info *) matchinfo;
++
++ u_int8_t random_number;
++ unsigned long amount;
++ u_int8_t howhigh, howlow;
++
++
++ spin_lock_bh(&fuzzy_lock); /* Rise the lock */
++
++ info->bytes_total += pskb->len;
++ info->packets_total++;
++
++ info->present_time = jiffies;
++
++ if (info->present_time >= info->previous_time)
++ amount = info->present_time - info->previous_time;
++ else {
++ /* There was a transition : I choose to re-sample
++ and keep the old acceptance rate...
++ */
++
++ amount = 0;
++ info->previous_time = info->present_time;
++ info->bytes_total = info->packets_total = 0;
++ };
++
++ if ( amount > HZ/10) {/* More than 100 ms elapsed ... */
++
++ info->mean_rate = (u_int32_t) ((HZ * info->packets_total) \
++ / amount);
++
++ info->previous_time = info->present_time;
++ info->bytes_total = info->packets_total = 0;
++
++ howhigh = mf_high(info->mean_rate,info->minimum_rate,info->maximum_rate);
++ howlow = mf_low(info->mean_rate,info->minimum_rate,info->maximum_rate);
++
++ info->acceptance_rate = (u_int8_t) \
++ (howhigh * PAR_LOW + PAR_HIGH * howlow);
++
++ /* In fact, the above defuzzification would require a denominator
++ * proportional to (howhigh+howlow) but, in this particular case,
++ * that expression is constant.
++ * An immediate consequence is that it is not necessary to call
++ * both mf_high and mf_low - but to keep things understandable,
++ * I did so.
++ */
++
++ }
++
++ spin_unlock_bh(&fuzzy_lock); /* Release the lock */
++
++
++ if (info->acceptance_rate < 100)
++ {
++ get_random_bytes((void *)(&random_number), 1);
++
++ /* If within the acceptance , it can pass => don't match */
++ if (random_number <= (255 * info->acceptance_rate) / 100)
++ return 0;
++ else
++ return 1; /* It can't pass (It matches) */
++ };
++
++ return 0; /* acceptance_rate == 100 % => Everything passes ... */
++
++}
++
++static int
++ip6t_fuzzy_checkentry(const char *tablename,
++ const struct ip6t_ip6 *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++
++ const struct ip6t_fuzzy_info *info = matchinfo;
++
++ if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_fuzzy_info))) {
++ printk("ip6t_fuzzy: matchsize %u != %u\n", matchsize,
++ IP6T_ALIGN(sizeof(struct ip6t_fuzzy_info)));
++ return 0;
++ }
++
++ if ((info->minimum_rate < MINFUZZYRATE) || (info->maximum_rate > MAXFUZZYRATE)
++ || (info->minimum_rate >= info->maximum_rate)) {
++ printk("ip6t_fuzzy: BAD limits , please verify !!!\n");
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ip6t_match ip6t_fuzzy_reg = {
++ {NULL, NULL},
++ "fuzzy",
++ ip6t_fuzzy_match,
++ ip6t_fuzzy_checkentry,
++ NULL,
++ THIS_MODULE };
++
++static int __init init(void)
++{
++ if (ip6t_register_match(&ip6t_fuzzy_reg))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_match(&ip6t_fuzzy_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_HL.c linux-2.6.3/net/ipv6/netfilter/ip6t_HL.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_HL.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_HL.c 2004-02-27 00:03:05.118450056 +0100
+@@ -0,0 +1,105 @@
++/*
++ * Hop Limit modification target for ip6tables
++ * Maciej Soltysiak <solt@dns.toxicfilms.tv>
++ * Based on HW's TTL module
++ *
++ * This software is distributed under the terms of GNU GPL
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_HL.h>
++
++MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
++MODULE_DESCRIPTION("IP tables Hop Limit modification module");
++MODULE_LICENSE("GPL");
++
++static unsigned int ip6t_hl_target(struct sk_buff **pskb, unsigned int hooknum,
++ const struct net_device *in, const struct net_device *out,
++ const void *targinfo, void *userinfo)
++{
++ struct ipv6hdr *ip6h = (*pskb)->nh.ipv6h;
++ const struct ip6t_HL_info *info = targinfo;
++ u_int16_t diffs[2];
++ int new_hl;
++
++ switch (info->mode) {
++ case IP6T_HL_SET:
++ new_hl = info->hop_limit;
++ break;
++ case IP6T_HL_INC:
++ new_hl = ip6h->hop_limit + info->hop_limit;
++ if (new_hl > 255)
++ new_hl = 255;
++ break;
++ case IP6T_HL_DEC:
++ new_hl = ip6h->hop_limit - info->hop_limit;
++ if (new_hl < 0)
++ new_hl = 0;
++ break;
++ default:
++ new_hl = ip6h->hop_limit;
++ break;
++ }
++
++ if (new_hl != ip6h->hop_limit) {
++ diffs[0] = htons(((unsigned)ip6h->hop_limit) << 8) ^ 0xFFFF;
++ ip6h->hop_limit = new_hl;
++ diffs[1] = htons(((unsigned)ip6h->hop_limit) << 8);
++ }
++
++ return IP6T_CONTINUE;
++}
++
++static int ip6t_hl_checkentry(const char *tablename,
++ const struct ip6t_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ struct ip6t_HL_info *info = targinfo;
++
++ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_HL_info))) {
++ printk(KERN_WARNING "HL: targinfosize %u != %Zu\n",
++ targinfosize,
++ IP6T_ALIGN(sizeof(struct ip6t_HL_info)));
++ return 0;
++ }
++
++ if (strcmp(tablename, "mangle")) {
++ printk(KERN_WARNING "HL: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
++ return 0;
++ }
++
++ if (info->mode > IP6T_HL_MAXMODE) {
++ printk(KERN_WARNING "HL: invalid or unknown Mode %u\n",
++ info->mode);
++ return 0;
++ }
++
++ if ((info->mode != IP6T_HL_SET) && (info->hop_limit == 0)) {
++ printk(KERN_WARNING "HL: increment/decrement doesn't make sense with value 0\n");
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ip6t_target ip6t_HL = { { NULL, NULL }, "HL",
++ ip6t_hl_target, ip6t_hl_checkentry, NULL, THIS_MODULE };
++
++static int __init init(void)
++{
++ return ip6t_register_target(&ip6t_HL);
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_target(&ip6t_HL);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_LOG.c linux-2.6.3/net/ipv6/netfilter/ip6t_LOG.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_LOG.c 2004-02-18 04:57:21.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_LOG.c 2004-02-27 00:03:00.003227688 +0100
+@@ -18,12 +18,17 @@
+ #include <net/udp.h>
+ #include <net/tcp.h>
+ #include <net/ipv6.h>
++#include <linux/netfilter.h>
+ #include <linux/netfilter_ipv6/ip6_tables.h>
+
+ MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
+ MODULE_DESCRIPTION("IP6 tables LOG target module");
+ MODULE_LICENSE("GPL");
+
++static unsigned int nflog = 1;
++MODULE_PARM(nflog, "i");
++MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
++
+ struct in_device;
+ #include <net/route.h>
+ #include <linux/netfilter_ipv6/ip6t_LOG.h>
+@@ -265,40 +270,38 @@
+ }
+ }
+
+-static unsigned int
+-ip6t_log_target(struct sk_buff **pskb,
+- unsigned int hooknum,
++static void
++ip6t_log_packet(unsigned int hooknum,
++ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+- const void *targinfo,
+- void *userinfo)
++ const struct ip6t_log_info *loginfo,
++ const char *level_string,
++ const char *prefix)
+ {
+- struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
+- const struct ip6t_log_info *loginfo = targinfo;
+- char level_string[4] = "< >";
++ struct ipv6hdr *ipv6h = skb->nh.ipv6h;
+
+- level_string[1] = '0' + (loginfo->level % 8);
+ spin_lock_bh(&log_lock);
+ printk(level_string);
+ printk("%sIN=%s OUT=%s ",
+- loginfo->prefix,
++ prefix == NULL ? loginfo->prefix : prefix,
+ in ? in->name : "",
+ out ? out->name : "");
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
+- if ((*pskb)->dev && (*pskb)->dev->hard_header_len && (*pskb)->mac.raw != (void*)ipv6h) {
+- if ((*pskb)->dev->type != ARPHRD_SIT){
++ if (skb->dev && skb->dev->hard_header_len && skb->mac.raw != (void*)ipv6h) {
++ if (skb->dev->type != ARPHRD_SIT){
+ int i;
+- unsigned char *p = (*pskb)->mac.raw;
+- for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
++ unsigned char *p = skb->mac.raw;
++ for (i = 0; i < skb->dev->hard_header_len; i++,p++)
+ printk("%02x%c", *p,
+- i==(*pskb)->dev->hard_header_len - 1
++ i==skb->dev->hard_header_len - 1
+ ? ' ':':');
+ } else {
+ int i;
+- unsigned char *p = (*pskb)->mac.raw;
+- if ( p - (ETH_ALEN*2+2) > (*pskb)->head ){
++ unsigned char *p = skb->mac.raw;
++ if ( p - (ETH_ALEN*2+2) > skb->head ){
+ p -= (ETH_ALEN+2);
+ for (i = 0; i < (ETH_ALEN); i++,p++)
+ printk("%02x%s", *p,
+@@ -309,10 +312,10 @@
+ i == ETH_ALEN-1 ? ' ' : ':');
+ }
+
+- if (((*pskb)->dev->addr_len == 4) &&
+- (*pskb)->dev->hard_header_len > 20){
++ if ((skb->dev->addr_len == 4) &&
++ skb->dev->hard_header_len > 20){
+ printk("TUNNEL=");
+- p = (*pskb)->mac.raw + 12;
++ p = skb->mac.raw + 12;
+ for (i = 0; i < 4; i++,p++)
+ printk("%3d%s", *p,
+ i == 3 ? "->" : ".");
+@@ -328,10 +331,41 @@
+ dump_packet(loginfo, ipv6h, 1);
+ printk("\n");
+ spin_unlock_bh(&log_lock);
++}
++
++static unsigned int
++ip6t_log_target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ip6t_log_info *loginfo = targinfo;
++ char level_string[4] = "< >";
++
++ level_string[1] = '0' + (loginfo->level % 8);
++ ip6t_log_packet(hooknum, *pskb, in, out, loginfo, level_string, NULL);
+
+ return IP6T_CONTINUE;
+ }
+
++static void
++ip6t_logfn(unsigned int hooknum,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const char *prefix)
++{
++ struct ip6t_log_info loginfo = {
++ .level = 0,
++ .logflags = IP6T_LOG_MASK,
++ .prefix = ""
++ };
++
++ ip6t_log_packet(hooknum, skb, in, out, &loginfo, KERN_WARNING, prefix);
++}
++
+ static int ip6t_log_checkentry(const char *tablename,
+ const struct ip6t_entry *e,
+ void *targinfo,
+@@ -368,12 +402,16 @@
+ {
+ if (ip6t_register_target(&ip6t_log_reg))
+ return -EINVAL;
++ if (nflog)
++ nf_log_register(PF_INET6, &ip6t_logfn);
+
+ return 0;
+ }
+
+ static void __exit fini(void)
+ {
++ if (nflog)
++ nf_log_unregister(PF_INET6, &ip6t_logfn);
+ ip6t_unregister_target(&ip6t_log_reg);
+ }
+
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_LOG.c.orig linux-2.6.3/net/ipv6/netfilter/ip6t_LOG.c.orig
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_LOG.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_LOG.c.orig 2004-02-18 04:57:21.000000000 +0100
+@@ -0,0 +1,381 @@
++/*
++ * This is a module which is used for logging packets.
++ */
++
++/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
++ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/spinlock.h>
++#include <linux/icmpv6.h>
++#include <net/udp.h>
++#include <net/tcp.h>
++#include <net/ipv6.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++
++MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
++MODULE_DESCRIPTION("IP6 tables LOG target module");
++MODULE_LICENSE("GPL");
++
++struct in_device;
++#include <net/route.h>
++#include <linux/netfilter_ipv6/ip6t_LOG.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++struct esphdr {
++ __u32 spi;
++}; /* FIXME evil kludge */
++
++/* Use lock to serialize, so printks don't overlap */
++static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
++
++/* takes in current header and pointer to the header */
++/* if another header exists, sets hdrptr to the next header
++ and returns the new header value, else returns 0 */
++static u_int8_t ip6_nexthdr(u_int8_t currenthdr, u_int8_t **hdrptr)
++{
++ u_int8_t hdrlen, nexthdr = 0;
++
++ switch(currenthdr){
++ case IPPROTO_AH:
++ /* whoever decided to do the length of AUTH for ipv6
++ in 32bit units unlike other headers should be beaten...
++ repeatedly...with a large stick...no, an even LARGER
++ stick...no, you're still not thinking big enough */
++ nexthdr = **hdrptr;
++ hdrlen = *hdrptr[1] * 4 + 8;
++ *hdrptr = *hdrptr + hdrlen;
++ break;
++ /*stupid rfc2402 */
++ case IPPROTO_DSTOPTS:
++ case IPPROTO_ROUTING:
++ case IPPROTO_HOPOPTS:
++ nexthdr = **hdrptr;
++ hdrlen = *hdrptr[1] * 8 + 8;
++ *hdrptr = *hdrptr + hdrlen;
++ break;
++ case IPPROTO_FRAGMENT:
++ nexthdr = **hdrptr;
++ *hdrptr = *hdrptr + 8;
++ break;
++ }
++ return nexthdr;
++
++}
++
++/* One level of recursion won't kill us */
++static void dump_packet(const struct ip6t_log_info *info,
++ struct ipv6hdr *ipv6h, int recurse)
++{
++ u_int8_t currenthdr = ipv6h->nexthdr;
++ u_int8_t *hdrptr;
++ int fragment;
++
++ /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000" */
++ printk("SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->saddr));
++ printk("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->daddr));
++
++ /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
++ printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
++ ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr),
++ (ntohl(*(u_int32_t *)ipv6h) & 0x0ff00000) >> 20,
++ ipv6h->hop_limit,
++ (ntohl(*(u_int32_t *)ipv6h) & 0x000fffff));
++
++ fragment = 0;
++ hdrptr = (u_int8_t *)(ipv6h + 1);
++ while (currenthdr) {
++ if ((currenthdr == IPPROTO_TCP) ||
++ (currenthdr == IPPROTO_UDP) ||
++ (currenthdr == IPPROTO_ICMPV6))
++ break;
++ /* Max length: 48 "OPT (...) " */
++ printk("OPT ( ");
++ switch (currenthdr) {
++ case IPPROTO_FRAGMENT: {
++ struct frag_hdr *fhdr = (struct frag_hdr *)hdrptr;
++
++ /* Max length: 11 "FRAG:65535 " */
++ printk("FRAG:%u ", ntohs(fhdr->frag_off) & 0xFFF8);
++
++ /* Max length: 11 "INCOMPLETE " */
++ if (fhdr->frag_off & htons(0x0001))
++ printk("INCOMPLETE ");
++
++ printk("ID:%08x ", fhdr->identification);
++
++ if (ntohs(fhdr->frag_off) & 0xFFF8)
++ fragment = 1;
++
++ break;
++ }
++ case IPPROTO_DSTOPTS:
++ case IPPROTO_ROUTING:
++ case IPPROTO_HOPOPTS:
++ break;
++ /* Max Length */
++ case IPPROTO_AH:
++ case IPPROTO_ESP:
++ if (info->logflags & IP6T_LOG_IPOPT) {
++ struct esphdr *esph = (struct esphdr *)hdrptr;
++ int esp = (currenthdr == IPPROTO_ESP);
++
++ /* Max length: 4 "ESP " */
++ printk("%s ",esp ? "ESP" : "AH");
++
++ /* Length: 15 "SPI=0xF1234567 " */
++ printk("SPI=0x%x ", ntohl(esph->spi) );
++ break;
++ }
++ default:
++ break;
++ }
++ printk(") ");
++ currenthdr = ip6_nexthdr(currenthdr, &hdrptr);
++ }
++
++ switch (currenthdr) {
++ case IPPROTO_TCP: {
++ struct tcphdr *tcph = (struct tcphdr *)hdrptr;
++
++ /* Max length: 10 "PROTO=TCP " */
++ printk("PROTO=TCP ");
++
++ if (fragment)
++ break;
++
++ /* Max length: 20 "SPT=65535 DPT=65535 " */
++ printk("SPT=%u DPT=%u ",
++ ntohs(tcph->source), ntohs(tcph->dest));
++ /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
++ if (info->logflags & IP6T_LOG_TCPSEQ)
++ printk("SEQ=%u ACK=%u ",
++ ntohl(tcph->seq), ntohl(tcph->ack_seq));
++ /* Max length: 13 "WINDOW=65535 " */
++ printk("WINDOW=%u ", ntohs(tcph->window));
++ /* Max length: 9 "RES=0x3F " */
++ printk("RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(tcph) & TCP_RESERVED_BITS) >> 22));
++ /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
++ if (tcph->cwr)
++ printk("CWR ");
++ if (tcph->ece)
++ printk("ECE ");
++ if (tcph->urg)
++ printk("URG ");
++ if (tcph->ack)
++ printk("ACK ");
++ if (tcph->psh)
++ printk("PSH ");
++ if (tcph->rst)
++ printk("RST ");
++ if (tcph->syn)
++ printk("SYN ");
++ if (tcph->fin)
++ printk("FIN ");
++ /* Max length: 11 "URGP=65535 " */
++ printk("URGP=%u ", ntohs(tcph->urg_ptr));
++
++ if ((info->logflags & IP6T_LOG_TCPOPT)
++ && tcph->doff * 4 != sizeof(struct tcphdr)) {
++ unsigned int i;
++
++ /* Max length: 127 "OPT (" 15*4*2chars ") " */
++ printk("OPT (");
++ for (i =sizeof(struct tcphdr); i < tcph->doff * 4; i++)
++ printk("%02X", ((u_int8_t *)tcph)[i]);
++ printk(") ");
++ }
++ break;
++ }
++ case IPPROTO_UDP: {
++ struct udphdr *udph = (struct udphdr *)hdrptr;
++
++ /* Max length: 10 "PROTO=UDP " */
++ printk("PROTO=UDP ");
++
++ if (fragment)
++ break;
++
++ /* Max length: 20 "SPT=65535 DPT=65535 " */
++ printk("SPT=%u DPT=%u LEN=%u ",
++ ntohs(udph->source), ntohs(udph->dest),
++ ntohs(udph->len));
++ break;
++ }
++ case IPPROTO_ICMPV6: {
++ struct icmp6hdr *icmp6h = (struct icmp6hdr *)hdrptr;
++
++ /* Max length: 13 "PROTO=ICMPv6 " */
++ printk("PROTO=ICMPv6 ");
++
++ if (fragment)
++ break;
++
++ /* Max length: 18 "TYPE=255 CODE=255 " */
++ printk("TYPE=%u CODE=%u ", icmp6h->icmp6_type, icmp6h->icmp6_code);
++
++ switch (icmp6h->icmp6_type) {
++ case ICMPV6_ECHO_REQUEST:
++ case ICMPV6_ECHO_REPLY:
++ /* Max length: 19 "ID=65535 SEQ=65535 " */
++ printk("ID=%u SEQ=%u ",
++ ntohs(icmp6h->icmp6_identifier),
++ ntohs(icmp6h->icmp6_sequence));
++ break;
++ case ICMPV6_MGM_QUERY:
++ case ICMPV6_MGM_REPORT:
++ case ICMPV6_MGM_REDUCTION:
++ break;
++
++ case ICMPV6_PARAMPROB:
++ /* Max length: 17 "POINTER=ffffffff " */
++ printk("POINTER=%08x ", ntohl(icmp6h->icmp6_pointer));
++ /* Fall through */
++ case ICMPV6_DEST_UNREACH:
++ case ICMPV6_PKT_TOOBIG:
++ case ICMPV6_TIME_EXCEED:
++ /* Max length: 3+maxlen */
++ if (recurse) {
++ printk("[");
++ dump_packet(info, (struct ipv6hdr *)(icmp6h + 1), 0);
++ printk("] ");
++ }
++
++ /* Max length: 10 "MTU=65535 " */
++ if (icmp6h->icmp6_type == ICMPV6_PKT_TOOBIG)
++ printk("MTU=%u ", ntohl(icmp6h->icmp6_mtu));
++ }
++ break;
++ }
++ /* Max length: 10 "PROTO 255 " */
++ default:
++ printk("PROTO=%u ", currenthdr);
++ }
++}
++
++static unsigned int
++ip6t_log_target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++{
++ struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
++ const struct ip6t_log_info *loginfo = targinfo;
++ char level_string[4] = "< >";
++
++ level_string[1] = '0' + (loginfo->level % 8);
++ spin_lock_bh(&log_lock);
++ printk(level_string);
++ printk("%sIN=%s OUT=%s ",
++ loginfo->prefix,
++ in ? in->name : "",
++ out ? out->name : "");
++ if (in && !out) {
++ /* MAC logging for input chain only. */
++ printk("MAC=");
++ if ((*pskb)->dev && (*pskb)->dev->hard_header_len && (*pskb)->mac.raw != (void*)ipv6h) {
++ if ((*pskb)->dev->type != ARPHRD_SIT){
++ int i;
++ unsigned char *p = (*pskb)->mac.raw;
++ for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
++ printk("%02x%c", *p,
++ i==(*pskb)->dev->hard_header_len - 1
++ ? ' ':':');
++ } else {
++ int i;
++ unsigned char *p = (*pskb)->mac.raw;
++ if ( p - (ETH_ALEN*2+2) > (*pskb)->head ){
++ p -= (ETH_ALEN+2);
++ for (i = 0; i < (ETH_ALEN); i++,p++)
++ printk("%02x%s", *p,
++ i == ETH_ALEN-1 ? "->" : ":");
++ p -= (ETH_ALEN*2);
++ for (i = 0; i < (ETH_ALEN); i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN-1 ? ' ' : ':');
++ }
++
++ if (((*pskb)->dev->addr_len == 4) &&
++ (*pskb)->dev->hard_header_len > 20){
++ printk("TUNNEL=");
++ p = (*pskb)->mac.raw + 12;
++ for (i = 0; i < 4; i++,p++)
++ printk("%3d%s", *p,
++ i == 3 ? "->" : ".");
++ for (i = 0; i < 4; i++,p++)
++ printk("%3d%c", *p,
++ i == 3 ? ' ' : '.');
++ }
++ }
++ } else
++ printk(" ");
++ }
++
++ dump_packet(loginfo, ipv6h, 1);
++ printk("\n");
++ spin_unlock_bh(&log_lock);
++
++ return IP6T_CONTINUE;
++}
++
++static int ip6t_log_checkentry(const char *tablename,
++ const struct ip6t_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ const struct ip6t_log_info *loginfo = targinfo;
++
++ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_log_info))) {
++ DEBUGP("LOG: targinfosize %u != %u\n",
++ targinfosize, IP6T_ALIGN(sizeof(struct ip6t_log_info)));
++ return 0;
++ }
++
++ if (loginfo->level >= 8) {
++ DEBUGP("LOG: level %u >= 8\n", loginfo->level);
++ return 0;
++ }
++
++ if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
++ DEBUGP("LOG: prefix term %i\n",
++ loginfo->prefix[sizeof(loginfo->prefix)-1]);
++ return 0;
++ }
++
++ return 1;
++}
++
++static struct ip6t_target ip6t_log_reg
++= { { NULL, NULL }, "LOG", ip6t_log_target, ip6t_log_checkentry, NULL,
++ THIS_MODULE };
++
++static int __init init(void)
++{
++ if (ip6t_register_target(&ip6t_log_reg))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_target(&ip6t_log_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_nth.c linux-2.6.3/net/ipv6/netfilter/ip6t_nth.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_nth.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_nth.c 2004-02-27 00:03:12.719294552 +0100
+@@ -0,0 +1,173 @@
++/*
++ This is a module which is used for match support for every Nth packet
++ This file is distributed under the terms of the GNU General Public
++ License (GPL). Copies of the GPL can be obtained from:
++ ftp://prep.ai.mit.edu/pub/gnu/GPL
++
++ 2001-07-18 Fabrice MARIE <fabrice@netfilter.org> : initial implementation.
++ 2001-09-20 Richard Wagner (rwagner@cloudnet.com)
++ * added support for multiple counters
++ * added support for matching on individual packets
++ in the counter cycle
++ 2003-04-30 Maciej Soltysiak <solt@dns.toxicfilms.tv> : IPv6 Port
++
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_nth.h>
++
++MODULE_LICENSE("GPL");
++
++/*
++ * State information.
++ */
++struct state {
++ spinlock_t lock;
++ u_int16_t number;
++};
++
++static struct state states[IP6T_NTH_NUM_COUNTERS];
++
++static int
++ip6t_nth_match(const struct sk_buff *pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++{
++ /* Parameters from userspace */
++ const struct ip6t_nth_info *info = matchinfo;
++ unsigned counter = info->counter;
++ if (counter >= IP6T_NTH_NUM_COUNTERS)
++ {
++ printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IP6T_NTH_NUM_COUNTERS-1);
++ return 0;
++ };
++
++ spin_lock(&states[counter].lock);
++
++ /* Are we matching every nth packet?*/
++ if (info->packet == 0xFF)
++ {
++ /* We're matching every nth packet and only every nth packet*/
++ /* Do we match or invert match? */
++ if (info->not == 0)
++ {
++ if (states[counter].number == 0)
++ {
++ ++states[counter].number;
++ goto match;
++ }
++ if (states[counter].number >= info->every)
++ states[counter].number = 0; /* reset the counter */
++ else
++ ++states[counter].number;
++ goto dontmatch;
++ }
++ else
++ {
++ if (states[counter].number == 0)
++ {
++ ++states[counter].number;
++ goto dontmatch;
++ }
++ if (states[counter].number >= info->every)
++ states[counter].number = 0;
++ else
++ ++states[counter].number;
++ goto match;
++ }
++ }
++ else
++ {
++ /* We're using the --packet, so there must be a rule for every value */
++ if (states[counter].number == info->packet)
++ {
++ /* only increment the counter when a match happens */
++ if (states[counter].number >= info->every)
++ states[counter].number = 0; /* reset the counter */
++ else
++ ++states[counter].number;
++ goto match;
++ }
++ else
++ goto dontmatch;
++ }
++
++ dontmatch:
++ /* don't match */
++ spin_unlock(&states[counter].lock);
++ return 0;
++
++ match:
++ spin_unlock(&states[counter].lock);
++ return 1;
++}
++
++static int
++ip6t_nth_checkentry(const char *tablename,
++ const struct ip6t_ip6 *e,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ /* Parameters from userspace */
++ const struct ip6t_nth_info *info = matchinfo;
++ unsigned counter = info->counter;
++ if (counter >= IP6T_NTH_NUM_COUNTERS)
++ {
++ printk(KERN_WARNING "nth: invalid counter %u. counter between 0 and %u\n", counter, IP6T_NTH_NUM_COUNTERS-1);
++ return 0;
++ };
++
++ if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_nth_info))) {
++ printk("nth: matchsize %u != %u\n", matchsize,
++ IP6T_ALIGN(sizeof(struct ip6t_nth_info)));
++ return 0;
++ }
++
++ states[counter].number = info->startat;
++
++ return 1;
++}
++
++static struct ip6t_match ip6t_nth_reg = {
++ {NULL, NULL},
++ "nth",
++ ip6t_nth_match,
++ ip6t_nth_checkentry,
++ NULL,
++ THIS_MODULE };
++
++static int __init init(void)
++{
++ unsigned counter;
++ memset(&states, 0, sizeof(states));
++ if (ip6t_register_match(&ip6t_nth_reg))
++ return -EINVAL;
++
++ for(counter = 0; counter < IP6T_NTH_NUM_COUNTERS; counter++)
++ {
++ spin_lock_init(&(states[counter].lock));
++ };
++
++ printk("ip6t_nth match loaded\n");
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_match(&ip6t_nth_reg);
++ printk("ip6t_nth match unloaded\n");
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/ip6t_REJECT.c linux-2.6.3/net/ipv6/netfilter/ip6t_REJECT.c
+--- linux-2.6.3.org/net/ipv6/netfilter/ip6t_REJECT.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/ip6t_REJECT.c 2004-02-27 00:03:06.643218256 +0100
+@@ -0,0 +1,274 @@
++/*
++ * This is a module which is used for rejecting packets.
++ * Added support for customized reject packets (Jozsef Kadlecsik).
++ * Sun 12 Nov 2000
++ * Port to IPv6 / ip6tables (Harald Welte <laforge@gnumonks.org>)
++ */
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/icmpv6.h>
++#include <net/tcp.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_REJECT.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++#if 0
++/* Send RST reply */
++static void send_reset(struct sk_buff *oldskb)
++{
++ struct sk_buff *nskb;
++ struct tcphdr *otcph, *tcph;
++ struct rtable *rt;
++ unsigned int otcplen;
++ int needs_ack;
++
++ /* IP header checks: fragment, too short. */
++ if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)
++ || oldskb->len < (oldskb->nh.iph->ihl<<2) + sizeof(struct tcphdr))
++ return;
++
++ otcph = (struct tcphdr *)((u_int32_t*)oldskb->nh.iph + oldskb->nh.iph->ihl);
++ otcplen = oldskb->len - oldskb->nh.iph->ihl*4;
++
++ /* No RST for RST. */
++ if (otcph->rst)
++ return;
++
++ /* Check checksum. */
++ if (tcp_v4_check(otcph, otcplen, oldskb->nh.iph->saddr,
++ oldskb->nh.iph->daddr,
++ csum_partial((char *)otcph, otcplen, 0)) != 0)
++ return;
++
++ /* Copy skb (even if skb is about to be dropped, we can't just
++ clone it because there may be other things, such as tcpdump,
++ interested in it) */
++ nskb = skb_copy(oldskb, GFP_ATOMIC);
++ if (!nskb)
++ return;
++
++ /* This packet will not be the same as the other: clear nf fields */
++ nf_conntrack_put(nskb->nfct);
++ nskb->nfct = NULL;
++ nskb->nfcache = 0;
++#ifdef CONFIG_NETFILTER_DEBUG
++ nskb->nf_debug = 0;
++#endif
++
++ tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
++
++ nskb->nh.iph->daddr = xchg(&nskb->nh.iph->saddr, nskb->nh.iph->daddr);
++ tcph->source = xchg(&tcph->dest, tcph->source);
++
++ /* Truncate to length (no data) */
++ tcph->doff = sizeof(struct tcphdr)/4;
++ skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
++ nskb->nh.iph->tot_len = htons(nskb->len);
++
++ if (tcph->ack) {
++ needs_ack = 0;
++ tcph->seq = otcph->ack_seq;
++ tcph->ack_seq = 0;
++ } else {
++ needs_ack = 1;
++ tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn + otcph->fin
++ + otcplen - (otcph->doff<<2));
++ tcph->seq = 0;
++ }
++
++ /* Reset flags */
++ ((u_int8_t *)tcph)[13] = 0;
++ tcph->rst = 1;
++ tcph->ack = needs_ack;
++
++ tcph->window = 0;
++ tcph->urg_ptr = 0;
++
++ /* Adjust TCP checksum */
++ tcph->check = 0;
++ tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
++ nskb->nh.iph->saddr,
++ nskb->nh.iph->daddr,
++ csum_partial((char *)tcph,
++ sizeof(struct tcphdr), 0));
++
++ /* Adjust IP TTL, DF */
++ nskb->nh.iph->ttl = MAXTTL;
++ /* Set DF, id = 0 */
++ nskb->nh.iph->frag_off = htons(IP_DF);
++ nskb->nh.iph->id = 0;
++
++ /* Adjust IP checksum */
++ nskb->nh.iph->check = 0;
++ nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
++ nskb->nh.iph->ihl);
++
++ /* Routing */
++ if (ip_route_output(&rt, nskb->nh.iph->daddr, nskb->nh.iph->saddr,
++ RT_TOS(nskb->nh.iph->tos) | RTO_CONN,
++ 0) != 0)
++ goto free_nskb;
++
++ dst_release(nskb->dst);
++ nskb->dst = &rt->u.dst;
++
++ /* "Never happens" */
++ if (nskb->len > nskb->dst->pmtu)
++ goto free_nskb;
++
++ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
++ ip_finish_output);
++ return;
++
++ free_nskb:
++ kfree_skb(nskb);
++}
++#endif
++
++static unsigned int reject6_target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ip6t_reject_info *reject = targinfo;
++
++ /* WARNING: This code causes reentry within ip6tables.
++ This means that the ip6tables jump stack is now crap. We
++ must return an absolute verdict. --RR */
++ DEBUGP("REJECTv6: calling icmpv6_send\n");
++ switch (reject->with) {
++ case IP6T_ICMP6_NO_ROUTE:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, out);
++ break;
++ case IP6T_ICMP6_ADM_PROHIBITED:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0, out);
++ break;
++ case IP6T_ICMP6_NOT_NEIGHBOUR:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, out);
++ break;
++ case IP6T_ICMP6_ADDR_UNREACH:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, out);
++ break;
++ case IP6T_ICMP6_PORT_UNREACH:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, out);
++ break;
++#if 0
++ case IPT_ICMP_ECHOREPLY: {
++ struct icmp6hdr *icmph = (struct icmphdr *)
++ ((u_int32_t *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl);
++ unsigned int datalen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
++
++ /* Not non-head frags, or truncated */
++ if (((ntohs((*pskb)->nh.iph->frag_off) & IP_OFFSET) == 0)
++ && datalen >= 4) {
++ /* Usually I don't like cut & pasting code,
++ but dammit, my party is starting in 45
++ mins! --RR */
++ struct icmp_bxm icmp_param;
++
++ icmp_param.icmph=*icmph;
++ icmp_param.icmph.type=ICMP_ECHOREPLY;
++ icmp_param.data_ptr=(icmph+1);
++ icmp_param.data_len=datalen;
++ icmp_reply(&icmp_param, *pskb);
++ }
++ }
++ break;
++ case IPT_TCP_RESET:
++ send_reset(*pskb);
++ break;
++#endif
++ default:
++ printk(KERN_WARNING "REJECTv6: case %u not handled yet\n", reject->with);
++ break;
++ }
++
++ return NF_DROP;
++}
++
++static inline int find_ping_match(const struct ip6t_entry_match *m)
++{
++ const struct ip6t_icmp *icmpinfo = (const struct ip6t_icmp *)m->data;
++
++ if (strcmp(m->u.kernel.match->name, "icmp6") == 0
++ && icmpinfo->type == ICMPV6_ECHO_REQUEST
++ && !(icmpinfo->invflags & IP6T_ICMP_INV))
++ return 1;
++
++ return 0;
++}
++
++static int check(const char *tablename,
++ const struct ip6t_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ const struct ip6t_reject_info *rejinfo = targinfo;
++
++ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
++ DEBUGP("REJECTv6: targinfosize %u != 0\n", targinfosize);
++ return 0;
++ }
++
++ /* Only allow these for packet filtering. */
++ if (strcmp(tablename, "filter") != 0) {
++ DEBUGP("REJECTv6: bad table `%s'.\n", tablename);
++ return 0;
++ }
++ if ((hook_mask & ~((1 << NF_IP6_LOCAL_IN)
++ | (1 << NF_IP6_FORWARD)
++ | (1 << NF_IP6_LOCAL_OUT))) != 0) {
++ DEBUGP("REJECTv6: bad hook mask %X\n", hook_mask);
++ return 0;
++ }
++
++ if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
++ /* Must specify that it's an ICMP ping packet. */
++ if (e->ipv6.proto != IPPROTO_ICMPV6
++ || (e->ipv6.invflags & IP6T_INV_PROTO)) {
++ DEBUGP("REJECTv6: ECHOREPLY illegal for non-icmp\n");
++ return 0;
++ }
++ /* Must contain ICMP match. */
++ if (IP6T_MATCH_ITERATE(e, find_ping_match) == 0) {
++ DEBUGP("REJECTv6: ECHOREPLY illegal for non-ping\n");
++ return 0;
++ }
++ } else if (rejinfo->with == IP6T_TCP_RESET) {
++ /* Must specify that it's a TCP packet */
++ if (e->ipv6.proto != IPPROTO_TCP
++ || (e->ipv6.invflags & IP6T_INV_PROTO)) {
++ DEBUGP("REJECTv6: TCP_RESET illegal for non-tcp\n");
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static struct ip6t_target ip6t_reject_reg
++= { { NULL, NULL }, "REJECT", reject6_target, check, NULL, THIS_MODULE };
++
++static int __init init(void)
++{
++ if (ip6t_register_target(&ip6t_reject_reg))
++ return -EINVAL;
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_target(&ip6t_reject_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/Kconfig linux-2.6.3/net/ipv6/netfilter/Kconfig
+--- linux-2.6.3.org/net/ipv6/netfilter/Kconfig 2004-02-18 04:59:20.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/Kconfig 2004-02-27 00:03:14.474027792 +0100
+@@ -218,5 +218,37 @@
+ To compile it as a module, choose M here. If unsure, say N.
+
+ #dep_tristate ' LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_IPTABLES
++config IP6_NF_TARGET_HL
++ tristate 'HOPLIMIT target support'
++ depends on IP6_NF_MANGLE
++ help
++ This option adds a `HL' target, which allows you to modify the IPv6 hop limit field.
++config IP6_NF_TARGET_REJECT
++ tristate 'REJECT target support'
++ depends on IP6_NF_FILTER
++ help
++ The REJECT target allows a filtering rule to specify that an ICMPv6 error should be issued in response to an incoming packet, rather than silently dropping it.
++config IP6_NF_MATCH_FUZZY
++ tristate 'Fuzzy match support'
++ depends on IP6_NF_FILTER
++ help
++ This option adds a `fuzzy' match, which allows you to match packets according to a fuzzy logic based rate limit.
++config IP6_NF_MATCH_NTH
++ tristate 'Nth match support'
++ depends on IP6_NF_IPTABLES
++ help
++ This option adds a `nth' match, which allows you to match every Nth packet.
++config IP6_NF_RAW
++ tristate 'raw table support (required for TRACE)'
++ depends on IP6_NF_IPTABLES
++ help
++ This option adds a `raw' table to ip6tables. This table is the very
++ first in the netfilter framework and hooks in at the PREROUTING
++ and OUTPUT chains.
++
++ If you want to compile it as a module, say M here and read
++ <file:Documentation/modules.txt>. If unsure, say `N'.
++
++
+ endmenu
+
+diff -Nur linux-2.6.3.org/net/ipv6/netfilter/Makefile linux-2.6.3/net/ipv6/netfilter/Makefile
+--- linux-2.6.3.org/net/ipv6/netfilter/Makefile 2004-02-18 04:58:26.000000000 +0100
++++ linux-2.6.3/net/ipv6/netfilter/Makefile 2004-02-27 00:03:14.474027792 +0100
+@@ -8,6 +8,7 @@
+ obj-$(CONFIG_IP6_NF_MATCH_MARK) += ip6t_mark.o
+ obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
+ obj-$(CONFIG_IP6_NF_MATCH_MAC) += ip6t_mac.o
++obj-$(CONFIG_IP6_NF_MATCH_FUZZY) += ip6t_fuzzy.o
+ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
+ obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
+ obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
+@@ -19,6 +20,11 @@
+ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
+ obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
+ obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
++obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+ obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
+ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
++obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
++
++obj-$(CONFIG_IP6_NF_MATCH_NTH) += ip6t_nth.o
++obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
+ obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o