openwrt/target/linux/generic-2.4/patches/608-netfilter_ipset.patch

--- a/Documentation/Configure.help
+++ b/Documentation/Configure.help
@@ -3231,6 +3231,75 @@ CONFIG_IP_NF_TARGET_CLASSIFY
If you want to compile it as a module, say M here and read
Documentation/modules.txt. If unsure, say `N'.
+IP set support
+CONFIG_IP_NF_SET
+ This option adds IP set support to the kernel.
+
+ In order to define and use sets, you need userlevel utilities: an
+ iptables binary which knows about IP sets and the program ipset(8),
+ by which you can define and setup the sets themselves.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+set match support
+CONFIG_IP_NF_MATCH_SET
+ This option adds IP set match support.
+ You need the ipset utility to create and set up the sets.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+SET target support
+CONFIG_IP_NF_TARGET_SET
+ This option adds IP set target support.
+ You need the ipset utility to create and set up the sets.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+ipmap set type support
+CONFIG_IP_NF_SET_IPMAP
+ This option adds the ipmap set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+macipmap set type support
+CONFIG_IP_NF_SET_MACIPMAP
+ This option adds the macipmap set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+portmap set type support
+CONFIG_IP_NF_SET_PORTMAP
+ This option adds the portmap set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+iphash set type support
+CONFIG_IP_NF_SET_IPHASH
+ This option adds the iphash set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+nethash set type support
+CONFIG_IP_NF_SET_NETHASH
+ This option adds the nethash set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+iptree set type support
+CONFIG_IP_NF_SET_IPTREE
+ This option adds the iptree set type support.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
TTL target support
CONFIG_IP_NF_TARGET_TTL
This option adds a `TTL' target, which enables the user to set
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
@@ -0,0 +1,489 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * A sockopt of such quality has hardly ever been seen before on the open
+ * market! This little beauty, hardly ever used: above 64, so it's
+ * traditionally used for firewalling, not touched (even once!) by the
+ * 2.0, 2.2 and 2.4 kernels!
+ *
+ * Comes with its own certificate of authenticity, valid anywhere in the
+ * Free world!
+ *
+ * Rusty, 19.4.2000
+ */
+#define SO_IP_SET 83
+
+/*
+ * Heavily modified by Joakim Axelsson 08.03.2002
+ * - Made it more module-based
+ *
+ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
+ * - bindings added
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
+/*
+ * Used so that the kernel module and ipset-binary can match their versions
+ */
+#define IP_SET_PROTOCOL_VERSION 2
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
+/* Let's work with our own typedef for representing an IP address.
+ * We hope to make the code more portable, possibly to IPv6...
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
+ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
+ * in order to keep the host byte order and make it more portable:
+ * parse_ip()
+ * parse_mask()
+ * parse_ipandmask()
+ * ip_tostring()
+ * (Joakim: where are they???)
+ */
+
+typedef uint32_t ip_set_ip_t;
+
+/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
+ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef uint16_t ip_set_id_t;
+
+#define IP_SET_INVALID_ID 65535
+
+/* How deep we follow bindings */
+#define IP_SET_MAX_BINDINGS 6
+
+/*
+ * Option flags for kernel operations (ipt_set_info)
+ */
+#define IPSET_SRC 0x01 /* Source match/add */
+#define IPSET_DST 0x02 /* Destination match/add */
+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
+
+/*
+ * Set types (flavours)
+ */
+#define IPSET_TYPE_IP 0 /* IP address type of set */
+#define IPSET_TYPE_PORT 1 /* Port type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
+#define IPSET_TOKEN_ALL ":all:"
+
+/* SO_IP_SET operation constants, and their request struct types.
+ *
+ * Operation ids:
+ * 0-99: commands with version checking
+ * 100-199: add/del/test/bind/unbind
+ * 200-299: list, save, restore
+ */
+
+/* Single shot operations:
+ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
+
+#define IP_SET_REQ_STD \
+ unsigned op; \
+ unsigned version; \
+ char name[IP_SET_MAXNAMELEN]
+
+#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
+struct ip_set_req_create {
+ IP_SET_REQ_STD;
+ char typename[IP_SET_MAXNAMELEN];
+};
+
+#define IP_SET_OP_DESTROY 0x00000002 /* Remove an (empty) set */
+struct ip_set_req_std {
+ IP_SET_REQ_STD;
+};
+
+#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
+/* Uses ip_set_req_std */
+
+#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
+/* Uses ip_set_req_create */
+
+#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
+/* Uses ip_set_req_create */
+
+union ip_set_name_index {
+ char name[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
+struct ip_set_req_get_set {
+ unsigned op;
+ unsigned version;
+ union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
+struct ip_set_req_version {
+ unsigned op;
+ unsigned version;
+};
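
For orientation, a minimal userspace sketch of how the SO_IP_SET interface above is driven (not part of the patch; the raw-socket choice and the getsockopt/setsockopt split mirror the ipset userspace tool and the handlers later in ip_set.c):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
    struct ip_set_req_version req;
    socklen_t size = sizeof(req);
    int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);    /* needs CAP_NET_ADMIN */

    if (s < 0)
        return 1;
    memset(&req, 0, sizeof(req));
    req.op = IP_SET_OP_VERSION;
    /* query-type operations return data, so they go through getsockopt();
     * commands such as add/del/bind are sent with setsockopt() instead */
    if (getsockopt(s, SOL_IP, SO_IP_SET, &req, &size) != 0)
        return 1;
    printf("kernel ipset protocol version: %u\n", req.version);
    return 0;
}
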
+
+/* Double shot operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
+ * then issue the command. The validity of the IP is checked in the kernel
+ * in order to minimize sockopt operations.
+ */
+
+/* Get minimal set data for add/del/test/bind/unbind IP */
+#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
+struct ip_set_req_adt_get {
+ unsigned op;
+ unsigned version;
+ union ip_set_name_index set;
+ char typename[IP_SET_MAXNAMELEN];
+};
+
+#define IP_SET_REQ_BYINDEX \
+ unsigned op; \
+ ip_set_id_t index;
+
+struct ip_set_req_adt {
+ IP_SET_REQ_BYINDEX;
+};
+
+#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
+/* Uses ip_set_req_adt, with type specific data appended */
+
+#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
+/* Uses ip_set_req_adt, with type specific data appended */
+
+#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
+/* Uses ip_set_req_adt, with type specific data appended */
+
+#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
+/* Uses ip_set_req_bind, with type specific data appended */
+struct ip_set_req_bind {
+ IP_SET_REQ_BYINDEX;
+ char binding[IP_SET_MAXNAMELEN];
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
+/* Uses ip_set_req_bind, with type specific data appended;
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
+/* Uses ip_set_req_bind, with type specific data appended */
+
+/* Multiple shot operations: list, save, restore.
+ *
+ * - check kernel version and query the max number of sets
+ * - get the basic information on all sets
+ * and size required for the next step
+ * - get actual set data: header, data, bindings
+ */
+
+/* Get max_sets and the index of a queried set
+ */
+#define IP_SET_OP_MAX_SETS 0x00000020
+struct ip_set_req_max_sets {
+ unsigned op;
+ unsigned version;
+ ip_set_id_t max_sets; /* maximum number of sets */
+ ip_set_id_t sets; /* real number of sets */
+ union ip_set_name_index set; /* index of set if name used */
+};
+
+/* Get the id and name of the sets plus size for next step */
+#define IP_SET_OP_LIST_SIZE 0x00000201
+#define IP_SET_OP_SAVE_SIZE 0x00000202
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
+ size_t size; /* size to get setdata/bindings */
+ /* followed by 'sets' number of struct ip_set_name_list entries */
+};
+
+struct ip_set_name_list {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
+ ip_set_id_t id;
+};
+
+/* The actual list operation */
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
+ /* 'sets' number of struct ip_set_list entries in the reply */
+};
+
+struct ip_set_list {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
+ size_t header_size; /* Set header data of header_size */
+ size_t members_size; /* Set members data of members_size */
+ size_t bindings_size; /* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
+ ip_set_ip_t ip;
+ ip_set_id_t binding;
+};
+
+/* The save operation */
+#define IP_SET_OP_SAVE 0x00000204
+/* Uses ip_set_req_list; in the reply it is replaced by
+ * 'sets' number of struct ip_set_save entries, plus a marker
+ * ip_set_save followed by ip_set_hash_save structures.
+ */
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ size_t header_size; /* Set header data of header_size */
+ size_t members_size; /* Set members data of members_size */
+};
+
+/* When restoring, ip == 0 means the default binding for the given set: */
+struct ip_set_hash_save {
+ ip_set_ip_t ip;
+ ip_set_id_t id;
+ ip_set_id_t binding;
+};
+
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
+ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
+ size_t header_size; /* Create data of header_size */
+ size_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
+{
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
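
As a quick sanity check of the rounding above (editorial worked example, not part of the patch): a full /16 ipmap needs 65536 member bits, and bitmap_bytes(0, 65535) = 4 * (((65535 - 0 + 8) / 8 + 3) / 4) = 4 * (8195 / 4) = 8192 bytes, i.e. the bit count rounded up to whole 32-bit words.
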
+
+#ifdef __KERNEL__
+
+#define ip_set_printk(format, args...) \
+ do { \
+ printk("%s: %s: ", __FILE__, __FUNCTION__); \
+ printk(format "\n" , ## args); \
+ } while (0)
+
+#if defined(IP_SET_DEBUG)
+#define DP(format, args...) \
+ do { \
+ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
+ printk(format "\n" , ## args); \
+ } while (0)
+#define IP_SET_ASSERT(x) \
+ do { \
+ if (!(x)) \
+ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ } while (0)
+#else
+#define DP(format, args...)
+#define IP_SET_ASSERT(x)
+#endif
+
+struct ip_set;
+
+/*
+ * The ip_set_type definition - one per set type, e.g. "ipmap".
+ *
+ * Each individual set has a pointer, set->type, going to one
+ * of these structures. Function pointers inside the structure implement
+ * the real behaviour of the sets.
+ *
+ * Unless stated otherwise, the implementation behind the function
+ * pointers of a set_type is expected to return 0 if ok, and a negative
+ * errno (e.g. -EINVAL) on error.
+ */
+struct ip_set_type {
+ struct list_head list; /* next in list of set types */
+
+ /* test for IP in set (kernel: iptables -m set src|dst)
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
+ const struct sk_buff * skb,
+ u_int32_t flags,
+ ip_set_ip_t *ip);
+
+ /* test for IP in set (userspace: ipset -T set IP)
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
+ const void *data, size_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed in when
+ * adding/deleting/testing an entry.
+ */
+ size_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip) (struct ip_set *set,
+ const void *data, size_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
+ const struct sk_buff * skb,
+ u_int32_t flags,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (userspace: ipset -D set --entry x)
+ * Return -EEXIST if the address is NOT in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip) (struct ip_set *set,
+ const void *data, size_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
+ * Return -EEXIST if the address is NOT in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
+ const struct sk_buff * skb,
+ u_int32_t flags,
+ ip_set_ip_t *ip);
+
+ /* new set creation - allocate type specific items
+ */
+ int (*create) (struct ip_set *set,
+ const void *data, size_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
+ int (*retry) (struct ip_set *set);
+
+ /* set destruction - free type specific items
+ * There is no return value.
+ * Can be called only when child sets are destroyed.
+ */
+ void (*destroy) (struct ip_set *set);
+
+ /* set flushing - reset all bits in the set, or something similar.
+ * There is no return value.
+ */
+ void (*flush) (struct ip_set *set);
+
+ /* Listing: size needed for header
+ */
+ size_t header_size;
+
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
+ * This function is always run after the header_size check, under a
+ * writelock on the set. Therefore the length of "data" is always
+ * correct.
+ */
+ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
+ */
+ int (*list_members_size) (const struct ip_set *set);
+
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
+ * This function is always run after list_members_size() under a
+ * writelock on the set. Therefore the length of "data" is always
+ * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
+
+ char typename[IP_SET_MAXNAMELEN];
+ char typecode;
+ int protocol_version;
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+extern int ip_set_register_set_type(struct ip_set_type *set_type);
+extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
+
+/* A generic ipset */
+struct ip_set {
+ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
+ rwlock_t lock; /* lock for concurrency control */
+ ip_set_id_t id; /* set id for swapping */
+ ip_set_id_t binding; /* default binding for the set */
+ atomic_t ref; /* in kernel and in hash references */
+ struct ip_set_type *type; /* the set's type */
+ void *data; /* type specific data */
+};
+
+/* Structure to bind set elements to sets */
+struct ip_set_hash {
+ struct list_head list; /* list of clashing entries in hash */
+ ip_set_ip_t ip; /* ip from set */
+ ip_set_id_t id; /* set id */
+ ip_set_id_t binding; /* set we bind the element to */
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
+extern void ip_set_put(ip_set_id_t id);
+
+/* API for iptables set match, and SET target */
+extern void ip_set_addip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+extern void ip_set_delip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iphash"
+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
+ uint32_t initval; /* initval for jhash_1word */
+ uint32_t prime; /* prime for double hashing */
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
+};
+
+struct ip_set_req_iphash_create {
+ uint32_t hashsize;
+ uint16_t probes;
+ uint16_t resize;
+ ip_set_ip_t netmask;
+};
+
+struct ip_set_req_iphash {
+ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
@@ -0,0 +1,56 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "ipmap"
+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ u_int16_t hosts; /* number of hosts in a subnet */
+};
+
+struct ip_set_req_ipmap_create {
+ ip_set_ip_t from;
+ ip_set_ip_t to;
+ ip_set_ip_t netmask;
+};
+
+struct ip_set_req_ipmap {
+ ip_set_ip_t ip;
+};
+
+unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
+
+ if (mask == 0xFFFFFFFF)
+ return bits;
+
+ maskaddr = 0xFFFFFFFE;
+ while (--bits > 0 && maskaddr != mask) /* bits is unsigned */
+ maskaddr <<= 1;
+
+ return bits;
+}
+
+ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) >= 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
+
+#endif /* __IP_SET_IPMAP_H */
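
A small userspace check of the two helpers above, assuming they are compiled together with this snippet (editorial sketch, not part of the patch): the /24 range 192.168.0.0 to 192.168.0.255 in host byte order is 0xC0A80000 to 0xC0A800FF.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;
/* mask_to_bits() and range_to_mask() exactly as defined above */

int main(void)
{
    unsigned int bits;
    ip_set_ip_t mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);

    printf("mask 0x%08x /%u\n", (unsigned) mask, bits);    /* 0xffffff00 /24 */
    printf("round trip: /%u\n", mask_to_bits(0xffffff00)); /* 24 */
    return 0;
}
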
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[255]; /* x.x.x.ADDR */
+};
+
+struct ip_set_iptreec {
+ struct ip_set_iptreed *tree[255]; /* x.x.ADDR.* */
+};
+
+struct ip_set_iptreeb {
+ struct ip_set_iptreec *tree[255]; /* x.ADDR.*.* */
+};
+
+struct ip_set_iptree {
+ unsigned int timeout;
+ unsigned int gc_interval;
+#ifdef __KERNEL__
+ struct timer_list gc;
+ struct ip_set_iptreeb *tree[255]; /* ADDR.*.*.* */
+#endif
+};
+
+struct ip_set_req_iptree_create {
+ unsigned int timeout;
+};
+
+struct ip_set_req_iptree {
+ ip_set_ip_t ip;
+ unsigned int timeout;
+};
+
+#endif /* __IP_SET_IPTREE_H */
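
The three levels plus the leaf array split an address by octets. A rough kernel-side sketch of the lookup this type performs (editorial illustration, not part of the patch; the helper name is made up, and allocation of missing levels, timeouts and the gc timer are left out):

/* Hypothetical helper: return the expiry slot for a.b.c.d, or NULL if an
 * intermediate level has not been allocated yet. Note that the arrays above
 * are declared with 255 entries, so the octet value 255 does not fit. */
static unsigned long *
iptree_slot(struct ip_set_iptree *map, ip_set_ip_t ip)
{
    unsigned char a = (ip >> 24) & 0xFF;    /* ADDR.*.*.* */
    unsigned char b = (ip >> 16) & 0xFF;    /* x.ADDR.*.* */
    unsigned char c = (ip >> 8) & 0xFF;     /* x.x.ADDR.* */
    unsigned char d = ip & 0xFF;            /* x.x.x.ADDR */
    struct ip_set_iptreeb *btree = map->tree[a];
    struct ip_set_iptreec *ctree = btree ? btree->tree[b] : NULL;
    struct ip_set_iptreed *dtree = ctree ? ctree->tree[c] : NULL;

    return dtree ? &dtree->expires[d] : NULL;   /* 0 = unset, else an expiry in jiffies */
}
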
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
@@ -0,0 +1,148 @@
+#ifndef _LINUX_IPSET_JHASH_H
+#define _LINUX_IPSET_JHASH_H
+
+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
+ * to __u32/__u8 so that the header file can be included into
+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ */
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault. -DaveM
+ */
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
+{
+ __u32 a, b, c, len;
+ __u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
+
+ __jhash_mix(a,b,c);
+
+ k += 12;
+ len -= 12;
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((__u32)k[10]<<24);
+ case 10: c += ((__u32)k[9]<<16);
+ case 9 : c += ((__u32)k[8]<<8);
+ case 8 : b += ((__u32)k[7]<<24);
+ case 7 : b += ((__u32)k[6]<<16);
+ case 6 : b += ((__u32)k[5]<<8);
+ case 5 : b += k[4];
+ case 4 : a += ((__u32)k[3]<<24);
+ case 3 : a += ((__u32)k[2]<<16);
+ case 2 : a += ((__u32)k[1]<<8);
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* A special optimized version that handles 1 or more __u32s.
+ * The length parameter here is the number of __u32s in the key.
+ */
+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
+{
+ __u32 a, b, c, len;
+
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+ len = length;
+
+ while (len >= 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ k += 3; len -= 3;
+ }
+
+ c += length * 4;
+
+ switch (len) {
+ case 2 : b += k[1];
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+
+/* Special ultra-optimized versions that know they are hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ * done at the end is not done here.
+ */
+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
+{
+ a += JHASH_GOLDEN_RATIO;
+ b += JHASH_GOLDEN_RATIO;
+ c += initval;
+
+ __jhash_mix(a, b, c);
+
+ return c;
+}
+
+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
+{
+ return jhash_3words(a, b, 0, initval);
+}
+
+static inline __u32 jhash_1word(__u32 a, __u32 initval)
+{
+ return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _LINUX_IPSET_JHASH_H */
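
For orientation (a sketch, not from the patch): the hash based set types above store an initval for jhash_1word, so picking a bucket comes down to something like the helper below; the actual probing and resize logic lives in ip_set_iphash.c and ip_set_nethash.c.

/* Hypothetical illustration of how ip_set_iphash's initval and hashsize
 * fields are meant to be combined. */
static inline uint32_t
iphash_bucket(uint32_t ip, uint32_t initval, uint32_t hashsize)
{
    return jhash_1word(ip, initval) % hashsize;
}
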
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
@@ -0,0 +1,38 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "macipmap"
+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
+
+/* per ip flags */
+#define IPSET_MACIP_ISSET 1
+
+struct ip_set_macipmap {
+ void *members; /* the macipmap proper */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
+};
+
+struct ip_set_req_macipmap_create {
+ ip_set_ip_t from;
+ ip_set_ip_t to;
+ u_int32_t flags;
+};
+
+struct ip_set_req_macipmap {
+ ip_set_ip_t ip;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+struct ip_set_macip {
+ unsigned short flags;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
@@ -0,0 +1,27 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
+
+/* Memory allocation and deallocation */
+static size_t max_malloc_size = 131072; /* Guaranteed: slab.c */
+
+static inline void * ip_set_malloc(size_t bytes)
+{
+ if (bytes > max_malloc_size)
+ return vmalloc(bytes);
+ else
+ return kmalloc(bytes, GFP_KERNEL);
+}
+
+static inline void ip_set_free(void * data, size_t bytes)
+{
+ if (bytes > max_malloc_size)
+ vfree(data);
+ else
+ kfree(data);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
@@ -0,0 +1,55 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "nethash"
+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
+ uint32_t initval; /* initval for jhash_1word */
+ uint32_t prime; /* prime for double hashing */
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ unsigned char cidr[30]; /* CIDR sizes */
+};
+
+struct ip_set_req_nethash_create {
+ uint32_t hashsize;
+ uint16_t probes;
+ uint16_t resize;
+};
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
+ unsigned char cidr;
+};
+
+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
+
+static inline ip_set_ip_t
+pack(ip_set_ip_t ip, unsigned char cidr)
+{
+ ip_set_ip_t addr, *paddr = &addr;
+ unsigned char n, t, *a;
+
+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
+#ifdef __KERNEL__
+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
+#endif
+ n = cidr / 8;
+ t = cidr % 8;
+ a = &((unsigned char *)paddr)[n];
+ *a = *a /(1 << (8 - t)) + shifts[t];
+#ifdef __KERNEL__
+ DP("n: %u, t: %u, a: %u", n, t, *a);
+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
+ HIPQUAD(ip), cidr, NIPQUAD(addr));
+#endif
+
+ return ntohl(addr);
+}
+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -0,0 +1,25 @@
+#ifndef __IP_SET_PORTMAP_H
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "portmap"
+#define MAX_RANGE 0x0000FFFF
+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
+ ip_set_ip_t first_port; /* host byte order, included in range */
+ ip_set_ip_t last_port; /* host byte order, included in range */
+};
+
+struct ip_set_req_portmap_create {
+ ip_set_ip_t from;
+ ip_set_ip_t to;
+};
+
+struct ip_set_req_portmap {
+ ip_set_ip_t port;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_prime.h
@@ -0,0 +1,34 @@
+#ifndef __IP_SET_PRIME_H
+#define __IP_SET_PRIME_H
+
+static inline unsigned make_prime_bound(unsigned nr)
+{
+ unsigned long long nr64 = nr;
+ unsigned long long x = 1;
+ nr = 1;
+ while (x <= nr64) { x <<= 2; nr <<= 1; }
+ return nr;
+}
+
+static inline int make_prime_check(unsigned nr)
+{
+ unsigned x = 3;
+ unsigned b = make_prime_bound(nr);
+ while (x <= b) {
+ if (0 == (nr % x)) return 0;
+ x += 2;
+ }
+ return 1;
+}
+
+static unsigned make_prime(unsigned nr)
+{
+ if (0 == (nr & 1)) nr--;
+ while (nr > 1) {
+ if (make_prime_check(nr)) return nr;
+ nr -= 2;
+ }
+ return 2;
+}
+
+#endif /* __IP_SET_PRIME_H */
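
A worked example of the helpers above (editorial, not part of the patch): make_prime(1024) first steps down to 1023 because 1024 is even; 1023 = 3 * 11 * 31 fails make_prime_check(), but 1021 has no odd divisor up to make_prime_bound(1021) = 32, so 1021 is returned: the largest prime not exceeding the requested size, which the hash set types then use as their double-hashing prime.
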
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
+#define _IPT_SET_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+struct ipt_set_info {
+ ip_set_id_t index;
+ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
+};
+
+/* match info */
+struct ipt_set_info_match {
+ struct ipt_set_info match_set;
+};
+
+struct ipt_set_info_target {
+ struct ipt_set_info add_set;
+ struct ipt_set_info del_set;
+};
+
+#endif /*_IPT_SET_H*/
--- a/net/ipv4/netfilter/Config.in
+++ b/net/ipv4/netfilter/Config.in
@@ -22,6 +22,20 @@ tristate 'IP tables support (required fo
if [ "$CONFIG_IP_NF_IPTABLES" != "n" ]; then
# The simple matches.
dep_tristate ' limit match support' CONFIG_IP_NF_MATCH_LIMIT $CONFIG_IP_NF_IPTABLES
+
+ dep_tristate ' IP set support' CONFIG_IP_NF_SET $CONFIG_IP_NF_IPTABLES
+ if [ "$CONFIG_IP_NF_SET" != "n" ]; then
+ int ' Maximum number of sets' CONFIG_IP_NF_SET_MAX 256
+ int ' Hash size for bindings of IP sets' CONFIG_IP_NF_SET_HASHSIZE 1024
+ dep_tristate ' set match support' CONFIG_IP_NF_MATCH_SET $CONFIG_IP_NF_SET
+ dep_tristate ' SET target support' CONFIG_IP_NF_TARGET_SET $CONFIG_IP_NF_SET
+ dep_tristate ' ipmap set type support' CONFIG_IP_NF_SET_IPMAP $CONFIG_IP_NF_SET
+ dep_tristate ' portmap set type support' CONFIG_IP_NF_SET_PORTMAP $CONFIG_IP_NF_SET
+ dep_tristate ' macipmap set type support' CONFIG_IP_NF_SET_MACIPMAP $CONFIG_IP_NF_SET
+ dep_tristate ' iphash set type support' CONFIG_IP_NF_SET_IPHASH $CONFIG_IP_NF_SET
+ dep_tristate ' nethash set type support' CONFIG_IP_NF_SET_NETHASH $CONFIG_IP_NF_SET
+ dep_tristate ' iptree set type support' CONFIG_IP_NF_SET_IPTREE $CONFIG_IP_NF_SET
+ fi
dep_tristate ' MAC address match support' CONFIG_IP_NF_MATCH_MAC $CONFIG_IP_NF_IPTABLES
dep_tristate ' Packet type match support' CONFIG_IP_NF_MATCH_PKTTYPE $CONFIG_IP_NF_IPTABLES
dep_tristate ' netfilter MARK match support' CONFIG_IP_NF_MATCH_MARK $CONFIG_IP_NF_IPTABLES
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
@@ -0,0 +1,2002 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <asm/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x) /* don't use that */
+#define ASSERT_WRITE_LOCK(x)
+#include <linux/netfilter_ipv4/listhelp.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
+static struct ip_set **ip_set_list; /* all individual sets */
+static DECLARE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
+static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
+static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
+static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
+/* Arrgh */
+#ifdef MODULE
+#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
+#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
+#define __MOD_INC_SELF MOD_INC_USE_COUNT
+#define __MOD_DEC_SELF MOD_DEC_USE_COUNT
+#else
+#define __MOD_INC(foo)
+#define __MOD_DEC(foo)
+#define __MOD_INC_SELF
+#define __MOD_DEC_SELF
+#endif
+
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
+ * The id never changes and is used to find a key in the hash.
+ * The index may change by swapping and is used at all other places
+ * (set/SET netfilter modules, binding value, etc.)
+ *
+ * Userspace requests are serialized by ip_set_app_mutex and sets can
+ * be deleted only from userspace. Therefore ip_set_list locking
+ * must obey the following rules:
+ *
+ * - kernel requests: read and write locking mandatory
+ * - user requests: read locking optional, write locking mandatory
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+ atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+ atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Binding routines
+ */
+
+static inline int
+ip_hash_cmp(const struct ip_set_hash *set_hash,
+ ip_set_id_t id, ip_set_ip_t ip)
+{
+ return set_hash->id == id && set_hash->ip == ip;
+}
+
+static ip_set_id_t
+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+
+ MUST_BE_READ_LOCKED(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+
+ set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
+ struct ip_set_hash *, id, ip);
+
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
+
+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
+}
+
+static inline void
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ MUST_BE_WRITE_LOCKED(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
+ kfree(set_hash);
+}
+
+static int
+ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+
+ IP_SET_ASSERT(ip_set_list[id]);
+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ WRITE_LOCK(&ip_set_lock);
+ set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
+ struct ip_set_hash *, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
+ WRITE_UNLOCK(&ip_set_lock);
+ return 0;
+}
+
+static int
+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
+
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip), ip_set_list[binding]->name);
+ WRITE_LOCK(&ip_set_lock);
+ set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
+ struct ip_set_hash *, id, ip);
+ if (!set_hash) {
+ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_KERNEL);
+ if (!set_hash) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ INIT_LIST_HEAD(&set_hash->list);
+ set_hash->id = id;
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]); /* insert the new entry into its bucket */
+ } else {
+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
+ }
+ set_hash->binding = binding;
+ __ip_set_get(set_hash->binding);
+ unlock:
+ WRITE_UNLOCK(&ip_set_lock);
+ return ret;
+}
+
+#define FOREACH_HASH_DO(fn, args...) \
+({ \
+ ip_set_id_t __key; \
+ struct ip_set_hash *__set_hash; \
+ \
+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
+ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
+ fn(__set_hash , ## args); \
+ } \
+})
+
+#define FOREACH_HASH_RW_DO(fn, args...) \
+({ \
+ ip_set_id_t __key; \
+ struct ip_set_hash *__set_hash, *__n; \
+ \
+ MUST_BE_WRITE_LOCKED(&ip_set_lock); \
+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
+ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
+ fn(__set_hash , ## args); \
+ } \
+})
+
+/* Add, del and test set entries from kernel */
+
+#define follow_bindings(index, set, ip) \
+((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
+ || (index = (set)->binding) != IP_SET_INVALID_ID)
+
+int
+ip_set_testip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
+{
+ struct ip_set *set;
+ ip_set_ip_t ip;
+ int res, i = 0;
+
+ IP_SET_ASSERT(flags[i]);
+ READ_LOCK(&ip_set_lock);
+ do {
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
+ DP("set %s, index %u", set->name, index);
+ read_lock_bh(&set->lock);
+ res = set->type->testip_kernel(set, skb, flags[i], &ip);
+ read_unlock_bh(&set->lock);
+ } while (res > 0
+ && flags[++i]
+ && follow_bindings(index, set, ip));
+ READ_UNLOCK(&ip_set_lock);
+
+ return res;
+}
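
A concrete reading of the loop above (editorial note, not part of the patch): with a flags array of { IPSET_SRC, IPSET_DST, 0 }, the packet's source address is first tested in the given set; on a match, the binding stored for that address (or the set's default binding) selects the next set, which is then tested against the destination address, and so on for at most IP_SET_MAX_BINDINGS + 1 entries of the flags array.
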
+
+void
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
+{
+ struct ip_set *set;
+ ip_set_ip_t ip;
+ int res, i = 0;
+
+ IP_SET_ASSERT(flags[i]);
+ retry:
+ READ_LOCK(&ip_set_lock);
+ do {
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
+ DP("set %s, index %u", set->name, index);
+ write_lock_bh(&set->lock);
+ res = set->type->addip_kernel(set, skb, flags[i], &ip);
+ write_unlock_bh(&set->lock);
+ } while ((res == 0 || res == -EEXIST)
+ && flags[++i]
+ && follow_bindings(index, set, ip));
+ READ_UNLOCK(&ip_set_lock);
+
+ if (res == -EAGAIN
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
+}
+
+void
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
+{
+ struct ip_set *set;
+ ip_set_ip_t ip;
+ int res, i = 0;
+
+ IP_SET_ASSERT(flags[i]);
+ READ_LOCK(&ip_set_lock);
+ do {
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
+ DP("set %s, index %u", set->name, index);
+ write_lock_bh(&set->lock);
+ res = set->type->delip_kernel(set, skb, flags[i], &ip);
+ write_unlock_bh(&set->lock);
+ } while ((res == 0 || res == -EEXIST)
+ && flags[++i]
+ && follow_bindings(index, set, ip));
+ READ_UNLOCK(&ip_set_lock);
+}
+
+/* Register and deregister settype */
+
+static inline int
+set_type_equal(const struct ip_set_type *set_type, const char *str2)
+{
+ return !strncmp(set_type->typename, str2, IP_SET_MAXNAMELEN - 1);
+}
+
+static inline struct ip_set_type *
+find_set_type(const char *name)
+{
+ return LIST_FIND(&set_type_list,
+ set_type_equal,
+ struct ip_set_type *,
+ name);
+}
+
+int
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
+
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
+ set_type->protocol_version,
+ IP_SET_PROTOCOL_VERSION);
+ return -EINVAL;
+ }
+
+ WRITE_LOCK(&ip_set_lock);
+ if (find_set_type(set_type->typename)) {
+ /* Duplicate! */
+ ip_set_printk("'%s' already registered!",
+ set_type->typename);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ __MOD_INC_SELF;
+ list_append(&set_type_list, set_type);
+ DP("'%s' registered.", set_type->typename);
+ unlock:
+ WRITE_UNLOCK(&ip_set_lock);
+ return ret;
+}
+
+void
+ip_set_unregister_set_type(struct ip_set_type *set_type)
+{
+ WRITE_LOCK(&ip_set_lock);
+ if (!find_set_type(set_type->typename)) {
+ ip_set_printk("'%s' not registered?",
+ set_type->typename);
+ goto unlock;
+ }
+ LIST_DELETE(&set_type_list, set_type);
+ __MOD_DEC_SELF;
+ DP("'%s' unregistered.", set_type->typename);
+ unlock:
+ WRITE_UNLOCK(&ip_set_lock);
+
+}
+
+/*
+ * Userspace routines
+ */
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to, does not go away under our feet. Drop the reference
+ * later, using ip_set_put().
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
+
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && strcmp(ip_set_list[i]->name, name) == 0) {
+ __ip_set_get(i);
+ index = i;
+ break;
+ }
+ }
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
+ * Find set by index, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet. Drop the reference
+ * later, using ip_set_put().
+ */
+ip_set_id_t
+ip_set_get_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+
+ /* don't return early here: ip_set_app_mutex is held */
+ if (index >= ip_set_max)
+ index = IP_SET_INVALID_ID;
+ else if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
+
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ */
+void ip_set_put(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
+ __ip_set_put(index);
+ up(&ip_set_app_mutex);
+}
+
+/* Find a set by name or index */
+static ip_set_id_t
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
+
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && strcmp(ip_set_list[i]->name, name) == 0) {
+ index = i;
+ break;
+ }
+ }
+ return index;
+}
+
+static ip_set_id_t
+ip_set_find_byindex(ip_set_id_t index)
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
+
+ return index;
+}
+
+/*
+ * Add, del, test, bind and unbind
+ */
+
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
+ size_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
+
+ read_lock_bh(&set->lock);
+ res = set->type->testip(set, data, size, ip);
+ read_unlock_bh(&set->lock);
+
+ return res;
+}
+
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
+ res = set->type->addip(set, data, size, &ip);
+ write_unlock_bh(&set->lock);
+ } while (res == -EAGAIN
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0);
+
+ return res;
+}
+
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
+}
+
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
+ &ip);
+ write_unlock_bh(&set->lock);
+
+ return res;
+}
+
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
+ &ip);
+
+ return (res > 0 ? -EEXIST : res);
+}
+
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of a set */
+ char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
+
+ WRITE_LOCK(&ip_set_lock);
+ /* Sets as binding values are referenced */
+ if (set->binding != IP_SET_INVALID_ID)
+ __ip_set_put(set->binding);
+ set->binding = binding;
+ __ip_set_get(set->binding);
+ WRITE_UNLOCK(&ip_set_lock);
+
+ return 0;
+ }
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
+
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
+
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
+ return res;
+}
+
+#define FOREACH_SET_DO(fn, args...) \
+({ \
+ ip_set_id_t __i; \
+ struct ip_set *__set; \
+ \
+ for (__i = 0; __i < ip_set_max; __i++) { \
+ __set = ip_set_list[__i]; \
+ if (__set != NULL) \
+ fn(__set , ##args); \
+ } \
+})
+
+static inline void
+__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
+{
+ if (set_hash->id == id)
+ __set_hash_del(set_hash);
+}
+
+static inline void
+__unbind_default(struct ip_set *set)
+{
+ if (set->binding != IP_SET_INVALID_ID) {
+ /* Sets as binding values are referenced */
+ __ip_set_put(set->binding);
+ set->binding = IP_SET_INVALID_ID;
+ }
+}
+
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set;
+ struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of sets */
+ WRITE_LOCK(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ WRITE_UNLOCK(&ip_set_lock);
+ return 0;
+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
+ /* Flush all bindings of all sets */
+ WRITE_LOCK(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
+ WRITE_UNLOCK(&ip_set_lock);
+ return 0;
+ }
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
+
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
+
+ WRITE_LOCK(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
+ set->binding = IP_SET_INVALID_ID;
+ WRITE_UNLOCK(&ip_set_lock);
+
+ return 0;
+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
+ /* Flush all bindings */
+
+ WRITE_LOCK(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
+ WRITE_UNLOCK(&ip_set_lock);
+ return 0;
+ }
+
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+
+ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
+ if (res >= 0)
+ res = ip_set_hash_del(set->id, ip);
+
+ return res;
+}
+
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of set */
+ char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
+
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
+ }
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
+
+
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
+
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
+
+ return res;
+}
+
+static struct ip_set_type *
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
+
+ READ_LOCK(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
+ READ_UNLOCK(&ip_set_lock);
+
+ return type;
+}
+
+static int
+find_free_id(const char *name,
+ ip_set_id_t *index,
+ ip_set_id_t *id)
+{
+ ip_set_id_t i;
+
+ *id = IP_SET_INVALID_ID;
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
+ /* Name clash */
+ return -EEXIST;
+ }
+ if (*id == IP_SET_INVALID_ID)
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
+ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
+ *id = i;
+ goto check;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Create a set
+ */
+static int
+ip_set_create(const char *name,
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
+ size_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
+ */
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
+ set->lock = RW_LOCK_UNLOCKED;
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
+
+ /*
+ * Next, take the &ip_set_lock, check that we know the type,
+ * and take a reference on the type, to make sure it
+ * stays available while constructing our new set.
+ *
+ * After referencing the type, we drop the &ip_set_lock,
+ * and let the new set construction run without locks.
+ */
+ set->type = find_set_type_rlock(typename);
+ if (set->type == NULL) {
+ /* Try loading the module */
+ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
+ strcpy(modulename, "ip_set_");
+ strcat(modulename, typename);
+ DP("try to load %s", modulename);
+ request_module(modulename);
+ set->type = find_set_type_rlock(typename);
+ }
+ if (set->type == NULL) {
+ ip_set_printk("no set type '%s', set '%s' not created",
+ typename, name);
+ kfree(set);
+ return -ENOENT;
+ }
+ __MOD_INC(set->type->me);
+ READ_UNLOCK(&ip_set_lock);
+
+ /*
+ * Without holding any locks, create private part.
+ */
+ res = set->type->create(set, data, size);
+ if (res != 0) {
+ __MOD_DEC(set->type->me);
+ kfree(set);
+ return res;
+ }
+
+ /* BTW, res==0 here. */
+
+ /*
+ * Here, we have a valid, constructed set. &ip_set_lock again,
+ * find free id/index and check that it is not already in
+ * ip_set_list.
+ */
+ WRITE_LOCK(&ip_set_lock);
+ if ((res = find_free_id(set->name, &index, &id)) != 0) {
+ DP("no free id!");
+ goto cleanup;
+ }
+
+ /* Make sure restore gets the same index */
+ if (restore != IP_SET_INVALID_ID && index != restore) {
+ DP("Can't restore, sets are screwed up");
+ res = -ERANGE;
+ goto cleanup;
+ }
+
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
+ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
+ set->id = id;
+ ip_set_list[index] = set;
+ WRITE_UNLOCK(&ip_set_lock);
+ return res;
+
+ cleanup:
+ WRITE_UNLOCK(&ip_set_lock);
+ set->type->destroy(set);
+ __MOD_DEC(set->type->me);
+ kfree(set);
+ return res;
+}
+
+/*
+ * Destroy a given existing set
+ */
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+ struct ip_set *set = ip_set_list[index];
+
+ IP_SET_ASSERT(set);
+ DP("set: %s", set->name);
+ WRITE_LOCK(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
+ if (set->binding != IP_SET_INVALID_ID)
+ __ip_set_put(set->binding);
+ ip_set_list[index] = NULL;
+ WRITE_UNLOCK(&ip_set_lock);
+
+ /* Must call it without holding any lock */
+ set->type->destroy(set);
+ __MOD_DEC(set->type->me);
+ kfree(set);
+}
+
+/*
+ * Destroy a set - or all sets
+ * Sets must not be referenced/used.
+ */
+static int
+ip_set_destroy(ip_set_id_t index)
+{
+ ip_set_id_t i;
+
+ /* ref modification always protected by the mutex */
+ if (index != IP_SET_INVALID_ID) {
+ if (atomic_read(&ip_set_list[index]->ref))
+ return -EBUSY;
+ ip_set_destroy_set(index);
+ } else {
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && (atomic_read(&ip_set_list[i]->ref)))
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL)
+ ip_set_destroy_set(i);
+ }
+ }
+ return 0;
+}
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+ DP("set: %s %u", set->name, set->id);
+
+ write_lock_bh(&set->lock);
+ set->type->flush(set);
+ write_unlock_bh(&set->lock);
+}
+
+/*
+ * Flush data in a set - or in all sets
+ */
+static int
+ip_set_flush(ip_set_id_t index)
+{
+ if (index != IP_SET_INVALID_ID) {
+ IP_SET_ASSERT(ip_set_list[index]);
+ ip_set_flush_set(ip_set_list[index]);
+ } else
+ FOREACH_SET_DO(ip_set_flush_set);
+
+ return 0;
+}
+
+/* Rename a set */
+static int
+ip_set_rename(ip_set_id_t index, const char *name)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_id_t i;
+ int res = 0;
+
+ DP("set: %s to %s", set->name, name);
+ WRITE_LOCK(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && strncmp(ip_set_list[i]->name,
+ name,
+ IP_SET_MAXNAMELEN - 1) == 0) {
+ res = -EEXIST;
+ goto unlock;
+ }
+ }
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ unlock:
+ WRITE_UNLOCK(&ip_set_lock);
+ return res;
+}
+
+/*
+ * Swap two sets so that name/index points to the other.
+ * References are also swapped.
+ */
+static int
+ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
+{
+ struct ip_set *from = ip_set_list[from_index];
+ struct ip_set *to = ip_set_list[to_index];
+ char from_name[IP_SET_MAXNAMELEN];
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
+ /* Type can't be changed. Artificial restriction. */
+ if (from->type->typecode != to->type->typecode)
+ return -ENOEXEC;
+
+ /* No magic here: ref munging protected by the mutex */
+ WRITE_LOCK(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
+
+ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
+
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
+
+ WRITE_UNLOCK(&ip_set_lock);
+ return 0;
+}
+
+/*
+ * List set data
+ */
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
+ ip_set_id_t id, size_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
+}
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
+ ip_set_id_t id, size_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
+}
+
+static inline void
+__set_hash_bindings(struct ip_set_hash *set_hash,
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
+ struct ip_set_hash_list *hash_list =
+ (struct ip_set_hash_list *)(data + *used);
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
+ *used += sizeof(struct ip_set_hash_list);
+ }
+}
+
+static int ip_set_list_set(ip_set_id_t index,
+ void *data,
+ int *used,
+ int len)
+{
+ struct ip_set *set = ip_set_list[index];
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
+ set_list = (struct ip_set_list *) (data + *used);
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_list) > len)
+ goto not_enough_mem;
+ *used += sizeof(struct ip_set_list);
+
+ read_lock_bh(&set->lock);
+ /* Get and ensure set specific header size */
+ set_list->header_size = set->type->header_size;
+ if (*used + set_list->header_size > len)
+ goto unlock_set;
+
+ /* Fill in the header */
+ set_list->index = index;
+ set_list->binding = set->binding;
+ set_list->ref = atomic_read(&set->ref);
+
+ /* Fill in set specific header data */
+ DP("call list_header");
+ set->type->list_header(set, data + *used);
+ DP("call list_header, done");
+ *used += set_list->header_size;
+
+ /* Get and ensure set specific members size */
+ DP("call list_members_size");
+ set_list->members_size = set->type->list_members_size(set);
+ DP("call list_members_size, done");
+ if (*used + set_list->members_size > len)
+ goto unlock_set;
+
+ /* Fill in set specific members data */
+ DP("call list_members");
+ set->type->list_members(set, data + *used);
+ DP("call list_members, done");
+ *used += set_list->members_size;
+ read_unlock_bh(&set->lock);
+
+ /* Bindings */
+
+ /* Get and ensure set specific bindings size */
+ set_list->bindings_size = 0;
+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
+ set->id, &set_list->bindings_size);
+ if (*used + set_list->bindings_size > len)
+ goto not_enough_mem;
+
+	/* Fill in set specific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
+
+ return 0;
+
+ unlock_set:
+ read_unlock_bh(&set->lock);
+ not_enough_mem:
+ DP("not enough mem, try again");
+ return -EAGAIN;
+}
+
+/*
+ * Save sets
+ */
+static int ip_set_save_set(ip_set_id_t index,
+ void *data,
+ int *used,
+ int len)
+{
+ struct ip_set *set;
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
+ set_save = (struct ip_set_save *) (data + *used);
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
+ goto not_enough_mem;
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
+ /* Get and ensure set specific header size */
+ set_save->header_size = set->type->header_size;
+ if (*used + set_save->header_size > len)
+ goto unlock_set;
+
+ /* Fill in the header */
+ set_save->index = index;
+ set_save->binding = set->binding;
+
+	/* Fill in set specific header data */
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
+ DP("set header filled: %s, used: %u %p %p", set->name, *used,
+ data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
+ goto unlock_set;
+
+	/* Fill in set specific members data */
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
+ DP("set members filled: %s, used: %u %p %p", set->name, *used,
+ data, data + *used);
+ return 0;
+
+ unlock_set:
+ read_unlock_bh(&set->lock);
+ not_enough_mem:
+ DP("not enough mem, try again");
+ return -EAGAIN;
+}
+
+static inline void
+__set_hash_save_bindings(struct ip_set_hash *set_hash,
+ ip_set_id_t id,
+ void *data,
+ int *used,
+ int len,
+ int *res)
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
+ struct ip_set_hash_save *hash_save =
+ (struct ip_set_hash_save *)(data + *used);
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
+ return;
+ }
+ hash_save->id = set_hash->id;
+ hash_save->ip = set_hash->ip;
+ hash_save->binding = set_hash->binding;
+ *used += sizeof(struct ip_set_hash_save);
+ }
+}
+
+static int ip_set_save_bindings(ip_set_id_t index,
+ void *data,
+ int *used,
+ int len)
+{
+ int res = 0;
+ struct ip_set_save *set_save;
+
+ DP("used %u, len %u", *used, len);
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
+ return -ENOMEM;
+
+ /* Marker */
+ set_save = (struct ip_set_save *) (data + *used);
+ set_save->index = IP_SET_INVALID_ID;
+ *used += sizeof(struct ip_set_save);
+
+ DP("marker added used %u, len %u", *used, len);
+ /* Fill in bindings data */
+ if (index != IP_SET_INVALID_ID)
+ /* Sets are identified by id in hash */
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
+ return res;
+}
+
+/*
+ * Restore sets
+ */
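+/* The restore blob is a sequence of (header, members data) records,
+ * terminated by a marker record with index == IP_SET_INVALID_ID and
+ * followed by the saved bindings. Returns 0 on success or the number
+ * of the offending input line on failure. */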
+static int ip_set_restore(void *data,
+ int len)
+{
+ int res = 0;
+ int line = 0, used = 0, members_size;
+ struct ip_set *set;
+ struct ip_set_hash_save *hash_save;
+ struct ip_set_restore *set_restore;
+ ip_set_id_t index;
+
+ /* Loop to restore sets */
+ while (1) {
+ line++;
+
+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
+ set_restore = (struct ip_set_restore *) (data + used);
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
+ if (used
+ + set_restore->header_size
+ + set_restore->members_size > len)
+ return line;
+
+ /* Check marker */
+ if (set_restore->index == IP_SET_INVALID_ID) {
+ line--;
+ goto bindings;
+ }
+
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
+ set_restore->typename,
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
+
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
+
+ index = ip_set_find_byindex(set_restore->index);
+ DP("index %u, restore_index %u", index, set_restore->index);
+ if (index != set_restore->index)
+ return line;
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
+ DP("members_size %u reqsize %u",
+ set_restore->members_size, set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
+ DP("members: %u, line %u", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
+ if (!(res == 0 || res == -EEXIST))
+ return line;
+ members_size += set->type->reqsize;
+ }
+
+ DP("members_size %u %u",
+ set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
+ used += set_restore->members_size;
+ }
+
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
+ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
+ hash_save = (struct ip_set_hash_save *) (data + used);
+ used += sizeof(struct ip_set_hash_save);
+
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
+ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+
+ set = ip_set_list[hash_save->id];
+ /* Null valued IP means default binding */
+ if (hash_save->ip)
+ res = ip_set_hash_add(set->id,
+ hash_save->ip,
+ hash_save->binding);
+ else {
+ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
+ WRITE_LOCK(&ip_set_lock);
+ set->binding = hash_save->binding;
+ __ip_set_get(set->binding);
+ WRITE_UNLOCK(&ip_set_lock);
+ DP("default binding: %u", set->binding);
+ }
+ if (res != 0)
+ return line;
+ }
+ if (used != len)
+ return line;
+
+ return 0;
+}
+
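+/*
+ * setsockopt() handler: the request is copied into a temporary
+ * buffer, the protocol version is verified and the opcode is then
+ * dispatched either to one of the administrative commands
+ * (create/destroy/flush/rename/swap) or, via adtfn_table, to the
+ * add/del/test and bind/unbind/test_bind handlers.
+ */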
+static int
+ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
+{
+ void *data;
+ int res = 0; /* Assume OK */
+ unsigned *op;
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
+ const void *data, size_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
+ const void *data, size_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
+ };
+
+ DP("optval=%d, user=%p, len=%d", optval, user, len);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optval != SO_IP_SET)
+ return -EBADF;
+ if (len <= sizeof(unsigned)) {
+ ip_set_printk("short userdata (want >%zu, got %u)",
+ sizeof(unsigned), len);
+ return -EINVAL;
+ }
+ data = vmalloc(len);
+ if (!data) {
+ DP("out of mem for %u bytes", len);
+ return -ENOMEM;
+ }
+ if (copy_from_user(data, user, len) != 0) {
+ res = -EFAULT;
+ goto done;
+ }
+ if (down_interruptible(&ip_set_app_mutex)) {
+ res = -EINTR;
+ goto done;
+ }
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version =
+ (struct ip_set_req_version *) data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
+ }
+ }
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
+ struct ip_set_req_create *req_create
+ = (struct ip_set_req_create *) data;
+
+ if (len <= sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
+ res = -EINVAL;
+ goto done;
+ }
+ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
+ res = ip_set_create(req_create->name,
+ req_create->typename,
+ IP_SET_INVALID_ID,
+ data + sizeof(struct ip_set_req_create),
+ len - sizeof(struct ip_set_req_create));
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
+ struct ip_set_req_std *req_destroy
+ = (struct ip_set_req_std *) data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
+ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ index = ip_set_find_byname(req_destroy->name);
+
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ }
+
+ res = ip_set_destroy(index);
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
+ struct ip_set_req_std *req_flush =
+ (struct ip_set_req_std *) data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
+ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ index = ip_set_find_byname(req_flush->name);
+
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ }
+ res = ip_set_flush(index);
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
+ struct ip_set_req_create *req_rename
+ = (struct ip_set_req_create *) data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
+ res = -EINVAL;
+ goto done;
+ }
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ res = ip_set_rename(index, req_rename->typename);
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
+ struct ip_set_req_create *req_swap
+ = (struct ip_set_req_create *) data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid SWAP data (want %zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
+ res = -EINVAL;
+ goto done;
+ }
+
+ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ index = ip_set_find_byname(req_swap->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ to_index = ip_set_find_byname(req_swap->typename);
+ if (to_index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ res = ip_set_swap(index, to_index);
+ goto done;
+ }
+ default:
+ break; /* Set identified by id */
+ }
+
+	/* Here we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
+ goto done;
+ }
+ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
+
+ if (len < sizeof(struct ip_set_req_adt)) {
+ ip_set_printk("short data in adt request (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_adt), len);
+ res = -EINVAL;
+ goto done;
+ }
+ req_adt = (struct ip_set_req_adt *) data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
+ if (!(*op == IP_SET_OP_UNBIND_SET
+ && req_adt->index == IP_SET_INVALID_ID)) {
+ index = ip_set_find_byindex(req_adt->index);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ }
+ res = adtfn(index, data, len);
+
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
+ if (res > 0)
+ res = 0;
+ DP("final result %d", res);
+ return res;
+}
+
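+/*
+ * getsockopt() handler: serves the query operations (VERSION,
+ * GET_BYNAME/GET_BYINDEX, ADT_GET, MAX_SETS, LIST/SAVE and their
+ * size queries) as well as RESTORE, and copies the result back
+ * to userspace.
+ */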
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
+{
+ int res = 0;
+ unsigned *op;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ void *data;
+ int copylen = *len;
+
+ DP("optval=%d, user=%p, len=%d", optval, user, *len);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optval != SO_IP_SET)
+ return -EBADF;
+ if (*len < sizeof(unsigned)) {
+ ip_set_printk("short userdata (want >=%zu, got %d)",
+ sizeof(unsigned), *len);
+ return -EINVAL;
+ }
+ data = vmalloc(*len);
+ if (!data) {
+ DP("out of mem for %d bytes", *len);
+ return -ENOMEM;
+ }
+ if (copy_from_user(data, user, *len) != 0) {
+ res = -EFAULT;
+ goto done;
+ }
+ if (down_interruptible(&ip_set_app_mutex)) {
+ res = -EINTR;
+ goto done;
+ }
+
+ op = (unsigned *) data;
+ DP("op=%x", *op);
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version =
+ (struct ip_set_req_version *) data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
+ }
+ }
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
+ struct ip_set_req_version *req_version =
+ (struct ip_set_req_version *) data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
+ sizeof(struct ip_set_req_version),
+ *len);
+ res = -EINVAL;
+ goto done;
+ }
+
+ req_version->version = IP_SET_PROTOCOL_VERSION;
+ res = copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version));
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
+ struct ip_set_req_get_set *req_get
+ = (struct ip_set_req_get_set *) data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
+ sizeof(struct ip_set_req_get_set), *len);
+ res = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
+ index = ip_set_find_byname(req_get->set.name);
+ req_get->set.index = index;
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
+ struct ip_set_req_get_set *req_get
+ = (struct ip_set_req_get_set *) data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
+ sizeof(struct ip_set_req_get_set), *len);
+ res = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
+ index = ip_set_find_byindex(req_get->set.index);
+ strncpy(req_get->set.name,
+ index == IP_SET_INVALID_ID ? ""
+ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
+ struct ip_set_req_adt_get *req_get
+ = (struct ip_set_req_adt_get *) data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
+ sizeof(struct ip_set_req_adt_get), *len);
+ res = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
+ index = ip_set_find_byname(req_get->set.name);
+ if (index != IP_SET_INVALID_ID) {
+ req_get->set.index = index;
+ strncpy(req_get->typename,
+ ip_set_list[index]->type->typename,
+ IP_SET_MAXNAMELEN - 1);
+ } else {
+ res = -ENOENT;
+ goto done;
+ }
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
+ struct ip_set_req_max_sets *req_max_sets
+ = (struct ip_set_req_max_sets *) data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
+ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
+ sizeof(struct ip_set_req_max_sets), *len);
+ res = -EINVAL;
+ goto done;
+ }
+
+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_max_sets->set.index =
+ ip_set_find_byname(req_max_sets->set.name);
+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
+ goto done;
+ }
+ }
+ req_max_sets->max_sets = ip_set_max;
+ req_max_sets->sets = 0;
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL)
+ req_max_sets->sets++;
+ }
+ goto copy;
+ }
+ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
+ struct ip_set_req_setnames *req_setnames
+ = (struct ip_set_req_setnames *) data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
+ int used;
+
+ if (*len < sizeof(struct ip_set_req_setnames)) {
+ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
+ sizeof(struct ip_set_req_setnames), *len);
+ res = -EINVAL;
+ goto done;
+ }
+
+ req_setnames->size = 0;
+ used = sizeof(struct ip_set_req_setnames);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
+ name_list = (struct ip_set_name_list *)
+ (data + used);
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
+ goto done;
+ }
+ set = ip_set_list[i];
+ /* Fill in index, name, etc. */
+ name_list->index = i;
+ name_list->id = set->id;
+ strncpy(name_list->name,
+ set->name,
+ IP_SET_MAXNAMELEN - 1);
+ strncpy(name_list->typename,
+ set->type->typename,
+ IP_SET_MAXNAMELEN - 1);
+ DP("filled %s of type %s, index %u\n",
+ name_list->name, name_list->typename,
+ name_list->index);
+ if (!(req_setnames->index == IP_SET_INVALID_ID
+ || req_setnames->index == i))
+ continue;
+ /* Update size */
+ switch (*op) {
+ case IP_SET_OP_LIST_SIZE: {
+ req_setnames->size += sizeof(struct ip_set_list)
+ + set->type->header_size
+ + set->type->list_members_size(set);
+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
+ i, &req_setnames->size);
+ break;
+ }
+ case IP_SET_OP_SAVE_SIZE: {
+ req_setnames->size += sizeof(struct ip_set_save)
+ + set->type->header_size
+ + set->type->list_members_size(set);
+ FOREACH_HASH_DO(__set_hash_bindings_size_save,
+ i, &req_setnames->size);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ if (copylen != used) {
+ res = -EAGAIN;
+ goto done;
+ }
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
+ struct ip_set_req_list *req_list
+ = (struct ip_set_req_list *) data;
+ ip_set_id_t i;
+ int used;
+
+ if (*len < sizeof(struct ip_set_req_list)) {
+ ip_set_printk("short LIST (want >=%zu, got %d)",
+ sizeof(struct ip_set_req_list), *len);
+ res = -EINVAL;
+ goto done;
+ }
+ index = req_list->index;
+ if (index != IP_SET_INVALID_ID
+ && ip_set_find_byindex(index) != index) {
+ res = -ENOENT;
+ goto done;
+ }
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
+ /* List all sets */
+ for (i = 0; i < ip_set_max && res == 0; i++) {
+ if (ip_set_list[i] != NULL)
+ res = ip_set_list_set(i, data, &used, *len);
+ }
+ } else {
+ /* List an individual set */
+ res = ip_set_list_set(index, data, &used, *len);
+ }
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
+ res = -EAGAIN;
+ goto done;
+ }
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
+ struct ip_set_req_list *req_save
+ = (struct ip_set_req_list *) data;
+ ip_set_id_t i;
+ int used;
+
+ if (*len < sizeof(struct ip_set_req_list)) {
+ ip_set_printk("short SAVE (want >=%zu, got %d)",
+ sizeof(struct ip_set_req_list), *len);
+ res = -EINVAL;
+ goto done;
+ }
+ index = req_save->index;
+ if (index != IP_SET_INVALID_ID
+ && ip_set_find_byindex(index) != index) {
+ res = -ENOENT;
+ goto done;
+ }
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
+ /* Save all sets */
+ for (i = 0; i < ip_set_max && res == 0; i++) {
+ if (ip_set_list[i] != NULL)
+ res = ip_set_save_set(i, data, &used, *len);
+ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
+
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
+ res = -EAGAIN;
+ goto done;
+ }
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
+ struct ip_set_req_setnames *req_restore
+ = (struct ip_set_req_setnames *) data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
+ req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
+ DP("ip_set_restore: %u", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
+ copylen = sizeof(struct ip_set_req_setnames);
+ goto copy;
+ }
+ goto done;
+ }
+ default:
+ res = -EBADMSG;
+ goto done;
+ } /* end of switch(op) */
+
+ copy:
+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ if (res == 0)
+ res = copy_to_user(user, data, copylen);
+ else
+ copy_to_user(user, data, copylen);
+
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
+ if (res > 0)
+ res = 0;
+ DP("final result %d", res);
+ return res;
+}
+
+static struct nf_sockopt_ops so_set = {
+ .pf = PF_INET,
+ .set_optmin = SO_IP_SET,
+ .set_optmax = SO_IP_SET + 1,
+ .set = &ip_set_sockfn_set,
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
+ .use = 0
+};
+
+static int max_sets, hash_size;
+MODULE_PARM(max_sets, "i");
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_PARM(hash_size, "i");
+MODULE_PARM_DESC(hash_size, "hash size for bindings");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("module implementing core IP set support");
+
+static int __init init(void)
+{
+ int res;
+ ip_set_id_t i;
+
+ get_random_bytes(&ip_set_hash_random, 4);
+ if (max_sets)
+ ip_set_max = max_sets;
+ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
+ if (!ip_set_list) {
+ printk(KERN_ERR "Unable to create ip_set_list\n");
+ return -ENOMEM;
+ }
+ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
+ if (hash_size)
+ ip_set_bindings_hash_size = hash_size;
+ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
+ if (!ip_set_hash) {
+ printk(KERN_ERR "Unable to create ip_set_hash\n");
+ vfree(ip_set_list);
+ return -ENOMEM;
+ }
+ for (i = 0; i < ip_set_bindings_hash_size; i++)
+ INIT_LIST_HEAD(&ip_set_hash[i]);
+
+ INIT_LIST_HEAD(&set_type_list);
+
+ res = nf_register_sockopt(&so_set);
+ if (res != 0) {
+ ip_set_printk("SO_SET registry failed: %d", res);
+ vfree(ip_set_list);
+ vfree(ip_set_hash);
+ return res;
+ }
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ /* There can't be any existing set or binding. Racy. */
+ nf_unregister_sockopt(&so_set);
+ vfree(ip_set_list);
+ vfree(ip_set_hash);
+ DP("these are the famous last words");
+}
+
+EXPORT_SYMBOL(ip_set_register_set_type);
+EXPORT_SYMBOL(ip_set_unregister_set_type);
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
+EXPORT_SYMBOL(ip_set_put);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
+EXPORT_SYMBOL(ip_set_testip_kernel);
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
@@ -0,0 +1,379 @@
+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/netfilter_ipv4/ip_set_prime.h>
+
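+/*
+ * The iphash type uses open addressing: jhash_ip() selects the
+ * starting slot, randhash_ip() the probe step, and at most
+ * map->probes slots are tried before giving up.
+ */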
+static inline __u32
+jhash_ip(const struct ip_set_iphash *map, ip_set_ip_t ip)
+{
+ return jhash_1word(ip, map->initval);
+}
+
+static inline __u32
+randhash_ip(const struct ip_set_iphash *map, ip_set_ip_t ip)
+{
+ return (1 + ip % map->prime);
+}
+
+static inline __u32
+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ __u32 jhash, randhash, id;
+ u_int16_t i;
+
+ *hash_ip = ip & map->netmask;
+ jhash = jhash_ip(map, *hash_ip);
+ randhash = randhash_ip(map, *hash_ip);
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
+
+ for (i = 0; i < map->probes; i++) {
+ id = (jhash + i * randhash) % map->hashsize;
+ DP("hash key: %u", id);
+ if (map->members[id] == *hash_ip)
+ return id;
+		/* No shortcut when testing - there can be
+		 * deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ return (hash_id(set, ip, hash_ip) != UINT_MAX);
+}
+
+static int
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_iphash *req =
+ (struct ip_set_req_iphash *) data;
+
+ if (size != sizeof(struct ip_set_req_iphash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iphash),
+ size);
+ return -EINVAL;
+ }
+ return __testip(set, req->ip, hash_ip);
+}
+
+static int
+testip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __testip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static inline int
+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ __u32 jhash, randhash, probe;
+ u_int16_t i;
+
+ *hash_ip = ip & map->netmask;
+ jhash = jhash_ip(map, *hash_ip);
+ randhash = randhash_ip(map, *hash_ip);
+
+ for (i = 0; i < map->probes; i++) {
+ probe = (jhash + i * randhash) % map->hashsize;
+ if (map->members[probe] == *hash_ip)
+ return -EEXIST;
+ if (!map->members[probe]) {
+ map->members[probe] = *hash_ip;
+ return 0;
+ }
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static int
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_iphash *req =
+ (struct ip_set_req_iphash *) data;
+
+ if (size != sizeof(struct ip_set_req_iphash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iphash),
+ size);
+ return -EINVAL;
+ }
+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
+}
+
+static int
+addip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __addip((struct ip_set_iphash *) set->data,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
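+/*
+ * Rehashing callback: grow the hash by map->resize percent, pick
+ * fresh hash parameters and re-insert every member into the new
+ * table under the set lock; -ERANGE if resizing is disabled.
+ */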
+static int retry(struct ip_set *set)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ ip_set_ip_t hash_ip, *members;
+ u_int32_t i, hashsize;
+ unsigned newbytes;
+ int res;
+ struct ip_set_iphash tmp = {
+ .hashsize = map->hashsize,
+ .probes = map->probes,
+ .resize = map->resize,
+ .netmask = map->netmask,
+ };
+
+ if (map->resize == 0)
+ return -ERANGE;
+
+ again:
+ res = 0;
+
+ /* Calculate new parameters */
+ get_random_bytes(&tmp.initval, 4);
+ hashsize = tmp.hashsize + (tmp.hashsize * map->resize)/100;
+ if (hashsize == tmp.hashsize)
+ hashsize++;
+ tmp.prime = make_prime(hashsize);
+
+ ip_set_printk("rehashing of set %s triggered: "
+ "hashsize grows from %u to %u",
+ set->name, tmp.hashsize, hashsize);
+ tmp.hashsize = hashsize;
+
+ newbytes = hashsize * sizeof(ip_set_ip_t);
+ tmp.members = ip_set_malloc(newbytes);
+ if (!tmp.members) {
+ DP("out of memory for %d bytes", newbytes);
+ return -ENOMEM;
+ }
+ memset(tmp.members, 0, newbytes);
+
+ write_lock_bh(&set->lock);
+ map = (struct ip_set_iphash *) set->data; /* Play safe */
+ for (i = 0; i < map->hashsize && res == 0; i++) {
+ if (map->members[i])
+ res = __addip(&tmp, map->members[i], &hash_ip);
+ }
+ if (res) {
+ /* Failure, try again */
+ write_unlock_bh(&set->lock);
+ ip_set_free(tmp.members, newbytes);
+ goto again;
+ }
+
+ /* Success at resizing! */
+ members = map->members;
+ hashsize = map->hashsize;
+
+ map->initval = tmp.initval;
+ map->prime = tmp.prime;
+ map->hashsize = tmp.hashsize;
+ map->members = tmp.members;
+ write_unlock_bh(&set->lock);
+
+ ip_set_free(members, hashsize * sizeof(ip_set_ip_t));
+
+ return 0;
+}
+
+static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ ip_set_ip_t id = hash_id(set, ip, hash_ip);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
+
+ map->members[id] = 0;
+ return 0;
+}
+
+static int
+delip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_iphash *req =
+ (struct ip_set_req_iphash *) data;
+
+ if (size != sizeof(struct ip_set_req_iphash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iphash),
+ size);
+ return -EINVAL;
+ }
+ return __delip(set, req->ip, hash_ip);
+}
+
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __delip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ unsigned newbytes;
+ struct ip_set_req_iphash_create *req =
+ (struct ip_set_req_iphash_create *) data;
+ struct ip_set_iphash *map;
+
+ if (size != sizeof(struct ip_set_req_iphash_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iphash_create),
+ size);
+ return -EINVAL;
+ }
+
+ if (req->hashsize < 1) {
+ ip_set_printk("hashsize too small");
+ return -ENOEXEC;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iphash), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_iphash));
+ return -ENOMEM;
+ }
+ get_random_bytes(&map->initval, 4);
+ map->prime = make_prime(req->hashsize);
+ map->hashsize = req->hashsize;
+ map->probes = req->probes;
+ map->resize = req->resize;
+ map->netmask = req->netmask;
+ newbytes = map->hashsize * sizeof(ip_set_ip_t);
+ map->members = ip_set_malloc(newbytes);
+ if (!map->members) {
+ DP("out of memory for %d bytes", newbytes);
+ kfree(map);
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
+
+ set->data = map;
+ return 0;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+
+ ip_set_free(map->members, map->hashsize * sizeof(ip_set_ip_t));
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ memset(map->members, 0, map->hashsize * sizeof(ip_set_ip_t));
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ struct ip_set_req_iphash_create *header =
+ (struct ip_set_req_iphash_create *) data;
+
+ header->hashsize = map->hashsize;
+ header->probes = map->probes;
+ header->resize = map->resize;
+ header->netmask = map->netmask;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+
+ return (map->hashsize * sizeof(ip_set_ip_t));
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
+ int bytes = map->hashsize * sizeof(ip_set_ip_t);
+
+ memcpy(data, map->members, bytes);
+}
+
+static struct ip_set_type ip_set_iphash = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_IP,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_iphash),
+ .addip = &addip,
+ .addip_kernel = &addip_kernel,
+ .retry = &retry,
+ .delip = &delip,
+ .delip_kernel = &delip_kernel,
+ .testip = &testip,
+ .testip_kernel = &testip_kernel,
+ .header_size = sizeof(struct ip_set_req_iphash_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iphash type of IP sets");
+
+static int __init init(void)
+{
+ return ip_set_register_set_type(&ip_set_iphash);
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_iphash);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
@@ -0,0 +1,314 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the single bitmap type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <linux/spinlock.h>
+
+#include <linux/netfilter_ipv4/ip_set_ipmap.h>
+
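+/*
+ * The ipmap type is a bitmap over the first_ip..last_ip range;
+ * ip_to_id() converts an address into its bit position, honouring
+ * the netmask/hosts granularity set up at create time.
+ */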
+static inline ip_set_ip_t
+ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
+{
+ return (ip - map->first_ip)/map->hosts;
+}
+
+static inline int
+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
+static int
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_ipmap *req =
+ (struct ip_set_req_ipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_ipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_ipmap),
+ size);
+ return -EINVAL;
+ }
+ return __testip(set, req->ip, hash_ip);
+}
+
+static int
+testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ u_int32_t flags,
+ ip_set_ip_t *hash_ip)
+{
+ int res;
+
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
+ flags & IPSET_SRC ? "SRC" : "DST",
+ NIPQUAD(skb->nh.iph->saddr),
+ NIPQUAD(skb->nh.iph->daddr));
+
+ res = __testip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+ return (res < 0 ? 0 : res);
+}
+
+static inline int
+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = ip & map->netmask;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
+
+ return 0;
+}
+
+static int
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_ipmap *req =
+ (struct ip_set_req_ipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_ipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_ipmap),
+ size);
+ return -EINVAL;
+ }
+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
+ return __addip(set, req->ip, hash_ip);
+}
+
+static int
+addip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __addip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = ip & map->netmask;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
+
+ return 0;
+}
+
+static int
+delip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_ipmap *req =
+ (struct ip_set_req_ipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_ipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_ipmap),
+ size);
+ return -EINVAL;
+ }
+ return __delip(set, req->ip, hash_ip);
+}
+
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __delip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ int newbytes;
+ struct ip_set_req_ipmap_create *req =
+ (struct ip_set_req_ipmap_create *) data;
+ struct ip_set_ipmap *map;
+
+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_ipmap_create),
+ size);
+ return -EINVAL;
+ }
+
+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
+ HIPQUAD(req->from), HIPQUAD(req->to));
+
+ if (req->from > req->to) {
+ DP("bad ip range");
+ return -ENOEXEC;
+ }
+
+ if (req->to - req->from > MAX_RANGE) {
+ ip_set_printk("range too big (max %d addresses)",
+ MAX_RANGE);
+ return -ENOEXEC;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_ipmap));
+ return -ENOMEM;
+ }
+ map->first_ip = req->from;
+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
+ map->hosts = 1;
+ map->sizeid = map->last_ip - map->first_ip + 1;
+ } else {
+ unsigned int mask_bits, netmask_bits;
+ ip_set_ip_t mask;
+
+ map->first_ip &= map->netmask; /* Should we better bark? */
+
+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
+ netmask_bits = mask_to_bits(map->netmask);
+
+ if (!mask || netmask_bits <= mask_bits)
+ return -ENOEXEC;
+
+ map->hosts = 2 << (32 - netmask_bits - 1);
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ newbytes = bitmap_bytes(0, map->sizeid - 1);
+ map->members = kmalloc(newbytes, GFP_KERNEL);
+ if (!map->members) {
+ DP("out of memory for %d bytes", newbytes);
+ kfree(map);
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
+
+ set->data = map;
+ return 0;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+
+ kfree(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+ struct ip_set_req_ipmap_create *header =
+ (struct ip_set_req_ipmap_create *) data;
+
+ header->from = map->first_ip;
+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+
+ return bitmap_bytes(0, map->sizeid - 1);
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
+ int bytes = bitmap_bytes(0, map->sizeid - 1);
+
+ memcpy(data, map->members, bytes);
+}
+
+static struct ip_set_type ip_set_ipmap = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_IP,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_ipmap),
+ .addip = &addip,
+ .addip_kernel = &addip_kernel,
+ .delip = &delip,
+ .delip_kernel = &delip_kernel,
+ .testip = &testip,
+ .testip_kernel = &testip_kernel,
+ .header_size = sizeof(struct ip_set_req_ipmap_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
+static int __init init(void)
+{
+ return ip_set_register_set_type(&ip_set_ipmap);
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_ipmap);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
@@ -0,0 +1,511 @@
+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the iptree type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+/* Garbage collection interval in seconds: */
+#define IPTREE_GC_TIME 5*60
+/* Sleep this many milliseconds before trying again
+ * to delete the gc timer when destroying a set */
+#define IPTREE_DESTROY_SLEEP 100
+
+static kmem_cache_t *branch_cachep;
+static kmem_cache_t *leaf_cachep;
+
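+/*
+ * The iptree type stores addresses in a tree indexed by the four
+ * octets of the address (extracted by ABCD()); each leaf slot holds
+ * the expiry time of the corresponding entry, or a plain set/unset
+ * marker when the set has no timeout.
+ */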
+#define ABCD(a,b,c,d,addrp) do { \
+ a = ((unsigned char *)addrp)[3]; \
+ b = ((unsigned char *)addrp)[2]; \
+ c = ((unsigned char *)addrp)[1]; \
+ d = ((unsigned char *)addrp)[0]; \
+} while (0)
+
+#define TESTIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
+ branch = (map)->tree[elem]; \
+ } else \
+ return 0; \
+} while (0)
+
+static inline int
+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
+ TESTIP_WALK(map, a, btree);
+ TESTIP_WALK(btree, b, ctree);
+ TESTIP_WALK(ctree, c, dtree);
+ DP("%lu %lu", dtree->expires[d], jiffies);
+ return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
+ : dtree->expires[d]);
+}
+
+static int
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_iptree *req =
+ (struct ip_set_req_iptree *) data;
+
+ if (size != sizeof(struct ip_set_req_iptree)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iptree),
+ size);
+ return -EINVAL;
+ }
+ return __testip(set, req->ip, hash_ip);
+}
+
+static int
+testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ u_int32_t flags,
+ ip_set_ip_t *hash_ip)
+{
+ int res;
+
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
+ flags & IPSET_SRC ? "SRC" : "DST",
+ NIPQUAD(skb->nh.iph->saddr),
+ NIPQUAD(skb->nh.iph->daddr));
+
+ res = __testip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+ return (res < 0 ? 0 : res);
+}
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
+ DP("found %u", elem); \
+ branch = (map)->tree[elem]; \
+ } else { \
+ branch = (type *) \
+ kmem_cache_alloc(cachep, GFP_KERNEL); \
+ if (branch == NULL) \
+ return -ENOMEM; \
+ memset(branch, 0, sizeof(*branch)); \
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
+} while (0)
+
+static inline int
+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
+
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
+ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ DP("%u %lu", d, dtree->expires[d]);
+ return ret;
+}
+
+static int
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_req_iptree *req =
+ (struct ip_set_req_iptree *) data;
+
+ if (size != sizeof(struct ip_set_req_iptree)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iptree),
+ size);
+ return -EINVAL;
+ }
+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
+ return __addip(set, req->ip,
+ req->timeout ? req->timeout : map->timeout,
+ hash_ip);
+}
+
+static int
+addip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+
+ return __addip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ map->timeout,
+ hash_ip);
+}
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
+ branch = (map)->tree[elem]; \
+ } else \
+ return -EEXIST; \
+} while (0)
+
+static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
+ DELIP_WALK(btree, b, ctree);
+ DELIP_WALK(ctree, c, dtree);
+
+ if (dtree->expires[d]) {
+ dtree->expires[d] = 0;
+ return 0;
+ }
+ return -EEXIST;
+}
+
+static int
+delip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_iptree *req =
+ (struct ip_set_req_iptree *) data;
+
+ if (size != sizeof(struct ip_set_req_iptree)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iptree),
+ size);
+ return -EINVAL;
+ }
+ return __delip(set, req->ip, hash_ip);
+}
+
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __delip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 255; i++) { \
+ if (!(map)->tree[i]) \
+ continue; \
+ branch = (map)->tree[i]
+
+#define LOOP_WALK_END }
+
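+/*
+ * Timer driven garbage collector: walk the tree, zero the expired
+ * leaf entries and free branches that have become completely empty,
+ * then re-arm the timer.
+ */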
+static void ip_tree_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (void *) ul_set;
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ unsigned char i,j,k;
+
+ i = j = k = 0;
+ DP("gc: %s", set->name);
+ write_lock_bh(&set->lock);
+ LOOP_WALK_BEGIN(map, a, btree);
+ LOOP_WALK_BEGIN(btree, b, ctree);
+ LOOP_WALK_BEGIN(ctree, c, dtree);
+ for (d = 0; d < 255; d++) {
+ if (dtree->expires[d]) {
+ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
+ a, b, c, d,
+ dtree->expires[d], jiffies);
+ if (map->timeout
+ && time_before(dtree->expires[d], jiffies))
+ dtree->expires[d] = 0;
+ else
+ k = 1;
+ }
+ }
+ if (k == 0) {
+ DP("gc: %s: leaf %u %u %u empty",
+ set->name, a, b, c);
+ kmem_cache_free(leaf_cachep, dtree);
+ ctree->tree[c] = NULL;
+ } else {
+ DP("gc: %s: leaf %u %u %u not empty",
+ set->name, a, b, c);
+ j = 1;
+ k = 0;
+ }
+ LOOP_WALK_END;
+ if (j == 0) {
+ DP("gc: %s: branch %u %u empty",
+ set->name, a, b);
+ kmem_cache_free(branch_cachep, ctree);
+ btree->tree[b] = NULL;
+ } else {
+ DP("gc: %s: branch %u %u not empty",
+ set->name, a, b);
+ i = 1;
+ j = k = 0;
+ }
+ LOOP_WALK_END;
+ if (i == 0) {
+ DP("gc: %s: branch %u empty",
+ set->name, a);
+ kmem_cache_free(branch_cachep, btree);
+ map->tree[a] = NULL;
+ } else {
+ DP("gc: %s: branch %u not empty",
+ set->name, a);
+ i = j = k = 0;
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ struct ip_set_req_iptree_create *req =
+ (struct ip_set_req_iptree_create *) data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_iptree_create),
+ size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
+ memset(map, 0, sizeof(*map));
+ map->timeout = req->timeout;
+ set->data = map;
+
+	/* If there is no timeout for the entries,
+	 * we still have to call gc because delete
+	 * does not clean up empty branches */
+ map->gc_interval = IPTREE_GC_TIME;
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = ip_tree_gc;
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+
+ return 0;
+}
+
+static void __flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned int a,b,c;
+
+ LOOP_WALK_BEGIN(map, a, btree);
+ LOOP_WALK_BEGIN(btree, b, ctree);
+ LOOP_WALK_BEGIN(ctree, c, dtree);
+ kmem_cache_free(leaf_cachep, dtree);
+ LOOP_WALK_END;
+ kmem_cache_free(branch_cachep, ctree);
+ LOOP_WALK_END;
+ kmem_cache_free(branch_cachep, btree);
+ LOOP_WALK_END;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
+ __flush(map);
+ kfree(map);
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ unsigned int timeout = map->timeout;
+
+ __flush(map);
+ memset(map, 0, sizeof(*map));
+ map->timeout = timeout;
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_req_iptree_create *header =
+ (struct ip_set_req_iptree_create *) data;
+
+ header->timeout = map->timeout;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ unsigned int count = 0;
+
+ LOOP_WALK_BEGIN(map, a, btree);
+ LOOP_WALK_BEGIN(btree, b, ctree);
+ LOOP_WALK_BEGIN(ctree, c, dtree);
+ for (d = 0; d < 255; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ count++;
+ }
+ LOOP_WALK_END;
+ LOOP_WALK_END;
+ LOOP_WALK_END;
+
+ DP("members %u", count);
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ size_t offset = 0;
+ struct ip_set_req_iptree *entry;
+
+ LOOP_WALK_BEGIN(map, a, btree);
+ LOOP_WALK_BEGIN(btree, b, ctree);
+ LOOP_WALK_BEGIN(ctree, c, dtree);
+ for (d = 0; d < 255; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
+ entry = (struct ip_set_req_iptree *)(data + offset);
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
+ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
+ offset += sizeof(struct ip_set_req_iptree);
+ }
+ }
+ LOOP_WALK_END;
+ LOOP_WALK_END;
+ LOOP_WALK_END;
+}
+
+static struct ip_set_type ip_set_iptree = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_IP,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_iptree),
+ .addip = &addip,
+ .addip_kernel = &addip_kernel,
+ .delip = &delip,
+ .delip_kernel = &delip_kernel,
+ .testip = &testip,
+ .testip_kernel = &testip_kernel,
+ .header_size = sizeof(struct ip_set_req_iptree_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptree type of IP sets");
+
+static int __init init(void)
+{
+ int ret;
+
+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
+ sizeof(struct ip_set_iptreeb),
+ 0, 0, NULL, NULL);
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
+ sizeof(struct ip_set_iptreed),
+ 0, 0, NULL, NULL);
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
+ goto free_branch;
+ }
+ ret = ip_set_register_set_type(&ip_set_iptree);
+ if (ret == 0)
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
+ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_iptree);
+ kmem_cache_destroy(leaf_cachep);
+ kmem_cache_destroy(branch_cachep);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
@@ -0,0 +1,340 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the macipmap type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/vmalloc.h>
+
+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_macipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_macipmap),
+ size);
+ return -EINVAL;
+ }
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
+ if (test_bit(IPSET_MACIP_ISSET,
+ &table[req->ip - map->first_ip].flags)) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
+ } else {
+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
+ }
+}
+
+static int
+testip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+ struct ip_set_macip *table =
+ (struct ip_set_macip *) map->members;
+ ip_set_ip_t ip;
+
+ ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
+ flags & IPSET_SRC ? "SRC" : "DST",
+ NIPQUAD(skb->nh.iph->saddr),
+ NIPQUAD(skb->nh.iph->daddr));
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
+ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (test_bit(IPSET_MACIP_ISSET, &table[ip - map->first_ip].flags)) {
+ /* Is mac pointer valid?
+ * If so, compare... */
+ return (skb->mac.raw >= skb->head
+ && (skb->mac.raw + ETH_HLEN) <= skb->data
+ && (memcmp(skb->mac.ethernet->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
+ } else {
+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
+ }
+}
+
+/* returns 0 on success */
+static inline int
+__addip(struct ip_set *set,
+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+ struct ip_set_macip *table =
+ (struct ip_set_macip *) map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+ if (test_and_set_bit(IPSET_MACIP_ISSET,
+ &table[ip - map->first_ip].flags))
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
+ return 0;
+}
+
+static int
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_macipmap *req =
+ (struct ip_set_req_macipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_macipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_macipmap),
+ size);
+ return -EINVAL;
+ }
+ return __addip(set, req->ip, req->ethernet, hash_ip);
+}
+
+static int
+addip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ ip_set_ip_t ip;
+
+ ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+
+ if (!(skb->mac.raw >= skb->head
+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
+ return -EINVAL;
+
+ return __addip(set, ip, skb->mac.ethernet->h_source, hash_ip);
+}
+
+static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+ struct ip_set_macip *table =
+ (struct ip_set_macip *) map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
+ &table[ip - map->first_ip].flags))
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
+static int
+delip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_macipmap *req =
+ (struct ip_set_req_macipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_macipmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_macipmap),
+ size);
+ return -EINVAL;
+ }
+ return __delip(set, req->ip, hash_ip);
+}
+
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __delip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
+{
+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ int newbytes;
+ struct ip_set_req_macipmap_create *req =
+ (struct ip_set_req_macipmap_create *) data;
+ struct ip_set_macipmap *map;
+
+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_macipmap_create),
+ size);
+ return -EINVAL;
+ }
+
+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
+ HIPQUAD(req->from), HIPQUAD(req->to));
+
+ if (req->from > req->to) {
+ DP("bad ip range");
+ return -ENOEXEC;
+ }
+
+ if (req->to - req->from > MAX_RANGE) {
+ ip_set_printk("range too big (max %d addresses)",
+ MAX_RANGE);
+ return -ENOEXEC;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_macipmap));
+ return -ENOMEM;
+ }
+ map->flags = req->flags;
+ map->first_ip = req->from;
+ map->last_ip = req->to;
+ newbytes = members_size(map->first_ip, map->last_ip);
+ map->members = ip_set_malloc(newbytes);
+ DP("members: %u %p", newbytes, map->members);
+ if (!map->members) {
+ DP("out of memory for %d bytes", newbytes);
+ kfree(map);
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
+
+ set->data = map;
+ return 0;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+
+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+ struct ip_set_req_macipmap_create *header =
+ (struct ip_set_req_macipmap_create *) data;
+
+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
+ map->flags);
+
+ header->from = map->first_ip;
+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+
+ DP("%u", members_size(map->first_ip, map->last_ip));
+ return members_size(map->first_ip, map->last_ip);
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_macipmap *map =
+ (struct ip_set_macipmap *) set->data;
+
+ int bytes = members_size(map->first_ip, map->last_ip);
+
+ DP("members: %u %p", bytes, map->members);
+ memcpy(data, map->members, bytes);
+}
+
+static struct ip_set_type ip_set_macipmap = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_IP,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_macipmap),
+ .addip = &addip,
+ .addip_kernel = &addip_kernel,
+ .delip = &delip,
+ .delip_kernel = &delip_kernel,
+ .testip = &testip,
+ .testip_kernel = &testip_kernel,
+ .header_size = sizeof(struct ip_set_req_macipmap_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
+static int __init init(void)
+{
+ return ip_set_register_set_type(&ip_set_macipmap);
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_macipmap);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
@@ -0,0 +1,450 @@
+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/netfilter_ipv4/ip_set_prime.h>
+
+static inline __u32
+jhash_ip(const struct ip_set_nethash *map, ip_set_ip_t ip)
+{
+ return jhash_1word(ip, map->initval);
+}
+
+static inline __u32
+randhash_ip(const struct ip_set_nethash *map, ip_set_ip_t ip)
+{
+ return (1 + ip % map->prime);
+}
+
+static inline __u32
+hash_id_cidr(struct ip_set_nethash *map,
+ ip_set_ip_t ip,
+ unsigned char cidr,
+ ip_set_ip_t *hash_ip)
+{
+ __u32 jhash, randhash, id;
+ u_int16_t i;
+
+ *hash_ip = pack(ip, cidr);
+ jhash = jhash_ip(map, *hash_ip);
+ randhash = randhash_ip(map, *hash_ip);
+
+ for (i = 0; i < map->probes; i++) {
+ id = (jhash + i * randhash) % map->hashsize;
+ DP("hash key: %u", id);
+ if (map->members[id] == *hash_ip)
+ return id;
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
+ if (id != UINT_MAX)
+ break;
+ }
+ return id;
+}
+
+static inline int
+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+
+ return (hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
+}
+
+static inline int
+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ return (hash_id(set, ip, hash_ip) != UINT_MAX);
+}
+
+static int
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_nethash *req =
+ (struct ip_set_req_nethash *) data;
+
+ if (size != sizeof(struct ip_set_req_nethash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_nethash),
+ size);
+ return -EINVAL;
+ }
+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
+}
+
+static int
+testip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ return __testip(set,
+ ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+ hash_ip);
+}
+
+static inline int
+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
+{
+ __u32 jhash, randhash, probe;
+ u_int16_t i;
+
+ jhash = jhash_ip(map, ip);
+ randhash = randhash_ip(map, ip);
+
+ for (i = 0; i < map->probes; i++) {
+ probe = (jhash + i * randhash) % map->hashsize;
+ if (map->members[probe] == ip)
+ return -EEXIST;
+ if (!map->members[probe]) {
+ map->members[probe] = ip;
+ return 0;
+ }
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
+ ip_set_ip_t *hash_ip)
+{
+ *hash_ip = pack(ip, cidr);
+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
+
+ return __addip_base(map, *hash_ip);
+}
+
+static void
+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
+{
+ unsigned char next;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
+ if (map->cidr[i] == cidr) {
+ return;
+ } else if (map->cidr[i] < cidr) {
+ next = map->cidr[i];
+ map->cidr[i] = cidr;
+ cidr = next;
+ }
+ }
+ if (i < 30)
+ map->cidr[i] = cidr;
+}
+
+static int
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_nethash *req =
+ (struct ip_set_req_nethash *) data;
+ int ret;
+
+ if (size != sizeof(struct ip_set_req_nethash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_nethash),
+ size);
+ return -EINVAL;
+ }
+ ret = __addip((struct ip_set_nethash *) set->data,
+ req->ip, req->cidr, hash_ip);
+
+ if (ret == 0)
+ update_cidr_sizes((struct ip_set_nethash *) set->data,
+ req->cidr);
+
+ return ret;
+}
+
+static int
+addip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ int ret = -ERANGE;
+ ip_set_ip_t ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+
+ if (map->cidr[0])
+ ret = __addip(map, ip, map->cidr[0], hash_ip);
+
+ return ret;
+}
+
+static int retry(struct ip_set *set)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ ip_set_ip_t *members;
+ u_int32_t i, hashsize;
+ unsigned newbytes;
+ int res;
+ struct ip_set_nethash tmp = {
+ .hashsize = map->hashsize,
+ .probes = map->probes,
+ .resize = map->resize
+ };
+
+ if (map->resize == 0)
+ return -ERANGE;
+
+ memcpy(tmp.cidr, map->cidr, 30 * sizeof(unsigned char));
+ again:
+ res = 0;
+
+ /* Calculate new parameters */
+ get_random_bytes(&tmp.initval, 4);
+ hashsize = tmp.hashsize + (tmp.hashsize * map->resize)/100;
+ if (hashsize == tmp.hashsize)
+ hashsize++;
+ tmp.prime = make_prime(hashsize);
+
+ ip_set_printk("rehashing of set %s triggered: "
+ "hashsize grows from %u to %u",
+ set->name, tmp.hashsize, hashsize);
+ tmp.hashsize = hashsize;
+
+ newbytes = hashsize * sizeof(ip_set_ip_t);
+ tmp.members = ip_set_malloc(newbytes);
+ if (!tmp.members) {
+ DP("out of memory for %d bytes", newbytes);
+ return -ENOMEM;
+ }
+ memset(tmp.members, 0, newbytes);
+
+ write_lock_bh(&set->lock);
+ map = (struct ip_set_nethash *) set->data; /* Play safe */
+ for (i = 0; i < map->hashsize && res == 0; i++) {
+ if (map->members[i])
+ res = __addip_base(&tmp, map->members[i]);
+ }
+ if (res) {
+ /* Failure, try again */
+ write_unlock_bh(&set->lock);
+ ip_set_free(tmp.members, newbytes);
+ goto again;
+ }
+
+ /* Success at resizing! */
+ members = map->members;
+ hashsize = map->hashsize;
+
+ map->initval = tmp.initval;
+ map->prime = tmp.prime;
+ map->hashsize = tmp.hashsize;
+ map->members = tmp.members;
+ write_unlock_bh(&set->lock);
+
+ ip_set_free(members, hashsize * sizeof(ip_set_ip_t));
+
+ return 0;
+}
+
+static inline int
+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
+ ip_set_ip_t *hash_ip)
+{
+ ip_set_ip_t id = hash_id_cidr(map, ip, cidr, hash_ip);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
+
+ map->members[id] = 0;
+ return 0;
+}
+
+static int
+delip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_req_nethash *req =
+ (struct ip_set_req_nethash *) data;
+
+ if (size != sizeof(struct ip_set_req_nethash)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_nethash),
+ size);
+ return -EINVAL;
+ }
+ /* TODO: no garbage collection in map->cidr */
+ return __delip((struct ip_set_nethash *) set->data,
+ req->ip, req->cidr, hash_ip);
+}
+
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ int ret = -ERANGE;
+ ip_set_ip_t ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+
+ if (map->cidr[0])
+ ret = __delip(map, ip, map->cidr[0], hash_ip);
+
+ return ret;
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ unsigned newbytes;
+ struct ip_set_req_nethash_create *req =
+ (struct ip_set_req_nethash_create *) data;
+ struct ip_set_nethash *map;
+
+ if (size != sizeof(struct ip_set_req_nethash_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_nethash_create),
+ size);
+ return -EINVAL;
+ }
+
+ if (req->hashsize < 1) {
+ ip_set_printk("hashsize too small");
+ return -ENOEXEC;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_nethash), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_nethash));
+ return -ENOMEM;
+ }
+ get_random_bytes(&map->initval, 4);
+ map->prime = make_prime(req->hashsize);
+ map->hashsize = req->hashsize;
+ map->probes = req->probes;
+ map->resize = req->resize;
+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
+ newbytes = map->hashsize * sizeof(ip_set_ip_t);
+ map->members = ip_set_malloc(newbytes);
+ if (!map->members) {
+ DP("out of memory for %d bytes", newbytes);
+ kfree(map);
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
+
+ set->data = map;
+ return 0;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+
+ ip_set_free(map->members, map->hashsize * sizeof(ip_set_ip_t));
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ memset(map->members, 0, map->hashsize * sizeof(ip_set_ip_t));
+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ struct ip_set_req_nethash_create *header =
+ (struct ip_set_req_nethash_create *) data;
+
+ header->hashsize = map->hashsize;
+ header->probes = map->probes;
+ header->resize = map->resize;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+
+ return (map->hashsize * sizeof(ip_set_ip_t));
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ int bytes = map->hashsize * sizeof(ip_set_ip_t);
+
+ memcpy(data, map->members, bytes);
+}
+
+static struct ip_set_type ip_set_nethash = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_IP,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_nethash),
+ .addip = &addip,
+ .addip_kernel = &addip_kernel,
+ .retry = &retry,
+ .delip = &delip,
+ .delip_kernel = &delip_kernel,
+ .testip = &testip,
+ .testip_kernel = &testip_kernel,
+ .header_size = sizeof(struct ip_set_req_nethash_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("nethash type of IP sets");
+
+static int __init init(void)
+{
+ return ip_set_register_set_type(&ip_set_nethash);
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_nethash);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
@@ -0,0 +1,321 @@
+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a port set type as a bitmap */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/softirq.h>
+#include <linux/spinlock.h>
+
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
+
+static inline ip_set_ip_t
+get_port(const struct sk_buff *skb, u_int32_t flags)
+{
+ struct iphdr *iph = skb->nh.iph;
+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
+
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr *tcph = (struct tcphdr *)((u_int32_t *)iph + iph->ihl);
+
+ /* See comments at tcp_match in ip_tables.c */
+ if (offset != 0
+ || (offset == 0
+ && (skb->len - iph->ihl * 4) < sizeof(struct tcphdr)))
+ return INVALID_PORT;
+
+ return ntohs(flags & IPSET_SRC ?
+ tcph->source : tcph->dest);
+ }
+ case IPPROTO_UDP: {
+ struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
+
+ if (offset != 0
+ || (offset == 0
+ && (skb->len - iph->ihl * 4) < sizeof(struct udphdr)))
+ return INVALID_PORT;
+
+ return ntohs(flags & IPSET_SRC ?
+ udph->source : udph->dest);
+ }
+ default:
+ return INVALID_PORT;
+ }
+}
+
+static inline int
+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+
+ if (port < map->first_port || port > map->last_port)
+ return -ERANGE;
+
+ *hash_port = port;
+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
+ return !!test_bit(port - map->first_port, map->members);
+}
+
+static int
+testport(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_port)
+{
+ struct ip_set_req_portmap *req =
+ (struct ip_set_req_portmap *) data;
+
+ if (size != sizeof(struct ip_set_req_portmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_portmap),
+ size);
+ return -EINVAL;
+ }
+ return __testport(set, req->port, hash_port);
+}
+
+static int
+testport_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_port)
+{
+ int res;
+ ip_set_ip_t port = get_port(skb, flags);
+
+ DP("flag %s port %u", flags & IPSET_SRC ? "SRC" : "DST", port);
+ if (port == INVALID_PORT)
+ return 0;
+
+ res = __testport(set, port, hash_port);
+
+ return (res < 0 ? 0 : res);
+}
+
+static inline int
+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+
+ if (port < map->first_port || port > map->last_port)
+ return -ERANGE;
+ if (test_and_set_bit(port - map->first_port, map->members))
+ return -EEXIST;
+
+ *hash_port = port;
+ DP("port %u", port);
+ return 0;
+}
+
+static int
+addport(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_port)
+{
+ struct ip_set_req_portmap *req =
+ (struct ip_set_req_portmap *) data;
+
+ if (size != sizeof(struct ip_set_req_portmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_portmap),
+ size);
+ return -EINVAL;
+ }
+ return __addport(set, req->port, hash_port);
+}
+
+static int
+addport_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_port)
+{
+ ip_set_ip_t port = get_port(skb, flags);
+
+ if (port == INVALID_PORT)
+ return -EINVAL;
+
+ return __addport(set, port, hash_port);
+}
+
+static inline int
+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+
+ if (port < map->first_port || port > map->last_port)
+ return -ERANGE;
+ if (!test_and_clear_bit(port - map->first_port, map->members))
+ return -EEXIST;
+
+ *hash_port = port;
+ DP("port %u", port);
+ return 0;
+}
+
+static int
+delport(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_port)
+{
+ struct ip_set_req_portmap *req =
+ (struct ip_set_req_portmap *) data;
+
+ if (size != sizeof(struct ip_set_req_portmap)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_portmap),
+ size);
+ return -EINVAL;
+ }
+ return __delport(set, req->port, hash_port);
+}
+
+static int
+delport_kernel(struct ip_set *set, const struct sk_buff *skb,
+ u_int32_t flags, ip_set_ip_t *hash_port)
+{
+ ip_set_ip_t port = get_port(skb, flags);
+
+ if (port == INVALID_PORT)
+ return -EINVAL;
+
+ return __delport(set, port, hash_port);
+}
+
+static int create(struct ip_set *set, const void *data, size_t size)
+{
+ int newbytes;
+ struct ip_set_req_portmap_create *req =
+ (struct ip_set_req_portmap_create *) data;
+ struct ip_set_portmap *map;
+
+ if (size != sizeof(struct ip_set_req_portmap_create)) {
+ ip_set_printk("data length wrong (want %zu, have %zu)",
+ sizeof(struct ip_set_req_portmap_create),
+ size);
+ return -EINVAL;
+ }
+
+ DP("from %u to %u", req->from, req->to);
+
+ if (req->from > req->to) {
+ DP("bad port range");
+ return -ENOEXEC;
+ }
+
+ if (req->to - req->from > MAX_RANGE) {
+ ip_set_printk("range too big (max %d ports)",
+ MAX_RANGE);
+ return -ENOEXEC;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
+ sizeof(struct ip_set_portmap));
+ return -ENOMEM;
+ }
+ map->first_port = req->from;
+ map->last_port = req->to;
+ newbytes = bitmap_bytes(req->from, req->to);
+ map->members = kmalloc(newbytes, GFP_KERNEL);
+ if (!map->members) {
+ DP("out of memory for %d bytes", newbytes);
+ kfree(map);
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
+
+ set->data = map;
+ return 0;
+}
+
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+
+ kfree(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void flush(struct ip_set *set)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
+}
+
+static void list_header(const struct ip_set *set, void *data)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+ struct ip_set_req_portmap_create *header =
+ (struct ip_set_req_portmap_create *) data;
+
+ DP("list_header %u %u", map->first_port, map->last_port);
+
+ header->from = map->first_port;
+ header->to = map->last_port;
+}
+
+static int list_members_size(const struct ip_set *set)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+
+ return bitmap_bytes(map->first_port, map->last_port);
+}
+
+static void list_members(const struct ip_set *set, void *data)
+{
+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
+ int bytes = bitmap_bytes(map->first_port, map->last_port);
+
+ memcpy(data, map->members, bytes);
+}
+
+static struct ip_set_type ip_set_portmap = {
+ .typename = SETTYPE_NAME,
+ .typecode = IPSET_TYPE_PORT,
+ .protocol_version = IP_SET_PROTOCOL_VERSION,
+ .create = &create,
+ .destroy = &destroy,
+ .flush = &flush,
+ .reqsize = sizeof(struct ip_set_req_portmap),
+ .addip = &addport,
+ .addip_kernel = &addport_kernel,
+ .delip = &delport,
+ .delip_kernel = &delport_kernel,
+ .testip = &testport,
+ .testip_kernel = &testport_kernel,
+ .header_size = sizeof(struct ip_set_req_portmap_create),
+ .list_header = &list_header,
+ .list_members_size = &list_members_size,
+ .list_members = &list_members,
+ .me = THIS_MODULE,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("portmap type of IP sets");
+
+static int __init init(void)
+{
+ return ip_set_register_set_type(&ip_set_portmap);
+}
+
+static void __exit fini(void)
+{
+ /* FIXME: possible race with ip_set_create() */
+ ip_set_unregister_set_type(&ip_set_portmap);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
@@ -0,0 +1,114 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module to match an IP set. */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static inline int
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
+{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
+}
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ const struct ipt_set_info_match *info = matchinfo;
+
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
+static int
+checkentry(const char *tablename,
+ const struct ipt_ip *ip,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ struct ipt_set_info_match *info =
+ (struct ipt_set_info_match *) matchinfo;
+ ip_set_id_t index;
+
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
+ }
+
+ index = ip_set_get_byindex(info->match_set.index);
+
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set indentified by id %u to match",
+ info->match_set.index);
+ return 0; /* error */
+ }
+ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
+ ip_set_printk("That's nasty!");
+ return 0; /* error */
+ }
+
+ return 1;
+}
+
+static void destroy(void *matchinfo, unsigned int matchsize)
+{
+ struct ipt_set_info_match *info = matchinfo;
+
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+
+ ip_set_put(info->match_set.index);
+}
+
+static struct ipt_match set_match = {
+ .name = "set",
+ .match = &match,
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
+static int __init init(void)
+{
+ return ipt_register_match(&set_match);
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_match(&set_match);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
@@ -0,0 +1,127 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <net/protocol.h>
+#include <net/checksum.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv4/ip_nat_rule.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
+target(struct sk_buff **pskb,
+ unsigned int hooknum,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *targinfo,
+ void *userinfo)
+{
+ const struct ipt_set_info_target *info = targinfo;
+
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ *pskb,
+ info->add_set.flags);
+ if (info->del_set.index != IP_SET_INVALID_ID)
+ ip_set_delip_kernel(info->del_set.index,
+ *pskb,
+ info->del_set.flags);
+
+ return IPT_CONTINUE;
+}
+
+static int
+checkentry(const char *tablename,
+ const struct ipt_entry *e,
+ void *targinfo,
+ unsigned int targinfosize, unsigned int hook_mask)
+{
+ struct ipt_set_info_target *info =
+ (struct ipt_set_info_target *) targinfo;
+ ip_set_id_t index;
+
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
+ }
+
+ if (info->add_set.index != IP_SET_INVALID_ID) {
+ index = ip_set_get_byindex(info->add_set.index);
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("cannot find add_set index %u as target",
+ info->add_set.index);
+ return 0; /* error */
+ }
+ }
+
+ if (info->del_set.index != IP_SET_INVALID_ID) {
+ index = ip_set_get_byindex(info->del_set.index);
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("cannot find del_set index %u as target",
+ info->del_set.index);
+ return 0; /* error */
+ }
+ }
+ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
+ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
+ ip_set_printk("That's nasty!");
+ return 0; /* error */
+ }
+ return 1;
+}
+
+static void destroy(void *targetinfo, unsigned int targetsize)
+{
+ struct ipt_set_info_target *info = targetinfo;
+
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_put(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
+ ip_set_put(info->del_set.index);
+}
+
+static struct ipt_target SET_target = {
+ .name = "SET",
+ .target = target,
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
+static int __init init(void)
+{
+ return ipt_register_target(&SET_target);
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_target(&SET_target);
+}
+
+module_init(init);
+module_exit(fini);
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -75,6 +75,18 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
+obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
+obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
+ifdef CONFIG_IP_NF_SET
+ obj-$(CONFIG_IP_NF_SET) += ip_set.o
+ export-objs += ip_set.o
+endif
+obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
+obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
+obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o