--- linux-2.4.21/include/linux/if.h	2003-06-13 07:51:38.000000000 -0700
+++ linux-2.4.21.amds/include/linux/if.h	2003-07-30 16:27:15.000000000 -0700
@@ -48,6 +48,12 @@
 
 /* Private (from user) interface flags (netdevice->priv_flags). */
 #define IFF_802_1Q_VLAN 0x1             /* 802.1Q VLAN device.          */
+#define IFF_PKTGEN_RCV  0x2             /* Registered to receive & consume  Pktgen skbs */
+#define IFF_ACCEPT_LOCAL_ADDRS 0x4      /* Accept pkts even if they come from a local
+                                         * address.  This lets us send pkts to ourselves
+                                         * over external interfaces (when used in conjunction
+                                         * with SO_BINDTODEVICE).
+                                         */
 
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
--- linux-2.4.21/include/linux/netdevice.h	2003-06-13 07:51:38.000000000 -0700
+++ linux-2.4.21.amds/include/linux/netdevice.h	2003-07-30 16:27:20.000000000 -0700
@@ -296,7 +296,9 @@
 
 	unsigned short		flags;	/* interface flags (a la BSD)	*/
 	unsigned short		gflags;
-        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
+        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace,
+                                             * see: if.h for flag definitions.
+                                             */
         unsigned short          unused_alignment_fixer; /* Because we need priv_flags,
                                                          * and we want to be 32-bit aligned.
                                                          */
@@ -422,12 +424,20 @@
 	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
 	int			(*accept_fastpath)(struct net_device *, struct dst_entry*);
 
+#ifdef CONFIG_NET_SKB_RECYCLING
+	int			(*skb_recycle) (struct sk_buff *skb);
+	void			(*mem_reclaim) (struct net_device *dev);
+#endif
 	/* open/release and usage marking */
 	struct module *owner;
 
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+	struct macvlan_port *macvlan_priv;
+#endif
+
 #ifdef CONFIG_NET_FASTROUTE
 #define NETDEV_FASTROUTE_HMASK 0xF
 	/* Semi-private data. Keep it at the end of device struct. */
@@ -438,6 +448,7 @@
 	/* this will get initialized at each interface type init routine */
 	struct divert_blk	*divert;
 #endif /* CONFIG_NET_DIVERT */
+
 };
 
 
--- linux-2.4.21/net/core/dev.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/core/dev.c	2003-07-30 16:20:41.000000000 -0700
@@ -1,4 +1,4 @@
-/*
+/* -*-linux-c-*-
  * 	NET3	Protocol independent device support routines.
  *
  *		This program is free software; you can redistribute it and/or
@@ -82,6 +82,7 @@
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
+#include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/notifier.h>
 #include <linux/skbuff.h>
@@ -109,6 +110,11 @@
 #endif
 
 
+#if defined(CONFIG_NET_PKTGEN) || defined(CONFIG_NET_PKTGEN_MODULE)
+#include "pktgen.h"
+#endif
+
+
 /* This define, if set, will randomly drop a packet when congestion
  * is more than moderate.  It helps fairness in the multi-interface
  * case when one of them is a hog, but it kills performance for the
@@ -1131,7 +1137,7 @@
   =======================================================================*/
 
 int netdev_max_backlog = 300;
-int weight_p = 64;            /* old backlog weight */
+int weight_p = 64;          /* old backlog weight */
 /* These numbers are selected based on intuition and some
  * experimentatiom, if you have more scientific way of doing this
  * please go ahead and fix things.
@@ -1423,6 +1429,19 @@
 }
 
 
+#if defined(CONFIG_NET_PKTGEN) || defined(CONFIG_NET_PKTGEN_MODULE)
+#warning "Compiling dev.c for pktgen."
+
+int (*handle_pktgen_hook)(struct sk_buff *skb) = NULL;
+
+static __inline__ int handle_pktgen_rcv(struct sk_buff* skb) {
+        if (handle_pktgen_hook) {
+                return handle_pktgen_hook(skb);
+        }
+        return -1;
+}
+#endif
+
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
 #endif
@@ -1445,6 +1464,20 @@
 	return ret;
 }
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+/* Returns >= 0 if we consume the packet.  Otherwise, let
+ * it fall through the rest of the packet processing.
+ */
+int (*macvlan_handle_frame_hook)(struct sk_buff *skb) = NULL;
+
+/* Returns >= 0 if we consume the packet.  Otherwise, let
+ * it fall through the rest of the packet processing.
+ */
+static __inline__ int handle_macvlan(struct sk_buff *skb)
+{
+	return macvlan_handle_frame_hook(skb);
+}
+#endif
 
 #ifdef CONFIG_NET_DIVERT
 static inline int handle_diverter(struct sk_buff *skb)
@@ -1493,11 +1526,23 @@
 		}
 	}
 
+#if defined(CONFIG_NET_PKTGEN) || defined(CONFIG_NET_PKTGEN_MODULE)
+        if ((skb->dev->priv_flags & IFF_PKTGEN_RCV) &&
+            (handle_pktgen_rcv(skb) >= 0)) {
+                /* Pktgen may consume the packet, no need to send
+                 * to further protocols.
+                 */
+                return 0;
+        }
+#endif
+
+        
 #ifdef CONFIG_NET_DIVERT
 	if (skb->dev->divert && skb->dev->divert->divert)
 		ret = handle_diverter(skb);
 #endif /* CONFIG_NET_DIVERT */
-			
+
+        
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 	if (skb->dev->br_port != NULL &&
 	    br_handle_frame_hook != NULL) {
@@ -1505,6 +1550,22 @@
 	}
 #endif
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+	if (skb->dev->macvlan_priv != NULL &&
+	    macvlan_handle_frame_hook != NULL) {
+                if (handle_macvlan(skb) >= 0) {
+                        /* consumed by mac-vlan...it would have been
+                         * re-sent to this method with a different
+                         * device...
+                         */
+                        return 0;
+                }
+                else {
+                        /* Let it fall through and be processed normally */
+                }
+	}
+#endif
+        
 	for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) {
 		if (ptype->type == type &&
 		    (!ptype->dev || ptype->dev == skb->dev)) {
@@ -1618,20 +1679,45 @@
 		local_irq_enable();
 
 		dev = list_entry(queue->poll_list.next, struct net_device, poll_list);
-
+#define ORIGINAL_NAPI_ALGORITHM
+#ifdef ORIGINAL_NAPI_ALGORITHM
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
 			if (dev->quota < 0)
-				dev->quota += dev->weight;
-			else
-				dev->quota = dev->weight;
+                                dev->quota += dev->weight;
+                        else
+                                dev->quota = dev->weight;
 		} else {
 			dev_put(dev);
 			local_irq_disable();
 		}
-	}
+#else
+                /* This scheme should allow devices to build up 2x their weight in quota
+                 * credit.  Heavy users will only get their normal quota.  This should
+                 * help let bursty traffic get higher priority. --Ben
+                 */
+                if (dev->poll(dev, &budget)) {
+                        /* More to do, put these guys back on the poll list */
+			local_irq_disable();
+			list_del(&dev->poll_list);
+			list_add_tail(&dev->poll_list, &queue->poll_list);
+                        dev->quota = dev->weight;
+                }  
+                else {
+                        /* These guys are done, they come off of the poll list */
+                        if (dev->quota >= dev->weight) {
+                                dev->quota = (dev->weight << 1); /* max quota of 2x weight */
+                        }
+                        else {
+                                dev->quota += dev->weight;
+                        }
+			dev_put(dev);
+			local_irq_disable();
+		}
+#endif
+        }
 
 	local_irq_enable();
 	br_read_unlock(BR_NETPROTO_LOCK);
@@ -2183,11 +2269,70 @@
 			notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
 			return 0;
 
+		case SIOCSIFWEIGHT:
+			if (ifr->ifr_qlen < 0)
+				return -EINVAL;
+			dev->weight = ifr->ifr_qlen;
+			return 0;
+                        
+		case SIOCGIFWEIGHT:
+			ifr->ifr_qlen = dev->weight;
+			return 0;
+                        
+                case SIOCSACCEPTLOCALADDRS:
+                        if (ifr->ifr_flags) {
+                                dev->priv_flags |= IFF_ACCEPT_LOCAL_ADDRS;
+                        }
+                        else {
+                                dev->priv_flags &= ~IFF_ACCEPT_LOCAL_ADDRS;
+                        }
+                        return 0;
+
+                case SIOCGACCEPTLOCALADDRS:
+                        if (dev->priv_flags & IFF_ACCEPT_LOCAL_ADDRS) {
+                                ifr->ifr_flags = 1;
+                        }
+                        else {
+                                ifr->ifr_flags = 0;
+                        }
+                        return 0;
+
 		/*
 		 *	Unknown or private ioctl
 		 */
 
 		default:
+                        /* Handle some generic ethtool commands here */
+                        if (cmd == SIOCETHTOOL) {
+                                u32 ethcmd = 0;
+                                if (copy_from_user(&ethcmd, ifr->ifr_data, sizeof(ethcmd))) {
+                                        return -EFAULT;
+                                }
+
+                                if (ethcmd == ETHTOOL_GNDSTATS) {
+
+                                        struct ethtool_ndstats* nds = (struct ethtool_ndstats*)(ifr->ifr_data);
+
+                                        /* Get net-device stats struct, will save it in the space
+                                         * pointed to by the ifr->flags number.  Would like to use
+                                         * ethtool, but it seems to require specific driver support,
+                                         * when this is a general purpose netdevice request...
+                                         */
+                                        struct net_device_stats *stats = dev->get_stats(dev);
+                                        if (stats) {
+                                                if (copy_to_user(nds->data, stats, sizeof(*stats))) {
+                                                        return -EFAULT;
+                                                }
+                                        }
+                                        else {
+                                                return -EOPNOTSUPP;
+                                        }
+                                        return 0;
+                                }
+                        }
+
+                                
+                                                
 			if ((cmd >= SIOCDEVPRIVATE &&
 			    cmd <= SIOCDEVPRIVATE + 15) ||
 			    cmd == SIOCBONDENSLAVE ||
@@ -2280,6 +2425,8 @@
 		case SIOCGIFMAP:
 		case SIOCGIFINDEX:
 		case SIOCGIFTXQLEN:
+                case SIOCGIFWEIGHT:
+                case SIOCGACCEPTLOCALADDRS:
 			dev_load(ifr.ifr_name);
 			read_lock(&dev_base_lock);
 			ret = dev_ifsioc(&ifr, cmd);
@@ -2343,6 +2490,8 @@
 		case SIOCBONDSLAVEINFOQUERY:
 		case SIOCBONDINFOQUERY:
 		case SIOCBONDCHANGEACTIVE:
+                case SIOCSIFWEIGHT:
+                case SIOCSACCEPTLOCALADDRS:
 			if (!capable(CAP_NET_ADMIN))
 				return -EPERM;
 			dev_load(ifr.ifr_name);
--- linux-2.4.21/net/core/pktgen.c	2002-11-28 15:53:15.000000000 -0800
+++ linux-2.4.21.amds/net/core/pktgen.c	2003-07-30 16:20:41.000000000 -0700
@@ -1,9 +1,8 @@
 /* -*-linux-c-*-
- * $Id: candela_2.4.21.patch,v 1.4 2003/09/30 21:05:04 greear Exp $
- * pktgen.c: Packet Generator for performance evaluation.
  *
  * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
  *                                 Uppsala University, Sweden
+ *                 2002  Ben Greear <greearb@candelatech.com>
  *
  * A tool for loading the network with preconfigurated packets.
  * The tool is implemented as a linux module.  Parameters are output 
@@ -21,30 +20,32 @@
  * Added multiskb option 020301 --DaveM
  * Scaling of results. 020417--sigurdur@linpro.no
  * Significant re-work of the module:
- *   *  Updated to support generation over multiple interfaces at once
- *       by creating 32 /proc/net/pg* files.  Each file can be manipulated
- *       individually.
+ *   *  Convert to threaded model to more efficiently be able to transmit
+ *       and receive on multiple interfaces at once.
  *   *  Converted many counters to __u64 to allow longer runs.
  *   *  Allow configuration of ranges, like min/max IP address, MACs,
  *       and UDP-ports, for both source and destination, and can
  *       set to use a random distribution or sequentially walk the range.
- *   *  Can now change some values after starting.
+ *   *  Can now change most values after starting.
  *   *  Place 12-byte packet in UDP payload with magic number,
- *       sequence number, and timestamp.  Will write receiver next.
- *   *  The new changes seem to have a performance impact of around 1%,
- *       as far as I can tell.
+ *       sequence number, and timestamp.
+ *   *  Add receiver code that detects dropped pkts, re-ordered pkts, and
+ *       latencies (with micro-second) precision.
+ *   *  Add IOCTL interface to easily get counters & configuration.
  *   --Ben Greear <greearb@candelatech.com>
  *
  * Renamed multiskb to clone_skb and cleaned up sending core for two distinct 
  * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0 
  * as a "fastpath" with a configurable number of clones after alloc's.
- *
  * clone_skb=0 means all packets are allocated this also means ranges time 
  * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100 
  * clones.
  *
  * Also moved to /proc/net/pktgen/ 
- * --ro 
+ * --ro
+ *
+ * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
+ *    mistakes.  Also merged in DaveM's patch in the -pre6 patch.
  *
  * See Documentation/networking/pktgen.txt for how to use this.
  */
@@ -79,172 +80,533 @@
 #include <linux/proc_fs.h>
 #include <linux/if_arp.h>
 #include <net/checksum.h>
+#include <net/profile.h>
 #include <asm/timex.h>
 
-#define cycles()	((u32)get_cycles())
+#include <linux/smp_lock.h> /* for lock kernel */
+#include <asm/div64.h> /* do_div */
+
+#include "pktgen.h"
 
 
-#define VERSION "pktgen version 1.2"
 static char version[] __initdata = 
-  "pktgen.c: v1.2: Packet Generator for packet performance testing.\n";
+  "pktgen.c: v1.6: Packet Generator for packet performance testing.\n";
 
 /* Used to help with determining the pkts on receive */
 
 #define PKTGEN_MAGIC 0xbe9be955
 
+/* #define PG_DEBUG(a) a */
+#define PG_DEBUG(a) /* a */
 
-/* Keep information per interface */
-struct pktgen_info {
-        /* Parameters */
+/* cycles per micro-second */
+static u32 pg_cycles_per_ns;
+static u32 pg_cycles_per_us;
+static u32 pg_cycles_per_ms;
 
-        /* If min != max, then we will either do a linear iteration, or
-         * we will do a random selection from within the range.
-         */
-        __u32 flags;     
+/* Module parameters, defaults. */
+static int pg_count_d = 0; /* run forever by default */
+static int pg_ipg_d = 0;
+static int pg_multiskb_d = 0;
+static int pg_thread_count = 1; /* Initial threads to create */
+static int debug = 0;
 
-#define F_IPSRC_RND   (1<<0)  /* IP-Src Random  */
-#define F_IPDST_RND   (1<<1)  /* IP-Dst Random  */
-#define F_UDPSRC_RND  (1<<2)  /* UDP-Src Random */
-#define F_UDPDST_RND  (1<<3)  /* UDP-Dst Random */
-#define F_MACSRC_RND  (1<<4)  /* MAC-Src Random */
-#define F_MACDST_RND  (1<<5)  /* MAC-Dst Random */
-#define F_SET_SRCMAC  (1<<6)  /* Specify-Src-Mac 
-				 (default is to use Interface's MAC Addr) */
-#define F_SET_SRCIP   (1<<7)  /*  Specify-Src-IP
-				  (default is to use Interface's IP Addr) */ 
-
-        
-        int pkt_size;    /* = ETH_ZLEN; */
-        int nfrags;
-        __u32 ipg;       /* Default Interpacket gap in nsec */
-        __u64 count;     /* Default No packets to send */
-        __u64 sofar;     /* How many pkts we've sent so far */
-        __u64 errors;    /* Errors when trying to transmit, pkts will be re-sent */
-        struct timeval started_at;
-        struct timeval stopped_at;
-        __u64 idle_acc;
-        __u32 seq_num;
-        
-        int clone_skb;   /* Use multiple SKBs during packet gen.  If this number
-                          * is greater than 1, then that many coppies of the same
-                          * packet will be sent before a new packet is allocated.
-                          * For instance, if you want to send 1024 identical packets
-                          * before creating a new packet, set clone_skb to 1024.
-                          */
-        int busy;
-        int do_run_run;   /* if this changes to false, the test will stop */
-        
-        char outdev[32];
-        char dst_min[32];
-        char dst_max[32];
-        char src_min[32];
-        char src_max[32];
 
-        /* If we're doing ranges, random or incremental, then this
-         * defines the min/max for those ranges.
-         */
-        __u32 saddr_min; /* inclusive, source IP address */
-        __u32 saddr_max; /* exclusive, source IP address */
-        __u32 daddr_min; /* inclusive, dest IP address */
-        __u32 daddr_max; /* exclusive, dest IP address */
-
-        __u16 udp_src_min; /* inclusive, source UDP port */
-        __u16 udp_src_max; /* exclusive, source UDP port */
-        __u16 udp_dst_min; /* inclusive, dest UDP port */
-        __u16 udp_dst_max; /* exclusive, dest UDP port */
-
-        __u32 src_mac_count; /* How many MACs to iterate through */
-        __u32 dst_mac_count; /* How many MACs to iterate through */
-        
-        unsigned char dst_mac[6];
-        unsigned char src_mac[6];
-        
-        __u32 cur_dst_mac_offset;
-        __u32 cur_src_mac_offset;
-        __u32 cur_saddr;
-        __u32 cur_daddr;
-        __u16 cur_udp_dst;
-        __u16 cur_udp_src;
-        
-        __u8 hh[14];
-        /* = { 
-           0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB, 
-           
-           We fill in SRC address later
-           0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-           0x08, 0x00
-           };
-        */
-        __u16 pad; /* pad out the hh struct to an even 16 bytes */
-        char result[512];
 
-        /* proc file names */
-        char fname[80];
-        char busy_fname[80];
-        
-        struct proc_dir_entry *proc_ent;
-        struct proc_dir_entry *busy_proc_ent;
-};
+/* List of all running threads */
+static struct pktgen_thread_info* pktgen_threads = NULL;
+spinlock_t _pg_threadlist_lock = SPIN_LOCK_UNLOCKED;
+
+/* Holds interfaces for all threads */
+#define PG_INFO_HASH_MAX 32
+static struct pktgen_interface_info* pg_info_hash[PG_INFO_HASH_MAX];
+spinlock_t _pg_hash_lock = SPIN_LOCK_UNLOCKED;
+
+#define PG_PROC_DIR "pktgen"
+static struct proc_dir_entry *pg_proc_dir = NULL;
+
+char module_fname[128];
+struct proc_dir_entry *module_proc_ent = NULL;
 
-struct pktgen_hdr {
-        __u32 pgh_magic;
-        __u32 seq_num;
-        struct timeval timestamp;
+
+static void init_pktgen_kthread(struct pktgen_thread_info *kthread, char *name);
+static int pg_rem_interface_info(struct pktgen_thread_info* pg_thread,
+                                 struct pktgen_interface_info* i);
+static int pg_add_interface_info(struct pktgen_thread_info* pg_thread,
+                                 const char* ifname);
+static void exit_pktgen_kthread(struct pktgen_thread_info *kthread);
+static void stop_pktgen_kthread(struct pktgen_thread_info *kthread);
+static struct pktgen_thread_info* pg_find_thread(const char* name);
+static int pg_add_thread_info(const char* name);
+static struct pktgen_interface_info* pg_find_interface(struct pktgen_thread_info* pg_thread,
+                                                       const char* ifname);
+static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
+
+
+struct notifier_block pktgen_notifier_block = {
+	notifier_call: pktgen_device_event,
 };
 
-static int cpu_speed;
-static int debug;
+/*  This code works around the fact that do_div cannot handle two 64-bit
+    numbers, and regular 64-bit division doesn't work on x86 kernels.
+    --Ben
+*/
 
-/* Module parameters, defaults. */
-static int count_d = 100000;
-static int ipg_d = 0;
-static int clone_skb_d = 0;
+#define PG_DIV 0
+#define PG_REM 1
+
+/* This was emailed to LMKL by: Chris Caputo <ccaputo@alt.net>
+ * Function copied/adapted/optimized from:
+ *
+ *  nemesis.sourceforge.net/browse/lib/static/intmath/ix86/intmath.c.html
+ *
+ * Copyright 1994, University of Cambridge Computer Laboratory
+ * All Rights Reserved.
+ *
+ * TODO: When running on a 64-bit CPU platform, this should no longer be
+ * TODO: necessary.
+ */
+inline static s64 divremdi3(s64 x, s64 y, int type) {
+        u64 a = (x < 0) ? -x : x;
+        u64 b = (y < 0) ? -y : y;
+        u64 res = 0, d = 1;
+
+        if (b > 0) {
+                while (b < a) {
+                        b <<= 1;
+                        d <<= 1;
+                }
+        }
+        
+        do {
+                if ( a >= b ) {
+                        a -= b;
+                        res += d;
+                }
+                b >>= 1;
+                d >>= 1;
+        }
+        while (d);
+
+        if (PG_DIV == type) {
+                return (((x ^ y) & (1ULL<<63)) == 0) ? res : -(s64)res;
+        }
+        else {
+                return ((x & (1ULL<<63)) == 0) ? a : -(s64)a;
+        }
+}/* divremdi3 */
+
+/* End of hacks to deal with 64-bit math on x86 */
 
 
-#define MAX_PKTGEN 8
-static struct pktgen_info pginfos[MAX_PKTGEN];
 
+inline static void pg_lock_thread_list(char* msg) {
+        if (debug > 1) {
+                printk("before pg_lock_thread_list, msg: %s\n", msg);
+        }
+        spin_lock(&_pg_threadlist_lock);
+        if (debug > 1) {
+                printk("after pg_lock_thread_list, msg: %s\n", msg);
+        }
+}
+
+inline static void pg_unlock_thread_list(char* msg) {
+        if (debug > 1) {
+                printk("before pg_unlock_thread_list, msg: %s\n", msg);
+        }
+        spin_unlock(&_pg_threadlist_lock);
+        if (debug > 1) {
+                printk("after pg_unlock_thread_list, msg: %s\n", msg);
+        }
+}
+
+inline static void pg_lock_hash(char* msg) {
+        if (debug > 1) {
+                printk("before pg_lock_hash, msg: %s\n", msg);
+        }
+        spin_lock(&_pg_hash_lock);
+        if (debug > 1) {
+                printk("before pg_lock_hash, msg: %s\n", msg);
+        }
+}
+
+inline static void pg_unlock_hash(char* msg) {
+        if (debug > 1) {
+                printk("before pg_unlock_hash, msg: %s\n", msg);
+        }
+        spin_unlock(&_pg_hash_lock);
+        if (debug > 1) {
+                printk("after pg_unlock_hash, msg: %s\n", msg);
+        }
+}
+
+inline static void pg_lock(struct pktgen_thread_info* pg_thread, char* msg) {
+        if (debug > 1) {
+                printk("before pg_lock thread, msg: %s\n", msg);
+        }
+        spin_lock(&(pg_thread->pg_threadlock));
+        if (debug > 1) {
+                printk("after pg_lock thread, msg: %s\n", msg);
+        }
+}
+
+inline static void pg_unlock(struct pktgen_thread_info* pg_thread, char* msg) {
+        if (debug > 1) {
+                printk("before pg_unlock thread, thread: %p  msg: %s\n",
+                       pg_thread, msg);
+        }
+        spin_unlock(&(pg_thread->pg_threadlock));
+        if (debug > 1) {
+                printk("after pg_unlock thread, thread: %p  msg: %s\n",
+                       pg_thread, msg);
+        }
+}
 
 /** Convert to miliseconds */
-inline __u64 tv_to_ms(const struct timeval* tv) {
+static inline __u64 tv_to_ms(const struct timeval* tv) {
         __u64 ms = tv->tv_usec / 1000;
         ms += (__u64)tv->tv_sec * (__u64)1000;
         return ms;
 }
 
-inline __u64 getCurMs(void) {
+
+/** Convert to micro-seconds */
+static inline __u64 tv_to_us(const struct timeval* tv) {
+        __u64 us = tv->tv_usec;
+        us += (__u64)tv->tv_sec * (__u64)1000000;
+        return us;
+}
+
+
+static inline __u64 pg_div(__u64 n, __u32 base) {
+        __u64 tmp = n;
+        do_div(tmp, base);
+        /* printk("pg_div, n: %llu  base: %d  rv: %llu\n",
+                  n, base, tmp); */
+        return tmp;
+}
+
+/* Fast, not horribly accurate, since the machine started. */
+static inline __u64 getRelativeCurMs(void) {
+        return pg_div(get_cycles(), pg_cycles_per_ms);
+}
+
+/* Since the epoc.  More precise over long periods of time than
+ * getRelativeCurMs
+ */
+static inline __u64 getCurMs(void) {
         struct timeval tv;
         do_gettimeofday(&tv);
         return tv_to_ms(&tv);
 }
 
-#define PG_PROC_DIR "pktgen"
-static struct proc_dir_entry *proc_dir = 0;
+/* Since the epoc.  More precise over long periods of time than
+ * getRelativeCurMs
+ */
+static inline __u64 getCurUs(void) {
+        struct timeval tv;
+        do_gettimeofday(&tv);
+        return tv_to_us(&tv);
+}
 
-static struct net_device *setup_inject(struct pktgen_info* info)
-{
+/* Since the machine booted. */
+static inline __u64 getRelativeCurUs(void) {
+        return pg_div(get_cycles(), pg_cycles_per_us);
+}
+
+/* Since the machine booted. */
+static inline __u64 getRelativeCurNs(void) {
+        return pg_div(get_cycles(), pg_cycles_per_ns);
+}
+
+static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b) {
+        return tv_to_us(a) - tv_to_us(b);
+}
+
+
+
+int pktgen_proc_ioctl(struct inode* inode, struct file* file, unsigned int cmd,
+                      unsigned long arg) {
+        int err = 0;
+        struct pktgen_ioctl_info args;
+        struct pktgen_thread_info* targ = NULL;
+
+        /*
+        if (!capable(CAP_NET_ADMIN)){
+                return -EPERM;
+        }
+        */
+        
+        if (copy_from_user(&args, (void*)arg, sizeof(args))) {
+                return -EFAULT;
+        }
+
+        /* Null terminate the names */
+        args.thread_name[31] = 0;
+        args.interface_name[31] = 0;
+
+        /* printk("pktgen:  thread_name: %s  interface_name: %s\n",
+         *        args.thread_name, args.interface_name);
+         */
+        
+        switch (cmd) {
+         case GET_PKTGEN_INTERFACE_INFO: {
+                 targ = pg_find_thread(args.thread_name);
+                 if (targ) {
+                         struct pktgen_interface_info* info;
+                         info = pg_find_interface(targ, args.interface_name);
+                         if (info) {
+                                 memcpy(&(args.info), info, sizeof(args.info));
+                                 if (copy_to_user((void*)(arg), &args, sizeof(args))) {
+                                         printk("ERROR: pktgen:  copy_to_user failed.\n");
+                                         err = -EFAULT;
+                                 }
+                                 else {
+                                         err = 0;
+                                 }
+                         }
+                         else {
+                                 /* printk("ERROR: pktgen:  Could not find interface -:%s:-\n",
+                                           args.interface_name);*/
+                                 err = -ENODEV;
+                         }
+                 }
+                 else {
+                         printk("ERROR: pktgen:  Could not find thread -:%s:-.\n",
+                                args.thread_name);
+                         err = -ENODEV;
+                 }
+                 break;
+         }
+         default:
+                /* pass on to underlying device instead?? */
+                printk("%s: Unknown pktgen IOCTL: %x \n",
+                       __FUNCTION__, cmd);
+                return -EINVAL;
+        }
+        
+        return err;
+}/* pktgen_proc_ioctl */
+
+static struct file_operations pktgen_fops = {
+        ioctl:     pktgen_proc_ioctl,
+};
+
+static void remove_pg_info_from_hash(struct pktgen_interface_info* info) {
+        pg_lock_hash(__FUNCTION__);
+        {
+                int device_idx = info->odev ? info->odev->ifindex : 0;
+                int b = device_idx % PG_INFO_HASH_MAX;
+                struct pktgen_interface_info* p = pg_info_hash[b];
+                struct pktgen_interface_info* prev = pg_info_hash[b];
+
+                PG_DEBUG(printk("remove_pg_info_from_hash, p: %p info: %p  device_idx: %i\n",
+                                p, info, device_idx));
+
+                if (p != NULL) {
+                
+                        if (p == info) {
+                                pg_info_hash[b] = p->next_hash;
+                                p->next_hash = NULL;
+                        }
+                        else {
+                                while (prev->next_hash) {
+                                        p = prev->next_hash;
+                                        if (p == info) {
+                                                prev->next_hash = p->next_hash;
+                                                p->next_hash = NULL;
+                                                break;
+                                        }
+                                        prev = p;
+                                }
+                        }
+                }
+                
+                if (info->odev) {
+                        info->odev->priv_flags &= ~(IFF_PKTGEN_RCV);
+                }
+        }
+        pg_unlock_hash(__FUNCTION__);
+}/* remove_pg_info_from_hash */
+
+
+static void add_pg_info_to_hash(struct pktgen_interface_info* info) {
+        /* First remove it, just in case it's already there. */
+        remove_pg_info_from_hash(info);
+        
+        pg_lock_hash(__FUNCTION__);
+        {
+                int device_idx = info->odev ? info->odev->ifindex : 0;
+                int b = device_idx % PG_INFO_HASH_MAX;
+
+                PG_DEBUG(printk("add_pg_info_from_hash, b: %i info: %p  device_idx: %i\n",
+                                b, info, device_idx));
+
+                info->next_hash = pg_info_hash[b];
+                pg_info_hash[b] = info;
+
+
+                if (info->odev) {
+                        info->odev->priv_flags |= (IFF_PKTGEN_RCV);
+                }
+        }
+        pg_unlock_hash(__FUNCTION__);
+}/* add_pg_info_to_hash */
+
+
+/* Find the pktgen_interface_info for a device idx */
+struct pktgen_interface_info* find_pg_info(int device_idx) {
+        struct pktgen_interface_info* p = NULL;
+        if (debug > 1) {
+                printk("in find_pg_info...\n");
+        }
+        pg_lock_hash(__FUNCTION__);
+        {
+                int b = device_idx % PG_INFO_HASH_MAX;
+                p = pg_info_hash[b];
+                while (p) {
+                        if (p->odev && (p->odev->ifindex == device_idx)) {
+                                break;
+                        }
+                        p = p->next_hash;
+                }
+        }
+        pg_unlock_hash(__FUNCTION__);
+        return p;
+}
+
+
+/* Remove an interface from our hash, disassociate pktgen_interface_info
+ * from interface.  Safe to call when info has no device attached.
+ */
+static void check_remove_device(struct pktgen_interface_info* info) {
+        struct pktgen_interface_info* pi = NULL;
+        if (info->odev) {
+                pi = find_pg_info(info->odev->ifindex);
+                if (pi != info) {
+                        printk("ERROR: pi != info, pi: %p  info: %p\n", pi, info);
+                }
+                else {
+                        /* Remove info from our hash */
+                        remove_pg_info_from_hash(info);
+                }
+
+                rtnl_lock();
+                info->odev->priv_flags &= ~(IFF_PKTGEN_RCV); /* stop consuming pkts on this device */
+                atomic_dec(&(info->odev->refcnt)); /* drop the reference taken in pg_setup_interface */
+                info->odev = NULL;
+                rtnl_unlock();
+        }
+}/* check_remove_device */
+
+
+/* Detach the named device from every pktgen thread; returns how many threads held it. */
+static int pg_remove_interface_from_all_threads(const char* dev_name) {
+        int cnt = 0;
+        pg_lock_thread_list(__FUNCTION__);
+        {
+                struct pktgen_thread_info* tmp = pktgen_threads;
+                struct pktgen_interface_info* info = NULL;
+                
+                while (tmp) { /* best-effort: visit every thread, most will not hold dev_name */
+                        info = pg_find_interface(tmp, dev_name);
+                        if (info) {
+                                printk("pktgen:  Removing interface: %s from pktgen control.\n",
+                                       dev_name);
+                                pg_rem_interface_info(tmp, info);
+                                cnt++;
+                        }
+                        else {
+                                printk("pktgen:  Could not find interface: %s in rem_from_all.\n",
+                                       dev_name);
+                        }
+                        tmp = tmp->next;
+                }
+        }
+        pg_unlock_thread_list(__FUNCTION__);
+        return cnt;
+}/* pg_remove_interface_from_all_threads */
+
+
+/* Netdevice notifier: drop a device from pktgen when it is unregistered. */
+static int pktgen_device_event(struct notifier_block *unused, unsigned long event, void *ptr) {
+	struct net_device *dev = (struct net_device *)(ptr);
+
+	/* It is OK that we do not hold the group lock right now,
+	 * as we run under the RTNL lock.
+	 */
+
+	switch (event) {
+	case NETDEV_CHANGEADDR:
+	case NETDEV_GOING_DOWN:
+	case NETDEV_DOWN:
+	case NETDEV_UP:
+		/* Ignore for now */
+		break;
+		
+	case NETDEV_UNREGISTER:
+                pg_remove_interface_from_all_threads(dev->name); /* device going away: detach everywhere */
+		break;
+	};
+
+	return NOTIFY_DONE;
+}
+
+
+/* Associate pktgen_interface_info with a device.
+ */
+static struct net_device* pg_setup_interface(struct pktgen_interface_info* info) {
 	struct net_device *odev;
 
+        check_remove_device(info);                
+        
 	rtnl_lock();
-	odev = __dev_get_by_name(info->outdev);
+	odev = __dev_get_by_name(info->ifname);
 	if (!odev) {
-		sprintf(info->result, "No such netdevice: \"%s\"", info->outdev);
-		goto out_unlock;
+		printk("No such netdevice: \"%s\"\n", info->ifname);
 	}
-
-	if (odev->type != ARPHRD_ETHER) {
-		sprintf(info->result, "Not ethernet device: \"%s\"", info->outdev);
-		goto out_unlock;
+	else if (odev->type != ARPHRD_ETHER) {
+		printk("Not an ethernet device: \"%s\"\n", info->ifname);
 	}
-
-	if (!netif_running(odev)) {
-		sprintf(info->result, "Device is down: \"%s\"", info->outdev);
-		goto out_unlock;
+	else if (!netif_running(odev)) {
+		printk("Device is down: \"%s\"\n", info->ifname);
 	}
+	else if (odev->priv_flags & IFF_PKTGEN_RCV) {
+		printk("ERROR: Device: \"%s\" is already assigned to a pktgen interface.\n",
+                       info->ifname);
+	}
+        else {
+                atomic_inc(&odev->refcnt);
+                info->odev = odev;
+                info->odev->priv_flags |= (IFF_PKTGEN_RCV);
+        }
+        
+	rtnl_unlock();
+
+        if (info->odev) {
+                add_pg_info_to_hash(info);
+        }
+        
+        return info->odev;
+}
 
+/* Read info from the interface and set up internal pktgen_interface_info
+ * structure to have the right information to create/send packets
+ */
+static void pg_setup_inject(struct pktgen_interface_info* info)
+{
+        if (!info->odev) {
+                /* Try once more, just in case it works now. */
+                pg_setup_interface(info);
+        }
+        
+        if (!info->odev) {
+                printk("ERROR: info->odev == NULL in setup_inject.\n");
+                sprintf(info->result, "ERROR: info->odev == NULL in setup_inject.\n");
+                return;
+        }
+        
         /* Default to the interface's mac if not explicitly set. */
         if (!(info->flags & F_SET_SRCMAC)) {
-                memcpy(&(info->hh[6]), odev->dev_addr, 6);
+                memcpy(&(info->hh[6]), info->odev->dev_addr, 6);
         }
         else {
                 memcpy(&(info->hh[6]), info->src_mac, 6);
@@ -252,12 +614,15 @@
 
         /* Set up Dest MAC */
         memcpy(&(info->hh[0]), info->dst_mac, 6);
+
+        /* Set up pkt size */
+        info->cur_pkt_size = info->min_pkt_size;
         
 	info->saddr_min = 0;
 	info->saddr_max = 0;
         if (strlen(info->src_min) == 0) {
-                if (odev->ip_ptr) {
-                        struct in_device *in_dev = odev->ip_ptr;
+                if (info->odev->ip_ptr) {
+                        struct in_device *in_dev = info->odev->ip_ptr;
 
                         if (in_dev->ifa_list) {
                                 info->saddr_min = in_dev->ifa_list->ifa_address;
@@ -280,65 +645,131 @@
         info->cur_daddr = info->daddr_min;
         info->cur_udp_dst = info->udp_dst_min;
         info->cur_udp_src = info->udp_src_min;
-        
-	atomic_inc(&odev->refcnt);
-	rtnl_unlock();
-
-	return odev;
-
-out_unlock:
-	rtnl_unlock();
-	return NULL;
 }
 
-static void nanospin(int ipg, struct pktgen_info* info)
+/* ipg is in nano-seconds */
+static void nanospin(__u32 ipg, struct pktgen_interface_info* info)
 {
-	u32 idle_start, idle;
-
-	idle_start = cycles();
+	u64 idle_start = get_cycles();
+        u64 idle;
 
 	for (;;) {
 		barrier();
-		idle = cycles() - idle_start;
-		if (idle * 1000 >= ipg * cpu_speed)
+		idle = get_cycles() - idle_start;
+		if (idle * 1000 >= ipg * pg_cycles_per_us)
 			break;
 	}
 	info->idle_acc += idle;
 }
 
+
+/* delay_us is in micro-seconds (usecs) */
+static void pg_udelay(__u32 delay_us, struct pktgen_interface_info* info,
+                      struct pktgen_thread_info* pg_thread)
+{
+        u64 start = getRelativeCurUs();
+        u64 now;
+        if (delay_us > (1000000 / HZ)) {
+                /* Delay is longer than a jiffy: fall asleep for a bit */
+                __u32 us_per_tick = 1000000 / HZ;
+                __u32 ticks = delay_us / us_per_tick;
+                interruptible_sleep_on_timeout(&(pg_thread->queue), ticks);
+        }
+        
+	for (;;) {
+                now = getRelativeCurUs();
+                if (start + delay_us <= (now + 10)) { /* within 10us of deadline: finish by spinning below */
+                        break;
+                }
+
+                if (!info->do_run_run) {
+                        return;
+                }
+                
+                if (current->need_resched) {
+                        schedule();
+                }
+                        
+                now = getRelativeCurUs();
+                if (start + delay_us <= (now + 10)) { /* re-check after schedule() */
+                        break;
+                }
+
+                do_softirq();
+	}
+
+        info->idle_acc += ((now - start) * pg_cycles_per_us); /* idle_acc is kept in cycles (see nanospin) */
+
+        /* We can break out of the loop up to 10us early, so spend the rest of
+         * it spinning to increase accuracy.
+         */
+        if (start + delay_us > now) {
+                nanospin(((start + delay_us) - now) * 1000, info); /* nanospin wants nano-seconds */
+        }
+}
+
+
+
+
+/* Returns: cycles per micro-second */
 static int calc_mhz(void)
 {
 	struct timeval start, stop;
-	u32 start_s, elapsed;
-
+	u64 start_s;
+        u64 t1, t2;
+        u32 elapsed;
+        u32 clock_time = 0;
+        
 	do_gettimeofday(&start);
-	start_s = cycles();
+	start_s = get_cycles();
+        /* Spin for 50,000,000 cycles */
 	do {
 		barrier();
-		elapsed = cycles() - start_s;
+		elapsed = (u32)(get_cycles() - start_s);
 		if (elapsed == 0)
 			return 0;
-	} while (elapsed < 1000 * 50000);
+	} while (elapsed < 50000000);
 	do_gettimeofday(&stop);
-	return elapsed/(stop.tv_usec-start.tv_usec+1000000*(stop.tv_sec-start.tv_sec));
+
+        t1 = tv_to_us(&start);
+        t2 = tv_to_us(&stop);
+
+        clock_time = (u32)(t2 - t1);
+        if (clock_time == 0) {
+                printk("pktgen: ERROR:  clock_time was zero..things may not work right, t1: %u  t2: %u ...\n",
+                       (u32)(t1), (u32)(t2));
+                return 0x7FFFFFFF;
+        }
+	return elapsed / clock_time;
 }
 
+/* Calibrate cycles per micro-second */
 static void cycles_calibrate(void)
 {
 	int i;
 
 	for (i = 0; i < 3; i++) {
-		int res = calc_mhz();
-		if (res > cpu_speed)
-			cpu_speed = res;
+		u32 res = calc_mhz();
+		if (res > pg_cycles_per_us)
+			pg_cycles_per_us = res;
 	}
+
+        /* Set these up too, only need to calculate these once. */
+        pg_cycles_per_ns = pg_cycles_per_us / 1000;
+        if (pg_cycles_per_ns == 0) {
+                pg_cycles_per_ns = 1;
+        }
+        pg_cycles_per_ms = pg_cycles_per_us * 1000;
+        
+        printk("pktgen: cycles_calibrate, cycles_per_ns: %d  per_us: %d  per_ms: %d\n",
+               pg_cycles_per_ns, pg_cycles_per_us, pg_cycles_per_ms);
 }
 
 
 /* Increment/randomize headers according to flags and current values
  * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
  */
-static void mod_cur_headers(struct pktgen_info* info) {        
+static void mod_cur_headers(struct pktgen_interface_info* info) {        
         __u32 imn;
         __u32 imx;
         
@@ -428,7 +859,7 @@
                 else {
                      t = ntohl(info->cur_saddr);
                      t++;
-                     if (t >= imx) {
+                     if (t > imx) {
                              t = imn;
                      }
                 }
@@ -443,16 +874,31 @@
                 else {
                      t = ntohl(info->cur_daddr);
                      t++;
-                     if (t >= imx) {
+                     if (t > imx) {
                              t = imn;
                      }
                 }
                 info->cur_daddr = htonl(t);
         }
+
+        if (info->min_pkt_size < info->max_pkt_size) {
+                __u32 t;
+                if (info->flags & F_TXSIZE_RND) {
+                        t = ((net_random() % (info->max_pkt_size - info->min_pkt_size))
+                             + info->min_pkt_size);
+                }
+                else {
+                     t = info->cur_pkt_size + 1;
+                     if (t > info->max_pkt_size) {
+                             t = info->min_pkt_size;
+                     }
+                }
+                info->cur_pkt_size = t;
+        }
 }/* mod_cur_headers */
 
 
-static struct sk_buff *fill_packet(struct net_device *odev, struct pktgen_info* info)
+static struct sk_buff *fill_packet(struct net_device *odev, struct pktgen_interface_info* info)
 {
 	struct sk_buff *skb = NULL;
 	__u8 *eth;
@@ -461,7 +907,7 @@
 	struct iphdr *iph;
         struct pktgen_hdr *pgh = NULL;
         
-	skb = alloc_skb(info->pkt_size + 64 + 16, GFP_ATOMIC);
+	skb = alloc_skb(info->cur_pkt_size + 64 + 16, GFP_ATOMIC);
 	if (!skb) {
 		sprintf(info->result, "No memory");
 		return NULL;
@@ -481,7 +927,7 @@
 
 	memcpy(eth, info->hh, 14);
         
-	datalen = info->pkt_size - 14 - 20 - 8; /* Eth + IPh + UDPh */
+	datalen = info->cur_pkt_size - 14 - 20 - 8; /* Eth + IPh + UDPh */
 	if (datalen < sizeof(struct pktgen_hdr)) {
 		datalen = sizeof(struct pktgen_hdr);
         }
@@ -493,7 +939,7 @@
 
 	iph->ihl = 5;
 	iph->version = 4;
-	iph->ttl = 3;
+	iph->ttl = 32;
 	iph->tos = 0;
 	iph->protocol = IPPROTO_UDP; /* UDP */
 	iph->saddr = info->cur_saddr;
@@ -514,7 +960,6 @@
 		int frags = info->nfrags;
 		int i;
 
-                /* TODO: Verify this is OK...it sure is ugly. --Ben */
                 pgh = (struct pktgen_hdr*)(((char*)(udph)) + 8);
                 
 		if (frags > MAX_SKB_FRAGS)
@@ -562,234 +1007,855 @@
 
         /* Stamp the time, and sequence number, convert them to network byte order */
         if (pgh) {
-                pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+                pgh->pgh_magic = __constant_htonl(PKTGEN_MAGIC);
                 do_gettimeofday(&(pgh->timestamp));
                 pgh->timestamp.tv_usec = htonl(pgh->timestamp.tv_usec);
                 pgh->timestamp.tv_sec = htonl(pgh->timestamp.tv_sec);
                 pgh->seq_num = htonl(info->seq_num);
         }
+        info->seq_num++;
         
 	return skb;
 }
 
 
-static void inject(struct pktgen_info* info)
-{
-	struct net_device *odev = NULL;
-	struct sk_buff *skb = NULL;
-	__u64 total = 0;
-        __u64 idle = 0;
-	__u64 lcount = 0;
-        int nr_frags = 0;
-	int last_ok = 1;           /* Was last skb sent? 
-	                            * Or a failed transmit of some sort?  This will keep
-                                    * sequence numbers in order, for example.
-                                    */
-        __u64 fp = 0;
-        __u32 fp_tmp = 0;
-
-	odev = setup_inject(info);
-	if (!odev)
-		return;
-
-        info->do_run_run = 1; /* Cranke yeself! */
-	info->idle_acc = 0;
-	info->sofar = 0;
-	lcount = info->count;
-
-
-        /* Build our initial pkt and place it as a re-try pkt. */
-	skb = fill_packet(odev, info);
-	if (skb == NULL) goto out_reldev;
+static void record_latency(struct pktgen_interface_info* info, int latency) {
+        /* NOTE:  Latency can be negative */
+        int div = 100; /* >= 100 samples: decay as an exponential moving average */
+        int diff;
+        int vl;
+        int i;

-	do_gettimeofday(&(info->started_at));
+        info->pkts_rcvd_since_clear++;
+        
+        if (info->pkts_rcvd_since_clear < 100) {
+                div = info->pkts_rcvd_since_clear; /* use count since last clear, not lifetime total */
+                if (info->pkts_rcvd_since_clear == 1) {
+                        info->avg_latency = latency;
+                }
+        }

-	while(info->do_run_run) {
+        if ((div + 1) == 0) {
+                info->avg_latency = 0;
+        }
+        else {
+                info->avg_latency = ((info->avg_latency * div + latency) / (div + 1));
+        }

-                /* Set a time-stamp, so build a new pkt each time */
+        if (latency < info->min_latency) {
+                info->min_latency = latency;
+        }
+        if (latency > info->max_latency) {
+                info->max_latency = latency;
+        }

-                if (last_ok) {
-                        if (++fp_tmp >= info->clone_skb ) {
-                                kfree_skb(skb);
-                                skb = fill_packet(odev, info);
-                                if (skb == NULL) {
-                                        break;
-                                }
-                                fp++;
-                                fp_tmp = 0; /* reset counter */
-                        }
-                        atomic_inc(&skb->users);
+        /* Place the latency in the right 'bucket', relative to the minimum seen */
+        diff = (latency - info->min_latency); /* >= 0, min_latency was just updated above */
+        for (i = 0; i<LAT_BUCKETS_MAX; i++) {
+                vl = (1<<i);
+                if (diff <= vl) {
+                        info->latency_bkts[i]++;
+                        break;
                 }
+        }
+}/* record latency */
 
-                nr_frags = skb_shinfo(skb)->nr_frags;
-                   
-		spin_lock_bh(&odev->xmit_lock);
-		if (!netif_queue_stopped(odev)) {
 
-			if (odev->hard_start_xmit(skb, odev)) {
-				if (net_ratelimit()) {
-                                   printk(KERN_INFO "Hard xmit error\n");
-                                }
-                                info->errors++;
-				last_ok = 0;
-			}
-                        else {
-		           last_ok = 1;	
-                           info->sofar++;
-                           info->seq_num++;
+/* Returns < 0 if the skb is not a pktgen buffer. */
+int pktgen_receive(struct sk_buff* skb) {
+        /* See if we have a pktgen packet */
+        if ((skb->len >= (20 + 8 + sizeof(struct pktgen_hdr))) &&
+            (skb->protocol == __constant_htons(ETH_P_IP))) {
+                
+                /* It's IP, and long enough, lets check the magic number.
+                 * TODO:  This is a hack not always guaranteed to catch the right
+                 * packets.
+                 */
+                /*int i;
+                  char* tmp; */
+                struct pktgen_hdr* pgh;
+                /* printk("Length & protocol passed, skb->data: %p, raw: %p\n",
+                          skb->data, skb->h.raw); */
+                pgh = (struct pktgen_hdr*)(skb->data + 20 + 8);
+                /*
+                tmp = (char*)(skb->data);
+                for (i = 0; i<60; i++) {
+                        printk("%02hx ", tmp[i]);
+                        if (((i + 1) % 15) == 0) {
+                                printk("\n");
                         }
-		}
-		else {
-                        /* Re-try it next time */
-			last_ok = 0;
                 }
+                printk("\n");
+                */
                 
+                if (pgh->pgh_magic == __constant_ntohl(PKTGEN_MAGIC)) {
+                        struct net_device* dev = skb->dev;
+                        struct pktgen_interface_info* info = find_pg_info(dev->ifindex);
+                        
+                        /* Got one! */
+                        /* TODO:  Check UDP checksum ?? */
+                        __u32 seq = ntohl(pgh->seq_num);
 
-		spin_unlock_bh(&odev->xmit_lock);
-
-		if (info->ipg) {
-                        /* Try not to busy-spin if we have larger sleep times.
-                         * TODO:  Investigate better ways to do this.
-                         */
-                        if (info->ipg < 10000) { /* 10 usecs or less */
-                                nanospin(info->ipg, info);
+                        if (!info) {
+                                return -1;
                         }
-                        else if (info->ipg < 10000000) { /* 10ms or less */
-                                udelay(info->ipg / 1000);
+
+                        info->pkts_rcvd++;
+                        info->bytes_rcvd += (skb->len + 4); /* +4 for the checksum */
+                        
+                        /* Check for out-of-sequence packets */
+                        if (info->last_seq_rcvd == seq) {
+                                info->dup_rcvd++;
+                                info->dup_since_incr++;
                         }
                         else {
-                                mdelay(info->ipg / 1000000);
+                                __s64 rx = tv_to_us(&(skb->stamp));
+                                __s64 tx;
+                                struct timeval txtv;
+                                txtv.tv_usec = ntohl(pgh->timestamp.tv_usec);
+                                txtv.tv_sec = ntohl(pgh->timestamp.tv_sec);
+                                tx = tv_to_us(&txtv);
+                                record_latency(info, rx - tx);
+                                
+                                if ((info->last_seq_rcvd + 1) == seq) {
+                                        if ((info->peer_multiskb > 1) &&
+                                            (info->peer_multiskb > (info->dup_since_incr + 1))) {
+                                                
+                                                info->seq_gap_rcvd += (info->peer_multiskb -
+                                                                       info->dup_since_incr - 1);
+                                        }
+                                        /* Great, in order...all is well */
+                                }
+                                else if (info->last_seq_rcvd < seq) {
+                                        /* sequence gap, means we dropped a pkt most likely */
+                                        if (info->peer_multiskb > 1) {
+                                                /* We dropped more than one sequence number's worth,
+                                                 * and if we're using multiskb, then this is quite
+                                                 * a few.  This number still will not be exact, but
+                                                 * it will be closer.
+                                                 */
+                                                info->seq_gap_rcvd += (((seq - info->last_seq_rcvd) *
+                                                                        info->peer_multiskb) -
+                                                                       info->dup_since_incr);
+                                        }
+                                        else {
+                                                info->seq_gap_rcvd += (seq - info->last_seq_rcvd - 1);
+                                        }
+                                }
+                                else {
+                                        info->ooo_rcvd++; /* out-of-order */
+                                }
+                                
+                                info->dup_since_incr = 0;
                         }
+                        info->last_seq_rcvd = seq;
+                        kfree_skb(skb);
+                        if (debug > 1) {
+                                printk("done with pktgen_receive, free'd pkt\n");
+                        }
+                        return 0;
                 }
-                
-		if (signal_pending(current)) {
-                        break;
-                }
+        }
+        return -1; /* Let another protocol handle it, it's not for us! */
+}/* pktgen_receive */
 
-                /* If lcount is zero, then run forever */
-		if ((lcount != 0) && (--lcount == 0)) {
-			if (atomic_read(&skb->users) != 1) {
-				u32 idle_start, idle;
-
-				idle_start = cycles();
-				while (atomic_read(&skb->users) != 1) {
-					if (signal_pending(current)) {
-                                                break;
-                                        }
-					schedule();
-				}
-				idle = cycles() - idle_start;
-				info->idle_acc += idle;
-			}
-			break;
-		}
+/* Reset latency statistics so averages/buckets start fresh. */
+static void pg_reset_latency_counters(struct pktgen_interface_info* info) {
+        int i;
+        info->avg_latency = 0;
+        info->min_latency = 0x7fffffff; /* largest integer */
+        info->max_latency = 0x80000000; /* smallest integer */
+        info->pkts_rcvd_since_clear = 0;
+        for (i = 0; i<LAT_BUCKETS_MAX; i++) {
+                info->latency_bkts[i] = 0;
+        }
+}
 
-		if (netif_queue_stopped(odev) || current->need_resched) {
-			u32 idle_start, idle;
+static void pg_clear_counters(struct pktgen_interface_info* info, int seq_too) {
+        info->idle_acc = 0;
+	info->sofar = 0;
+        info->tx_bytes = 0;
+        info->errors = 0;
+        info->ooo_rcvd = 0;
+        info->dup_rcvd = 0;
+        info->pkts_rcvd = 0;
+        info->bytes_rcvd = 0;
+        info->non_pg_pkts_rcvd = 0;
+        info->seq_gap_rcvd = 0; /* dropped */
 
-			idle_start = cycles();
-			do {
-				if (signal_pending(current)) {
-                                        info->do_run_run = 0;
-                                        break;
-                                }
-				if (!netif_running(odev)) {
-                                        info->do_run_run = 0;
-					break;
+        /* This is a bit of a hack, but it gets the dup counters
+         * in line so we don't have false alarms on dropped pkts.
+         */
+        if (seq_too) {
+                info->dup_since_incr = info->peer_multiskb - 1;
+                info->seq_num = 1;
+                info->last_seq_rcvd = 0;
+        }
+        
+        pg_reset_latency_counters(info);
+}
+
+/* Adds an interface to the thread.  The interface will be in
+ * the stopped queue until started.
+ */
+/* Attach info to pg_thread's stopped list; -EBUSY if already owned by a thread. */
+static int add_interface_to_thread(struct pktgen_thread_info* pg_thread,
+                                   struct pktgen_interface_info* info) {
+        int rv = 0;
+        /* grab lock & insert into the stopped list */
+        pg_lock(pg_thread, __FUNCTION__);
+
+        if (info->pg_thread) {
+                printk("pktgen: ERROR:  Already assigned to a thread.\n");
+                rv = -EBUSY;
+                goto out;
+        }
+        
+        info->next = pg_thread->stopped_if_infos; /* push onto head of stopped list */
+        pg_thread->stopped_if_infos = info;
+        info->pg_thread = pg_thread;
+
+ out:
+        pg_unlock(pg_thread, __FUNCTION__);        
+        return rv;
+}
+
+/* Set up structure for sending pkts, clear counters, add to rcv hash,
+ * create initial packet, and move from the stopped to the running
+ * interface_info list
+ */
+static int pg_start_interface(struct pktgen_thread_info* pg_thread,
+                              struct pktgen_interface_info* info) {
+        PG_DEBUG(printk("Entering pg_start_interface..\n"));
+        pg_setup_inject(info);
+
+	if (!info->odev) {
+		return -1;
+        }
+
+        PG_DEBUG(printk("About to clean counters..\n"));
+        pg_clear_counters(info, 1);
+
+        info->do_run_run = 1; /* Cranke yeself! */
+
+	info->skb = NULL;
+
+	info->started_at = getCurUs();
+
+        pg_lock(pg_thread, __FUNCTION__);
+        {
+                /* Remove from the stopped list */
+                struct pktgen_interface_info* p = pg_thread->stopped_if_infos;
+                if (p == info) {
+                        pg_thread->stopped_if_infos = p->next;
+                        p->next = NULL;
+                }
+                else {
+                        while (p) {
+                                if (p->next == info) {
+                                        p->next = p->next->next;
+                                        info->next = NULL;
+                                        break;
                                 }
-				if (current->need_resched)
-					schedule();
-				else
-					do_softirq();
-			} while (netif_queue_stopped(odev));
-			idle = cycles() - idle_start;
-			info->idle_acc += idle;
-		}
-	}/* while we should be running */
+                                p = p->next;
+                        }
+                }
 
-	do_gettimeofday(&(info->stopped_at));
+                info->next_tx_ns = 0; /* Transmit immediately */
+                
+                /* Move to the front of the running list */
+                info->next = pg_thread->running_if_infos;
+                pg_thread->running_if_infos = info;
+                pg_thread->running_if_sz++;
+        }
+        pg_unlock(pg_thread, __FUNCTION__);
+        PG_DEBUG(printk("Leaving pg_start_interface..\n"));
+        return 0;
+}/* pg_start_interface */
 
-	total = (info->stopped_at.tv_sec - info->started_at.tv_sec) * 1000000 +
-		info->stopped_at.tv_usec - info->started_at.tv_usec;
 
-	idle = (__u32)(info->idle_acc)/(__u32)(cpu_speed);
+/* set stopped-at timer, remove from running list, do counters & statistics
+ * NOTE:  We do not remove from the rcv hash.
+ */
+static int pg_stop_interface(struct pktgen_thread_info* pg_thread,
+                             struct pktgen_interface_info* info) {
+        __u64 total_us;
+        if (!info->do_run_run) {
+                printk("pktgen interface: %s is already stopped\n", info->ifname);
+                return -EINVAL;
+        }
+        
+        info->stopped_at = getCurUs(); /* started_at is in us (see pg_start_interface), keep units consistent */
+        info->do_run_run = 0;
+
+        /* The main worker loop will place it onto the stopped list if needed,
+         * next time this interface is asked to be re-inserted into the
+         * list.
+         */
+        
+	total_us = info->stopped_at - info->started_at;

         {
+                __u64 idle = pg_div(info->idle_acc, pg_cycles_per_us);
 		char *p = info->result;
-                __u64 pps = (__u32)(info->sofar * 1000) / ((__u32)(total) / 1000);
-                __u64 bps = pps * 8 * (info->pkt_size + 4); /* take 32bit ethernet CRC into account */
-		p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags) %llupps %lluMb/sec (%llubps)  errors: %llu",
-			     (unsigned long long) total,
-			     (unsigned long long) (total - idle),
-			     (unsigned long long) idle,
-			     (unsigned long long) info->sofar,
-                             skb->len + 4, /* Add 4 to account for the ethernet checksum */
-                             nr_frags,
-			     (unsigned long long) pps,
-			     (unsigned long long) (bps / (u64) 1024 / (u64) 1024),
-			     (unsigned long long) bps,
-			     (unsigned long long) info->errors
+                __u64 pps = divremdi3(info->sofar * 1000, pg_div(total_us, 1000), PG_DIV);
+                __u64 bps = pps * 8 * (info->cur_pkt_size + 4); /* take 32bit ethernet CRC into account */
+                
+		p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte) %llupps %lluMb/sec (%llubps)  errors: %llu",
+			     total_us, total_us - idle, idle,
+			     info->sofar,
+                             info->cur_pkt_size + 4, /* Add 4 to account for the ethernet checksum */
+			     pps,
+			     bps >> 20, bps, info->errors
 			     );
 	}
+        return 0;
+}/* pg_stop_interface */
+
+
+/* Re-inserts 'last' into the pg_thread's list.  Calling code should
+ * make sure that 'last' is not already in the list.
+ */
+static struct pktgen_interface_info* pg_resort_pginfos(struct pktgen_thread_info* pg_thread,
+                                                       struct pktgen_interface_info* last,
+                                                       int setup_cur_if) {
+        struct pktgen_interface_info* rv = NULL;
+        
+        pg_lock(pg_thread, __FUNCTION__);
+        {
+                struct pktgen_interface_info* p = pg_thread->running_if_infos;
         
-out_reldev:
-        if (odev) {
-                dev_put(odev);
-                odev = NULL;
+                if (last) {
+                        if (!last->do_run_run) {
+                                /* If this guy was stopped while 'current', then
+                                 * we'll want to place him on the stopped list
+                                 * here.
+                                 */
+                                last->next = pg_thread->stopped_if_infos;
+                                pg_thread->stopped_if_infos = last;
+                                pg_thread->running_if_sz--;
+                        }
+                        else {
+                                /* re-insert */
+                                if (!p) {
+                                        pg_thread->running_if_infos = last;
+                                        last->next = NULL;
+                                }
+                                else {
+                                        /* Another special case, check to see if we should go at the
+                                         * front of the queue.
+                                         */
+                                        if (p->next_tx_ns > last->next_tx_ns) {
+                                                last->next = p;
+                                                pg_thread->running_if_infos = last;
+                                        }
+                                        else {
+                                                int inserted = 0;
+                                                while (p->next) {
+                                                        if (p->next->next_tx_ns > last->next_tx_ns) {
+                                                                /* Insert into the list */
+                                                                last->next = p->next;
+                                                                p->next = last;
+                                                                inserted = 1;
+                                                                break;
+                                                        }
+                                                        p = p->next;
+                                                }
+                                                if (!inserted) {
+                                                        /* place at the end */
+                                                        last->next = NULL;
+                                                        p->next = last;
+                                                }
+                                        }
+                                }
+                        }
+                }
+
+                /* List is re-sorted, so grab the first one to return */
+                rv = pg_thread->running_if_infos;
+                if (rv) {
+                        /* Pop him off of the list.  We do this here because we already
+                         * have the lock.  Calling code just has to be aware of this
+                         * feature.
+                         */
+                        pg_thread->running_if_infos = rv->next;
+                }
+        }
+
+        if (setup_cur_if) {
+                pg_thread->cur_if = rv;
+        }
+        
+        pg_unlock(pg_thread, __FUNCTION__);
+        return rv;
+}/* pg_resort_pginfos */
+
+
+void pg_stop_all_ifs(struct pktgen_thread_info* pg_thread) { /* Stop cur_if and every running interface, leaving them on the stopped list. */
+        struct pktgen_interface_info* next = NULL;
+
+        pg_lock(pg_thread, __FUNCTION__);
+        if (pg_thread->cur_if) {
+                /* Move the currently-transmitting interface onto the stopped list */
+                pg_stop_interface(pg_thread, pg_thread->cur_if);
+                pg_thread->cur_if->next = pg_thread->stopped_if_infos;
+                pg_thread->stopped_if_infos = pg_thread->cur_if;
+                pg_thread->cur_if = NULL;
+        }
+        pg_unlock(pg_thread, __FUNCTION__);
+
+        /* These have their own locking, so call them with our lock released. */
+        next = pg_resort_pginfos(pg_thread, NULL, 0);
+        while (next) {
+                pg_stop_interface(pg_thread, next);
+                next = pg_resort_pginfos(pg_thread, NULL, 0);
         }
+}/* pg_stop_all_ifs */
 
-        /* TODO:  Is this worth printing out (other than for debug?) */
-        printk("fp = %llu\n", (unsigned long long) fp);
-	return;
 
+void pg_rem_all_ifs(struct pktgen_thread_info* pg_thread) { /* Unlink and free every interface on this thread's stopped list. */
+        struct pktgen_interface_info* next = NULL;
+        
+        /* Remove all interfaces, clean up memory */
+        while ((next = pg_thread->stopped_if_infos)) {
+                int rv = pg_rem_interface_info(pg_thread, next);
+                if (rv >= 0) {
+                        kfree(next);
+                }
+                else {
+                        printk("ERROR: failed to rem_interface: %i\n", rv); /* NOTE(review): if a failed removal leaves 'next' on stopped_if_infos, this loops forever -- confirm pg_rem_interface_info unlinks even on error */
+                }
+        }
+}/* pg_rem_all_ifs */
+
+
+void pg_rem_from_thread_list(struct pktgen_thread_info* pg_thread) { /* Unlink pg_thread from the global pktgen_threads singly-linked list. */
+        /* Remove from the thread list */
+        pg_lock_thread_list(__FUNCTION__);
+        {
+                struct pktgen_thread_info* tmp = pktgen_threads;
+                if (tmp == pg_thread) {
+                        pktgen_threads = tmp->next; /* NOTE(review): pg_thread->next is not NULLed in this head-removal branch, unlike the branch below -- confirm no caller relies on it */
+                }
+                else {
+                        while (tmp) {
+                                if (tmp->next == pg_thread) {
+                                        tmp->next = pg_thread->next;
+                                        pg_thread->next = NULL;
+                                        break;
+                                }
+                                tmp = tmp->next;
+                        }
+                }
+        }
+        pg_unlock_thread_list(__FUNCTION__);
+}/* pg_rem_from_thread_list */
+
+
+/* Main loop of a pktgen kernel thread: repeatedly pick the next-due interface,
+ * pace by its ipg, build or clone an skb, and transmit until told to terminate. */
+void pg_thread_worker(struct pktgen_thread_info* pg_thread) {
+	struct net_device *odev = NULL;
+        __u64 idle_start = 0; /* cycle counter at the start of an idle period */
+        struct pktgen_interface_info* next = NULL;
+        u32 next_ipg = 0; /* ns until 'next' is due to transmit */
+        u64 now = 0; /* in nano-seconds */
+        u32 tx_since_softirq = 0;
+        u32 queue_stopped = 0; /* tx-queue-stopped outcomes since the last successful xmit */
+        
+        /* setup the thread environment */
+        init_pktgen_kthread(pg_thread, "kpktgend");
+        
+        PG_DEBUG(printk("Starting up pktgen thread: %s\n", pg_thread->name));
+        
+        /* an endless loop in which we are doing our work */
+        while (1) {
+
+                /* Re-sorts the list, inserting 'next' (which is really the last one
+                 * we used).  It pops the top one off of the queue and returns it.
+                 * Calling code must make sure to re-insert the returned value
+                 */
+                next = pg_resort_pginfos(pg_thread, next, 1);
+                
+                if (next) {
+
+                        odev = next->odev;
+
+                        if (next->ipg) {
+
+                                now = getRelativeCurNs();
+                                if (now < next->next_tx_ns) {
+                                        next_ipg = (u32)(next->next_tx_ns - now);
+                                        
+                                        /* Try not to busy-spin if we have larger sleep times.
+                                        * TODO:  Investigate better ways to do this.
+                                        */
+                                        if (next_ipg < 10000) { /* 10 usecs or less */
+                                                nanospin(next_ipg, next);
+                                        }
+                                        else if (next_ipg < 10000000) { /* 10ms or less */
+                                                pg_udelay(next_ipg / 1000, next, pg_thread);
+                                        }
+                                        else {
+                                                /* fall asleep for 10ms or more (NOTE(review): currently identical to the 10ms branch above). */
+                                                pg_udelay(next_ipg / 1000, next, pg_thread);
+                                        }
+                                }
+
+                                /* This is max IPG, this has special meaning of
+                                 * "never transmit"
+                                 */
+                                if (next->ipg == 0x7FFFFFFF) {
+                                        next->next_tx_ns = getRelativeCurNs() + next->ipg;
+                                        continue;
+                                }
+                        }
+                
+                        if (netif_queue_stopped(odev) || current->need_resched) {
+                                
+                                idle_start = get_cycles();
+
+                                if (!netif_running(odev)) {
+                                        pg_stop_interface(pg_thread, next);
+                                        continue;
+                                }
+                                if (current->need_resched) {
+                                        schedule();
+                                }
+                                else {
+                                        do_softirq();
+                                        tx_since_softirq = 0;
+                                }
+                                next->idle_acc += get_cycles() - idle_start;
+
+                                if (netif_queue_stopped(odev)) {
+                                        queue_stopped++;
+                                        continue; /* Try the next interface */
+                                }
+                        }
+                        
+                        if (next->last_ok || !next->skb) {
+                                if ((++next->fp_tmp >= next->multiskb ) || (!next->skb)) {
+                                        /* build a new pkt */
+                                        if (next->skb) {
+                                                kfree_skb(next->skb);
+                                        }
+                                        next->skb = fill_packet(odev, next);
+                                        if (next->skb == NULL) {
+                                                printk("ERROR:  Couldn't allocate skb in fill_packet.\n");
+                                                schedule();
+                                                next->fp_tmp--; /* back out increment, OOM */
+                                                continue;
+                                        }
+                                        next->fp++;
+                                        next->fp_tmp = 0; /* reset counter */
+                                        /* Not sure what good knowing nr_frags is...
+                                        next->nr_frags = skb_shinfo(skb)->nr_frags;
+                                        */
+                                }
+                                atomic_inc(&(next->skb->users)); /* extra ref: lets the same skb be re-sent (multiskb) */
+                        }
+                
+                        spin_lock_bh(&odev->xmit_lock);
+                        if (!netif_queue_stopped(odev)) {
+                                if (odev->hard_start_xmit(next->skb, odev)) {
+                                        if (net_ratelimit()) {
+                                                printk(KERN_INFO "Hard xmit error\n");
+                                        }
+                                        next->errors++;
+                                        next->last_ok = 0;
+                                        queue_stopped++;
+                                }
+                                else {
+                                        queue_stopped = 0;
+                                        next->last_ok = 1;
+                                        next->sofar++;
+                                        next->tx_bytes += (next->cur_pkt_size + 4); /* count csum */
+                                }
+
+                                next->next_tx_ns = getRelativeCurNs() + next->ipg;
+                        }
+                        else {  /* Re-try it next time */
+                                queue_stopped++;
+                                next->last_ok = 0;
+                        }
+
+                        spin_unlock_bh(&odev->xmit_lock);
+
+                        if (++tx_since_softirq > pg_thread->max_before_softirq) {
+                                do_softirq();
+                                tx_since_softirq = 0;
+                        }
+
+                        /* If next->count is zero, then run forever */
+                        if ((next->count != 0) && (next->sofar >= next->count)) {
+                                if (atomic_read(&(next->skb->users)) != 1) {
+                                        idle_start = get_cycles();
+                                        while (atomic_read(&(next->skb->users)) != 1) {
+                                                if (signal_pending(current)) {
+                                                        break;
+                                                }
+                                                schedule();
+                                        }
+                                        next->idle_acc += get_cycles() - idle_start;
+                                }
+                                pg_stop_interface(pg_thread, next);
+                        }/* if we're done with a particular interface. */
+
+                }/* if could find the next interface to send on. */
+                else {
+                        /* fall asleep for a bit */
+                        interruptible_sleep_on_timeout(&(pg_thread->queue), HZ/10);
+                        queue_stopped = 0;
+                }
+                
+                /* here we are back from sleep, either due to the timeout
+                   (HZ/10 above, i.e. 100ms), or because we caught a signal.
+                */
+                if (pg_thread->terminate || signal_pending(current)) {
+                        /* we received a request to terminate ourself */
+                        break;
+                }
+
+                if (queue_stopped > pg_thread->running_if_sz) {
+                        /* All our devices' tx queues are full, schedule and hope to run
+                         * again soon.
+                         */
+                        schedule();
+                        queue_stopped = 0;
+                }
+        }//while true
+
+        /* here we go only in case of termination of the thread */
+
+        PG_DEBUG(printk("pgthread: %s  stopping all Interfaces.\n", pg_thread->name));
+        pg_stop_all_ifs(pg_thread);
+
+        PG_DEBUG(printk("pgthread: %s  removing all Interfaces.\n", pg_thread->name));
+        pg_rem_all_ifs(pg_thread);
+
+        pg_rem_from_thread_list(pg_thread);
+        
+        /* cleanup the thread, leave */
+        PG_DEBUG(printk("pgthread: %s  calling exit_pktgen_kthread.\n", pg_thread->name));
+        exit_pktgen_kthread(pg_thread);
+}
+
+/* private functions */
+static void kthread_launcher(void *data) { /* keventd task: spawns the actual worker via kernel_thread() */
+        struct pktgen_thread_info *kthread = data;
+        kernel_thread((int (*)(void *))kthread->function, (void *)kthread, 0);
 }
 
-/* proc/net/pktgen/pg */
+/* create a new kernel thread. Called by the creator. */
+void start_pktgen_kthread(struct pktgen_thread_info *kthread) {
 
-static int proc_busy_read(char *buf , char **start, off_t offset,
-			     int len, int *eof, void *data)
-{
-	char *p;
-        int idx = (int)(long)(data);
-        struct pktgen_info* info = NULL;
+        /* initialize the semaphore:
+           we start with the semaphore locked. The new kernel
+           thread will setup its stuff and unlock it. This
+           control flow (the one that creates the thread) blocks
+           in the down operation below until the thread has reached
+           the up() operation.
+         */
+        init_MUTEX_LOCKED(&kthread->startstop_sem);
+
+        /* store the function to be executed in the data passed to
+           the launcher */
+        kthread->function = pg_thread_worker;
         
-        if ((idx < 0) || (idx >= MAX_PKTGEN)) {
-                printk("ERROR: idx: %i is out of range in proc_write\n", idx);
-                return -EINVAL;
+        /* create the new thread by running a task through keventd */
+
+        /* initialize the task queue structure */
+        kthread->tq.sync = 0;
+        INIT_LIST_HEAD(&kthread->tq.list);
+        kthread->tq.routine = kthread_launcher;
+        kthread->tq.data = kthread;
+
+        /* and schedule it for execution */
+        schedule_task(&kthread->tq);
+
+        /* wait till it has reached the setup_thread routine */
+        down(&kthread->startstop_sem);
+}
+
+/* stop a kernel thread. Called by the removing instance */
+static void stop_pktgen_kthread(struct pktgen_thread_info *kthread) {
+        PG_DEBUG(printk("pgthread: %s  stop_pktgen_kthread.\n", kthread->name));
+
+        if (kthread->thread == NULL) {
+                printk("stop_kthread: killing non existing thread!\n");
+                return;
         }
-        info = &(pginfos[idx]);
-  
-	p = buf;
-	p += sprintf(p, "%d\n", info->busy);
-	*eof = 1;
-  
-	return p-buf;
+
+        /* Stop each interface */
+        pg_lock(kthread, __FUNCTION__);
+        {
+                struct pktgen_interface_info* tmp = kthread->running_if_infos;
+                while (tmp) {
+                        tmp->do_run_run = 0; /* request stop; the worker moves it to the stopped list */
+                        tmp->next_tx_ns = 0;
+                        tmp = tmp->next;
+                }
+                if (kthread->cur_if) {
+                        kthread->cur_if->do_run_run = 0;
+                        kthread->cur_if->next_tx_ns = 0;
+                }
+        }
+        pg_unlock(kthread, __FUNCTION__);
+
+        /* Wait for everything to fully stop */
+        while (1) {
+                pg_lock(kthread, __FUNCTION__);
+                if (kthread->cur_if || kthread->running_if_infos) {
+                        pg_unlock(kthread, __FUNCTION__);
+                        if (current->need_resched) {
+                                schedule();
+                        }
+                        mdelay(1); /* NOTE(review): mdelay busy-waits; tolerable only in this teardown path */
+                }
+                else {
+                        pg_unlock(kthread, __FUNCTION__);
+                        break;
+                }
+        }
+        
+        /* this function needs to be protected with the big
+	   kernel lock (lock_kernel()). The lock must be
+           grabbed before changing the terminate
+	   flag and released after the down() call. */
+        lock_kernel();
+        
+        /* initialize the semaphore. We lock it here, the
+           leave_thread call of the thread to be terminated
+           will unlock it. As soon as we see the semaphore
+           unlocked, we know that the thread has exited.
+	*/
+        init_MUTEX_LOCKED(&kthread->startstop_sem);
+
+        /* We need to do a memory barrier here to be sure that
+           the flags are visible on all CPUs. 
+        */
+        mb();
+
+        /* set flag to request thread termination */
+        kthread->terminate = 1;
+
+        /* We need to do a memory barrier here to be sure that
+           the flags are visible on all CPUs. 
+        */
+        mb();
+        kill_proc(kthread->thread->pid, SIGKILL, 1);
+       
+        /* block till thread terminated */
+        down(&kthread->startstop_sem);
+        kthread->in_use = 0;
+        
+        /* release the big kernel lock */
+        unlock_kernel();
+
+        /* now we are sure the thread is in zombie state. We
+           notify keventd to clean the process up.
+        */
+        kill_proc(2, SIGCHLD, 1); /* NOTE(review): assumes keventd is pid 2 -- confirm for this kernel */
+
+        PG_DEBUG(printk("pgthread: %s  done with stop_pktgen_kthread.\n", kthread->name));
+}/* stop_pktgen_kthread */
+
+
+/* initialize new created thread. Called by the new thread. */
+void init_pktgen_kthread(struct pktgen_thread_info *kthread, char *name) {
+        /* lock the kernel. A new kernel thread starts without
+           the big kernel lock, regardless of the lock state
+           of the creator (the lock level is *not* inherited)
+        */
+        lock_kernel();
+
+        /* fill in thread structure */
+        kthread->thread = current;
+
+        /* set signal mask to what we want to respond */
+        siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));
+
+        /* initialise wait queue */
+        init_waitqueue_head(&kthread->queue);
+
+        /* initialise termination flag */
+        kthread->terminate = 0;
+
+        /* set name of this process (comm is 16 bytes: max 15 chars + NUL) */
+        snprintf(current->comm, sizeof(current->comm), "%s", name); /* bounded copy; never pass 'name' as a format string */
+        
+        /* let others run */
+        unlock_kernel();
+
+        /* tell the creator that we are ready and let him continue */
+        up(&kthread->startstop_sem);
+}/* init_pktgen_kthread */
+
+/* cleanup of thread. Called by the exiting thread. */
+static void exit_pktgen_kthread(struct pktgen_thread_info *kthread) {
+        /* we are terminating */
+
+	/* lock the kernel, the exit will unlock it */
+        lock_kernel();
+        kthread->thread = NULL; /* mark gone; stop_pktgen_kthread checks this pointer */
+        mb();
+
+        /* Clean up proc file system */
+        if (strlen(kthread->fname)) { /* NOTE(review): assumes fname was filled in when the proc entry was registered */
+                remove_proc_entry(kthread->fname, NULL);
+        }
+
+        /* notify the stop_kthread() routine that we are terminating. */
+	up(&kthread->startstop_sem);
+	/* the kernel_thread that called clone() does a do_exit here. */
+
+	/* there is no race here between execution of the "killer" and real termination
+	   of the thread (race window between up and do_exit), since both the
+	   thread and the "killer" function are running with the kernel lock held.
+	   The kernel lock will be freed after the thread exited, so the code
+	   is really not executed anymore as soon as the unload functions gets
+	   the kernel lock back.
+	   The init process may not have made the cleanup of the process here,
+	   but the cleanup can be done safely with the module unloaded.
+	*/
+}/* exit_pktgen_kthread */
+
+
+/* proc/net/pg */
+
+static char* pg_display_latency(struct pktgen_interface_info* info, char* p, int reset_latency) { /* Append latency stats to buffer 'p'; returns the advanced pointer.  Non-zero reset_latency clears the counters afterwards. */
+        int i;
+        p += sprintf(p, "     avg_latency: %dus  min_lat: %dus  max_lat: %dus  pkts_in_sample: %llu\n",
+                     info->avg_latency, info->min_latency, info->max_latency,
+                     info->pkts_rcvd_since_clear);
+        p += sprintf(p, "      Buckets(us) [ ");
+        for (i = 0; i<LAT_BUCKETS_MAX; i++) {
+                p += sprintf(p, "%llu  ", info->latency_bkts[i]);
+        }
+        p += sprintf(p, "]\n");        
+
+        if (reset_latency) {
+                pg_reset_latency_counters(info);
+        }
+        return p;
 }
 
-static int proc_read(char *buf , char **start, off_t offset,
-			int len, int *eof, void *data)
+static int proc_pg_if_read(char *buf , char **start, off_t offset,
+                           int len, int *eof, void *data)
 {
 	char *p;
 	int i;
-        int idx = (int)(long)(data);
-        struct pktgen_info* info = NULL;
+        struct pktgen_interface_info* info = (struct pktgen_interface_info*)(data);
         __u64 sa;
         __u64 stopped;
-        __u64 now = getCurMs();
+        __u64 now = getCurUs();
+        __u64 now_rel_ns = getRelativeCurNs();
         
-        if ((idx < 0) || (idx >= MAX_PKTGEN)) {
-                printk("ERROR: idx: %i is out of range in proc_write\n", idx);
-                return -EINVAL;
-        }
-        info = &(pginfos[idx]);
-  
 	p = buf;
-        p += sprintf(p, "%s\n", VERSION); /* Help with parsing compatibility */
-	p += sprintf(p, "Params: count %llu  pkt_size: %u  frags: %d  ipg: %u  clone_skb: %d odev \"%s\"\n",
-		     (unsigned long long) info->count,
-		     info->pkt_size, info->nfrags, info->ipg,
-                     info->clone_skb, info->outdev);
-        p += sprintf(p, "     dst_min: %s  dst_max: %s  src_min: %s  src_max: %s\n",
+        p += sprintf(p, "VERSION-1\n"); /* Help with parsing compatibility */
+	p += sprintf(p, "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n     frags: %d  ipg: %u  multiskb: %d  ifname: %s\n",
+		     info->count, info->min_pkt_size, info->max_pkt_size,
+                     info->nfrags, info->ipg, info->multiskb, info->ifname);
+        p += sprintf(p, "     dst_min: %s  dst_max: %s\n     src_min: %s  src_max: %s\n",
                      info->dst_min, info->dst_max, info->src_min, info->src_max);
         p += sprintf(p, "     src_mac: ");
 	for (i = 0; i < 6; i++) {
@@ -802,14 +1868,17 @@
         p += sprintf(p, "     udp_src_min: %d  udp_src_max: %d  udp_dst_min: %d  udp_dst_max: %d\n",
                      info->udp_src_min, info->udp_src_max, info->udp_dst_min,
                      info->udp_dst_max);
-        p += sprintf(p, "     src_mac_count: %d  dst_mac_count: %d\n     Flags: ",
-                     info->src_mac_count, info->dst_mac_count);
+        p += sprintf(p, "     src_mac_count: %d  dst_mac_count: %d  peer_multiskb: %d\n     Flags: ",
+                     info->src_mac_count, info->dst_mac_count, info->peer_multiskb);
         if (info->flags &  F_IPSRC_RND) {
                 p += sprintf(p, "IPSRC_RND  ");
         }
         if (info->flags & F_IPDST_RND) {
                 p += sprintf(p, "IPDST_RND  ");
         }
+        if (info->flags & F_TXSIZE_RND) {
+                p += sprintf(p, "TXSIZE_RND  ");
+        }
         if (info->flags & F_UDPSRC_RND) {
                 p += sprintf(p, "UDPSRC_RND  ");
         }
@@ -824,22 +1893,24 @@
         }
         p += sprintf(p, "\n");
         
-        sa = tv_to_ms(&(info->started_at));
-        stopped = tv_to_ms(&(info->stopped_at));
+        sa = info->started_at;
+        stopped = info->stopped_at;
         if (info->do_run_run) {
                 stopped = now; /* not really stopped, more like last-running-at */
         }
-        p += sprintf(p, "Current:\n     pkts-sofar: %llu  errors: %llu\n     started: %llums  stopped: %llums  now: %llums  idle: %lluns\n",
-                     (unsigned long long) info->sofar,
-		     (unsigned long long) info->errors,
-		     (unsigned long long) sa,
-		     (unsigned long long) stopped,
-		     (unsigned long long) now,
-		     (unsigned long long) info->idle_acc);
+        p += sprintf(p, "Current:\n     pkts-sofar: %llu  errors: %llu\n     started: %lluus  elapsed: %lluus\n     idle: %lluns  next_tx: %llu(%lli)ns\n",
+                     info->sofar, info->errors, sa, (stopped - sa), info->idle_acc,
+                     info->next_tx_ns, (long long)(info->next_tx_ns) - (long long)(now_rel_ns));
         p += sprintf(p, "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
                      info->seq_num, info->cur_dst_mac_offset, info->cur_src_mac_offset);
         p += sprintf(p, "     cur_saddr: 0x%x  cur_daddr: 0x%x  cur_udp_dst: %d  cur_udp_src: %d\n",
                      info->cur_saddr, info->cur_daddr, info->cur_udp_dst, info->cur_udp_src);
+        p += sprintf(p, "     pkts_rcvd: %llu  bytes_rcvd: %llu  last_seq_rcvd: %d  ooo_rcvd: %llu\n",
+                     info->pkts_rcvd, info->bytes_rcvd, info->last_seq_rcvd, info->ooo_rcvd);
+        p += sprintf(p, "     dup_rcvd: %llu  seq_gap_rcvd(dropped): %llu  non_pg_rcvd: %llu\n",
+                     info->dup_rcvd, info->seq_gap_rcvd, info->non_pg_pkts_rcvd);
+
+        p = pg_display_latency(info, p, 0);
         
 	if (info->result[0])
 		p += sprintf(p, "Result: %s\n", info->result);
@@ -850,16 +1921,94 @@
 	return p - buf;
 }
 
+
+static int proc_pg_thread_read(char *buf , char **start, off_t offset,
+                               int len, int *eof, void *data)
+{ /* /proc read handler: report a thread's name, current interface, and running/stopped lists. */
+	char *p;
+        struct pktgen_thread_info* pg_thread = (struct pktgen_thread_info*)(data);
+        struct pktgen_interface_info* info = NULL;
+
+        if (!pg_thread) {
+                printk("ERROR: could not find pg_thread in proc_pg_thread_read\n");
+                return -EINVAL;
+        }
+        
+	p = buf;
+        p += sprintf(p, "VERSION-1\n"); /* Help with parsing compatibility */
+	p += sprintf(p, "Name: %s  max_before_softirq: %d\n",
+                     pg_thread->name, pg_thread->max_before_softirq);
+
+        pg_lock(pg_thread, __FUNCTION__);
+        if (pg_thread->cur_if) {
+                p += sprintf(p, "Current: %s\n", pg_thread->cur_if->ifname);
+        }
+        else {
+                p += sprintf(p, "Current: NULL\n");
+        }
+        pg_unlock(pg_thread, __FUNCTION__); /* NOTE(review): lists may change between here and the re-lock below */
+        
+        p += sprintf(p, "Running: ");
+        
+        pg_lock(pg_thread, __FUNCTION__);
+        info = pg_thread->running_if_infos;
+        while (info) {
+                p += sprintf(p, "%s ", info->ifname);
+                info = info->next;
+        }
+        p += sprintf(p, "\nStopped: ");
+        info = pg_thread->stopped_if_infos;
+        while (info) {
+                p += sprintf(p, "%s ", info->ifname);
+                info = info->next;
+        }
+
+	if (pg_thread->result[0])
+		p += sprintf(p, "\nResult: %s\n", pg_thread->result);
+	else
+		p += sprintf(p, "\nResult: NA\n");
+	*eof = 1;
+
+        pg_unlock(pg_thread, __FUNCTION__);
+
+	return p - buf;
+}/* proc_pg_thread_read */
+
+
+static int proc_pg_ctrl_read(char *buf , char **start, off_t offset,
+                             int len, int *eof, void *data)
+{ /* /proc read handler: list the names of all pktgen threads. */
+	char *p;
+        struct pktgen_thread_info* pg_thread = NULL;
+        
+	p = buf;
+        p += sprintf(p, "VERSION-1\n"); /* Help with parsing compatibility */
+        p += sprintf(p, "Threads: ");
+        
+        pg_lock_thread_list(__FUNCTION__);
+        pg_thread = pktgen_threads;
+        while (pg_thread) {
+                p += sprintf(p, "%s ", pg_thread->name);
+                pg_thread = pg_thread->next;
+        }
+        p += sprintf(p, "\n");
+        
+	*eof = 1;
+
+        pg_unlock_thread_list(__FUNCTION__);
+	return p - buf;
+}/* proc_pg_ctrl_read */
+
+
 static int count_trail_chars(const char *user_buffer, unsigned int maxlen)
 {
 	int i;
 
 	for (i = 0; i < maxlen; i++) {
-		char c;
-
-		if (get_user(c, &user_buffer[i]))
-			return -EFAULT;
-		switch (c) {
+                char c;
+                if (get_user(c, &user_buffer[i]))
+                        return -EFAULT;
+                switch (c) {
 		case '\"':
 		case '\n':
 		case '\r':
@@ -875,7 +2024,7 @@
 	return i;
 }
 
-static unsigned long num_arg(const char *user_buffer, unsigned long maxlen,
+static unsigned long num_arg(const char *user_buffer, unsigned long maxlen, 
 			     unsigned long *num)
 {
 	int i = 0;
@@ -883,11 +2032,10 @@
 	*num = 0;
   
 	for(; i < maxlen; i++) {
-		char c;
-
-		if (get_user(c, &user_buffer[i]))
-			return -EFAULT;
-		if ((c >= '0') && (c <= '9')) {
+                char c;
+                if (get_user(c, &user_buffer[i]))
+                        return -EFAULT;
+                if ((c >= '0') && (c <= '9')) {
 			*num *= 10;
 			*num += c -'0';
 		} else
@@ -901,11 +2049,10 @@
 	int i = 0;
 
 	for(; i < maxlen; i++) {
-		char c;
-
-		if (get_user(c, &user_buffer[i]))
-			return -EFAULT;
-		switch (c) {
+                char c;
+                if (get_user(c, &user_buffer[i]))
+                        return -EFAULT;
+                switch (c) {
 		case '\"':
 		case '\n':
 		case '\r':
@@ -913,189 +2060,221 @@
 		case ' ':
 			goto done_str;
 		default:
 			break;
 		};
 	}
 done_str:
 	return i;
 }
 
-static int proc_write(struct file *file, const char *user_buffer,
-			 unsigned long count, void *data)
+static int proc_pg_if_write(struct file *file, const char *user_buffer,
+                            unsigned long count, void *data)
 {
 	int i = 0, max, len;
 	char name[16], valstr[32];
 	unsigned long value = 0;
-        int idx = (int)(long)(data);
-        struct pktgen_info* info = NULL;
-        char* result = NULL;
-	int tmp;
+        struct pktgen_interface_info* info = (struct pktgen_interface_info*)(data);
+        char* pg_result = NULL;
+        int tmp = 0;
         
-        if ((idx < 0) || (idx >= MAX_PKTGEN)) {
-                printk("ERROR: idx: %i is out of range in proc_write\n", idx);
-                return -EINVAL;
-        }
-        info = &(pginfos[idx]);
-        result = &(info->result[0]);
+        pg_result = &(info->result[0]);
         
 	if (count < 1) {
-		sprintf(result, "Wrong command format");
+		sprintf(pg_result, "Wrong command format");
 		return -EINVAL;
 	}
   
 	max = count - i;
 	tmp = count_trail_chars(&user_buffer[i], max);
-	if (tmp < 0)
-		return tmp;
-	i += tmp;
-  
+        if (tmp < 0) { return tmp; }
+        i += tmp;
+        
 	/* Read variable name */
 
 	len = strn_len(&user_buffer[i], sizeof(name) - 1);
-	if (len < 0)
-		return len;
+        if (len < 0) { return len; }
 	memset(name, 0, sizeof(name));
 	copy_from_user(name, &user_buffer[i], len);
 	i += len;
   
 	max = count -i;
 	len = count_trail_chars(&user_buffer[i], max);
-	if (len < 0)
-		return len;
+        if (len < 0) {
+                return len;
+        }
 	i += len;
 
-	if (debug)
-		printk("pg: %s,%lu\n", name, count);
+	if (debug) {
+                char tb[129]; /* bound user-controlled count to avoid stack overflow */
+                unsigned long tl = (count < 128) ? count : 128;
+                copy_from_user(tb, user_buffer, tl); tb[tl] = 0;
+		printk("pg: %s,%lu  buffer -:%s:-\n", name, count, tb);
+        }
 
 	if (!strcmp(name, "stop")) {
 		if (info->do_run_run) {
-			strcpy(result, "Stopping");
+			strcpy(pg_result, "Stopping");
+                        pg_stop_interface(info->pg_thread, info);
                 }
                 else {
-                        strcpy(result, "Already stopped...\n");
+                        strcpy(pg_result, "Already stopped...\n");
+                }
+		return count;
+	}
+
+	if (!strcmp(name, "min_pkt_size")) {
+		len = num_arg(&user_buffer[i], 10, &value);
+                if (len < 0) { return len; }
+		i += len;
+		if (value < 14+20+8)
+			value = 14+20+8;
+                if (value != info->min_pkt_size) {
+                        info->min_pkt_size = value;
+                        info->cur_pkt_size = value;
                 }
-                info->do_run_run = 0;
+		sprintf(pg_result, "OK: min_pkt_size=%u", info->min_pkt_size);
+		return count;
+	}
+
+        if (!strcmp(name, "debug")) {
+		len = num_arg(&user_buffer[i], 10, &value);
+                if (len < 0) { return len; }
+		i += len;
+                debug = value;
+		sprintf(pg_result, "OK: debug=%u", debug);
 		return count;
 	}
 
-	if (!strcmp(name, "pkt_size")) {
+        if (!strcmp(name, "max_pkt_size")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
 		if (value < 14+20+8)
 			value = 14+20+8;
-		info->pkt_size = value;
-		sprintf(result, "OK: pkt_size=%u", info->pkt_size);
+                if (value != info->max_pkt_size) {
+                        info->max_pkt_size = value;
+                        info->cur_pkt_size = value;
+                }
+		sprintf(pg_result, "OK: max_pkt_size=%u", info->max_pkt_size);
 		return count;
 	}
-	if (!strcmp(name, "frags")) {
+        
+        if (!strcmp(name, "frags")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
 		info->nfrags = value;
-		sprintf(result, "OK: frags=%u", info->nfrags);
+		sprintf(pg_result, "OK: frags=%u", info->nfrags);
 		return count;
 	}
 	if (!strcmp(name, "ipg")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
 		info->ipg = value;
-		sprintf(result, "OK: ipg=%u", info->ipg);
+                if ((getRelativeCurNs() + info->ipg) > info->next_tx_ns) {
+                        info->next_tx_ns = getRelativeCurNs() + info->ipg;
+                }
+		sprintf(pg_result, "OK: ipg=%u", info->ipg);
 		return count;
 	}
  	if (!strcmp(name, "udp_src_min")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-	 	info->udp_src_min = value;
-		sprintf(result, "OK: udp_src_min=%u", info->udp_src_min);
+                if (value != info->udp_src_min) {
+                        info->udp_src_min = value;
+                        info->cur_udp_src = value;
+                }       
+		sprintf(pg_result, "OK: udp_src_min=%u", info->udp_src_min);
 		return count;
 	}
  	if (!strcmp(name, "udp_dst_min")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-	 	info->udp_dst_min = value;
-		sprintf(result, "OK: udp_dst_min=%u", info->udp_dst_min);
+                if (value != info->udp_dst_min) {
+                        info->udp_dst_min = value;
+                        info->cur_udp_dst = value;
+                }
+		sprintf(pg_result, "OK: udp_dst_min=%u", info->udp_dst_min);
 		return count;
 	}
  	if (!strcmp(name, "udp_src_max")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-	 	info->udp_src_max = value;
-		sprintf(result, "OK: udp_src_max=%u", info->udp_src_max);
+                if (value != info->udp_src_max) {
+                        info->udp_src_max = value;
+                        info->cur_udp_src = value;
+                }
+		sprintf(pg_result, "OK: udp_src_max=%u", info->udp_src_max);
 		return count;
 	}
  	if (!strcmp(name, "udp_dst_max")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-	 	info->udp_dst_max = value;
-		sprintf(result, "OK: udp_dst_max=%u", info->udp_dst_max);
+                if (value != info->udp_dst_max) {
+                        info->udp_dst_max = value;
+                        info->cur_udp_dst = value;
+                }
+		sprintf(pg_result, "OK: udp_dst_max=%u", info->udp_dst_max);
 		return count;
 	}
-	if (!strcmp(name, "clone_skb")) {
+	if (!strcmp(name, "multiskb")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-                info->clone_skb = value;
+                info->multiskb = value;
 	
-		sprintf(result, "OK: clone_skb=%d", info->clone_skb);
+		sprintf(pg_result, "OK: multiskb=%d", info->multiskb);
+		return count;
+	}
+	if (!strcmp(name, "peer_multiskb")) {
+		len = num_arg(&user_buffer[i], 10, &value);
+                if (len < 0) { return len; }
+		i += len;
+                info->peer_multiskb = value;
+	
+		sprintf(pg_result, "OK: peer_multiskb=%d", info->peer_multiskb);
 		return count;
 	}
 	if (!strcmp(name, "count")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
 		info->count = value;
-		sprintf(result, "OK: count=%llu", (unsigned long long) info->count);
+		sprintf(pg_result, "OK: count=%llu", info->count);
 		return count;
 	}
 	if (!strcmp(name, "src_mac_count")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-		info->src_mac_count = value;
-		sprintf(result, "OK: src_mac_count=%d", info->src_mac_count);
+		if (info->src_mac_count != value) {
+                        info->src_mac_count = value;
+                        info->cur_src_mac_offset = 0;
+                }
+		sprintf(pg_result, "OK: src_mac_count=%d", info->src_mac_count);
 		return count;
 	}
 	if (!strcmp(name, "dst_mac_count")) {
 		len = num_arg(&user_buffer[i], 10, &value);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		i += len;
-		info->dst_mac_count = value;
-		sprintf(result, "OK: dst_mac_count=%d", info->dst_mac_count);
-		return count;
-	}
-	if (!strcmp(name, "odev")) {
-		len = strn_len(&user_buffer[i], sizeof(info->outdev) - 1);
-		if (len < 0)
-			return len;
-		memset(info->outdev, 0, sizeof(info->outdev));
-		copy_from_user(info->outdev, &user_buffer[i], len);
-		i += len;
-		sprintf(result, "OK: odev=%s", info->outdev);
+		if (info->dst_mac_count != value) {
+                        info->dst_mac_count = value;
+                        info->cur_dst_mac_offset = 0;
+                }
+		sprintf(pg_result, "OK: dst_mac_count=%d", info->dst_mac_count);
 		return count;
 	}
 	if (!strcmp(name, "flag")) {
                 char f[32];
                 memset(f, 0, 32);
 		len = strn_len(&user_buffer[i], sizeof(f) - 1);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		copy_from_user(f, &user_buffer[i], len);
 		i += len;
                 if (strcmp(f, "IPSRC_RND") == 0) {
@@ -1104,6 +2282,12 @@
                 else if (strcmp(f, "!IPSRC_RND") == 0) {
                         info->flags &= ~F_IPSRC_RND;
                 }
+                else if (strcmp(f, "TXSIZE_RND") == 0) {
+                        info->flags |= F_TXSIZE_RND;
+                }
+                else if (strcmp(f, "!TXSIZE_RND") == 0) {
+                        info->flags &= ~F_TXSIZE_RND;
+                }
                 else if (strcmp(f, "IPDST_RND") == 0) {
                         info->flags |= F_IPDST_RND;
                 }
@@ -1135,69 +2319,94 @@
                         info->flags &= ~F_MACDST_RND;
                 }
                 else {
-                        sprintf(result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
+                        sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
                                 f,
-                                "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n");
+                                "IPSRC_RND, IPDST_RND, TXSIZE_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n");
                         return count;
                 }
-		sprintf(result, "OK: flags=0x%x", info->flags);
+		sprintf(pg_result, "OK: flags=0x%x", info->flags);
 		return count;
 	}
 	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
+                char buf[IP_NAME_SZ];
 		len = strn_len(&user_buffer[i], sizeof(info->dst_min) - 1);
-		if (len < 0)
-			return len;
-		memset(info->dst_min, 0, sizeof(info->dst_min));
-		copy_from_user(info->dst_min, &user_buffer[i], len);
-		if(debug)
-			printk("pg: dst_min set to: %s\n", info->dst_min);
-		i += len;
-		sprintf(result, "OK: dst_min=%s", info->dst_min);
+                if (len < 0) { return len; }
+                copy_from_user(buf, &user_buffer[i], len);
+                buf[len] = 0;
+                if (strcmp(buf, info->dst_min) != 0) {
+                        memset(info->dst_min, 0, sizeof(info->dst_min));
+                        strncpy(info->dst_min, buf, len);
+                        info->daddr_min = in_aton(info->dst_min);
+                        info->cur_daddr = info->daddr_min;
+                }
+                if(debug)
+                        printk("pg: dst_min set to: %s\n", info->dst_min);
+                i += len;
+		sprintf(pg_result, "OK: dst_min=%s", info->dst_min);
 		return count;
 	}
 	if (!strcmp(name, "dst_max")) {
+                char buf[IP_NAME_SZ];
 		len = strn_len(&user_buffer[i], sizeof(info->dst_max) - 1);
-		if (len < 0)
-			return len;
-		memset(info->dst_max, 0, sizeof(info->dst_max));
-		copy_from_user(info->dst_max, &user_buffer[i], len);
+                if (len < 0) { return len; }
+                copy_from_user(buf, &user_buffer[i], len);
+                buf[len] = 0;
+                if (strcmp(buf, info->dst_max) != 0) {
+                        memset(info->dst_max, 0, sizeof(info->dst_max));
+                        strncpy(info->dst_max, buf, len);
+                        info->daddr_max = in_aton(info->dst_max);
+                        info->cur_daddr = info->daddr_max;
+                }
 		if(debug)
 			printk("pg: dst_max set to: %s\n", info->dst_max);
 		i += len;
-		sprintf(result, "OK: dst_max=%s", info->dst_max);
+		sprintf(pg_result, "OK: dst_max=%s", info->dst_max);
 		return count;
 	}
 	if (!strcmp(name, "src_min")) {
+                char buf[IP_NAME_SZ];
 		len = strn_len(&user_buffer[i], sizeof(info->src_min) - 1);
-		if (len < 0)
-			return len;
-		memset(info->src_min, 0, sizeof(info->src_min));
-		copy_from_user(info->src_min, &user_buffer[i], len);
+                if (len < 0) { return len; }
+                copy_from_user(buf, &user_buffer[i], len);
+                buf[len] = 0;
+                if (strcmp(buf, info->src_min) != 0) {
+                        memset(info->src_min, 0, sizeof(info->src_min));
+                        strncpy(info->src_min, buf, len);
+                        info->saddr_min = in_aton(info->src_min);
+                        info->cur_saddr = info->saddr_min;
+                }
 		if(debug)
 			printk("pg: src_min set to: %s\n", info->src_min);
 		i += len;
-		sprintf(result, "OK: src_min=%s", info->src_min);
+		sprintf(pg_result, "OK: src_min=%s", info->src_min);
 		return count;
 	}
 	if (!strcmp(name, "src_max")) {
+                char buf[IP_NAME_SZ];
 		len = strn_len(&user_buffer[i], sizeof(info->src_max) - 1);
-		if (len < 0)
-			return len;
-		memset(info->src_max, 0, sizeof(info->src_max));
-		copy_from_user(info->src_max, &user_buffer[i], len);
+                if (len < 0) { return len; }
+                copy_from_user(buf, &user_buffer[i], len);
+                buf[len] = 0;
+                if (strcmp(buf, info->src_max) != 0) {
+                        memset(info->src_max, 0, sizeof(info->src_max));
+                        strncpy(info->src_max, buf, len);
+                        info->saddr_max = in_aton(info->src_max);
+                        info->cur_saddr = info->saddr_max;
+                }
 		if(debug)
 			printk("pg: src_max set to: %s\n", info->src_max);
 		i += len;
-		sprintf(result, "OK: src_max=%s", info->src_max);
+		sprintf(pg_result, "OK: src_max=%s", info->src_max);
 		return count;
 	}
-	if (!strcmp(name, "dstmac")) {
+	if (!strcmp(name, "dst_mac")) {
 		char *v = valstr;
+                unsigned char old_dmac[6];
 		unsigned char *m = info->dst_mac;
-
+                memcpy(old_dmac, info->dst_mac, 6);
+                
 		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		memset(valstr, 0, sizeof(valstr));
 		copy_from_user(valstr, &user_buffer[i], len);
 		i += len;
@@ -1219,17 +2428,24 @@
 				m++;
 				*m = 0;
 			}
-		}	  
-		sprintf(result, "OK: dstmac");
+		}
+
+                if (memcmp(old_dmac, info->dst_mac, 6) != 0) {
+                        /* Set up Dest MAC */
+                        memcpy(&(info->hh[0]), info->dst_mac, 6);
+                }
+                
+		sprintf(pg_result, "OK: dst_mac");
 		return count;
 	}
-	if (!strcmp(name, "srcmac")) {
+	if (!strcmp(name, "src_mac")) {
 		char *v = valstr;
+                unsigned char old_smac[6];
 		unsigned char *m = info->src_mac;
 
+                memcpy(old_smac, info->src_mac, 6);
 		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
-		if (len < 0)
-			return len;
+                if (len < 0) { return len; }
 		memset(valstr, 0, sizeof(valstr));
 		copy_from_user(valstr, &user_buffer[i], len);
 		i += len;
@@ -1252,28 +2468,186 @@
 				*m = 0;
 			}
 		}	  
-		sprintf(result, "OK: srcmac");
+
+                if (memcmp(old_smac, info->src_mac, 6) != 0) {
+                        /* Default to the interface's mac if not explicitly set. */
+                        if ((!(info->flags & F_SET_SRCMAC)) && info->odev) {
+                                memcpy(&(info->hh[6]), info->odev->dev_addr, 6);
+                        }
+                        else {
+                                memcpy(&(info->hh[6]), info->src_mac, 6);
+                        }
+                }
+
+                sprintf(pg_result, "OK: src_mac");
 		return count;
 	}
 
+        if (!strcmp(name, "clear_counters")) {
+                pg_clear_counters(info, 0);
+                sprintf(pg_result, "OK: Clearing counters...\n");
+                return count;
+        }
+        
 	if (!strcmp(name, "inject") || !strcmp(name, "start")) {
-		MOD_INC_USE_COUNT;
-                if (info->busy) {
+                if (info->do_run_run) {
                         strcpy(info->result, "Already running...\n");
                 }
                 else {
-                        info->busy = 1;
-                        strcpy(info->result, "Starting");
-                        inject(info);
-                        info->busy = 0;
+                        int rv;
+                        if ((rv = pg_start_interface(info->pg_thread, info)) >= 0) {
+                                strcpy(info->result, "Starting");
+                        }
+                        else {
+                                sprintf(info->result, "Error starting: %i\n", rv);
+                        }
                 }
-		MOD_DEC_USE_COUNT;
 		return count;
 	}
 
 	sprintf(info->result, "No such parameter \"%s\"", name);
 	return -EINVAL;
-}
+}/* proc_pg_if_write */
+
+
+static int proc_pg_ctrl_write(struct file *file, const char *user_buffer,
+                              unsigned long count, void *data)
+{
+	int i = 0, max, len;
+	char name[16];
+        struct pktgen_thread_info* pg_thread = NULL;
+        
+	if (count < 1) {
+		printk("Wrong command format");
+		return -EINVAL;
+	}
+  
+	max = count - i;
+	len = count_trail_chars(&user_buffer[i], max);
+        if (len < 0) { return len; }
+        i += len;
+        
+	/* Read variable name */
+
+	len = strn_len(&user_buffer[i], sizeof(name) - 1);
+        if (len < 0) { return len; }
+	memset(name, 0, sizeof(name));
+	copy_from_user(name, &user_buffer[i], len);
+	i += len;
+  
+	max = count -i;
+	len = count_trail_chars(&user_buffer[i], max);
+        if (len < 0) { return len; }
+	i += len;
+
+	if (debug)
+		printk("pg_thread: %s,%lu\n", name, count);
+
+	if (!strcmp(name, "stop")) {
+                char f[32];
+                memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+                if (len < 0) { return len; }
+		copy_from_user(f, &user_buffer[i], len);
+		i += len;
+                pg_thread = pg_find_thread(f);
+                if (pg_thread) {
+                        printk("pktgen INFO: stopping thread: %s\n", pg_thread->name);
+                        stop_pktgen_kthread(pg_thread);
+                }
+                return count;
+	}
+
+        if (!strcmp(name, "start")) {
+                char f[32];
+                memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+                if (len < 0) { return len; }
+		copy_from_user(f, &user_buffer[i], len);
+		i += len;
+                pg_add_thread_info(f);
+                return count;
+	}
+
+	return -EINVAL;
+}/* proc_pg_ctrl_write */
+
+
+static int proc_pg_thread_write(struct file *file, const char *user_buffer,
+                                unsigned long count, void *data)
+{
+	int i = 0, max, len;
+	char name[16];
+        struct pktgen_thread_info* pg_thread = (struct pktgen_thread_info*)(data);
+        char* pg_result = &(pg_thread->result[0]);
+        unsigned long value = 0;
+        
+	if (count < 1) {
+		sprintf(pg_result, "Wrong command format");
+		return -EINVAL;
+	}
+  
+	max = count - i;
+        len = count_trail_chars(&user_buffer[i], max);
+        if (len < 0) { return len; }
+	i += len;
+  
+	/* Read variable name */
+
+	len = strn_len(&user_buffer[i], sizeof(name) - 1);
+        if (len < 0) { return len; }
+	memset(name, 0, sizeof(name));
+	copy_from_user(name, &user_buffer[i], len);
+	i += len;
+  
+	max = count -i;
+	len = count_trail_chars(&user_buffer[i], max);
+        if (len < 0) { return len; }
+	i += len;
+
+	if (debug) {
+		printk("pg_thread: %s,%lu\n", name, count);
+        }
+        
+        if (!strcmp(name, "add_interface")) {
+                char f[32];
+                memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+                if (len < 0) { return len; }
+		copy_from_user(f, &user_buffer[i], len);
+		i += len;
+                pg_add_interface_info(pg_thread, f);
+                return count;
+	}
+
+        if (!strcmp(name, "rem_interface")) {
+                struct pktgen_interface_info* info = NULL;
+                char f[32];
+                memset(f, 0, 32);
+                len = strn_len(&user_buffer[i], sizeof(f) - 1);
+                if (len < 0) { return len; }
+                copy_from_user(f, &user_buffer[i], len);
+                i += len;
+                info = pg_find_interface(pg_thread, f);
+                if (info) {
+                        pg_rem_interface_info(pg_thread, info);
+                        return count;
+                }
+                else {
+                        printk("ERROR:  That interface is not found.\n");
+                        return -ENODEV;
+                }
+	}
+
+        if (!strcmp(name, "max_before_softirq")) {
+                len = num_arg(&user_buffer[i], 10, &value);
+                if (len < 0) { return len; } pg_thread->max_before_softirq = value;
+                return count;
+	}
+
+
+	return -EINVAL;
+}/* proc_pg_thread_write */
 
 
 int create_proc_dir(void)
@@ -1282,109 +2656,348 @@
         /*  does proc_dir already exists */
         len = strlen(PG_PROC_DIR);
 
-        for (proc_dir = proc_net->subdir; proc_dir;
-             proc_dir=proc_dir->next) {
-                if ((proc_dir->namelen == len) &&
-                    (! memcmp(proc_dir->name, PG_PROC_DIR, len)))
+        for (pg_proc_dir = proc_net->subdir; pg_proc_dir; pg_proc_dir=pg_proc_dir->next) {
+                if ((pg_proc_dir->namelen == len) &&
+                    (! memcmp(pg_proc_dir->name, PG_PROC_DIR, len))) {
                         break;
+                }
+        }
+        
+        if (!pg_proc_dir) {
+                pg_proc_dir = create_proc_entry(PG_PROC_DIR, S_IFDIR, proc_net);
         }
-        if (!proc_dir)
-                proc_dir = create_proc_entry(PG_PROC_DIR, S_IFDIR, proc_net);
-        if (!proc_dir) return -ENODEV;
-        return 1;
+        
+        if (!pg_proc_dir) {
+                return -ENODEV;
+        }
+        
+        return 0;
 }
 
 int remove_proc_dir(void)
 {
         remove_proc_entry(PG_PROC_DIR, proc_net);
-        return 1;
+        return 0;
 }
 
-static int __init init(void)
-{
+static struct pktgen_interface_info* pg_find_interface(struct pktgen_thread_info* pg_thread,
+                                                       const char* ifname) {
+        struct pktgen_interface_info* rv = NULL;
+        pg_lock(pg_thread, __FUNCTION__);
+
+        if (pg_thread->cur_if && (strcmp(pg_thread->cur_if->ifname, ifname) == 0)) {
+                rv = pg_thread->cur_if;
+                goto found;
+        }
+        
+        rv = pg_thread->running_if_infos;
+        while (rv) {
+                if (strcmp(rv->ifname, ifname) == 0) {
+                        goto found;
+                }
+                rv = rv->next;
+        }
+
+        rv = pg_thread->stopped_if_infos;
+        while (rv) {
+                if (strcmp(rv->ifname, ifname) == 0) {
+                        goto found;
+                }
+                rv = rv->next;
+        }
+ found:
+        pg_unlock(pg_thread, __FUNCTION__);
+        return rv;
+}/* pg_find_interface */
+
+
+static int pg_add_interface_info(struct pktgen_thread_info* pg_thread, const char* ifname) {
+        struct pktgen_interface_info* i = pg_find_interface(pg_thread, ifname);
+        if (!i) {
+                i = kmalloc(sizeof(struct pktgen_interface_info), GFP_KERNEL);
+                if (!i) {
+                        return -ENOMEM;
+                }
+                memset(i, 0, sizeof(struct pktgen_interface_info));
+                
+                i->min_pkt_size = ETH_ZLEN;
+                i->max_pkt_size = ETH_ZLEN;
+                i->nfrags = 0;
+                i->multiskb = pg_multiskb_d;
+                i->peer_multiskb = 0;
+                i->ipg = pg_ipg_d;
+                i->count = pg_count_d;
+                i->sofar = 0;
+                i->hh[12] = 0x08; /* fill in protocol.  Rest is filled in later. */
+                i->hh[13] = 0x00;
+                i->udp_src_min = 9; /* sink NULL */
+                i->udp_src_max = 9;
+                i->udp_dst_min = 9;
+                i->udp_dst_max = 9;
+                i->rcv = pktgen_receive;
+
+                strncpy(i->ifname, ifname, 31);
+                sprintf(i->fname, "net/%s/%s", PG_PROC_DIR, ifname);
+
+                if (! pg_setup_interface(i)) {
+                        printk("ERROR: pg_setup_interface failed.\n");
+                        kfree(i);
+                        return -ENODEV;
+                }
+
+                i->proc_ent = create_proc_entry(i->fname, 0600, 0);
+                if (!i->proc_ent) {
+                        printk("pktgen: Error: cannot create %s procfs entry.\n", i->fname);
+                        kfree(i);
+                        return -EINVAL;
+                }
+                i->proc_ent->read_proc = proc_pg_if_read;
+                i->proc_ent->write_proc = proc_pg_if_write;
+                i->proc_ent->data = (void*)(i);
+
+                return add_interface_to_thread(pg_thread, i);
+        }
+        else {
+                printk("ERROR: interface already exists.\n");
+                return -EBUSY;
+        }
+}/* pg_add_interface_info */
+
+
+/* return the first !in_use thread structure */
+static struct pktgen_thread_info* pg_gc_thread_list_helper(void) {
+        struct pktgen_thread_info* rv = NULL;
+        
+        pg_lock_thread_list(__FUNCTION__);
+
+        rv = pktgen_threads;
+        while (rv) {
+                if (!rv->in_use) {
+                        break;
+                }
+                rv = rv->next;
+        }
+        pg_unlock_thread_list(__FUNCTION__);
+        return rv;
+}/* pg_gc_thread_list_helper */
+
+static void pg_gc_thread_list(void) {
+        struct pktgen_thread_info* t = NULL;
+        struct pktgen_thread_info* w = NULL;
+
+        while ((t = pg_gc_thread_list_helper())) {
+                pg_lock_thread_list(__FUNCTION__);
+                if (pktgen_threads == t) {
+                        pktgen_threads = t->next;
+                        kfree(t);
+                }
+                else {
+                        w = pktgen_threads;
+                        while (w) {
+                                if (w->next == t) {
+                                        w->next = t->next;
+                                        t->next = NULL;
+                                        kfree(t);
+                                        break;
+                                }
+                                w = w->next;
+                        }
+                }
+                pg_unlock_thread_list(__FUNCTION__);
+        }
+}/* pg_gc_thread_list */        
+
+
+static struct pktgen_thread_info* pg_find_thread(const char* name) {
+        struct pktgen_thread_info* rv = NULL;
+
+        pg_gc_thread_list();
+        
+        pg_lock_thread_list(__FUNCTION__);
+
+        rv = pktgen_threads;
+        while (rv) {
+                if (strcmp(rv->name, name) == 0) {
+                        break;
+                }
+                rv = rv->next;
+        }
+        pg_unlock_thread_list(__FUNCTION__);
+        return rv;
+}/* pg_find_thread */
+
+
+static int pg_add_thread_info(const char* name) {
+        struct pktgen_thread_info* pg_thread = NULL;
+
+        if (strlen(name) > 31) {
+                printk("pktgen ERROR:  Thread name cannot be more than 31 characters.\n");
+                return -EINVAL;
+        }
+        
+        if (pg_find_thread(name)) {
+                printk("pktgen ERROR: Thread: %s already exists\n", name);
+                return -EINVAL;
+        }
+
+        pg_thread = (struct pktgen_thread_info*)(kmalloc(sizeof(struct pktgen_thread_info), GFP_KERNEL));
+        if (!pg_thread) {
+                printk("pktgen: ERROR: out of memory, can't create new thread.\n");
+                return -ENOMEM;
+        }
+
+        memset(pg_thread, 0, sizeof(struct pktgen_thread_info));
+        strcpy(pg_thread->name, name);
+        spin_lock_init(&(pg_thread->pg_threadlock));
+        pg_thread->in_use = 1;
+        pg_thread->max_before_softirq = 100;
+        
+        sprintf(pg_thread->fname, "net/%s/%s", PG_PROC_DIR, pg_thread->name);
+        pg_thread->proc_ent = create_proc_entry(pg_thread->fname, 0600, 0);
+        if (!pg_thread->proc_ent) {
+                printk("pktgen: Error: cannot create %s procfs entry.\n", pg_thread->fname);
+                kfree(pg_thread);
+                return -EINVAL;
+        }
+        pg_thread->proc_ent->read_proc = proc_pg_thread_read;
+        pg_thread->proc_ent->write_proc = proc_pg_thread_write;
+        pg_thread->proc_ent->data = (void*)(pg_thread);
+
+        pg_thread->next = pktgen_threads;
+        pktgen_threads = pg_thread;
+
+        /* Start the thread running */
+        start_pktgen_kthread(pg_thread);
+        
+        return 0;
+}/* pg_add_thread_info */
+
+
+/* interface_info must be stopped and on the pg_thread stopped list
+ */
+static int pg_rem_interface_info(struct pktgen_thread_info* pg_thread,
+                                 struct pktgen_interface_info* info) {
+        if (info->do_run_run) {
+                printk("WARNING: trying to remove a running interface, stopping it now.\n");
+                pg_stop_interface(pg_thread, info);
+        }
+        
+        /* Disassociate from the interface */
+        check_remove_device(info);
+        
+        /* Clean up proc file system */
+        if (strlen(info->fname)) {
+                remove_proc_entry(info->fname, NULL);
+        }
+        
+        pg_lock(pg_thread, __FUNCTION__);
+        {
+                /* Remove from the stopped list */
+                struct pktgen_interface_info* p = pg_thread->stopped_if_infos;
+                if (p == info) {
+                        pg_thread->stopped_if_infos = p->next;
+                        p->next = NULL;
+                }
+                else {
+                        while (p) {
+                                if (p->next == info) {
+                                        p->next = p->next->next;
+                                        info->next = NULL;
+                                        break;
+                                }
+                                p = p->next;
+                        }
+                }
+                
+                info->pg_thread = NULL;
+        }
+        pg_unlock(pg_thread, __FUNCTION__);
+        
+        return 0;
+}/* pg_rem_interface_info */
+
+
+static int __init pg_init(void) {
         int i;
 	printk(version);
+
+        /* Initialize our global variables */
+        for (i = 0; i<PG_INFO_HASH_MAX; i++) {
+                pg_info_hash[i] = NULL;
+        }
+        module_fname[0] = 0;
+        
+        if (handle_pktgen_hook) {
+                printk("pktgen: ERROR: pktgen is already loaded it seems..\n");
+                /* Already loaded */
+                return -EEXIST;
+        }
+
 	cycles_calibrate();
-	if (cpu_speed == 0) {
+	if (pg_cycles_per_us == 0) {
 		printk("pktgen: Error: your machine does not have working cycle counter.\n");
 		return -EINVAL;
 	}
 
 	create_proc_dir();
 
-        for (i = 0; i<MAX_PKTGEN; i++) {
-                memset(&(pginfos[i]), 0, sizeof(pginfos[i]));
-                pginfos[i].pkt_size = ETH_ZLEN;
-                pginfos[i].nfrags = 0;
-                pginfos[i].clone_skb = clone_skb_d;
-                pginfos[i].ipg = ipg_d;
-                pginfos[i].count = count_d;
-                pginfos[i].sofar = 0;
-                pginfos[i].hh[12] = 0x08; /* fill in protocol.  Rest is filled in later. */
-                pginfos[i].hh[13] = 0x00;
-                pginfos[i].udp_src_min = 9; /* sink NULL */
-                pginfos[i].udp_src_max = 9;
-                pginfos[i].udp_dst_min = 9;
-                pginfos[i].udp_dst_max = 9;
-                
-                sprintf(pginfos[i].fname, "net/%s/pg%i", PG_PROC_DIR, i);
-                pginfos[i].proc_ent = create_proc_entry(pginfos[i].fname, 0600, 0);
-                if (!pginfos[i].proc_ent) {
-                        printk("pktgen: Error: cannot create net/%s/pg procfs entry.\n", PG_PROC_DIR);
-                        goto cleanup_mem;
-                }
-                pginfos[i].proc_ent->read_proc = proc_read;
-                pginfos[i].proc_ent->write_proc = proc_write;
-                pginfos[i].proc_ent->data = (void*)(long)(i);
-
-                sprintf(pginfos[i].busy_fname, "net/%s/pg_busy%i",  PG_PROC_DIR, i);
-                pginfos[i].busy_proc_ent = create_proc_entry(pginfos[i].busy_fname, 0, 0);
-                if (!pginfos[i].busy_proc_ent) {
-                        printk("pktgen: Error: cannot create net/%s/pg_busy procfs entry.\n", PG_PROC_DIR);
-                        goto cleanup_mem;
-                }
-                pginfos[i].busy_proc_ent->read_proc = proc_busy_read;
-                pginfos[i].busy_proc_ent->data = (void*)(long)(i);
+        sprintf(module_fname, "net/%s/pgctrl", PG_PROC_DIR);
+        module_proc_ent = create_proc_entry(module_fname, 0600, 0);
+        if (!module_proc_ent) {
+                printk("pktgen: Error: cannot create %s procfs entry.\n", module_fname);
+                return -EINVAL;
         }
-        return 0;
-        
-cleanup_mem:
-        for (i = 0; i<MAX_PKTGEN; i++) {
-                if (strlen(pginfos[i].fname)) {
-                        remove_proc_entry(pginfos[i].fname, NULL);
-                }
-                if (strlen(pginfos[i].busy_fname)) {
-                        remove_proc_entry(pginfos[i].busy_fname, NULL);
-                }
+        module_proc_ent->read_proc = proc_pg_ctrl_read;
+        module_proc_ent->write_proc = proc_pg_ctrl_write;
+        module_proc_ent->proc_fops = &(pktgen_fops); /* IOCTL hook */
+        module_proc_ent->data = NULL;
+
+	/* Register us to receive netdevice events */
+	register_netdevice_notifier(&pktgen_notifier_block);
+        
+        /* Register handler */
+        handle_pktgen_hook = pktgen_receive;
+
+        for (i = 0; i<pg_thread_count; i++) {
+                char buf[30];
+                sprintf(buf, "kpktgend_%i", i);
+                pg_add_thread_info(buf);
         }
-	return -ENOMEM;
-}
+                
+        
+        return 0;        
+}/* pg_init */
 
 
-static void __exit cleanup(void)
+static void __exit pg_cleanup(void)
 {
-        int i;
-        for (i = 0; i<MAX_PKTGEN; i++) {
-                if (strlen(pginfos[i].fname)) {
-                        remove_proc_entry(pginfos[i].fname, NULL);
-                }
-                if (strlen(pginfos[i].busy_fname)) {
-                        remove_proc_entry(pginfos[i].busy_fname, NULL);
-                }
+        /* Un-register handler */
+        handle_pktgen_hook = NULL;
+
+        /* Stop all interfaces & threads */        
+        while (pktgen_threads) {
+                stop_pktgen_kthread(pktgen_threads);
         }
+        
+        /* Un-register us from receiving netdevice events */
+	unregister_netdevice_notifier(&pktgen_notifier_block);
+
+        /* Clean up proc file system */
+        remove_proc_entry(module_fname, NULL);
+        
 	remove_proc_dir();
+
 }
 
-module_init(init);
-module_exit(cleanup);
 
-MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
+module_init(pg_init);
+module_exit(pg_cleanup);
+
+MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se, Ben Greear<greearb@candelatech.com>");
 MODULE_DESCRIPTION("Packet Generator tool");
 MODULE_LICENSE("GPL");
-MODULE_PARM(count_d, "i");
-MODULE_PARM(ipg_d, "i");
-MODULE_PARM(cpu_speed, "i");
-MODULE_PARM(clone_skb_d, "i");
-
-
-
+MODULE_PARM(pg_count_d, "i");
+MODULE_PARM(pg_ipg_d, "i");
+MODULE_PARM(pg_thread_count, "i");
+MODULE_PARM(pg_multiskb_d, "i");
+MODULE_PARM(debug, "i");
--- linux-2.4.21/net/core/pktgen.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.21.amds/net/core/pktgen.h	2003-07-30 16:20:41.000000000 -0700
@@ -0,0 +1,241 @@
+/* -*-linux-c-*-
+ * $Id: candela_2.4.21.patch,v 1.4 2003/09/30 21:05:04 greear Exp $
+ * pktgen.c: Packet Generator for performance evaluation.
+ *
+ * See pktgen.c for details of changes, etc.
+*/
+
+
+#ifndef PKTGEN_H_INCLUDE_KERNEL__
+#define PKTGEN_H_INCLUDE_KERNEL__
+
+
+/* The buckets are exponential in 'width' */
+#define LAT_BUCKETS_MAX 32
+
+#define IP_NAME_SZ 32
+
+/* Keep information per interface */
+struct pktgen_interface_info {
+        char ifname[32];
+        
+        /* Parameters */
+
+        /* If min != max, then we will either do a linear iteration, or
+         * we will do a random selection from within the range.
+         */
+        __u32 flags;     
+
+#define F_IPSRC_RND   (1<<0)  /* IP-Src Random  */
+#define F_IPDST_RND   (1<<1)  /* IP-Dst Random  */
+#define F_UDPSRC_RND  (1<<2)  /* UDP-Src Random */
+#define F_UDPDST_RND  (1<<3)  /* UDP-Dst Random */
+#define F_MACSRC_RND  (1<<4)  /* MAC-Src Random */
+#define F_MACDST_RND  (1<<5)  /* MAC-Dst Random */
+#define F_SET_SRCMAC  (1<<6)  /* Specify-Src-Mac 
+				 (default is to use Interface's MAC Addr) */
+#define F_SET_SRCIP   (1<<7)  /*  Specify-Src-IP
+				  (default is to use Interface's IP Addr) */ 
+#define F_TXSIZE_RND  (1<<8)  /* Transmit size is random */
+
+        int min_pkt_size;    /* = ETH_ZLEN; */
+        int max_pkt_size;    /* = ETH_ZLEN; */
+        int nfrags;
+        __u32 ipg;    /* Default Interpacket gap in nsec */
+        __u64 count;  /* Default No packets to send */
+        __u64 sofar;  /* How many pkts we've sent so far */
+        __u64 tx_bytes; /* How many bytes we've transmitted */
+        __u64 errors;    /* Errors when trying to transmit, pkts will be re-sent */
+
+        /* runtime counters relating to multiskb */
+        __u64 next_tx_ns;          /* timestamp of when to tx next, in nano-seconds */
+        
+        __u64 fp;
+        __u32 fp_tmp;
+	int last_ok;           /* Was last skb sent? 
+	                        * Or a failed transmit of some sort?  This will keep
+                                * sequence numbers in order, for example.
+                                */
+        /* Fields relating to receiving pkts */
+        __u32 last_seq_rcvd;
+        __u64 ooo_rcvd;  /* out-of-order packets received */
+        __u64 pkts_rcvd; /* packets received */
+        __u64 dup_rcvd;  /* duplicate packets received */
+        __u64 bytes_rcvd; /* total bytes received, as obtained from the skb */
+        __u64 seq_gap_rcvd; /* how many gaps we received.  This correlates with
+                             * dropped pkts, except perhaps in cases where we also
+                             * have re-ordered pkts.  In that case, you have to tie-break
+                             * by looking at send v/s received pkt totals for the interfaces
+                             * involved.
+                             */
+        __u64 non_pg_pkts_rcvd; /* Count how many non-pktgen skb's we are sent to check. */
+        __u64 dup_since_incr; /* How many duplicates since the last seq number increment,
+                               * used to detect gaps when multiskb > 1
+                               */
+        int avg_latency; /* in micro-seconds */
+        int min_latency;
+        int max_latency;
+        __u64 latency_bkts[LAT_BUCKETS_MAX];
+        __u64 pkts_rcvd_since_clear; /* with regard to clearing/resetting the latency logic */
+        
+        __u64 started_at; /* micro-seconds */
+        __u64 stopped_at; /* micro-seconds */
+        __u64 idle_acc;
+        __u32 seq_num;
+        
+        int multiskb; /* Use multiple SKBs during packet gen.  If this number
+                          * is greater than 1, then that many coppies of the same
+                          * packet will be sent before a new packet is allocated.
+                          * For instance, if you want to send 1024 identical packets
+                          * before creating a new packet, set multiskb to 1024.
+                          */
+        int peer_multiskb; /* Helps detect drops when multiskb > 1 on peer */
+        int do_run_run;  /* if this changes to false, the test will stop */
+        
+        char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
+        char dst_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
+        char src_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
+        char src_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
+
+        /* If we're doing ranges, random or incremental, then this
+         * defines the min/max for those ranges.
+         */
+        __u32 saddr_min; /* inclusive, source IP address */
+        __u32 saddr_max; /* exclusive, source IP address */
+        __u32 daddr_min; /* inclusive, dest IP address */
+        __u32 daddr_max; /* exclusive, dest IP address */
+
+        __u16 udp_src_min; /* inclusive, source UDP port */
+        __u16 udp_src_max; /* exclusive, source UDP port */
+        __u16 udp_dst_min; /* inclusive, dest UDP port */
+        __u16 udp_dst_max; /* exclusive, dest UDP port */
+
+        __u32 src_mac_count; /* How many MACs to iterate through */
+        __u32 dst_mac_count; /* How many MACs to iterate through */
+        
+        unsigned char dst_mac[6];
+        unsigned char src_mac[6];
+        
+        __u32 cur_dst_mac_offset;
+        __u32 cur_src_mac_offset;
+        __u32 cur_saddr;
+        __u32 cur_daddr;
+        __u16 cur_udp_dst;
+        __u16 cur_udp_src;
+        __u32 cur_pkt_size;
+        
+        __u8 hh[14];
+        /* = { 
+           0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB, 
+           
+           We fill in SRC address later
+           0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+           0x08, 0x00
+           };
+        */
+        __u16 pad; /* pad out the hh struct to an even 16 bytes */
+        char result[512];
+        /* proc file names */
+        char fname[80];
+
+        /* End of stuff that user-space should care about */
+
+        struct sk_buff* skb; /* skb we are to transmit next, mainly used for when we
+                              * are transmitting the same one multiple times
+                              */
+        struct pktgen_thread_info* pg_thread; /* the owner */
+
+        struct pktgen_interface_info* next_hash; /* Used for chaining in the hash buckets */
+        struct pktgen_interface_info* next; /* Used for chaining in the thread's run-queue */
+
+        
+
+        struct net_device* odev; /* The out-going device.  Note that the device should
+                                  * have it's pg_info pointer pointing back to this
+                                  * have its pg_info pointer pointing back to this
+                                  * the out-going device name (not when the inject is
+                                  * started as it used to do.)
+                                  */
+        
+        struct proc_dir_entry *proc_ent;
+        
+        int (*rcv) (struct sk_buff *skb);
+}; /* pktgen_interface_info */
+
+
+struct pktgen_hdr {
+        __u32 pgh_magic;
+        __u32 seq_num;
+        struct timeval timestamp;
+};
+
+
+/* Define some IOCTLs.  Just picking random numbers, basically. */
+#define GET_PKTGEN_INTERFACE_INFO 0x7450
+
+struct pktgen_ioctl_info {
+        char thread_name[32];
+        char interface_name[32];
+        struct pktgen_interface_info info;
+};
+
+
+struct pktgen_thread_info {
+        struct pktgen_interface_info* running_if_infos; /* list of running interfaces, current will
+                                                         * not be in this list.
+                                                         */
+        struct pktgen_interface_info* stopped_if_infos; /* list of stopped interfaces. */
+        struct pktgen_interface_info* cur_if;           /* Current (running) interface we are servicing in
+                                                         * the main thread loop.
+                                                         */
+
+        int running_if_sz;
+        struct pktgen_thread_info* next;
+        char name[32];
+        char fname[128]; /* name of proc file */
+        struct proc_dir_entry *proc_ent;
+        char result[512];
+        u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
+        
+        spinlock_t pg_threadlock;
+        
+        /* Linux task structure of thread */
+        struct task_struct *thread;
+        
+        /* Task queue need to launch thread */
+        struct tq_struct tq;
+        
+        /* function to be started as thread */
+        void (*function) (struct pktgen_thread_info *kthread);
+        
+        /* semaphore needed on start and creation of thread. */
+        struct semaphore startstop_sem;
+
+        /* public data */
+
+        /* queue thread is waiting on. Gets initialized by
+           init_kthread, can be used by thread itself.
+        */
+        wait_queue_head_t queue;
+        
+        /* flag to tell thread whether to die or not.
+           When the thread receives a signal, it must check
+           the value of terminate and call exit_kthread and terminate
+           if set.
+        */
+        int terminate;
+
+        int in_use; /* if 0, then we can delete or re-use this struct */
+        
+        /* additional data to pass to kernel thread */
+        void *arg;
+};/* struct pktgen_thread_info */
+
+/* Defined in dev.c */
+extern int (*handle_pktgen_hook)(struct sk_buff *skb);
+
+/* Returns < 0 if the skb is not a pktgen buffer. */
+int pktgen_receive(struct sk_buff* skb);
+
+
+#endif
--- linux-2.4.21/net/netsyms.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/netsyms.c	2003-07-30 16:20:41.000000000 -0700
@@ -30,6 +30,7 @@
 #include <net/pkt_sched.h>
 #include <net/scm.h>
 #include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #ifdef CONFIG_NET_DIVERT
@@ -90,6 +91,14 @@
 extern int sysctl_max_syn_backlog;
 #endif
 
+#ifdef CONFIG_NET_PKTGEN_MODULE
+#warning "EXPORT_SYMBOL(handle_pktgen_hook);";
+extern int (*handle_pktgen_hook)(struct sk_buff *skb);
+/* Would be OK to export as EXPORT_SYMBOL_GPL, but can't get that to work for
+ * some reason. --Ben */
+EXPORT_SYMBOL(handle_pktgen_hook);
+#endif
+
 /* Skbuff symbols. */
 EXPORT_SYMBOL(skb_over_panic);
 EXPORT_SYMBOL(skb_under_panic);
@@ -234,6 +243,13 @@
 #endif
 #endif
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+EXPORT_SYMBOL(macvlan_handle_frame_hook);
+#ifdef CONFIG_INET
+EXPORT_SYMBOL(macvlan_ioctl_hook);
+#endif
+#endif
+
 #ifdef CONFIG_NET_DIVERT
 EXPORT_SYMBOL(alloc_divert_blk);
 EXPORT_SYMBOL(free_divert_blk);
--- linux-2.4.21/Documentation/networking/pktgen.txt	2003-06-13 07:51:29.000000000 -0700
+++ linux-2.4.21.amds/Documentation/networking/pktgen.txt	2003-07-30 16:20:41.000000000 -0700
@@ -1,76 +1,118 @@
 How to use the Linux packet generator module.
 
-1. Enable CONFIG_NET_PKTGEN to compile and build pktgen.o, install it
-   in the place where insmod may find it.
-2. Cut script "ipg" (see below).
-3. Edit script to set preferred device and destination IP address.
-3a.  Create more scripts for different interfaces.  Up to thirty-two
-     pktgen processes can be configured and run at once by using the
-     32 /proc/net/pktgen/pg* files.
-4. Run in shell: ". ipg"
-5. After this two commands are defined:
-   A. "pg" to start generator and to get results.
-   B. "pgset" to change generator parameters. F.e.
-      pgset "multiskb 1"      use multiple SKBs for packet generation
-      pgset "multiskb 0"      use single SKB for all transmits
-      pgset "pkt_size 9014"   sets packet size to 9014
-      pgset "frags 5"         packet will consist of 5 fragments
-      pgset "count 200000"    sets number of packets to send, set to zero
-                              for continious sends untill explicitly
-                              stopped.
-      pgset "ipg 5000"        sets artificial gap inserted between packets
-                              to 5000 nanoseconds
-      pgset "dst 10.0.0.1"    sets IP destination address
-                              (BEWARE! This generator is very aggressive!)
-      pgset "dst_min 10.0.0.1"            Same as dst
-      pgset "dst_max 10.0.0.254"          Set the maximum destination IP.
-      pgset "src_min 10.0.0.1"            Set the minimum (or only) source IP.
-      pgset "src_max 10.0.0.254"          Set the maximum source IP.
-      pgset "dstmac 00:00:00:00:00:00"    sets MAC destination address
-      pgset "srcmac 00:00:00:00:00:00"    sets MAC source address
-      pgset "src_mac_count 1" Sets the number of MACs we'll range through.  The
-                              'minimum' MAC is what you set with srcmac.
-      pgset "dst_mac_count 1" Sets the number of MACs we'll range through.  The
-                              'minimum' MAC is what you set with dstmac.
-      pgset "flag [name]"     Set a flag to determine behaviour.  Current flags
-                              are: IPSRC_RND #IP Source is random (between min/max),
-                                   IPDST_RND, UDPSRC_RND,
-                                   UDPDST_RND, MACSRC_RND, MACDST_RND 
-      pgset "udp_src_min 9"   set UDP source port min, If < udp_src_max, then
-                              cycle through the port range.
-      pgset "udp_src_max 9"   set UDP source port max.
-      pgset "udp_dst_min 9"   set UDP destination port min, If < udp_dst_max, then
-                              cycle through the port range.
-      pgset "udp_dst_max 9"   set UDP destination port max.
-      pgset stop    	      aborts injection
+1.  Enable CONFIG_NET_PKTGEN to compile and build pktgen.o, install it
+     in the place where insmod may find it.
+2.  Add an interface to the kpktgend_0 thread:
+     echo "add_interface eth1" > /proc/net/pktgen/kpktgend_0
+2a. Add more interfaces as needed.
+3.  Configure interfaces by setting values as defined below.  The
+     general strategy is: echo "command" > /proc/net/pktgen/[device]
+     For example: echo "multiskb 100" > /proc/net/pktgen/eth1
+
+      "multiskb 100"    Will send 100 identical pkts before creating
+                              new packet with new timestamp, etc.
+      "multiskb 0"      Will create new skb for all transmits.
+      "peer_multiskb 100"   Helps us determine dropped & dup pkts, sender's multiskb.
+      "min_pkt_size 60"     sets packet minimum size to 60 (64 counting CRC)
+      "max_pkt_size 1514"   sets packet size to 1514 (1518 counting CRC)
+      "frags 5"         packet will consist of 5 fragments
+      "count 200000"    sets number of packets to send, set to zero
+                         for continuous sends until explicitly
+                         stopped.
+      "ipg 5000"        sets artificial gap inserted between packets
+                         to 5000 nanoseconds
+      "dst 10.0.0.1"    sets IP destination address
+                         (BEWARE! This generator is very aggressive!)
+      "dst_min 10.0.0.1"            Same as dst
+      "dst_max 10.0.0.254"          Set the maximum destination IP.
+      "src_min 10.0.0.1"            Set the minimum (or only) source IP.
+      "src_max 10.0.0.254"          Set the maximum source IP.
+      "dst_mac 00:00:00:00:00:00"   sets MAC destination address
+      "src_mac 00:00:00:00:00:00"   sets MAC source address
+      "src_mac_count 1" Sets the number of MACs we'll range through.  The
+                         'minimum' MAC is what you set with srcmac.
+      "dst_mac_count 1" Sets the number of MACs we'll range through.  The
+                         'minimum' MAC is what you set with dstmac.
+      "flag [name]"     Set a flag to determine behaviour.  Prepend '!' to the
+                         flag to turn it off.  Current flags are:
+                          IPSRC_RND #IP Source is random (between min/max),
+                          IPDST_RND, UDPSRC_RND, TXSIZE_RND
+                          UDPDST_RND, MACSRC_RND, MACDST_RND 
+      "udp_src_min 9"   set UDP source port min, If < udp_src_max, then
+                         cycle through the port range.
+      "udp_src_max 9"   set UDP source port max.
+      "udp_dst_min 9"   set UDP destination port min, If < udp_dst_max, then
+                         cycle through the port range.
+      "udp_dst_max 9"   set UDP destination port max.
+      "stop"            Stops this interface from transmitting.  It will still
+                         receive packets and record their latency, etc.
+      "start"           Starts the interface transmitting packets.
+      "clear_counters"  Clear the packet and latency counters.
+
+You can start and stop threads by echoing commands to the /proc/net/pktgen/pgctrl
+file.  Supported commands are:
+      "stop kpktgend_0"  Stop thread 0.
+      "start threadXX"   Start (create) thread XX.  You may wish to create one thread
+                          per CPU.
       
-  Also, ^C aborts generator.
 
----- cut here
+You can manage the interfaces on a thread by echoing commands to
+the /proc/net/pktgen/[thread] file.  Supported commands are:
+      "add_interface eth1"  Add interface eth1 to the chosen thread.
+      "rem_interface eth1"  Remove interface eth1 from the chosen thread.
+      "max_before_softirq"  Maximum loops before we cause a call to do_softirq,
+                             this is to help mitigate starvation on the RX side.
+
+
+You can examine various counters and parameters by reading the appropriate
+proc file:
+
+[root@localhost lanforge]# cat /proc/net/pktgen/kpktgend_0 
+VERSION-1
+Name: kpktgend_0
+Current: eth2
+Running: eth6 
+Stopped: eth1 eth5 
+Result: NA
+
+
+[root@localhost lanforge]# cat /proc/net/pktgen/eth2
+VERSION-1
+Params: count 0  pkt_size: 300  frags: 0  ipg: 0  multiskb: 0 ifname "eth2"
+     dst_min: 172.2.1.1  dst_max: 172.2.1.6  src_min: 172.1.1.4  src_max: 172.1.1.8
+     src_mac: 00:00:00:00:00:00  dst_mac: 00:00:00:00:00:00
+     udp_src_min: 99  udp_src_max: 1005  udp_dst_min: 9  udp_dst_max: 9
+     src_mac_count: 0  dst_mac_count: 0
+     Flags: IPSRC_RND  IPDST_RND  UDPSRC_RND  
+Current:
+     pkts-sofar: 158835950  errors: 0
+     started: 1026024703542360us  elapsed: 4756326418us
+     idle: 1723232054307ns  next_tx: 27997154666566(-3202934)ns
+     seq_num: 158835951  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
+     cur_saddr: 0x60101ac  cur_daddr: 0x30102ac  cur_udp_dst: 9  cur_udp_src: 966
+     pkts_rcvd: 476002  bytes_rcvd: 159929440  last_seq_rcvd: 476002  ooo_rcvd: 0
+     dup_rcvd: 0  seq_gap_rcvd(dropped): 0  non_pg_rcvd: 0
+     avg_latency: 41us  min_latency: 40us  max_latency: 347us  pkts_in_sample: 476002
+      Buckets(us) [ 0  0  0  0  0  0  311968  164008  23  3  0  0  0  0  0  0  0  0  0  0  ]
+Result: OK: ipg=0
+
+[root@localhost lanforge]# cat /proc/net/pktgen/eth6
+VERSION-1
+Params: count 0  pkt_size: 300  frags: 0  ipg: 11062341  multiskb: 0 ifname "eth6"
+     dst_min: 90  dst_max: 90  src_min: 90  src_max: 90
+     src_mac: 00:00:00:00:00:00  dst_mac: 00:00:00:00:00:00
+     udp_src_min: 9  udp_src_max: 9  udp_dst_min: 9  udp_dst_max: 9
+     src_mac_count: 0  dst_mac_count: 0
+     Flags: 
+Current:
+     pkts-sofar: 479940  errors: 0
+     started: 1026024703542707us  elapsed: 4795667656us
+     idle: 109585100905ns  next_tx: 28042807786397(-79364)ns
+     seq_num: 479941  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
+     cur_saddr: 0x0  cur_daddr: 0x0  cur_udp_dst: 9  cur_udp_src: 9
+     pkts_rcvd: 160323509  bytes_rcvd: 50392479910  last_seq_rcvd: 160323509  ooo_rcvd: 0
+     dup_rcvd: 0  seq_gap_rcvd(dropped): 0  non_pg_rcvd: 0
+     avg_latency: 230us  min_latency: 36us  max_latency: 1837us  pkts_in_sample: 160323509
+      Buckets(us) [ 0  0  0  0  0  0  287725  2618755  54130607  98979415  80358  4226649  0  0  0  0  0  0  0  0  ]
+Result: OK: ipg=11062341
 
-#! /bin/sh
-
-modprobe pktgen
-
-PGDEV=/proc/net/pktgen/pg0
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-function pg() {
-    echo inject > $PGDEV
-    cat $PGDEV
-}
-
-pgset "odev eth0"
-pgset "dst 0.0.0.0"
-
----- cut here
--- linux-2.4.21/include/linux/if_macvlan.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.21.amds/include/linux/if_macvlan.h	2003-07-30 16:28:27.000000000 -0700
@@ -0,0 +1,57 @@
+/* -*- linux-c -*- */
+#ifndef _LINUX_IF_MACVLAN_H
+#define _LINUX_IF_MACVLAN_H
+
+/* the ioctl commands */
+
+/* actions */
+#define MACVLAN_ENABLE         1
+#define MACVLAN_DISABLE        2
+#define MACVLAN_ADD            3
+#define MACVLAN_DEL            4
+#define MACVLAN_BIND           5
+#define MACVLAN_UNBIND         6
+
+/* informative */
+#define MACVLAN_GET_NUM_PORTS  7
+#define MACVLAN_GET_PORT_NAME  8
+#define MACVLAN_GET_NUM_VLANS  9
+#define MACVLAN_GET_VLAN_NAME  10
+#define MACVLAN_GET_NUM_MACS   11
+#define MACVLAN_GET_MAC_NAME   12
+
+#define MACVLAN_SET_PORT_FLAGS 13
+#define MACVLAN_GET_PORT_FLAGS 14
+
+/* If this IOCTL succeeds, we are a MAC-VLAN interface, otherwise, we are not. */
+#define MACVLAN_IS_MACVLAN     15
+
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#include <linux/netdevice.h>
+extern int (*macvlan_ioctl_hook)(unsigned long arg);
+
+/* Returns >= 0 if it consumed the packet, otherwise let the pkt
+ * be processed by the netif_rx method, as if macvlan's didn't
+ * exist.
+ */
+extern int (*macvlan_handle_frame_hook)(struct sk_buff *skb);
+#endif
+
+struct macvlan_ioctl_reply {
+        int num;
+        char name[IFNAMSIZ];
+};
+
+struct macvlan_ioctl {
+        int  cmd;
+        int portidx;
+        char *ifname;
+        int ifidx; /* flags when setting port flags */
+        unsigned char *macaddr;
+        int macaddridx;
+        struct macvlan_ioctl_reply *reply;
+};
+
+#endif
--- linux-2.4.21/include/linux/sockios.h	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/include/linux/sockios.h	2003-07-30 16:20:41.000000000 -0700
@@ -65,6 +65,8 @@
 #define SIOCDIFADDR	0x8936		/* delete PA address		*/
 #define	SIOCSIFHWBROADCAST	0x8937	/* set hardware broadcast addr	*/
 #define SIOCGIFCOUNT	0x8938		/* get number of devices */
+#define SIOCGIFWEIGHT	0x8939		/* get weight of device, in stones */
+#define SIOCSIFWEIGHT	0x893a		/* set weight of device, in stones */
 
 #define SIOCGIFBR	0x8940		/* Bridging support		*/
 #define SIOCSIFBR	0x8941		/* Set bridging options 	*/
@@ -94,6 +96,10 @@
 #define SIOCGRARP	0x8961		/* get RARP table entry		*/
 #define SIOCSRARP	0x8962		/* set RARP table entry		*/
 
+/* MAC address based VLAN control calls */
+#define SIOCGIFMACVLAN	0x8965		/* Mac address multiplex/demultiplex support */
+#define SIOCSIFMACVLAN	0x8966		/* Set macvlan options 	*/
+
 /* Driver configuration calls */
 
 #define SIOCGIFMAP	0x8970		/* Get device parameters	*/
@@ -116,6 +122,15 @@
 #define SIOCBONDINFOQUERY      0x8994	/* rtn info about bond state    */
 #define SIOCBONDCHANGEACTIVE   0x8995   /* update to a new active slave */
 			
+
+/* Ben's little hack land */
+#define SIOCSACCEPTLOCALADDRS  0x89a0   /*  Allow interfaces to accept pkts from
+                                         * local interfaces...use with SO_BINDTODEVICE
+                                         */
+#define SIOCGACCEPTLOCALADDRS  0x89a1   /*  Allow interfaces to accept pkts from
+                                         * local interfaces...use with SO_BINDTODEVICE
+                                         */
+
 /* Device private ioctl calls */
 
 /*
--- linux-2.4.21/net/Config.in	2002-08-02 17:39:46.000000000 -0700
+++ linux-2.4.21.amds/net/Config.in	2003-07-30 16:20:41.000000000 -0700
@@ -48,6 +48,7 @@
             bool '    Per-VC IP filter kludge' CONFIG_ATM_BR2684_IPFILTER
       fi
    fi
+   tristate 'MAC address based VLANs (EXPERIMENTAL)' CONFIG_MACVLAN
 fi
 tristate '802.1Q VLAN Support' CONFIG_VLAN_8021Q
 
--- linux-2.4.21/net/Makefile	2002-08-02 17:39:46.000000000 -0700
+++ linux-2.4.21.amds/net/Makefile	2003-07-30 16:20:41.000000000 -0700
@@ -44,7 +44,8 @@
 subdir-$(CONFIG_ATM)		+= atm
 subdir-$(CONFIG_DECNET)		+= decnet
 subdir-$(CONFIG_ECONET)		+= econet
-subdir-$(CONFIG_VLAN_8021Q)           += 8021q
+subdir-$(CONFIG_VLAN_8021Q) += 8021q
+subdir-$(CONFIG_MACVLAN)	+= macvlan
 
 
 obj-y	:= socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y))))
--- linux-2.4.21/net/ipv4/af_inet.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/ipv4/af_inet.c	2003-07-30 16:20:41.000000000 -0700
@@ -143,6 +143,10 @@
 int (*br_ioctl_hook)(unsigned long);
 #endif
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+int (*macvlan_ioctl_hook)(unsigned long) = NULL;
+#endif
+
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 int (*vlan_ioctl_hook)(unsigned long arg);
 #endif
@@ -879,6 +883,18 @@
 #endif
 			return -ENOPKG;
 
+		case SIOCGIFMACVLAN:
+		case SIOCSIFMACVLAN:
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+#ifdef CONFIG_KMOD
+			if (macvlan_ioctl_hook == NULL)
+ 				request_module("macvlan");
+#endif
+			if (macvlan_ioctl_hook != NULL)
+				return macvlan_ioctl_hook(arg);
+#endif
+			return -ENOPKG;
+
 		case SIOCGIFVLAN:
 		case SIOCSIFVLAN:
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
--- linux-2.4.21/net/macvlan/Makefile	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.21.amds/net/macvlan/Makefile	2003-07-30 16:20:41.000000000 -0700
@@ -0,0 +1,11 @@
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET	:= mac-mux.o
+obj-$(CONFIG_MACVLAN) := macvlan.o
+
+include $(TOPDIR)/Rules.make
--- linux-2.4.21/net/macvlan/macvlan.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.21.amds/net/macvlan/macvlan.c	2003-08-13 16:26:11.000000000 -0700
@@ -0,0 +1,2051 @@
+/* -*- linux-c -*-
+#######################################################################
+#
+# (C) Copyright 2001-2003
+# Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
+# Re-worked by Ben Greear <greearb@candelatech.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#######################################################################
+# Notes:
+# 
+# This file implements the macvlan.o MAC address based VLAN support 
+# module.
+#
+# This provides an IOCTL interface which allows you to:
+#
+# 1. enable/disable MAC address based VLANS over an ether type net_device
+# 2. add/remove a MAC address based VLAN - which is an ether type net_device
+#    layered over the original MACVLAN enabled ether type net_device.
+# 3. bind/unbind MAC addresses to/from particular MAC address based VLANs
+# 4. discover the state of MAC address based VLANs on the system.
+# 5. set/get port flags, including whether to bind to destination MAC
+#    or source mac.
+# 6. Traffic to/from eth0 will not be affected.
+
+# Example: (Assuming you are using source binding)
+#
+# If you enable MAC address based VLANS over eth0
+#
+# You may then create further VLANs, e.g. eth0#1 eth0#2 ....
+# These will not receive any frames until you bind MAC addresses to them.
+# If you bind 11:22:33:44:55:66 to eth0#1, then any frames received by
+# eth0 with source MAC 11:22:33:44:55:66 will be routed up through eth0#1
+# instead of eth0.
+#
+# Example: (Assuming you are using destination (local) binding)
+#
+# If you enable MAC address based VLANS over eth0
+#
+# You may then create further VLANs, e.g. eth0#1 eth0#2 ....
+# These will not receive any frames until you bind MAC addresses to them.
+# If you bind 11:22:33:44:55:66 to eth0#1, then any broadcast/multicast
+# frames, or frames with a destination MAC 11:22:33:44:55:66
+# will be routed up through eth0#1 instead of eth0
+#
+# For broadcasts, the packet will be duplicated for every VLAN
+# with at least one MAC attached.  Attaching more than one MAC
+# when destination binding makes no sense...don't do it!
+#
+# 
+#######################################################################
+*/
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> 
+#include <linux/fs.h>	  
+#include <linux/errno.h>  
+#include <linux/delay.h>  
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/tqueue.h>
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/if_macvlan.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#define MVL_PROC_DIR "macvlan"
+#define MVL_PROC_CFG "config"
+#define PORT_CFG_FILE_NAME "config"
+static struct proc_dir_entry *mvl_proc_dir;
+static struct proc_dir_entry *mvl_proc_cfg;
+#endif
+
+#include "macvlan.h"
+
+
+/*********************************************************/
+/*			defines				 */
+/*********************************************************/
+
+#if 0
+#define DEBUG(format,args...) printk(KERN_ERR format, ##args);
+#else
+#define DEBUG(format,args...)
+#endif
+
+
+#undef MVL_USE_RW_LOCKS
+#ifdef MVL_USE_RW_LOCKS
+/*  Must hold this lock to make any changes to the macvlan structures.
+ *
+ *  NOTE(review): this branch is compiled out by the #undef above.  The
+ *  macros now take the same (a) argument as the spinlock variants so
+ *  that call sites (which all pass 'flags') work with either branch;
+ *  the plain rwlock calls simply ignore it.
+ */
+static rwlock_t mvl_cfg_lock = RW_LOCK_UNLOCKED;
+
+#define MVL_READ_LOCK(a) \
+                            BUG_ON(in_interrupt()); \
+			    read_lock(&mvl_cfg_lock)
+#define MVL_READ_UNLOCK(a) \
+                            BUG_ON(in_interrupt()); \
+			    read_unlock(&mvl_cfg_lock)
+
+#define MVL_WRITE_LOCK(a) \
+                            BUG_ON(in_interrupt()); \
+			    write_lock(&mvl_cfg_lock)
+#define MVL_WRITE_UNLOCK(a) \
+                            BUG_ON(in_interrupt()); \
+			    write_unlock(&mvl_cfg_lock)
+
+/* Acquire the read lock with IRQs saved, warning if it took > 100us. */
+#define MVL_IRQ_RLOCK(a) { \
+                            __u64 now = getCurUs(); \
+	                    __u64 later; \
+			    read_lock_irqsave(&mvl_cfg_lock, a); \
+                            later = getCurUs(); \
+                            if ((later - now) > 100) { \
+	                       printk("took: %lluus to acquire read lock, line: %i\n", \
+				      later - now, __LINE__); \
+                            }}
+
+#define MVL_IRQ_RUNLOCK(a) \
+			    read_unlock_irqrestore(&mvl_cfg_lock, a)
+#else
+/*  Must hold this lock to make any changes to the macvlan structures.
+ *  Plain spinlock build: readers and writers share one IRQ-safe lock.
+ *
+ *  Fix: the original definitions ended in stray '\' line continuations
+ *  that silently swallowed the following source line, and embedded a
+ *  trailing ';' that produced empty statements at every call site.
+ */
+static spinlock_t mvl_cfg_lock = SPIN_LOCK_UNLOCKED;
+
+#define MVL_READ_LOCK(a) MVL_WRITE_LOCK(a)
+#define MVL_READ_UNLOCK(a) MVL_WRITE_UNLOCK(a)
+
+#define MVL_WRITE_LOCK(a) \
+		           spin_lock_irqsave(&mvl_cfg_lock, a)
+#define MVL_WRITE_UNLOCK(a) \
+                           spin_unlock_irqrestore(&mvl_cfg_lock, a)
+
+#define MVL_IRQ_RLOCK(a) \
+			    spin_lock_irqsave(&mvl_cfg_lock, a)
+
+#define MVL_IRQ_RUNLOCK(a) \
+			    spin_unlock_irqrestore(&mvl_cfg_lock, a)
+#endif
+
+
+/*********************************************************/
+/*		       file scope variables		 */
+/*********************************************************/
+
+/* Singly-linked list of all ports (underlying devices) with mac-vlans
+ * enabled.  All mutation happens under mvl_cfg_lock. */
+static struct macvlan_port *port_list = NULL;
+
+/* Counters reported through the MACVLAN_GET_NUM_* ioctls. */
+static atomic_t macvlan_nports;
+static atomic_t mvl_vlan_counter;
+
+static int debug_lvl = 0;
+
+
+/*********************************************************/
+/*		   forward declarations			 */
+/*********************************************************/
+static int macvlan_hash_rem(const char* vlan_ifname,
+			    const unsigned char* mac);
+
+/*********************************************************/
+/*		   function definitions			 */
+/*********************************************************/
+
+/** Convert a struct timeval into an absolute micro-second count. */
+static inline __u64 tv_to_us(const struct timeval* tv) {
+        return ((__u64)tv->tv_sec * (__u64)1000000) + (__u64)tv->tv_usec;
+}
+
+
+/* Micro-seconds since the epoch.  More precise over long periods of
+ * time than getRelativeCurMs.
+ */
+static inline __u64 getCurUs(void) {
+        struct timeval now;
+        do_gettimeofday(&now);
+        return tv_to_us(&now);
+}
+
+
+/* ASCII-only upper-caser.  Made 'static' so this module does not
+ * export a global symbol named toupper, which would collide with the
+ * kernel's own toupper from <linux/ctype.h> at link time. */
+static char toupper(char in) {
+	if ((in >= 'a') && (in <= 'z')) {
+		in -= ('a' - 'A');
+	}
+	return in;
+}
+
+/* True for space, newline, carriage-return, or tab.  (The original
+ * tested '\r' twice; the duplicate was evidently meant to be '\t'.) */
+#define iswhitespace(x)\
+	((x) == ' ' || (x) == '\n' || (x) == '\r' || (x) == '\t' )
+
+/* Advance pointer x past any leading whitespace. */
+#define skip_whitespace(x) 	{ while (iswhitespace(*x)) (x)++; }
+
+/* Copy bytes from src into dst until the first whitespace character or
+ * until len bytes have been examined, whichever comes first.  Returns
+ * the number of bytes copied.  NOTE: dst is not NUL-terminated and
+ * must be at least len bytes long. */
+static int copy_next_word(char *dst, char *src, int len) {
+	int n = 0;
+	while (n < len && !iswhitespace(src[n])) {
+		dst[n] = src[n];
+		n++;
+	}
+	return n;
+}
+
+
+/* Parse a human-readable MAC address (12 hex digits, optionally
+ * separated by ':', '.', or ' ') into the 6 binary bytes of rslt_mac.
+ * Returns 0 on success, -EINVAL on an invalid character or too few
+ * digits.  Characters after the 12th hex digit are ignored, matching
+ * the original behavior.
+ *
+ * Fix: the original stored digits before incrementing j and then
+ * required j != 11, so an input with exactly 11 hex digits passed the
+ * check and an uninitialized tmp[11] was parsed.  We now count digits
+ * actually stored and require exactly 12.  strlen() is also hoisted
+ * out of the loop condition.
+ */
+static int toMacString(unsigned char* rslt_mac, const char* raw_mac) {
+	char tmp[12];   /* collected hex digits; 12 make a MAC */
+	char tmp_bt[3]; /* one two-digit byte plus NUL for simple_strtol */
+	int len = strlen(raw_mac);
+	int j = 0;      /* number of hex digits collected so far */
+	int i;
+
+	for (i = 0; i < len && j < 12; i++) {
+		char c = toupper(raw_mac[i]);
+		if (((c >= '0') && (c <= '9')) || ((c >= 'A') && (c <= 'F'))) {
+			tmp[j++] = c;
+		}
+		else if ((c == ':') || (c == ' ') || (c == '.')) {
+			/* valid separator -- skip it */
+		}
+		else {
+			/* Invalid character */
+			return -EINVAL;
+		}
+	}
+
+	if (j != 12) {
+		/* Not enough HEX values in the input string. */
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 6; i++) {
+		tmp_bt[0] = tmp[i*2];
+		tmp_bt[1] = tmp[i*2 + 1];
+		tmp_bt[2] = 0;
+		rslt_mac[i] = (unsigned char)(simple_strtol(tmp_bt, NULL, 16) & 0xFF);
+	}
+	return 0;
+}//toMacString
+
+
+/* Scan port's vlan list for the vlan whose device name matches ifname
+ * exactly; NULL if none does. */
+struct macvlan_vlan* macvlan_find_vlan_in_port(struct macvlan_port* port,
+					       const char* ifname) {
+	struct macvlan_vlan* cur = port->vlan_list;
+	while (cur) {
+		if (strcmp(cur->dev->name, ifname) == 0)
+			return cur;
+		cur = cur->next;
+	}
+	return NULL;
+}	  
+
+
+/* Find the port that owns the mac-vlan interface named ifname
+ * (e.g. "eth1#777"); NULL if no port has a vlan by that name. */
+struct macvlan_port* macvlan_find_port_for_mvlan_ifname(const char* ifname) {
+	struct macvlan_port* p = port_list;
+	while (p && !macvlan_find_vlan_in_port(p, ifname))
+		p = p->next;
+	return p;
+}
+
+/* Look a port up by the name of its underlying real device
+ * (e.g. "eth1"); NULL if mac-vlans are not enabled on it. */
+struct macvlan_port* macvlan_find_port_for_underlying_ifname(const char* ifname) {
+	struct macvlan_port* p;
+	for (p = port_list; p != NULL; p = p->next) {
+		if (!strcmp(p->dev->name, ifname))
+			return p;
+	}
+	return NULL;
+}	 
+
+/*
+ *	Rebuild the Ethernet MAC header. This is called after an ARP
+ *	(or in future other address resolution) has completed on this
+ *	sk_buff. We now let ARP fill in the other fields.
+ *
+ *	This routine CANNOT use cached dst->neigh!
+ *	Really, it is used only when dst->neigh is wrong.
+ *
+ */
+int macvlan_dev_rebuild_header(struct sk_buff *skb) {
+	struct net_device *dev = skb->dev;
+	struct ethhdr *veth = (struct ethhdr *)(skb->data);
+
+	switch (veth->h_proto) {
+#ifdef CONFIG_INET
+	case __constant_htons(ETH_P_IP):
+
+		/* IPv4: let ARP resolve the destination MAC. */
+		return arp_find(veth->h_dest, skb);
+#endif	
+	default:
+		DEBUG("%s: unable to resolve type %X addresses.\n", 
+		      dev->name, (int)veth->h_proto);
+	 
+		/* Non-IP: we can only stamp our own source address. */
+		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
+		break;
+	};
+
+	return 0;
+}
+
+
+
+/* net_device get_stats hook: the stats live in the per-vlan private
+ * data hung off dev->priv. */
+static struct net_device_stats *macvlan_get_stats(struct net_device *dev)
+{
+	struct macvlan_vlan *mv = dev->priv;
+	return &mv->statistics;
+}
+
+/* hard_start_xmit hook: account the frame to this vlan's stats and
+ * requeue it on the underlying real device.
+ * NOTE(review): the dev_queue_xmit() return value is ignored and stats
+ * are bumped beforehand, so dropped frames still count as sent. */
+static int macvlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct macvlan_vlan *vlan = dev->priv;
+	DEBUG("%s: \n", __PRETTY_FUNCTION__);
+	vlan->statistics.tx_packets++;
+	vlan->statistics.tx_bytes += skb->len;
+
+	/* Hand the skb down to the lower device. */
+	skb->dev = vlan->lowerdev;
+	dev_queue_xmit(skb);
+	return 0;
+}
+
+/* net_device open hook: pin the module while the interface is up and
+ * enable its transmit queue. */
+static int macvlan_open(struct net_device *dev)
+{
+	MOD_INC_USE_COUNT;
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Multicast list changes are ignored; the underlying device is put in
+ * promiscuous mode when the port is created (macvlan_port_create), so
+ * all frames are received regardless. */
+static void macvlan_set_multicast_list(struct net_device *dev)
+{
+	/* TODO ??? */
+}
+
+/* net_device stop hook: mirror of macvlan_open. */
+static int macvlan_stop(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/* accept_fastpath hook: always decline (-1) the kernel's fast-path
+ * offer for this device. */
+static int macvlan_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+	return -1;
+}
+
+
+/*
+ *	Create the VLAN header for an arbitrary protocol layer 
+ *
+ *	saddr=NULL	means use device source address
+ *	daddr=NULL	means leave destination address (eg unresolved arp)
+ *
+ *  This is called when the SKB is moving down the stack towards the
+ *  physical devices.
+ */
+int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
+			unsigned short type, void *daddr, void *saddr,
+			unsigned len)
+{
+	struct macvlan_vlan *vlan = dev->priv;
+	
+	DEBUG("%s: \n", __PRETTY_FUNCTION__);
+
+	/* Before delegating work to the lower layer, enter our MAC-address.
+	 * Any caller-supplied saddr is deliberately overridden here. */
+	saddr = dev->dev_addr;
+
+	/* Substitute the underlying real device for header construction. */
+	dev = vlan->lowerdev;
+
+	/* Now make the underlying real hard header */
+	return dev->hard_header(skb, dev, type, daddr, saddr, len);
+}
+
+
+/* net_device destructor: release the per-vlan private data riding in
+ * dev->priv and drop the global vlan count. */
+void macvlan_dev_destructor(struct net_device *dev) {
+	struct macvlan_vlan *mv = dev->priv;
+
+	atomic_dec(&mvl_vlan_counter);
+	if (mv != NULL) {
+		kfree(mv);
+		dev->priv = NULL;
+	}
+}
+
+
+/* Create a new mac-vlan device named "<port>#<idx>" layered over the
+ * underlying device named port_name.  A negative newifidx means "use
+ * the first free index".  Returns 0 or a negative errno.
+ *
+ * Fixes vs. the original:
+ *  - the !port path unlocked explicitly AND jumped to unlockout, which
+ *    unlocks again (double spin_unlock_irqrestore);
+ *  - when no free index existed the code only printed a warning and
+ *    then went on to create "<port>#-1"; it now fails with -ENODEV.
+ *
+ * NOTE(review): kmalloc(GFP_KERNEL), dev_get_by_name() and
+ * register_netdev-adjacent work run while MVL_WRITE_LOCK (an IRQ-save
+ * spinlock) is held; these may sleep.  Pre-existing issue, needs a
+ * broader restructure -- left flagged, not changed here.
+ */
+static int macvlan_vlan_create(const char* port_name, int newifidx) {
+	struct macvlan_vlan *vlan = NULL;
+	struct macvlan_port* port;
+	char newifname[IFNAMSIZ+1];
+	struct net_device* td = NULL;
+	unsigned long flags;
+	int rv;
+	
+	MVL_WRITE_LOCK(flags);
+
+	/* find the port to which port_name belongs */
+	port = macvlan_find_port_for_underlying_ifname(port_name);
+	if (!port) {
+		rv = -ENODEV;
+		goto unlockout;
+	}
+
+	BUG_ON(!port->dev);
+
+	if (newifidx < 0) {
+		/* Find the next free index */
+		int i;
+		for (i = 0; i<MAX_MACVLANS_PER_PORT; i++) {
+			snprintf(newifname, IFNAMSIZ, "%s#%d", port->dev->name, i);
+			newifname[IFNAMSIZ] = 0;
+			if ((td = dev_get_by_name(newifname)) == NULL) {
+				newifidx = i;
+				break;
+			}
+			dev_put(td);
+		}
+
+		if (newifidx < 0) {
+			printk("macvlan: Could not find a free index, reached max: %i\n", i);
+			rv = -ENODEV;
+			goto unlockout;
+		}
+	}
+	
+	/* generate a name for the new vlan */
+	snprintf(newifname, IFNAMSIZ, "%s#%d", port->dev->name, newifidx);
+	newifname[IFNAMSIZ] = 0;
+
+	if ((td = dev_get_by_name(newifname)) != NULL) {
+		DEBUG("macvlan: vlan by that name already exists\n");
+		dev_put(td);
+		rv = -EEXIST;
+		goto unlockout;
+	}
+
+	if ((vlan = kmalloc(sizeof(*vlan), GFP_KERNEL)) == NULL) {
+		DEBUG("macvlan: kmalloc failure\n");
+		rv = -ENOMEM;
+		goto unlockout;
+	}
+
+	memset(vlan, 0, sizeof(*vlan));
+
+	if ((vlan->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL)) == NULL) {
+		rv = -ENOMEM;
+		kfree(vlan);
+		goto unlockout;
+	}
+	memset(vlan->dev, 0, sizeof(struct net_device));
+	
+	strcpy(vlan->dev->name, newifname);
+	ether_setup(vlan->dev);
+	
+	dev_hold(vlan->dev); /* MVL code holds reference */
+	
+	vlan->dev->priv = vlan;
+	vlan->port = port;
+	vlan->lowerdev = port->dev;
+
+	/* Wire up our net_device methods. */
+	/* dev->do_ioctl = macvlan_do_ioctl; */
+	vlan->dev->get_stats = macvlan_get_stats;
+	vlan->dev->hard_start_xmit = macvlan_xmit;
+	vlan->dev->hard_header = macvlan_hard_header;
+	vlan->dev->rebuild_header = macvlan_dev_rebuild_header;
+	vlan->dev->open = macvlan_open;
+	vlan->dev->set_multicast_list = macvlan_set_multicast_list;
+	vlan->dev->stop = macvlan_stop;
+	vlan->dev->accept_fastpath = macvlan_accept_fastpath;
+	vlan->dev->tx_queue_len = 0;
+	vlan->dev->set_mac_address = NULL;
+	vlan->dev->destructor = macvlan_dev_destructor;
+	
+	/* This will change if you are using Destination (local) binding,
+	 * when you add a MAC to it..
+	 */
+	memcpy(vlan->dev->dev_addr, vlan->lowerdev->dev_addr, ETH_ALEN);
+
+	DEBUG("macvlan: created vlan %p\n", vlan);
+
+#ifdef MVL_CONFIG_PROC_FS
+	if (vlan->port->proc_dir) {
+		vlan->proc_ent = create_proc_read_entry(vlan->dev->name, S_IRUGO,
+							vlan->port->proc_dir,
+							read_mvl, vlan);
+		if (!vlan->proc_ent) {
+			printk("ERROR:  Could not create proc entry for device: %s\n",
+			       vlan->dev->name);
+		}
+		else {
+			vlan->proc_ent->write_proc = write_mvl;
+		}
+	}
+#endif
+
+	atomic_inc(&port->ndevs);
+	
+	/* link to the head of the port's vlan list */
+	vlan->next = port->vlan_list;
+	port->vlan_list = vlan;
+
+	MVL_WRITE_UNLOCK(flags);
+	register_netdev(vlan->dev);
+
+	atomic_inc(&mvl_vlan_counter);
+	rv = 0;
+	goto out;
+
+ unlockout:
+	MVL_WRITE_UNLOCK(flags);
+ out:
+	return rv;
+} /* macvlan_vlan_create */
+
+
+/* Has locking internally.
+ * Tear down the mac-vlan named ifname: purge its MAC bindings from the
+ * owning port's hash table, unlink it from the port's vlan list, drop
+ * our reference and unregister the net_device (the vlan struct itself
+ * is freed by macvlan_dev_destructor).  Returns 0, -ENODEV if the
+ * vlan is unknown, or -EBUSY while the device is still up. */
+int macvlan_vlan_cleanup(const char* ifname) {
+	int i;
+	struct macvlan_port* port;
+	struct macvlan_vlan* vlan;
+	struct macvlan_vlan* walker;
+	struct macvlan_vlan* prev;
+	unsigned long flags;
+	int rv;
+	
+	/* NOTE(review): 'vlan' is uninitialized at this point; harmless
+	 * only because DEBUG currently expands to nothing. */
+	DEBUG(__FUNCTION__"(%p)\n",vlan);
+	//printk("mvl_cln: %s", ifname);
+
+	MVL_WRITE_LOCK(flags);
+	/* NOTE:  Cannot depend on device name, it can be changed. --Ben */
+	port = macvlan_find_port_for_mvlan_ifname(ifname);
+	if (!port) {
+		rv = -ENODEV;
+		goto unlockout;
+	}
+
+	//printk("1 ");
+	vlan = macvlan_find_vlan_in_port(port, ifname);
+	BUG_ON(!vlan);
+
+	/* Refuse to remove a device that is administratively up. */
+	if (vlan->dev->flags & IFF_UP) {
+		rv = -EBUSY;
+		goto unlockout;
+	}
+
+	//printk("2 ");
+	/* Walk every hash bucket and free all entries bound to this vlan. */
+	for (i = 0; i<MACVLAN_HASH_LEN; i++) {
+		struct macvlan_hash_entry* tmp = vlan->port->hash_table[i];
+		struct macvlan_hash_entry* prev = NULL;
+		while (tmp) {
+			if (tmp->vlan == vlan) {
+				if (prev) {
+					prev->next = tmp->next;
+					kfree(tmp);
+					tmp = prev->next;
+				}
+				else {
+					vlan->port->hash_table[i] = tmp->next;
+					kfree(tmp);
+					tmp = vlan->port->hash_table[i];
+				}
+			}
+			else {
+				prev = tmp;
+				tmp = tmp->next;
+			}
+		}
+	}/* for all hash buckets */
+	//printk("3 ");
+
+#ifdef MVL_CONFIG_PROC_FS
+	if (vlan->proc_ent) {
+		remove_proc_entry(vlan->dev->name, vlan->port->proc_dir);
+		vlan->proc_ent = NULL;
+	}
+#endif
+
+
+	/*
+	 * remove the vlan in question from the list
+	 */
+	prev = NULL;
+	walker = port->vlan_list;
+	while (walker) {
+		if (walker == vlan) {
+			if (prev) {
+				prev->next = walker->next;
+			}
+			else {
+				port->vlan_list = walker->next;
+			}
+			break;
+		}
+		prev = walker;
+		walker = walker->next;
+	}/* while */
+	BUG_ON(walker != vlan);
+	
+	atomic_dec(&port->ndevs);
+
+	//printk("4 ");
+	/* Drop the reference taken by dev_hold() in macvlan_vlan_create. */
+	dev_put(vlan->dev);
+
+	MVL_WRITE_UNLOCK(flags);
+
+	/* Must be called without the spinlock held. */
+	unregister_netdev(vlan->dev);
+	
+	/* VLAN will be deleted when the device is deleted */
+	
+	//printk("5 ");
+	rv = 0;
+	goto out;
+
+ unlockout:
+	MVL_WRITE_UNLOCK(flags);
+
+ out:
+	return rv;
+	
+} /* mac_vlan cleanup */
+
+
+
+/* Replace the flag word of the port whose underlying device is named
+ * ifname.  Caller holds the config lock.  Returns 0 or -ENODEV. */
+static int macvlan_port_set_flags(const char* ifname, int flags) {
+	struct macvlan_port *p = macvlan_find_port_for_underlying_ifname(ifname);
+
+	if (p == NULL)
+		return -ENODEV;
+
+	p->flags = flags;
+	return 0;
+}/* macvlan_port_set_flags */
+
+/* Enable mac-vlan support on the device named ifname: take a reference
+ * on it, switch it to promiscuous mode and link a new macvlan_port
+ * onto port_list.  Returns 0 or a negative errno.  Called with the
+ * config lock held (MACVLAN_ENABLE ioctl).
+ * NOTE(review): dev_get_by_name()/kmalloc(GFP_KERNEL) may sleep while
+ * the caller holds a spinlock -- pre-existing issue, flagged only. */
+static int macvlan_port_create(const char* ifname) {
+	struct macvlan_port *port;
+	struct net_device* dev;
+	
+	/* Refuse to enable twice on the same device. */
+	port = macvlan_find_port_for_underlying_ifname(ifname);
+	if (port != NULL) {
+		return -EEXIST;
+	}
+		
+	dev = dev_get_by_name(ifname);
+	if (dev == NULL) {
+		return -ENODEV;
+	}
+
+	/* Only plain ethernet devices not already claimed by another
+	 * macvlan port (and not loopback) are eligible. */
+	if ((dev->macvlan_priv != NULL)
+	    || (dev->flags & IFF_LOOPBACK)
+	    || (dev->type != ARPHRD_ETHER)) {
+		printk("macvlan: lower layer failed"
+		      " dev->macvlan_priv=%p dev->flags=%08x dev->type=%08x\n",
+		      dev->macvlan_priv, dev->flags, dev->type);
+		dev_put(dev);
+		return -EINVAL;
+	}
+
+	if ((port = kmalloc(sizeof(*port), GFP_KERNEL)) == NULL) {
+		dev_put(dev);
+		return -ENOBUFS;
+	}
+
+	memset(port, 0, sizeof(*port));
+	port->dev = dev;
+
+	/* TODO:  Could use multicast filters in some NICs at least. */
+	dev_set_promiscuity(dev, 1);
+	dev->macvlan_priv = port;
+
+#ifdef MVL_CONFIG_PROC_FS
+	/* NOTE(review): guarded by MVL_CONFIG_PROC_FS while the proc
+	 * variables at the top of the file are under CONFIG_PROC_FS --
+	 * confirm which macro is intended. */
+	if (mvl_proc_dir) {
+		port->proc_dir = proc_mkdir(port->dev->name, mvl_proc_dir);
+
+		if (port->proc_dir) {
+			port->proc_ent = create_proc_read_entry(PORT_CFG_FILE_NAME, S_IRUGO,
+								port->proc_dir,
+								read_mvl_port, port);
+			if (port->proc_ent) {
+				port->proc_ent->write_proc = write_mvl_port;
+			}
+			else {
+				printk("macvlan: ERROR: failed to create proc entry for port: %s\n",
+				       port->dev->name);
+			}
+		}
+	}
+#endif
+
+	atomic_inc(&macvlan_nports);
+	
+	/* Link into our list */
+	port->next = port_list;
+	port_list = port;
+	
+	DEBUG("macvlan: created port=%p\n", port);
+	return 0;
+}/* macvlan_port_create */
+
+
+/* Clears all memory, kfree's it if possible.
+ * Disable mac-vlan support on the underlying device named ifname:
+ * unlink the port, undo promiscuous mode, drop the device reference
+ * and free the port.  Fails with -EBUSY while mac-vlans still exist.
+ * Caller holds the config lock (MACVLAN_DISABLE ioctl). */
+static int macvlan_port_cleanup(const char* ifname) {
+	struct macvlan_port *port;
+	struct macvlan_port *prev;
+	struct macvlan_port *walker;
+	int i;
+	
+	port = macvlan_find_port_for_underlying_ifname(ifname);
+	if (!port) {
+		return -ENODEV;
+	}
+
+	/* All vlans must have been removed first. */
+	if (port->vlan_list) {
+		return -EBUSY;
+	}
+
+	/* hash table should be empty at this point */
+	for (i = 0 ; i < MACVLAN_HASH_LEN; i++) {
+		BUG_ON(port->hash_table[i]);
+	}
+
+	/* Remove from our port list */
+	prev = NULL;
+	walker = port_list;
+	while (walker) {
+		if (walker == port) {
+			if (prev) {
+				prev->next = walker->next;
+			}
+			else {
+				port_list = walker->next;
+			}
+			break;
+		}
+		prev = walker;
+		walker = walker->next;
+	}
+	BUG_ON(walker != port);
+	
+	
+#ifdef MVL_CONFIG_PROC_FS
+	if (port->proc_dir) {
+		if (port->proc_ent) {
+			remove_proc_entry(PORT_CFG_FILE_NAME, port->proc_dir);
+			port->proc_ent = NULL;
+		}
+		
+		remove_proc_entry(port->dev->name, mvl_proc_dir);
+		port->proc_dir = NULL;
+	}
+#endif
+	
+	/* Undo the promiscuity bump and reference from port_create. */
+	dev_set_promiscuity(port->dev, -1);
+	port->dev->macvlan_priv = NULL;
+	dev_put(port->dev);
+
+	atomic_dec(&macvlan_nports);
+
+	kfree(port);
+	
+	return 0;
+}/* macvlan_port_cleanup */
+
+
+/* Hash lookup: bucket on VLAN_BUCKET(src) (the low source-address
+ * byte) and linearly scan that chain for an exact 6-byte MAC match.
+ * Returns the bound vlan, or NULL if the MAC is not bound. */
+static inline struct macvlan_vlan *macvlan_hash_lookup(struct macvlan_port *port,
+						       const unsigned char *src) {
+	struct macvlan_hash_entry *e = port->hash_table[VLAN_BUCKET(src)];
+
+	while (e != NULL) {
+		if (!memcmp(e->mac, src, ETH_ALEN))
+			return e->vlan;
+		e = e->next;
+	}
+	return NULL;
+}
+
+
+/* Bind macaddr to the mac-vlan named ifname: insert a hash entry at
+ * the head of its bucket on the owning port.  Caller holds the config
+ * lock (MACVLAN_BIND ioctl).  Returns 0 or a negative errno.
+ *
+ * Fix: the DEBUG trace referenced entry->src, a field that does not
+ * exist (the member is 'mac'); it only compiled because DEBUG is
+ * currently a no-op.
+ *
+ * NOTE(review): kmalloc(GFP_KERNEL) may sleep while the caller holds a
+ * spinlock -- pre-existing issue, flagged only. */
+static int macvlan_hash_add(const char* ifname,
+			    const unsigned char* macaddr) {
+	
+	struct macvlan_port *port;
+	struct macvlan_vlan *vlan;
+	unsigned int bucket = VLAN_BUCKET(macaddr);
+	struct macvlan_hash_entry* entry;
+	
+
+	/* find the port in question */
+	port = macvlan_find_port_for_mvlan_ifname(ifname);
+	if (!port) {
+		return -ENODEV;
+	}
+	    
+	/* find the vlan layered over this port */
+	vlan = macvlan_find_vlan_in_port(port, ifname);
+	BUG_ON(!vlan);
+	
+	/* check it's not already in the hash lookup table */
+	if (macvlan_hash_lookup(port, macaddr)) {
+		DEBUG("macvlan: user tried to add mac addr twice!\n");
+		return -EEXIST;
+	}
+
+	/* Destination-binding vlans take the MAC as their own device
+	 * address, so only one MAC per vlan makes sense there. */
+	if ((atomic_read(&vlan->nmacs) > 0)
+	    && (port->flags & MVL_FILTER_ON_DEST)) {
+		printk("macvlan:  Already have a MAC on this vlan: %s and we are filtering on DEST, so no more are allowed!\n",
+		       ifname);
+		return -EINVAL;
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		return -ENOBUFS;
+	}
+	memset(entry, 0, sizeof(*entry));
+	
+	memcpy(entry->mac, macaddr, sizeof(entry->mac));
+	entry->vlan = vlan;
+	entry->next = port->hash_table[bucket];
+	port->hash_table[bucket] = entry;
+	DEBUG("macvlan: added %02x:%02x:%02x:%02x:%02x:%02x to vlan %p\n", 
+	      entry->mac[0],entry->mac[1],entry->mac[2],
+	      entry->mac[3],entry->mac[4],entry->mac[5],
+	      vlan);
+
+	atomic_inc(&vlan->nmacs);
+	
+	if (port->flags & MVL_FILTER_ON_DEST) {
+		/* Set the MAC on the vlan device so that it sends pkts correctly. */
+		memcpy(vlan->dev->dev_addr, macaddr, ETH_ALEN);
+	}
+	
+	return 0;
+} /* macvlan_hash_add */
+
+/* Unbind a MAC from whatever vlan it is attached to on the port that
+ * owns vlan_ifname, freeing the hash entry.  Caller holds the config
+ * lock.  Returns 0 on success, -ENODEV for an unknown vlan, -EINVAL
+ * if the MAC was not bound. */
+static int macvlan_hash_rem(const char* vlan_ifname,
+			    const unsigned char* mac) {
+	struct macvlan_port *port;
+	struct macvlan_hash_entry *cur;
+	struct macvlan_hash_entry **link;
+	    
+	/* find the port in question */
+	port = macvlan_find_port_for_mvlan_ifname(vlan_ifname);
+	if (!port) {
+		return -ENODEV;
+	}
+
+	/* Unlink via a pointer-to-link walk: *link always addresses the
+	 * slot that points at cur, so removal needs no prev special-case. */
+	link = &port->hash_table[VLAN_BUCKET(mac)];
+	for (cur = *link; cur; link = &cur->next, cur = *link) {
+		if (memcmp(cur->mac, mac, ETH_ALEN) == 0) {
+			*link = cur->next;
+			atomic_dec(&cur->vlan->nmacs);
+			kfree(cur);
+			return 0;
+		}
+	}
+	
+	return -EINVAL;
+}/* macvlan_hash_rem */
+
+
+static int macvlan_ioctl_deviceless_stub(unsigned long arg) {
+	int err = 0;
+	struct macvlan_ioctl req;
+	struct macvlan_ioctl_reply rep;
+	unsigned long flags;
+	
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&req, (void *)arg, sizeof(req)))
+		return -EFAULT;
+
+	memset(&rep, 0, sizeof(rep));
+
+	switch (req.cmd)
+	{
+	case MACVLAN_ENABLE:
+	{
+		/* 
+		 * enable creation of mac based vlans
+		 * layered over an ethernet device
+		 */
+		char ifname[IFNAMSIZ];
+	    
+		/* Get name of ethernet device */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+
+		MVL_WRITE_LOCK(flags);
+		err = macvlan_port_create(ifname);
+		MVL_WRITE_UNLOCK(flags);
+		
+		break;
+	}
+	case MACVLAN_DISABLE:
+	{
+		/*
+		 * disable creation of mac based vlans
+		 * layered over an ethernet device
+		 */
+		char ifname[IFNAMSIZ];
+		
+		/* Get name of ethernet device */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+
+		MVL_WRITE_LOCK(flags);
+		err = macvlan_port_cleanup(ifname);
+		MVL_WRITE_UNLOCK(flags);
+
+		break;
+	}
+	case MACVLAN_ADD:
+	{
+		/* 
+		 * create a new mac based vlan
+		 */
+		char ifname[IFNAMSIZ];
+		int ifidx;
+		
+		/* Get name of port over which we are creating a vlan */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+		
+		/* Get index of new vlan we are creating */
+		ifidx = req.ifidx;
+
+		/* Has internal locking. */
+		err = macvlan_vlan_create(ifname, ifidx);
+		
+		break;
+	}
+	case MACVLAN_SET_PORT_FLAGS:
+	{
+		/* 
+		 * Set a macvlan_port's flags
+		 */
+		char ifname[IFNAMSIZ];
+		
+		/* Get name of port over which we are creating a vlan */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+
+		MVL_WRITE_LOCK(flags);
+		err = macvlan_port_set_flags(ifname, req.ifidx);
+		MVL_WRITE_UNLOCK(flags);
+		
+		break;
+	}
+	case MACVLAN_GET_PORT_FLAGS:
+	{
+		/* 
+		 * Set a macvlan_port's flags
+		 */
+		struct macvlan_port *port;
+		char ifname[IFNAMSIZ];
+		
+		/* Get name of port over which we are creating a vlan */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+		
+		MVL_READ_LOCK(flags);
+		/* find the port to which ifname belongs */
+		port = macvlan_find_port_for_mvlan_ifname(ifname);
+		if (!port) {
+			err = -ENODEV;
+		}
+		else {
+			rep.num = port->flags;
+		}
+		MVL_READ_UNLOCK(flags);
+		
+		if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+			err = -EFAULT;
+		}
+
+		break;
+	}
+	case MACVLAN_DEL:
+	{
+		/*
+		 * destroy a mac based vlan
+		 */
+		char ifname[IFNAMSIZ];
+		
+		/* Get name of vlan to remove */
+		if (copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+
+		/* Has internal locking */
+		err = macvlan_vlan_cleanup(ifname);
+		break;
+	}
+	
+	case MACVLAN_BIND:
+	{
+		/*
+		 * Bind a MAC address to vlan
+		 */
+		char ifname[IFNAMSIZ];
+		unsigned char macaddr[ETH_ALEN];
+		
+		/* Get name of vlan */
+		if (copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+		
+		/* Get mac address to bind to vlan */
+		if (copy_from_user(macaddr, (void *)req.macaddr, sizeof(macaddr))) {
+			err = -EFAULT;
+			break;
+		}
+		
+		MVL_WRITE_LOCK(flags);
+		err = macvlan_hash_add(ifname, macaddr);
+		MVL_WRITE_UNLOCK(flags);
+		break;
+	}
+	case MACVLAN_UNBIND:
+	{
+		/* 
+		 * Unbind a MAC address from a vlan
+		 */
+		char ifname[IFNAMSIZ];
+		unsigned char macaddr[ETH_ALEN];
+	    
+		/* Get name of vlan */
+		if (copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+	    
+		/* Get mac address to unbind */
+		if (copy_from_user(macaddr, (void *)req.macaddr, sizeof(macaddr))) {
+			err = -EFAULT;
+			break;
+		}
+
+		MVL_WRITE_LOCK(flags);
+		err = macvlan_hash_rem(ifname, macaddr);
+		MVL_WRITE_UNLOCK(flags);
+		break;
+	}
+
+	case MACVLAN_IS_MACVLAN:
+	{
+		/* 
+		 * Give user-space a chance of determining if we are a MAC-VLAN nor not.
+		 *  (If the IOCTL fails, we are not, otherwise we are.)
+		 */
+		struct macvlan_port *port;
+		char ifname[IFNAMSIZ];
+	    
+		/* Get name of vlan */
+		if(copy_from_user(ifname, (void *)req.ifname, sizeof(ifname))) {
+			err = -EFAULT;
+			break;
+		}
+		ifname[IFNAMSIZ-1] = '\0';
+	    
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		port = macvlan_find_port_for_mvlan_ifname(ifname);
+		MVL_READ_UNLOCK(flags);
+
+		if (!port) {
+			/* printk("device: %s is NOT a MAC-VLAN\n", ifname); */
+			err = -ENODEV;
+		}
+		else {
+			/* printk("device: %s IS a MAC-VLAN\n", ifname); */
+			err = 0;
+		}
+		break;
+	}
+	case MACVLAN_GET_NUM_PORTS:
+	{
+		/* 
+		 * how many ethernet devices have mac based vlans enabled over them
+		 */
+		rep.num = atomic_read(&macvlan_nports);
+		if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+			err = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MACVLAN_GET_PORT_NAME:
+	{
+		/* 
+		 * name the nth device which has mac based vlans enabled over it
+		 */
+		struct macvlan_port *port;
+		int n = req.portidx;
+
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		for (port = port_list; port && n; port = port->next, n--);
+		if (!port) {
+			err = -ENODEV;
+		}
+		else {
+			memcpy(rep.name, port->dev->name, IFNAMSIZ);
+					
+			if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+				err = -EFAULT;
+			}
+		}
+		MVL_READ_UNLOCK(flags);
+		break;
+	}
+	case MACVLAN_GET_NUM_VLANS:
+	{
+		/*
+		 * how many vlans are layered over the nth mac-based
+		 * vlan enabled device
+		 */
+
+		struct macvlan_port *port;
+		int n = req.portidx;
+
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		for (port = port_list; port && n; port = port->next, n--);
+
+		if (!port) {
+			err = -ENODEV;
+		}
+		else {
+			rep.num = atomic_read(&port->ndevs);
+			if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+				err = -EFAULT;
+			}
+		}
+		MVL_READ_UNLOCK(flags);
+	
+		break;
+	}
+	case MACVLAN_GET_VLAN_NAME:
+	{
+		/* 
+		 * what's the name of the mth vlan layered over the nth
+		 * mac-based-vlan enabled ethernet device
+		 */
+		struct macvlan_port *port;
+		struct macvlan_vlan *vlan;
+		int n = req.portidx;
+		int m = req.ifidx;
+
+		
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		for (port = port_list; port && n; port = port->next, n--);
+		if (!port) {
+			err = -EINVAL;
+		}
+		else {
+			/* find the vlan in question */
+			for (vlan = port->vlan_list; vlan && m; vlan = vlan->next, m--);
+
+			if (!vlan) {
+				err = -ENODEV;
+			}
+			else {
+				memcpy(rep.name, vlan->dev->name, IFNAMSIZ);
+			}
+			if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+				err = -EFAULT;
+			}
+		}
+		MVL_READ_UNLOCK(flags);
+		break;
+	}
+	case MACVLAN_GET_NUM_MACS:
+	{
+		/* 
+		 * how many mac addresses are owned by the mth vlan
+		 * layered over the nth mac-based-vlan enabled 
+		 * ethernet device
+		 */
+		struct macvlan_port *port;
+		struct macvlan_vlan *vlan;
+		int n = req.portidx;
+		int m = req.ifidx;
+
+
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		for (port = port_list; port && n; port = port->next, n--);
+
+		if (!port) {
+			err = -EINVAL;
+		}
+		else {
+			/* find the vlan in question */
+			for (vlan = port->vlan_list; vlan && m; vlan = vlan->next, m--);
+
+			if (!vlan) {
+				err = -ENODEV;
+			}
+			else {
+				rep.num = atomic_read(&vlan->nmacs);
+			}
+			if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+				err = -EFAULT;
+			}
+		}
+		MVL_READ_UNLOCK(flags);
+		break;
+	}
+	case MACVLAN_GET_MAC_NAME:
+	{
+		/* 
+		 * what's the pth mac address owned by the mth vlan
+		 * layered over the nth mac-based-vlan enabled 
+		 * ethernet device
+		 */
+		struct macvlan_port *port;
+		struct macvlan_vlan *vlan;
+		struct macvlan_hash_entry *entry;
+		int n = req.portidx;
+		int m = req.ifidx;
+		int p = req.macaddridx;
+
+		MVL_READ_LOCK(flags);
+		/* find the port in question */
+		for (port = port_list; port && n; port = port->next, n--);
+
+		if (!port) {
+			err = -EINVAL;
+		}
+		else {
+			/* find the vlan in question */
+			for (vlan = port->vlan_list; vlan && m; vlan = vlan->next, m--);
+
+			if (!vlan) {
+				err = -ENODEV;
+			}
+			else {
+				/* find the mac addr in question */
+				int i;
+				for (i = 0; i<MACVLAN_HASH_LEN; i++) {
+					entry = port->hash_table[i];
+					while (entry) {
+						if (entry->vlan == vlan) {
+							if (--p == 0) {
+								memcpy(rep.name, entry->mac, sizeof(entry->mac));
+								goto found_one;
+							}
+						}
+						entry = entry->next;
+					} /* while */
+				}/* for */
+				
+				/* Didn't find one */
+				err = -ENODEV;
+			}
+
+		found_one:		
+			
+			if (copy_to_user((void *)req.reply, &rep, sizeof(rep))) {
+				err = -EFAULT;
+			}
+		}
+		MVL_READ_UNLOCK(flags);
+		break;
+	}
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	/* printk("Returning err: %i\n", err); */
+	return err;
+}/* ioctl handler */
+
+
+/* Return >= 0 if packet is consumed, otherwise return < 0.
+ * Used for ports that demux on the SOURCE MAC of the frame.
+ */
+static inline int mvl_handle_frame_fos(struct macvlan_port* port, struct sk_buff* skb) {
+	struct macvlan_vlan *vlan; /* the higher layer i/f to which skbuff is mapped */
+	struct net_device *dst;	   /* cached so we never touch skb after netif_rx() */
+	int rv;
+	unsigned long flags;
+	
+	DEBUG("%s:  got port: %p, not filtering on DEST\n", __PRETTY_FUNCTION__, port);
+
+	MVL_IRQ_RLOCK(flags);
+	if (!(vlan = macvlan_hash_lookup(port, skb->mac.ethernet->h_source))) {
+		/* not for us, but don't delete it, others may consume it */
+		rv = -ENODEV;
+	}
+	else {
+		if (!(vlan->dev->flags & IFF_UP)) {
+			rv = 1;	 /* was consumed */
+			kfree_skb(skb);
+		}
+		else {
+			vlan->statistics.rx_packets++;
+			/*  Count the lower-level's header to make our counters look more
+			 *  like an ethernet device. */
+			vlan->statistics.rx_bytes += (skb->len + vlan->lowerdev->hard_header_len);
+
+			dst = vlan->dev;
+			skb->dev = dst;
+			dev_hold(dst);
+			if (memcmp(dst->dev_addr, skb->mac.ethernet->h_dest, ETH_ALEN)) {
+				skb->pkt_type=PACKET_OTHERHOST;
+			}
+			else {
+				skb->pkt_type = PACKET_HOST;
+			}
+			MVL_IRQ_RUNLOCK(flags);
+			/* BUG FIX: once netif_rx() has the skb it may be freed out
+			 * from under us; drop the device reference through the
+			 * cached pointer instead of reading skb->dev again. */
+			netif_rx(skb);
+			dev_put(dst);
+			rv = 0;
+			goto out;
+		}
+	}
+	
+	/* BUG FIX: this used to call MVL_IRQ_RLOCK again, so the not-found
+	 * and device-down paths returned with the read lock still held. */
+	MVL_IRQ_RUNLOCK(flags);
+ out:
+	return rv;
+} /* filter on source */
+
+
+/* Return >= 0 if packet is consumed, otherwise return < 0.
+ * Used for ports that demux on the DESTINATION MAC of the frame:
+ * broadcast/multicast frames are cloned to every up vlan on the port
+ * (never consumed); unicast frames go to the single owning vlan.
+ */
+static inline int mvl_handle_frame_fod(struct macvlan_port* port, struct sk_buff* skb) {
+	struct macvlan_vlan *vlan; /* the higher layer i/f to which skbuff is mapped */
+	struct net_device *dst;	   /* cached so we never touch skb after netif_rx() */
+	int rv;
+	unsigned long flags;
+
+	/* If it's a broadcast pkt, send it to all of them.  Otherwise,
+	 * send it to just one of them.
+	 */
+	if ((skb->pkt_type == PACKET_BROADCAST) || (skb->pkt_type == PACKET_MULTICAST)) {
+		/* never consume if we take this code branch, because it's bcast */
+		DEBUG("%s:  got port: %p, filtering on DEST, type is bcast or multicast\n",
+		      __PRETTY_FUNCTION__, port);
+		MVL_IRQ_RLOCK(flags);
+		for (vlan = port->vlan_list; vlan; vlan = vlan->next) {
+			DEBUG("%s:  got vlan: %s, nmacs: %i, up: %i\n",
+			      __PRETTY_FUNCTION__, vlan->dev->name,
+			      vlan->nmacs, (vlan->dev->flags & IFF_UP));
+			if (atomic_read(&vlan->nmacs) && (vlan->dev->flags & IFF_UP)) {
+				struct sk_buff* nskb;
+
+				/* Grab an extra reference; skb_share_check()
+				 * drops it again if it has to clone (or fails). */
+				atomic_inc(&skb->users);
+				nskb = skb_share_check(skb, GFP_ATOMIC);
+				if (!nskb) {
+					vlan->statistics.rx_fifo_errors++;
+					vlan->statistics.rx_errors++;
+				}
+				else {
+					vlan->statistics.rx_packets++;
+					/*  Count the lower-level's header to make our counters
+					 *  look more like an ethernet device. */
+					vlan->statistics.rx_bytes +=
+						(nskb->len + vlan->lowerdev->hard_header_len);
+					vlan->statistics.multicast++;
+
+					nskb->dev = vlan->dev;
+					netif_rx(nskb);
+				}
+			}
+		}
+		rv = -1; /* did not consume this pkt, merely tasted it */
+		MVL_IRQ_RUNLOCK(flags);
+		goto out;
+	}
+	else {
+		struct ethhdr *eth = skb->mac.ethernet;
+		char* d = eth->h_dest;
+		/* Not a broadcast, try to find our port based on DESTINATION */
+		MVL_IRQ_RLOCK(flags);
+		if (!(vlan = macvlan_hash_lookup(port, d))) {
+			/* not for us */
+			DEBUG("%s:  not a broadcast, and could not find vlan for dest: %2hx:%2hx:%2hx:%2hx:%2hx:%2hx\n",
+			      __PRETTY_FUNCTION__, d[0], d[1], d[2], d[3], d[4], d[5]);
+			
+			rv = -ENODEV;
+		}
+		else {
+			DEBUG("%s:  not a broadcast, found vlan for dest: "
+			      "%2hx:%2hx:%2hx:%2hx:%2hx:%2hx, up: %i\n",
+			      __PRETTY_FUNCTION__, d[0], d[1], d[2], d[3], d[4], d[5],
+			      (vlan->dev->flags & IFF_UP));
+		
+			if (!(vlan->dev->flags & IFF_UP)) {
+				kfree_skb(skb);
+				rv = 0; /* consume */
+			}
+			else {
+				vlan->statistics.rx_packets++;
+				/*  Count the lower-level's header to make our counters
+				 *  look more like an ethernet device. */
+				vlan->statistics.rx_bytes +=
+					(skb->len + vlan->lowerdev->hard_header_len);
+		
+				dst = vlan->dev;
+				skb->dev = dst;
+				if (!(eth->h_dest[0] & 1)) {
+					/* if it's not multicast, see if it's
+					 * for us, or not.
+					 */
+					if (memcmp(dst->dev_addr, eth->h_dest, ETH_ALEN)) {
+						skb->pkt_type = PACKET_OTHERHOST;
+					}
+					else {
+						skb->pkt_type = PACKET_HOST;
+					}
+				}
+				dev_hold(dst);
+				MVL_IRQ_RUNLOCK(flags);
+				/* BUG FIX: skb may be freed once netif_rx() has it;
+				 * release the ref via the cached pointer, not skb->dev. */
+				netif_rx(skb);
+				dev_put(dst);
+				rv = 0;
+				goto out;
+			}
+		}
+	}/* else, was not broadcast */
+
+	MVL_IRQ_RUNLOCK(flags);
+
+ out:
+	return rv;
+}/* filter on dest */
+
+
+/* global entry point when receiving a pkt from lower-level devices.  Return
+ * >= 0 if we consume, otherwise packet will be sent to the rest of the stack
+ * as normal.
+ *
+ * NOTE(review): assumes skb->dev->macvlan_priv is non-NULL whenever this
+ * hook fires -- presumably the hook is only reached for devices that have
+ * been set up as macvlan ports; confirm against the caller of
+ * macvlan_handle_frame_hook.
+ */
+static int macvlan_handle_frame(struct sk_buff *skb)
+{
+	struct macvlan_port *port;  /* maps skbuffs arriving from a lower layer
+				     * i/f to a higher layer i/f */
+	int rv = 0;
+
+	port = skb->dev->macvlan_priv;
+	/* Demux policy is per-port: match by destination MAC or by source
+	 * MAC depending on the MVL_FILTER_ON_DEST flag. */
+	if (port->flags & MVL_FILTER_ON_DEST) {
+		rv = mvl_handle_frame_fod(port, skb);
+	}
+	else {
+		rv = mvl_handle_frame_fos(port, skb);
+	}
+
+	return rv;
+}
+
+
+#ifdef MVL_CONFIG_PROC_FS
+
+/* /proc read handler for the global macvlan config file: prints the module
+ * banner, global counters, and one line per registered port.
+ * NOTE(review): 'off'/'count'/'start' are ignored -- presumably relying on
+ * the 2.4 proc_file_read fallback for start==NULL; assumes the whole dump
+ * fits in one 4096-byte (PAGE_SIZE?) page -- TODO confirm.
+ */
+static int read_mvl_glbl(char *page, char **start, off_t off,
+			 int count, int *eof, void *data) {
+	int	ret = -1;
+	char	*p = page;
+	int mx_len = (4096 - (p - page)); /* p == page here, so this is just 4096 */
+	
+	if (! *eof ) {
+		struct macvlan_port* port;
+		int cnt;
+		unsigned long flags;
+		
+                /* Global counts here... */
+		p += sprintf(p, "MAC-VLAN module:\n");
+
+		p += sprintf(p, " port count: %i  vlan_counter: %i\n",
+			     atomic_read(&macvlan_nports),
+			     atomic_read(&mvl_vlan_counter));
+
+		MVL_READ_LOCK(flags);
+		port = port_list;
+		while (port) {
+			p += sprintf(p, " %s  num_vlans: %i  flags: %x\n",
+				     port->dev->name, atomic_read(&port->ndevs), port->flags);
+
+			/* catch overflow */
+			cnt = p - page;
+			if (cnt > (mx_len - 60)) {
+				if (mx_len - cnt >= 20) {
+					p += sprintf(p, "OUT_OF_SPACE!\n");
+				}
+				break;
+			}
+
+			port = port->next;
+		}
+
+		ret = p - page;
+		MVL_READ_UNLOCK(flags);
+	}
+	/* NOTE(review): when *eof is already set this returns -1, which the
+	 * caller sees as an error code -- verify against proc_file_read. */
+	return ret;
+} /* read_mvl_glbl */
+
+/* /proc write handler for the global macvlan config file.
+ * Commands:  "add_port <ifname>", "remove_port <ifname>", "debug_lvl <n>".
+ * Always reports the full 'count' as consumed; command failures cannot be
+ * reported to user-space through this interface.
+ *
+ * BUG FIX: in a 2.4 write_proc handler 'buffer' is the raw user-space
+ * pointer; it must not be dereferenced directly (and certainly not while
+ * holding a lock).  Parse a bounded kernel copy instead.
+ */
+static int write_mvl_glbl(struct file *file, const char *buffer,
+			  unsigned long count, void *data) {
+	char		kbuf[256];	/* commands are short; excess input is ignored */
+	char		*p;
+	const char	*end;
+	int		ret = count;
+	int		len;
+	int		nbytes;
+	char		ifname[IFNAMSIZ];	/* only one name buffer was ever used */
+	char		*tmps = NULL;
+	unsigned long	flags;
+
+	nbytes = (count < sizeof(kbuf) - 1) ? (int)count : (int)(sizeof(kbuf) - 1);
+	if (copy_from_user(kbuf, buffer, nbytes))
+		return -EFAULT;
+	kbuf[nbytes] = '\0';
+
+	/* NOTE(review): macvlan_port_create/cleanup appear to do their own
+	 * locking elsewhere (see write_mvl_port) and may sleep; holding
+	 * MVL_WRITE_LOCK across them deserves review.
+	 */
+	MVL_WRITE_LOCK(flags);
+
+	end = kbuf + nbytes;
+	for (p = kbuf; p < end; ) {
+		if (iswhitespace(*p)) {
+			p++;
+			continue;
+		}
+
+		memset(ifname, 0, IFNAMSIZ);
+
+		len = strlen("add_port ");
+		if (strncmp(p, "add_port ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(ifname, p, IFNAMSIZ);
+			else
+				p += copy_next_word(ifname, p, end - p);
+
+			skip_whitespace(p);
+
+			/* This can fail, but not sure how to return failure
+			 * to user-space here.
+			 */
+			macvlan_port_create(ifname);
+			goto forend;
+		}
+
+		len = strlen("remove_port ");
+		if (strncmp(p, "remove_port ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(ifname, p, IFNAMSIZ);
+			else
+				p += copy_next_word(ifname, p, end - p);
+
+			skip_whitespace(p);
+
+			macvlan_port_cleanup(ifname);
+			goto forend;
+		}
+
+		len = strlen("debug_lvl ");
+		if (strncmp(p, "debug_lvl ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(ifname, p, IFNAMSIZ);
+			else
+				p += copy_next_word(ifname, p, end - p);
+
+			skip_whitespace(p);
+
+			debug_lvl = simple_strtoul(ifname, &tmps, 10);
+			goto forend;
+		}
+
+		printk("ERROR:  Unsupported command\n");
+
+	forend:
+		p++;
+	}
+
+	MVL_WRITE_UNLOCK(flags);
+
+	return ret;
+} /* write_mvl_glbl */
+
+/* Proc file read for a single mac-vlan device: prints the vlan's summary
+ * line followed by every MAC address bound to it (one per line).
+ * NOTE(review): 'off'/'count'/'start' are ignored, as in read_mvl_glbl;
+ * assumes output fits in one 4096-byte page -- TODO confirm.
+ *
+ * BUG FIX: the running MAC counter used to be a local named 'count',
+ * shadowing the 'count' parameter of the read_proc signature; renamed.
+ */
+static int read_mvl(char *page, char **start, off_t off,
+		    int count, int *eof, void *data) {
+	int	ret = -1;
+	if (! *eof ) {
+		char	*p = page;
+		struct macvlan_vlan* vlan = (struct macvlan_vlan*)(data);
+		struct macvlan_hash_entry* entry;
+		int i;
+		int nlisted = 0;	/* MACs printed so far (was 'count') */
+		int cnt;
+		int mx_len = 4096;
+		unsigned long flags;
+		
+		MVL_READ_LOCK(flags);
+		
+		/* Global counts here... */
+		p += sprintf(p, "MAC-VLAN %s:\n", vlan->dev->name);
+
+		p += sprintf(p, " MAC count: %i  lower_dev: %s  macvlan-port: %s\n",
+			     atomic_read(&vlan->nmacs), vlan->lowerdev->name,
+			     vlan->port->dev->name);
+
+		/* Walk the whole port hash and print the entries owned by
+		 * this vlan. */
+		for (i = 0; i<MACVLAN_HASH_LEN; i++) {
+			entry = vlan->port->hash_table[i];
+			while (entry) {
+				if (entry->vlan == vlan) {
+					/* catch overflow */
+					cnt = p - page;
+					if (cnt > (mx_len - 40)) {
+						if (mx_len - cnt >= 20) {
+							p += sprintf(p, "OUT_OF_SPACE!\n");
+						}
+						goto outofspace;
+					}
+
+					p += sprintf(p, "  [%i] %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+						     nlisted, entry->mac[0], entry->mac[1], entry->mac[2], 
+						     entry->mac[3], entry->mac[4], entry->mac[5]);
+					nlisted++;
+				}
+				entry = entry->next;
+			}/* while */
+		}/* for */
+
+	outofspace:
+
+		ret = p - page;
+
+		MVL_READ_UNLOCK(flags);
+	}
+	return ret;
+} /* read_mvl */
+
+
+/* /proc write handler for a single mac-vlan device.
+ * Commands:  "add_mac <xx:xx:xx:xx:xx:xx>", "remove_mac <xx:xx:xx:xx:xx:xx>".
+ * Always reports the full 'count' as consumed; command failures cannot be
+ * reported to user-space through this interface.
+ *
+ * BUG FIX: in a 2.4 write_proc handler 'buffer' is the raw user-space
+ * pointer; parse a bounded kernel copy obtained with copy_from_user
+ * instead of dereferencing it directly (and never while holding a lock).
+ */
+static int write_mvl(struct file *file, const char *buffer,
+		     unsigned long count, void *data) {
+	char		kbuf[256];	/* commands are short; excess input is ignored */
+	char		*p;
+	const char	*end;
+	int		ret = count;
+	int		len;
+	int		nbytes;
+	char		arg[MVL_MX_ARG_LEN+1];
+	struct macvlan_vlan* vlan = (struct macvlan_vlan*)(data);
+	char		mac[ETH_ALEN];
+	unsigned long	flags;
+
+	nbytes = (count < sizeof(kbuf) - 1) ? (int)count : (int)(sizeof(kbuf) - 1);
+	if (copy_from_user(kbuf, buffer, nbytes))
+		return -EFAULT;
+	kbuf[nbytes] = '\0';
+
+	/* NOTE(review): macvlan_hash_add/rem may take their own locks and/or
+	 * sleep; holding MVL_WRITE_LOCK across them deserves review.
+	 */
+	MVL_WRITE_LOCK(flags);
+
+	end = kbuf + nbytes;
+	for (p = kbuf; p < end; ) {
+		if (iswhitespace(*p)) {
+			p++;
+			continue;
+		}
+
+		memset(arg, 0, MVL_MX_ARG_LEN+1);
+
+		len = strlen("add_mac ");
+		if (strncmp(p, "add_mac ", len) == 0) {
+			p += len;
+
+			if ((p + MVL_MX_ARG_LEN) <= end)
+				p += copy_next_word(arg, p, MVL_MX_ARG_LEN);
+			else
+				p += copy_next_word(arg, p, end - p);
+
+			skip_whitespace(p);
+
+			if (toMacString(mac, arg) < 0) {
+				printk("macvlan:  MAC format is incorrect: %s\n",
+				       arg);
+			}
+			else {
+				/* This can fail, but not sure how to return failure
+				 * to user-space here.
+				 */
+				macvlan_hash_add(vlan->dev->name, mac);
+			}
+			goto forend;
+		}
+
+		len = strlen("remove_mac ");
+		if (strncmp(p, "remove_mac ", len) == 0) {
+			p += len;
+
+			if ((p + MVL_MX_ARG_LEN) <= end)
+				p += copy_next_word(arg, p, MVL_MX_ARG_LEN);
+			else
+				p += copy_next_word(arg, p, end - p);
+
+			skip_whitespace(p);
+
+			if (toMacString(mac, arg) < 0) {
+				printk("macvlan:  MAC format is incorrect: %s\n",
+				       arg);
+			}
+			else {
+				/* This can fail, but not sure how to return failure
+				 * to user-space here.
+				 */
+				macvlan_hash_rem(vlan->dev->name, mac);
+			}
+			goto forend;
+		}
+
+		printk("ERROR:  Unsupported command\n");
+
+	forend:
+		p++;
+	}
+
+	MVL_WRITE_UNLOCK(flags);
+
+	return ret;
+} /* write_mvl */
+
+
+/* /proc read handler for a mac-vlan port: prints the port name, its vlan
+ * list, and a dump of the port's MAC hash table.
+ * NOTE(review): 'off'/'count'/'start' are ignored, like read_mvl_glbl;
+ * assumes output fits in one 4096-byte page -- TODO confirm.
+ */
+static int read_mvl_port(char *page, char **start, off_t off,
+			 int count, int *eof, void *data) {
+	int	ret = -1;
+	char	*p = page;
+	int mx_len = (4096 - (p - page)); /* p == page here, so this is just 4096 */
+	int i;
+	
+	if (! *eof ) {
+		struct macvlan_port* port = (struct macvlan_port*)(data);
+		int cnt;
+		struct macvlan_vlan* vlan;
+		struct macvlan_hash_entry* entry;
+		unsigned long flags;
+
+		MVL_READ_LOCK(flags);
+		
+                /* Global counts here... */
+		p += sprintf(p, "MAC-VLAN Port: %s\n", port->dev->name);
+
+		p += sprintf(p, " vlan count: %i\n", atomic_read(&port->ndevs));
+
+		vlan = port->vlan_list;
+		while (vlan) {
+			p += sprintf(p, " %s\n", vlan->dev->name);
+
+			/* catch overflow */
+			cnt = p - page;
+			if (cnt > (mx_len - 40)) {
+				if (mx_len - cnt >= 20) {
+					p += sprintf(p, "OUT_OF_SPACE!\n");
+				}
+				goto outofspace;
+			}
+
+			vlan = vlan->next;
+		}
+
+		/* MAC addr hash */
+
+		for (i = 0; i<MACVLAN_HASH_LEN; i++) {
+			if (port->hash_table[i]) {
+				/* NOTE(review): this bucket-header sprintf is not
+				 * covered by the overflow check below. */
+				p += sprintf(p, " [%i] ", i);
+				entry = port->hash_table[i];
+				while (entry) {
+					/* catch overflow */
+					cnt = p - page;
+					if (cnt > (mx_len - 40)) {
+						if (mx_len - cnt >= 20) {
+							p += sprintf(p, "OUT_OF_SPACE!\n");
+						}
+						goto outofspace;
+					}
+					
+					p += sprintf(p, " %02hx:%02hx:%02hx:%02hx:%02hx:%02hx",
+						     entry->mac[0], entry->mac[1], entry->mac[2], 
+						     entry->mac[3], entry->mac[4], entry->mac[5]);
+
+					entry = entry->next;
+				}
+				p += sprintf(p, "\n");
+			}
+		}
+		
+	outofspace:
+		ret = p - page;
+		MVL_READ_UNLOCK(flags);
+	}
+	return ret;
+} /* read_mvl_port */
+
+
+/* /proc write handler for a mac-vlan port.
+ * Commands:  "add_vlan <idx>", "set_flags <hex>", "remove_vlan <ifname>".
+ * Always reports the full 'count' as consumed; command failures cannot be
+ * reported to user-space through this interface.
+ *
+ * BUG FIX: in a 2.4 write_proc handler 'buffer' is the raw user-space
+ * pointer; parse a bounded kernel copy obtained with copy_from_user
+ * instead of dereferencing it directly.  Per-command locking is kept:
+ * vlan create/cleanup have internal locking, set_flags takes the lock here.
+ */
+static int write_mvl_port(struct file *file, const char *buffer,
+			  unsigned long count, void *data) {
+	char		kbuf[256];	/* commands are short; excess input is ignored */
+	char		*p;
+	const char	*end;
+	int		ret = count;
+	int		len;
+	int		nbytes;
+	char		arg[IFNAMSIZ];	/* only one argument buffer was ever used */
+	char		*tmps = NULL;
+	struct macvlan_port* port = (struct macvlan_port*)(data);
+	unsigned long	flags;
+
+	nbytes = (count < sizeof(kbuf) - 1) ? (int)count : (int)(sizeof(kbuf) - 1);
+	if (copy_from_user(kbuf, buffer, nbytes))
+		return -EFAULT;
+	kbuf[nbytes] = '\0';
+
+	end = kbuf + nbytes;
+	for (p = kbuf; p < end; ) {
+		if (iswhitespace(*p)) {
+			p++;
+			continue;
+		}
+
+		memset(arg, 0, IFNAMSIZ);
+
+		len = strlen("add_vlan ");
+		if (strncmp(p, "add_vlan ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(arg, p, IFNAMSIZ);
+			else
+				p += copy_next_word(arg, p, end - p);
+
+			skip_whitespace(p);
+
+			/* This can fail, but not sure how to return failure
+			 * to user-space here.
+			 */
+			/* has internal locking */
+			macvlan_vlan_create(port->dev->name,
+					    simple_strtoul(arg, &tmps, 10));
+			goto forend;
+		}
+
+		len = strlen("set_flags ");
+		if (strncmp(p, "set_flags ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(arg, p, IFNAMSIZ);
+			else
+				p += copy_next_word(arg, p, end - p);
+
+			skip_whitespace(p);
+
+			MVL_WRITE_LOCK(flags);
+			macvlan_port_set_flags(port->dev->name,
+					       simple_strtoul(arg, &tmps, 16));
+			MVL_WRITE_UNLOCK(flags);
+			goto forend;
+		}
+
+		len = strlen("remove_vlan ");
+		if (strncmp(p, "remove_vlan ", len) == 0) {
+			p += len;
+
+			if ((p + IFNAMSIZ) <= end)
+				p += copy_next_word(arg, p, IFNAMSIZ);
+			else
+				p += copy_next_word(arg, p, end - p);
+
+			skip_whitespace(p);
+
+			/* Has internal locking */
+			macvlan_vlan_cleanup(arg);
+			goto forend;
+		}
+
+		printk("ERROR:  Unsupported command\n");
+
+	forend:
+		p++;
+	}
+
+	return ret;
+} /* write_mvl_port */
+
+
+#endif
+
+
+/* Module init: install the deviceless-ioctl and rx-demux hooks and create
+ * the /proc/net configuration entry (when procfs support is compiled in).
+ * NOTE(review): failure to create the proc entries is silently ignored --
+ * the module still loads, just without the proc interface.
+ */
+static int __init macvlan_init(void) {
+	printk (KERN_INFO "MAC address based VLAN support Revision: 1.3\n");
+
+	port_list = NULL;
+
+	/* Publish our entry points; macvlan_cleanup() clears them again. */
+	macvlan_ioctl_hook = macvlan_ioctl_deviceless_stub;
+	macvlan_handle_frame_hook = macvlan_handle_frame;
+
+#ifdef MVL_CONFIG_PROC_FS
+
+        mvl_proc_dir = proc_mkdir(MVL_PROC_DIR, proc_net);
+        if (mvl_proc_dir) {
+		mvl_proc_cfg = create_proc_read_entry(MVL_PROC_CFG, S_IRUGO, mvl_proc_dir,
+						      read_mvl_glbl, NULL);
+		if (mvl_proc_cfg) {
+			mvl_proc_cfg->write_proc = write_mvl_glbl;
+		}
+	}
+#endif
+
+	
+	return 0;
+}
+
+/* Module exit: clear the hooks, destroy every remaining port, and remove
+ * the proc entries.
+ * The while loop relies on macvlan_port_cleanup() unlinking the port from
+ * port_list; if cleanup ever failed the loop would spin forever, so a
+ * failure here is treated as fatal.
+ */
+static void macvlan_cleanup(void) {
+	struct macvlan_port *port;
+	
+	macvlan_handle_frame_hook = NULL;
+	macvlan_ioctl_hook = NULL;
+
+	/* destroy all existing ports */
+	while ((port = port_list)) {
+		if (macvlan_port_cleanup(port->dev->name) < 0) {
+			BUG_ON(1);
+		}
+	}
+
+#ifdef MVL_CONFIG_PROC_FS
+	/* Remove the config file before its parent directory. */
+	if (mvl_proc_cfg) {
+		remove_proc_entry(MVL_PROC_CFG, mvl_proc_dir);
+		mvl_proc_cfg = NULL;
+	}
+	if (mvl_proc_dir) {
+		remove_proc_entry(MVL_PROC_DIR, proc_net);
+		mvl_proc_dir = NULL;
+	}
+#endif
+
+}/* macvlan_cleanup */
+
+
+module_init(macvlan_init);
+module_exit(macvlan_cleanup);
+MODULE_LICENSE("GPL");
--- linux-2.4.21/net/macvlan/macvlan.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.21.amds/net/macvlan/macvlan.h	2003-08-13 16:26:08.000000000 -0700
@@ -0,0 +1,104 @@
+/* -*- linux-c -*-
+
+# (C) Copyright 2001-2003
+# Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
+# Re-worked by Ben Greear <greearb@candelatech.com>
+
+*/
+
+#ifndef MACVLAN_KERNEL_H_FILE__
+#define MACVLAN_KERNEL_H_FILE__
+
+
+/* NOTE:  If you change this below, you should probably change macvlan_hash_lookup as
+ * well.  Especially if you make this bigger.  VLAN_BUCKET parenthesizes its
+ * argument and has no trailing ';', so it is safe inside expressions. */
+#define MACVLAN_HASH_LEN 256
+
+#define VLAN_BUCKET(a) (((a)[5]) % MACVLAN_HASH_LEN)
+
+/* This can be made as large as desired, and mainly helps keep bad
+ * IOCTL arguments from taking down the box.
+ */
+#define MAX_MACVLANS_PER_PORT 10000
+
+/* Proc file related */
+#define MVL_MX_ARG_LEN 80
+
+#ifdef CONFIG_PROC_FS
+
+/* To use or not to use the PROC-FS */
+#define MVL_CONFIG_PROC_FS
+
+#endif
+
+
+/*********************************************************/
+/*		       types				 */
+/*********************************************************/
+/* a macvlan_vlan represents an upper layer interface */
+struct macvlan_vlan {
+	struct net_device* dev;
+	struct net_device_stats	statistics;
+	struct macvlan_vlan *next;
+	struct macvlan_port *port;
+	struct net_device *lowerdev;
+	atomic_t nmacs;  /* the number of mac addresses bound to this vlan */
+
+#ifdef MVL_CONFIG_PROC_FS
+        struct proc_dir_entry* proc_ent;
+#endif        
+
+};
+
+struct macvlan_hash_entry {
+	unsigned char mac[ETH_ALEN];    /* the eth hdr source to match.  Can
+					 * match as destination too, see flags in
+					 * macvlan_port.  Cannot match on both. */
+	struct macvlan_vlan *vlan;	/* the vlan target */
+	struct macvlan_hash_entry *next;/* next entry in list (same hash, any dev) */
+};
+
+
+/*
+ * a macvlan_port represents a mux/demux between a mac-
+ * based-vlan enabled ethernet device and vlans
+ * layered on top of it
+ */
+struct macvlan_port {
+	/* MAC to vlan lookup */
+	struct macvlan_hash_entry *hash_table[MACVLAN_HASH_LEN];
+	struct net_device *dev;  /* the mac-based-vlan enabled ethernet device */
+	atomic_t ndevs;    /* number of vlans layered over dev */
+	struct macvlan_vlan *vlan_list; /* list of vlans layered over this port */
+	struct macvlan_port *next;    /* next port */
+	
+#define MVL_FILTER_ON_DEST 0x1	/* 0x1	filter-on-destination (instead of source) */
+	int flags;
+
+#ifdef MVL_CONFIG_PROC_FS
+        struct proc_dir_entry* proc_dir;
+        struct proc_dir_entry* proc_ent;
+#endif        
+
+};
+
+
+#ifdef MVL_CONFIG_PROC_FS
+static int read_mvl_glbl(char *page, char **start, off_t off,
+			 int count, int *eof, void *data);
+static int write_mvl_glbl(struct file *file, const char *buffer,
+			  unsigned long count, void *data);
+static int read_mvl(char *page, char **start, off_t off,
+		    int count, int *eof, void *data);
+static int write_mvl(struct file *file, const char *buffer,
+		     unsigned long count, void *data);
+static int read_mvl_port(char *page, char **start, off_t off,
+			 int count, int *eof, void *data);
+static int write_mvl_port(struct file *file, const char *buffer,
+			  unsigned long count, void *data);
+#endif
+
+
+#endif
+
--- linux-2.4.21/net/packet/af_packet.c	2002-08-02 17:39:46.000000000 -0700
+++ linux-2.4.21.amds/net/packet/af_packet.c	2003-07-30 16:20:41.000000000 -0700
@@ -68,6 +68,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
 
 #ifdef CONFIG_NET_DIVERT
 #include <linux/divert.h>
@@ -1504,6 +1505,20 @@
 #endif				
 			return -ENOPKG;
 
+		case SIOCGIFMACVLAN:
+		case SIOCSIFMACVLAN:
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+#ifdef CONFIG_INET
+#ifdef CONFIG_KMOD
+ 			if (macvlan_ioctl_hook == NULL)
+ 				request_module("macvlan");
+#endif
+			if (macvlan_ioctl_hook != NULL)
+				return macvlan_ioctl_hook(arg);
+#endif
+#endif
+			return -ENOPKG;
+
 		case SIOCGIFDIVERT:
 		case SIOCSIFDIVERT:
 #ifdef CONFIG_NET_DIVERT
--- linux-2.4.21/net/ipv4/arp.c	2002-11-28 15:53:15.000000000 -0800
+++ linux-2.4.21.amds/net/ipv4/arp.c	2003-07-30 16:20:41.000000000 -0700
@@ -1,4 +1,4 @@
-/* linux/net/inet/arp.c
+/* linux/net/inet/arp.c  -*-linux-c-*-
  *
  * Version:	$Id: candela_2.4.21.patch,v 1.4 2003/09/30 21:05:04 greear Exp $
  *
@@ -351,12 +351,22 @@
 	int flag = 0; 
 	/*unsigned long now; */
 
-	if (ip_route_output(&rt, sip, tip, 0, 0) < 0) 
+	if (ip_route_output(&rt, sip, tip, 0, 0) < 0)
 		return 1;
-	if (rt->u.dst.dev != dev) { 
-		NET_INC_STATS_BH(ArpFilter);
-		flag = 1;
-	} 
+        
+	if (rt->u.dst.dev != dev) {
+                if ((dev->priv_flags & IFF_ACCEPT_LOCAL_ADDRS) &&
+                    (rt->u.dst.dev == &loopback_dev))  {
+                        /* OK, we'll let this special case slide, so that we can arp from one
+                         * local interface to another.  This seems to work, but could use some
+                         * review. --Ben
+                         */
+                }
+                else {
+                        NET_INC_STATS_BH(ArpFilter);
+                        flag = 1;
+                }
+        }
 	ip_rt_put(rt); 
 	return flag; 
 } 
--- linux-2.4.21/net/ipv4/fib_frontend.c	2002-08-02 17:39:46.000000000 -0700
+++ linux-2.4.21.amds/net/ipv4/fib_frontend.c	2003-07-30 16:20:41.000000000 -0700
@@ -233,8 +233,17 @@
 
 	if (fib_lookup(&key, &res))
 		goto last_resort;
-	if (res.type != RTN_UNICAST)
-		goto e_inval_res;
+        
+	if (res.type != RTN_UNICAST) {
+                if ((res.type == RTN_LOCAL) &&
+                    (dev->priv_flags & IFF_ACCEPT_LOCAL_ADDRS)) {
+                        /* All is OK */
+                }
+                else {
+                        goto e_inval_res;
+                }
+        }
+        
 	*spec_dst = FIB_RES_PREFSRC(res);
 	fib_combine_itag(itag, &res);
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
--- linux-2.4.21/net/ipv4/tcp_ipv4.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/ipv4/tcp_ipv4.c	2003-07-30 16:20:41.000000000 -0700
@@ -1403,7 +1403,7 @@
 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
 #endif
 
-	/* Never answer to SYNs send to broadcast or multicast */
+	/* Never answer to SYNs sent to broadcast or multicast */
 	if (((struct rtable *)skb->dst)->rt_flags & 
 	    (RTCF_BROADCAST|RTCF_MULTICAST))
 		goto drop; 
--- linux-2.4.21/net/8021q/vlan_dev.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/8021q/vlan_dev.c	2003-08-05 20:38:25.000000000 -0700
@@ -1,18 +1,18 @@
-/*
+/* -*- linux-c -*-
  * INET		802.1Q VLAN
  *		Ethernet-type device handling.
  *
  * Authors:	Ben Greear <greearb@candelatech.com>
- *              Please send support related email to: vlan@scry.wanfear.com
- *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *		Please send support related email to: vlan@scry.wanfear.com
+ *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  * 
- * Fixes:       Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
- *                - reset skb->pkt_type on incoming packets when MAC was changed
- *                - see that changed MAC is saddr for outgoing packets
- *              Oct 20, 2001:  Ard van Breeman:
- *                - Fix MC-list, finally.
- *                - Flush MC-list on VLAN destroy.
- *                
+ * Fixes:	Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ *		  - reset skb->pkt_type on incoming packets when MAC was changed
+ *		  - see that changed MAC is saddr for outgoing packets
+ *		Oct 20, 2001:  Ard van Breeman:
+ *		  - Fix MC-list, finally.
+ *		  - Flush MC-list on VLAN destroy.
+ *		  
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -99,18 +99,18 @@
  *  NOTE:  Should be similar to ethernet/eth.c.
  *
  *  SANITY NOTE:  This method is called when a packet is moving up the stack
- *                towards userland.  To get here, it would have already passed
- *                through the ethernet/eth.c eth_type_trans() method.
+ *		  towards userland.  To get here, it would have already passed
+ *		  through the ethernet/eth.c eth_type_trans() method.
  *  SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
- *                 stored UNALIGNED in the memory.  RISC systems don't like
- *                 such cases very much...
+ *		   stored UNALIGNED in the memory.  RISC systems don't like
+ *		   such cases very much...
  *  SANITY NOTE 2a:  According to Dave Miller & Alexey, it will always be aligned,
- *                 so there doesn't need to be any of the unaligned stuff.  It has
- *                 been commented out now...  --Ben
+ *		   so there doesn't need to be any of the unaligned stuff.  It has
+ *		   been commented out now...  --Ben
  *
  */
 int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type* ptype)
+		  struct packet_type* ptype)
 {
 	unsigned char *rawp = NULL;
 	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
@@ -170,7 +170,7 @@
 		spin_unlock_bh(&vlan_group_lock);
 
 #ifdef VLAN_DEBUG
-		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s  real_dev: %s, skb_dev: %s\n",
+		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s	real_dev: %s, skb_dev: %s\n",
 			__FUNCTION__ skb, dev->name, 
 			VLAN_DEV_INFO(skb->dev)->real_dev->name, 
 			skb->dev->name);
@@ -324,8 +324,8 @@
  *  physical devices.
  */
 int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
-                         unsigned short type, void *daddr, void *saddr,
-                         unsigned len)
+			 unsigned short type, void *daddr, void *saddr,
+			 unsigned len)
 {
 	struct vlan_hdr *vhdr;
 	unsigned short veth_TCI = 0;
@@ -613,7 +613,7 @@
 				dev_put(dev);
 				return 0;
 			} else {
-				printk(KERN_ERR  "%s: flag %i is not valid.\n",
+				printk(KERN_ERR	 "%s: flag %i is not valid.\n",
 					__FUNCTION__, (int)(flag));
 				dev_put(dev);
 				return -EINVAL;
@@ -625,13 +625,66 @@
 			dev_put(dev);
 		}
 	} else {
-		printk(KERN_ERR  "%s: Could not find device: %s\n", 
+		printk(KERN_ERR	 "%s: Could not find device: %s\n", 
 			__FUNCTION__, dev_name);
 	}
 
 	return -EINVAL;
 }
 
+
+int vlan_dev_get_realdev_name(const char *dev_name, char* result)
+{
+	struct net_device *dev = dev_get_by_name(dev_name); /* takes a ref; dropped below */
+	int rv = 0;
+	
+	if (dev) {
+		if (dev->priv_flags & IFF_802_1Q_VLAN) {
+			strncpy(result, VLAN_DEV_INFO(dev)->real_dev->name, 23); /* NOTE(review): magic 23 -- caller must supply >= 23 bytes; IFNAMSIZ(16) names always fit, so the result is NUL-terminated */
+			dev_put(dev);
+			rv = 0;
+		} else {
+			printk(KERN_ERR 
+			       "%s: %s is not a vlan device, priv_flags: %hX.\n",
+			       __FUNCTION__, dev->name, dev->priv_flags);
+			dev_put(dev);
+			rv = -EINVAL;
+		}
+	} else {
+		printk(KERN_ERR	 "%s: Could not find device: %s\n", 
+			__FUNCTION__, dev_name);
+		rv = -ENODEV;
+	}
+
+	return rv;
+}
+
+int vlan_dev_get_vid(const char *dev_name, unsigned short* result)
+{
+	struct net_device *dev = dev_get_by_name(dev_name); /* takes a ref; dropped below */
+	int rv = 0;
+	
+	if (dev) {
+		if (dev->priv_flags & IFF_802_1Q_VLAN) {
+			*result = VLAN_DEV_INFO(dev)->vlan_id; /* 802.1Q VID of this vlan device */
+			dev_put(dev);
+			rv = 0;
+		} else {
+			printk(KERN_ERR 
+			       "%s: %s is not a vlan device, priv_flags: %hX.\n",
+			       __FUNCTION__, dev->name, dev->priv_flags);
+			dev_put(dev);
+			rv = -EINVAL;
+		}
+	} else {
+		printk(KERN_ERR	 "%s: Could not find device: %s\n", 
+			__FUNCTION__, dev_name);
+		rv = -ENODEV;
+	}
+
+	return rv;
+}
+
 int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
 {
 	struct sockaddr *addr = (struct sockaddr *)(addr_struct_p);
@@ -671,7 +724,7 @@
 }
 
 static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
-                                  struct dev_mc_list *dmi2)
+				  struct dev_mc_list *dmi2)
 {
 	return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
 		(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
--- linux-2.4.21/net/8021q/vlan.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21.amds/net/8021q/vlan.c	2003-08-11 16:43:09.000000000 -0700
@@ -1,13 +1,13 @@
-/*
+/* -*- linux-c -*-
  * INET		802.1Q VLAN
  *		Ethernet-type device handling.
  *
  * Authors:	Ben Greear <greearb@candelatech.com>
- *              Please send support related email to: vlan@scry.wanfear.com
- *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *		Please send support related email to: vlan@scry.wanfear.com
+ *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  * 
  * Fixes:
- *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
+ *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
  *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
  *		Correct all the locking - David S. Miller <davem@redhat.com>;
  *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
@@ -173,7 +173,7 @@
 	*pprev = grp->next;
 }
 
-/*  Find the protocol handler.  Assumes VID < VLAN_VID_MASK.
+/*  Find the protocol handler.	Assumes VID < VLAN_VID_MASK.
  *
  * Must be invoked with vlan_group_lock held.
  */
@@ -183,7 +183,7 @@
 	struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
 
 	if (grp)
-                return grp->vlan_devices[VID];
+		return grp->vlan_devices[VID];
 
 	return NULL;
 }
@@ -270,7 +270,7 @@
 		}
 	}
 
-        return ret;
+	return ret;
 }
 
 static int unregister_vlan_device(const char *vlan_IF_name)
@@ -655,17 +655,14 @@
 int vlan_ioctl_handler(unsigned long arg)
 {
 	int err = 0;
+	unsigned short vid = 0;
 	struct vlan_ioctl_args args;
 
-	/* everything here needs root permissions, except aguably the
-	 * hack ioctls for sending packets.  However, I know _I_ don't
-	 * want users running that on my network! --BLG
-	 */
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (copy_from_user(&args, (void*)arg,
-                           sizeof(struct vlan_ioctl_args)))
+			   sizeof(struct vlan_ioctl_args)))
 		return -EFAULT;
 
 	/* Null terminate this sucker, just in case. */
@@ -678,24 +675,32 @@
 
 	switch (args.cmd) {
 	case SET_VLAN_INGRESS_PRIORITY_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		err = vlan_dev_set_ingress_priority(args.device1,
 						    args.u.skb_priority,
 						    args.vlan_qos);
 		break;
 
 	case SET_VLAN_EGRESS_PRIORITY_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		err = vlan_dev_set_egress_priority(args.device1,
 						   args.u.skb_priority,
 						   args.vlan_qos);
 		break;
 
 	case SET_VLAN_FLAG_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		err = vlan_dev_set_vlan_flag(args.device1,
 					     args.u.flag,
 					     args.vlan_qos);
 		break;
 
 	case SET_VLAN_NAME_TYPE_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		if ((args.u.name_type >= 0) &&
 		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
 			vlan_name_type = args.u.name_type;
@@ -705,17 +710,9 @@
 		}
 		break;
 
-		/* TODO:  Figure out how to pass info back...
-		   case GET_VLAN_INGRESS_PRIORITY_IOCTL:
-		   err = vlan_dev_get_ingress_priority(args);
-		   break;
-
-		   case GET_VLAN_EGRESS_PRIORITY_IOCTL:
-		   err = vlan_dev_get_egress_priority(args);
-		   break;
-		*/
-
 	case ADD_VLAN_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		/* we have been given the name of the Ethernet Device we want to
 		 * talk to:  args.dev1	 We also have the
 		 * VLAN ID:  args.u.VID
@@ -728,12 +725,53 @@
 		break;
 
 	case DEL_VLAN_CMD:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		/* Here, the args.dev1 is the actual VLAN we want
 		 * to get rid of.
 		 */
 		err = unregister_vlan_device(args.device1);
 		break;
 
+	case GET_VLAN_INGRESS_PRIORITY_CMD:
+		/* TODO:  Implement
+		err = vlan_dev_get_ingress_priority(args);
+		if (copy_to_user((void*)arg, &args,
+				 sizeof(struct vlan_ioctl_args))) {
+			err = -EFAULT;
+		}
+		*/
+		err = -EINVAL;
+		break;
+
+	case GET_VLAN_EGRESS_PRIORITY_CMD:
+		/* TODO:  Implement
+	err = vlan_dev_get_egress_priority(args.device1, &(args.args));
+		if (copy_to_user((void*)arg, &args,
+				 sizeof(struct vlan_ioctl_args))) {
+			err = -EFAULT;
+		}
+		*/
+		err = -EINVAL;
+		break;
+
+	case GET_VLAN_REALDEV_NAME_CMD:
+		err = vlan_dev_get_realdev_name(args.device1, args.u.device2);
+		if (copy_to_user((void*)arg, &args,
+				 sizeof(struct vlan_ioctl_args))) {
+			err = -EFAULT;
+		}
+		break;
+
+	case GET_VLAN_VID_CMD:
+		err = vlan_dev_get_vid(args.device1, &vid);
+		args.u.VID = vid;
+		if (copy_to_user((void*)arg, &args,
+				 sizeof(struct vlan_ioctl_args))) {
+			err = -EFAULT;
+		}
+		break;
+
 	default:
 		/* pass on to underlying device instead?? */
 		printk(VLAN_DBG "%s: Unknown VLAN CMD: %x \n",
--- linux-2.4.21/net/8021q/vlan.h	2002-08-02 17:39:46.000000000 -0700
+++ linux-2.4.21.amds/net/8021q/vlan.h	2003-08-13 16:29:30.000000000 -0700
@@ -72,6 +72,8 @@
 int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
 int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
 int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
+int vlan_dev_get_realdev_name(const char* dev_name, char* result);
+int vlan_dev_get_vid(const char* dev_name, unsigned short* result);
 void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
 
 #endif /* !(__BEN_VLAN_802_1Q_INC__) */
--- linux-2.4.21/include/linux/if_vlan.h	2002-11-28 15:53:15.000000000 -0800
+++ linux-2.4.21.amds/include/linux/if_vlan.h	2003-08-13 16:27:39.000000000 -0700
@@ -212,7 +212,9 @@
 	GET_VLAN_INGRESS_PRIORITY_CMD,
 	GET_VLAN_EGRESS_PRIORITY_CMD,
 	SET_VLAN_NAME_TYPE_CMD,
-	SET_VLAN_FLAG_CMD
+	SET_VLAN_FLAG_CMD,
+	GET_VLAN_REALDEV_NAME_CMD, /* If this works, you know it's a VLAN device, btw */
+	GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */
 };
 
 enum vlan_name_types {
--- linux-2.4.21/include/linux/ethtool.h	2003-06-13 07:51:38.000000000 -0700
+++ linux-2.4.21.amds/include/linux/ethtool.h	2003-07-30 16:20:41.000000000 -0700
@@ -250,6 +250,12 @@
 	u64	data[0];
 };
 
+/* for dumping net-device statistics */
+struct ethtool_ndstats {
+	u32	cmd;		/* ETHTOOL_GNDSTATS */
+	u8	data[0];        /* holds one struct net_device_stats */
+};
+
 /* CMDs currently supported */
 #define ETHTOOL_GSET		0x00000001 /* Get settings. */
 #define ETHTOOL_SSET		0x00000002 /* Set settings, privileged. */
@@ -281,6 +287,7 @@
 #define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
 #define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
 #define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#define ETHTOOL_GNDSTATS		0x0000001e /* get standard net-device statistics */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
--- linux-2.4.21/Documentation/CodingStyle	2001-09-09 16:40:43.000000000 -0700
+++ linux-2.4.21.amds/Documentation/CodingStyle	2003-08-05 20:51:17.000000000 -0700
@@ -184,6 +184,8 @@
   (interactive)
   (c-mode)
   (c-set-style "K&R")
+  (setq tab-width 8)
+  (setq	indent-tabs-mode t)
   (setq c-basic-offset 8))
 
 This will define the M-x linux-c-mode command.  When hacking on a
--- linux-2.4.21/include/linux/proc_fs.h	2002-08-02 17:39:45.000000000 -0700
+++ linux-2.4.21.amds/include/linux/proc_fs.h	2003-08-13 16:47:29.000000000 -0700
@@ -25,7 +25,8 @@
 /* Finally, the dynamically allocatable proc entries are reserved: */
 
 #define PROC_DYNAMIC_FIRST 4096
-#define PROC_NDYNAMIC      4096
+#define PROC_NDYNAMIC      8192 /* was 4096 previously, but was running out of
+                                 * slots when creating lots of VLANs --Ben */
 
 #define PROC_SUPER_MAGIC 0x9fa0
 
