diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/Documentation/networking/shaper.txt linux/Documentation/networking/shaper.txt
--- linux.vanilla/Documentation/networking/shaper.txt	Thu Jan  1 01:00:00 1970
+++ linux/Documentation/networking/shaper.txt	Sun Aug 18 17:41:12 1996
@@ -0,0 +1,49 @@
+Traffic Shaper For Linux
+
+This is the current ALPHA release of the traffic shaper for Linux. It works
+within the following limits:
+
+o	Minimum shaping speed is currently about 9600 bits/second (it can
+only shape down to 1 byte per clock tick)
+
+o	Maximum is about 256Kbit/second; it will go above this but gets a bit blocky.
+
+o	If you ifconfig down the master device that a shaper is attached to,
+your machine will follow it down.
+
+o	The shaper must be a module.
+
+
+Setup:
+
+	A shaper device is configured using the shapecfg program.
+Typically you will do something like this:
+
+shapecfg attach shaper0 eth1
+shapecfg speed shaper0 64000
+ifconfig shaper0 myhost netmask 255.255.255.240 broadcast 1.2.3.255 up
+route add -net some.network netmask a.b.c.d dev shaper0
+
+The shaper should have the same IP address as the device it is attached to
+for normal use.
+
+Gotchas:
+
+	The shaper shapes transmitted traffic. It's rather impossible to
+shape received traffic except at the far end (or at a router) transmitting it.
+
+	Gated/routed/rwhod/mrouted all see the shaper as an additional device
+and will treat it as such unless patched. Note that for mrouted you can run
+mrouted tunnels via a traffic shaper to control bandwidth usage.
+
+	The shaper is device/route based. This makes it very easy to use
+with any setup BUT less flexible. You may well want to combine this patch
+with Mike McLagan <mmclagan@linux.org>'s patch to allow routes to be
+specified by source/destination pairs.
+
+	There is no "borrowing" or "sharing" scheme. This is a simple
+traffic limiter. I'd like to implement Van Jacobson and Sally Floyd's CBQ
+architecture in Linux one day (maybe in 2.1 sometime) and do this with
+style.
+
+Alan
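
The speed limits above follow from the bytes-per-tick arithmetic the driver
uses (see shaper_setspeed() and shaper_clocks() in shaper.c below). As a
rough illustration only - this standalone program is not part of the patch
and assumes a 100Hz clock tick - the resolution works out like this:

	#include <stdio.h>

	#define HZ 100	/* assumed i386 tick rate */

	int main(void)
	{
		long speeds[] = { 9600, 28800, 64000, 256000 };
		int i;

		for (i = 0; i < 4; i++) {
			/* shaper_setspeed(): bytes the link may carry per tick */
			long bpt = (speeds[i] / HZ) / 8;
			if (bpt == 0)
				bpt = 1;	/* driver floors this at one byte per tick */
			/* shaper_clocks(): a frame occupies len/bpt ticks */
			printf("%6ld bps -> %3ld bytes/tick, 1500 byte frame = %3ld ticks\n",
				speeds[i], bpt, 1500 / bpt);
		}
		return 0;
	}

At 256Kbit/second the resolution is already 320 bytes per tick, which is
where the blockiness noted above comes from.
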
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/drivers/net/Config.in linux/drivers/net/Config.in
--- linux.vanilla/drivers/net/Config.in	Sun Jul 28 13:33:32 1996
+++ linux/drivers/net/Config.in	Sat Aug 17 20:55:02 1996
@@ -10,6 +10,7 @@
     int '  Max DLCI per device' CONFIG_DLCI_MAX 8
     dep_tristate '  SDLA (Sangoma S502/S508) support' CONFIG_SDLA $CONFIG_DLCI
   fi
+  tristate 'Traffic Shaper (EXPERIMENTAL)' CONFIG_SHAPER
 fi
 tristate 'PLIP (parallel port) support' CONFIG_PLIP
 tristate 'PPP (point-to-point) support' CONFIG_PPP
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/drivers/net/Makefile linux/drivers/net/Makefile
--- linux.vanilla/drivers/net/Makefile	Sun Jul 28 13:33:26 1996
+++ linux/drivers/net/Makefile	Sat Aug 17 20:54:14 1996
@@ -47,6 +47,14 @@
   endif
 endif
 
+ifeq ($(CONFIG_SHAPER),y)
+L_OBJS += shaper.o
+else
+  ifeq ($(CONFIG_SHAPER),m)
+  M_OBJS += shaper.o
+  endif
+endif
+
 ifeq ($(CONFIG_SK_G16),y)
 L_OBJS += sk_g16.o
 endif
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/drivers/net/shaper.c linux/drivers/net/shaper.c
--- linux.vanilla/drivers/net/shaper.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/net/shaper.c	Sun Aug 18 17:38:51 1996
@@ -0,0 +1,633 @@
+/*
+ *	Simple traffic shaper for Linux NET3.
+ *	(c) Copyright 1996 Alan Cox <alan@cymru.net>, All Rights Reserved.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *	
+ *	Algorithm:
+ *
+ *	Queue Frame:
+ *		Compute time length of frame at regulated speed
+ *		Add frame to queue at appropriate point
+ *		Adjust time length computation for followup frames
+ *		Any frame that falls outside of its boundaries is freed
+ *
+ *	We work to the following constants
+ *
+ *		SHAPER_QLEN	Maximum queued frames
+ *		SHAPER_LATENCY	Bounding latency on a frame. A frame that slips
+ *				outside this window is dropped. This stops us
+ *				queueing frames for a long time and confusing a
+ *				remote host.
+ *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
+ *				That bounds the penalty we will inflict on low
+ *				priority traffic.
+ *		SHAPER_BURST	Time range we treat as "now" in order to reduce
+ *				system load. The larger this is, the burstier
+ *				the behaviour: local performance improves via
+ *				packet clustering on routers, but the remote
+ *				end finds it harder to judge round trip times.
+ *
+ *	This is designed to handle lower speed links ( < 200K/second or so). We
+ *	run off a 100-150Hz base clock typically. This gives us a resolution at
+ *	200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
+ *	resolution may start to cause excessive burstiness in the traffic. We
+ *	could avoid a lot of that by calling shaper_kick() at the end of the
+ *	tied device transmissions.
+ */
+ 
+ 
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/modversions.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include "shaper.h"
+
+int sh_debug;		/* Debug flag */
+
+/*
+ *	Locking
+ */
+ 
+static int shaper_lock(struct shaper *sh)
+{
+	unsigned long flags;
+	save_flags(flags);
+	cli();
+	/*
+	 *	Lock in an interrupt may fail
+	 */
+	if(sh->locked && intr_count)
+	{
+		restore_flags(flags);
+		return 0;
+	}
+	while(sh->locked)
+		sleep_on(&sh->wait_queue);
+	sh->locked=1;
+	restore_flags(flags);
+	return 1;
+}
+
+static void shaper_kick(struct shaper *sh);
+
+static void shaper_unlock(struct shaper *sh)
+{
+	sh->locked=0;
+	wake_up(&sh->wait_queue);
+	shaper_kick(sh);
+}
+
+/*
+ *	Compute clocks on a buffer
+ */
+  
+static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
+{
+ 	int t=skb->len/shaper->bytespertick;
+ 	return t;
+}
+
+/*
+ *	Set the speed of a shaper. We compute this in bytes per tick since
+ *	that's how the machine wants to run. Quoted input is in bits per second
+ *	as is traditional (note not BAUD). We assume 8 bit bytes. 
+ */
+  
+static void shaper_setspeed(struct shaper *shaper, int bitspersec)
+{
+	shaper->bytespertick=(bitspersec/HZ)/8;
+	if(!shaper->bytespertick)
+		shaper->bytespertick++;
+}
+
+/*
+ *	Throw a frame at a shaper.
+ */
+  
+static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
+{
+ 	struct sk_buff *ptr;
+ 	
+ 	/*
+ 	 *	Get ready to work on this shaper. Lock may fail if it's
+ 	 *	an interrupt and locked.
+ 	 */
+ 	 
+ 	if(!shaper_lock(shaper))
+ 		return -1;
+ 	ptr=shaper->sendq.prev;
+ 	
+ 	/*
+ 	 *	Set up our packet details
+ 	 */
+ 	 
+ 	skb->shapelatency=0;
+ 	skb->shapeclock=shaper->recovery;
+ 	if(skb->shapeclock<jiffies)
+ 		skb->shapeclock=jiffies;
+ 	skb->pri=0;	/* short term bug fix */
+ 	skb->shapestamp=jiffies;
+ 	
+ 	/*
+ 	 *	Time slots for this packet.
+ 	 */
+ 	 
+ 	skb->shapelen= shaper_clocks(shaper,skb);
+ 	
+#ifdef SHAPER_COMPLEX /* and broken.. */
+
+ 	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
+ 	{
+ 		if(ptr->pri<skb->pri 
+ 			&& jiffies - ptr->shapeclock < SHAPER_MAXSLIP)
+ 		{
+ 			struct sk_buff *tmp=ptr->prev;
+
+ 			/*
+ 			 *	It goes before us, so we slip back by the
+ 			 *	length of the new frame.
+ 			 */
+
+ 			ptr->shapeclock+=skb->shapelen;
+ 			ptr->shapelatency+=skb->shapelen;
+
+ 			/*
+ 			 *	The packet may have slipped so far back it
+ 			 *	fell off.
+ 			 */
+ 			if(ptr->shapelatency > SHAPER_LATENCY)
+ 			{
+ 				skb_unlink(ptr);
+ 				dev_kfree_skb(ptr, FREE_WRITE);
+ 			}
+ 			ptr=tmp;
+ 		}
+ 		else
+ 			break;
+ 	}
+ 	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
+ 		skb_queue_head(&shaper->sendq,skb);
+ 	else
+ 	{
+ 		struct sk_buff *tmp;
+ 		/*
+ 		 *	Set the packet clock out time according to the
+ 		 *	frames ahead of us. I'm sure a bit of thought could drop
+ 		 *	this loop.
+ 		 */
+ 		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
+ 			skb->shapeclock+=tmp->shapelen;
+ 		skb_append(ptr,skb);
+ 	}
+#else
+	{
+		struct sk_buff *tmp;
+		/*
+		 *	Up our shape clock by the time pending on the queue
+		 *	(Should keep this in the shaper as a variable..)
+		 */
+		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && 
+			tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
+			skb->shapeclock+=tmp->shapelen;
+		/*
+		 *	Queue over time. Spill packet.
+		 */
+		if(skb->shapeclock-jiffies > SHAPER_LATENCY)
+			dev_kfree_skb(skb, FREE_WRITE);
+		else
+			skb_queue_tail(&shaper->sendq, skb);
+	}
+#endif 	
+ 	if(sh_debug)
+ 		printk("Frame queued.\n");
+ 	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
+ 	{
+ 		ptr=skb_dequeue(&shaper->sendq);
+ 		dev_kfree_skb(ptr, FREE_WRITE);
+ 	}
+ 	shaper_unlock(shaper);
+ 	shaper_kick(shaper);
+ 	return 0;
+}
+
+/*
+ *	Transmit from a shaper
+ */
+ 
+static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
+{
+	struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
+	if(sh_debug)
+		printk("Kick frame on %p\n",newskb);
+	if(newskb)
+	{
+		newskb->dev=shaper->dev;
+		newskb->free=1;
+		newskb->arp=1;
+		newskb->pri=2;
+		if(sh_debug)
+			printk("Kick new frame to %s, %d\n",
+				shaper->dev->name,newskb->pri);
+		dev_queue_xmit(newskb, shaper->dev, newskb->pri);
+		if(sh_debug)
+			printk("Kicked new frame out.\n");
+		dev_kfree_skb(skb, FREE_WRITE);
+	}
+}
+
+/*
+ *	Timer handler for shaping clock
+ */
+ 
+static void shaper_timer(unsigned long data)
+{
+	struct shaper *sh=(struct shaper *)data;
+	shaper_kick(sh);
+}
+
+/*
+ *	Kick a shaper queue and try and do something sensible with the 
+ *	queue. 
+ */
+
+static void shaper_kick(struct shaper *shaper)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	
+	save_flags(flags);
+	cli();
+
+	del_timer(&shaper->timer);
+
+	/*
+	 *	Shaper unlock will kick
+	 */
+	 
+	if(shaper->locked)
+	{	
+		if(sh_debug)
+			printk("Shaper locked.\n");
+		shaper->timer.expires=jiffies+1;
+		add_timer(&shaper->timer);
+		restore_flags(flags);
+		return;
+	}
+
+		
+	/*
+	 *	Walk the list (may be empty)
+	 */
+	 
+	while((skb=skb_peek(&shaper->sendq))!=NULL)
+	{
+		/*
+		 *	Each packet due to go out by now (within an error
+		 *	of SHAPER_BURST) gets kicked onto the link 
+		 */
+		 
+		if(sh_debug)
+			printk("Clock = %u, jiffies = %lu\n", skb->shapeclock, jiffies);
+		if(skb->shapeclock <= jiffies + SHAPER_BURST)
+		{
+			/*
+			 *	Pull the frame and get interrupts back on.
+			 */
+			 
+			skb_unlink(skb);
+			shaper->recovery=jiffies+skb->shapelen;
+			restore_flags(flags);
+
+			/*
+			 *	Pass on to the physical target device via
+			 *	our low level packet thrower.
+			 */
+			
+			skb->shapepend=0;
+			shaper_queue_xmit(shaper, skb);	/* Fire */
+			cli();
+		}
+		else
+			break;
+	}
+
+	/*
+	 *	Next kick.
+	 */
+	 
+	if(skb!=NULL)
+	{
+		del_timer(&shaper->timer);
+		shaper->timer.expires=skb->shapeclock;
+		add_timer(&shaper->timer);
+	}
+		
+	/*
+	 *	Interrupts on, mission complete
+	 */
+		
+	restore_flags(flags);
+}
+
+
+/*
+ *	Flush the shaper queues on a closedown
+ */
+ 
+static void shaper_flush(struct shaper *shaper)
+{
+	struct sk_buff *skb;
+	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+		dev_kfree_skb(skb, FREE_WRITE);
+}
+
+/*
+ *	Bring the interface up. We just disallow this until a 
+ *	bind.
+ */
+
+static int shaper_open(struct device *dev)
+{
+	struct shaper *shaper=dev->priv;
+	
+	/*
+	 *	Can't open until attached.
+	 */
+	 
+	if(shaper->dev==NULL)
+		return -ENODEV;
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/*
+ *	Closing a shaper flushes the queues.
+ */
+ 
+static int shaper_close(struct device *dev)
+{
+	struct shaper *shaper=dev->priv;
+	shaper_flush(shaper);
+	del_timer(&shaper->timer);
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/*
+ *	Revectored calls. We alter the parameters and call the functions
+ *	for our attached device. This enables us to allocate bandwidth after
+ *	ARP and other resolutions, not before.
+ */
+
+
+static int shaper_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+	struct shaper *sh=dev->priv;
+	return shaper_qframe(sh, skb);
+}
+
+static struct enet_statistics *shaper_get_stats(struct device *dev)
+{
+	return NULL;
+}
+
+static int shaper_header(struct sk_buff *skb, struct device *dev, 
+	unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+	struct shaper *sh=dev->priv;
+	if(sh_debug)
+		printk("Shaper header\n");
+	return sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
+}
+
+static int shaper_rebuild_header(void *buff, struct device *dev, unsigned long dst,
+	struct sk_buff *skb)
+{
+	struct shaper *sh=dev->priv;
+	if(sh_debug)
+		printk("Shaper rebuild header\n");
+	return sh->rebuild_header(buff, sh->dev, dst, skb);
+}
+
+static void shaper_cache_bind(struct hh_cache **hhp, struct device *dev,
+	unsigned short htype, __u32 daddr)
+{
+	struct shaper *sh=dev->priv;
+	if(sh_debug)
+		printk("Shaper header cache bind\n");
+	return sh->header_cache_bind(hhp,sh->dev,htype,daddr);
+}
+
+static void shaper_cache_update(struct hh_cache *hh, struct device *dev,
+	unsigned char *haddr)
+{
+	struct shaper *sh=dev->priv;
+	if(sh_debug)
+		printk("Shaper cache update\n");
+	return sh->header_cache_update(hh, sh->dev, haddr);
+}
+
+static int shaper_attach(struct device *shdev, struct shaper *sh, struct device *dev)
+{
+	sh->dev = dev;
+	sh->hard_start_xmit=dev->hard_start_xmit;
+	sh->get_stats=dev->get_stats;
+	if(dev->hard_header)
+	{
+		sh->hard_header=dev->hard_header;
+		shdev->hard_header = shaper_header;
+	}
+	else
+		shdev->hard_header = NULL;
+		
+	if(dev->rebuild_header)
+	{
+		sh->rebuild_header	= dev->rebuild_header;
+		shdev->rebuild_header	= shaper_rebuild_header;
+	}
+	else
+		shdev->rebuild_header	= NULL;
+	
+	if(dev->header_cache_bind)
+	{
+		sh->header_cache_bind	= dev->header_cache_bind;
+		shdev->header_cache_bind= shaper_cache_bind;
+	}
+	else
+		shdev->header_cache_bind= NULL;
+		
+	if(dev->header_cache_update)
+	{
+		sh->header_cache_update	= dev->header_cache_update;
+		shdev->header_cache_update = shaper_cache_update;
+	}
+	else
+		shdev->header_cache_update= NULL;
+	
+	shdev->hard_header_len=dev->hard_header_len;
+	shdev->type=dev->type;
+	shdev->addr_len=dev->addr_len;
+	shdev->mtu=dev->mtu;
+	return 0;
+}
+
+static int shaper_ioctl(struct device *dev,  struct ifreq *ifr, int cmd)
+{
+	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_data;
+	struct shaper *sh=dev->priv;
+	struct device *them=dev_get(ss->ss_name);
+	switch(ss->ss_cmd)
+	{
+		case SHAPER_SET_DEV:
+			if(them==NULL)
+				return -ENODEV;
+			if(sh->dev)
+				return -EBUSY;
+			return shaper_attach(dev,dev->priv, them);
+		case SHAPER_SET_SPEED:
+			shaper_setspeed(sh,ss->ss_speed);
+			return 0;
+		default:
+			return -EINVAL;
+	}
+}
+
+static struct shaper *shaper_alloc(struct device *dev)
+{
+	struct shaper *sh=kmalloc(sizeof(struct shaper), GFP_KERNEL);
+	if(sh==NULL)
+		return NULL;
+	memset(sh,0,sizeof(*sh));
+	skb_queue_head_init(&sh->sendq);
+	init_timer(&sh->timer);
+	sh->timer.function=shaper_timer;
+	sh->timer.data=(unsigned long)sh;
+	return sh;
+}
+
+/*
+ *	Add a shaper device to the system
+ */
+ 
+int shaper_probe(struct device *dev)
+{
+	int i;
+	
+	/*
+	 *	Set up the shaper.
+	 */
+	
+	dev->priv = shaper_alloc(dev);
+	if(dev->priv==NULL)
+		return -ENOMEM;
+		
+	dev->open		= shaper_open;
+	dev->stop		= shaper_close;
+	dev->hard_start_xmit 	= shaper_start_xmit;
+	dev->get_stats 		= shaper_get_stats;
+	dev->set_multicast_list = NULL;
+	
+	/*
+	 *	Initialise the packet queues
+	 */
+	 
+	for(i=0;i<DEV_NUMBUFFS;i++)
+		skb_queue_head_init(&dev->buffs[i]);
+	
+	/*
+	 *	Handlers for when we attach to a device.
+	 */
+
+	dev->hard_header 	= shaper_header;
+	dev->rebuild_header 	= shaper_rebuild_header;
+	dev->header_cache_bind	= shaper_cache_bind;
+	dev->header_cache_update= shaper_cache_update;
+	dev->do_ioctl		= shaper_ioctl;
+	dev->hard_header_len	= 0;
+	dev->type		= ARPHRD_ETHER;	/* initially */
+	dev->set_mac_address	= NULL;
+	dev->mtu		= 1500;
+	dev->addr_len		= 0;
+	dev->tx_queue_len	= 10;
+	dev->flags		= 0;
+	dev->family		= AF_INET;
+	dev->pa_addr		= 0;
+	dev->pa_brdaddr		= 0;
+	dev->pa_mask		= 0;
+	dev->pa_alen		= 4;
+		
+	/*
+	 *	Shaper is ok
+	 */	
+	 
+	return 0;
+}
+ 
+#ifdef MODULE
+
+static char devicename[9];
+
+static struct device dev_shape = 
+{
+	devicename,
+	0, 0, 0, 0,
+	0, 0,
+	0, 0, 0, NULL, shaper_probe 
+};
+
+int init_module(void)
+{
+	int i;
+	for(i=0;i<99;i++)
+	{
+		sprintf(devicename,"shaper%d",i);
+		if(dev_get(devicename)==NULL)
+			break;
+	}
+	if(i==99)
+		return -ENFILE;
+	
+	printk("Traffic Shaper For Linux v0.01 (ALPHA #15)\n");	
+	if (register_netdev(&dev_shape) != 0)
+		return -EIO;
+	printk("Traffic shaper initialised.\n");
+	return 0;
+}
+
+void cleanup_module(void)
+{
+	/*
+	 *	No need to check MOD_IN_USE, as sys_delete_module() checks.
+	 *	To be unloadable we must be closed and detached so we don't
+	 *	need to flush things.
+	 */
+	 
+	unregister_netdev(&dev_shape);
+
+	/*
+	 *	Free up the private structure, or leak memory :-) 
+	 */
+	 
+	kfree(dev_shape.priv);
+	dev_shape.priv = NULL;
+}
+
+#else
+#error "Module only for now"
+#endif /* MODULE */
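
The shapecfg tool referred to in the documentation is not included in this
patch. Purely as a guess at its shape, based on shaper_ioctl() above (which
reads a struct shaperconf straight out of the ifreq private data area) and
assuming the usual SIOCDEVPRIVATE convention for dev->do_ioctl handlers, a
minimal userspace equivalent of "attach" and "speed" might look like this:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/types.h>
	#include <linux/sockios.h>
	#include "shaper.h"	/* struct shaperconf, SHAPER_SET_* */

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct ifreq ifr;
		/* shaper_ioctl() treats the ifreq private data area itself as
		   a struct shaperconf, so build the request in place. */
		struct shaperconf *sc = (struct shaperconf *)&ifr.ifr_data;

		/* "shapecfg attach shaper0 eth1" */
		memset(&ifr, 0, sizeof(ifr));
		strcpy(ifr.ifr_name, "shaper0");
		sc->ss_cmd = SHAPER_SET_DEV;
		strcpy(sc->ss_name, "eth1");
		if (ioctl(fd, SIOCDEVPRIVATE, &ifr) < 0)
			perror("attach");

		/* "shapecfg speed shaper0 64000" */
		memset(&ifr, 0, sizeof(ifr));
		strcpy(ifr.ifr_name, "shaper0");
		sc->ss_cmd = SHAPER_SET_SPEED;
		sc->ss_speed = 64000;
		if (ioctl(fd, SIOCDEVPRIVATE, &ifr) < 0)
			perror("speed");

		return 0;
	}

The ifconfig and route steps from the documentation are unchanged; this only
covers the two private ioctls the driver understands.
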
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/drivers/net/shaper.h linux/drivers/net/shaper.h
--- linux.vanilla/drivers/net/shaper.h	Thu Jan  1 01:00:00 1970
+++ linux/drivers/net/shaper.h	Sun Aug 18 16:39:58 1996
@@ -0,0 +1,61 @@
+#ifndef __LINUX_SHAPER_H
+#define __LINUX_SHAPER_H
+
+#ifdef __KERNEL__
+
+#define SHAPER_QLEN	10
+/*
+ *	This is a bit speed dependent (read: it shouldn't be a constant!)
+ *
+ *	5 is about right for 28.8 upwards. Below that, double it for every
+ *	halving of speed or so - i.e. about 20 for 9600 bps.
+ */
+#define SHAPER_LATENCY	(5*HZ)
+#define SHAPER_MAXSLIP	2
+#define SHAPER_BURST	(HZ/10)
+
+struct shaper
+{
+	struct sk_buff_head sendq;
+	__u32 bytespertick;
+	__u32 shapelatency;
+	__u32 shapeclock;
+	__u32 recovery;		/* Time we can next clock a packet out on
+				   an empty queue */
+	char locked;
+	struct device *dev;
+	int  (*hard_start_xmit) (struct sk_buff *skb,
+		struct device *dev);
+	int  (*hard_header) (struct sk_buff *skb,
+		struct device *dev,
+		unsigned short type,
+		void *daddr,
+		void *saddr,
+		unsigned len);
+	int  (*rebuild_header)(void *eth, struct device *dev,
+		unsigned long raddr, struct sk_buff *skb);
+	void (*header_cache_bind)(struct hh_cache **hhp, struct device *dev, unsigned short htype, __u32 daddr);
+	void (*header_cache_update)(struct hh_cache *hh, struct device *dev, unsigned char *  haddr);
+	struct enet_statistics* (*get_stats)(struct device *dev);
+	struct wait_queue *wait_queue;
+	struct timer_list timer;
+};
+
+#endif
+
+#define SHAPER_SET_DEV		0x0001
+#define SHAPER_SET_SPEED	0x0002
+
+struct shaperconf
+{
+	__u16	ss_cmd;
+	union
+	{
+		char 	ssu_name[14];
+		__u32	ssu_speed;
+	} ss_u;
+#define ss_speed ss_u.ssu_speed
+#define ss_name ss_u.ssu_name
+};
+
+#endif
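
The comment above notes that SHAPER_LATENCY really ought to scale with the
configured speed rather than being a constant. As a sketch of that rule of
thumb only - nothing in the patch implements this - a speed-dependent bound
might look like:

	/*
	 * 5 seconds for 28.8 and faster links, doubling for each halving
	 * of the link speed below that, capped at a minute.
	 */
	static unsigned long shaper_latency_for(int bitspersec)
	{
		unsigned long secs = 5;
		long speed = 28800;

		while (speed > bitspersec && secs < 60) {
			secs *= 2;	/* double per halving of speed */
			speed /= 2;
		}
		return secs * HZ;	/* SHAPER_LATENCY is in clock ticks */
	}

For 9600 bps this gives 20 seconds, which matches the figure quoted above.
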
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/include/linux/skbuff.h linux/include/linux/skbuff.h
--- linux.vanilla/include/linux/skbuff.h	Sun Jul 28 13:32:54 1996
+++ linux/include/linux/skbuff.h	Sat Aug 17 23:27:11 1996
@@ -112,6 +112,14 @@
 	unsigned char 	*end;			/* End pointer					*/
 	void 		(*destructor)(struct sk_buff *);	/* Destruct function		*/
 	__u16		redirport;		/* Redirect port				*/
+#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
+	__u32		shapelatency;		/* Latency on frame */
+	__u32		shapeclock;		/* Time it should go out */
+	__u32		shapelen;		/* Frame length in clocks */
+	__u32		shapestamp;		/* Stamp for shaper    */
+	__u16		shapepend;		/* Pending */
+	__u16		pri;			/* Frame priority 	*/
+#endif
 };
 
 #ifdef CONFIG_SKB_LARGE
diff --unified --recursive --new-file --exclude-from exclude linux.vanilla/net/core/dev.c linux/net/core/dev.c
--- linux.vanilla/net/core/dev.c	Sat Aug 17 18:08:39 1996
+++ linux/net/core/dev.c	Sat Aug 17 21:02:56 1996
@@ -358,6 +358,10 @@
 		pri = -pri-1;
 		retransmission = 1;
   	}
+#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
+	else
+		skb->pri=pri;
+#endif		
 
 #ifdef CONFIG_NET_DEBUG
 	if (pri >= DEV_NUMBUFFS) 
