diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/net/tulip/winbond-840.c linux-2.5/drivers/net/tulip/winbond-840.c
--- bk-linus/drivers/net/tulip/winbond-840.c	2002-11-21 02:17:11.000000000 +0000
+++ linux-2.5/drivers/net/tulip/winbond-840.c	2002-11-21 17:59:57.000000000 +0000
@@ -36,8 +36,10 @@
 		power management.
 		support for big endian descriptors
 			Copyright (C) 2001 Manfred Spraul
-  	* ethtool support (jgarzik)
+	* ethtool support (jgarzik)
 	* Replace some MII-related magic numbers with constants (jgarzik)
+	* OOM handling
+			Copyright (C) 2002 Manfred Spraul
   
 	TODO:
 	* enable pci_power_off
@@ -363,6 +364,7 @@ struct netdev_private {
 	struct w840_rx_desc *rx_head_desc;
 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int oom;
 	unsigned int cur_tx, dirty_tx;
 	unsigned int tx_q_bytes;
 	unsigned int tx_full;				/* The Tx queue is full. */
@@ -378,6 +380,7 @@ static int  mdio_read(struct net_device 
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  netdev_open(struct net_device *dev);
 static int  update_link(struct net_device *dev);
+static void refill_rx(struct net_device *dev);
 static void netdev_timer(unsigned long data);
 static void init_rxtx_rings(struct net_device *dev);
 static void free_rxtx_rings(struct netdev_private *np);
@@ -828,11 +831,40 @@ static inline void update_csr6(struct ne
 		np->mii_if.full_duplex = 1;
 }
 
+static void refill_rx(struct net_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	/* Refill the Rx ring buffers. */
+	np->oom = 0;
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct sk_buff *skb;
+		int entry;
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = dev_alloc_skb(np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break;		/* Better luck next round. */
+			skb->dev = dev;		/* Mark as being used by this device. */
+			np->rx_addr[entry] = pci_map_single(np->pci_dev,
+							skb->tail,
+							skb->len, PCI_DMA_FROMDEVICE);
+			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
+		}
+		wmb();
+		np->rx_ring[entry].status = DescOwn;
+	}
+	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE)
+		np->oom = 1;
+}
+
 static void netdev_timer(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *)data;
 	struct netdev_private *np = dev->priv;
 	long ioaddr = dev->base_addr;
+	long next = 10*HZ;
 
 	if (debug > 2)
 		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
@@ -842,8 +874,16 @@ static void netdev_timer(unsigned long d
 	spin_lock_irq(&np->lock);
 	update_csr6(dev, update_link(dev));
 	spin_unlock_irq(&np->lock);
-	np->timer.expires = jiffies + 10*HZ;
-	add_timer(&np->timer);
+	if (np->oom) {
+		disable_irq(dev->irq);
+		refill_rx(dev);
+		if (np->oom)
+			next = 1;
+		else
+			writel(0, ioaddr + RxStartDemand);
+		enable_irq(dev->irq);
+	}
+	mod_timer(&np->timer, jiffies + next);
 }
 
 static void init_rxtx_rings(struct net_device *dev)
@@ -863,22 +903,9 @@ static void init_rxtx_rings(struct net_d
 	/* Mark the last entry as wrapping the ring. */
 	np->rx_ring[i-1].length |= DescEndRing;
 
-	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
-		np->rx_skbuff[i] = skb;
-		if (skb == NULL)
-			break;
-		skb->dev = dev;			/* Mark as being used by this device. */
-		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
-					skb->len,PCI_DMA_FROMDEVICE);
-
-		np->rx_ring[i].buffer1 = np->rx_addr[i];
-		np->rx_ring[i].status = DescOwn;
-	}
-
-	np->cur_rx = 0;
-	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+	np->cur_rx = RX_RING_SIZE;
+	np->dirty_rx = 0;
+	refill_rx(dev);
 
 	/* Initialize the Tx descriptors */
 	for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1190,8 +1217,9 @@ static void intr_handler(int irq, void *
 
 		if (intr_status & (IntrRxDone | RxNoBuf))
 			netdev_rx(dev);
-		if (intr_status & RxNoBuf)
-			writel(0, ioaddr + RxStartDemand);
+		/* RxNoBuf is an error interrupt; RxStartDemand is
+		 * kicked from netdev_error() instead.
+		 */
 
 		if (intr_status & (TxIdle | IntrTxDone) &&
 			np->cur_tx != np->dirty_tx) {
@@ -1323,25 +1351,7 @@ static int netdev_rx(struct net_device *
 		entry = (++np->cur_rx) % RX_RING_SIZE;
 		np->rx_head_desc = &np->rx_ring[entry];
 	}
-
-	/* Refill the Rx ring buffers. */
-	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
-		struct sk_buff *skb;
-		entry = np->dirty_rx % RX_RING_SIZE;
-		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
-			np->rx_skbuff[entry] = skb;
-			if (skb == NULL)
-				break;			/* Better luck next round. */
-			skb->dev = dev;			/* Mark as being used by this device. */
-			np->rx_addr[entry] = pci_map_single(np->pci_dev,
-							skb->tail,
-							skb->len, PCI_DMA_FROMDEVICE);
-			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
-		}
-		wmb();
-		np->rx_ring[entry].status = DescOwn;
-	}
+	refill_rx(dev);
 
 	return 0;
 }
@@ -1385,8 +1395,14 @@ static void netdev_error(struct net_devi
 		if (netif_device_present(dev))
 			writel(0x1A0F5, ioaddr + IntrEnable);
 	}
+	/* Strictly only needed for RxNoBuf, but kicking
+	 * RxStartDemand too often does not hurt.
+	 */
+	if (np->oom)
+		mod_timer(&np->timer, jiffies + 1);
+	else
+		writel(0, ioaddr + RxStartDemand);
 	np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
-	writel(0, ioaddr + RxStartDemand);
 	spin_unlock(&np->lock);
 }
 
@@ -1567,6 +1583,7 @@ static int netdev_close(struct net_devic
 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
 	}
 
+	del_timer_sync(&np->timer);
  	/* Stop the chip's Tx and Rx processes. */
 	spin_lock_irq(&np->lock);
 	netif_device_detach(dev);
@@ -1601,8 +1618,6 @@ static int netdev_close(struct net_devic
 	}
 #endif /* __i386__ debugging only */
 
-	del_timer_sync(&np->timer);
-
 	free_rxtx_rings(np);
 	free_ringdesc(np);
 

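For reference, a minimal standalone sketch of the refill-with-retry pattern this patch introduces: producer/consumer indices that only ever grow, an oom flag that is set when a refill pass leaves the ring completely empty, and a retry path (the driver re-runs the refill from its media timer) that resumes exactly where the failed pass stopped. This is userspace illustration only, not driver code; RING_SIZE, alloc_buf() and the retry loop in main() are stand-ins for RX_RING_SIZE, dev_alloc_skb() and netdev_timer().

/*
 * Standalone sketch: models refill_rx()'s bookkeeping, not the hardware
 * descriptor handling.  alloc_buf() stands in for dev_alloc_skb() and
 * fails randomly to simulate memory pressure.
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 32

static void *ring[RING_SIZE];
static unsigned int cur_rx = RING_SIZE;	/* slots handed back by the consumer */
static unsigned int dirty_rx;		/* next slot to refill */
static int oom;

static void *alloc_buf(void)
{
	return (rand() % 4) ? malloc(64) : NULL;	/* fail ~25% of the time */
}

static void refill_rx(void)
{
	oom = 0;
	for (; cur_rx - dirty_rx > 0; dirty_rx++) {
		unsigned int entry = dirty_rx % RING_SIZE;

		if (ring[entry] == NULL) {
			ring[entry] = alloc_buf();
			if (ring[entry] == NULL)
				break;	/* better luck next round */
		}
		/* the real driver re-arms the descriptor (DescOwn) here */
	}
	/* only flag OOM when nothing at all could be refilled */
	if (cur_rx - dirty_rx == RING_SIZE)
		oom = 1;
}

int main(void)
{
	int retries = 0;

	refill_rx();
	while (oom) {		/* models the timer re-running the refill */
		retries++;
		refill_rx();
	}
	printf("ring refilled, %d timer retries needed\n", retries);

	for (unsigned int i = 0; i < RING_SIZE; i++)
		free(ring[i]);
	return 0;
}

The property mirrored from the driver is that dirty_rx is never rewound: a partial refill simply leaves the gap for the next pass, and the timer-based retry only kicks in when every slot is still empty (np->oom).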