Oracle bug 12740042 — netconsole over bonding.
2012-01-13: patch written by Tina Yang.
2012-05-29: tweaked for the OL5U8 port.

Ported:

http://ca-svn/viewvc/kernel/tags-5/kernel-2.6.18-269.0.0.0.1.el5.bug12740042/linux-2.6.18-netconsole-over-bonding.patch?view=markup&pathrev=12038

To the following, without changes:

http://ca-build32.us.oracle.com/svn/repos/rhel4/kernel/tags-5/kernel-2.6.18-303.0.0.0.1.el5/linux-2.6.18-netconsole-over-bonding.patch

--- a/drivers/net/bonding/bond_3ad.c.orig	2011-07-15 18:28:49.108468000 -0700
+++ a/drivers/net/bonding/bond_3ad.c	2011-07-15 18:38:19.090002000 -0700
@@ -2375,13 +2375,6 @@ int bond_3ad_xmit_xor(struct sk_buff *sk
 	struct ad_info ad_info;
 	int res = 1;
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later tx
-	 */
-	if (is_netpoll_tx_blocked(dev))
-		return NETDEV_TX_BUSY;
-
 	/* make sure that the slaves list will
 	 * not change during tx
 	 */
--- a/drivers/net/bonding/bond_alb.c.orig	2011-07-15 18:29:11.367521000 -0700
+++ a/drivers/net/bonding/bond_alb.c	2011-07-15 18:38:32.192441000 -0700
@@ -1304,14 +1304,6 @@ int bond_alb_xmit(struct sk_buff *skb, s
 	int res = 1;
 	struct ipv6hdr *ip6hdr;
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later
-	 *tx
-	 */
-	if (is_netpoll_tx_blocked(bond_dev))
-		return NETDEV_TX_BUSY;
-
 	ip_bcast = htonl(0xffffffff);
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
--- a/drivers/net/bonding/bonding.h.orig	2011-07-15 15:47:25.193685000 -0700
+++ a/drivers/net/bonding/bonding.h	2011-07-15 18:08:00.796131000 -0700
@@ -18,6 +18,9 @@
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
 #include <linux/if_bonding.h>
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#include <linux/netpoll.h>
+#endif
 #include <linux/kobject.h>
 #include <linux/cpumask.h>
 #include <linux/in6.h>
@@ -126,32 +129,6 @@ extern int debug;
 #define bond_for_each_slave(bond, pos, cnt)	\
 		bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
 
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-extern atomic_t netpoll_block_tx;
-
-static inline void block_netpoll_tx(void)
-{
-	atomic_inc(&netpoll_block_tx);
-}
-
-static inline void unblock_netpoll_tx(void)
-{
-	atomic_dec(&netpoll_block_tx);
-}
-
-static inline int is_netpoll_tx_blocked(struct net_device *dev)
-{
-	if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
-		return atomic_read(&netpoll_block_tx);
-	return 0;
-}
-#else
-#define block_netpoll_tx()
-#define unblock_netpoll_tx()
-#define is_netpoll_tx_blocked(dev)
-#endif
-
 struct bond_params {
 	int mode;
 	int xmit_policy;
@@ -223,6 +200,12 @@ struct slave {
  */
 struct bonding {
 	struct   net_device *dev; /* first - useful for panic debug */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	int (*netpoll_setup)(struct net_device *dev, struct netpoll_info *npinfo);
+	int (*netpoll_xmit_setup)(struct netpoll *np, struct sk_buff **skbp);
+	void (*netpoll_cleanup)(struct net_device *dev);
+	struct   slave *curr_np_slave;
+#endif
 	struct   slave *first_slave;
 	struct   slave *curr_active_slave;
 	struct   slave *current_arp_slave;
--- a/drivers/net/bonding/bond_sysfs.c.orig	2011-07-15 18:30:03.218285000 -0700
+++ a/drivers/net/bonding/bond_sysfs.c	2011-07-15 19:09:21.899914000 -0700
@@ -1072,7 +1072,6 @@ static ssize_t bonding_store_primary(str
 	char ifname[IFNAMSIZ] = { 0, };
 
 	rtnl_lock();
-	block_netpoll_tx();
 	read_lock(&bond->lock);
 	write_lock_bh(&bond->curr_slave_lock);
 
@@ -1113,7 +1112,6 @@ static ssize_t bonding_store_primary(str
 out:
 	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
 	rtnl_unlock();
 
 	return count;
@@ -1157,13 +1155,11 @@ static ssize_t bonding_store_primary_res
 	       bond->dev->name, pri_reselect_tbl[new_value].modename,
 	       new_value);
 
-	block_netpoll_tx();
 	read_lock(&bond->lock);
 	write_lock_bh(&bond->curr_slave_lock);
 	bond_select_active_slave(bond);
 	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
 out:
 	rtnl_unlock();
 	return ret;
@@ -1236,7 +1232,6 @@ static ssize_t bonding_store_active_slav
 	char ifname[IFNAMSIZ] = { 0, };
 
 	rtnl_lock();
-	block_netpoll_tx();
 	read_lock(&bond->lock);
 	write_lock_bh(&bond->curr_slave_lock);
 
@@ -1303,7 +1298,6 @@ static ssize_t bonding_store_active_slav
 out:
 	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
 
 	rtnl_unlock();
 
--- a/drivers/net/netconsole.c.orig	2011-07-18 13:10:29.513166000 -0700
+++ a/drivers/net/netconsole.c	2011-07-18 20:34:35.056963000 -0700
@@ -48,12 +48,22 @@ MODULE_DESCRIPTION("Console driver for n
 MODULE_LICENSE("GPL");
 
 #define MAX_PARAM_LENGTH	256
-#define MAX_PRINT_CHUNK		1000
 
 static char config[MAX_PARAM_LENGTH];
 module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
 MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n");
 
+static struct timer_list nc_timer;
+static DEFINE_SPINLOCK(nc_lock);
+static int nc_lock_owner = -1;
+
+#define MAX_PRINT_CHUNK 980
+#define SYSLOG_HEADER_LEN 0
+
+static int syslog_chars = SYSLOG_HEADER_LEN;
+static unsigned char syslog_line [MAX_PRINT_CHUNK + 10];
+extern int crash_mode;
+
 #ifndef	MODULE
 static int __init option_setup(char *opt)
 {
@@ -69,7 +79,6 @@ static struct netpoll np = {
 	.local_port	= 6665,
 	.remote_port	= 6666,
 	.remote_mac	= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
-	.drop		= netpoll_queue,
 };
 
 /* Handle network interface device notifications */
@@ -107,23 +118,53 @@ static struct notifier_block netconsole_
 	.notifier_call  = netconsole_netdev_event,
 };
 
+static inline void feed_syslog_char(const unsigned char c)
+{
+        syslog_line[syslog_chars] = c;
+        syslog_chars++;
+        if (syslog_chars == MAX_PRINT_CHUNK || 
+	    (crash_mode && c == '\n')) {
+                netpoll_send_udp(&np, syslog_line, syslog_chars);
+                syslog_chars = SYSLOG_HEADER_LEN;
+        }
+}
+
 static void write_msg(struct console *con, const char *msg, unsigned int len)
 {
-	int frag, left;
+	int i;
 	unsigned long flags;
 
 	if (np.dev && netif_running(np.dev)) {
-		local_irq_save(flags);
-		for (left = len; left;) {
-			frag = min(left, MAX_PRINT_CHUNK);
-			netpoll_send_udp(&np, msg, frag);
-			msg += frag;
-			left -= frag;
+		while (!spin_trylock_irqsave(&nc_lock, flags)) {
+			if (nc_lock_owner == smp_processor_id())
+				return;
 		}
-		local_irq_restore(flags);
+		nc_lock_owner = smp_processor_id();
+
+		for (i = 0; i < len; i++)
+			feed_syslog_char(msg[i]);
+	 
+		nc_lock_owner = -1;
+		spin_unlock_irqrestore(&nc_lock, flags);
 	}
 }
 
+static int last_chars = 0;
+static void flush_timer(unsigned long p)
+{
+	if (spin_trylock(&nc_lock)) {
+		nc_lock_owner = smp_processor_id();
+		if (syslog_chars > 0 && syslog_chars == last_chars) {
+			netpoll_send_udp(&np, syslog_line, syslog_chars);
+                	syslog_chars = SYSLOG_HEADER_LEN;
+		}
+		nc_lock_owner = -1;
+		spin_unlock(&nc_lock);
+	}
+	last_chars = syslog_chars;
+	mod_timer(&nc_timer, jiffies + HZ/10);
+ }
+
 static struct console netconsole = {
 	.name	= "netcon",
 	.flags	= CON_ENABLED | CON_PRINTBUFFER,
@@ -136,6 +175,7 @@ static int __init init_netconsole(void)
 
 	if (!strnlen(config, MAX_PARAM_LENGTH)) {
 		printk(KERN_INFO "netconsole: not configured, aborting\n");
+		err = -EINVAL;
 		goto out;
 	}
 
@@ -152,6 +192,10 @@ static int __init init_netconsole(void)
 		goto out;
 
 	register_console(&netconsole);
+	init_timer(&nc_timer);
+	nc_timer.function = flush_timer;
+	nc_timer.data = (unsigned long)0;
+	mod_timer(&nc_timer, jiffies + HZ/10);
 	printk(KERN_INFO "netconsole: network logging started\n");
 
 out:
@@ -160,11 +204,18 @@ out:
 
 static void __exit cleanup_netconsole(void)
 {
+	unsigned long flags;
+
+	del_timer_sync(&nc_timer);
+	spin_lock_irqsave(&nc_lock, flags);
+	nc_lock_owner = smp_processor_id();
 	unregister_console(&netconsole);
 	unregister_netdevice_notifier(&netconsole_netdev_notifier);
 	rtnl_lock();
 	netpoll_cleanup(&np);
 	rtnl_unlock();
+	nc_lock_owner = -1;
+	spin_unlock_irqrestore(&nc_lock, flags);
 }
 
 module_init(init_netconsole);
--- a/drivers/char/sysrq.c.orig	2011-07-18 15:05:58.572030000 -0700
+++ a/drivers/char/sysrq.c	2011-07-18 15:41:52.174535000 -0700
@@ -414,6 +414,11 @@ void __handle_sysrq(int key, struct pt_r
 		 */
 		if (!check_mask || sysrq_enabled == 1 ||
 		    (sysrq_enabled & op_p->enable_mask)) {
+			if (op_p == &sysrq_crashdump_op ||
+			    op_p == &sysrq_reboot_op) {
+				extern int crash_mode;
+				crash_mode = 1;
+			}
 			printk("%s\n", op_p->action_msg);
 			console_loglevel = orig_log_level;
 			op_p->handler(key, pt_regs, tty);
--- a/net/core/netpoll.c.orig	2011-07-15 14:58:30.338761000 -0700
+++ a/net/core/netpoll.c	2011-07-18 16:00:26.733635000 -0700
@@ -35,7 +35,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
+#define MAX_RETRIES 1000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -57,6 +57,27 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
+struct netpoll_info_ext {
+	struct   net_device *dev;
+	int (*netpoll_setup)(struct net_device *dev, struct netpoll_info *npinfo);
+	int (*netpoll_xmit_setup)(struct netpoll *np, struct sk_buff **skbp);
+	void (*netpoll_cleanup)(struct net_device *dev);
+};
+
+static inline int netpoll_set_dev(struct netpoll *np, struct sk_buff **skbp)
+{
+	struct sk_buff *skb = *skbp;
+
+	skb->dev = np->dev;
+        if (np->dev->flags & IFF_MASTER) {
+		struct netpoll_info_ext *npx = (struct netpoll_info_ext *)netdev_priv(np->dev);
+		/* Note: If failed, skb is freed already
+		 */
+		return (npx->netpoll_xmit_setup(np, skbp));
+	}
+	return 1;
+}
+
 static void queue_process(void *p)
 {
 	unsigned long flags;
@@ -183,6 +204,7 @@ static void service_arp_queue(struct net
 void netpoll_poll_dev(struct net_device *dev)
 {
 	struct netpoll_info *npinfo;
+	unsigned long flags;
 
 	if (!dev || !netif_running(dev))
 		return;
@@ -195,9 +217,9 @@ void netpoll_poll_dev(struct net_device 
 
 	if (dev->poll) {
 		if (dev->npinfo->poll_owner != smp_processor_id() &&
-		    spin_trylock(&npinfo->poll_lock)) {
+		    spin_trylock_irqsave(&npinfo->poll_lock, flags)) {
 			poll_one_napi(npinfo, dev, 16);
-			spin_unlock(&npinfo->poll_lock);
+			spin_unlock_irqrestore(&npinfo->poll_lock, flags);
 		}
 	}
 
@@ -208,7 +230,10 @@ void netpoll_poll_dev(struct net_device 
 
 void netpoll_poll(struct netpoll *np)
 {
-	netpoll_poll_dev(np->dev);
+	if (!(np->dev->flags & IFF_MASTER))
+		netpoll_poll_dev(np->dev);
+	else
+		np->dev->poll_controller(np->dev);
 }
 
 static void refill_skbs(void)
@@ -294,11 +319,12 @@ repeat:
 	return skb;
 }
 
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
 	int status;
 	struct netpoll_info *npinfo;
+	unsigned long flags;
 
 	if (!np || !dev || !netif_running(dev)) {
 		__kfree_skb(skb);
@@ -309,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netp
 
 	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
-	    np->dev->xmit_lock_owner == smp_processor_id()) {
+	    dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
 			np->drop(skb);
 		else
@@ -319,20 +345,19 @@ void netpoll_send_skb_on_dev(struct netp
 
 	do {
 		npinfo->tries--;
-		netif_tx_lock(dev);
-
-		/*
-		 * network drivers do not expect to be called if the queue is
-		 * stopped.
-		 */
 		status = NETDEV_TX_BUSY;
-		if (!netif_queue_stopped(dev)) {
-			dev->priv_flags |= IFF_IN_NETPOLL;
-			status = dev->hard_start_xmit(skb, dev);
-			dev->priv_flags &= ~IFF_IN_NETPOLL;
-		}
 
-		netif_tx_unlock(dev);
+		local_irq_save(flags);
+		if (netif_tx_trylock(dev)) {
+			/*
+			 * network drivers do not expect to be called if the queue is
+			 * stopped.
+			 */
+			if (!netif_queue_stopped(dev))
+				status = dev->hard_start_xmit(skb, dev);
+			netif_tx_unlock(dev);
+		}
+		local_irq_restore(flags);
 
 		/* success */
 		if(!status) {
@@ -341,14 +366,13 @@ void netpoll_send_skb_on_dev(struct netp
 		}
 
 		/* transmit busy */
-		netpoll_poll_dev(dev);
-		udelay(50);
+		if ((npinfo->tries & 0x1) == 0)
+                        netpoll_poll_dev(dev);
+                if (in_interrupt())
+                        break;
+                udelay(1000);
 	} while (npinfo->tries > 0);
-}
-
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
-{
-	netpoll_send_skb_on_dev(np, skb, np->dev);
+	__kfree_skb(skb);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -407,6 +431,9 @@ void netpoll_send_udp(struct netpoll *np
 
 	skb_set_network_header(skb, ETH_HLEN);
 
+	if (!(netpoll_set_dev(np, &skb)))
+		return;
+
 	netpoll_send_skb(np, skb);
 }
 
@@ -732,6 +759,17 @@ int netpoll_setup(struct netpoll *np)
 		goto release;
 	}
 
+        /* Call the device specific netpoll initialization routine. */
+        if (ndev->flags & IFF_MASTER) {
+		struct netpoll_info_ext *npx = (struct netpoll_info_ext *)ndev->priv;
+                if (!npx->netpoll_setup(ndev, npinfo)) {
+			printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+		       		np->name, np->dev_name);
+			err = -ENOTSUPP;
+			goto release;
+		}
+	}
+
 	if (!netif_running(ndev)) {
 		unsigned long atmost, atleast;
 
@@ -812,8 +850,10 @@ int netpoll_setup(struct netpoll *np)
 	return 0;
 
  release:
-	if (!ndev->npinfo)
+	if (!ndev->npinfo) {
 		kfree(npinfo);
+		ndev->npinfo = NULL;
+	}
 	np->dev = NULL;
 	dev_put(ndev);
 	return err;
@@ -843,6 +883,13 @@ void netpoll_cleanup(struct netpoll *np)
 			/* avoid racing with NAPI reading npinfo */
 			call_rcu_bh(&npinfo->rcu, rcu_netpoll_cleanup);
 		}
+        	/* Call the device specific netpoll cleanup routine. */
+		if (np->dev->flags & IFF_MASTER) {
+			struct netpoll_info_ext *npx = 
+				(struct netpoll_info_ext *)np->dev->priv;
+			npx->netpoll_cleanup(np->dev);
+		}
+
 		dev_put(np->dev);
 	}
 
@@ -862,7 +909,6 @@ void netpoll_set_trap(int trap)
 		atomic_dec(&trapped);
 }
 
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_set_trap);
 EXPORT_SYMBOL(netpoll_trap);
--- a/include/linux/netpoll.h.orig	2011-07-15 15:47:47.047741000 -0700
+++ a/include/linux/netpoll.h	2011-07-15 19:11:47.324682000 -0700
@@ -52,8 +52,6 @@ void netpoll_cleanup(struct netpoll *np)
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_queue(struct sk_buff *skb);
 void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev);
 
 #ifdef CONFIG_NETPOLL
 static inline int netpoll_rx(struct sk_buff *skb)
--- a/arch/i386/kernel/traps.c.orig	2011-07-18 15:04:28.362898000 -0700
+++ a/arch/i386/kernel/traps.c	2011-07-18 15:16:51.885951000 -0700
@@ -394,6 +394,7 @@ void die(const char * str, struct pt_reg
 	};
 	static int die_counter;
 	unsigned long flags;
+	extern int crash_mode;
 
 	oops_enter();
 
@@ -438,6 +439,8 @@ void die(const char * str, struct pt_reg
 		if (notify_die(DIE_OOPS, str, regs, err,
 					current->thread.trap_no, SIGSEGV) !=
 				NOTIFY_STOP) {
+			if (kexec_should_crash(current))
+				crash_mode = 1;
 			show_registers(regs);
 			/* Executive summary in case the oops scrolled away */
 			esp = (unsigned long) (&regs->esp);
--- a/arch/x86_64/kernel/traps.c.orig	2011-07-18 15:04:53.477828000 -0700
+++ a/arch/x86_64/kernel/traps.c	2011-07-18 15:18:22.704079000 -0700
@@ -541,6 +541,7 @@ void __kprobes oops_end(unsigned long fl
 void __kprobes __die(const char * str, struct pt_regs * regs, long err)
 {
 	static int die_counter;
+	extern int crash_mode;
 	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
@@ -556,6 +557,8 @@ void __kprobes __die(const char * str, s
 	printk(KERN_ALERT "last sysfs file: %s\n", last_sysfs_file);
 #endif
 	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+	if (kexec_should_crash(current))
+		crash_mode = 1;
 	show_registers(regs);
 	/* Executive summary in case the oops scrolled away */
 	printk(KERN_ALERT "RIP ");
--- a/kernel/panic.c.orig	2011-07-18 15:03:38.213048000 -0700
+++ a/kernel/panic.c	2011-07-18 15:15:01.207727000 -0700
@@ -25,6 +25,8 @@ int tainted;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
+int crash_mode = 0;
+EXPORT_SYMBOL(crash_mode);
 
 int panic_timeout;
 
@@ -65,6 +67,7 @@ NORET_TYPE void panic(const char * fmt, 
 #if defined(CONFIG_S390)
         unsigned long caller = (unsigned long) __builtin_return_address(0);
 #endif
+	crash_mode = 1;
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and not
--- a/drivers/net/bonding/bond_main.c.orig      2011-07-15 15:47:03.235631000 -0700
+++ a/drivers/net/bonding/bond_main.c   2011-07-15 18:21:53.732313000 -0700
@@ -159,10 +159,6 @@ MODULE_PARM_DESC(debug, "Print debug mes
 
 /*----------------------------- Global variables ----------------------------*/
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-atomic_t netpoll_block_tx = ATOMIC_INIT(0);
-#endif
-
 static const char * const version =
 	DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
@@ -305,7 +305,6 @@ static int bond_del_vlan(struct bonding 
 
 	dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
 
-	block_netpoll_tx();
 	write_lock_bh(&bond->lock);
 
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
@@ -342,7 +337,6 @@ static int bond_del_vlan(struct bonding 
 
 out:
 	write_unlock_bh(&bond->lock);
-	unblock_netpoll_tx();
 	return res;
 }
 
@@ -444,15 +438,7 @@ int bond_dev_queue_xmit(struct bonding *
 	}
 
 	skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll_info *npinfo = rcu_dereference(bond->dev->npinfo);
-		struct netpoll *np = npinfo->netpoll;
-		rcu_assign_pointer(slave_dev->npinfo, npinfo);
-		netpoll_send_skb_on_dev(np, skb, slave_dev);
-	} else
-#endif
-		dev_queue_xmit(skb);
+	dev_queue_xmit(skb);
 
 	return 0;
 }
@@ -1454,36 +1441,44 @@ static void bond_detach_slave(struct bon
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static void bond_poll_controller(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave;
-	int i = 0;
-	bool ret = true;
 
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev->priv_flags & IFF_DISABLE_NETPOLL) 
-			ret = false;
+	if ((slave = bond->curr_np_slave)) {
+		if (slave->dev->poll_controller)
+			netpoll_poll_dev(slave->dev);
 	}
-	return i != 0 && ret;
 }
 
-static void bond_poll_controller(struct net_device *bond_dev)
+static int bond_netpoll_setup(struct net_device *bond_dev,
+                              struct netpoll_info *npinfo)
 {
-	struct netpoll_info *npinfo = rcu_dereference(bond_dev->npinfo);
-	struct net_device *dev = npinfo->netpoll->real_dev;
-	if (dev != bond_dev)
-		netpoll_poll_dev(dev);
-}
-#else
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct slave *slave;
+	int i;
 
+	bond_for_each_slave(bond, slave, i) {
+		if (!slave->dev->poll_controller)
+			return 0;
+	}
+	bond_for_each_slave(bond, slave, i) {
+		dev_hold(slave->dev);
+		slave->dev->npinfo = npinfo;
+	}
+	return 1;
+}
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
-}
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct slave *slave;
+	int i;
 
+	bond_for_each_slave(bond, slave, i) {
+		dev_put(slave->dev);
+	}
+}
 #endif
 
 /*---------------------------------- IOCTL ----------------------------------*/
@@ -1907,18 +1903,6 @@ int bond_enslave(struct net_device *bond
 
 	bond_set_carrier(bond);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (slaves_support_netpoll(bond_dev)) {
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-		if (bond_dev->npinfo)
-			rcu_assign_pointer(slave_dev->npinfo, bond_dev->npinfo);
-	} else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-		pr_info("New slave device %s does not support netpoll\n",
-			slave_dev->name);
-		pr_info("Disabling netpoll support for %s\n", bond_dev->name);
-	}
-#endif
 	read_unlock(&bond->lock);
 
 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1931,6 +1915,10 @@ int bond_enslave(struct net_device *bond
 	       new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
 	       new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	slave_dev->npinfo = bond_dev->npinfo;
+#endif
+
 	/* enslave is successful */
 	return 0;
 
@@ -1988,7 +1976,6 @@ int bond_release(struct net_device *bond
 		return -EINVAL;
 	}
 
-	block_netpoll_tx();
 	call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
 	write_lock_bh(&bond->lock);
 
@@ -1999,7 +1986,6 @@ int bond_release(struct net_device *bond
 		       ": %s: %s not enslaved\n",
 		       bond_dev->name, slave_dev->name);
 		write_unlock_bh(&bond->lock);
-		unblock_netpoll_tx();
 		return -EINVAL;
 	}
 
@@ -2063,9 +2049,7 @@ int bond_release(struct net_device *bond
 		 * but before a new active slave is selected.
 		 */
 		write_unlock_bh(&bond->lock);
-		unblock_netpoll_tx();
 		bond_alb_deinit_slave(bond, slave);
-		block_netpoll_tx();
 		write_lock_bh(&bond->lock);
 	}
 
@@ -2076,17 +2060,13 @@ int bond_release(struct net_device *bond
 		 * will interfere.
 		 */
 		write_unlock_bh(&bond->lock);
-		unblock_netpoll_tx();
 		read_lock(&bond->lock);
-		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 
 		bond_select_active_slave(bond);
 
 		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
 		read_unlock(&bond->lock);
-		block_netpoll_tx();
 		write_lock_bh(&bond->lock);
 	}
 
@@ -2121,7 +2101,6 @@ int bond_release(struct net_device *bond
 	}
 
 	write_unlock_bh(&bond->lock);
-	unblock_netpoll_tx();
 
 	/* must do this from outside any spinlocks */
 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -2151,14 +2135,6 @@ int bond_release(struct net_device *bond
 
 	netdev_set_master(slave_dev, NULL);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	read_lock_bh(&bond->lock);
-	if (slaves_support_netpoll(bond_dev))
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-	read_unlock_bh(&bond->lock);
-	rcu_assign_pointer(slave_dev->npinfo, NULL);
-#endif
-
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
@@ -2392,11 +2363,9 @@ static int bond_ioctl_change_active(stru
 	    (old_active) &&
 	    (new_active->link == BOND_LINK_UP) &&
 	    IS_UP(new_active->dev)) {
-		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_change_active_slave(bond, new_active);
 		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
 	} else {
 		res = -EINVAL;
 	}
@@ -2652,11 +2621,9 @@ static void bond_miimon_commit(struct bo
 
 do_failover:
 		ASSERT_RTNL();
-		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
 	}
 
 	bond_set_carrier(bond);
@@ -3061,13 +3028,11 @@ void bond_loadbalance_arp_mon(void *work
 	}
 
 	if (do_failover) {
-		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 
 		bond_select_active_slave(bond);
 
 		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
 	}
 
 re_arm:
@@ -3235,11 +3200,9 @@ static void bond_ab_arp_commit(struct bo
 
 do_failover:
 		ASSERT_RTNL();
-		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
 	}
 
 	bond_set_carrier(bond);
@@ -4421,13 +4384,6 @@ static int bond_xmit_roundrobin(struct s
 	int i, slave_no, res = 1;
 	struct iphdr *iph = ip_hdr(skb);
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later tx
-	 */
-	if (is_netpoll_tx_blocked(bond_dev))
-		return NETDEV_TX_BUSY;
-	
 	if (TX_QUEUE_OVERRIDE(bond)) {
 		if (!bond_slave_override(bond, skb))
 			return NETDEV_TX_OK;
@@ -4499,13 +4451,6 @@ static int bond_xmit_activebackup(struct
 	struct bonding *bond = bond_dev->priv;
 	int res = 1;
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later tx
-	 */
-	if (is_netpoll_tx_blocked(bond_dev))
-		return NETDEV_TX_BUSY;
-
 	if (TX_QUEUE_OVERRIDE(bond)) {
 		if (!bond_slave_override(bond, skb))
 			return NETDEV_TX_OK;
@@ -4546,13 +4595,6 @@ static int bond_xmit_xor(struct sk_buff 
 	int i;
 	int res = 1;
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later tx
-	 */
-	if (is_netpoll_tx_blocked(bond_dev))
-		return NETDEV_TX_BUSY;
-
 	read_lock(&bond->lock);
 
 	if (!BOND_IS_OK(bond)) {
@@ -4599,13 +4541,6 @@ static int bond_xmit_broadcast(struct sk
 	int i;
 	int res = 1;
 
-	/*
-	 * If we risk deadlock from transmitting this in the
-	 * netpoll path, tell netpoll to queue the frame for later tx
-	 */
-	if (is_netpoll_tx_blocked(bond_dev))
-		return NETDEV_TX_BUSY;
-
 	read_lock(&bond->lock);
 
 	if (!BOND_IS_OK(bond)) {
@@ -4658,6 +4593,67 @@ out:
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+int bond_xmit_netpoll(struct netpoll *np, struct sk_buff **skbp)
+{
+	struct net_device *bond_dev = np->dev;
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct slave *slave, *start_at;
+	int ab = (bond_dev->hard_start_xmit == bond_xmit_activebackup);
+	int i, ret = 0;
+	struct sk_buff *skb = *skbp;
+
+	while (!read_trylock(&bond->lock)) {
+		if (in_interrupt())
+			goto rel;
+	}
+	while (!read_trylock(&bond->curr_slave_lock)) {
+		if (in_interrupt())
+			goto out2;
+	}
+
+	if (!BOND_IS_OK(bond))
+		goto out;
+	if (!bond->curr_np_slave || ab)
+		bond->curr_np_slave = bond->curr_active_slave;
+	if (!bond->curr_np_slave && !ab)
+		bond->curr_np_slave = bond->first_slave;
+	if (!(start_at = bond->curr_np_slave))
+		goto out;
+	bond_for_each_slave_from(bond, slave, i, start_at) {
+		if (IS_UP(slave->dev) &&
+		   (slave->link == BOND_LINK_UP) &&
+		    (slave->state == BOND_STATE_ACTIVE)) {
+			unsigned short vlan_id;
+
+			if (!list_empty(&bond->vlan_list) &&
+	    		    !(slave->dev->features & NETIF_F_HW_VLAN_TX) &&
+		    	    vlan_get_tag(skb, &vlan_id) == 0) {
+				skb->dev = slave->dev;
+				skb = vlan_put_tag(skb, vlan_id);
+				if (!skb) {
+					break;
+				}
+			} else {
+				skb->dev = slave->dev;
+			}
+			skb->priority = 1;
+			ret = 1;
+			break;
+		}
+	}
+out:
+	read_unlock(&bond->curr_slave_lock);
+out2:
+	read_unlock(&bond->lock);
+rel:
+	if (!ret && skb)
+		__kfree_skb(skb);
+	*skbp = skb;
+	return ret;
+}
+#endif
+
 /*------------------------- Device initialization ---------------------------*/
 
 static void bond_set_xmit_hash_policy(struct bonding *bond)
@@ -4859,6 +4855,15 @@ static int bond_init(struct net_device *
 	spin_lock_init(&(bond_info->tx_hashtbl_lock));
 	spin_lock_init(&(bond_info->rx_hashtbl_lock));
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	{
+		bond->netpoll_setup = bond_netpoll_setup;
+		bond->netpoll_xmit_setup = bond_xmit_netpoll;
+		bond->netpoll_cleanup = bond_netpoll_cleanup;
+		bond->curr_np_slave = NULL;
+	}
+#endif
+
 #ifdef CONFIG_PROC_FS
 	bond_create_proc_entry(bond);
 #endif
@@ -5448,12 +5453,6 @@ static void __exit bonding_exit(void)
 	rtnl_lock();
 	bond_free_all();
 	rtnl_unlock();
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	/*
-	 * Make sure we don't have an imbalance on our netpoll blocking
-	 */
-	WARN_ON(atomic_read(&netpoll_block_tx));
-#endif
 }
 
 module_init(bonding_init);
