diff -up linux-2.6.18.x86_64/mm/vmscan.c.orig linux-2.6.18.x86_64/mm/vmscan.c
--- linux-2.6.18.x86_64/mm/vmscan.c.orig	2008-05-15 14:24:08.767239000 -0700
+++ linux-2.6.18.x86_64/mm/vmscan.c	2008-05-15 14:35:03.003466000 -0700
@@ -658,6 +658,42 @@ static unsigned long shrink_inactive_lis
 
 		nr_scanned += nr_scan;
 		nr_freed = shrink_page_list(&page_list, sc);
+
+		/*
+		 * If we are direct reclaiming (typically for contiguous
+		 * pages) and did not free everything on the list, wait
+		 * for IO to complete and give the list a second pass.
+		 * This stalls high-order allocations, but that should
+		 * be acceptable to the caller.
+		 */
+		if (nr_freed < nr_taken && !current_is_kswapd() &&
+				atomic_read(&zone->reclaim_in_progress) > 1) {
+			long nr_active_pages = 0;
+			struct page *pg;
+
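+			/*
+			 * Wait for in-flight writeback: at least HZ/100
+			 * jiffies, longer when more reclaimers are active
+			 * in this zone.
+			 */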
+			blk_congestion_wait(WRITE,
+				max(atomic_read(&zone->reclaim_in_progress),
+					HZ/100));
+
+			/*
+			 * The attempt at pageout may have made some of
+			 * the pages active; mark them inactive again.
+			 */
+			list_for_each_entry(pg, &page_list, lru) {
+				if (PageActive(pg)) {
+					nr_active_pages++;
+					ClearPageActive(pg);
+				}
+			}
+			count_vm_events(PGDEACTIVATE, nr_active_pages);
+			nr_freed += shrink_page_list(&page_list, sc);
+		}
+
 		nr_reclaimed += nr_freed;
 		local_irq_disable();
 		if (current_is_kswapd()) {

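
A possible follow-up cleanup (a sketch only, not part of this patch): the
deactivation loop could be pulled out into a helper, similar to the
clear_active_flags() helper that later mainline kernels added to
mm/vmscan.c:

static unsigned long clear_active_flags(struct list_head *page_list)
{
	unsigned long nr_active = 0;
	struct page *page;

	/* Pageout may have re-activated pages; deactivate them again. */
	list_for_each_entry(page, page_list, lru) {
		if (PageActive(page)) {
			ClearPageActive(page);
			nr_active++;
		}
	}
	return nr_active;
}

The retry branch would then read:

	count_vm_events(PGDEACTIVATE, clear_active_flags(&page_list));
	nr_freed += shrink_page_list(&page_list, sc);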