Index: linux-2.6.11/include/linux/sched.h
===================================================================
--- linux-2.6.11.orig/include/linux/sched.h	2005-04-07 17:33:03.000000000 -0700
+++ linux-2.6.11/include/linux/sched.h	2005-04-07 17:36:51.000000000 -0700
@@ -737,6 +737,8 @@ struct task_struct {
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 #endif
+	struct zone *lastzone;
+	struct vm_area_struct *lastvma;
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
Index: linux-2.6.11/mm/memory.c
===================================================================
--- linux-2.6.11.orig/mm/memory.c	2005-04-05 20:26:33.000000000 -0700
+++ linux-2.6.11/mm/memory.c	2005-04-07 17:43:58.000000000 -0700
@@ -1675,24 +1675,28 @@ do_anonymous_page(struct mm_struct *mm,
 
 	/* ..except if it's a write access */
 	if (write_access) {
-		/* Allocate our own private page. */
-		pte_unmap(page_table);
-		spin_unlock(&mm->page_table_lock);
-
-		if (unlikely(anon_vma_prepare(vma)))
-			goto no_mem;
-		page = alloc_zeroed_user_highpage(vma, addr);
-		if (!page)
-			goto no_mem;
-
-		spin_lock(&mm->page_table_lock);
-		page_table = pte_offset_map(pmd, addr);
-
-		if (!pte_none(*page_table)) {
+		if (current->lastvma != vma || !(page = quicklist_get(current->lastzone, __GFP_ZERO))) {
+			/* Allocate our own private page. */
 			pte_unmap(page_table);
-			page_cache_release(page);
 			spin_unlock(&mm->page_table_lock);
-			goto out;
+
+			if (unlikely(anon_vma_prepare(vma)))
+				goto no_mem;
+			page = alloc_zeroed_user_highpage(vma, addr);
+			if (!page)
+				goto no_mem;
+
+			spin_lock(&mm->page_table_lock);
+			page_table = pte_offset_map(pmd, addr);
+
+			if (!pte_none(*page_table)) {
+				pte_unmap(page_table);
+				page_cache_release(page);
+				spin_unlock(&mm->page_table_lock);
+				goto out;
+			}
+			current->lastvma = vma;
+			current->lastzone = page_zone(page);
 		}
 		inc_mm_counter(mm, rss);
 		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,