Index: linux-2.6.11/include/linux/gfp.h
===================================================================
--- linux-2.6.11.orig/include/linux/gfp.h	2005-04-05 20:27:35.000000000 -0700
+++ linux-2.6.11/include/linux/gfp.h	2005-04-05 20:31:40.000000000 -0700
@@ -15,6 +15,10 @@ struct vm_area_struct;
 #define __GFP_DMA	0x01
 #define __GFP_HIGHMEM	0x02
 
+/* Quicklist selection flags. Next two bits */
+#define __GFP_COLD	0x04u	/* Cache-cold page required */
+#define __GFP_ZERO	0x08u	/* Return zeroed page on success */
+
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -30,14 +34,12 @@ struct vm_area_struct;
 #define __GFP_HIGH	0x20u	/* Should access emergency pools? */
 #define __GFP_IO	0x40u	/* Can start physical IO? */
 #define __GFP_FS	0x80u	/* Can call down to low-level FS? */
-#define __GFP_COLD	0x100u	/* Cache-cold page required */
-#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
-#define __GFP_REPEAT	0x400u	/* Retry the allocation. Might fail */
-#define __GFP_NOFAIL	0x800u	/* Retry for ever. Cannot fail */
-#define __GFP_NORETRY	0x1000u	/* Do not retry. Might fail */
-#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
-#define __GFP_COMP	0x4000u	/* Add compound page metadata */
-#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
+#define __GFP_NOWARN	0x100u	/* Suppress page allocation failure warning */
+#define __GFP_REPEAT	0x200u	/* Retry the allocation. Might fail */
+#define __GFP_NOFAIL	0x400u	/* Retry for ever. Cannot fail */
+#define __GFP_NORETRY	0x800u	/* Do not retry. Might fail */
+#define __GFP_NO_GROW	0x1000u	/* Slab internal usage */
+#define __GFP_COMP	0x2000u	/* Add compound page metadata */
 
 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
Index: linux-2.6.11/mm/page_alloc.c
===================================================================
--- linux-2.6.11.orig/mm/page_alloc.c	2005-04-05 20:27:35.000000000 -0700
+++ linux-2.6.11/mm/page_alloc.c	2005-04-05 21:01:45.000000000 -0700
@@ -565,6 +565,52 @@ static int rmqueue_bulk(struct zone *zon
 	return allocated;
 }
 
+/*
+ * Quicklist accessor functions. These allow quick access to pages on the
+ * per-cpu quicklists. They rely on zone information and the state of the
+ * per-cpu lists to decide whether a page is available for allocation.
+ */
+
+/* Must be called with preemption off; interrupts are disabled internally.
+ * Returns a page if one was available and NULL if not.
+ */
+inline struct page *quicklist_get(struct zone *z, unsigned int gfpmask)
+{
+	struct per_cpu_pages *p = QUICKLIST(z, gfpmask);
+	struct page *page = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (unlikely(p->count <= p->low))
+		p->count += rmqueue_bulk(z, 0, p->batch, &p->list, !!(gfpmask & __GFP_ZERO));
+	if (likely(p->count)) {
+		page = list_entry(p->list.next, struct page, lru);
+		list_del(&page->lru);
+		p->count--;
+	}
+	local_irq_restore(flags);
+	return page;
+}
+
+/*
+ * Must be called with preemption off; interrupts are disabled internally.
+ * Unconditionally puts the page on the quicklist and frees a batch of
+ * pages back to the buddy lists if the high watermark is exceeded.
+ */
+inline void quicklist_push(struct zone *z, struct page *page, unsigned int gfpmask)
+{
+	struct per_cpu_pages *p;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	p = QUICKLIST(z, gfpmask);
+	list_add(&page->lru, &p->list);
+	p->count++;
+	if (p->count > p->high)
+		p->count -= free_pages_bulk(z, p->batch, &p->list, 0, !!(gfpmask & __GFP_ZERO));
+	local_irq_restore(flags);
+}
+
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
@@ -655,13 +701,9 @@ static void zone_statistics(struct zonel
 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void FASTCALL(free_a_page(struct page *page, unsigned int __nocast gfp_flags));
+static void fastcall free_a_page(struct page *page, unsigned int __nocast gfp_flags)
 {
-	struct zone *zone = page_zone(page);
-	struct per_cpu_pages *pcp;
-	unsigned long flags;
-
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
@@ -669,35 +711,13 @@ static void fastcall free_hot_cold_page(
 	if (PageAnon(page))
 		page->mapping = NULL;
 	free_pages_check(__FUNCTION__, page);
-	pcp = &zone->pageset[get_cpu()].pcp[cold];
-	local_irq_save(flags);
-	if (pcp->count >= pcp->high)
-		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0, cold >> 1);
-	list_add(&page->lru, &pcp->list);
-	pcp->count++;
-	local_irq_restore(flags);
-	put_cpu();
-}
-
-void fastcall free_hot_page(struct page *page)
-{
-	free_hot_cold_page(page, 0);
-}
-
-void fastcall free_cold_page(struct page *page)
-{
-	free_hot_cold_page(page, 1);
-}
-
-void fastcall free_hot_zeroed_page(struct page *page)
-{
-	free_hot_cold_page(page, 2);
+	quicklist_push(page_zone(page), page, gfp_flags);
 }
 
-void fastcall free_cold_zeroed_page(struct page *page)
-{
-	free_hot_cold_page(page, 3);
-}
+#define free_hot_page(page)		free_a_page(page, 0)
+#define free_cold_page(page)		free_a_page(page, __GFP_COLD)
+#define free_hot_zeroed_page(page)	free_a_page(page, __GFP_ZERO)
+#define free_cold_zeroed_page(page)	free_a_page(page, __GFP_COLD | __GFP_ZERO)
 
 void prep_zero_page(struct page *page, unsigned int order, unsigned int __nocast gfp_flags)
 {
@@ -723,26 +743,10 @@ buffered_rmqueue(struct zone *zone, int
 {
 	unsigned long flags;
 	struct page *page = NULL;
-	int nr_pages = 1 << order;
-	int zero = !!((gfp_flags & __GFP_ZERO) && zone->zero_pages >= nr_pages);
-	int cold = (zero << 1) + !!(gfp_flags & __GFP_COLD);
-
-	if (order == 0) {
-		struct per_cpu_pages *pcp;
-
-		pcp = &zone->pageset[get_cpu()].pcp[cold];
-		local_irq_save(flags);
-		if (pcp->count <= pcp->low)
-			pcp->count += rmqueue_bulk(zone, 0,
-						pcp->batch, &pcp->list, zero);
-		if (pcp->count) {
-			page = list_entry(pcp->list.next, struct page, lru);
-			list_del(&page->lru);
-			pcp->count--;
-		}
-		local_irq_restore(flags);
-		put_cpu();
-	}
+	int zero = !!(gfp_flags & __GFP_ZERO);
+
+	if (order == 0)
+		page = quicklist_get(zone, gfp_flags);
 
 	if (page == NULL) {
 		spin_lock_irqsave(&zone->lock, flags);
@@ -761,7 +765,7 @@ buffered_rmqueue(struct zone *zone, int
 
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
-		mod_page_state_zone(zone, pgalloc, nr_pages);
+		mod_page_state_zone(zone, pgalloc, 1 << order);
 		prep_new_page(page, order);
 
 		if ((gfp_flags & __GFP_ZERO) && !zero)
@@ -1027,7 +1031,7 @@ void __pagevec_free(struct pagevec *pvec
 	int i = pagevec_count(pvec);
 
 	while (--i >= 0)
-		free_hot_cold_page(pvec->pages[i], pvec->cold);
+		free_a_page(pvec->pages[i], pvec->cold ? __GFP_COLD : 0);
 }
 
 fastcall void __free_pages(struct page *page, unsigned int order)
Index: linux-2.6.11/include/linux/mmzone.h
===================================================================
--- linux-2.6.11.orig/include/linux/mmzone.h	2005-04-05 20:27:35.000000000 -0700
+++ linux-2.6.11/include/linux/mmzone.h	2005-04-05 20:50:56.000000000 -0700
@@ -216,6 +216,7 @@ struct zone {
 	char			*name;
 } ____cacheline_maxaligned_in_smp;
 
+#define QUICKLIST(z, mask) (&(z)->pageset[smp_processor_id()].pcp[((mask) & (__GFP_COLD | __GFP_ZERO)) >> 2])
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
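
As a sanity check on the encoding above, here is a minimal userspace sketch
(not part of the patch) modeling how the two gfp bits select one of the four
per-cpu lists. __GFP_COLD and __GFP_ZERO carry the values from the gfp.h hunk;
pcp_index() and PCP_LISTS are illustrative names standing in for the
arithmetic inside the QUICKLIST() macro.

	/* quicklist_index.c - userspace model of the quicklist selection bits */
	#include <assert.h>
	#include <stdio.h>

	#define __GFP_COLD	0x04u	/* bit 2: cache-cold page required */
	#define __GFP_ZERO	0x08u	/* bit 3: return zeroed page */

	#define PCP_LISTS	4	/* hot, cold, hot-zeroed, cold-zeroed */

	/* Same computation as QUICKLIST(): mask the two selection bits and
	 * shift them down to form an index 0..3 into the pcp[] array. */
	static unsigned int pcp_index(unsigned int gfpmask)
	{
		return (gfpmask & (__GFP_COLD | __GFP_ZERO)) >> 2;
	}

	int main(void)
	{
		/* The four free_*_page() wrappers map onto the four lists: */
		assert(pcp_index(0) == 0);				/* free_hot_page */
		assert(pcp_index(__GFP_COLD) == 1);			/* free_cold_page */
		assert(pcp_index(__GFP_ZERO) == 2);			/* free_hot_zeroed_page */
		assert(pcp_index(__GFP_COLD | __GFP_ZERO) == 3);	/* free_cold_zeroed_page */

		/* Unrelated gfp bits do not disturb the index. */
		assert(pcp_index(0x20u | __GFP_ZERO) == 2);		/* __GFP_HIGH also set */

		printf("all %d quicklist indices check out\n", PCP_LISTS);
		return 0;
	}

Compile and run with something like "gcc -Wall quicklist_index.c && ./a.out"
to verify the mapping.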
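The point of moving __GFP_COLD and __GFP_ZERO down to bits 2 and 3 is that the
per-cpu list index now falls directly out of a single mask-and-shift, where
buffered_rmqueue() previously had to assemble it as
(zero << 1) + !!(gfp_flags & __GFP_COLD). As a result free_a_page(),
quicklist_get() and quicklist_push() can all share one gfp-flag convention end
to end instead of translating between gfp flags and a separate "cold" integer.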