Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 49 ++++++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb181e437..85eaca9e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -108,17 +108,6 @@ static spinlock_t page_alloc_lock;
  * but this had better return false if any reasonable "get_free_page()"
  * allocation could currently fail..
  *
- * Currently we approve of the following situations:
- * - the highest memory order has two entries
- * - the highest memory order has one free entry and:
- *   - the next-highest memory order has two free entries
- * - the highest memory order has one free entry and:
- *   - the next-highest memory order has one free entry
- *   - the next-next-highest memory order has two free entries
- *
- * [previously, there had to be two entries of the highest memory
- *  order, but this lead to problems on large-memory machines.]
- *
  * This will return zero if no list was found, non-zero
  * if there was memory (the bigger, the better).
  */
@@ -126,7 +115,18 @@ int free_memory_available(int nr)
 {
 	int retval = 0;
 	unsigned long flags;
-	struct free_area_struct * list = NULL;
+	struct free_area_struct * list;
+
+	/*
+	 * If we have more than about 3% to 5% of all memory free,
+	 * consider it to be good enough for anything.
+	 * It may not be, due to fragmentation, but we
+	 * don't want to keep on forever trying to find
+	 * free unfragmented memory.
+	 * Added low/high water marks to avoid thrashing -- Rik.
+	 */
+	if (nr_free_pages > (num_physpages >> 5) + (nr ? 0 : num_physpages >> 6))
+		return nr+1;
 
 	list = free_area + NR_MEM_LISTS;
 	spin_lock_irqsave(&page_alloc_lock, flags);
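
The check added above encodes the 3% to 5% heuristic as shift arithmetic: num_physpages >> 5 is about 3.1% of all pages, and the extra num_physpages >> 6 (about 1.6%) is added only when nr is zero, yielding the two water marks the comment mentions. A minimal user-space sketch of that arithmetic, assuming a hypothetical machine with 32768 physical pages (128 MB in 4 KB pages):

	#include <stdio.h>

	int main(void)
	{
		unsigned long num_physpages = 32768;	/* hypothetical: 128 MB of 4 KB pages */

		/* nr != 0: the lower mark applies (~3.1% of memory) */
		unsigned long low_mark = num_physpages >> 5;
		/* nr == 0: the higher mark applies (~3.1% + ~1.6% = ~4.7%) */
		unsigned long high_mark = (num_physpages >> 5) + (num_physpages >> 6);

		printf("low mark:  %lu pages\n", low_mark);	/* prints 1024 */
		printf("high mark: %lu pages\n", high_mark);	/* prints 1536 */
		return 0;
	}

Which mark a caller sees is selected by its nr argument; the gap between the two is the hysteresis Rik's note credits with avoiding thrashing.
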
@@ -263,10 +263,8 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
* "maxorder" is the highest order number that we're allowed
* to empty in order to find a free page..
*/
- maxorder = order + NR_MEM_LISTS/3;
- if (gfp_mask & __GFP_MED)
- maxorder += NR_MEM_LISTS/3;
- if ((gfp_mask & __GFP_HIGH) || maxorder > NR_MEM_LISTS)
+ maxorder = NR_MEM_LISTS-1;
+ if (gfp_mask & __GFP_HIGH)
maxorder = NR_MEM_LISTS;
if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
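
The hunk above drops the graduated order + NR_MEM_LISTS/3 ladder and its __GFP_MED step: every allocation may now split high-order blocks up to one list short of the maximum limit, and only __GFP_HIGH (atomic) callers are granted that last list too. A compilable sketch of the new mapping, where the NR_MEM_LISTS and __GFP_HIGH values are hypothetical stand-ins chosen for illustration:

	#include <stdio.h>

	#define NR_MEM_LISTS	12	/* hypothetical number of buddy free lists */
	#define __GFP_HIGH	0x20	/* hypothetical bit value */

	/* Per the hunk above: ordinary callers get a limit of NR_MEM_LISTS-1,
	 * so only high-priority callers may tap one further free list. */
	static int max_split_order(int gfp_mask)
	{
		return (gfp_mask & __GFP_HIGH) ? NR_MEM_LISTS : NR_MEM_LISTS - 1;
	}

	int main(void)
	{
		printf("normal limit: %d, atomic limit: %d\n",
		       max_split_order(0), max_split_order(__GFP_HIGH));	/* 11, 12 */
		return 0;
	}
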
@@ -278,13 +276,18 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
 		}
 	}
 
-repeat:
-	spin_lock_irqsave(&page_alloc_lock, flags);
-	RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
-	spin_unlock_irqrestore(&page_alloc_lock, flags);
-	if ((gfp_mask & __GFP_WAIT) && try_to_free_pages(gfp_mask,SWAP_CLUSTER_MAX))
-		goto repeat;
-
+	for (;;) {
+		spin_lock_irqsave(&page_alloc_lock, flags);
+		RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
+		spin_unlock_irqrestore(&page_alloc_lock, flags);
+		if (!(gfp_mask & __GFP_WAIT))
+			break;
+		shrink_dcache();
+		if (!try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX))
+			break;
+		gfp_mask &= ~__GFP_WAIT;	/* go through this only once */
+		maxorder = NR_MEM_LISTS;	/* Allow anything this time */
+	}
 nopage:
 	return 0;
 }
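
The old goto repeat loop could retry indefinitely for as long as try_to_free_pages() reported progress. The rewritten for (;;) bounds that: a non-__GFP_WAIT caller makes a single pass, while a sleeping caller gets one reclaim cycle (shrink_dcache() plus try_to_free_pages()) and then one last pass with maxorder raised, because clearing __GFP_WAIT inside the loop guarantees the next iteration breaks out. A self-contained sketch of that control-flow shape, with alloc_attempt() and reclaim_once() as hypothetical stand-ins for RMQUEUE and the reclaim calls:

	#include <stdbool.h>
	#include <stdio.h>

	static bool alloc_attempt(void) { return false; }	/* pretend the free lists are empty */
	static bool reclaim_once(void)  { return true;  }	/* pretend reclaim made progress */

	static int try_alloc(bool may_wait)
	{
		int passes = 0;

		for (;;) {
			passes++;
			if (alloc_attempt())
				break;		/* success: RMQUEUE jumps out in the real code */
			if (!may_wait)
				break;		/* atomic callers never sleep or retry */
			if (!reclaim_once())
				break;		/* no progress: give up */
			may_wait = false;	/* go through this only once */
		}
		return passes;
	}

	int main(void)
	{
		/* A __GFP_WAIT-style caller makes at most two passes even when
		 * reclaim always "succeeds" -- no more unbounded looping. */
		printf("passes: %d\n", try_alloc(true));	/* prints 2 */
		return 0;
	}

This is the same "go through this only once" pattern the inline comment names: the retry budget lives in the flag itself, so no separate counter is needed.
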