author		Ralf Baechle <ralf@linux-mips.org>	2000-03-02 02:36:47 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-03-02 02:36:47 +0000
commit		8624512aa908741ba2795200133eae0d7f4557ea (patch)
tree		d5d3036fccf2604f4c98dedc11e8adb929d6b52e /mm
parent		7b8f5d6f1d45d9f9de1d26e7d3c32aa5af11b488 (diff)
Merge with 2.3.48.
Diffstat (limited to 'mm')
-rw-r--r--	mm/bootmem.c	2
-rw-r--r--	mm/filemap.c	34
-rw-r--r--	mm/memory.c	30
-rw-r--r--	mm/page_alloc.c	26
-rw-r--r--	mm/vmscan.c	21
5 files changed, 57 insertions, 56 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 43ade5c96..7a6d9db09 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -82,7 +82,7 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long add
BUG();
for (i = sidx; i < eidx; i++)
if (test_and_set_bit(i, bdata->node_bootmem_map))
- BUG();
+ printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
}
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
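
The bootmem.c hunk above downgrades a double reservation from a fatal BUG() to a printk() warning, so overlapping reserve calls no longer kill the boot. A minimal userspace sketch of the bitmap pattern involved (stand-in constants and a simplified, non-atomic test_and_set_bit; not the kernel's implementation):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NPAGES    64UL

static unsigned long bootmem_map[NPAGES / (8 * sizeof(unsigned long))];

/* Atomic in the kernel; a plain read-modify-write suffices for a sketch. */
static int test_and_set_bit(unsigned long nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
	unsigned long *word = map + nr / (8 * sizeof(unsigned long));
	int old = (*word & mask) != 0;

	*word |= mask;
	return old;
}

static void reserve_pages(unsigned long sidx, unsigned long eidx)
{
	for (unsigned long i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bootmem_map))
			/* Already reserved: warn and keep going, as the patch does. */
			printf("hm, page %08lx reserved twice.\n", i * PAGE_SIZE);
}

int main(void)
{
	reserve_pages(0, 4);
	reserve_pages(2, 6);	/* pages 2-3 overlap: two warnings, no crash */
	return 0;
}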
diff --git a/mm/filemap.c b/mm/filemap.c
index 749e14250..6756c70a0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1321,8 +1321,7 @@ struct page * filemap_nopage(struct vm_area_struct * area,
* of the file is an error and results in a SIGBUS, while a
* private mapping just maps in a zero page.
*/
- if ((pgoff >= size) &&
- (area->vm_flags & VM_SHARED) && (area->vm_mm == current->mm))
+ if ((pgoff >= size) && (area->vm_mm == current->mm))
return NULL;
/*
@@ -1431,33 +1430,6 @@ page_not_uptodate:
return NULL;
}
-/*
- * Tries to write a shared mapped page to its backing store. May return -EIO
- * if the disk is full.
- */
-static inline int do_write_page(struct inode * inode, struct file * file,
- struct page * page, unsigned long index)
-{
- int retval;
- int (*writepage) (struct dentry *, struct page *);
-
- /* refuse to extend file size.. */
- if (S_ISREG(inode->i_mode)) {
- unsigned long size_idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
- /* Ho humm.. We should have tested for this earlier */
- if (size_idx <= index)
- return -EIO;
- }
- writepage = inode->i_mapping->a_ops->writepage;
- lock_page(page);
-
- retval = writepage(file->f_dentry, page);
-
- UnlockPage(page);
- return retval;
-}
-
static int filemap_write_page(struct file *file,
unsigned long index,
struct page * page,
@@ -1476,7 +1448,9 @@ static int filemap_write_page(struct file *file,
* vma/file is guaranteed to exist in the unmap/sync cases because
* mmap_sem is held.
*/
- result = do_write_page(inode, file, page, index);
+ lock_page(page);
+ result = inode->i_mapping->a_ops->writepage(dentry, page);
+ UnlockPage(page);
return result;
}
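
The filemap.c changes drop the do_write_page() wrapper, along with its late file-size check, and call the address_space writepage operation directly with the page locked. A standalone sketch of the resulting lock/write/unlock shape (stub struct definitions and a stub writepage; hypothetical userspace stand-ins, not the kernel code):

#include <stdio.h>

struct page   { int locked; const char *data; };
struct dentry { const char *name; };

static void lock_page(struct page *p)  { p->locked = 1; }
static void UnlockPage(struct page *p) { p->locked = 0; }

static int writepage(struct dentry *dentry, struct page *page)
{
	printf("writing page of %s: %s\n", dentry->name, page->data);
	return 0;	/* the real hook can return -EIO, e.g. on a full disk */
}

/* After the patch, the caller locks the page and invokes the op directly. */
static int filemap_write_page(struct dentry *dentry, struct page *page)
{
	int result;

	lock_page(page);
	result = writepage(dentry, page);
	UnlockPage(page);
	return result;
}

int main(void)
{
	struct dentry d = { "somefile" };
	struct page p = { 0, "dirty data" };

	return filemap_write_page(&d, &p);
}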
diff --git a/mm/memory.c b/mm/memory.c
index b4bf6ed36..aab598aed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -418,7 +418,7 @@ static struct page * follow_page(unsigned long address)
struct page * get_page_map(struct page *page, unsigned long vaddr)
{
- if (MAP_NR(page) >= max_mapnr)
+ if (MAP_NR(vaddr) >= max_mapnr)
return 0;
if (page == ZERO_PAGE(vaddr))
return 0;
@@ -712,6 +712,19 @@ int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long
}
/*
+ * Establish a new mapping:
+ * - flush the old one
+ * - update the page tables
+ * - inform the TLB about the new one
+ */
+static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
+{
+ flush_tlb_page(vma, address);
+ set_pte(page_table, entry);
+ update_mmu_cache(vma, address, entry);
+}
+
+/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
@@ -769,8 +782,7 @@ static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
/* FallThrough */
case 1:
flush_cache_page(vma, address);
- set_pte(page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
- flush_tlb_page(vma, address);
+ establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
spin_unlock(&tsk->mm->page_table_lock);
return 1;
}
@@ -793,8 +805,7 @@ static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
copy_cow_page(old_page, new_page, address);
flush_page_to_ram(new_page);
flush_cache_page(vma, address);
- set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
- flush_tlb_page(vma, address);
+ establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
/* Free the old page.. */
new_page = old_page;
@@ -862,6 +873,8 @@ void vmtruncate(struct inode * inode, loff_t offset)
struct vm_area_struct * mpnt;
struct address_space *mapping = inode->i_mapping;
+ if (inode->i_size < offset)
+ goto out;
inode->i_size = offset;
truncate_inode_pages(mapping, offset);
spin_lock(&mapping->i_shared_lock);
@@ -906,6 +919,9 @@ void vmtruncate(struct inode * inode, loff_t offset)
} while ((mpnt = mpnt->vm_next_share) != NULL);
out_unlock:
spin_unlock(&mapping->i_shared_lock);
+out:
+ /* this should go into ->truncate */
+ inode->i_size = offset;
if (inode->i_op && inode->i_op->truncate)
inode->i_op->truncate(inode);
}
@@ -1120,9 +1136,7 @@ static inline int handle_pte_fault(struct task_struct *tsk,
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- set_pte(pte, entry);
- flush_tlb_page(vma, address);
- update_mmu_cache(vma, address, entry);
+ establish_pte(vma, address, pte, entry);
}
spin_unlock(&tsk->mm->page_table_lock);
return 1;
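
The memory.c changes introduce establish_pte() and switch three open-coded call sites over to it; note that the helper flushes the old TLB entry before installing the new PTE, where the replaced code in do_wp_page() set the PTE first and flushed afterwards. The sketch below mirrors that ordering with stub hooks (userspace stand-ins, not kernel code):

#include <stdio.h>

typedef unsigned long pte_t;

/* Stubs for the arch hooks; only the call ordering matters here. */
static void flush_tlb_page(unsigned long address)
{
	printf("flush stale translation for %#lx\n", address);
}

static void set_pte(pte_t *page_table, pte_t entry)
{
	*page_table = entry;
}

static void update_mmu_cache(unsigned long address, pte_t entry)
{
	printf("prime MMU/TLB for %#lx -> %#lx\n", address, (unsigned long)entry);
}

/* Mirrors the new helper: flush the old translation, install the new
 * entry, then tell the MMU-cache layer about the mapping. */
static void establish_pte(unsigned long address, pte_t *page_table, pte_t entry)
{
	flush_tlb_page(address);
	set_pte(page_table, entry);
	update_mmu_cache(address, entry);
}

int main(void)
{
	pte_t slot = 0;

	establish_pte(0x1000, &slot, 0x42);
	printf("pte now %#lx\n", (unsigned long)slot);
	return 0;
}

The vmtruncate() hunks are independent of this helper: an extending truncate now skips the unmapping loop, and i_size is set on the way out in both paths.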
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index efdbb98f1..1b61ebd17 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -27,6 +27,7 @@
int nr_swap_pages = 0;
int nr_lru_pages;
LIST_HEAD(lru_cache);
+pg_data_t *pgdat_list = (pg_data_t *)0;
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128, };
@@ -264,24 +265,23 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
{
if (z->low_on_memory)
z->low_on_memory = 0;
+ z->zone_wake_kswapd = 0;
}
else
{
extern wait_queue_head_t kswapd_wait;
- if (z->low_on_memory)
- goto balance;
-
- if (free <= z->pages_low)
- {
+ if (free <= z->pages_low) {
+ z->zone_wake_kswapd = 1;
wake_up_interruptible(&kswapd_wait);
+ } else
+ z->zone_wake_kswapd = 0;
- if (free <= z->pages_min)
- {
- z->low_on_memory = 1;
- goto balance;
- }
- }
+ if (free <= z->pages_min)
+ z->low_on_memory = 1;
+
+ if (z->low_on_memory)
+ goto balance;
}
}
/*
@@ -482,6 +482,9 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
unsigned long totalpages, offset;
unsigned int cumulative = 0;
+ pgdat->node_next = pgdat_list;
+ pgdat_list = pgdat;
+
totalpages = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
unsigned long size = zones_size[i];
@@ -560,6 +563,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
zone->pages_low = mask*2;
zone->pages_high = mask*3;
zone->low_on_memory = 0;
+ zone->zone_wake_kswapd = 0;
zone->zone_mem_map = mem_map + offset;
zone->zone_start_mapnr = offset;
zone->zone_start_paddr = zone_start_paddr;
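
Two things happen in page_alloc.c: each pg_data_t is threaded onto the new global pgdat_list at init time, and the allocator's slow path is reworked around a per-zone zone_wake_kswapd flag. A userspace sketch of the reworked low-water branch (stub wake function; only the else-branch of the hunk is modeled, and the plenty-free branch that clears low_on_memory is not shown):

#include <stdio.h>

struct zone {
	long pages_min, pages_low;
	int low_on_memory, zone_wake_kswapd;
};

static void wake_up_kswapd(void)
{
	printf("kswapd woken\n");
}

/* Returns nonzero when the caller should take its balance path. */
static int zone_low_branch(struct zone *z, long free)
{
	if (free <= z->pages_low) {
		z->zone_wake_kswapd = 1;	/* flag the zone for kswapd */
		wake_up_kswapd();
	} else
		z->zone_wake_kswapd = 0;

	if (free <= z->pages_min)
		z->low_on_memory = 1;	/* sticky until the zone recovers */

	return z->low_on_memory;
}

int main(void)
{
	struct zone z = { 32, 64, 0, 0 };

	printf("balance=%d\n", zone_low_branch(&z, 100));	/* 0 */
	printf("balance=%d\n", zone_low_branch(&z, 50));	/* wake, 0 */
	printf("balance=%d\n", zone_low_branch(&z, 20));	/* wake, 1 */
	return 0;
}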
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 02cf78030..fa687b7e0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -8,6 +8,7 @@
* Removed kswapd_ctl limits, and swap out as many pages as needed
* to bring the system back to freepages.high: 2.4.97, Rik van Riel.
* Version: $Id: vmscan.c,v 1.5 1998/02/23 22:14:28 sct Exp $
+ * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
*/
#include <linux/slab.h>
@@ -468,7 +469,10 @@ DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
*/
int kswapd(void *unused)
{
+ int i;
struct task_struct *tsk = current;
+ pg_data_t *pgdat;
+ zone_t *zone;
tsk->session = 1;
tsk->pgrp = 1;
@@ -496,12 +500,17 @@ int kswapd(void *unused)
* up on a more timely basis.
*/
do {
- /* kswapd is critical to provide GFP_ATOMIC
- allocations (not GFP_HIGHMEM ones). */
- if (nr_free_pages() - nr_free_highpages() >= freepages.high)
- break;
- if (!do_try_to_free_pages(GFP_KSWAPD, 0))
- break;
+ pgdat = pgdat_list;
+ while (pgdat) {
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if ((!zone->size) ||
+ (!zone->zone_wake_kswapd))
+ continue;
+ do_try_to_free_pages(GFP_KSWAPD, zone);
+ }
+ pgdat = pgdat->node_next;
+ }
run_task_queue(&tq_disk);
} while (!tsk->need_resched);
tsk->state = TASK_INTERRUPTIBLE;
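
With the plumbing above in place, kswapd stops checking a single global freepages.high watermark and instead walks every node on pgdat_list, reclaiming only from zones that raised zone_wake_kswapd. A standalone sketch of that scan loop (stub types, a stub do_try_to_free_pages, and hypothetical values in main):

#include <stdio.h>

#define MAX_NR_ZONES 3

typedef struct {
	long size;
	int zone_wake_kswapd;
} zone_t;

typedef struct pg_data {
	zone_t node_zones[MAX_NR_ZONES];
	struct pg_data *node_next;
} pg_data_t;

static pg_data_t *pgdat_list;

static void do_try_to_free_pages(zone_t *zone)
{
	printf("reclaiming from zone %p\n", (void *)zone);
}

/* One pass of the zone-aware loop: every node on pgdat_list is visited,
 * but only configured zones that asked for kswapd's help are scanned. */
static void kswapd_pass(void)
{
	for (pg_data_t *pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
		for (int i = 0; i < MAX_NR_ZONES; i++) {
			zone_t *zone = pgdat->node_zones + i;

			if (!zone->size || !zone->zone_wake_kswapd)
				continue;
			do_try_to_free_pages(zone);
		}
}

int main(void)
{
	pg_data_t node = { { { 4096, 0 }, { 4096, 1 }, { 0, 1 } }, NULL };

	pgdat_list = &node;
	kswapd_pass();	/* only the middle zone is reclaimed */
	return 0;
}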