author    Ralf Baechle <ralf@linux-mips.org>    2000-03-27 23:54:12 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-03-27 23:54:12 +0000
commit    d3e71cb08747743fce908122bab08b479eb403a5 (patch)
tree      cbec6948fdbdee9af81cf3ecfb504070d2745d7b /mm
parent    fe7ff1706e323d0e5ed83972960a1ecc1ee538b3 (diff)

Merge with Linux 2.3.99-pre3.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c      2
-rw-r--r--  mm/highmem.c     13
-rw-r--r--  mm/memory.c      30
-rw-r--r--  mm/mmap.c        10
-rw-r--r--  mm/page_alloc.c  38
-rw-r--r--  mm/vmalloc.c      5
-rw-r--r--  mm/vmscan.c       4
7 files changed, 66 insertions, 36 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 1b9f9ed6a..bccdc9bd2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2465,7 +2465,7 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
PAGE_BUG(page);
}
- status = mapping->a_ops->prepare_write(page, offset, offset+bytes);
+ status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
if (status)
goto unlock;
kaddr = (char*)page_address(page);
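[Note: the single filemap.c hunk follows an address_space_operations interface change: ->prepare_write() now takes the struct file * as its first argument, so generic_file_write() has to pass it through. A hedged sketch of the write-path hooks as they look around 2.3.99/2.4, reconstructed from memory rather than copied from this tree:]

/* Sketch of the relevant hooks in struct address_space_operations
 * after this merge; other members omitted. */
struct address_space_operations {
	int (*readpage)(struct file *, struct page *);
	int (*prepare_write)(struct file *, struct page *, unsigned from, unsigned to);
	int (*commit_write)(struct file *, struct page *, unsigned from, unsigned to);
	/* ... */
};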
diff --git a/mm/highmem.c b/mm/highmem.c
index 8b713b2ef..691e3df1f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -234,11 +234,20 @@ static inline void copy_from_high_bh (struct buffer_head *to,
{
struct page *p_from;
unsigned long vfrom;
+ unsigned long flags;
p_from = from->b_page;
+
+ /*
+ * Since this can be executed from IRQ context, reentrance
+ * on the same CPU must be avoided:
+ */
+ __save_flags(flags);
+ __cli();
vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
+ __restore_flags(flags);
}
static inline void copy_to_high_bh_irq (struct buffer_head *to,
@@ -246,11 +255,15 @@ static inline void copy_to_high_bh_irq (struct buffer_head *to,
{
struct page *p_to;
unsigned long vto;
+ unsigned long flags;
p_to = to->b_page;
+ __save_flags(flags);
+ __cli();
vto = kmap_atomic(p_to, KM_BOUNCE_READ);
memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
kunmap_atomic(vto, KM_BOUNCE_READ);
+ __restore_flags(flags);
}
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
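[Note on the two highmem.c hunks: kmap_atomic() hands out one fixed virtual slot per (KM_* type, CPU) pair, so an interrupt handler that grabbed the same KM_BOUNCE_* slot on the same CPU would remap the address the interrupted copy is still using; hence the __save_flags()/__cli() bracket around the mapping. A rough sketch of the slot selection, paraphrased from the classic i386 kmap_atomic() and not taken from this tree:]

/* Rough sketch: the slot depends only on (type, cpu), so a nested call
 * with the same type on the same CPU silently reuses the same mapping. */
static unsigned long kmap_atomic_sketch(struct page *page, enum km_type type)
{
	unsigned long idx, vaddr;

	idx   = type + KM_TYPE_NR * smp_processor_id();	/* one slot per (type, cpu) */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));	/* install the mapping */
	__flush_tlb_one(vaddr);				/* and make it visible */
	return vaddr;
}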
diff --git a/mm/memory.c b/mm/memory.c
index 9d1a131ff..1bb7433c0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -428,6 +428,7 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
struct vm_area_struct * vma = 0;
struct page * map;
int i;
+ int datain = (rw == READ);
/* Make sure the iobuf is not already mapped somewhere. */
if (iobuf->nr_pages)
@@ -459,8 +460,19 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
vma = find_vma(current->mm, ptr);
if (!vma)
goto out_unlock;
+ if (vma->vm_start > ptr) {
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto out_unlock;
+ if (expand_stack(vma, ptr))
+ goto out_unlock;
+ }
+ if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
+ (!(vma->vm_flags & VM_READ))) {
+ err = -EACCES;
+ goto out_unlock;
+ }
}
- if (handle_mm_fault(current, vma, ptr, (rw==READ)) <= 0)
+ if (handle_mm_fault(current, vma, ptr, datain) <= 0)
goto out_unlock;
spin_lock(&mm->page_table_lock);
map = follow_page(ptr);
@@ -774,6 +786,15 @@ static inline void establish_pte(struct vm_area_struct * vma, unsigned long addr
update_mmu_cache(vma, address, entry);
}
+static inline void break_cow(struct vm_area_struct * vma, struct page * old_page, struct page * new_page, unsigned long address,
+ pte_t *page_table)
+{
+ copy_cow_page(old_page,new_page,address);
+ flush_page_to_ram(new_page);
+ flush_cache_page(vma, address);
+ establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+}
+
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
@@ -852,10 +873,7 @@ static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
if (pte_val(*page_table) == pte_val(pte)) {
if (PageReserved(old_page))
++vma->vm_mm->rss;
- copy_cow_page(old_page, new_page, address);
- flush_page_to_ram(new_page);
- flush_cache_page(vma, address);
- establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+ break_cow(vma, old_page, new_page, address, page_table);
/* Free the old page.. */
new_page = old_page;
@@ -903,7 +921,7 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address)
return;
flush_cache_page(vma, address);
page = pte_page(pte);
- if (page-mem_map >= max_mapnr)
+ if ((page-mem_map >= max_mapnr) || PageReserved(page))
return;
offset = address & ~PAGE_MASK;
memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
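[Note on the map_user_kiobuf() hunk: it grows the stack when the address sits just below a VM_GROWSDOWN vma, and it checks that the transfer direction matches the vma's permissions — datain (rw == READ, data arriving from the device into user memory) needs VM_WRITE, and either direction needs VM_READ. A small stand-alone rendering of that access test; the helper is hypothetical, the VM_READ/VM_WRITE values mirror the kernel's:]

#include <stdio.h>

#define VM_READ		0x0001
#define VM_WRITE	0x0002

/* Hypothetical user-space version of the new check in map_user_kiobuf(). */
static int kiobuf_access_ok(int datain, unsigned long vm_flags)
{
	if ((datain && !(vm_flags & VM_WRITE)) || !(vm_flags & VM_READ))
		return 0;	/* the kernel returns -EACCES here */
	return 1;
}

int main(void)
{
	printf("%d\n", kiobuf_access_ok(1, VM_READ));			/* 0: device write needs VM_WRITE */
	printf("%d\n", kiobuf_access_ok(1, VM_READ | VM_WRITE));	/* 1 */
	printf("%d\n", kiobuf_access_ok(0, VM_READ));			/* 1: device read only needs VM_READ */
	return 0;
}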
diff --git a/mm/mmap.c b/mm/mmap.c
index 7bc2cf910..604624168 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -200,11 +200,11 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
if (file != NULL) {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
- if ((prot & PROT_WRITE) && !(file->f_mode & 2))
+ if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
return -EACCES;
/* Make sure we don't allow writing to an append-only file.. */
- if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & 2))
+ if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
return -EACCES;
/* make sure there are no mandatory locks on the file. */
@@ -213,7 +213,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
/* fall through */
case MAP_PRIVATE:
- if (!(file->f_mode & 1))
+ if (!(file->f_mode & FMODE_READ))
return -EACCES;
break;
@@ -252,7 +252,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
VM_ClearReadHint(vma);
vma->vm_raend = 0;
- if (file->f_mode & 1)
+ if (file->f_mode & FMODE_READ)
vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_SHARED) {
vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
@@ -266,7 +266,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
* We leave the VM_MAYSHARE bit on, just to get correct output
* from /proc/xxx/maps..
*/
- if (!(file->f_mode & 2))
+ if (!(file->f_mode & FMODE_WRITE))
vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
}
} else
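[Note on the mmap.c hunks: the magic constants 1 and 2 tested against file->f_mode are the FMODE_READ and FMODE_WRITE bits, and the patch simply spells them out. A minimal stand-alone illustration of the MAP_SHARED permission test; the helper is hypothetical, the flag values match the long-standing <linux/fs.h> definitions:]

#include <stdio.h>

#define FMODE_READ	0x1
#define FMODE_WRITE	0x2

/* Hypothetical rendering of the do_mmap_pgoff() MAP_SHARED check. */
static int shared_writable_mapping_ok(unsigned int f_mode, int want_write)
{
	if (want_write && !(f_mode & FMODE_WRITE))
		return 0;	/* kernel: return -EACCES */
	return 1;
}

int main(void)
{
	printf("%d\n", shared_writable_mapping_ok(FMODE_READ, 1));			/* 0 */
	printf("%d\n", shared_writable_mapping_ok(FMODE_READ | FMODE_WRITE, 1));	/* 1 */
	return 0;
}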
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1205ab835..a780c6a74 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,11 +266,19 @@ static int zone_balance_memory(zonelist_t *zonelist)
/*
* This is the 'heart' of the zoned buddy allocator:
*/
-struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
+struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
{
zone_t **zone = zonelist->zones;
/*
+ * If this is a recursive call, we'd better
+ * do our best to just allocate things without
+ * further thought.
+ */
+ if (current->flags & PF_MEMALLOC)
+ goto allocate_ok;
+
+ /*
* (If anyone calls gfp from interrupts nonatomically then it
* will sooner or later tripped up by a schedule().)
*
@@ -283,32 +291,22 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
break;
if (!z->size)
BUG();
- /*
- * If this is a recursive call, we'd better
- * do our best to just allocate things without
- * further thought.
- */
- if (!(current->flags & PF_MEMALLOC)) {
- /* Are we low on memory? */
- if (z->free_pages <= z->pages_low)
- continue;
- }
- /*
- * This is an optimization for the 'higher order zone
- * is empty' case - it can happen even in well-behaved
- * systems, think the page-cache filling up all RAM.
- * We skip over empty zones. (this is not exact because
- * we do not take the spinlock and it's not exact for
- * the higher order case, but will do it for most things.)
- */
- if (z->free_pages) {
+
+ /* Are we supposed to free memory? Don't make it worse.. */
+ if (!z->zone_wake_kswapd && z->free_pages > z->pages_low) {
struct page *page = rmqueue(z, order);
if (page)
return page;
}
}
+
+ /*
+ * Ok, no obvious zones were available, start
+ * balancing things a bit..
+ */
if (zone_balance_memory(zonelist)) {
zone = zonelist->zones;
+allocate_ok:
for (;;) {
zone_t *z = *(zone++);
if (!z)
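[Note on the __alloc_pages() rewrite: the PF_MEMALLOC test is hoisted out of the per-zone loop. An allocation made by the memory freer itself jumps straight to the unconditional allocate_ok pass, while normal callers first try only zones that are above pages_low and not already waking kswapd, and only then go through zone_balance_memory(). A toy user-space model of that two-pass shape, using hypothetical names and no kernel internals:]

#include <stdio.h>

struct toy_zone {
	const char *name;
	long free_pages;
	long pages_low;
};

/* Pass 1 respects the low watermark; a PF_MEMALLOC-style caller skips
 * straight to the take-anything pass, as in the hunk above. */
static struct toy_zone *toy_alloc(struct toy_zone **zones, int reclaimer)
{
	struct toy_zone **z;

	if (!reclaimer) {
		for (z = zones; *z; z++)
			if ((*z)->free_pages > (*z)->pages_low)
				return *z;
		/* Real code: try zone_balance_memory(), then retry without
		 * the watermark check.  The toy just reports failure. */
		return NULL;
	}
	for (z = zones; *z; z++)
		if ((*z)->free_pages > 0)
			return *z;
	return NULL;
}

int main(void)
{
	struct toy_zone normal = { "NORMAL", 10, 32 };	/* below its watermark */
	struct toy_zone *zones[] = { &normal, NULL };

	printf("%s\n", toy_alloc(zones, 0) ? "got page" : "must balance first");
	printf("%s\n", toy_alloc(zones, 1) ? "got page" : "must balance first");
	return 0;
}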
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 96cad2679..99510c53b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -39,7 +39,8 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
continue;
if (pte_present(page)) {
unsigned long map_nr = pte_pagenr(page);
- if (map_nr < max_mapnr)
+ if ((map_nr < max_mapnr) &&
+ (!PageReserved(mem_map + map_nr)))
__free_page(mem_map + map_nr);
continue;
}
@@ -206,7 +207,7 @@ void * vmalloc_prot(unsigned long size, pgprot_t prot)
struct vm_struct *area;
size = PAGE_ALIGN(size);
- if (!size || (size >> PAGE_SHIFT) > max_mapnr) {
+ if (!size || (size >> PAGE_SHIFT) > num_physpages) {
BUG();
return NULL;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d3dfb8db6..f00e9c535 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -45,7 +45,7 @@ static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pt
if (!pte_present(pte))
goto out_failed;
page = pte_page(pte);
- if (page-mem_map >= max_mapnr)
+ if ((page-mem_map >= max_mapnr) || PageReserved(page))
goto out_failed;
/* Don't look at this pte if it's been accessed recently. */
@@ -59,7 +59,7 @@ static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pt
goto out_failed;
}
- if (PageReserved(page) || PageLocked(page))
+ if (PageLocked(page))
goto out_failed;
/*