author    Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
commit    012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree      87efc733f9b164e8c85c0336f92c8fb7eff6d183 /mm/memory.c
parent    625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation; I had a number
of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I am committing this anyway, since I found that the last CVS versions had the same problem.
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  71
1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 83fc97cb3..6b047821d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -67,7 +67,7 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
copy_user_highpage(to, from, address);
}
-mem_map_t * mem_map = NULL;
+mem_map_t * mem_map;
/*
* Note: this doesn't free the actual pages themselves. That
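The hunk above drops the explicit NULL initializer from mem_map. C guarantees that file-scope objects without an initializer are zero-filled (they live in .bss), while an explicit "= NULL" could, with toolchains of that era, push the variable into .data and cost image size, which is why kernel style omits redundant zero initializers. A minimal user-space sketch of the guarantee (illustrative only, not kernel code):

#include <stdio.h>

int *explicit_null = NULL;  /* explicitly zeroed: may be emitted into .data */
int *implicit_null;         /* no initializer: .bss, zeroed before main() runs */

int main(void)
{
	/* Both read back as NULL; the explicit initializer buys nothing. */
	printf("%p %p\n", (void *)explicit_null, (void *)implicit_null);
	return 0;
}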
@@ -924,33 +924,9 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address)
memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-void vmtruncate(struct inode * inode, loff_t offset)
+static void vmtruncate_list(struct vm_area_struct *mpnt,
+ unsigned long pgoff, unsigned long partial)
{
- unsigned long partial, pgoff;
- struct vm_area_struct * mpnt;
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
-
- if (inode->i_size < offset)
- goto do_expand;
- inode->i_size = offset;
- truncate_inode_pages(mapping, offset);
- spin_lock(&mapping->i_shared_lock);
- if (!mapping->i_mmap)
- goto out_unlock;
-
- pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);
-
- mpnt = mapping->i_mmap;
do {
struct mm_struct *mm = mpnt->vm_mm;
unsigned long start = mpnt->vm_start;
@@ -983,6 +959,39 @@ void vmtruncate(struct inode * inode, loff_t offset)
zap_page_range(mm, start, len);
flush_tlb_range(mm, start, end);
} while ((mpnt = mpnt->vm_next_share) != NULL);
+}
+
+
+/*
+ * Handle all mappings that got truncated by a "truncate()"
+ * system call.
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+void vmtruncate(struct inode * inode, loff_t offset)
+{
+ unsigned long partial, pgoff;
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ if (inode->i_size < offset)
+ goto do_expand;
+ inode->i_size = offset;
+ truncate_inode_pages(mapping, offset);
+ spin_lock(&mapping->i_shared_lock);
+ if (!mapping->i_mmap && !mapping->i_mmap_shared)
+ goto out_unlock;
+
+ pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);
+
+ if (mapping->i_mmap != NULL)
+ vmtruncate_list(mapping->i_mmap, pgoff, partial);
+ if (mapping->i_mmap_shared != NULL)
+ vmtruncate_list(mapping->i_mmap_shared, pgoff, partial);
+
out_unlock:
spin_unlock(&mapping->i_shared_lock);
/* this should go into ->truncate */
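As refactored, vmtruncate() computes pgoff and partial once and hands both to the new vmtruncate_list() helper for each of the i_mmap and i_mmap_shared vma lists. The arithmetic is easiest to see with concrete, hypothetical numbers; assuming PAGE_CACHE_SIZE is 4096 (shift 12) and a truncation to offset 10000:

loff_t offset = 10000;

/* Index of the first page lying entirely beyond the new size. */
unsigned long pgoff = (10000 + 4096 - 1) >> 12;    /* == 3 */

/* Bytes still valid in the last, partially covered page (index 2 here);
 * mappings of that page are trimmed rather than unmapped outright. */
unsigned long partial = 10000 & (4096 - 1);        /* == 1808 */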
@@ -1031,7 +1040,8 @@ void swapin_readahead(swp_entry_t entry)
num = valid_swaphandles(entry, &offset);
for (i = 0; i < num; offset++, i++) {
/* Don't block on I/O for read-ahead */
- if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster) {
+ if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster
+ * (1 << page_cluster)) {
while (i++ < num)
swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
break;
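The new factor lets the swap read-ahead throttle scale with the page_cluster tunable instead of the bare swap_cluster count. With hypothetical tunables, say pager_daemon.swap_cluster = 8 and page_cluster = 4, the loop now tolerates sixteen times as much in-flight I/O before bailing out:

int swap_cluster = 8;    /* hypothetical pager_daemon.swap_cluster */
int page_cluster = 4;    /* hypothetical read-ahead order */

/* Old cutoff: 8 async pages in flight. New cutoff: 8 * (1 << 4) == 128. */
int threshold = swap_cluster * (1 << page_cluster);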
@@ -1095,15 +1105,12 @@ static int do_swap_page(struct mm_struct * mm,
*/
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
- int high = 0;
struct page *page = NULL;
pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
if (write_access) {
page = alloc_page(GFP_HIGHUSER);
if (!page)
return -1;
- if (PageHighMem(page))
- high = 1;
clear_user_highpage(page, addr);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
mm->rss++;
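The deleted 'high' flag in do_anonymous_page() was computed but never read: clear_user_highpage() kmaps and clears the page itself, so the caller need not remember whether the allocation came from highmem. The surviving pattern, sketched rather than quoted verbatim:

struct page *page = alloc_page(GFP_HIGHUSER);   /* may be a highmem page */
if (!page)
	return -1;
/* kmap/clear/kunmap happen inside the helper; no PageHighMem() bookkeeping. */
clear_user_highpage(page, addr);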
@@ -1233,7 +1240,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
pgd = pgd_offset(mm, address);
pmd = pmd_alloc(pgd, address);
-
+
if (pmd) {
pte_t * pte = pte_alloc(pmd, address);
if (pte)