author    Ralf Baechle <ralf@linux-mips.org>  1999-07-05 23:09:37 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-07-05 23:09:37 +0000
commit    aba344fdfed81b2c03d6114c54cfd73a486aa10b (patch)
tree      d032d8430bf1234c3ecc6f6330d6de6e887e5963 /mm
parent    40c138bfc6d37dbff5339f84575db1e3cec6e34e (diff)
Merge with Linux 2.3.9.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     52
-rw-r--r--  mm/mlock.c        6
-rw-r--r--  mm/mmap.c         9
-rw-r--r--  mm/mprotect.c     6
-rw-r--r--  mm/mremap.c       2
-rw-r--r--  mm/page_io.c     16
-rw-r--r--  mm/swap_state.c   2
-rw-r--r--  mm/swapfile.c     1
8 files changed, 58 insertions, 36 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 8936d35d1..ed5b6d34c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -20,6 +20,7 @@
#include <linux/file.h>
#include <linux/swapctl.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -35,7 +36,8 @@
*/
atomic_t page_cache_size = ATOMIC_INIT(0);
-struct page * page_hash_table[PAGE_HASH_SIZE];
+unsigned int page_hash_bits;
+struct page **page_hash_table;
spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
@@ -273,8 +275,8 @@ int shrink_mmap(int priority, int gfp_mask)
continue;
}
if (!page_count(page)) {
-// BUG();
spin_unlock(&pagecache_lock);
+ BUG();
continue;
}
get_page(page);
@@ -292,13 +294,18 @@ int shrink_mmap(int priority, int gfp_mask)
/* Is it a buffer page? */
if (page->buffers) {
+ int mem = page->inode ? 0 : PAGE_CACHE_SIZE;
spin_unlock(&pagecache_lock);
- if (try_to_free_buffers(page))
- goto made_progress;
+ if (!try_to_free_buffers(page))
+ goto unlock_continue;
+ atomic_sub(mem, &buffermem);
spin_lock(&pagecache_lock);
}
- /* We can't free pages unless there's just one user */
+ /*
+ * We can't free pages unless there's just one user
+ * (count == 2 because we added one ourselves above).
+ */
if (page_count(page) != 2)
goto spin_unlock_continue;
@@ -354,6 +361,7 @@ inside:
if (page->offset == offset)
break;
}
+ set_bit(PG_referenced, &page->flags);
not_found:
return page;
}
@@ -1138,7 +1146,6 @@ ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *
{
ssize_t retval;
- unlock_kernel();
retval = -EFAULT;
if (access_ok(VERIFY_WRITE, buf, count)) {
retval = 0;
@@ -1156,7 +1163,6 @@ ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *
retval = desc.error;
}
}
- lock_kernel();
return retval;
}
@@ -1481,7 +1487,7 @@ static int filemap_write_page(struct vm_area_struct * vma,
* If a task terminates while we're swapping the page, the vma
* and file could be released ... increment the count to be safe.
*/
- file->f_count++;
+ atomic_inc(&file->f_count);
result = do_write_page(inode, file, (const char *) page, offset);
fput(file);
return result;
@@ -1829,8 +1835,6 @@ generic_file_write(struct file *file, const char *buf,
count = limit - pos;
}
- unlock_kernel();
-
while (count) {
unsigned long bytes, pgpos, offset;
/*
@@ -1892,7 +1896,6 @@ repeat_find:
page_cache_free(page_cache);
err = written ? written : status;
- lock_kernel();
out:
return err;
}
@@ -1914,3 +1917,30 @@ void put_cached_page(unsigned long addr)
page_count(page));
page_cache_release(page);
}
+
+void __init page_cache_init(unsigned long memory_size)
+{
+ unsigned long htable_size, order;
+
+ htable_size = memory_size >> PAGE_SHIFT;
+ htable_size *= sizeof(struct page *);
+ for(order = 0; (PAGE_SIZE << order) < htable_size; order++)
+ ;
+
+ do {
+ unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);
+
+ page_hash_bits = 0;
+ while((tmp >>= 1UL) != 0UL)
+ page_hash_bits++;
+
+ page_hash_table = (struct page **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while(page_hash_table == NULL && --order > 0);
+
+ printk("Page-cache hash table entries: %d (order: %ld, %ld bytes)\n",
+ (1 << page_hash_bits), order, (PAGE_SIZE << order));
+ if (!page_hash_table)
+ panic("Failed to allocate page hash table\n");
+ memset(page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
+}
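
The new page_cache_init() above sizes the hash table from physical memory: roughly one pointer-sized slot per page frame, rounded up to a power-of-two allocation, retried at smaller orders if __get_free_pages() fails. A minimal user-space sketch of the same order/bits arithmetic (the 4 KB page size and 128 MB memory figure below are illustrative assumptions, not values taken from the patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL			/* assumed page size, for illustration */

int main(void)
{
	unsigned long memory_size = 128UL << 20;	/* assume 128 MB of RAM */
	unsigned long htable_size, order, tmp;
	unsigned int bits = 0;

	/* one pointer-sized hash slot per physical page */
	htable_size = (memory_size / PAGE_SIZE) * sizeof(void *);

	/* smallest power-of-two number of pages covering the table */
	for (order = 0; (PAGE_SIZE << order) < htable_size; order++)
		;

	/* hash bits = log2 of the slot count actually allocated */
	tmp = (PAGE_SIZE << order) / sizeof(void *);
	while ((tmp >>= 1) != 0)
		bits++;

	printf("order %lu, %lu bytes, %u hash bits (%lu entries)\n",
	       order, PAGE_SIZE << order, bits, 1UL << bits);
	return 0;
}

On a 64-bit host this prints order 6, 262144 bytes, 15 hash bits; the kernel version additionally falls back to order - 1 when the allocation fails.
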
diff --git a/mm/mlock.c b/mm/mlock.c
index 4a938c958..7947031af 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,7 @@ static inline int mlock_fixup_start(struct vm_area_struct * vma,
vma->vm_offset += vma->vm_start - n->vm_start;
n->vm_flags = newflags;
if (n->vm_file)
- n->vm_file->f_count++;
+ atomic_inc(&n->vm_file->f_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -52,7 +52,7 @@ static inline int mlock_fixup_end(struct vm_area_struct * vma,
n->vm_offset += n->vm_start - vma->vm_start;
n->vm_flags = newflags;
if (n->vm_file)
- n->vm_file->f_count++;
+ atomic_inc(&n->vm_file->f_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -82,7 +82,7 @@ static inline int mlock_fixup_middle(struct vm_area_struct * vma,
right->vm_offset += right->vm_start - left->vm_start;
vma->vm_flags = newflags;
if (vma->vm_file)
- vma->vm_file->f_count += 2;
+ atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
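
This and the following files convert the struct file reference count from a plain integer to atomic operations; a bare f_count++ compiles to a read-modify-write that can lose an update when two CPUs bump the same counter at once. A user-space sketch of the pattern using C11 atomics (the kernel of this era uses its own asm/atomic.h primitives, not <stdatomic.h>; the struct and function names here are made up for illustration):

#include <stdatomic.h>
#include <stdio.h>

struct file_ref {
	atomic_int f_count;	/* reference count shared between CPUs */
};

static void get_ref(struct file_ref *f)
{
	/* was "f->f_count++": two racing CPUs can both read the old
	 * value and both store old + 1, silently dropping a reference */
	atomic_fetch_add(&f->f_count, 1);
}

int main(void)
{
	struct file_ref f = { .f_count = 1 };

	get_ref(&f);				/* like atomic_inc() */
	atomic_fetch_add(&f.f_count, 2);	/* like atomic_add(2, ...) */
	printf("f_count = %d\n", atomic_load(&f.f_count));	/* prints 4 */
	return 0;
}
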
diff --git a/mm/mmap.c b/mm/mmap.c
index e179a2932..c9d07a291 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -62,7 +62,7 @@ int vm_enough_memory(long pages)
if (sysctl_overcommit_memory)
return 1;
- free = buffermem >> PAGE_SHIFT;
+ free = atomic_read(&buffermem) >> PAGE_SHIFT;
free += atomic_read(&page_cache_size);
free += nr_free_pages;
free += nr_swap_pages;
@@ -313,7 +313,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
if (error)
goto unmap_and_free_vma;
vma->vm_file = file;
- file->f_count++;
+ atomic_inc(&file->f_count);
}
/*
@@ -547,7 +547,7 @@ static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area,
mpnt->vm_file = area->vm_file;
mpnt->vm_pte = area->vm_pte;
if (mpnt->vm_file)
- mpnt->vm_file->f_count++;
+ atomic_inc(&mpnt->vm_file->f_count);
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
area->vm_end = addr; /* Truncate area */
@@ -786,8 +786,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
*/
flags = vma->vm_flags;
addr = vma->vm_start;
+
+ lock_kernel(); /* kswapd, ugh */
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
+ unlock_kernel();
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
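
The vm_enough_memory() hunk above only changes how buffermem is read, but the heuristic itself is worth spelling out: it admits a mapping when the pages that could plausibly be reclaimed (buffer cache, page cache, free frames, free swap) outnumber the request. A rough user-space sketch of that accounting (all counter values below are made-up numbers, and the function name is illustrative, not the kernel symbol):

#include <stdio.h>

/* stand-ins for the kernel's global counters, all in pages */
static long buffermem_pages = 1024;	/* buffer cache */
static long page_cache_pages = 4096;	/* page cache */
static long nr_free_pages = 512;	/* free page frames */
static long nr_swap_pages = 8192;	/* free swap slots */

/* same shape as vm_enough_memory(): sum the reclaimable pools
 * and compare against the requested mapping size */
static int enough_memory(long pages)
{
	long free = buffermem_pages + page_cache_pages
		  + nr_free_pages + nr_swap_pages;
	return free > pages;
}

int main(void)
{
	printf("map 2000 pages: %s\n", enough_memory(2000) ? "ok" : "refused");
	printf("map 20000 pages: %s\n", enough_memory(20000) ? "ok" : "refused");
	return 0;
}

The kernel version also short-circuits to success when sysctl_overcommit_memory is set, as the hunk above shows.
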
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b28237c09..14073c0fa 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -103,7 +103,7 @@ static inline int mprotect_fixup_start(struct vm_area_struct * vma,
n->vm_flags = newflags;
n->vm_page_prot = prot;
if (n->vm_file)
- n->vm_file->f_count++;
+ atomic_inc(&n->vm_file->f_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -126,7 +126,7 @@ static inline int mprotect_fixup_end(struct vm_area_struct * vma,
n->vm_flags = newflags;
n->vm_page_prot = prot;
if (n->vm_file)
- n->vm_file->f_count++;
+ atomic_inc(&n->vm_file->f_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -158,7 +158,7 @@ static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
vma->vm_flags = newflags;
vma->vm_page_prot = prot;
if (vma->vm_file)
- vma->vm_file->f_count += 2;
+ atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
diff --git a/mm/mremap.c b/mm/mremap.c
index b50e00dec..48d3e9f94 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -136,7 +136,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
lock_kernel();
if (new_vma->vm_file)
- new_vma->vm_file->f_count++;
+ atomic_inc(&new_vma->vm_file->f_count);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
diff --git a/mm/page_io.c b/mm/page_io.c
index 75b7195fb..0f7e6d199 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -99,7 +99,7 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
} else if (p->swap_file) {
struct inode *swapf = p->swap_file->d_inode;
int i;
- if (swapf->i_op->bmap == NULL
+ if (swapf->i_op->get_block == NULL
&& swapf->i_op->smap != NULL){
/*
With MS-DOS, we use msdos_smap which returns
@@ -110,7 +110,7 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
It sounds like ll_rw_swap_file defined
its operation size (sector size) based on
PAGE_SIZE and the number of blocks to read.
- So using bmap or smap should work even if
+ So using get_block or smap should work even if
smap will require more blocks.
*/
int j;
@@ -147,8 +147,7 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
atomic_inc(&nr_async_pages);
}
if (dolock) {
- /* only lock/unlock swap cache pages! */
- set_bit(PG_swap_unlock_after, &page->flags);
+ set_bit(PG_free_swap_after, &page->flags);
p->swap_map[offset]++;
}
set_bit(PG_free_after, &page->flags);
@@ -177,15 +176,6 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
}
/*
- * This is run when asynchronous page I/O has completed.
- * It decrements the swap bitmap counter
- */
-void swap_after_unlock_page(unsigned long entry)
-{
- swap_free(entry);
-}
-
-/*
* A simple wrapper so the base function doesn't need to enforce
* that all swap pages go through the swap cache! We verify that:
* - the page is locked
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8ee2699f0..2aa17d3a4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,7 +39,7 @@ static struct inode_operations swapper_inode_operations = {
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
- NULL, /* bmap */
+ NULL, /* get_block */
NULL, /* readpage */
NULL, /* writepage */
block_flushpage, /* flushpage */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a4a523ef2..ce18f34f5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -5,7 +5,6 @@
* Swap reorganised 29.12.95, Stephen Tweedie
*/
-#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>