author     Ralf Baechle <ralf@linux-mips.org>  1997-07-20 14:56:40 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  1997-07-20 14:56:40 +0000
commit     e308faf24f68e262d92d294a01ddca7a17e76762 (patch)
tree       22c47cb315811834861f013067878ff664e95abd /mm
parent     30c6397ce63178fcb3e7963ac247f0a03132aca9 (diff)
Sync with Linux 2.1.46.
Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile   |   2
-rw-r--r--  mm/filemap.c  |  26
-rw-r--r--  mm/mlock.c    |  11
-rw-r--r--  mm/mmap.c     |  63
-rw-r--r--  mm/mprotect.c |  10
-rw-r--r--  mm/mremap.c   |   3
-rw-r--r--  mm/page_io.c  |   2
-rw-r--r--  mm/simp.c     | 434
-rw-r--r--  mm/slab.c     |  72
-rw-r--r--  mm/swapfile.c |  78
-rw-r--r--  mm/vmalloc.c  |   8
-rw-r--r--  mm/vmscan.c   |  19
12 files changed, 579 insertions, 149 deletions
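
The bulk of this sync converts struct vm_area_struct from holding an inode reference (vm_inode, pinned with atomic_inc() and released with iput()) to holding a dentry reference (vm_dentry, pinned with dget() and released with dput()); the inode is then reached through vm_dentry->d_inode. A condensed sketch of the recurring pattern follows; the helper name and variables are illustrative only, not part of this commit:

#include <linux/mm.h>
#include <linux/dcache.h>

/* Sketch of the vm_inode -> vm_dentry conversion applied throughout
 * this commit; share_mapping() is a hypothetical helper, not from the tree.
 */
static void share_mapping(struct vm_area_struct *dst,
                          struct vm_area_struct *src)
{
	/* Old scheme: pin the inode, guarding against anonymous VMAs:
	 *
	 *	if (src->vm_inode)
	 *		atomic_inc(&src->vm_inode->i_count);
	 */

	/* New scheme: pin the dentry instead. The unconditional calls in
	 * mlock.c and mremap.c below rely on dget() tolerating a NULL
	 * argument for anonymous mappings.
	 */
	dst->vm_dentry = dget(src->vm_dentry);
}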
diff --git a/mm/Makefile b/mm/Makefile
index c64eefbd2..ef3820d1c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -9,7 +9,7 @@
O_TARGET := mm.o
O_OBJS := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
- vmalloc.o slab.o \
+ vmalloc.o slab.o simp.o\
swap.o vmscan.o page_io.o page_alloc.o swap_state.o swapfile.o
include $(TOPDIR)/Rules.make
diff --git a/mm/filemap.c b/mm/filemap.c
index 56aa1b486..8915c1096 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -753,10 +753,7 @@ page_read_error:
filp->f_reada = 1;
if (page_cache)
free_page(page_cache);
- if (DO_UPDATE_ATIME(inode)) {
- inode->i_atime = CURRENT_TIME;
- inode->i_dirt = 1;
- }
+ UPDATE_ATIME(inode);
if (!read)
read = error;
return read;
@@ -777,7 +774,7 @@ static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long
duplicate flushes. ... */
unsigned long offset;
struct page * page, **hash;
- struct inode * inode = area->vm_inode;
+ struct inode * inode = area->vm_dentry->d_inode;
unsigned long old_page, new_page;
new_page = 0;
@@ -921,7 +918,6 @@ static inline int do_write_page(struct inode * inode, struct file * file,
retval = -EIO;
if (size == file->f_op->write(inode, file, (const char *) page, size))
retval = 0;
- /* inode->i_status |= ST_MODIFIED is willingly *not* done here */
set_fs(old_fs);
return retval;
}
@@ -932,6 +928,7 @@ static int filemap_write_page(struct vm_area_struct * vma,
{
int result;
struct file file;
+ struct dentry * dentry;
struct inode * inode;
struct buffer_head * bh;
@@ -946,14 +943,15 @@ static int filemap_write_page(struct vm_area_struct * vma,
return 0;
}
- inode = vma->vm_inode;
+ dentry = vma->vm_dentry;
+ inode = dentry->d_inode;
file.f_op = inode->i_op->default_file_ops;
if (!file.f_op->write)
return -EIO;
file.f_mode = 3;
file.f_flags = 0;
file.f_count = 1;
- file.f_inode = inode;
+ file.f_dentry = dentry;
file.f_pos = offset;
file.f_reada = 0;
@@ -1191,12 +1189,8 @@ int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_s
return -EACCES;
if (!inode->i_op || !inode->i_op->readpage)
return -ENOEXEC;
- if (DO_UPDATE_ATIME(inode)) {
- inode->i_atime = CURRENT_TIME;
- inode->i_dirt = 1;
- }
- vma->vm_inode = inode;
- atomic_inc(&inode->i_count);
+ UPDATE_ATIME(inode);
+ vma->vm_dentry = dget(file->f_dentry);
vma->vm_ops = ops;
return 0;
}
@@ -1209,7 +1203,7 @@ int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_s
static int msync_interval(struct vm_area_struct * vma,
unsigned long start, unsigned long end, int flags)
{
- if (!vma->vm_inode)
+ if (!vma->vm_dentry)
return 0;
if (vma->vm_ops->sync) {
int error;
@@ -1217,7 +1211,7 @@ static int msync_interval(struct vm_area_struct * vma,
if (error)
return error;
if (flags & MS_SYNC)
- return file_fsync(vma->vm_inode, NULL);
+ return file_fsync(vma->vm_dentry->d_inode, NULL);
return 0;
}
return 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index 5a69e4b55..eea100add 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -38,8 +38,7 @@ static inline int mlock_fixup_start(struct vm_area_struct * vma,
n->vm_end = end;
vma->vm_offset += vma->vm_start - n->vm_start;
n->vm_flags = newflags;
- if (n->vm_inode)
- atomic_inc(&n->vm_inode->i_count);
+ n->vm_dentry = dget(vma->vm_dentry);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -59,8 +58,7 @@ static inline int mlock_fixup_end(struct vm_area_struct * vma,
n->vm_start = start;
n->vm_offset += n->vm_start - vma->vm_start;
n->vm_flags = newflags;
- if (n->vm_inode)
- atomic_inc(&n->vm_inode->i_count);
+ n->vm_dentry = dget(vma->vm_dentry);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -89,8 +87,9 @@ static inline int mlock_fixup_middle(struct vm_area_struct * vma,
vma->vm_offset += vma->vm_start - left->vm_start;
right->vm_offset += right->vm_start - left->vm_start;
vma->vm_flags = newflags;
- if (vma->vm_inode)
- atomic_add(2, &vma->vm_inode->i_count);
+ if (vma->vm_dentry)
+ vma->vm_dentry->d_count += 2;
+
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
diff --git a/mm/mmap.c b/mm/mmap.c
index af8cd0a4a..be225e83b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -74,11 +74,11 @@ int vm_enough_memory(long pages)
/* Remove one vm structure from the inode's i_mmap ring. */
static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
- struct inode * inode = vma->vm_inode;
+ struct dentry * dentry = vma->vm_dentry;
- if (inode) {
+ if (dentry) {
if (vma->vm_flags & VM_DENYWRITE)
- inode->i_writecount++;
+ dentry->d_inode->i_writecount++;
if(vma->vm_next_share)
vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
*vma->vm_pprev_share = vma->vm_next_share;
@@ -194,7 +194,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
return -EACCES;
/* make sure there are no mandatory locks on the file. */
- if (locks_verify_locked(file->f_inode))
+ if (locks_verify_locked(file->f_dentry->d_inode))
return -EAGAIN;
/* fall through */
case MAP_PRIVATE:
@@ -259,7 +259,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
vma->vm_ops = NULL;
vma->vm_offset = off;
- vma->vm_inode = NULL;
+ vma->vm_dentry = NULL;
vma->vm_pte = 0;
do_munmap(addr, len); /* Clear old maps */
@@ -283,7 +283,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
if (file) {
int error = 0;
if (vma->vm_flags & VM_DENYWRITE) {
- if (file->f_inode->i_writecount > 0)
+ if (file->f_dentry->d_inode->i_writecount > 0)
error = -ETXTBSY;
else {
/* f_op->mmap might possibly sleep
@@ -291,16 +291,16 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
* might). In any case, this takes care of any
* race that this might cause.
*/
- file->f_inode->i_writecount--;
+ file->f_dentry->d_inode->i_writecount--;
correct_wcount = 1;
}
}
if (!error)
- error = file->f_op->mmap(file->f_inode, file, vma);
+ error = file->f_op->mmap(file->f_dentry->d_inode, file, vma);
if (error) {
if (correct_wcount)
- file->f_inode->i_writecount++;
+ file->f_dentry->d_inode->i_writecount++;
kmem_cache_free(vm_area_cachep, vma);
return error;
}
@@ -309,8 +309,10 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
flags = vma->vm_flags;
insert_vm_struct(mm, vma);
if (correct_wcount)
- file->f_inode->i_writecount++;
+ file->f_dentry->d_inode->i_writecount++;
merge_segments(mm, vma->vm_start, vma->vm_end);
+
+ addr = vma->vm_start;
/* merge_segments might have merged our vma, so we can't use it any more */
mm->total_vm += len >> PAGE_SHIFT;
@@ -387,8 +389,8 @@ static void unmap_fixup(struct vm_area_struct *area,
if (addr == area->vm_start && end == area->vm_end) {
if (area->vm_ops && area->vm_ops->close)
area->vm_ops->close(area);
- if (area->vm_inode)
- iput(area->vm_inode);
+ if (area->vm_dentry)
+ dput(area->vm_dentry);
return;
}
@@ -405,11 +407,14 @@ static void unmap_fixup(struct vm_area_struct *area,
if (!mpnt)
return;
- *mpnt = *area;
- mpnt->vm_offset += (end - area->vm_start);
+ mpnt->vm_mm = area->vm_mm;
mpnt->vm_start = end;
- if (mpnt->vm_inode)
- atomic_inc(&mpnt->vm_inode->i_count);
+ mpnt->vm_end = area->vm_end;
+ mpnt->vm_page_prot = area->vm_page_prot;
+ mpnt->vm_flags = area->vm_flags;
+ mpnt->vm_ops = area->vm_ops;
+ mpnt->vm_offset += (end - area->vm_start);
+ mpnt->vm_dentry = dget(area->vm_dentry);
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
area->vm_end = addr; /* Truncate area */
@@ -542,8 +547,8 @@ void exit_mmap(struct mm_struct * mm)
}
remove_shared_vm_struct(mpnt);
zap_page_range(mm, start, size);
- if (mpnt->vm_inode)
- iput(mpnt->vm_inode);
+ if (mpnt->vm_dentry)
+ dput(mpnt->vm_dentry);
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = next;
}
@@ -555,7 +560,7 @@ void exit_mmap(struct mm_struct * mm)
void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
{
struct vm_area_struct **pprev = &mm->mmap;
- struct inode * inode;
+ struct dentry * dentry;
/* Find where to link it in. */
while(*pprev && (*pprev)->vm_start <= vmp->vm_start)
@@ -567,8 +572,9 @@ void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
*pprev = vmp;
vmp->vm_pprev = pprev;
- inode = vmp->vm_inode;
- if (inode) {
+ dentry = vmp->vm_dentry;
+ if (dentry) {
+ struct inode * inode = dentry->d_inode;
if (vmp->vm_flags & VM_DENYWRITE)
inode->i_writecount--;
@@ -615,16 +621,19 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
next = mpnt->vm_next;
- /* To share, we must have the same inode, operations.. */
- if ((mpnt->vm_inode != prev->vm_inode) ||
+ /* To share, we must have the same dentry, operations.. */
+ if ((mpnt->vm_dentry != prev->vm_dentry)||
(mpnt->vm_pte != prev->vm_pte) ||
(mpnt->vm_ops != prev->vm_ops) ||
(mpnt->vm_flags != prev->vm_flags) ||
(prev->vm_end != mpnt->vm_start))
continue;
- /* and if we have an inode, the offsets must be contiguous.. */
- if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
+ /*
+ * If we have a dentry or it's a shared memory area
+ * the offsets must be contiguous..
+ */
+ if ((mpnt->vm_dentry != NULL) || (mpnt->vm_flags & VM_SHM)) {
unsigned long off = prev->vm_offset+prev->vm_end-prev->vm_start;
if (off != mpnt->vm_offset)
continue;
@@ -645,8 +654,8 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
mpnt->vm_ops->close(mpnt);
}
remove_shared_vm_struct(mpnt);
- if (mpnt->vm_inode)
- atomic_dec(&mpnt->vm_inode->i_count);
+ if (mpnt->vm_dentry)
+ dput(mpnt->vm_dentry);
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = prev;
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2e46ca142..ddf4f4ed6 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -110,8 +110,7 @@ static inline int mprotect_fixup_start(struct vm_area_struct * vma,
vma->vm_offset += vma->vm_start - n->vm_start;
n->vm_flags = newflags;
n->vm_page_prot = prot;
- if (n->vm_inode)
- atomic_inc(&n->vm_inode->i_count);
+ n->vm_dentry = dget(n->vm_dentry);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -133,8 +132,7 @@ static inline int mprotect_fixup_end(struct vm_area_struct * vma,
n->vm_offset += n->vm_start - vma->vm_start;
n->vm_flags = newflags;
n->vm_page_prot = prot;
- if (n->vm_inode)
- atomic_inc(&n->vm_inode->i_count);
+ n->vm_dentry = dget(n->vm_dentry);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -165,8 +163,8 @@ static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
right->vm_offset += right->vm_start - left->vm_start;
vma->vm_flags = newflags;
vma->vm_page_prot = prot;
- if (vma->vm_inode)
- atomic_add(2, &vma->vm_inode->i_count);
+ if (vma->vm_dentry)
+ vma->vm_dentry->d_count += 2;
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
diff --git a/mm/mremap.c b/mm/mremap.c
index a52db58de..aaabde322 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -140,8 +140,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
new_vma->vm_start = new_addr;
new_vma->vm_end = new_addr+new_len;
new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
- if (new_vma->vm_inode)
- atomic_inc(&new_vma->vm_inode->i_count);
+ new_vma->vm_dentry = dget(vma->vm_dentry);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
diff --git a/mm/page_io.c b/mm/page_io.c
index 30d0c882e..5ebea3f09 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -98,7 +98,7 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
return;
wait_on_page(page);
} else if (p->swap_file) {
- struct inode *swapf = p->swap_file;
+ struct inode *swapf = p->swap_file->d_inode;
unsigned int zones[PAGE_SIZE/512];
int i;
if (swapf->i_op->bmap == NULL
diff --git a/mm/simp.c b/mm/simp.c
new file mode 100644
index 000000000..7959d6a0e
--- /dev/null
+++ b/mm/simp.c
@@ -0,0 +1,434 @@
+#define NULL 0
+/*
+ * mm/simp.c -- simple allocator for cached objects
+ *
+ * (C) 1997 Thomas Schoebel-Theuer
+ */
+
+#include <linux/simp.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <asm/spinlock.h>
+
+/* The next two defines can be independently enabled for debugging */
+/*#define DEBUG*/
+/*#define DEAD_BEEF*/
+
+#ifdef DEAD_BEEF
+#define DEBUG_BEEF 1
+#else
+#define DEBUG_BEEF 0
+#endif
+
+#ifdef __SMP__
+#define NR_PROCESSORS NR_CPUS
+#define GLOBAL_SIZE CHUNK_SIZE
+#else
+#define NR_PROCESSORS 1
+#define GLOBAL_SIZE PAGE_SIZE
+#endif
+
+#define POSTBUFFER_SIZE 63
+#define ORDER 2
+#define CHUNK_SIZE (PAGE_SIZE*(1<<ORDER))
+#define CHUNK_BASE(ptr) (struct header*)(((unsigned long)(ptr)) & ~(CHUNK_SIZE-1))
+#define CHUNK_END(hdr) (void**)((char*)(hdr) + CHUNK_SIZE)
+
+#define COLOR_INCREMENT (8*sizeof(void*)) /* should be 1 cache line */
+#define ALIGN_CACHE(adr) ((((((unsigned long)adr) - 1) / COLOR_INCREMENT) + 1) * COLOR_INCREMENT)
+#define HEADER_SIZE ALIGN_CACHE(sizeof(struct header))
+#define ELEM_SIZE ALIGN_CACHE(sizeof(struct elem))
+#define FILL_TYPE(name,wrongsize) char name[ALIGN_CACHE(wrongsize)-(wrongsize)]
+
+#define MAX_SIMPS ((GLOBAL_SIZE / sizeof(struct simp)) - 1)
+
+struct header { /* this is at the beginning of each memory region */
+ /* 1st cache line */
+ void ** index;
+ void ** fresh;
+ struct simp * father;
+ void ** emptypos;
+ struct header * next;
+ structor again_ctor;
+ structor first_ctor;
+ void * fill[1];
+#ifdef DEBUG
+ /* 2nd cache line */
+ char magic[32];
+#endif
+};
+
+struct per_processor {
+ void ** buffer_pos;
+ void * postbuffer[POSTBUFFER_SIZE];
+};
+
+struct simp {
+ /* 1st cache lines */
+ struct per_processor private[NR_PROCESSORS];
+ /* next cache line */
+ struct header * usable_list;
+ spinlock_t lock;
+ char fill[sizeof(void*) - sizeof(spinlock_t)];
+ long real_size;
+ long max_elems;
+ structor again_ctor;
+ structor first_ctor;
+ structor dtor;
+ long fill2;
+ /* next cache line */
+ long create_offset;
+ long color;
+ long max_color;
+ long size;
+ long fill3[4];
+ /* next cache line */
+ char name[32];
+};
+
+struct global_data {
+ /* 1st cache line */
+ long changed_flag;
+ long nr_simps;
+ spinlock_t lock;
+ char fill[(6+8)*sizeof(void*)+sizeof(void*)-sizeof(spinlock_t)];
+ /* rest */
+ struct simp simps[MAX_SIMPS];
+};
+
+static struct global_data * global = NULL;
+
+#ifdef DEBUG
+static char global_magic[32] = "SIMP header SdC581oi9rY20051962\n";
+#endif
+
+struct simp * simp_create(char * name, long size,
+ structor first_ctor,
+ structor again_ctor,
+ structor dtor)
+{
+ struct simp * simp;
+ long fraction;
+ long real_size;
+ int cpu;
+
+ if(!global) {
+#ifdef __SMP__
+ global = (struct global_data*)__get_free_pages(GFP_KERNEL, ORDER, 0);
+ memset(global, 0, CHUNK_SIZE);
+#else
+ global = (struct global_data*)get_free_page(GFP_KERNEL);
+#endif
+ spin_lock_init(&global->lock);
+ }
+
+ spin_lock(&global->lock);
+ simp = &global->simps[global->nr_simps++];
+ spin_unlock(&global->lock);
+
+ if(global->nr_simps >= MAX_SIMPS) {
+ printk("SIMP: too many simps allocated\n");
+ return NULL;
+ }
+ memset(simp, 0, sizeof(struct simp));
+ spin_lock_init(&simp->lock);
+ strncpy(simp->name, name, 15);
+ simp->size = size;
+ simp->real_size = real_size = ALIGN_CACHE(size);
+ /* allow aggregation of very small objects in 2-power fractions of
+ * cachelines */
+ fraction = COLOR_INCREMENT / 2;
+ while(size <= fraction && fraction >= sizeof(void*)) {
+ simp->real_size = fraction;
+ fraction >>= 1;
+ }
+ simp->first_ctor = first_ctor;
+ simp->again_ctor = again_ctor;
+ simp->dtor = dtor;
+
+ real_size += sizeof(void*);
+ simp->max_elems = (CHUNK_SIZE - HEADER_SIZE) / real_size;
+ simp->max_color = (CHUNK_SIZE - HEADER_SIZE) % real_size;
+ for(cpu = 0; cpu < NR_PROCESSORS; cpu++) {
+ struct per_processor * private = &simp->private[cpu];
+ private->buffer_pos = private->postbuffer;
+ }
+ return simp;
+}
+
+/* Do *not* inline this, it clobbers too many registers... */
+static void alloc_header(struct simp * simp)
+{
+ struct header * hdr;
+ char * ptr;
+ void ** index;
+ long count;
+
+ spin_unlock(&simp->lock);
+ for(;;) {
+ hdr = (struct header*)__get_free_pages(GFP_KERNEL, ORDER, 0);
+ if(hdr)
+ break;
+ if(!simp_garbage())
+ return;
+ }
+#ifdef DEBUG
+ if(CHUNK_BASE(hdr) != hdr)
+ panic("simp: bad kernel page alignment");
+#endif
+
+ memset(hdr, 0, HEADER_SIZE);
+#ifdef DEBUG
+ memcpy(hdr->magic, global_magic, sizeof(global_magic));
+#endif
+ hdr->father = simp;
+ hdr->again_ctor = simp->again_ctor;
+ hdr->first_ctor = simp->first_ctor;
+
+ /* note: races on simp->color don't produce any error :-) */
+ ptr = ((char*)hdr) + HEADER_SIZE + simp->color;
+ index = CHUNK_END(hdr);
+ for(count = 0; count < simp->max_elems; count++) {
+ *--index = ptr;
+ ptr += simp->real_size;
+ /* note: constructors are not called here in a bunch, but
+ * at each individual simp_alloc(), so that the cache
+ * pollution they cause happens where a simp_alloc()
+ * would pollute the cache anyway, and not here. */
+ }
+ hdr->index = hdr->fresh = hdr->emptypos = index;
+
+ spin_lock(&simp->lock);
+ simp->color += COLOR_INCREMENT;
+ if(simp->color >= simp->max_color)
+ simp->color = 0;
+ hdr->next = simp->usable_list;
+ simp->usable_list = hdr;
+}
+
+
+/* The current x86 memcpy() shuffles registers around for nothing,
+ * does unnecessary work when the size is divisible by a power of two,
+ * and clobbers way too many registers.
+ * The result is that nearly every other register gets spilled to the stack.
+ * Fixing this would be a major win for the whole kernel!
+ */
+static void ** bunch_alloc(struct simp * simp, void ** buffer)
+{
+ struct header * hdr;
+ void ** index;
+ void ** to;
+ void ** end;
+ structor todo;
+ long length;
+
+ spin_lock(&simp->lock);
+ hdr = simp->usable_list;
+ if(!hdr) {
+ alloc_header(simp);
+ hdr = simp->usable_list;
+ if(!hdr) {
+ spin_unlock(&simp->lock);
+ *buffer = NULL;
+ return buffer+1;
+ }
+ }
+
+ index = hdr->index;
+ end = hdr->fresh;
+ todo = hdr->again_ctor;
+ if(index == end) {
+ end = CHUNK_END(hdr);
+ todo = hdr->first_ctor;
+ }
+ to = index + POSTBUFFER_SIZE/2;
+ if(to >= end) {
+ to = end;
+ if(to == CHUNK_END(hdr)) {
+ simp->usable_list = hdr->next;
+ hdr->next = NULL;
+ }
+ }
+ if(to > hdr->fresh)
+ hdr->fresh = to;
+ hdr->index = to;
+ length = ((unsigned long)to) - (unsigned long)index;
+ to = buffer + (length/sizeof(void**));
+
+ memcpy(buffer, index, length);
+
+ spin_unlock(&simp->lock);
+
+ if(todo) {
+ do {
+ todo(*buffer++);
+ } while(buffer < to);
+ }
+ return to;
+}
+
+void * simp_alloc(struct simp * simp)
+{
+#ifdef __SMP__
+ const long cpu = smp_processor_id();
+ struct per_processor * priv = &simp->private[cpu];
+#else
+#define priv (&simp->private[0]) /*fool gcc to use no extra register*/
+#endif
+ void ** buffer_pos = priv->buffer_pos;
+ void * res;
+
+ if(buffer_pos == priv->postbuffer) {
+ buffer_pos = bunch_alloc(simp, buffer_pos);
+ }
+ buffer_pos--;
+ res = *buffer_pos;
+ priv->buffer_pos = buffer_pos;
+ return res;
+}
+
+#ifdef DEBUG
+long check_header(struct header * hdr, void * ptr)
+{
+ void ** test;
+
+ if(!hdr) {
+ printk("SIMP: simp_free() with NULL pointer\n");
+ return 1;
+ }
+ if(strncmp(hdr->magic, global_magic, 32)) {
+ printk("SIMP: simpe_free() with bad ptr %p, or header corruption\n", ptr);
+ return 1;
+ }
+ /* This is brute force, but I don't want to pay for any
+ * overhead if debugging is not enabled, in particular
+ * no space overhead for keeping hashtables etc. */
+ test = hdr->index;
+ while(test < CHUNK_END(hdr)) {
+ if(*test++ == ptr) {
+ printk("SIMP: trying to simp_free(%p) again\n", ptr);
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+static void ** bunch_free(struct simp * simp, void ** buffer)
+{
+ void ** stop;
+
+ stop = buffer - POSTBUFFER_SIZE/3;
+
+ spin_lock(&simp->lock);
+ while(buffer > stop) {
+ void * elem = buffer[-1];
+ struct header * hdr = CHUNK_BASE(elem);
+ void ** index = hdr->index;
+ index--;
+ hdr->index = index;
+ *index = elem;
+ if(!hdr->next) {
+ hdr->next = simp->usable_list;
+ simp->usable_list = hdr;
+ }
+
+ buffer -= 2;
+ elem = *buffer;
+ hdr = CHUNK_BASE(elem);
+ index = hdr->index;
+ index--;
+ hdr->index = index;
+ *index = elem;
+ if(!hdr->next) {
+ hdr->next = simp->usable_list;
+ simp->usable_list = hdr;
+ }
+ }
+ spin_unlock(&simp->lock);
+ global->changed_flag = 1;
+ return buffer;
+}
+
+void simp_free(void * objp)
+{
+ struct header * hdr;
+ void ** buffer_pos;
+ struct per_processor * private;
+#ifdef __SMP__
+ const long cpu = smp_processor_id();
+#else
+ const long cpu = 0;
+#endif
+
+ hdr = CHUNK_BASE(objp);
+#ifdef DEBUG
+ if(check_header(hdr, objp))
+ return;
+#endif
+
+ private = &hdr->father->private[cpu];
+ buffer_pos = private->buffer_pos;
+ if(buffer_pos >= private->postbuffer+POSTBUFFER_SIZE) {
+ buffer_pos = bunch_free(hdr->father, buffer_pos);
+ }
+
+ *buffer_pos++ = objp;
+ private->buffer_pos = buffer_pos;
+
+#ifdef DEAD_BEEF
+ {
+ unsigned int * ptr = (unsigned int*)objp;
+ int count = (hdr->father->real_size - ELEM_SIZE) / sizeof(unsigned int);
+ while(count--)
+ *ptr++ = 0xdeadbeef;
+ }
+#endif
+}
+
+long simp_garbage(void)
+{
+ int i;
+ int res;
+
+ if(!global->changed_flag)
+ return 0; /* shortcut */
+ /* Note: costs do not matter here. Any heavy thrashing of
+ * simp chunks that could be caused by pools stealing each
+ * other's memory has to be considered a BUG :-)
+ * Simply avoid memory shortages by conservative allocating
+ * policies.
+ */
+ global->changed_flag = 0;
+ res = 0;
+ for(i = 0; i < global->nr_simps; i++) {
+ struct simp * simp = &global->simps[i];
+ struct header ** base = &simp->usable_list;
+ struct header * del;
+
+ spin_lock(&simp->lock);
+ del = *base;
+ while(del) {
+ if(del->index == del->emptypos) {
+ if(simp->dtor) {
+ void ** ptr = del->index;
+ while(ptr < CHUNK_END(del)) {
+ simp->dtor(*ptr++);
+ }
+ }
+ *base = del->next;
+#ifdef DEBUG
+ memset(del, 0, CHUNK_SIZE);
+#endif
+ free_pages((unsigned long)del, ORDER);
+ res++;
+ } else
+ base = &del->next;
+ del = *base;
+ }
+ spin_unlock(&simp->lock);
+ }
+ return res;
+}
+
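
The new mm/simp.c above exports simp_create(), simp_alloc(), simp_free() and simp_garbage(). For orientation, here is a minimal, hypothetical client showing the calling convention; the object type, pool name, and constructor below are invented, and <linux/simp.h> is assumed to declare the structor callback type as void (*)(void *):

/* Hypothetical client of the simp allocator introduced above (sketch). */
#include <linux/simp.h>
#include <linux/errno.h>

struct thing {
	long refcount;
	void *payload;
};

static struct simp *thing_pool;

/* Runs once per object before first use (first_ctor) and again on
 * reuse (again_ctor); here one initializer serves for both. */
static void thing_ctor(void *p)
{
	struct thing *t = p;

	t->refcount = 0;
	t->payload = NULL;
}

int thing_pool_init(void)
{
	/* arguments: name, object size, first_ctor, again_ctor, dtor */
	thing_pool = simp_create("thing", sizeof(struct thing),
				 thing_ctor, thing_ctor, NULL);
	return thing_pool ? 0 : -ENOMEM;
}

struct thing *thing_get(void)
{
	/* served from the per-CPU postbuffer; refilled by bunch_alloc() */
	return simp_alloc(thing_pool);
}

void thing_put(struct thing *t)
{
	/* returned to the per-CPU postbuffer; drained by bunch_free() */
	simp_free(t);
}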
diff --git a/mm/slab.c b/mm/slab.c
index 6277739d4..f4793d271 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -123,7 +123,7 @@
* 0 if you wish to reduce memory usage.
*
* SLAB_DEBUG_SUPPORT - 1 for kmem_cache_create() to honour; SLAB_DEBUG_FREE,
- * SLAB_DEBUG_INITIAL, SLAB_RED_ZONE & SLAB_POISION.
+ * SLAB_DEBUG_INITIAL, SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller, code (espically in the critical paths).
*
* SLAB_STATS - 1 to collect stats for /proc/slabinfo.
@@ -143,11 +143,11 @@
#if SLAB_DEBUG_SUPPORT
#if 0
#define SLAB_C_MASK (SLAB_DEBUG_FREE|SLAB_DEBUG_INITIAL|SLAB_RED_ZONE| \
- SLAB_POISION|SLAB_HWCACHE_ALIGN|SLAB_NO_REAP| \
+ SLAB_POISON|SLAB_HWCACHE_ALIGN|SLAB_NO_REAP| \
SLAB_HIGH_PACK)
#endif
#define SLAB_C_MASK (SLAB_DEBUG_FREE|SLAB_DEBUG_INITIAL|SLAB_RED_ZONE| \
- SLAB_POISION|SLAB_HWCACHE_ALIGN|SLAB_NO_REAP)
+ SLAB_POISON|SLAB_HWCACHE_ALIGN|SLAB_NO_REAP)
#else
#if 0
#define SLAB_C_MASK (SLAB_HWCACHE_ALIGN|SLAB_NO_REAP|SLAB_HIGH_PACK)
@@ -215,9 +215,9 @@ typedef struct kmem_bufctl_s {
#define SLAB_RED_MAGIC1 0x5A2CF071UL /* when obj is active */
#define SLAB_RED_MAGIC2 0x170FC2A5UL /* when obj is inactive */
-/* ...and for poisioning */
-#define SLAB_POISION_BYTE 0x5a /* byte value for poisioning */
-#define SLAB_POISION_END 0xa5 /* end-byte of poisioning */
+/* ...and for poisoning */
+#define SLAB_POISON_BYTE 0x5a /* byte value for poisoning */
+#define SLAB_POISON_END 0xa5 /* end-byte of poisoning */
#endif /* SLAB_DEBUG_SUPPORT */
@@ -546,17 +546,17 @@ kmem_freepages(kmem_cache_t *cachep, void *addr)
#if SLAB_DEBUG_SUPPORT
static inline void
-kmem_poision_obj(kmem_cache_t *cachep, void *addr)
+kmem_poison_obj(kmem_cache_t *cachep, void *addr)
{
- memset(addr, SLAB_POISION_BYTE, cachep->c_org_size);
- *(unsigned char *)(addr+cachep->c_org_size-1) = SLAB_POISION_END;
+ memset(addr, SLAB_POISON_BYTE, cachep->c_org_size);
+ *(unsigned char *)(addr+cachep->c_org_size-1) = SLAB_POISON_END;
}
static inline int
-kmem_check_poision_obj(kmem_cache_t *cachep, void *addr)
+kmem_check_poison_obj(kmem_cache_t *cachep, void *addr)
{
void *end;
- end = memchr(addr, SLAB_POISION_END, cachep->c_org_size);
+ end = memchr(addr, SLAB_POISON_END, cachep->c_org_size);
if (end != (addr+cachep->c_org_size-1))
return 1;
return 0;
@@ -605,7 +605,7 @@ kmem_slab_destroy(kmem_cache_t *cachep, kmem_slab_t *slabp)
{
if (cachep->c_dtor
#if SLAB_DEBUG_SUPPORT
- || cachep->c_flags & (SLAB_POISION || SLAB_RED_ZONE)
+ || cachep->c_flags & (SLAB_POISON || SLAB_RED_ZONE)
#endif /*SLAB_DEBUG_SUPPORT*/
) {
/* Doesn't use the bufctl ptrs to find objs. */
@@ -629,10 +629,10 @@ kmem_slab_destroy(kmem_cache_t *cachep, kmem_slab_t *slabp)
#endif /*SLAB_DEBUG_SUPPORT*/
(cachep->c_dtor)(objp, cachep, 0);
#if SLAB_DEBUG_SUPPORT
- else if (cachep->c_flags & SLAB_POISION) {
- if (kmem_check_poision_obj(cachep, objp))
+ else if (cachep->c_flags & SLAB_POISON) {
+ if (kmem_check_poison_obj(cachep, objp))
printk(KERN_ERR "kmem_slab_destory: "
- "Bad poision - %s\n", cachep->c_name);
+ "Bad poison - %s\n", cachep->c_name);
}
if (cachep->c_flags & SLAB_RED_ZONE)
objp -= BYTES_PER_WORD;
@@ -726,18 +726,18 @@ kmem_cache_create(const char *name, size_t size, size_t offset,
flags &= ~SLAB_DEBUG_INITIAL;
}
- if ((flags & SLAB_POISION) && ctor) {
- /* request for poisioning, but we can't do that with a constructor */
- printk("%sPoisioning requested, but con given - %s\n", func_nm, name);
- flags &= ~SLAB_POISION;
+ if ((flags & SLAB_POISON) && ctor) {
+ /* request for poisoning, but we can't do that with a constructor */
+ printk("%sPoisoning requested, but con given - %s\n", func_nm, name);
+ flags &= ~SLAB_POISON;
}
#if 0
if ((flags & SLAB_HIGH_PACK) && ctor) {
printk("%sHigh pack requested, but con given - %s\n", func_nm, name);
flags &= ~SLAB_HIGH_PACK;
}
- if ((flags & SLAB_HIGH_PACK) && (flags & (SLAB_POISION|SLAB_RED_ZONE))) {
- printk("%sHigh pack requested, but with poisioning/red-zoning - %s\n",
+ if ((flags & SLAB_HIGH_PACK) && (flags & (SLAB_POISON|SLAB_RED_ZONE))) {
+ printk("%sHigh pack requested, but with poisoning/red-zoning - %s\n",
func_nm, name);
flags &= ~SLAB_HIGH_PACK;
}
@@ -1094,9 +1094,9 @@ kmem_cache_init_objs(kmem_cache_t * cachep, kmem_slab_t * slabp, void *objp,
if (cachep->c_ctor)
cachep->c_ctor(objp, cachep, ctor_flags);
#if SLAB_DEBUG_SUPPORT
- else if (cachep->c_flags & SLAB_POISION) {
- /* need to poision the objs */
- kmem_poision_obj(cachep, objp);
+ else if (cachep->c_flags & SLAB_POISON) {
+ /* need to poison the objs */
+ kmem_poison_obj(cachep, objp);
}
if (cachep->c_flags & SLAB_RED_ZONE) {
@@ -1275,7 +1275,7 @@ kmem_report_alloc_err(const char *str, kmem_cache_t * cachep)
}
static void
-kmem_report_free_err(const char *str, void *objp, kmem_cache_t * cachep)
+kmem_report_free_err(const char *str, const void *objp, kmem_cache_t * cachep)
{
if (cachep)
SLAB_STATS_INC_ERR(cachep);
@@ -1386,7 +1386,7 @@ ret_obj:
bufp->buf_slabp = slabp;
objp = ((void*)bufp) - cachep->c_offset;
finished:
- /* The lock is not needed by the red-zone or poision ops, and the
+ /* The lock is not needed by the red-zone or poison ops, and the
* obj has been removed from the slab. Should be safe to drop
* the lock here.
*/
@@ -1395,8 +1395,8 @@ finished:
if (cachep->c_flags & SLAB_RED_ZONE)
goto red_zone;
ret_red:
- if ((cachep->c_flags & SLAB_POISION) && kmem_check_poision_obj(cachep, objp))
- kmem_report_alloc_err("Bad poision", cachep);
+ if ((cachep->c_flags & SLAB_POISON) && kmem_check_poison_obj(cachep, objp))
+ kmem_report_alloc_err("Bad poison", cachep);
#endif /* SLAB_DEBUG_SUPPORT */
return objp;
}
@@ -1456,7 +1456,7 @@ nul_ptr:
* it should be in this state _before_ it is released.
*/
static inline void
-__kmem_cache_free(kmem_cache_t *cachep, void *objp)
+__kmem_cache_free(kmem_cache_t *cachep, const void *objp)
{
kmem_slab_t *slabp;
kmem_bufctl_t *bufp;
@@ -1514,10 +1514,10 @@ passed_extra:
/* (hopefully) The most common case. */
finished:
#if SLAB_DEBUG_SUPPORT
- if (cachep->c_flags & SLAB_POISION) {
+ if (cachep->c_flags & SLAB_POISON) {
if (cachep->c_flags & SLAB_RED_ZONE)
objp += BYTES_PER_WORD;
- kmem_poision_obj(cachep, objp);
+ kmem_poison_obj(cachep, objp);
}
#endif /* SLAB_DEBUG_SUPPORT */
spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
@@ -1615,7 +1615,7 @@ kmalloc(size_t size, int flags)
}
void
-kfree(void *objp)
+kfree(const void *objp)
{
struct page *page;
int nr;
@@ -1654,7 +1654,7 @@ while(1);
}
void
-kfree_s(void *objp, size_t size)
+kfree_s(const void *objp, size_t size)
{
struct page *page;
int nr;
@@ -1861,7 +1861,7 @@ kmem_self_test(void)
kmem_cache_t *test_cachep;
printk(KERN_INFO "kmem_test() - start\n");
- test_cachep = kmem_cache_create("test-cachep", 16, 0, SLAB_RED_ZONE|SLAB_POISION, NULL, NULL);
+ test_cachep = kmem_cache_create("test-cachep", 16, 0, SLAB_RED_ZONE|SLAB_POISON, NULL, NULL);
if (test_cachep) {
char *objp = kmem_cache_alloc(test_cachep, SLAB_KERNEL);
if (objp) {
@@ -1870,12 +1870,12 @@ kmem_self_test(void)
*(objp+16) = 1;
kmem_cache_free(test_cachep, objp);
- /* Mess up poisioning. */
+ /* Mess up poisoning. */
*objp = 10;
objp = kmem_cache_alloc(test_cachep, SLAB_KERNEL);
kmem_cache_free(test_cachep, objp);
- /* Mess up poisioning (again). */
+ /* Mess up poisoning (again). */
*objp = 10;
kmem_cache_shrink(test_cachep);
}
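
Aside from the kfree()/kfree_s() const-correctness change, the slab.c hunks above are a spelling sweep (POISION -> POISON); the poisoning scheme itself is unchanged: on free an object is filled with SLAB_POISON_BYTE and its last byte set to SLAB_POISON_END, and kmem_check_poison_obj() later verifies that the end byte is still the first occurrence of that value. A standalone toy illustration of the check, using the constants from the diff (userspace, for demonstration only):

#include <stdio.h>
#include <string.h>

#define SLAB_POISON_BYTE 0x5a	/* byte value for poisoning */
#define SLAB_POISON_END  0xa5	/* end-byte of poisoning */

/* Mirrors kmem_check_poison_obj(): the object is intact iff the first
 * occurrence of SLAB_POISON_END is exactly its final byte. */
static int check_poison(const unsigned char *obj, size_t size)
{
	const unsigned char *end = memchr(obj, SLAB_POISON_END, size);

	return end != obj + size - 1;	/* nonzero => corruption */
}

int main(void)
{
	unsigned char obj[16];

	memset(obj, SLAB_POISON_BYTE, sizeof(obj));
	obj[sizeof(obj) - 1] = SLAB_POISON_END;
	printf("clean object:     %d\n", check_poison(obj, sizeof(obj)));

	obj[3] = SLAB_POISON_END;	/* simulate a scribble after free */
	printf("scribbled object: %d\n", check_poison(obj, sizeof(obj)));
	return 0;
}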
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 819ae7aa8..400274268 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -326,7 +326,7 @@ again:
asmlinkage int sys_swapoff(const char * specialfile)
{
struct swap_info_struct * p = NULL;
- struct inode * inode;
+ struct dentry * dentry;
struct file filp;
int i, type, prev;
int err = -EPERM;
@@ -334,19 +334,22 @@ asmlinkage int sys_swapoff(const char * specialfile)
lock_kernel();
if (!suser())
goto out;
- err = namei(NAM_FOLLOW_LINK, specialfile, &inode);
- if (err)
+
+ dentry = namei(specialfile);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
goto out;
+
prev = -1;
for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
p = swap_info + type;
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
if (p->swap_file) {
- if (p->swap_file == inode)
+ if (p->swap_file == dentry)
break;
} else {
- if (S_ISBLK(inode->i_mode)
- && (p->swap_device == inode->i_rdev))
+ if (S_ISBLK(dentry->d_inode->i_mode)
+ && (p->swap_device == dentry->d_inode->i_rdev))
break;
}
}
@@ -354,7 +357,7 @@ asmlinkage int sys_swapoff(const char * specialfile)
}
err = -EINVAL;
if (type < 0){
- iput(inode);
+ dput(dentry);
goto out;
}
if (prev < 0) {
@@ -369,7 +372,7 @@ asmlinkage int sys_swapoff(const char * specialfile)
p->flags = SWP_USED;
err = try_to_unuse(type);
if (err) {
- iput(inode);
+ dput(dentry);
/* re-insert swap space back into swap_list */
for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
if (p->prio >= swap_info[i].prio)
@@ -384,21 +387,19 @@ asmlinkage int sys_swapoff(const char * specialfile)
}
if(p->swap_device){
memset(&filp, 0, sizeof(filp));
- filp.f_inode = inode;
+ filp.f_dentry = dentry;
filp.f_mode = 3; /* read write */
/* open it again to get fops */
- if( !blkdev_open(inode, &filp) &&
+ if( !blkdev_open(dentry->d_inode, &filp) &&
filp.f_op && filp.f_op->release){
- filp.f_op->release(inode,&filp);
- filp.f_op->release(inode,&filp);
+ filp.f_op->release(dentry->d_inode,&filp);
+ filp.f_op->release(dentry->d_inode,&filp);
}
}
- iput(inode);
+ dput(dentry);
nr_swap_pages -= p->pages;
- iput(p->swap_file);
- if (p->swap_filename)
- kfree(p->swap_filename);
+ dput(p->swap_file);
p->swap_file = NULL;
p->swap_device = 0;
vfree(p->swap_map);
@@ -420,10 +421,8 @@ int get_swaparea_info(char *buf)
len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
for (i = 0 ; i < nr_swapfiles ; i++, ptr++)
if (ptr->flags & SWP_USED) {
- if (ptr->swap_filename)
- len += sprintf(buf + len, "%-31s ", ptr->swap_filename);
- else
- len += sprintf(buf + len, "(null)\t\t\t");
+ len += sprintf(buf + len, "%-31s ", ptr->swap_file->d_name.name);
+
if (ptr->swap_file)
len += sprintf(buf + len, "file\t\t");
else
@@ -451,11 +450,10 @@ int get_swaparea_info(char *buf)
asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
{
struct swap_info_struct * p;
- struct inode * swap_inode;
+ struct dentry * swap_dentry;
unsigned int type;
int i, j, prev;
int error = -EPERM;
- char *tmp;
struct file filp;
static int least_priority = 0;
@@ -472,7 +470,6 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
if (type >= nr_swapfiles)
nr_swapfiles = type+1;
p->flags = SWP_USED;
- p->swap_filename = NULL;
p->swap_file = NULL;
p->swap_device = 0;
p->swap_map = NULL;
@@ -488,25 +485,22 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
} else {
p->prio = --least_priority;
}
- error = namei(NAM_FOLLOW_LINK, specialfile, &swap_inode);
- if (error)
- goto bad_swap_2;
- p->swap_file = swap_inode;
- error = -EBUSY;
- if (atomic_read(&swap_inode->i_count) != 1)
+ swap_dentry = namei(specialfile);
+ error = PTR_ERR(swap_dentry);
+ if (IS_ERR(swap_dentry))
goto bad_swap_2;
+
+ p->swap_file = swap_dentry;
error = -EINVAL;
- if (S_ISBLK(swap_inode->i_mode)) {
- p->swap_device = swap_inode->i_rdev;
+ if (S_ISBLK(swap_dentry->d_inode->i_mode)) {
+ p->swap_device = swap_dentry->d_inode->i_rdev;
set_blocksize(p->swap_device, PAGE_SIZE);
- filp.f_inode = swap_inode;
+ filp.f_dentry = swap_dentry;
filp.f_mode = 3; /* read write */
- error = blkdev_open(swap_inode, &filp);
- p->swap_file = NULL;
- iput(swap_inode);
- if(error)
+ error = blkdev_open(swap_dentry->d_inode, &filp);
+ if (error)
goto bad_swap_2;
error = -ENODEV;
if (!p->swap_device ||
@@ -520,7 +514,7 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
if (p->swap_device == swap_info[i].swap_device)
goto bad_swap;
}
- } else if (!S_ISREG(swap_inode->i_mode))
+ } else if (!S_ISREG(swap_dentry->d_inode->i_mode))
goto bad_swap;
p->swap_lockmap = (unsigned char *) get_free_page(GFP_USER);
if (!p->swap_lockmap) {
@@ -580,12 +574,6 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
prev = i;
}
p->next = i;
- if (!getname(specialfile, &tmp)) {
- if ((p->swap_filename =
- (char *) kmalloc(strlen(tmp)+1, GFP_KERNEL)) != (char *)NULL)
- strcpy(p->swap_filename, tmp);
- putname(tmp);
- }
if (prev < 0) {
swap_list.head = swap_list.next = p - swap_info;
} else {
@@ -595,11 +583,11 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
goto out;
bad_swap:
if(filp.f_op && filp.f_op->release)
- filp.f_op->release(filp.f_inode,&filp);
+ filp.f_op->release(filp.f_dentry->d_inode,&filp);
bad_swap_2:
free_page((long) p->swap_lockmap);
vfree(p->swap_map);
- iput(p->swap_file);
+ dput(p->swap_file);
p->swap_device = 0;
p->swap_file = NULL;
p->swap_map = NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 71afe1aea..d0270d586 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -68,7 +68,7 @@ static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned lo
}
}
-static void free_area_pages(unsigned long address, unsigned long size)
+void vmfree_area_pages(unsigned long address, unsigned long size)
{
pgd_t * dir;
unsigned long end = address + size;
@@ -125,7 +125,7 @@ static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
return 0;
}
-static int alloc_area_pages(unsigned long address, unsigned long size)
+int vmalloc_area_pages(unsigned long address, unsigned long size)
{
pgd_t * dir;
unsigned long end = address + size;
@@ -181,7 +181,7 @@ void vfree(void * addr)
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
- free_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+ vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
kfree(tmp);
return;
}
@@ -201,7 +201,7 @@ void * vmalloc(unsigned long size)
if (!area)
return NULL;
addr = area->addr;
- if (alloc_area_pages(VMALLOC_VMADDR(addr), size)) {
+ if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
vfree(addr);
return NULL;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 21c178159..eeadbaa4f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7,7 +7,7 @@
* kswapd added: 7.1.96 sct
* Removed kswapd_ctl limits, and swap out as many pages as needed
* to bring the system back to free_pages_high: 2.4.97, Rik van Riel.
- * Version: $Id: vmscan.c,v 1.23 1997/04/12 04:31:05 davem Exp $
+ * Version: $Id: vmscan.c,v 1.3 1997/06/17 13:31:02 ralf Exp $
*/
#include <linux/mm.h>
@@ -362,13 +362,22 @@ static inline int do_try_to_free_page(int priority, int dma, int wait)
return 1;
state = 1;
case 1:
- if (kmem_cache_reap(i, dma, wait))
- return 1;
+ shrink_dcache();
state = 2;
case 2:
- if (shm_swap(i, dma))
+ /*
+ * We shouldn't have a priority here:
+ * If we're low on memory we should
+ * unconditionally throw away _all_
+ * kmalloc caches!
+ */
+ if (kmem_cache_reap(0, dma, wait))
return 1;
state = 3;
+ case 3:
+ if (shm_swap(i, dma))
+ return 1;
+ state = 4;
default:
if (swap_out(i, dma, wait))
return 1;
@@ -403,7 +412,7 @@ int try_to_free_page(int priority, int dma, int wait)
int kswapd(void *unused)
{
int i;
- char *revision="$Revision: 1.23 $", *s, *e;
+ char *revision="$Revision: 1.3 $", *s, *e;
current->session = 1;
current->pgrp = 1;