author    Ralf Baechle <ralf@linux-mips.org>  2000-03-13 20:55:15 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-03-13 20:55:15 +0000
commit    1471f525455788c20b130690e0f104df451aeb43 (patch)
tree      3778beba56558beb9a9548ea5b467e9c44ea966f /mm
parent    e80d2c5456d30ebba5b0eb8a9d33e17d815d4d83 (diff)
Merge with Linux 2.3.51.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c |   6
-rw-r--r--  mm/memory.c  | 176
-rw-r--r--  mm/mmap.c    |   6
3 files changed, 124 insertions(+), 64 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 6756c70a0..b5febc2e5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -954,8 +954,7 @@ static void generic_file_readahead(int reada_ok,
*/
void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
- struct dentry *dentry = filp->f_dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = filp->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
unsigned long index, offset;
struct page *cached_page;
@@ -1307,8 +1306,7 @@ struct page * filemap_nopage(struct vm_area_struct * area,
{
int error;
struct file *file = area->vm_file;
- struct dentry *dentry = file->f_dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
struct page *page, **hash, *old_page;
unsigned long size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
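Both filemap.c hunks above make the same simplification: the intermediate dentry local is dropped and the inode is reached directly through the file. A minimal sketch of the resulting pattern; the helper name is hypothetical and is not something the patch adds:

static inline struct inode *file_inode_of(struct file *filp)
{
	/* 2.3-era path from an open file to its inode; no dentry local needed. */
	return filp->f_dentry->d_inode;
}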
diff --git a/mm/memory.c b/mm/memory.c
index 1232bd928..dbcdd0052 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -398,28 +398,25 @@ static struct page * follow_page(unsigned long address)
return pte_page(*pte);
}
- printk(KERN_ERR "Missing page in follow_page\n");
return NULL;
}
/*
- * Given a physical address, is there a useful struct page pointing to it?
+ * Given a physical address, is there a useful struct page pointing to
+ * it? This may become more complex in the future if we start dealing
+ * with IO-aperture pages in kiobufs.
*/
-struct page * get_page_map(struct page *page, unsigned long vaddr)
+static inline struct page * get_page_map(struct page *page)
{
- if (MAP_NR(vaddr) >= max_mapnr)
- return 0;
- if (page == ZERO_PAGE(vaddr))
- return 0;
- if (PageReserved(page))
+ if (page > (mem_map + max_mapnr))
return 0;
return page;
}
/*
* Force in an entire range of pages from the current process's user VA,
- * and pin and lock the pages for IO.
+ * and pin them in physical memory.
*/
#define dprintk(x...)
@@ -430,8 +427,6 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
struct mm_struct * mm;
struct vm_area_struct * vma = 0;
struct page * map;
- int doublepage = 0;
- int repeat = 0;
int i;
/* Make sure the iobuf is not already mapped somewhere. */
@@ -447,11 +442,10 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
if (err)
return err;
- repeat:
down(&mm->mmap_sem);
err = -EFAULT;
- iobuf->locked = 1;
+ iobuf->locked = 0;
iobuf->offset = va & ~PAGE_MASK;
iobuf->length = len;
@@ -471,16 +465,15 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
spin_lock(&mm->page_table_lock);
map = follow_page(ptr);
if (!map) {
+ spin_unlock(&mm->page_table_lock);
dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
- goto retry;
+ goto out_unlock;
}
- map = get_page_map(map, ptr);
- if (map) {
- if (TryLockPage(map)) {
- goto retry;
- }
+ map = get_page_map(map);
+ if (map)
atomic_inc(&map->count);
- }
+ else
+ printk (KERN_INFO "Mapped page missing [%d]\n", i);
spin_unlock(&mm->page_table_lock);
iobuf->maplist[i] = map;
iobuf->nr_pages = ++i;
@@ -497,66 +490,133 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
unmap_kiobuf(iobuf);
dprintk ("map_user_kiobuf: end %d\n", err);
return err;
+}
- retry:
+/*
+ * Unmap all of the pages referenced by a kiobuf. We release the pages,
+ * and unlock them if they were locked.
+ */
+
+void unmap_kiobuf (struct kiobuf *iobuf)
+{
+ int i;
+ struct page *map;
+
+ for (i = 0; i < iobuf->nr_pages; i++) {
+ map = iobuf->maplist[i];
+ if (map) {
+ if (iobuf->locked)
+ UnlockPage(map);
+ __free_page(map);
+ }
+ }
+
+ iobuf->nr_pages = 0;
+ iobuf->locked = 0;
+}
+
+
+/*
+ * Lock down all of the pages of a kiovec for IO.
+ *
+ * If any page is mapped twice in the kiovec, we return the error -EINVAL.
+ *
+ * The optional wait parameter causes the lock call to block until all
+ * pages can be locked if set. If wait==0, the lock operation is
+ * aborted if any locked pages are found and -EAGAIN is returned.
+ */
+
+int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
+{
+ struct kiobuf *iobuf;
+ int i, j;
+ struct page *page, **ppage;
+ int doublepage = 0;
+ int repeat = 0;
+
+ repeat:
+
+ for (i = 0; i < nr; i++) {
+ iobuf = iovec[i];
+
+ if (iobuf->locked)
+ continue;
+ iobuf->locked = 1;
+
+ ppage = iobuf->maplist;
+ for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
+ page = *ppage;
+ if (!page)
+ continue;
+
+ if (TryLockPage(page))
+ goto retry;
+ }
+ }
+
+ return 0;
+
+ retry:
+
/*
- * Undo the locking so far, wait on the page we got to, and try again.
+ * We couldn't lock one of the pages. Undo the locking so far,
+ * wait on the page we got to, and try again.
*/
- spin_unlock(&mm->page_table_lock);
- unmap_kiobuf(iobuf);
- up(&mm->mmap_sem);
-
+
+ unlock_kiovec(nr, iovec);
+ if (!wait)
+ return -EAGAIN;
+
/*
* Did the release also unlock the page we got stuck on?
*/
- if (map) {
- if (!PageLocked(map)) {
- /* If so, we may well have the page mapped twice
- * in the IO address range. Bad news. Of
- * course, it _might_ * just be a coincidence,
- * but if it happens more than * once, chances
- * are we have a double-mapped page. */
- if (++doublepage >= 3) {
- return -EINVAL;
- }
- }
-
- /*
- * Try again...
+ if (!PageLocked(page)) {
+ /*
+ * If so, we may well have the page mapped twice
+ * in the IO address range. Bad news. Of
+ * course, it _might_ just be a coincidence,
+ * but if it happens more than once, chances
+ * are we have a double-mapped page.
*/
- wait_on_page(map);
+ if (++doublepage >= 3)
+ return -EINVAL;
+
+ /* Try again... */
+ wait_on_page(page);
}
- if (++repeat < 16) {
- ptr = va & PAGE_MASK;
+ if (++repeat < 16)
goto repeat;
- }
return -EAGAIN;
}
-
/*
- * Unmap all of the pages referenced by a kiobuf. We release the pages,
- * and unlock them if they were locked.
+ * Unlock all of the pages of a kiovec after IO.
*/
-void unmap_kiobuf (struct kiobuf *iobuf)
+int unlock_kiovec(int nr, struct kiobuf *iovec[])
{
- int i;
- struct page *map;
+ struct kiobuf *iobuf;
+ int i, j;
+ struct page *page, **ppage;
- for (i = 0; i < iobuf->nr_pages; i++) {
- map = iobuf->maplist[i];
+ for (i = 0; i < nr; i++) {
+ iobuf = iovec[i];
+
+ if (!iobuf->locked)
+ continue;
+ iobuf->locked = 0;
- if (map && iobuf->locked) {
- UnlockPage(map);
- __free_page(map);
+ ppage = iobuf->maplist;
+ for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
+ page = *ppage;
+ if (!page)
+ continue;
+ UnlockPage(page);
}
}
-
- iobuf->nr_pages = 0;
- iobuf->locked = 0;
+ return 0;
}
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
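The memory.c changes above split the old map-and-lock behaviour of map_user_kiobuf() in two: mapping now only pins the user pages (iobuf->locked starts at 0), and locking is handled separately by the new lock_kiovec()/unlock_kiovec() pair. A minimal sketch of how a caller might combine the reworked primitives, assuming the kiobuf has already been allocated; the function name and the READ direction are illustrative only:

static int pin_and_lock_user_range(struct kiobuf *iobuf,
				   unsigned long va, size_t len)
{
	int err;

	/* Pin the user pages; after this patch they are not yet locked. */
	err = map_user_kiobuf(READ, iobuf, va, len);
	if (err)
		return err;

	/* Lock them for IO; wait=1 blocks until every page can be locked. */
	err = lock_kiovec(1, &iobuf, 1);
	if (err) {
		unmap_kiobuf(iobuf);
		return err;
	}

	/* ... perform the IO against iobuf->maplist[] here ... */

	unlock_kiovec(1, &iobuf);
	unmap_kiobuf(iobuf);
	return 0;
}

As the new code shows, lock_kiovec() returns -EINVAL if it repeatedly finds the same page already locked (the double-mapped case), and -EAGAIN either when wait is 0 and a page is busy or after 16 unsuccessful retries.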
diff --git a/mm/mmap.c b/mm/mmap.c
index 5a6820972..b650babc8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -305,14 +305,14 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
atomic_dec(&file->f_dentry->d_inode->i_writecount);
correct_wcount = 1;
}
+ vma->vm_file = file;
+ get_file(file);
error = file->f_op->mmap(file, vma);
/* Fix up the count if necessary, then check for an error */
if (correct_wcount)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
if (error)
goto unmap_and_free_vma;
- vma->vm_file = file;
- get_file(file);
}
/*
@@ -334,6 +334,8 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
return addr;
unmap_and_free_vma:
+ vma->vm_file = NULL;
+ fput(file);
/* Undo any partial mapping done by a device driver. */
flush_cache_range(mm, vma->vm_start, vma->vm_end);
zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
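The mmap.c hunks move the vma->vm_file assignment and the get_file() reference in front of the f_op->mmap() call, and teach the unmap_and_free_vma error path to drop them again. A driver's mmap method can therefore rely on vma->vm_file being set when it runs. A hypothetical handler (example_mmap and example_vm_ops are made-up names) illustrating what that enables:

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Valid only because vm_file is now assigned before f_op->mmap runs. */
	struct inode *inode = vma->vm_file->f_dentry->d_inode;

	/* Refuse mappings that start beyond the end of the object. */
	if (vma->vm_pgoff >= (inode->i_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &example_vm_ops;	/* hypothetical vm_operations_struct */
	return 0;
}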