summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/mips/mm/umap.c122
-rw-r--r--include/asm-mips/pgtable.h4
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/vmalloc.h11
-rw-r--r--mm/vmalloc.c16
5 files changed, 140 insertions, 15 deletions
diff --git a/arch/mips/mm/umap.c b/arch/mips/mm/umap.c
index 9593391ba..7ce0d8e5a 100644
--- a/arch/mips/mm/umap.c
+++ b/arch/mips/mm/umap.c
@@ -3,9 +3,14 @@
*
* (C) Copyright 1994 Linus Torvalds
*
- * Modified for removing active mappings from any task. This is required
- * for implementing the virtual graphics interface for direct rendering
- * on the SGI - miguel.
+ * Changes:
+ *
+ * Modified from Linus' source to remove active mappings from any
+ * task. This is required for implementing the virtual graphics
+ * interface for direct rendering on the SGI - miguel.
+ *
+ * Added a routine to map a vmalloc()ed area into user space, this one
+ * is required by the /dev/shmiq driver - miguel.
*/
#include <linux/stat.h>
#include <linux/sched.h>
@@ -17,6 +22,8 @@
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/swap.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -81,7 +88,6 @@ remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
* This routine is called from the page fault handler to remove a
* range of active mappings at this point
*/
-
void
remove_mapping (struct task_struct *task, unsigned long start, unsigned long end)
{
@@ -100,3 +106,111 @@ remove_mapping (struct task_struct *task, unsigned long start, unsigned long end
up (&task->mm->mmap_sem);
}
+void *vmalloc_uncached (unsigned long size)
+{
+ return vmalloc_prot (size, PAGE_KERNEL_UNCACHED);
+}
+
+static inline void free_pte(pte_t page)
+{
+ if (pte_present(page)) {
+ unsigned long addr = pte_page(page);
+ if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
+ return;
+ free_page(addr);
+ if (current->mm->rss <= 0)
+ return;
+ current->mm->rss--;
+ return;
+ }
+ swap_free(pte_val(page));
+}
+
+static inline void forget_pte(pte_t page)
+{
+ if (!pte_none(page)) {
+ printk("forget_pte: old mapping existed!\n");
+ free_pte(page);
+ }
+}
+
+/*
+ * maps a range of vmalloc()ed memory into the requested pages. the old
+ * mappings are removed.
+ */
+static inline void
+vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned long vaddr)
+{
+ unsigned long end;
+ pgd_t *vdir;
+ pmd_t *vpmd;
+ pte_t *vpte;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t oldpage = *pte;
+ unsigned long page;
+ pte_clear(pte);
+
+ vdir = pgd_offset_k (vaddr);
+ vpmd = pmd_offset (vdir, vaddr);
+ vpte = pte_offset (vpmd, vaddr);
+ page = pte_page (*vpte);
+
+ set_pte(pte, mk_pte_phys(page, PAGE_USERIO));
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int
+vmap_pmd_range (pmd_t *pmd, unsigned long address, unsigned long size, unsigned long vaddr)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ vaddr -= address;
+ do {
+ pte_t * pte = pte_alloc(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ vmap_pte_range(pte, address, end - address, address + vaddr);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+int
+vmap_page_range (unsigned long from, unsigned long size, unsigned long vaddr)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+
+ vaddr -= from;
+ dir = pgd_offset(current->mm, from);
+ flush_cache_range(current->mm, beg, end);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = vmap_pmd_range(pmd, from, end - from, vaddr + from);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(current->mm, beg, end);
+ return error;
+}
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index f8785b9b5..3717bb398 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -16,6 +16,7 @@
* - flush_cache_page(mm, vmaddr) flushes a single page
* - flush_cache_range(mm, start, end) flushes a range of pages
* - flush_page_to_ram(page) write back kernel page to ram
+ *
*/
extern void (*flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
@@ -132,6 +133,9 @@ extern void (*add_wired_entry)(unsigned long entrylo0, unsigned long entrylo1,
_CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_CACHE_UNCACHED)
+#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _CACHE_UNCACHED)
+
/*
* MIPS can't do page protection for execute, and considers that the same like
* read. Also, write permissions imply read permissions. This is the closest
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bfde668c7..674f69479 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -273,7 +273,7 @@ extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
-
+extern int vmap_page_range (unsigned long from, unsigned long size, unsigned long vaddr);
extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 40072ab47..ad7447530 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -15,10 +15,17 @@ struct vm_struct {
struct vm_struct * get_vm_area(unsigned long size);
void vfree(void * addr);
-void * vmalloc(unsigned long size);
+void * vmalloc_prot(unsigned long size, pgprot_t prot);
+void * vmalloc_uncached(unsigned long size);
+
+extern inline void * vmalloc(unsigned long size)
+{
+	return vmalloc_prot (size, PAGE_KERNEL);
+}
+
int vread(char *buf, char *addr, int count);
void vmfree_area_pages(unsigned long address, unsigned long size);
-int vmalloc_area_pages(unsigned long address, unsigned long size);
+int vmalloc_area_pages(unsigned long address, unsigned long size, pgprot_t prot);
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d0270d586..5b1387c7f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -83,7 +83,7 @@ void vmfree_area_pages(unsigned long address, unsigned long size)
flush_tlb_all();
}
-static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
+static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size, pgprot_t prot)
{
unsigned long end;
@@ -98,14 +98,14 @@ static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned lo
page = __get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
- set_pte(pte, mk_pte(page, PAGE_KERNEL));
+ set_pte(pte, mk_pte(page, prot));
address += PAGE_SIZE;
pte++;
}
return 0;
}
-static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
+static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, pgprot_t prot)
{
unsigned long end;
@@ -117,7 +117,7 @@ static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
- if (alloc_area_pte(pte, address, end - address))
+ if (alloc_area_pte(pte, address, end - address, prot))
return -ENOMEM;
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
@@ -125,7 +125,7 @@ static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
return 0;
}
-int vmalloc_area_pages(unsigned long address, unsigned long size)
+int vmalloc_area_pages(unsigned long address, unsigned long size, pgprot_t prot)
{
pgd_t * dir;
unsigned long end = address + size;
@@ -136,7 +136,7 @@ int vmalloc_area_pages(unsigned long address, unsigned long size)
pmd_t *pmd = pmd_alloc_kernel(dir, address);
if (!pmd)
return -ENOMEM;
- if (alloc_area_pmd(pmd, address, end - address))
+ if (alloc_area_pmd(pmd, address, end - address, prot))
return -ENOMEM;
set_pgdir(address, *dir);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -189,7 +189,7 @@ void vfree(void * addr)
printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}
-void * vmalloc(unsigned long size)
+void * vmalloc_prot(unsigned long size, pgprot_t prot)
{
void * addr;
struct vm_struct *area;
@@ -201,7 +201,7 @@ void * vmalloc(unsigned long size)
if (!area)
return NULL;
addr = area->addr;
- if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
+	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, prot)) {
vfree(addr);
return NULL;
}