author | Ralf Baechle <ralf@linux-mips.org> | 1998-03-17 22:05:47 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 1998-03-17 22:05:47 +0000
commit | 27cfca1ec98e91261b1a5355d10a8996464b63af (patch)
tree | 8e895a53e372fa682b4c0a585b9377d67ed70d0e /include/asm-arm/proc-armv
parent | 6a76fb7214c477ccf6582bd79c5b4ccc4f9c41b1 (diff)
Look Ma' what I found on my harddisk ...
o New faster syscalls for 2.1.x, too
o Upgrade to 2.1.89.
Don't try to run this. It's flaky as hell. But feel free to debug ...
Diffstat (limited to 'include/asm-arm/proc-armv')
-rw-r--r-- | include/asm-arm/proc-armv/assembler.h | 84
-rw-r--r-- | include/asm-arm/proc-armv/mm-init.h | 118
-rw-r--r-- | include/asm-arm/proc-armv/page.h | 68
-rw-r--r-- | include/asm-arm/proc-armv/param.h | 27
-rw-r--r-- | include/asm-arm/proc-armv/pgtable.h | 617
-rw-r--r-- | include/asm-arm/proc-armv/processor.h | 103
-rw-r--r-- | include/asm-arm/proc-armv/ptrace.h | 75
-rw-r--r-- | include/asm-arm/proc-armv/semaphore.h | 81
-rw-r--r-- | include/asm-arm/proc-armv/shmparam.h | 19
-rw-r--r-- | include/asm-arm/proc-armv/system.h | 137
-rw-r--r-- | include/asm-arm/proc-armv/uaccess.h | 204
-rw-r--r-- | include/asm-arm/proc-armv/uncompress.h | 22
12 files changed, 1555 insertions, 0 deletions
diff --git a/include/asm-arm/proc-armv/assembler.h b/include/asm-arm/proc-armv/assembler.h
new file mode 100644
index 000000000..2294981c3
--- /dev/null
+++ b/include/asm-arm/proc-armv/assembler.h
@@ -0,0 +1,84 @@
+/*
+ * linux/asm-arm/proc-armv/assembler.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * This file contains arm architecture specific defines
+ * for the different processors
+ */
+
+/*
+ * LOADREGS: multiple register load (ldm) with pc in register list
+ *	(takes account of ARM6 not using ^)
+ *
+ * RETINSTR: return instruction: adds the 's' in at the end of the
+ *	instruction if this is not an ARM6
+ *
+ * SAVEIRQS: save IRQ state (not required on ARM2/ARM3 - done
+ *	implicitly)
+ *
+ * RESTOREIRQS: restore IRQ state (not required on ARM2/ARM3 - done
+ *	implicitly with ldm ... ^ or movs)
+ *
+ * These next two need thinking about - can't easily use stack... (see system.S)
+ * DISABLEIRQS: disable IRQS in SVC mode
+ *
+ * ENABLEIRQS: enable IRQS in SVC mode
+ *
+ * USERMODE: switch to USER mode
+ *
+ * SVCMODE: switch to SVC mode
+ */
+
+#define N_BIT (1 << 31)
+#define Z_BIT (1 << 30)
+#define C_BIT (1 << 29)
+#define V_BIT (1 << 28)
+
+#define PCMASK 0
+
+#ifdef __ASSEMBLER__
+
+#define I_BIT (1 << 7)
+#define F_BIT (1 << 6)
+
+#define MODE_FIQ26 0x01
+#define MODE_FIQ32 0x11
+
+#define DEFAULT_FIQ MODE_FIQ32
+
+#define LOADREGS(cond, base, reglist...)\
+        ldm##cond base,reglist
+
+#define RETINSTR(instr, regs...)\
+        instr regs
+
+#define MODENOP
+
+#define MODE(savereg,tmpreg,mode) \
+        mrs savereg, cpsr; \
+        bic tmpreg, savereg, $0x1f; \
+        orr tmpreg, tmpreg, $mode; \
+        msr cpsr, tmpreg
+
+#define RESTOREMODE(savereg) \
+        msr cpsr, savereg
+
+#define SAVEIRQS(tmpreg)\
+        mrs tmpreg, cpsr; \
+        str tmpreg, [sp, $-4]!
+
+#define RESTOREIRQS(tmpreg)\
+        ldr tmpreg, [sp], $4; \
+        msr cpsr, tmpreg
+
+#define DISABLEIRQS(tmpreg)\
+        mrs tmpreg , cpsr; \
+        orr tmpreg , tmpreg , $I_BIT; \
+        msr cpsr, tmpreg
+
+#define ENABLEIRQS(tmpreg)\
+        mrs tmpreg , cpsr; \
+        bic tmpreg , tmpreg , $I_BIT; \
+        msr cpsr, tmpreg
+#endif
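The N/Z/C/V masks above are deliberately left outside the __ASSEMBLER__ guard, so C code can test the condition flags in a saved program status word. A minimal standalone sketch (the sample value is made up; only the bit masks come from this header):

    /* Illustrative only -- decode condition flags with the masks from
     * assembler.h; 0x60000013 is a made-up CPSR value (Z and C set,
     * mode bits 0x13 = SVC). */
    #include <stdio.h>

    #define N_BIT (1 << 31)
    #define Z_BIT (1 << 30)
    #define C_BIT (1 << 29)
    #define V_BIT (1 << 28)

    static void show_flags(unsigned long psr)
    {
            printf("N=%d Z=%d C=%d V=%d\n",
                   !!(psr & N_BIT), !!(psr & Z_BIT),
                   !!(psr & C_BIT), !!(psr & V_BIT));
    }

    int main(void)
    {
            show_flags(0x60000013);
            return 0;
    }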
diff --git a/include/asm-arm/proc-armv/mm-init.h b/include/asm-arm/proc-armv/mm-init.h
new file mode 100644
index 000000000..0ffb05a16
--- /dev/null
+++ b/include/asm-arm/proc-armv/mm-init.h
@@ -0,0 +1,118 @@
+/*
+ * linux/include/asm-arm/proc-armv/mm-init.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * This contains the code to setup the memory map on an ARM v3 or v4 machine.
+ * This is both processor & architecture specific, and requires some
+ * more work to get it to fit into our separate processor and architecture
+ * structure.
+ */
+
+/*
+ * On ebsa, we want the memory map set up so:
+ *
+ *  PHYS      VIRT
+ * 00000000  00000000  Zero page
+ * 000003ff  000003ff  Zero page end
+ * 00000000  c0000000  Kernel and all physical memory
+ * 01ffffff  c1ffffff  End of physical (32MB)
+ * e0000000  e0000000  IO start
+ * ffffffff  ffffffff  IO end
+ *
+ * On rpc, we want:
+ *
+ *  PHYS      VIRT
+ * 10000000  00000000  Zero page
+ * 100003ff  000003ff  Zero page end
+ * 10000000  c0000000  Kernel and all physical memory
+ * 1fffffff  cfffffff  End of physical (32MB)
+ * 02000000  d?000000  Screen memory (first image)
+ * 02000000  d8000000  Screen memory (second image)
+ * 00000000  df000000  StrongARM cache invalidation area
+ * 03000000  e0000000  IO start
+ * 03ffffff  e0ffffff  IO end
+ *
+ * We set it up using the section page table entries.
+ */
+
+#include <asm/arch/mmap.h>
+#include <asm/pgtable.h>
+
+#define V2P(x) virt_to_phys(x)
+#define PTE_SIZE (PTRS_PER_PTE * 4)
+
+#define PMD_SECT (PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_KERNEL) | PMD_SECT_CACHEABLE)
+
+static inline void setup_swapper_dir (int index, unsigned long entry)
+{
+        pmd_t pmd;
+
+        pmd_val(pmd) = entry;
+        set_pmd (pmd_offset (swapper_pg_dir + index, 0), pmd);
+}
+
+static inline unsigned long setup_pagetables(unsigned long start_mem, unsigned long end_mem)
+{
+        unsigned long address;
+        unsigned int spi;
+        union { unsigned long l; unsigned long *p; } u;
+
+        /* map in zero page */
+        u.l = ((start_mem + (PTE_SIZE-1)) & ~(PTE_SIZE-1));
+        start_mem = u.l + PTE_SIZE;
+        memzero (u.p, PTE_SIZE);
+        *u.p = V2P(PAGE_OFFSET) | PTE_CACHEABLE | PTE_TYPE_SMALL;
+        setup_swapper_dir (0, V2P(u.l) | PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER));
+
+        for (spi = 1; spi < (PAGE_OFFSET >> PGDIR_SHIFT); spi++)
+                pgd_val(swapper_pg_dir[spi]) = 0;
+
+        /* map in physical ram & kernel */
+        address = PAGE_OFFSET;
+        while (spi < end_mem >> PGDIR_SHIFT) {
+                setup_swapper_dir (spi++,
+                                   V2P(address) | PMD_SECT |
+                                   PMD_SECT_BUFFERABLE | PMD_SECT_AP_WRITE);
+                address += PGDIR_SIZE;
+        }
+        while (spi < PTRS_PER_PGD)
+                pgd_val(swapper_pg_dir[spi++]) = 0;
+
+        /*
+         * An area to invalidate the cache
+         */
+        setup_swapper_dir (0xdf0, SAFE_ADDR | PMD_SECT | PMD_SECT_AP_READ);
+
+        /* map in IO */
+        address = IO_START;
+        spi = IO_BASE >> PGDIR_SHIFT;
+        pgd_val(swapper_pg_dir[spi-1]) = 0xc0000000 | PMD_TYPE_SECT |
+                                         PMD_DOMAIN(DOMAIN_KERNEL) | PMD_SECT_AP_WRITE;
+        while (address < IO_START + IO_SIZE && address) {
+                pgd_val(swapper_pg_dir[spi++]) = address |
+                                                 PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_IO) |
+                                                 PMD_SECT_AP_WRITE;
+                address += PGDIR_SIZE;
+        }
+
+#ifdef HAVE_MAP_VID_MEM
+        map_screen_mem(0, 0, 0);
+#endif
+
+        flush_cache_all();
+        return start_mem;
+}
+
+static inline void mark_usable_memory_areas(unsigned long *start_mem, unsigned long end_mem)
+{
+        unsigned long smem;
+
+        *start_mem = smem = PAGE_ALIGN(*start_mem);
+
+        while (smem < end_mem) {
+                clear_bit(PG_reserved, &mem_map[MAP_NR(smem)].flags);
+                smem += PAGE_SIZE;
+        }
+}
+
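setup_pagetables() maps RAM with level-1 "section" entries: one 32-bit descriptor per megabyte, with the physical base in the top 12 bits and the type, domain and access bits ORed into the low bits. A standalone sketch of how such a descriptor is formed (mk_section is a hypothetical helper; the bit values are copied from proc-armv/pgtable.h below):

    /* Illustrative only: build the level-1 section descriptor that
     * setup_pagetables() writes for one megabyte of kernel RAM. */
    #define PMD_TYPE_SECT       0x0002
    #define PMD_SECT_CACHEABLE  0x0008
    #define PMD_SECT_BUFFERABLE 0x0004
    #define PMD_SECT_AP_WRITE   0x0400
    #define PMD_DOMAIN(x)       ((x) << 5)
    #define DOMAIN_KERNEL       1

    static unsigned long mk_section(unsigned long phys)
    {
            /* phys must be 1MB-aligned; the low 20 bits carry flags */
            return (phys & 0xfff00000) | PMD_TYPE_SECT |
                   PMD_DOMAIN(DOMAIN_KERNEL) | PMD_SECT_CACHEABLE |
                   PMD_SECT_BUFFERABLE | PMD_SECT_AP_WRITE;
    }
    /* mk_section(0x01000000) == 0x0100042e */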
diff --git a/include/asm-arm/proc-armv/page.h b/include/asm-arm/proc-armv/page.h
new file mode 100644
index 000000000..fd8768939
--- /dev/null
+++ b/include/asm-arm/proc-armv/page.h
@@ -0,0 +1,68 @@
+/*
+ * linux/include/asm-arm/proc-armv/page.h
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#ifndef __ASM_PROC_PAGE_H
+#define __ASM_PROC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0xc0000000
+#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PROC_PAGE_H */
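The point of STRICT_MM_TYPECHECKS is that pte/pmd/pgd/pgprot values become distinct struct types, so accidentally mixing them is a compile-time error instead of a silent bug. A standalone sketch of the difference (types inlined from the header above):

    /* Illustrative only: with the struct-wrapped pte_t you must go
     * through __pte()/pte_val(); a raw assignment will not compile. */
    typedef struct { unsigned long pte; } pte_t;
    #define __pte(x)   ((pte_t) { (x) })
    #define pte_val(x) ((x).pte)

    void example(void)
    {
            pte_t pte = __pte(0x01000c02UL);    /* ok */
            unsigned long raw = pte_val(pte);   /* ok */
            /* pte_t bad = 0x01000c02UL;  -- compile error under
             * STRICT_MM_TYPECHECKS, silently accepted without it */
            (void)raw;
    }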
diff --git a/include/asm-arm/proc-armv/param.h b/include/asm-arm/proc-armv/param.h
new file mode 100644
index 000000000..fa22ce2d3
--- /dev/null
+++ b/include/asm-arm/proc-armv/param.h
@@ -0,0 +1,27 @@
+/*
+ * linux/include/asm-arm/proc-armv/param.h
+ *
+ * Copyright (C) 1996 Russell King
+ */
+
+#ifndef __ASM_PROC_PARAM_H
+#define __ASM_PROC_PARAM_H
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
+
diff --git a/include/asm-arm/proc-armv/pgtable.h b/include/asm-arm/proc-armv/pgtable.h
new file mode 100644
index 000000000..5816b2698
--- /dev/null
+++ b/include/asm-arm/proc-armv/pgtable.h
@@ -0,0 +1,617 @@
+/*
+ * linux/include/asm-arm/proc-armv/pgtable.h
+ *
+ * Copyright (C) 1995, 1996, 1997 Russell King
+ *
+ * 12-01-1997 RMK Altered flushing routines to use function pointers
+ *                now possible to combine ARM6, ARM7 and StrongARM versions.
+ */
+#ifndef __ASM_PROC_PGTABLE_H
+#define __ASM_PROC_PGTABLE_H
+
+#include <asm/arch/mmu.h>
+
+#define LIBRARY_TEXT_START 0x0c000000
+
+/*
+ * Cache flushing...
+ */
+#define flush_cache_all() \
+        processor.u.armv3v4._flush_cache_all()
+
+#define flush_cache_mm(_mm) \
+        do { \
+                if ((_mm) == current->mm) \
+                        processor.u.armv3v4._flush_cache_all(); \
+        } while (0)
+
+#define flush_cache_range(_mm,_start,_end) \
+        do { \
+                if ((_mm) == current->mm) \
+                        processor.u.armv3v4._flush_cache_area \
+                                ((_start), (_end), 1); \
+        } while (0)
+
+#define flush_cache_page(_vma,_vmaddr) \
+        do { \
+                if ((_vma)->vm_mm == current->mm) \
+                        processor.u.armv3v4._flush_cache_area \
+                                ((_vmaddr), (_vmaddr) + PAGE_SIZE, \
+                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0); \
+        } while (0)
+
+#define flush_icache_range(_start,_end) \
+        processor.u.armv3v4._flush_icache_area((_start), (_end))
+
+/*
+ * We don't have a mem map cache...
+ */
+#define update_mm_cache_all()                   do { } while (0)
+#define update_mm_cache_task(tsk)               do { } while (0)
+#define update_mm_cache_mm(mm)                  do { } while (0)
+#define update_mm_cache_mm_addr(mm,addr,pte)    do { } while (0)
+
+/*
+ * This flushes back any buffered write data.  We have to clean and flush the entries
+ * in the cache for this page.  Is it necessary to invalidate the I-cache?
+ */
+#define flush_page_to_ram(_page) \
+        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);
+
+/*
+ * Make the page uncacheable (must flush page beforehand).
+ */
+#define uncache_page(_page) \
+        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(mm, start, end) flushes a range of pages
+ *
+ * GCC uses conditional instructions, and expects the assembler code to do so as well.
+ *
+ * We drain the write buffer in here to ensure that the page tables in ram
+ * are really up to date.  It is more efficient to do this here...
+ */
+#define flush_tlb() flush_tlb_all()
+
+#define flush_tlb_all() \
+        processor.u.armv3v4._flush_tlb_all()
+
+#define flush_tlb_mm(_mm) \
+        do { \
+                if ((_mm) == current->mm) \
+                        processor.u.armv3v4._flush_tlb_all(); \
+        } while (0)
+
+#define flush_tlb_range(_mm,_start,_end) \
+        do { \
+                if ((_mm) == current->mm) \
+                        processor.u.armv3v4._flush_tlb_area \
+                                ((_start), (_end), 1); \
+        } while (0)
+
+#define flush_tlb_page(_vma,_vmaddr) \
+        do { \
+                if ((_vma)->vm_mm == current->mm) \
+                        processor.u.armv3v4._flush_tlb_area \
+                                ((_vmaddr), (_vmaddr) + PAGE_SIZE, \
+                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0); \
+        } while (0)
+
+/*
+ * Since the page tables are in cached memory, we need to flush the dirty
+ * data cached entries back before we flush the tlb...  This is also useful
+ * to flush out the SWI instruction for signal handlers...
+ */
+#define __flush_entry_to_ram(entry) \
+        processor.u.armv3v4._flush_cache_entry((unsigned long)(entry))
+
+#define __flush_pte_to_ram(entry) \
+        processor.u.armv3v4._flush_cache_pte((unsigned long)(entry))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT 20
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT 20
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: the sa110 is two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 256
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 4096
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+/* PMD types (actually level 1 descriptor) */
+#define PMD_TYPE_MASK 0x0003
+#define PMD_TYPE_FAULT 0x0000
+#define PMD_TYPE_TABLE 0x0001
+#define PMD_TYPE_SECT 0x0002
+#define PMD_UPDATABLE 0x0010
+#define PMD_SECT_CACHEABLE 0x0008
+#define PMD_SECT_BUFFERABLE 0x0004
+#define PMD_SECT_AP_WRITE 0x0400
+#define PMD_SECT_AP_READ 0x0800
+#define PMD_DOMAIN(x) ((x) << 5)
+
+/* PTE types (actually level 2 descriptor) */
+#define PTE_TYPE_MASK 0x0003
+#define PTE_TYPE_FAULT 0x0000
+#define PTE_TYPE_LARGE 0x0001
+#define PTE_TYPE_SMALL 0x0002
+#define PTE_AP_READ 0x0aa0
+#define PTE_AP_WRITE 0x0550
+#define PTE_CACHEABLE 0x0008
+#define PTE_BUFFERABLE 0x0004
+
+/* Domains */
+#define DOMAIN_USER 0
+#define DOMAIN_KERNEL 1
+#define DOMAIN_TABLE 1
+#define DOMAIN_IO 2
+
+#define _PAGE_CHG_MASK (0xfffff00c | PTE_TYPE_MASK)
+
+/*
+ * We define the bits in the page tables as follows:
+ *  PTE_BUFFERABLE  page is dirty
+ *  PTE_AP_WRITE    page is writable
+ *  PTE_AP_READ     page is young (unsetting this causes faults for any access)
+ *
+ * Any page that is mapped in is assumed to be readable...
+ */
+#define PAGE_NONE __pgprot(PTE_TYPE_SMALL)
+#define PAGE_SHARED __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ | PTE_AP_WRITE)
+#define PAGE_COPY __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
+#define PAGE_READONLY __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
+#define PAGE_KERNEL __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_BUFFERABLE | PTE_AP_WRITE)
+
+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER))
+#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))
+
+/*
+ * The arm can't do page protection for execute, and treats it the same
+ * as read.  Also, write permissions imply read permissions.  This is the
+ * closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+#undef TEST_VERIFY_AREA
+
+/*
+ * BAD_PAGETABLE is used when we need a bogus page-table, while
+ * BAD_PAGE is used for a bogus page.
+ *
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern pte_t __bad_page(void);
+extern pte_t * __bad_pagetable(void);
+extern unsigned long *empty_zero_page;
+
+#define BAD_PAGETABLE __bad_pagetable()
+#define BAD_PAGE __bad_page()
+#define ZERO_PAGE ((unsigned long) empty_zero_page)
+
+/* number of bits that fit into a memory pointer */
+#define BYTES_PER_PTR (sizeof(unsigned long))
+#define BITS_PER_PTR (8*BYTES_PER_PTR)
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK (~(sizeof(void*)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+#define SIZEOF_PTR_LOG2 2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk,pgdir) \
+do { \
+        tsk->tss.memmap = __virt_to_phys(pgdir); \
+        if ((tsk) == current) \
+                __asm__ __volatile__( \
+                "mcr%? p15, 0, %0, c2, c0, 0\n" \
+                : : "r" (tsk->tss.memmap)); \
+} while (0)
+
+extern __inline__ int pte_none(pte_t pte)
+{
+        return !pte_val(pte);
+}
+
+#define pte_clear(ptep) set_pte(ptep, __pte(0))
+
+extern __inline__ int pte_present(pte_t pte)
+{
+        switch (pte_val(pte) & PTE_TYPE_MASK) {
+        case PTE_TYPE_LARGE:
+        case PTE_TYPE_SMALL:
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+extern __inline__ int pmd_none(pmd_t pmd)
+{
+        return !pmd_val(pmd);
+}
+
+#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
+
+extern __inline__ int pmd_bad(pmd_t pmd)
+{
+        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
+        case PMD_TYPE_FAULT:
+        case PMD_TYPE_TABLE:
+                return 0;
+        default:
+                return 1;
+        }
+}
+
+extern __inline__ int pmd_present(pmd_t pmd)
+{
+        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
+        case PMD_TYPE_TABLE:
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_present(pgd) (1)
+#define pgd_clear(pgdp)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define pte_read(pte) (1)
+#define pte_exec(pte) (1)
+
+extern __inline__ int pte_write(pte_t pte)
+{
+        return pte_val(pte) & PTE_AP_WRITE;
+}
+
+extern __inline__ int pte_cacheable(pte_t pte)
+{
+        return pte_val(pte) & PTE_CACHEABLE;
+}
+
+extern __inline__ int pte_dirty(pte_t pte)
+{
+        return pte_val(pte) & PTE_BUFFERABLE;
+}
+
+extern __inline__ int pte_young(pte_t pte)
+{
+        return pte_val(pte) & PTE_AP_READ;
+}
+
+extern __inline__ pte_t pte_wrprotect(pte_t pte)
+{
+        pte_val(pte) &= ~PTE_AP_WRITE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_nocache(pte_t pte)
+{
+        pte_val(pte) &= ~PTE_CACHEABLE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkclean(pte_t pte)
+{
+        pte_val(pte) &= ~PTE_BUFFERABLE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkold(pte_t pte)
+{
+        pte_val(pte) &= ~PTE_AP_READ;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkwrite(pte_t pte)
+{
+        pte_val(pte) |= PTE_AP_WRITE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkdirty(pte_t pte)
+{
+        pte_val(pte) |= PTE_BUFFERABLE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkyoung(pte_t pte)
+{
+        pte_val(pte) |= PTE_AP_READ;
+        return pte;
+}
+
+/*
+ * The following are unable to be implemented on this MMU
+ */
+#if 0
+extern __inline__ pte_t pte_rdprotect(pte_t pte)
+{
+        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
+        return pte;
+}
+
+extern __inline__ pte_t pte_exprotect(pte_t pte)
+{
+        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkread(pte_t pte)
+{
+        pte_val(pte) |= PTE_CACHEABLE;
+        return pte;
+}
+
+extern __inline__ pte_t pte_mkexec(pte_t pte)
+{
+        pte_val(pte) |= PTE_CACHEABLE;
+        return pte;
+}
+#endif
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
+{
+        pte_t pte;
+        pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
+        return pte;
+}
+
+/* This takes a physical page address that is used by the remapping functions */
+extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+{
+        pte_t pte;
+        pte_val(pte) = physpage + pgprot_val(pgprot);
+        return pte;
+}
+
+extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+        return pte;
+}
+
+extern __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
+{
+        *pteptr = pteval;
+        __flush_pte_to_ram(pteptr);
+}
+
+extern __inline__ unsigned long pte_page(pte_t pte)
+{
+        return (unsigned long)phys_to_virt(pte_val(pte) & PAGE_MASK);
+}
+
+extern __inline__ pmd_t mk_user_pmd(pte_t *ptep)
+{
+        pmd_t pmd;
+        pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_USER_TABLE;
+        return pmd;
+}
+
+extern __inline__ pmd_t mk_kernel_pmd(pte_t *ptep)
+{
+        pmd_t pmd;
+        pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_KERNEL_TABLE;
+        return pmd;
+}
+
+#if 1
+#define set_pmd(pmdp,pmd) processor.u.armv3v4._set_pmd(pmdp,pmd)
+#else
+extern __inline__ void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+        *pmdp = pmd;
+        __flush_pte_to_ram(pmdp);
+}
+#endif
+
+extern __inline__ unsigned long pmd_page(pmd_t pmd)
+{
+        return (unsigned long)phys_to_virt(pmd_val(pmd) & 0xfffffc00);
+}
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+        return mm->pgd + (address >> PGDIR_SHIFT);
+}
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, address) ((pmd_t *)(dir))
+
+/* Find an entry in the third-level page table.. */
+extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
+{
+        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+}
+
+extern unsigned long get_small_page(int priority);
+extern void free_small_page(unsigned long page);
+
+/*
+ * Allocate and free page tables.  The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+extern __inline__ void pte_free_kernel(pte_t * pte)
+{
+        free_small_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+{
+        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+        if (pmd_none(*pmd)) {
+                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
+                if (pmd_none(*pmd)) {
+                        if (page) {
+                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
+                                set_pmd(pmd, mk_kernel_pmd(page));
+                                return page + address;
+                        }
+                        set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE));
+                        return NULL;
+                }
+                free_small_page((unsigned long) page);
+        }
+        if (pmd_bad(*pmd)) {
+                printk(bad_pmd_string, pmd_val(*pmd));
+                set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE));
+                return NULL;
+        }
+        return (pte_t *) pmd_page(*pmd) + address;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pmd_free_kernel(pmdp) pmd_val(*(pmdp)) = 0;
+#define pmd_alloc_kernel(pgdp, address) ((pmd_t *)(pgdp))
+
+extern __inline__ void pte_free(pte_t * pte)
+{
+        free_small_page((unsigned long) pte);
+}
+
+extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+{
+        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+
+        if (pmd_none(*pmd)) {
+                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
+                if (pmd_none(*pmd)) {
+                        if (page) {
+                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
+                                set_pmd(pmd, mk_user_pmd(page));
+                                return page + address;
+                        }
+                        set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE));
+                        return NULL;
+                }
+                free_small_page ((unsigned long) page);
+        }
+        if (pmd_bad(*pmd)) {
+                printk(bad_pmd_string, pmd_val(*pmd));
+                set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE));
+                return NULL;
+        }
+        return (pte_t *) pmd_page(*pmd) + address;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pmd_free(pmdp) pmd_val(*(pmdp)) = 0;
+#define pmd_alloc(pgdp, address) ((pmd_t *)(pgdp))
+
+/*
+ * Free a page directory.  Takes the virtual address.
+ */
+extern __inline__ void pgd_free(pgd_t * pgd)
+{
+        free_pages((unsigned long) pgd, 2);
+}
+
+/*
+ * Allocate a new page directory.  Return the virtual address of it.
+ */
+extern __inline__ pgd_t * pgd_alloc(void)
+{
+        unsigned long pgd;
+
+        /*
+         * need to get a 16k page for level 1
+         */
+        pgd = __get_free_pages(GFP_KERNEL,2,0);
+        if (pgd)
+                memzero ((void *)pgd, PTRS_PER_PGD * BYTES_PER_PTR);
+        return (pgd_t *)pgd;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * The sa110 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
+        unsigned long address, pte_t pte)
+{
+}
+
+#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
+#define SWP_OFFSET(entry) ((entry) >> 9)
+#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
+
+#endif /* __ASM_PROC_PGTABLE_H */
+
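A sketch of how generic mm code walks these tables with the accessors defined above (kernel-context fragment, not standalone; on this folded two-level layout pmd_offset() simply reinterprets the pgd slot):

    /* Illustrative only: look up the pte for a user address. */
    static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pmd_t *pmd = pmd_offset(pgd, addr); /* no-op cast: pmd is folded */

            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;
            return pte_offset(pmd, addr);
    }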
diff --git a/include/asm-arm/proc-armv/processor.h b/include/asm-arm/proc-armv/processor.h
new file mode 100644
index 000000000..cc2be2ceb
--- /dev/null
+++ b/include/asm-arm/proc-armv/processor.h
@@ -0,0 +1,103 @@
+/*
+ * linux/include/asm-arm/proc-armv/processor.h
+ *
+ * Copyright (c) 1996 Russell King.
+ *
+ * Changelog:
+ *  20-09-1996 RMK Created
+ *  26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*'
+ *  28-09-1996 RMK Moved start_thread into the processor dependencies
+ */
+#ifndef __ASM_PROC_PROCESSOR_H
+#define __ASM_PROC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#define KERNEL_STACK_SIZE PAGE_SIZE
+
+/*
+ * on arm2,3 wp does not work
+ */
+#define wp_works_ok 0
+#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
+
+struct context_save_struct {
+        unsigned long cpsr;
+        unsigned long r4;
+        unsigned long r5;
+        unsigned long r6;
+        unsigned long r7;
+        unsigned long r8;
+        unsigned long r9;
+        unsigned long fp;
+        unsigned long pc;
+};
+
+#define EXTRA_THREAD_STRUCT \
+        struct context_save_struct *save; \
+        unsigned long memmap;
+
+#define EXTRA_THREAD_STRUCT_INIT \
+        0, \
+        ((unsigned long) swapper_pg_dir) - PAGE_OFFSET
+
+DECLARE_THREAD_STRUCT;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern __inline__ unsigned long thread_saved_pc (struct thread_struct *t)
+{
+        if (t->save)
+                return t->save->pc;
+        else
+                return 0;
+}
+
+extern __inline__ unsigned long get_css_fp (struct thread_struct *t)
+{
+        if (t->save)
+                return t->save->fp;
+        else
+                return 0;
+}
+
+asmlinkage void ret_from_sys_call(void) __asm__ ("ret_from_sys_call");
+
+extern __inline__ void copy_thread_css (struct context_save_struct *save)
+{
+        save->cpsr = SVC_MODE;
+        save->r4 =
+        save->r5 =
+        save->r6 =
+        save->r7 =
+        save->r8 =
+        save->r9 =
+        save->fp = 0;
+        save->pc = (unsigned long) ret_from_sys_call;
+}
+
+#define start_thread(regs,pc,sp) \
+({ \
+        unsigned long *stack = (unsigned long *)sp; \
+        set_fs(USER_DS); \
+        memzero(regs->uregs, sizeof(regs->uregs)); \
+        regs->ARM_cpsr = sp <= 0x04000000 ? USR26_MODE : USR_MODE; \
+        regs->ARM_pc = pc;              /* pc */ \
+        regs->ARM_sp = sp;              /* sp */ \
+        regs->ARM_r2 = stack[2];        /* r2 (envp) */ \
+        regs->ARM_r1 = stack[1];        /* r1 (argv) */ \
+        regs->ARM_r0 = stack[0];        /* r0 (argc) */ \
+})
+
+/* Allocation and freeing of basic task resources. */
+/*
+ * NOTE! The task struct and the stack go together
+ */
+#define alloc_task_struct() \
+        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1,0))
+#define free_task_struct(p) free_pages((unsigned long)(p),1)
+
+#endif
+
+#endif
diff --git a/include/asm-arm/proc-armv/ptrace.h b/include/asm-arm/proc-armv/ptrace.h
new file mode 100644
index 000000000..42a56433f
--- /dev/null
+++ b/include/asm-arm/proc-armv/ptrace.h
@@ -0,0 +1,75 @@
+/*
+ * linux/include/asm-arm/proc-armv/ptrace.h
+ *
+ * Copyright (C) 1996 Russell King
+ */
+
+#ifndef __ASM_PROC_PTRACE_H
+#define __ASM_PROC_PTRACE_H
+
+/* this struct defines the way the registers are stored on the
+   stack during a system call. */
+
+struct pt_regs {
+        long uregs[18];
+};
+
+#define ARM_cpsr uregs[16]
+#define ARM_pc uregs[15]
+#define ARM_lr uregs[14]
+#define ARM_sp uregs[13]
+#define ARM_ip uregs[12]
+#define ARM_fp uregs[11]
+#define ARM_r10 uregs[10]
+#define ARM_r9 uregs[9]
+#define ARM_r8 uregs[8]
+#define ARM_r7 uregs[7]
+#define ARM_r6 uregs[6]
+#define ARM_r5 uregs[5]
+#define ARM_r4 uregs[4]
+#define ARM_r3 uregs[3]
+#define ARM_r2 uregs[2]
+#define ARM_r1 uregs[1]
+#define ARM_r0 uregs[0]
+#define ARM_ORIG_r0 uregs[17] /* -1 */
+
+#define USR26_MODE 0x00
+#define FIQ26_MODE 0x01
+#define IRQ26_MODE 0x02
+#define SVC26_MODE 0x03
+#define USR_MODE 0x10
+#define FIQ_MODE 0x11
+#define IRQ_MODE 0x12
+#define SVC_MODE 0x13
+#define ABT_MODE 0x17
+#define UND_MODE 0x1b
+#define SYSTEM_MODE 0x1f
+#define MODE_MASK 0x1f
+#define F_BIT 0x40
+#define I_BIT 0x80
+#define CC_V_BIT (1 << 28)
+#define CC_C_BIT (1 << 29)
+#define CC_Z_BIT (1 << 30)
+#define CC_N_BIT (1 << 31)
+
+#define user_mode(regs) \
+        ((((regs)->ARM_cpsr & MODE_MASK) == USR_MODE) || \
+         (((regs)->ARM_cpsr & MODE_MASK) == USR26_MODE))
+
+#define processor_mode(regs) \
+        ((regs)->ARM_cpsr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+        (!((regs)->ARM_cpsr & I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+        (!((regs)->ARM_cpsr & F_BIT))
+
+#define condition_codes(regs) \
+        ((regs)->ARM_cpsr & (CC_V_BIT|CC_C_BIT|CC_Z_BIT|CC_N_BIT))
+
+#define instruction_pointer(regs) ((regs)->ARM_pc)
+#define pc_pointer(v) (v)
+
+#endif
+
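A sketch of how a trap handler might use the ptrace.h predicates above (kernel-context fragment; the printk formats are illustrative):

    /* Illustrative only: classify a fault from the saved registers. */
    static void report_fault(struct pt_regs *regs)
    {
            if (user_mode(regs))
                    printk("user fault at %08lx (mode %02x)\n",
                           instruction_pointer(regs),
                           processor_mode(regs));
            else if (!interrupts_enabled(regs))
                    printk("kernel fault taken with IRQs masked\n");
    }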
diff --git a/include/asm-arm/proc-armv/semaphore.h b/include/asm-arm/proc-armv/semaphore.h
new file mode 100644
index 000000000..fb1ad746f
--- /dev/null
+++ b/include/asm-arm/proc-armv/semaphore.h
@@ -0,0 +1,81 @@
+/*
+ * linux/include/asm-arm/semaphore.h
+ */
+#ifndef __ASM_PROC_SEMAPHORE_H
+#define __ASM_PROC_SEMAPHORE_H
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "__down" is the actual routine that waits...
+ */
+extern inline void down(struct semaphore * sem)
+{
+        __asm__ __volatile__ ("
+        @ atomic down operation
+        mrs     r0, cpsr
+        orr     r1, r0, #128            @ disable IRQs
+        bic     r0, r0, #0x80000000     @ clear N
+        msr     cpsr, r1
+        ldr     r1, [%0]
+        subs    r1, r1, #1
+        str     r1, [%0]
+        orrmi   r0, r0, #0x80000000     @ set N
+        msr     cpsr, r0
+        movmi   r0, %0
+        blmi    " SYMBOL_NAME_STR(__down)
+                : : "r" (sem) : "r0", "r1", "r2", "r3", "ip", "lr", "cc");
+}
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "__down_interruptible" is the actual routine that waits...
+ */
+extern inline int down_interruptible (struct semaphore * sem)
+{
+        int result;
+        __asm__ __volatile__ ("
+        @ atomic down operation
+        mrs     r0, cpsr
+        orr     r1, r0, #128            @ disable IRQs
+        bic     r0, r0, #0x80000000     @ clear N
+        msr     cpsr, r1
+        ldr     r1, [%1]
+        subs    r1, r1, #1
+        str     r1, [%1]
+        orrmi   r0, r0, #0x80000000     @ set N
+        msr     cpsr, r0
+        movmi   r0, %1
+        movpl   r0, #0
+        blmi    " SYMBOL_NAME_STR(__down_interruptible) "
+        mov     %0, r0"
+                : "=r" (result)
+                : "r" (sem)
+                : "r0", "r1", "r2", "r3", "ip", "lr", "cc");
+        return result;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+extern inline void up(struct semaphore * sem)
+{
+        __asm__ __volatile__ ("
+        @ atomic up operation
+        mrs     r0, cpsr
+        orr     r1, r0, #128            @ disable IRQs
+        bic     r0, r0, #0x80000000     @ clear N
+        msr     cpsr, r1
+        ldr     r1, [%0]
+        adds    r1, r1, #1
+        str     r1, [%0]
+        orrls   r0, r0, #0x80000000     @ set N
+        msr     cpsr, r0
+        movmi   r0, %0
+        blmi    " SYMBOL_NAME_STR(__up)
+                : : "r" (sem) : "r0", "r1", "r2", "r3", "ip", "lr", "cc");
+}
+
+#endif
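The intended usage pattern, as elsewhere in the 2.1 kernel (sketch; the MUTEX initialiser is assumed from the generic semaphore header, and the -EINTR convention for down_interruptible() is the usual one):

    /* Illustrative only: the down()/up() critical-section pattern. */
    static struct semaphore my_sem = MUTEX;     /* assumed initialiser */

    int guarded_operation(void)
    {
            if (down_interruptible(&my_sem))
                    return -EINTR;      /* interrupted while sleeping */
            /* ... touch the shared state ... */
            up(&my_sem);                /* calls __up() if someone waits */
            return 0;
    }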
diff --git a/include/asm-arm/proc-armv/shmparam.h b/include/asm-arm/proc-armv/shmparam.h
new file mode 100644
index 000000000..10e280b6d
--- /dev/null
+++ b/include/asm-arm/proc-armv/shmparam.h
@@ -0,0 +1,19 @@
+/*
+ * linux/include/asm-arm/proc-armv/shmparam.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * definitions for the shared process memory on ARM v3 or v4
+ * processors
+ */
+
+#ifndef __ASM_PROC_SHMPARAM_H
+#define __ASM_PROC_SHMPARAM_H
+
+#ifndef SHM_RANGE_START
+#define SHM_RANGE_START 0x50000000
+#define SHM_RANGE_END 0x60000000
+#define SHMMAX 0x01000000
+#endif
+
+#endif
diff --git a/include/asm-arm/proc-armv/system.h b/include/asm-arm/proc-armv/system.h
new file mode 100644
index 000000000..6a1f38de0
--- /dev/null
+++ b/include/asm-arm/proc-armv/system.h
@@ -0,0 +1,137 @@
+/*
+ * linux/include/asm-arm/proc-armv/system.h
+ *
+ * Copyright (C) 1996 Russell King
+ */
+
+#ifndef __ASM_PROC_SYSTEM_H
+#define __ASM_PROC_SYSTEM_H
+
+extern const char xchg_str[];
+
+extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+        switch (size) {
+        case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
+                break;
+        case 2: abort ();
+        case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
+                break;
+        default: arm_invalidptr(xchg_str, size);
+        }
+        return x;
+}
+
+/*
+ * This processor does not need anything special before reset,
+ * but RPC may do...
+ */
+extern __inline__ void proc_hard_reset(void)
+{
+}
+
+/*
+ * We can wait for an interrupt...
+ */
+#if 0
+#define proc_idle() \
+        do { \
+        __asm__ __volatile__( \
+"       mcr     p15, 0, %0, c15, c8, 2" \
+          : : "r" (0)); \
+        } while (0)
+#else
+#define proc_idle()
+#endif
+/*
+ * A couple of speedups for the ARM
+ */
+
+/*
+ * Save the current interrupt enable state & disable IRQs
+ */
+#define __save_flags_cli(x) \
+        do { \
+          unsigned long temp; \
+          __asm__ __volatile__( \
+        "mrs    %1, cpsr\n" \
+"       and     %0, %1, #192\n" \
+"       orr     %1, %1, #128\n" \
+"       msr     cpsr, %1" \
+          : "=r" (x), "=r" (temp) \
+          : \
+          : "memory"); \
+        } while (0)
+
+/*
+ * Enable IRQs
+ */
+#define __sti() \
+        do { \
+          unsigned long temp; \
+          __asm__ __volatile__( \
+        "mrs    %0, cpsr\n" \
+"       bic     %0, %0, #128\n" \
+"       msr     cpsr, %0" \
+          : "=r" (temp) \
+          : \
+          : "memory"); \
+        } while(0)
+
+/*
+ * Disable IRQs
+ */
+#define __cli() \
+        do { \
+          unsigned long temp; \
+          __asm__ __volatile__( \
+        "mrs    %0, cpsr\n" \
+"       orr     %0, %0, #128\n" \
+"       msr     cpsr, %0" \
+          : "=r" (temp) \
+          : \
+          : "memory"); \
+        } while(0)
+
+/*
+ * save current IRQ & FIQ state
+ */
+#define __save_flags(x) \
+        do { \
+          __asm__ __volatile__( \
+        "mrs    %0, cpsr\n" \
+"       and     %0, %0, #192" \
+          : "=r" (x) \
+          : \
+          : "memory"); \
+        } while (0)
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define __restore_flags(x) \
+        do { \
+          unsigned long temp; \
+          __asm__ __volatile__( \
+        "mrs    %0, cpsr\n" \
+"       bic     %0, %0, #192\n" \
+"       orr     %0, %0, %1\n" \
+"       msr     cpsr, %0" \
+          : "=r" (temp) \
+          : "r" (x) \
+          : "memory"); \
+        } while (0)
+
+#ifdef __SMP__
+#error SMP not supported
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_flags_cli(x) __save_flags_cli(x)
+
+#endif
+
+#endif
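These expand to mrs/msr sequences that toggle the I bit (0x80) in the CPSR; the classic usage is the save/disable/restore pattern (kernel-context sketch):

    /* Illustrative only: a 2.1-era interrupt-safe critical section. */
    static void touch_shared_state(void)
    {
            unsigned long flags;

            save_flags(flags);      /* remember current I/F bits */
            cli();                  /* CPSR |= 0x80: mask IRQs */
            /* ... code that must not race with interrupt handlers ... */
            restore_flags(flags);   /* put the saved I/F bits back */
    }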
diff --git a/include/asm-arm/proc-armv/uaccess.h b/include/asm-arm/proc-armv/uaccess.h
new file mode 100644
index 000000000..acadb35a3
--- /dev/null
+++ b/include/asm-arm/proc-armv/uaccess.h
@@ -0,0 +1,204 @@
+/*
+ * linux/include/asm-arm/proc-armv/uaccess.h
+ */
+
+/*
+ * The fs functions are implemented on the ARMV3 and V4 architectures
+ * using the domain register.
+ *
+ * DOMAIN_IO     - domain 2 includes all IO only
+ * DOMAIN_KERNEL - domain 1 includes all kernel memory only
+ * DOMAIN_USER   - domain 0 includes all user memory only
+ */
+
+#define DOMAIN_CLIENT 1
+#define DOMAIN_MANAGER 3
+
+#define DOMAIN_USER_CLIENT ((DOMAIN_CLIENT) << 0)
+#define DOMAIN_USER_MANAGER ((DOMAIN_MANAGER) << 0)
+
+#define DOMAIN_KERNEL_CLIENT ((DOMAIN_CLIENT) << 2)
+#define DOMAIN_KERNEL_MANAGER ((DOMAIN_MANAGER) << 2)
+
+#define DOMAIN_IO_CLIENT ((DOMAIN_CLIENT) << 4)
+#define DOMAIN_IO_MANAGER ((DOMAIN_MANAGER) << 4)
+
+/*
+ * When we want to access kernel memory in the *_user functions,
+ * we change the domain register to KERNEL_DS, thus allowing
+ * unrestricted access
+ */
+#define KERNEL_DOMAIN (DOMAIN_USER_CLIENT | DOMAIN_KERNEL_MANAGER | DOMAIN_IO_CLIENT)
+#define USER_DOMAIN (DOMAIN_USER_CLIENT | DOMAIN_KERNEL_CLIENT | DOMAIN_IO_CLIENT)
+
+/*
+ * Note that this is actually 0x1,0000,0000
+ */
+#define KERNEL_DS 0x00000000
+#define USER_DS 0xc0000000
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->addr_limit)
+
+#define segment_eq(a,b) ((a) == (b))
+
+extern __inline__ void set_fs (mm_segment_t fs)
+{
+        current->addr_limit = fs;
+
+        __asm__ __volatile__("mcr p15, 0, %0, c3, c0" :
+                 : "r" (fs ? USER_DOMAIN : KERNEL_DOMAIN));
+}
+
+/*
+ * a + s <= 2^32  ->  C = 0 || Z = 0 (LS)
+ * (a + s) <= l   ->  C = 0 || Z = 0 (LS)
+ */
+#define __range_ok(addr,size) ({ \
+        unsigned long flag, sum; \
+        __asm__ __volatile__("adds %1, %2, %3; cmpls %1, %0; movls %0, #0" \
+                : "=&r" (flag), "=&r" (sum) \
+                : "r" (addr), "Ir" (size), "0" (current->addr_limit) \
+                : "cc"); \
+        flag; })

+#define __addr_ok(addr) ({ \
+        unsigned long flag; \
+        __asm__ __volatile__("cmp %2, %0; movlo %0, #0" \
+                : "=&r" (flag) \
+                : "0" (current->addr_limit), "r" (addr) \
+                : "cc"); \
+        (flag == 0); })
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+
+#define __put_user_asm_byte(x,addr,err) \
+        __asm__ __volatile__( \
+        "1:     strbt   %1,[%2],#0\n" \
+        "2:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "3:     mvn     %0, %3\n" \
+        "       b       2b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 3b\n" \
+        "       .previous" \
+        : "=r" (err) \
+        : "r" (x), "r" (addr), "i" (EFAULT), "0" (err))
+
+#define __put_user_asm_half(x,addr,err) \
+({ \
+        unsigned long __temp = (unsigned long)(x); \
+        __asm__ __volatile__( \
+        "1:     strbt   %1,[%3],#0\n" \
+        "2:     strbt   %2,[%4],#0\n" \
+        "3:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "4:     mvn     %0, %5\n" \
+        "       b       3b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 4b\n" \
+        "       .long   2b, 4b\n" \
+        "       .previous" \
+        : "=r" (err) \
+        : "r" (__temp), "r" (__temp >> 8), \
+          "r" (addr), "r" ((int)(addr) + 1), \
+          "i" (EFAULT), "0" (err)); \
+})
+
+#define __put_user_asm_word(x,addr,err) \
+        __asm__ __volatile__( \
+        "1:     strt    %1,[%2],#0\n" \
+        "2:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "3:     mvn     %0, %3\n" \
+        "       b       2b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 3b\n" \
+        "       .previous" \
+        : "=r" (err) \
+        : "r" (x), "r" (addr), "i" (EFAULT), "0" (err))
+
+#define __get_user_asm_byte(x,addr,err) \
+        __asm__ __volatile__( \
+        "1:     ldrbt   %1,[%2],#0\n" \
+        "2:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "3:     mvn     %0, %3\n" \
+        "       b       2b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 3b\n" \
+        "       .previous" \
+        : "=r" (err), "=r" (x) \
+        : "r" (addr), "i" (EFAULT), "0" (err))
+
+#define __get_user_asm_half(x,addr,err) \
+({ \
+        unsigned long __temp; \
+        __asm__ __volatile__( \
+        "1:     ldrbt   %1,[%3],#0\n" \
+        "2:     ldrbt   %2,[%4],#0\n" \
+        "       orr     %1, %1, %2, lsl #8\n" \
+        "3:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "4:     mvn     %0, %5\n" \
+        "       b       3b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 4b\n" \
+        "       .long   2b, 4b\n" \
+        "       .previous" \
+        : "=r" (err), "=r" (x), "=&r" (__temp) \
+        : "r" (addr), "r" ((int)(addr) + 1), \
+          "i" (EFAULT), "0" (err)); \
+})
+
+#define __get_user_asm_word(x,addr,err) \
+        __asm__ __volatile__( \
+        "1:     ldrt    %1,[%2],#0\n" \
+        "2:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align  2\n" \
+        "3:     mvn     %0, %3\n" \
+        "       b       2b\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .align  3\n" \
+        "       .long   1b, 3b\n" \
+        "       .previous" \
+        : "=r" (err), "=r" (x) \
+        : "r" (addr), "i" (EFAULT), "0" (err))
+
+extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
+#define __do_copy_from_user(to,from,n) \
+        (n) = __arch_copy_from_user(to,from,n)
+
+extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
+#define __do_copy_to_user(to,from,n) \
+        (n) = __arch_copy_to_user(to,from,n)
+
+extern unsigned long __arch_clear_user(void *addr, unsigned long n);
+#define __do_clear_user(addr,sz) \
+        (sz) = __arch_clear_user(addr,sz)
+
+extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count);
+#define __do_strncpy_from_user(dst,src,count,res) \
+        (res) = __arch_strncpy_from_user(dst,src,count)
+
+extern unsigned long __arch_strlen_user(const char *s);
+#define __do_strlen_user(s,res) \
+        (res) = __arch_strlen_user(s)
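A sketch of how a system call sits on top of these primitives (kernel-context fragment; the access_ok()/get_user() wrappers over __range_ok and __get_user_asm_* come from the generic asm-arm/uaccess.h layer, which is assumed here):

    /* Illustrative only: validate then fetch a word from user space. */
    asmlinkage int sys_example(int *uptr)
    {
            int val;

            if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
                    return -EFAULT;     /* range check via __range_ok */
            if (get_user(val, uptr))
                    return -EFAULT;     /* faulting ldrt -> fixup path */
            return val;
    }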
diff --git a/include/asm-arm/proc-armv/uncompress.h b/include/asm-arm/proc-armv/uncompress.h
new file mode 100644
index 000000000..acce2de35
--- /dev/null
+++ b/include/asm-arm/proc-armv/uncompress.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/asm-arm/proc-armv/uncompress.h
+ *
+ * (c) 1997 Russell King
+ */
+
+static inline void proc_decomp_setup (void)
+{
+        __asm__ __volatile__("
+                mrc     p15, 0, r0, c0, c0
+                eor     r0, r0, #0x44 << 24
+                eor     r0, r0, #0x01 << 16
+                eor     r0, r0, #0xA1 << 8
+                movs    r0, r0, lsr #4
+                mcreq   p15, 0, r0, c7, c5, 0           @ flush I cache
+                mrceq   p15, 0, r0, c1, c0
+                orreq   r0, r0, #1 << 12
+                mcreq   p15, 0, r0, c1, c0              @ enable I cache
+                mov     r0, #0
+                mcreq   p15, 0, r0, c15, c1, 2          @ enable clock switching
+                " : : : "r0", "cc", "memory");
+}
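The three EORs compare the CP15 ID register against 0x4401a1xx (a StrongARM ID) and the lsr #4 discards the revision nibble, so the cache and clock-switching setup only runs on a matching CPU. The same test in C, as a worked example:

    /* Illustrative only: the CPU check performed by the asm above. */
    static int is_strongarm(unsigned long cp15_id)
    {
            return ((cp15_id ^ 0x4401a100) >> 4) == 0;
    }
    /* is_strongarm(0x4401a103) == 1; is_strongarm(0x41007100) == 0 */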