path: root/include/asm-sh
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/dma.h              4
-rw-r--r--  include/asm-sh/highmem.h         85
-rw-r--r--  include/asm-sh/mmu_context.h      2
-rw-r--r--  include/asm-sh/page.h            32
-rw-r--r--  include/asm-sh/pgtable-2level.h  62
-rw-r--r--  include/asm-sh/pgtable.h        264
-rw-r--r--  include/asm-sh/processor.h        1
-rw-r--r--  include/asm-sh/spinlock.h        47
-rw-r--r--  include/asm-sh/string.h          21
-rw-r--r--  include/asm-sh/system.h          28
10 files changed, 330 insertions, 216 deletions
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h
index 4561b3d05..16f54584c 100644
--- a/include/asm-sh/dma.h
+++ b/include/asm-sh/dma.h
@@ -6,6 +6,10 @@
#define MAX_DMA_CHANNELS 8
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* XXX: This is not applicable to SuperH, just needed for alloc_bootmem */
+#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
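The XXX note above is worth keeping: on SuperH the constant has no
hardware meaning and exists only because the generic bootmem allocator
wants a "goal" below which early allocations land. A sketch of how
<linux/bootmem.h> of this vintage consumes it (from memory, not part of
this patch):

        /* Early allocations are steered below MAX_DMA_ADDRESS by default. */
        #define alloc_bootmem(x) \
                __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))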
diff --git a/include/asm-sh/highmem.h b/include/asm-sh/highmem.h
new file mode 100644
index 000000000..bd5564aea
--- /dev/null
+++ b/include/asm-sh/highmem.h
@@ -0,0 +1,85 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <linux/init.h>
+
+/* undef for production */
+#define HIGHMEM_DEBUG 1
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+
+extern void kmap_init(void) __init;
+
+/* kmap helper functions needed to access highmem pages from the kernel */
+#include <asm/pgtable.h>
+#include <asm/kmap_types.h>
+
+extern inline unsigned long kmap(struct page *page, enum km_type type)
+{
+ if (page < highmem_start_page)
+ return page_address(page);
+ {
+ enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
+ unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
+
+#if HIGHMEM_DEBUG
+ if (!pte_none(*(kmap_pte-idx)))
+ {
+ __label__ here;
+ here:
+ printk(KERN_ERR "not null pte on CPU %d from %p\n",
+ smp_processor_id(), &&here);
+ }
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ __flush_tlb_one(vaddr);
+
+ return vaddr;
+ }
+}
+
+extern inline void kunmap(unsigned long vaddr, enum km_type type)
+{
+#if HIGHMEM_DEBUG
+ enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
+ if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ {
+ /* force other mappings to Oops if they try to access
+ this pte without first remapping it */
+ pte_clear(kmap_pte-idx);
+ __flush_tlb_one(vaddr);
+ }
+#endif
+}
+
+extern inline void kmap_check(void)
+{
+#if HIGHMEM_DEBUG
+ int idx_base = KM_TYPE_NR*smp_processor_id(), i;
+ for (i = idx_base; i < idx_base+KM_TYPE_NR; i++)
+ if (!pte_none(*(kmap_pte-i)))
+ BUG();
+#endif
+}
+#endif /* _ASM_HIGHMEM_H */
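Note that this early kmap() takes the fixmap slot type directly and
returns an unsigned long rather than a pointer. A minimal usage sketch
under that signature (KM_USER0 is assumed to be defined in
<asm/kmap_types.h>; the helper is illustrative, not from this patch):

        /* Zero one (possibly highmem) page via a temporary fixmap slot. */
        static void zero_one_page(struct page *page)
        {
                unsigned long vaddr = kmap(page, KM_USER0);
                memset((void *)vaddr, 0, PAGE_SIZE);
                /* kunmap() only clears the pte when HIGHMEM_DEBUG is set */
                kunmap(vaddr, KM_USER0);
        }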
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index d08cf7863..aca17690f 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -141,7 +141,7 @@ extern __inline__ void switch_mm(struct mm_struct *prev,
struct task_struct *tsk, unsigned int cpu)
{
if (prev != next) {
- unsigned long __pgdir = __pa(next->pgd);
+ unsigned long __pgdir = (unsigned long)next->pgd;
__asm__ __volatile__("mov.l %0,%1": \
:"r" (__pgdir), "m" (__m(MMU_TTB)));
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 9c8b732b2..228c8d2d8 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -23,12 +23,9 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#define STRICT_MM_TYPECHECKS
-
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
@@ -47,26 +44,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
-#else
-/*
- * .. while these make it easier on the compiler
- */
-typedef unsigned long pte_t;
-typedef unsigned long pmd_t;
-typedef unsigned long pgd_t;
-typedef unsigned long pgprot_t;
-
-#define pte_val(x) (x)
-#define pmd_val(x) (x)
-#define pgd_val(x) (x)
-#define pgprot_val(x) (x)
-
-#define __pte(x) (x)
-#define __pmd(x) (x)
-#define __pgd(x) (x)
-#define __pgprot(x) (x)
-
-#endif
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
@@ -75,7 +52,7 @@ typedef unsigned long pgprot_t;
/*
* IF YOU CHANGE THIS, PLEASE ALSO CHANGE
*
- * arch/sh/vmlinux.lds
+ * arch/sh/vmlinux.lds.S
*
* which has the same constant encoded..
*/
@@ -89,8 +66,15 @@ typedef unsigned long pgprot_t;
#define MAP_NR(addr) ((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT)
#ifndef __ASSEMBLY__
+
+extern int console_loglevel;
+
+/*
+ * Tell the user there is some problem.
+ */
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ console_loglevel = 0; \
asm volatile("nop"); \
} while (0)
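Unlike ports that can force a trap, this BUG() prints the location,
zeroes console_loglevel, and then executes a plain nop, so execution
continues past it. A sketch of the intended use (callers must not rely
on it halting):

        /* Illustrative only: on SH, control flow resumes after BUG(). */
        if (pmd_none(*pmd))
                BUG();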
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
new file mode 100644
index 000000000..ebfa410b3
--- /dev/null
+++ b/include/asm-sh/pgtable-2level.h
@@ -0,0 +1,62 @@
+#ifndef __ASM_SH_PGTABLE_2LEVEL_H
+#define __ASM_SH_PGTABLE_2LEVEL_H
+
+/*
+ * traditional two-level paging structure:
+ */
+
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+/*
+ * this is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT 22
+#define PTRS_PER_PMD 1
+
+#define PTRS_PER_PTE 1024
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+extern inline int pgd_none(pgd_t pgd) { return 0; }
+extern inline int pgd_bad(pgd_t pgd) { return 0; }
+extern inline int pgd_present(pgd_t pgd) { return 1; }
+#define pgd_clear(xp) do { pgd_val(*(xp)) = 0; } while (0)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
+extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ if (!pgd)
+ BUG();
+ return (pmd_t *) pgd;
+}
+
+#define SWP_ENTRY(type,offset) __pte((((type) << 1) | ((offset) << 8)))
+
+#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
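With the pmd folded into the pgd, a full walk costs only two real
lookups. A sketch using just the accessors defined above and in
pgtable.h (illustrative, not part of the patch):

        /* Resolve addr to its pte, or NULL if nothing is mapped there. */
        static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
        {
                pgd_t *pgd = pgd_offset(mm, addr);
                pmd_t *pmd = pmd_offset(pgd, addr);    /* just a cast of pgd */

                if (pmd_none(*pmd) || pmd_bad(*pmd))
                        return NULL;
                return pte_offset(pmd, addr);
        }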
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index cf5eab380..7ea944afa 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -65,25 +65,36 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
#endif /* !__ASSEMBLY__ */
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define PMD_SHIFT 22
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+#include <asm/pgtable-2level.h>
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+
+#define __beep() asm("")
+
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-/*
- * Entries per page directory level: we use two-level, so
- * we don't really have any PMD directory physically.
- */
-#define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 1024
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
#ifndef __ASSEMBLY__
#define VMALLOC_START P3SEG
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
@@ -96,7 +107,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
/* 0x010 */
#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */
-/* 0x080 */
+#define _PAGE_PROTNONE 0x080 /* software: if not present */
#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
#if defined(__sh3__)
@@ -115,7 +126,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
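The new software bit is what lets the PAGE_NONE change work: a
PROT_NONE mapping must fault in hardware (V-bit clear) while still
looking live to the kernel, and pte_present() later in this patch tests
(_PAGE_PRESENT | _PAGE_PROTNONE) for exactly that reason. A sketch
(phys_addr is a placeholder):

        pte_t none = mk_pte_phys(phys_addr, PAGE_NONE);
        /* the V-bit (_PAGE_PRESENT, 0x100) is clear => any access faults */
        /* pte_present(none) is still true via _PAGE_PROTNONE (0x080)     */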
@@ -156,154 +167,101 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) ((unsigned long) empty_zero_page)
-
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 2
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
+/*
+ * Handling allocation failures during page table setup.
*/
-extern __inline__ void set_pte(pte_t *ptep, pte_t pteval)
-{
- *ptep = pteval;
-}
+extern void __handle_bad_pmd(pmd_t * pmd);
+extern void __handle_bad_pmd_kernel(pmd_t * pmd);
-extern __inline__ int pte_none(pte_t pte)
-{
- return !pte_val(pte);
-}
-
-extern __inline__ int pte_present(pte_t pte)
-{
- return pte_val(pte) & _PAGE_PRESENT;
-}
-
-extern __inline__ void pte_clear(pte_t *ptep)
-{
- pte_val(*ptep) = 0;
-}
-
-extern __inline__ int pmd_none(pmd_t pmd)
-{
- return !pmd_val(pmd);
-}
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
+#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
+#define pmd_none(x) (!pmd_val(x))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-extern __inline__ int pmd_present(pmd_t pmd)
-{
- return pmd_val(pmd) & _PAGE_PRESENT;
-}
-
-extern __inline__ void pmd_clear(pmd_t *pmdp)
-{
- pmd_val(*pmdp) = 0;
-}
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
*/
-extern __inline__ int pgd_none(pgd_t pgd) { return 0; }
-extern __inline__ int pgd_bad(pgd_t pgd) { return 0; }
-extern __inline__ int pgd_present(pgd_t pgd) { return 1; }
-extern __inline__ void pgd_clear(pgd_t * pgdp) { }
+#define page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+#define pte_page(x) (mem_map+pte_pagenr(x))
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-extern __inline__ int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern __inline__ int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern __inline__ int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-extern __inline__ int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-extern __inline__ int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-
-extern __inline__ pte_t pte_rdprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern __inline__ pte_t pte_exprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern __inline__ pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-extern __inline__ pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-extern __inline__ pte_t pte_wrprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_RW; return pte; }
-extern __inline__ pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern __inline__ pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern __inline__ pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
-extern __inline__ pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-extern __inline__ pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+extern inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
+extern inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
+
+extern inline pte_t pte_rdprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_RW; return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
+extern inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
- return __pte(__pa(page) | pgprot_val(pgprot));
+ pte_t __pte;
+
+ pte_val(__pte) = (page-mem_map)*(unsigned long long)PAGE_SIZE +
+ pgprot_val(pgprot);
+ return __pte;
}
/* This takes a physical page address that is used by the remapping functions */
-extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
- return __pte(physpage | pgprot_val(pgprot));
-}
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
-extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
-extern __inline__ unsigned long pte_page(pte_t pte)
-{
- return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
-}
+#define page_pte_prot(page,prot) mk_pte(page, prot)
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
-extern __inline__ unsigned long pmd_page(pmd_t pmd)
-{
- return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
-}
+#define pmd_page(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-extern __inline__ void pmd_set(pmd_t * pmdp, pte_t * ptep)
-{
- pmd_val(*pmdp) = __pa(((unsigned long) ptep) & PAGE_MASK) | _PAGE_TABLE;
-}
+/* to find an entry in a page-table-directory. */
+#define __pgd_offset(address) \
+ ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define pgd_offset(mm, address) ((mm)->pgd+__pgd_offset(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-/* to find an entry in a page-table-directory */
-extern __inline__ pgd_t *pgd_offset(struct mm_struct *mm, unsigned long addr)
-{
- return mm->pgd + (addr >> PGDIR_SHIFT);
-}
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-/* Find an entry in the second-level page table.. */
-extern __inline__ pmd_t * pmd_offset(pgd_t * dir, unsigned long addr)
-{
- return (pmd_t *) dir;
-}
-
-/* Find an entry in the third-level page table.. */
-extern __inline__ pte_t *pte_offset(pmd_t * dir, unsigned long addr)
-{
- return (pte_t *) (pmd_page(*dir)) +
- ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
+/* Find an entry in the third-level page table.. */
+#define __pte_offset(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
+ __pte_offset(address))
/*
* Allocate and free page tables. The xxx_kernel() versions are
@@ -311,11 +269,6 @@ extern __inline__ pte_t *pte_offset(pmd_t * dir, unsigned long addr)
* if any.
*/
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
-
extern __inline__ pgd_t *get_pgd_slow(void)
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
@@ -379,23 +332,6 @@ extern __inline__ void free_pte_slow(pte_t *pte)
free_page((unsigned long)pte);
}
-/* We don't use pmd cache, so these are dummy routines */
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
-{
-}
-
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
-{
-}
-
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pte_kernel(pmd_t *pmd);
-
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
@@ -405,15 +341,15 @@ extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
+ pte_t *page = (pte_t *) get_pte_fast();
if (!page)
return get_pte_kernel_slow(pmd, address);
- pmd_set(pmd, page);
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
return page + address;
}
if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
+ __handle_bad_pmd_kernel(pmd);
return NULL;
}
return (pte_t *) pmd_page(*pmd) + address;
@@ -421,13 +357,13 @@ extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
- address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd))
goto getnew;
if (pmd_bad(*pmd))
goto fix;
- return (pte_t *) (pmd_page(*pmd) + address);
+ return (pte_t *)pmd_page(*pmd) + address;
getnew:
{
unsigned long page = (unsigned long) get_pte_fast();
@@ -435,10 +371,10 @@ getnew:
if (!page)
return get_pte_slow(pmd, address);
pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
- return (pte_t *) (page + address);
+ return (pte_t *)page + address;
}
fix:
- __bad_pte(pmd);
+ __handle_bad_pmd(pmd);
return NULL;
}
@@ -450,11 +386,6 @@ extern inline void pmd_free(pmd_t * pmd)
{
}
-extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
-{
- return (pmd_t *) pgd;
-}
-
#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc
@@ -481,9 +412,8 @@ extern pgd_t swapper_pg_dir[1024];
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
-#define SWP_TYPE(entry) (((entry) >> 1) & 0x3f)
-#define SWP_OFFSET(entry) ((entry) >> 8)
-#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
+#define SWP_TYPE(entry) (((pte_val(entry)) >> 1) & 0x3f)
+#define SWP_OFFSET(entry) ((pte_val(entry)) >> 8)
#define module_map vmalloc
#define module_unmap vfree
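The swap macros now operate on pte_t (SWP_ENTRY moved into
pgtable-2level.h, where it wraps __pte()), keeping the type in bits 1..6
and the offset from bit 8 upward. A round-trip sketch with arbitrary
values:

        pte_t e = SWP_ENTRY(3, 0x1234);
        /* SWP_TYPE(e)   == 3       (bits 1..6)     */
        /* SWP_OFFSET(e) == 0x1234  (bits 8 and up) */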
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 4efcf3fe2..07175eff5 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -33,7 +33,6 @@ struct sh_cpuinfo {
char hard_math;
- /* Not yet used */
unsigned long *pgd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h
index 16f70ffd7..8db187a29 100644
--- a/include/asm-sh/spinlock.h
+++ b/include/asm-sh/spinlock.h
@@ -1,6 +1,53 @@
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H
+#ifndef __SMP__
+
+typedef struct { } spinlock_t;
+#define SPIN_LOCK_UNLOCKED { }
+
+#define spin_lock_init(lock) do { } while(0)
+#define spin_lock(lock) do { } while(0)
+#define spin_trylock(lock) (1)
+#define spin_unlock_wait(lock) do { } while(0)
+#define spin_unlock(lock) do { } while(0)
+#define spin_lock_irq(lock) cli()
+#define spin_unlock_irq(lock) sti()
+
+#define spin_lock_irqsave(lock, flags) save_and_cli(flags)
+#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct { } rwlock_t;
+#define RW_LOCK_UNLOCKED (rwlock_t) { }
+
+#define read_lock(lock) do { } while(0)
+#define read_unlock(lock) do { } while(0)
+#define write_lock(lock) do { } while(0)
+#define write_unlock(lock) do { } while(0)
+#define read_lock_irq(lock) cli()
+#define read_unlock_irq(lock) sti()
+#define write_lock_irq(lock) cli()
+#define write_unlock_irq(lock) sti()
+
+#define read_lock_irqsave(lock, flags) save_and_cli(flags)
+#define read_unlock_irqrestore(lock, flags) restore_flags(flags)
+#define write_lock_irqsave(lock, flags) save_and_cli(flags)
+#define write_unlock_irqrestore(lock, flags) restore_flags(flags)
+
+#else
+
#error "No SMP on SH"
+#endif /* SMP */
+
#endif /* __ASM_SH_SPINLOCK_H */
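On a uniprocessor SH build all of these compile away entirely, or to
plain interrupt masking for the _irq variants, so SMP-style code still
builds unchanged. Typical usage sketch:

        static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
        unsigned long flags;

        spin_lock_irqsave(&driver_lock, flags);      /* here: save_and_cli(flags) */
        /* ... critical section ... */
        spin_unlock_irqrestore(&driver_lock, flags); /* restore_flags(flags) */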
diff --git a/include/asm-sh/string.h b/include/asm-sh/string.h
index 6752d3f9c..56ea9d8e8 100644
--- a/include/asm-sh/string.h
+++ b/include/asm-sh/string.h
@@ -18,9 +18,9 @@ extern __inline__ char *strcpy(char *__dest, const char *__src)
"cmp/eq #0,%2\n\t"
"bf/s 1b\n\t"
" add #1,%0\n\t"
- :"=&r" (__dest), "=&r" (__src), "=&z" (__dummy)
- :"0" (__dest), "1" (__src)
- :"memory");
+ : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
return __xdest;
}
@@ -37,12 +37,12 @@ extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
__asm__ __volatile__(
"1:\n"
"mov.b @%1+,%2\n\t"
- "mov.b %2,@%0\n\t"
- "cmp/eq #0,%2\n\t"
- "bt/s 2f\n\t"
- " cmp/eq %5,%1\n\t"
- "bf/s 1b\n\t"
- " add #1,%0\n"
+ "mov.b %2,@%0\n\t"
+ "cmp/eq #0,%2\n\t"
+ "bt/s 2f\n\t"
+ " cmp/eq %5,%1\n\t"
+ "bf/s 1b\n\t"
+ " add #1,%0\n"
"2:"
: "=r" (__dest), "=r" (__src), "=&z" (__dummy)
: "0" (__dest), "1" (__src), "r" (__src+__n)
@@ -113,6 +113,9 @@ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *__s, int __c, size_t __n);
+
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 40c54212f..d898e3517 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -98,7 +98,7 @@ extern __inline__ void __sti(void)
__asm__ __volatile__("stc sr,%0\n\t"
"and %1,%0\n\t"
"ldc %0,sr"
- : "=&z" (__dummy)
+ : "=&r" (__dummy)
: "r" (0xefffffff)
: "memory");
}
@@ -109,24 +109,24 @@ extern __inline__ void __cli(void)
__asm__ __volatile__("stc sr,%0\n\t"
"or %1,%0\n\t"
"ldc %0,sr"
- : "=&z" (__dummy)
+ : "=&r" (__dummy)
: "r" (0x10000000)
: "memory");
}
#define __save_flags(x) \
-__asm__ __volatile__("stc sr,%0\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop":"=r" (x): /* no inputs */ :"memory")
-
-#define __save_and_cli(x) \
-({ unsigned long __dummy; \
-__asm__ __volatile__( \
- "stc sr,%0\n\t" \
- "mov %0,%1\n\t" \
- "or %2,%1\n\t" \
- "ldc %1,sr" \
- : "=&r" (x), "=&z" (__dummy) \
- : "r" (0x10000000) \
- : "memory"); })
+__asm__ __volatile__("stc sr,%0":"=r" (x): /* no inputs */ :"memory")
+
+#define __save_and_cli(x) \
+x = (__extension__ ({ unsigned long __dummy,__sr; \
+ __asm__ __volatile__( \
+ "stc sr,%1\n\t" \
+ "or %0,%1\n\t" \
+ "stc sr,%0\n\t" \
+ "ldc %1,sr" \
+ : "=r" (__sr), "=&r" (__dummy) \
+ : "0" (0x10000000) \
+ : "memory"); __sr; }))
#define __restore_flags(x) \
__asm__ __volatile__("ldc %0,sr": /* no output */: "r" (x):"memory")
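The rewritten __save_and_cli() reads SR twice: it sets the block bit
(BL, 0x10000000) in a scratch copy, captures the still-unmodified SR as
the statement expression's value, then loads the masked copy, so x ends
up holding the pre-masking status register. Usage sketch of the pair:

        unsigned long flags;

        __save_and_cli(flags);     /* flags := old SR, interrupts now blocked */
        /* ... must not be interrupted ... */
        __restore_flags(flags);    /* put the saved SR back */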