author    Ralf Baechle <ralf@linux-mips.org>  2000-03-12 23:15:27 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-03-12 23:15:27 +0000
commit    ae38fd1e4c98588314a42097c5a5e77dcef23561 (patch)
tree      f9f10c203bb9e5fbad4810d1f8774c08dfad20ff /include/asm-sh
parent    466a823d79f41d0713b272e48fd73e494b0588e0 (diff)
Merge with Linux 2.3.50.
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/cache.h              16
-rw-r--r--  include/asm-sh/div64.h              10
-rw-r--r--  include/asm-sh/dma.h                 8
-rw-r--r--  include/asm-sh/elf.h                 5
-rw-r--r--  include/asm-sh/hardirq.h             6
-rw-r--r--  include/asm-sh/hdreg.h               2
-rw-r--r--  include/asm-sh/highmem.h            85
-rw-r--r--  include/asm-sh/ide.h                20
-rw-r--r--  include/asm-sh/io.h                 36
-rw-r--r--  include/asm-sh/ipcbuf.h             29
-rw-r--r--  include/asm-sh/irq.h                 5
-rw-r--r--  include/asm-sh/mmu_context.h        30
-rw-r--r--  include/asm-sh/msgbuf.h             31
-rw-r--r--  include/asm-sh/page.h                4
-rw-r--r--  include/asm-sh/pci.h               143
-rw-r--r--  include/asm-sh/pgalloc-2level.h     23
-rw-r--r--  include/asm-sh/pgalloc.h           182
-rw-r--r--  include/asm-sh/pgtable-2level.h     28
-rw-r--r--  include/asm-sh/pgtable.h           293
-rw-r--r--  include/asm-sh/posix_types.h         8
-rw-r--r--  include/asm-sh/processor.h          69
-rw-r--r--  include/asm-sh/resource.h            4
-rw-r--r--  include/asm-sh/scatterlist.h        13
-rw-r--r--  include/asm-sh/semaphore-helper.h   14
-rw-r--r--  include/asm-sh/semaphore.h         156
-rw-r--r--  include/asm-sh/sembuf.h             25
-rw-r--r--  include/asm-sh/shmbuf.h             42
-rw-r--r--  include/asm-sh/siginfo.h             2
-rw-r--r--  include/asm-sh/softirq.h            62
-rw-r--r--  include/asm-sh/stat.h               36
-rw-r--r--  include/asm-sh/system.h            130
-rw-r--r--  include/asm-sh/termios.h             1
-rw-r--r--  include/asm-sh/types.h               4
-rw-r--r--  include/asm-sh/unistd.h             80
-rw-r--r--  include/asm-sh/user.h               12
35 files changed, 1041 insertions, 573 deletions
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index f9113e77b..17108905d 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -1,5 +1,7 @@
-/*
+/* $Id: cache.h,v 1.3 1999/12/11 12:31:51 gniibe Exp $
+ *
* include/asm-sh/cache.h
+ *
* Copyright 1999 (C) Niibe Yutaka
*/
#ifndef __ASM_SH_CACHE_H
@@ -12,18 +14,6 @@
#define L1_CACHE_BYTES 32
#endif
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
- __attribute__((__aligned__(L1_CACHE_BYTES), \
- __section__(".data.cacheline_aligned")))
-#endif
-
extern void cache_flush_area(unsigned long start, unsigned long end);
extern void cache_purge_area(unsigned long start, unsigned long end);
extern void cache_wback_area(unsigned long start, unsigned long end);
diff --git a/include/asm-sh/div64.h b/include/asm-sh/div64.h
new file mode 100644
index 000000000..0ac48d107
--- /dev/null
+++ b/include/asm-sh/div64.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_SH_DIV64
+#define __ASM_SH_DIV64
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+#endif /* __ASM_SH_DIV64 */
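For reference, a minimal sketch of how this do_div() is meant to be used (the print_uptime() helper and its values are hypothetical; note that, unlike the true 64-bit versions on some architectures, this one operates on an unsigned long):

/* Sketch: split milliseconds into seconds and a remainder.
 * do_div(n, base) updates n to the quotient in place and
 * evaluates to the remainder. */
#include <linux/kernel.h>
#include <asm/div64.h>

static void print_uptime(unsigned long ms)
{
        unsigned long rem = do_div(ms, 1000);

        printk("uptime: %lu.%03lu s\n", ms, rem);
}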
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h
index 16f54584c..b5a7f0bea 100644
--- a/include/asm-sh/dma.h
+++ b/include/asm-sh/dma.h
@@ -1,14 +1,16 @@
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
-/* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
- occurrence should be flagged as an error. */
+#include <asm/io.h> /* need byte IO */
#define MAX_DMA_CHANNELS 8
/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
+ occurrence should be flagged as an error. */
+/* But... */
/* XXX: This is not applicable to SuperH, just needed for alloc_bootmem */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x10000000)
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index a0e98de5f..72b78b813 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -14,10 +14,7 @@ typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-/* Though SH-3 has no floating point regs.. */
-#define ELF_NFPREG 34
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+typedef struct user_fpu_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sh/hardirq.h b/include/asm-sh/hardirq.h
index bc5866f7e..40ae80f51 100644
--- a/include/asm-sh/hardirq.h
+++ b/include/asm-sh/hardirq.h
@@ -6,13 +6,15 @@
extern unsigned int local_irq_count[NR_CPUS];
#define in_interrupt() (local_irq_count[smp_processor_id()] != 0)
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define hardirq_enter(cpu) (local_irq_count[cpu]++)
-#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+#define irq_enter(cpu, irq) (local_irq_count[cpu]++)
+#define irq_exit(cpu, irq) (local_irq_count[cpu]--)
#define synchronize_irq() barrier()
diff --git a/include/asm-sh/hdreg.h b/include/asm-sh/hdreg.h
index 4a2272c8a..1d417a15c 100644
--- a/include/asm-sh/hdreg.h
+++ b/include/asm-sh/hdreg.h
@@ -7,6 +7,6 @@
#ifndef __ASM_SH_HDREG_H
#define __ASM_SH_HDREG_H
-typedef unsigned short ide_ioreg_t;
+typedef unsigned int ide_ioreg_t;
#endif /* __ASM_SH_HDREG_H */
diff --git a/include/asm-sh/highmem.h b/include/asm-sh/highmem.h
deleted file mode 100644
index bd5564aea..000000000
--- a/include/asm-sh/highmem.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual adresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#include <linux/init.h>
-
-/* undef for production */
-#define HIGHMEM_DEBUG 1
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-
-extern void kmap_init(void) __init;
-
-/* kmap helper functions necessary to access the highmem pages in kernel */
-#include <asm/pgtable.h>
-#include <asm/kmap_types.h>
-
-extern inline unsigned long kmap(struct page *page, enum km_type type)
-{
- if (page < highmem_start_page)
- return page_address(page);
- {
- enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
- unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
-
-#if HIGHMEM_DEBUG
- if (!pte_none(*(kmap_pte-idx)))
- {
- __label__ here;
- here:
- printk(KERN_ERR "not null pte on CPU %d from %p\n",
- smp_processor_id(), &&here);
- }
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
-
- return vaddr;
- }
-}
-
-extern inline void kunmap(unsigned long vaddr, enum km_type type)
-{
-#if HIGHMEM_DEBUG
- enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
- if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
- {
- /* force other mappings to Oops if they'll try to access
- this pte without first remap it */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
- }
-#endif
-}
-
-extern inline void kmap_check(void)
-{
-#if HIGHMEM_DEBUG
- int idx_base = KM_TYPE_NR*smp_processor_id(), i;
- for (i = idx_base; i < idx_base+KM_TYPE_NR; i++)
- if (!pte_none(*(kmap_pte-i)))
- BUG();
-#endif
-}
-#endif /* _ASM_HIGHMEM_H */
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
index b9b3efcb4..95a385596 100644
--- a/include/asm-sh/ide.h
+++ b/include/asm-sh/ide.h
@@ -17,7 +17,7 @@
#include <linux/config.h>
#ifndef MAX_HWIFS
-#define MAX_HWIFS 10
+#define MAX_HWIFS 1 /* XXX: For my board -- gniibe */
#endif
#define ide__sti() __sti()
@@ -25,12 +25,8 @@
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- case 0x1e0: return 8;
- case 0x160: return 12;
+ case 0xba0001f0: return 14;
+ case 0xba000170: return 14;
default:
return 0;
}
@@ -39,12 +35,10 @@ static __inline__ int ide_default_irq(ide_ioreg_t base)
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
+ case 0:
+ return 0xba0001f0;
+ case 1:
+ return 0xba000170;
default:
return 0;
}
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index 677be2445..5a7b0468b 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -14,32 +14,38 @@
#include <asm/cache.h>
+#define inb_p inb
+#define outb_p outb
+
+#define inw_p inw
+#define outw_p outw
+
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
extern __inline__ unsigned long readb(unsigned long addr)
{
- return *(volatile unsigned char*)addr;
+ return *(volatile unsigned char*)addr;
}
extern __inline__ unsigned long readw(unsigned long addr)
{
- return *(volatile unsigned short*)addr;
+ return *(volatile unsigned short*)addr;
}
extern __inline__ unsigned long readl(unsigned long addr)
{
- return *(volatile unsigned long*)addr;
+ return *(volatile unsigned long*)addr;
}
extern __inline__ void writeb(unsigned char b, unsigned long addr)
{
- *(volatile unsigned char*)addr = b;
+ *(volatile unsigned char*)addr = b;
}
extern __inline__ void writew(unsigned short b, unsigned long addr)
{
- *(volatile unsigned short*)addr = b;
+ *(volatile unsigned short*)addr = b;
}
extern __inline__ void writel(unsigned int b, unsigned long addr)
@@ -49,27 +55,27 @@ extern __inline__ void writel(unsigned int b, unsigned long addr)
extern __inline__ unsigned long inb_local(unsigned long addr)
{
- return readb(addr);
+ return readb(addr);
}
extern __inline__ void outb_local(unsigned char b, unsigned long addr)
{
- return writeb(b,addr);
+ return writeb(b,addr);
}
extern __inline__ unsigned long inb(unsigned long addr)
{
- return readb(addr);
+ return readb(addr);
}
extern __inline__ unsigned long inw(unsigned long addr)
{
- return readw(addr);
+ return readw(addr);
}
extern __inline__ unsigned long inl(unsigned long addr)
{
- return readl(addr);
+ return readl(addr);
}
extern __inline__ void insb(unsigned long addr, void *buffer, int count)
@@ -123,6 +129,9 @@ extern __inline__ void outsl(unsigned long addr, const void *buffer, int count)
while(count--) outl(*buf++, addr);
}
+#define ctrl_in(addr) *(addr)
+#define ctrl_out(data,addr) *(addr) = (data)
+
extern __inline__ unsigned long ctrl_inb(unsigned long addr)
{
return *(volatile unsigned char*)addr;
@@ -145,7 +154,7 @@ extern __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
extern __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
{
- *(volatile unsigned short*)addr = b;
+ *(volatile unsigned short*)addr = b;
}
extern __inline__ void ctrl_outl(unsigned int b, unsigned long addr)
@@ -153,12 +162,9 @@ extern __inline__ void ctrl_outl(unsigned int b, unsigned long addr)
*(volatile unsigned long*)addr = b;
}
-#define inb_p inb
-#define outb_p outb
-
#ifdef __KERNEL__
-#define IO_SPACE_LIMIT 0xffff
+#define IO_SPACE_LIMIT 0xffffffff
#include <asm/addrspace.h>
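Since every accessor above is a plain memory-mapped load or store, string I/O is just a loop over one address, and IO_SPACE_LIMIT can cover the whole 32-bit space. A hedged sketch of draining a device FIFO with insb() (the register address is hypothetical):

/* Sketch: read 'count' bytes from a byte-wide FIFO register.
 * 0xba000000 stands in for a real device address. */
static void drain_fifo(unsigned char *dst, int count)
{
        insb(0xba000000, dst, count);
}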
diff --git a/include/asm-sh/ipcbuf.h b/include/asm-sh/ipcbuf.h
new file mode 100644
index 000000000..5ffc9972a
--- /dev/null
+++ b/include/asm-sh/ipcbuf.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_SH_IPCBUF_H__
+#define __ASM_SH_IPCBUF_H__
+
+/*
+ * The ipc64_perm structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __ASM_SH_IPCBUF_H__ */
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index f05fd7ac3..ab492fa5b 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -12,8 +12,8 @@
#include <linux/config.h>
#define TIMER_IRQ 16 /* Hard-wired */
-#define TIMER_IRP_OFFSET 12
-#define TIMER_PRIORITY 1
+#define TIMER_IPR_OFFSET 12
+#define TIMER_PRIORITY 2
#if defined(__SH4__)
/*
@@ -39,5 +39,6 @@ extern void enable_irq(unsigned int);
*/
extern void set_ipr_data(unsigned int irq, int offset, int priority);
extern void make_onChip_irq(unsigned int irq);
+extern void make_imask_irq(unsigned int irq);
#endif /* __ASM_SH_IRQ_H */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index b90be6fbd..e4448a7dd 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -11,9 +11,6 @@
(b) ASID (Address Space IDentifier)
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
-}
/*
* Cache of MMU context last used.
*/
@@ -27,6 +24,11 @@ extern unsigned long mmu_context_cache;
/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID 0x100
+/*
+ * Virtual Page Number mask
+ */
+#define MMU_VPN_MASK 0xfffff000
+
extern __inline__ void
get_new_mmu_context(struct mm_struct *mm)
{
@@ -114,16 +116,22 @@ extern __inline__ void destroy_context(struct mm_struct *mm)
extern __inline__ void set_asid(unsigned long asid)
{
- __asm__ __volatile__ ("mov.l %0,%1"
- : /* no output */
- : "r" (asid), "m" (__m(MMU_PTEH)));
+ unsigned long __dummy;
+
+ __asm__ __volatile__ ("mov.l %2, %0\n\t"
+ "and %3, %0\n\t"
+ "or %1, %0\n\t"
+ "mov.l %0, %2"
+ : "=&r" (__dummy)
+ : "r" (asid), "m" (__m(MMU_PTEH)),
+ "r" (0xffffff00));
}
extern __inline__ unsigned long get_asid(void)
{
unsigned long asid;
- __asm__ __volatile__ ("mov.l %1,%0"
+ __asm__ __volatile__ ("mov.l %1, %0"
: "=r" (asid)
: "m" (__m(MMU_PTEH)));
asid &= MMU_CONTEXT_ASID_MASK;
@@ -146,19 +154,23 @@ extern __inline__ void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk, unsigned int cpu)
{
+ set_bit(cpu, &next->cpu_vm_mask);
if (prev != next) {
unsigned long __pgdir = (unsigned long)next->pgd;
- __asm__ __volatile__("mov.l %0,%1"
+ __asm__ __volatile__("mov.l %0, %1"
: /* no output */
: "r" (__pgdir), "m" (__m(MMU_TTB)));
activate_context(next);
clear_bit(cpu, &prev->cpu_vm_mask);
}
- set_bit(cpu, &next->cpu_vm_mask);
}
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL,smp_processor_id())
+extern __inline__ void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
#endif /* __ASM_SH_MMU_CONTEXT_H */
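The reworked set_asid() above now performs a read-modify-write of MMU_PTEH so that only the low (ASID) byte changes. A C-level sketch of the same logic, assuming the ctrl_inl()/ctrl_outl() accessors from <asm/io.h> (the real code stays in inline assembly to keep the load/store sequence free of intervening memory accesses):

/* Sketch only: what the inline asm in set_asid() computes. */
static void set_asid_sketch(unsigned long asid)
{
        unsigned long pteh = ctrl_inl(MMU_PTEH);

        pteh &= 0xffffff00;     /* drop the old ASID byte */
        pteh |= asid;           /* insert the new ASID    */
        ctrl_outl(pteh, MMU_PTEH);
}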
diff --git a/include/asm-sh/msgbuf.h b/include/asm-sh/msgbuf.h
new file mode 100644
index 000000000..517432343
--- /dev/null
+++ b/include/asm-sh/msgbuf.h
@@ -0,0 +1,31 @@
+#ifndef __ASM_SH_MSGBUF_H
+#define __ASM_SH_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* __ASM_SH_MSGBUF_H */
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 24a374d4b..23309458c 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -19,6 +19,7 @@
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PTE_MASK PAGE_MASK
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -39,6 +40,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sh/pci.h b/include/asm-sh/pci.h
new file mode 100644
index 000000000..e8f6e1d66
--- /dev/null
+++ b/include/asm-sh/pci.h
@@ -0,0 +1,143 @@
+#ifndef __ASM_SH_PCI_H
+#define __ASM_SH_PCI_H
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses() 0
+
+#ifdef __KERNEL__
+
+/* Dynamic DMA mapping stuff.
+ * SuperH has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what as passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size)
+{
+ return virt_to_bus(ptr);
+}
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guarenteed to see
+ * whatever the device wrote there.
+ */
+extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size)
+{
+ /* Nothing to do */
+}
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scather-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ return nents;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ /* Nothing to do */
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
+ dma_addr_t dma_handle,
+ size_t size)
+{
+ /* Nothing to do */
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
+ struct scatterlist *sg,
+ int nelems)
+{
+ /* Nothing to do */
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg) ((sg)->length)
+
+#endif /* __KERNEL__ */
+
+
+#endif /* __ASM_SH_PCI_H */
+
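The streaming half of this API follows the usual map/use/unmap pattern; on SuperH the calls collapse to virt_to_bus() and no-ops as shown above, but drivers should still pair them. A sketch with hypothetical device plumbing:

/* Sketch: streaming DMA for a transmit buffer. 'pdev', 'buf',
 * 'len' and hw_start_tx() are hypothetical driver state. */
static void example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t bus = pci_map_single(pdev, buf, len);

        hw_start_tx(bus, len);          /* device now owns the buffer */

        /* ...later, once the device signals completion: */
        pci_unmap_single(pdev, bus, len);
        /* CPU reads of 'buf' now see what the device wrote */
}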
diff --git a/include/asm-sh/pgalloc-2level.h b/include/asm-sh/pgalloc-2level.h
new file mode 100644
index 000000000..a106b23a3
--- /dev/null
+++ b/include/asm-sh/pgalloc-2level.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_SH_PGALLOC_2LEVEL_H
+#define __ASM_SH_PGALLOC_2LEVEL_H
+
+/*
+ * traditional two-level paging, page table allocation routines:
+ */
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
+extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ if (!pgd)
+ BUG();
+ return (pmd_t *) pgd;
+}
+
+#endif /* __ASM_SH_PGALLOC_2LEVEL_H */
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
new file mode 100644
index 000000000..0b728aee3
--- /dev/null
+++ b/include/asm-sh/pgalloc.h
@@ -0,0 +1,182 @@
+#ifndef __ASM_SH_PGALLOC_H
+#define __ASM_SH_PGALLOC_H
+
+#include <asm/processor.h>
+#include <linux/threads.h>
+
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+#include <asm/pgalloc-2level.h>
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (ret) {
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+#define pte_free_kernel(pte) free_pte_slow(pte)
+#define pte_free(pte) free_pte_slow(pte)
+#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+{
+ if (!pmd)
+ BUG();
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t * page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(page)));
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+
+ if (pmd_none(*pmd))
+ goto getnew;
+ if (pmd_bad(*pmd))
+ goto fix;
+ return (pte_t *)pmd_page(*pmd) + address;
+getnew:
+{
+ unsigned long page = (unsigned long) get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(page)));
+ return (pte_t *)page + address;
+}
+fix:
+ __handle_bad_pmd(pmd);
+ return NULL;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+extern inline void pmd_free(pmd_t * pmd)
+{
+}
+
+#define pmd_free_kernel pmd_free
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+}
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ *
+ */
+
+extern void flush_tlb(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+#endif /* __ASM_SH_PGALLOC_H */
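The *_fast/*_slow pairs above implement the usual quicklist scheme: a freed table page is pushed onto a singly linked free list threaded through its own first word, and allocation pops it back off before falling back to the page allocator. A standalone illustration of that linkage:

/* Sketch of the quicklist idea (cache-size accounting omitted). */
static unsigned long *quicklist;                /* list head */

static void quick_free(unsigned long *page)
{
        *page = (unsigned long)quicklist;       /* link to the old head */
        quicklist = page;
}

static unsigned long *quick_alloc(void)
{
        unsigned long *page = quicklist;

        if (page) {
                quicklist = (unsigned long *)*page;     /* pop the head */
                page[0] = 0;            /* re-clear the link word */
        }
        return page;                    /* NULL means: take the slow path */
}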
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
index 2ec0bcbcc..8fc2666e7 100644
--- a/include/asm-sh/pgtable-2level.h
+++ b/include/asm-sh/pgtable-2level.h
@@ -34,6 +34,19 @@ extern inline int pgd_bad(pgd_t pgd) { return 0; }
extern inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+/*
+ * (pmds are folded into pgds so this doesnt get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+
#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
@@ -42,19 +55,4 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
return (pmd_t *) dir;
}
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- return (pmd_t *) pgd;
-}
-
#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index d5744c35d..ca6fffbca 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -13,6 +13,7 @@
#include <linux/threads.h>
extern pgd_t swapper_pg_dir[1024];
+extern void paging_init(void);
#if defined(__sh3__)
/* Cache flushing:
@@ -32,6 +33,7 @@ extern pgd_t swapper_pg_dir[1024];
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
#elif defined(__SH4__)
/*
* Caches are broken on SH-4, so we need them.
@@ -41,48 +43,28 @@ extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_page_to_ram(unsigned long page);
+extern void __flush_page_to_ram(unsigned long page_va);
+#define flush_page_to_ram(page) __flush_page_to_ram(page_address(page))
extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
#endif
-/* TLB flushing:
- *
- * - flush_tlb_all() flushes all processes TLB entries
- * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(mm, vmaddr) flushes a single page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
- */
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
/*
* Basically we have the same two-level (which is the logical three level
* Linux page table layout folded) page tables as the i386.
*/
-#endif /* !__ASSEMBLY__ */
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+#endif /* !__ASSEMBLY__ */
#include <asm/pgtable-2level.h>
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-
#define __beep() asm("")
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -105,10 +87,10 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END P4SEG
-#define _PAGE_READ 0x001 /* software: read access alowed */
+#define _PAGE_READ 0x001 /* software: read access allowed */
#define _PAGE_ACCESSED 0x002 /* software: page referenced */
#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
-/* 0x008 */
+#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
/* 0x010 */
#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */
@@ -117,26 +99,26 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
#if defined(__sh3__)
/* Mask which drop software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff164
-/* Flags defalult: SZ=1 (4k-byte), C=1 (cachable), SH=0 (not shared) */
-#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000018
+#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff16c
+/* Flags defalult: SZ=1 (4k-byte), C=0 (non-cachable), SH=0 (not shared) */
+#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000010
#elif defined(__SH4__)
/* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff164
-/* Flags defalult: SZ=01 (4k-byte), C=1 (cachable), SH=0 (not shared), WT=0 */
-#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000018
+#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff16c
+/* Flags defalult: SZ=01 (4k-byte), C=0 (non-cachable), SH=0 (not shared), WT=0 */
+#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000010
#endif
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
* As i386 and MIPS, SuperH can't do page protection for execute, and
@@ -163,23 +145,6 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
#define __S111 PAGE_SHARED
/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern pte_t __bad_page(void);
-extern pte_t * __bad_pagetable(void);
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
-
-/*
* Handling allocation failures during page table setup.
*/
extern void __handle_bad_pmd(pmd_t * pmd);
@@ -187,19 +152,19 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
#define pte_pagenr(x) ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
/*
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
-#define page_address(page) ({ PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT) + __MEMORY_START; })
+#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
@@ -212,39 +177,40 @@ extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-
-extern inline pte_t pte_rdprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_exprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_RW; return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+
+extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
+ *
+ * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
-extern inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
- pte_t __pte;
-
- pte_val(__pte) = (page-mem_map)*(unsigned long long)PAGE_SIZE +
- __MEMORY_START + pgprot_val(pgprot);
- return __pte;
-}
+#define mk_pte(page,pgprot) \
+({ pte_t __pte; \
+ \
+ set_pte(&__pte, __pte(((page)-mem_map) * \
+ (unsigned long long)PAGE_SIZE + pgprot_val(pgprot) + \
+ __MEMORY_START)); \
+ __pte; \
+})
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+({ pte_t __pte; set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); __pte; })
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define pmd_page(pmd) \
@@ -267,157 +233,6 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
__pte_offset(address))
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret) {
- /* Clear User space */
- memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
- /* XXX: Copy vmalloc-ed space??? */
- memcpy(ret + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return ret;
-}
-
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
- unsigned long *ret;
-
- if ((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
-{
- free_page((unsigned long)pgd);
-}
-
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-extern __inline__ pte_t *get_pte_fast(void)
-{
- unsigned long *ret;
-
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
-extern __inline__ void free_pte_fast(pte_t *pte)
-{
- *(unsigned long *)pte = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pte;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pte_slow(pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-#define pte_free_kernel(pte) free_pte_slow(pte)
-#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
-
-extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_pte_fast();
-
- if (!page)
- return get_pte_kernel_slow(pmd, address);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
-extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd))
- goto getnew;
- if (pmd_bad(*pmd))
- goto fix;
- return (pte_t *)pmd_page(*pmd) + address;
-getnew:
-{
- unsigned long page = (unsigned long) get_pte_fast();
-
- if (!page)
- return get_pte_slow(pmd, address);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
- return (pte_t *)page + address;
-}
-fix:
- __handle_bad_pmd(pmd);
- return NULL;
-}
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free(pmd_t * pmd)
-{
-}
-
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
-
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-}
-
-extern pgd_t swapper_pg_dir[1024];
-
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
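All of the pte helpers above are now value transformers built on set_pte()/__pte() rather than direct pte_val() assignments. A sketch of how they combine to install one mapping (the function and its ptep/page inputs are hypothetical):

/* Sketch: build a pte with mk_pte() and install it. */
static void map_one_page(pte_t *ptep, struct page *page)
{
        pte_t pte = mk_pte(page, PAGE_KERNEL);  /* frame + protection */

        pte = pte_mkdirty(pte);                 /* software D-bit */
        set_pte(ptep, pte);                     /* single store */
}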
diff --git a/include/asm-sh/posix_types.h b/include/asm-sh/posix_types.h
index dbf6dce6d..a1d59140d 100644
--- a/include/asm-sh/posix_types.h
+++ b/include/asm-sh/posix_types.h
@@ -24,6 +24,13 @@ typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
@@ -38,6 +45,7 @@ typedef struct {
} __kernel_fsid_t;
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index ae8b036c2..962461a33 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -1,7 +1,7 @@
/*
* include/asm-sh/processor.h
*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*/
#ifndef __ASM_SH_PROCESSOR_H
@@ -15,7 +15,7 @@
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
-#define current_text_addr() ({ void *pc; __asm__("mova 1f,%0\n1:":"=z" (pc)); pc; })
+#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; })
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
@@ -53,6 +53,15 @@ extern struct sh_cpuinfo boot_cpu_data;
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+/*
+ * FPU structure and data
+ */
+/* FD-bit of SR register.
+ * When it's set, it means the processor doesn't have right to use FPU,
+ * and it results exception when the floating operation is executed.
+ */
+#define SR_FD 0x00008000
+
#define NUM_FPU_REGS 16
struct sh_fpu_hard_struct {
@@ -67,9 +76,9 @@ struct sh_fpu_hard_struct {
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[NUM_FPU_REGS];
- unsigned long xf_regs[NUM_FPU_REGS];
unsigned long fpscr;
unsigned long fpul;
+ unsigned long xf_regs[NUM_FPU_REGS];
unsigned char lookahead;
unsigned long entry_pc;
@@ -98,9 +107,9 @@ struct thread_struct {
#define INIT_THREAD { \
sizeof(init_stack) + (long) &init_stack, /* sp */ \
0, /* pc */ \
- 0, 0, \
- 0, \
- {{{0,}},} \
+ 0, 0, \
+ 0, \
+ {{{0,}},} /* fpu state */ \
}
/*
@@ -139,20 +148,19 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define forget_segments() do { } while (0)
/*
- * FPU lazy state save handling..
+ * FPU lazy state save handling.
*/
-#define SR_FD 0x00008000
extern __inline__ void release_fpu(void)
{
unsigned long __dummy;
/* Set FD flag in SR */
- __asm__ __volatile__ ("stc sr,%0\n\t"
- "or %1,%0\n\t"
- "ldc %0,sr"
- : "=&r" (__dummy)
- : "r" (SR_FD));
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "or %1, %0\n\t"
+ "ldc %0, $sr"
+ : "=&r" (__dummy)
+ : "r" (SR_FD));
}
extern __inline__ void grab_fpu(void)
@@ -160,25 +168,25 @@ extern __inline__ void grab_fpu(void)
unsigned long __dummy;
/* Clear out FD flag in SR */
- __asm__ __volatile__ ("stc sr,%0\n\t"
- "and %1,%0\n\t"
- "ldc %0,sr"
- : "=&r" (__dummy)
- : "r" (~SR_FD));
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "and %1, %0\n\t"
+ "ldc %0, $sr"
+ : "=&r" (__dummy)
+ : "r" (~SR_FD));
}
extern void save_fpu(struct task_struct *__tsk);
-#define unlazy_fpu(tsk) do { \
- if (tsk->flags & PF_USEDFPU) \
- save_fpu(tsk); \
+#define unlazy_fpu(tsk) do { \
+ if ((tsk)->flags & PF_USEDFPU) { \
+ grab_fpu(); \
+ save_fpu(tsk); \
+ } \
} while (0)
-#define clear_fpu(tsk) do { \
- if (tsk->flags & PF_USEDFPU) { \
- tsk->flags &= ~PF_USEDFPU; \
- release_fpu(); \
- } \
+#define clear_fpu(tsk) do { \
+ if ((tsk)->flags & PF_USEDFPU) \
+ (tsk)->flags &= ~PF_USEDFPU; \
} while (0)
/*
@@ -189,13 +197,7 @@ extern __inline__ unsigned long thread_saved_pc(struct thread_struct *t)
return t->pc;
}
-static inline unsigned long get_wchan(struct task_struct *p)
-{
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- /* FIXME: here the actual wchan calculation should sit */
- return 0;
-}
+extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
@@ -203,6 +205,7 @@ static inline unsigned long get_wchan(struct task_struct *p)
#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct * alloc_task_struct(void);
extern void free_task_struct(struct task_struct *);
+#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
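Under the SR_FD scheme above, FPU context is handled lazily: the FD bit keeps the FPU disabled until a task traps on its first floating-point instruction, and state is only saved for tasks that actually used it. A sketch of the switch-out side (the surrounding switch_to() machinery is not shown, and this exact pairing is an assumption):

/* Sketch: lazy FPU hand-off when 'prev' is scheduled out. */
static void fpu_switch_out(struct task_struct *prev)
{
        unlazy_fpu(prev);  /* grab_fpu() + save_fpu() iff PF_USEDFPU */
        release_fpu();     /* set FD: next FP insn traps and reloads */
}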
diff --git a/include/asm-sh/resource.h b/include/asm-sh/resource.h
index ef89ec7e6..084ad61ec 100644
--- a/include/asm-sh/resource.h
+++ b/include/asm-sh/resource.h
@@ -18,14 +18,14 @@
#define RLIM_NLIMITS 10
+#ifdef __KERNEL__
+
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*/
#define RLIM_INFINITY (~0UL)
-#ifdef __KERNEL__
-
#define INIT_RLIMITS \
{ \
{ RLIM_INFINITY, RLIM_INFINITY }, \
diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h
new file mode 100644
index 000000000..a6d0b59c5
--- /dev/null
+++ b/include/asm-sh/scatterlist.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SH_SCATTERLIST_H
+#define __ASM_SH_SCATTERLIST_H
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of actual if address is a
+ * dma indirect buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0x1fffffff)
+
+#endif /* !(__ASM_SH_SCATTERLIST_H) */
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
index fbd6c95c0..bd8230c36 100644
--- a/include/asm-sh/semaphore-helper.h
+++ b/include/asm-sh/semaphore-helper.h
@@ -16,7 +16,7 @@
*/
static __inline__ void wake_one_more(struct semaphore * sem)
{
- atomic_inc((atomic_t *)&sem->waking);
+ atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
@@ -25,8 +25,8 @@ static __inline__ int waking_non_zero(struct semaphore *sem)
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
@@ -50,8 +50,8 @@ static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
@@ -76,10 +76,10 @@ static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
int ret = 1;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
+ if (sem->sleepers <= 0)
atomic_inc(&sem->count);
else {
- sem->waking--;
+ sem->sleepers--;
ret = 0;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 4164b7805..8b75e34a4 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -9,7 +9,7 @@
* (C) Copyright 1996 Linus Torvalds
*
* SuperH verison by Niibe Yutaka
- *
+ * (Currently no asm implementation but generic C code...)
*/
#include <linux/spinlock.h>
@@ -19,7 +19,7 @@
struct semaphore {
atomic_t count;
- int waking;
+ int sleepers;
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
@@ -55,7 +55,7 @@ extern inline void sema_init (struct semaphore *sem, int val)
* GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
*/
atomic_set(&sem->count, val);
- sem->waking = 0;
+ sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
@@ -72,15 +72,20 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
+#if 0
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
+#endif
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
+extern struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry);
+extern struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry);
+asmlinkage struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);
extern spinlock_t semaphore_wake_lock;
@@ -131,4 +136,149 @@ extern __inline__ void up(struct semaphore * sem)
__up(sem);
}
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * SuperH version by Niibe Yutaka
+ */
+struct rw_semaphore {
+ atomic_t count;
+ volatile unsigned char write_bias_granted;
+ volatile unsigned char read_bias_granted;
+ volatile unsigned char pad1;
+ volatile unsigned char pad2;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#define RW_LOCK_BIAS 0x01000000
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ atomic_set(&sem->count, RW_LOCK_BIAS);
+ sem->read_bias_granted = 0;
+ sem->write_bias_granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+ if (sem->__magic != (long)&sem->__magic)
+ BUG();
+#endif
+ if ((new = atomic_dec_return(&sem->count)) < 0)
+ __down_read(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+ if (sem->__magic != (long)&sem->__magic)
+ BUG();
+#endif
+ if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count)) != 0)
+ __down_write(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void __up_read(struct rw_semaphore *sem)
+{
+ if (atomic_inc_return(&sem->count) == 0)
+ __rwsem_wake(sem);
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void __up_write(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+
+ new = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+ if (saved < 0 && new >= 0)
+ __rwsem_wake(sem);
+}
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+ __up_read(sem);
+}
+
+extern inline void up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+ __up_write(sem);
+}
+
#endif /* __ASM_SH_SEMAPHORE_H */
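These reader/writer semaphores use the familiar biased-count design (RW_LOCK_BIAS), with the slow paths deferred to __down_read()/__down_write()/__rwsem_wake(). Typical usage, sketched with a hypothetical shared structure:

/* Sketch: many concurrent readers, exclusive writers. */
static DECLARE_RWSEM(my_sem);

static void reader(void)
{
        down_read(&my_sem);
        /* read the shared data; other readers may run too */
        up_read(&my_sem);
}

static void writer(void)
{
        down_write(&my_sem);
        /* exclusive access: no readers, no other writers */
        up_write(&my_sem);
}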
diff --git a/include/asm-sh/sembuf.h b/include/asm-sh/sembuf.h
new file mode 100644
index 000000000..d79f3bd57
--- /dev/null
+++ b/include/asm-sh/sembuf.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_SH_SEMBUF_H
+#define __ASM_SH_SEMBUF_H
+
+/*
+ * The semid64_ds structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH_SEMBUF_H */
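
For orientation, a user-space sketch of the call that ultimately fills this
structure.  semctl(2) is the standard SysV interface; whether a given libc
marshals through semid64_ds (rather than the old semid_ds) depends on the
library, and sem_count is a made-up helper:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Callers must define semun themselves on Linux. */
union semun { int val; struct semid_ds *buf; unsigned short *array; };

long sem_count(int semid)
{
	struct semid_ds ds;
	union semun arg;

	arg.buf = &ds;
	if (semctl(semid, 0, IPC_STAT, arg) < 0)
		return -1;
	return ds.sem_nsems;	/* mirrors sem_nsems above */
}
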
diff --git a/include/asm-sh/shmbuf.h b/include/asm-sh/shmbuf.h
new file mode 100644
index 000000000..b2101f490
--- /dev/null
+++ b/include/asm-sh/shmbuf.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_SH_SHMBUF_H
+#define __ASM_SH_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for the SuperH architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH_SHMBUF_H */
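
Similarly for shared memory; shmctl(2) with IPC_STAT is the standard route
to this layout (attach_count is a made-up helper):

#include <sys/ipc.h>
#include <sys/shm.h>

long attach_count(int shmid)
{
	struct shmid_ds ds;

	if (shmctl(shmid, IPC_STAT, &ds) < 0)
		return -1;
	return ds.shm_nattch;	/* mirrors shm_nattch above */
}
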
diff --git a/include/asm-sh/siginfo.h b/include/asm-sh/siginfo.h
index ca0b5a157..5a2a2b493 100644
--- a/include/asm-sh/siginfo.h
+++ b/include/asm-sh/siginfo.h
@@ -149,7 +149,7 @@ typedef struct siginfo {
#define CLD_TRAPPED 4 /* traced child has trapped */
#define CLD_STOPPED 5 /* child has stopped */
#define CLD_CONTINUED 6 /* stopped child has continued */
-#define NSIGCHLD
+#define NSIGCHLD 6
/*
* SIGPOLL si_codes
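
The one-character change above matters to anything that range-checks
si_code; a sketch, assuming the usual CLD_* values (CLD_EXITED is 1):

/* With the old empty definition this did not even parse;
 * with NSIGCHLD == 6 it accepts CLD_EXITED..CLD_CONTINUED. */
static int valid_sigchld_code(int code)
{
	return code >= CLD_EXITED && code <= NSIGCHLD;
}
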
diff --git a/include/asm-sh/softirq.h b/include/asm-sh/softirq.h
index a1ff593fd..fdc6afc61 100644
--- a/include/asm-sh/softirq.h
+++ b/include/asm-sh/softirq.h
@@ -1,71 +1,17 @@
#ifndef __ASM_SH_SOFTIRQ_H
#define __ASM_SH_SOFTIRQ_H
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
extern unsigned int local_bh_count[NR_CPUS];
#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(x) atomic_clear_mask((x),(atomic_t *)&bh_active)
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* __ASM_SH_SOFTIRQ_H */
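
What survives is just the disable/enable pair plus the new in_softirq()
test; a sketch of the intended pattern (struct my_stats is hypothetical):

struct my_stats { unsigned long packets; };	/* made-up shared object */

static void bump_packets(struct my_stats *s)
{
	local_bh_disable();	/* bumps local_bh_count[] for this CPU ... */
	s->packets++;		/* ... so a bottom half cannot race us here */
	local_bh_enable();
	/* in_softirq() would report true inside such a BH handler */
}
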
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h
index 5e484792a..661154807 100644
--- a/include/asm-sh/stat.h
+++ b/include/asm-sh/stat.h
@@ -38,4 +38,40 @@ struct stat {
unsigned long __unused5;
};
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+ unsigned short st_dev;
+ unsigned char __pad0[10];
+
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned short st_rdev;
+ unsigned char __pad3[10];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+ unsigned long __pad4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long __pad5;
+
+ unsigned long st_mtime;
+ unsigned long __pad6;
+
+ unsigned long st_ctime;
+ unsigned long __pad7; /* will be high 32 bits of ctime someday */
+
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
#endif /* __ASM_SH_STAT_H */
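
A sketch of how user space could reach this structure at this point,
using the _syscall2() stub macro from <asm/unistd.h> (reworked further
down in this patch); the wrapper name is arbitrary, errno handling comes
from __syscall_return, and a real libc would supply its own stub:

#include <asm/unistd.h>

_syscall2(int, stat64, const char *, path, struct stat64 *, buf)

long long file_size(const char *path)
{
	struct stat64 st;

	if (stat64(path, &st) < 0)
		return -1;
	return st.st_size;	/* full 64 bits, unlike old struct stat */
}
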
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index d898e3517..e76071bf2 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -2,7 +2,7 @@
#define __ASM_SH_SYSTEM_H
/*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*/
/*
@@ -19,44 +19,44 @@ typedef struct {
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
register struct task_struct *__last; \
- register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
- register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
- register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
- register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
- register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
- register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
+ register unsigned long *__ts1 __asm__ ("$r1") = &prev->thread.sp; \
+ register unsigned long *__ts2 __asm__ ("$r2") = &prev->thread.pc; \
+ register unsigned long *__ts4 __asm__ ("$r4") = (unsigned long *)prev; \
+ register unsigned long *__ts5 __asm__ ("$r5") = (unsigned long *)next; \
+ register unsigned long *__ts6 __asm__ ("$r6") = &next->thread.sp; \
+ register unsigned long __ts7 __asm__ ("$r7") = next->thread.pc; \
__asm__ __volatile__ (".balign 4\n\t" \
- "stc.l gbr,@-r15\n\t" \
- "sts.l pr,@-r15\n\t" \
- "mov.l r8,@-r15\n\t" \
- "mov.l r9,@-r15\n\t" \
- "mov.l r10,@-r15\n\t" \
- "mov.l r11,@-r15\n\t" \
- "mov.l r12,@-r15\n\t" \
- "mov.l r13,@-r15\n\t" \
- "mov.l r14,@-r15\n\t" \
- "mov.l r15,@r1 ! save SP\n\t" \
- "mov.l @r6,r15 ! change to new stack\n\t" \
- "mov.l %0,@-r15 ! push R0 onto new stack\n\t" \
- "mova 1f,%0\n\t" \
- "mov.l %0,@r2 ! save PC\n\t" \
- "mov.l 2f,%0\n\t" \
+ "stc.l $gbr, @-$r15\n\t" \
+ "sts.l $pr, @-$r15\n\t" \
+ "mov.l $r8, @-$r15\n\t" \
+ "mov.l $r9, @-$r15\n\t" \
+ "mov.l $r10, @-$r15\n\t" \
+ "mov.l $r11, @-$r15\n\t" \
+ "mov.l $r12, @-$r15\n\t" \
+ "mov.l $r13, @-$r15\n\t" \
+ "mov.l $r14, @-$r15\n\t" \
+ "mov.l $r15, @$r1 ! save SP\n\t" \
+ "mov.l @$r6, $r15 ! change to new stack\n\t" \
+ "mov.l %0, @-$r15 ! push R0 onto new stack\n\t" \
+ "mova 1f, %0\n\t" \
+ "mov.l %0, @$r2 ! save PC\n\t" \
+ "mov.l 2f, %0\n\t" \
"jmp @%0 ! call __switch_to\n\t" \
- " lds r7,pr ! with return to new PC\n\t" \
+ " lds $r7, $pr ! with return to new PC\n\t" \
".balign 4\n" \
"2:\n\t" \
- ".long " "_" "__switch_to\n" \
+ ".long " "__switch_to\n" \
"1:\n\t" \
- "mov.l @r15+,%0 ! pop R0 from new stack\n\t" \
- "mov.l @r15+,r14\n\t" \
- "mov.l @r15+,r13\n\t" \
- "mov.l @r15+,r12\n\t" \
- "mov.l @r15+,r11\n\t" \
- "mov.l @r15+,r10\n\t" \
- "mov.l @r15+,r9\n\t" \
- "mov.l @r15+,r8\n\t" \
- "lds.l @r15+,pr\n\t" \
- "ldc.l @r15+,gbr\n\t" \
+ "mov.l @$r15+, %0 ! pop R0 from new stack\n\t" \
+ "mov.l @$r15+, $r14\n\t" \
+ "mov.l @$r15+, $r13\n\t" \
+ "mov.l @$r15+, $r12\n\t" \
+ "mov.l @$r15+, $r11\n\t" \
+ "mov.l @$r15+, $r10\n\t" \
+ "mov.l @$r15+, $r9\n\t" \
+ "mov.l @$r15+, $r8\n\t" \
+ "lds.l @$r15+, $pr\n\t" \
+ "ldc.l @$r15+, $gbr\n\t" \
:"=&z" (__last) \
:"0" (prev), \
"r" (__ts1), "r" (__ts2), \
@@ -95,9 +95,9 @@ extern __inline__ void __sti(void)
{
unsigned long __dummy;
- __asm__ __volatile__("stc sr,%0\n\t"
- "and %1,%0\n\t"
- "ldc %0,sr"
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "and %1, %0\n\t"
+ "ldc %0, $sr"
: "=&r" (__dummy)
: "r" (0xefffffff)
: "memory");
@@ -106,30 +106,46 @@ extern __inline__ void __sti(void)
extern __inline__ void __cli(void)
{
unsigned long __dummy;
- __asm__ __volatile__("stc sr,%0\n\t"
- "or %1,%0\n\t"
- "ldc %0,sr"
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "or %1, %0\n\t"
+ "ldc %0, $sr"
: "=&r" (__dummy)
: "r" (0x10000000)
: "memory");
}
-#define __save_flags(x) \
-__asm__ __volatile__("stc sr,%0":"=r" (x): /* no inputs */ :"memory")
+#define __save_flags(x) \
+x = (__extension__ ({ unsigned long __sr; \
+ __asm__ __volatile__( \
+ "stc $sr, %0" \
+ : "=r" (__sr) \
+ : /* no inputs */ \
+ : "memory"); \
+ (__sr & 0xffff7f0f);}))
#define __save_and_cli(x) \
x = (__extension__ ({ unsigned long __dummy,__sr; \
__asm__ __volatile__( \
- "stc sr,%1\n\t" \
- "or %0,%1\n\t" \
- "stc sr,%0\n\t" \
- "ldc %1,sr" \
+ "stc $sr, %1\n\t" \
+ "or %0, %1\n\t" \
+ "stc $sr, %0\n\t" \
+ "ldc %1, $sr" \
: "=r" (__sr), "=&r" (__dummy) \
: "0" (0x10000000) \
- : "memory"); __sr; }))
-
-#define __restore_flags(x) \
-__asm__ __volatile__("ldc %0,sr": /* no output */: "r" (x):"memory")
+ : "memory"); (__sr & 0xffff7f0f); }))
+
+#define __restore_flags(x) do { \
+ unsigned long __dummy; \
+ __asm__ __volatile__( \
+ "stc $sr, %0\n\t" \
+ "and %1, %0\n\t" \
+ "or %2, %0\n\t" \
+ "ldc %0, $sr" \
+ : "=&r" (__dummy) \
+ : "r" (0x000080f0), /* IMASK+FD */ \
+ "r" (x) \
+ : "memory"); \
+} while (0)
/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)
@@ -169,12 +185,26 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
return retval;
}
+extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ save_and_cli(flags);
+ retval = *m;
+ *m = val & 0xff;
+ restore_flags(flags);
+ return retval;
+}
+
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return xchg_u32(ptr, x);
break;
+ case 1:
+ return xchg_u8(ptr, x);
+ break;
}
__xchg_called_with_bad_pointer();
return x;
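
Two sketches of what the reworked primitives are for, assuming the usual
UP spellings save_and_cli()/restore_flags() and the generic xchg() macro
that dispatches to __xchg() by operand size (my_lock_byte is made up).
Note that interrupt blocking here works through the BL bit, and that
__restore_flags deliberately keeps the current IMASK and FD bits (the
0x000080f0 mask) instead of overwriting SR wholesale:

static unsigned char my_lock_byte;

static void touch_shared(void)
{
	unsigned long flags;

	save_and_cli(flags);	/* set BL, remember the rest of SR */
	/* ... data shared with an interrupt handler ... */
	restore_flags(flags);	/* put saved bits back, keep current IMASK/FD */
}

static int try_lock(void)
{
	return xchg(&my_lock_byte, 1) == 0;	/* byte operand: xchg_u8() */
}
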
diff --git a/include/asm-sh/termios.h b/include/asm-sh/termios.h
index 4e334d49a..106e675d9 100644
--- a/include/asm-sh/termios.h
+++ b/include/asm-sh/termios.h
@@ -35,6 +35,7 @@ struct termio {
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
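
The new modem-control bit is driven through the standard TIOCMGET/TIOCMSET
ioctls; a user-space sketch (set_loopback is a made-up helper, and only
drivers that honour TIOCM_LOOP will react):

#include <sys/ioctl.h>
#include <termios.h>

int set_loopback(int fd, int on)
{
	int bits;

	if (ioctl(fd, TIOCMGET, &bits) < 0)
		return -1;
	if (on)
		bits |= TIOCM_LOOP;
	else
		bits &= ~TIOCM_LOOP;
	return ioctl(fd, TIOCMSET, &bits);
}
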
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
index 2782ac295..cb0f9f83a 100644
--- a/include/asm-sh/types.h
+++ b/include/asm-sh/types.h
@@ -41,6 +41,10 @@ typedef unsigned long long u64;
#define BITS_PER_LONG 32
+/* DMA addresses are 32 bits wide. */
+
+typedef u32 dma_addr_t;
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_TYPES_H */
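
The point of the typedef is to keep bus addresses out of pointer types; a
sketch of a driver-private descriptor holding both views of one buffer
(all names hypothetical):

struct my_dma_buf {
	void       *cpu_addr;	/* kernel virtual address, for the CPU */
	dma_addr_t  bus_addr;	/* 32-bit address as the device sees it */
};
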
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index c416fda37..20f5f659b 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -31,7 +31,7 @@
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
-#define __NR_oldumount 22
+#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
@@ -61,7 +61,7 @@
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
-#define __NR_umount 52
+#define __NR_umount2 52
#define __NR_lock 53
#define __NR_ioctl 54
#define __NR_fcntl 55
@@ -85,7 +85,7 @@
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
-#define __NR_getrlimit 76
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
@@ -200,6 +200,33 @@
#define __NR_streams1 188 /* some people actually want it */
#define __NR_streams2 189 /* some people actually want it */
#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
/* user-visible error numbers are in the range -1 - -125: see <asm-sh/errno.h> */
@@ -219,7 +246,7 @@ do { \
#define _syscall0(type,name) \
type name(void) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0) \
@@ -230,8 +257,8 @@ __syscall_return(type,__sc0); \
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4) \
@@ -242,9 +269,9 @@ __syscall_return(type,__sc0); \
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5) \
@@ -255,10 +282,10 @@ __syscall_return(type,__sc0); \
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
@@ -269,11 +296,11 @@ __syscall_return(type,__sc0); \
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc6 __asm__ ("r7") = (long) arg4; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
+register long __sc7 __asm__ ("$r7") = (long) arg4; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \
@@ -282,6 +309,23 @@ __asm__ __volatile__ ("trapa #0" \
__syscall_return(type,__sc0); \
}
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
+register long __sc7 __asm__ ("$r7") = (long) arg4; \
+__asm__ __volatile__ ("mov.l %2, @-$r15\n\t" \
+ "trapa #0" \
+ : "=z" (__sc0) \
+ : "0" (__sc0), "r" (arg5), \
+ "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7) \
+ : "memory" ); \
+__syscall_return(type,__sc0); \
+}
+
#ifdef __KERNEL_SYSCALLS__
/*
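
The new five-argument stub passes arg5 on the stack (the mov.l before the
trapa), since only r4-r7 carry arguments in registers.  A sketch of it in
use, with mount(2) as the classic five-argument call; a libc would normally
provide this wrapper itself, and the expansion relies on __NR_mount from
the table above:

_syscall5(int, mount, char *, dev, char *, dir, char *, type,
	  unsigned long, flags, void *, data)
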
diff --git a/include/asm-sh/user.h b/include/asm-sh/user.h
index 793c9691e..243cc04fd 100644
--- a/include/asm-sh/user.h
+++ b/include/asm-sh/user.h
@@ -2,6 +2,7 @@
#define __ASM_SH_USER_H
#include <linux/types.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/page.h>
@@ -27,8 +28,18 @@
* current->start_stack, so we round each of these in order to be able
* to write an integer number of pages.
*/
+
+struct user_fpu_struct {
+ unsigned long fp_regs[NUM_FPU_REGS];
+ unsigned long xf_regs[NUM_FPU_REGS];
+ unsigned long fpscr;
+ unsigned long fpul;
+};
+
struct user {
struct pt_regs regs; /* entire machine state */
+ struct user_fpu_struct fpu; /* Math Co-processor registers. */
+ int u_fpvalid; /* True if math co-processor being used. */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
@@ -37,6 +48,7 @@ struct user {
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
struct regs * u_ar0; /* help gdb find registers */
+ struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
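
This struct heads an a.out-style core dump; a post-mortem sketch of pulling
the newly added FPU state back out (fd is an already-opened core file,
error handling elided):

#include <stdio.h>
#include <unistd.h>
#include <asm/user.h>

void show_fpu(int fd)
{
	struct user u;

	if (read(fd, &u, sizeof(u)) == sizeof(u) && u.u_fpvalid)
		printf("fpscr = %08lx  fpul = %08lx\n", u.fpu.fpscr, u.fpu.fpul);
}
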