Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            47
-rw-r--r--  arch/arm/kernel/armksyms.c         178
-rw-r--r--  arch/arm/kernel/calls.S            194
-rw-r--r--  arch/arm/kernel/dma.c              199
-rw-r--r--  arch/arm/kernel/ecard.c            604
-rw-r--r--  arch/arm/kernel/entry-armo.S       643
-rw-r--r--  arch/arm/kernel/entry-armv.S       671
-rw-r--r--  arch/arm/kernel/entry-common.S     283
-rw-r--r--  arch/arm/kernel/head-armo.S         63
-rw-r--r--  arch/arm/kernel/head-armv.S        312
-rw-r--r--  arch/arm/kernel/iic.c              160
-rw-r--r--  arch/arm/kernel/init_task.c         23
-rw-r--r--  arch/arm/kernel/ioport.c            98
-rw-r--r--  arch/arm/kernel/irq.c              327
-rw-r--r--  arch/arm/kernel/oldlatches.c        53
-rw-r--r--  arch/arm/kernel/process.c          239
-rw-r--r--  arch/arm/kernel/ptrace.c           745
-rw-r--r--  arch/arm/kernel/setup-ebsa110.c    143
-rw-r--r--  arch/arm/kernel/setup.c            292
-rw-r--r--  arch/arm/kernel/signal.c           515
-rw-r--r--  arch/arm/kernel/sys_arm.c          372
-rw-r--r--  arch/arm/kernel/time.c             154
-rw-r--r--  arch/arm/kernel/traps.c            306
23 files changed, 6621 insertions, 0 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
new file mode 100644
index 000000000..90e71345a
--- /dev/null
+++ b/arch/arm/kernel/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+
+HEAD_OBJ = head-$(PROCESSOR).o
+ENTRY_OBJ = entry-$(PROCESSOR).o
+
+O_TARGET := kernel.o
+O_OBJS := $(ENTRY_OBJ) ioport.o irq.o process.o ptrace.o signal.o sys_arm.o time.o traps.o
+
+all: kernel.o $(HEAD_OBJ) init_task.o
+
+ifeq ($(CONFIG_MODULES),y)
+OX_OBJS = armksyms.o
+else
+O_OBJS += armksyms.o
+endif
+
+ifdef CONFIG_ARCH_ACORN
+ O_OBJS += setup.o ecard.o iic.o dma.o
+ ifdef CONFIG_ARCH_ARC
+ O_OBJS += oldlatches.o
+ endif
+endif
+
+ifeq ($(MACHINE),ebsa110)
+ O_OBJS += setup-ebsa110.o dma.o
+endif
+
+ifeq ($(MACHINE),nexuspci)
+ O_OBJS += setup-ebsa110.o
+endif
+
+$(HEAD_OBJ): $(HEAD_OBJ:.o=.S)
+ $(CC) -D__ASSEMBLY__ -traditional -c $(HEAD_OBJ:.o=.S) -o $@
+
+include $(TOPDIR)/Rules.make
+
+$(ENTRY_OBJ:.o=.S): ../lib/constants.h
+
+.PHONY: ../lib/constants.h
+
+../lib/constants.h:
+ $(MAKE) -C ../lib constants.h
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 000000000..19666ac1e
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,178 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu(struct pt_regs *, struct user_fp_struct *);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __gcc_bcmp(void);
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __cmpdi2(void);
+extern void __divdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __moddi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __negdi2(void);
+extern void __ucmpdi2(void);
+extern void __udivdi3(void);
+extern void __udivmoddi4(void);
+extern void __udivsi3(void);
+extern void __umoddi3(void);
+extern void __umodsi3(void);
+
+extern void inswb(unsigned int port, void *to, int len);
+extern void outswb(unsigned int port, const void *to, int len);
+
+/*
+ * floating point math emulator support.
+ * These will not change. If they do, then a new version
+ * of the emulator will have to be compiled...
+ * fp_current is never actually dereferenced - it is just
+ * used as a pointer to pass back for send_sig().
+ */
+extern void (*fp_save)(unsigned char *);
+extern void (*fp_restore)(unsigned char *);
+extern void fp_setup(void);
+extern void fpreturn(void);
+extern void fpundefinstr(void);
+extern void fp_enter(void);
+extern void fp_printk(void);
+extern struct task_struct *fp_current;
+extern void fp_send_sig(int);
+
+/* platform dependent support */
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(udelay);
+EXPORT_SYMBOL(dma_str);
+EXPORT_SYMBOL(xchg_str);
+
+/* expansion card support */
+#ifdef CONFIG_ARCH_ACORN
+EXPORT_SYMBOL(ecard_startfind);
+EXPORT_SYMBOL(ecard_find);
+EXPORT_SYMBOL(ecard_readchunk);
+EXPORT_SYMBOL(ecard_address);
+#endif
+
+/* processor dependencies */
+EXPORT_SYMBOL(processor);
+
+/* io */
+EXPORT_SYMBOL(outswb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(inswb);
+EXPORT_SYMBOL(insw);
+
+#ifdef CONFIG_ARCH_RPC
+EXPORT_SYMBOL(drambank);
+#endif
+
+/* dma */
+EXPORT_SYMBOL(enable_dma);
+EXPORT_SYMBOL(set_dma_mode);
+EXPORT_SYMBOL(set_dma_addr);
+EXPORT_SYMBOL(set_dma_count);
+EXPORT_SYMBOL(get_dma_residue);
+
+/*
+ * floating point math emulator support.
+ * These symbols will never change their calling convention...
+ */
+EXPORT_SYMBOL_NOVERS(fpreturn);
+EXPORT_SYMBOL_NOVERS(fpundefinstr);
+EXPORT_SYMBOL_NOVERS(fp_enter);
+EXPORT_SYMBOL_NOVERS(fp_save);
+EXPORT_SYMBOL_NOVERS(fp_restore);
+EXPORT_SYMBOL_NOVERS(fp_setup);
+
+const char __kstrtab_fp_printk[] __attribute__((section(".kstrtab"))) = __MODULE_STRING(fp_printk);
+const struct module_symbol __ksymtab_fp_printk __attribute__((section("__ksymtab"))) =
+{ (unsigned long)&printk, __kstrtab_fp_printk };
+
+const char __kstrtab_fp_send_sig[] __attribute__((section(".kstrtab"))) = __MODULE_STRING(fp_send_sig);
+const struct module_symbol __ksymtab_fp_send_sig __attribute__((section("__ksymtab"))) =
+{ (unsigned long)&send_sig, __kstrtab_fp_send_sig };
+
+//EXPORT_SYMBOL_NOVERS(fp_current);
+
+ /*
+ * string / mem functions
+ */
+EXPORT_SYMBOL_NOVERS(strcpy);
+EXPORT_SYMBOL_NOVERS(strncpy);
+EXPORT_SYMBOL_NOVERS(strcat);
+EXPORT_SYMBOL_NOVERS(strncat);
+EXPORT_SYMBOL_NOVERS(strcmp);
+EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL_NOVERS(strchr);
+EXPORT_SYMBOL_NOVERS(strlen);
+EXPORT_SYMBOL_NOVERS(strnlen);
+EXPORT_SYMBOL_NOVERS(strspn);
+EXPORT_SYMBOL_NOVERS(strpbrk);
+EXPORT_SYMBOL_NOVERS(strtok);
+EXPORT_SYMBOL_NOVERS(strrchr);
+EXPORT_SYMBOL_NOVERS(memset);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(memcmp);
+EXPORT_SYMBOL_NOVERS(memscan);
+EXPORT_SYMBOL_NOVERS(memzero);
+
+ /* user mem (segment) */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_strlen_user);
+#elif defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+EXPORT_SYMBOL(uaccess_kernel);
+EXPORT_SYMBOL(uaccess_user);
+#endif
+
+ /* gcc lib functions */
+EXPORT_SYMBOL_NOVERS(__gcc_bcmp);
+EXPORT_SYMBOL_NOVERS(__ashldi3);
+EXPORT_SYMBOL_NOVERS(__ashrdi3);
+EXPORT_SYMBOL_NOVERS(__cmpdi2);
+EXPORT_SYMBOL_NOVERS(__divdi3);
+EXPORT_SYMBOL_NOVERS(__divsi3);
+EXPORT_SYMBOL_NOVERS(__lshrdi3);
+EXPORT_SYMBOL_NOVERS(__moddi3);
+EXPORT_SYMBOL_NOVERS(__modsi3);
+EXPORT_SYMBOL_NOVERS(__muldi3);
+EXPORT_SYMBOL_NOVERS(__negdi2);
+EXPORT_SYMBOL_NOVERS(__ucmpdi2);
+EXPORT_SYMBOL_NOVERS(__udivdi3);
+EXPORT_SYMBOL_NOVERS(__udivmoddi4);
+EXPORT_SYMBOL_NOVERS(__udivsi3);
+EXPORT_SYMBOL_NOVERS(__umoddi3);
+EXPORT_SYMBOL_NOVERS(__umodsi3);
+
+ /* bitops */
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(change_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
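
The exports above define the interface that loadable modules link against; the hand-built __ksymtab entries additionally publish printk and send_sig to the FP emulator under the names fp_printk and fp_send_sig. As a rough illustration of how a module consumes these exports, the sketch below shows an Acorn expansion-card driver using the ecard_* symbols; the module name, card IDs and the {manufacturer, product} member ordering of card_ids are assumptions made for illustration, not part of this patch.

    /*
     * Hypothetical podule-driver module sketch.  Only the ecard_* calls and
     * the ecard_t fields used here come from the exported interface above;
     * the rest (names, IDs, initialiser ordering) is assumed.
     */
    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <asm/ecard.h>

    static const card_ids example_cids[] = {
        { MANU_ACORN, PROD_ACORN_ETHER1 },      /* example IDs only */
        { 65535, 65535 }                        /* terminator expected by ecard_find() */
    };

    int init_module(void)
    {
        ecard_t *ec;

        ecard_startfind();                      /* restart the card search */
        while ((ec = ecard_find(0, example_cids)) != NULL) {
            ec->claimed = 1;                    /* claim it so its IRQ is routed to us */
            printk(KERN_INFO "example: card in slot %d, irq %d\n",
                   ec->slot_no, ec->irq);
        }
        return 0;
    }

    void cleanup_module(void)
    {
    }
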
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
new file mode 100644
index 000000000..0d02eb85a
--- /dev/null
+++ b/arch/arm/kernel/calls.S
@@ -0,0 +1,194 @@
+/*
+ * linux/arch/arm/kernel/calls.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#ifndef NR_SYSCALLS
+#define NR_syscalls 256
+#define NR_SYSCALLS 182
+#else
+
+/* 0 */ .long SYMBOL_NAME(sys_setup)
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork_wrapper)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+/* 5 */ .long SYMBOL_NAME(sys_open)
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+/* 10 */ .long SYMBOL_NAME(sys_unlink)
+ .long SYMBOL_NAME(sys_execve_wrapper)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+/* 15 */ .long SYMBOL_NAME(sys_chmod)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_break */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+/* 20 */ .long SYMBOL_NAME(sys_getpid)
+ .long SYMBOL_NAME(sys_mount_wrapper)
+ .long SYMBOL_NAME(sys_umount)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_getuid)
+/* 25 */ .long SYMBOL_NAME(sys_stime)
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+/* 30 */ .long SYMBOL_NAME(sys_utime)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_stty */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_getty */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+/* 35 */ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_ftime */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+/* 40 */ .long SYMBOL_NAME(sys_rmdir)
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_prof */
+/* 45 */ .long SYMBOL_NAME(sys_brk)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid)
+/* 50 */ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_phys */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_lock */
+ .long SYMBOL_NAME(sys_ioctl)
+/* 55 */ .long SYMBOL_NAME(sys_fcntl)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_mpx */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_ulimit */
+ .long SYMBOL_NAME(sys_olduname)
+/* 60 */ .long SYMBOL_NAME(sys_umask)
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+/* 65 */ .long SYMBOL_NAME(sys_getpgrp)
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+/* 70 */ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_sigsuspend_wrapper)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+/* 75 */ .long SYMBOL_NAME(sys_setrlimit)
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+/* 80 */ .long SYMBOL_NAME(sys_getgroups)
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(old_select)
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+/* 85 */ .long SYMBOL_NAME(sys_readlink)
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+/* 90 */ .long SYMBOL_NAME(old_mmap)
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+/* 95 */ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_profil */
+ .long SYMBOL_NAME(sys_statfs)
+/* 100 */ .long SYMBOL_NAME(sys_fstatfs)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long _sys_ioperm */
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+/* 105 */ .long SYMBOL_NAME(sys_getitimer)
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_uname)
+/* 110 */ .long SYMBOL_NAME(sys_iopl)
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_idle)
+ .long SYMBOL_NAME(sys_syscall) /* call a syscall */
+ .long SYMBOL_NAME(sys_wait4)
+/* 115 */ .long SYMBOL_NAME(sys_swapoff)
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn_wrapper)
+/* 120 */ .long SYMBOL_NAME(sys_clone_wapper)
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long SYMBOL_NAME(sys_modify_ldt) */
+ .long SYMBOL_NAME(sys_adjtimex)
+/* 125 */ .long SYMBOL_NAME(sys_mprotect)
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_create_module)
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+/* 130 */ .long SYMBOL_NAME(sys_get_kernel_syms)
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+/* 135 */ .long SYMBOL_NAME(sys_sysfs)
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long _sys_afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid)
+ .long SYMBOL_NAME(sys_setfsgid)
+/* 140 */ .long SYMBOL_NAME(sys_llseek_wrapper)
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_msync)
+/* 145 */ .long SYMBOL_NAME(sys_readv)
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_ni_syscall)
+/* 150 */ .long SYMBOL_NAME(sys_mlock)
+ .long SYMBOL_NAME(sys_munlock)
+ .long SYMBOL_NAME(sys_mlockall)
+ .long SYMBOL_NAME(sys_munlockall)
+ .long SYMBOL_NAME(sys_sched_setparam)
+/* 155 */ .long SYMBOL_NAME(sys_sched_getparam)
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+/* 160 */ .long SYMBOL_NAME(sys_sched_get_priority_min)
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_mremap)
+ .long SYMBOL_NAME(sys_setresuid)
+/* 165 */ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_query_module)
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+/* 170 */ .long SYMBOL_NAME(sys_setresgid)
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn_wrapper)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+/* 175 */ .long SYMBOL_NAME(sys_rt_sigprocmask)
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend_wrapper)
+/* 180 */ .long SYMBOL_NAME(sys_pread)
+ .long SYMBOL_NAME(sys_pwrite)
+ .space (NR_syscalls - 182) * 4
+#endif
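
Each .long above occupies one slot in the syscall table, indexed by the number taken from the SWI instruction; the .space directive zero-pads the table out to NR_syscalls entries. The real dispatch lives in entry-common.S (not part of this file), but in C terms it behaves roughly like the sketch below, where the table symbol and helper name are illustrative only:

    /* Illustrative C analogue of indexing the table built above.  Slots
     * 0-181 hold the pointers listed; the zero padding from .space stands
     * in for "not implemented".  These names are not the real entry code. */
    typedef int (*syscall_fn)(void);

    extern syscall_fn example_call_table[256];  /* the 256 (NR_syscalls) slots */
    extern int sys_ni_syscall(void);            /* returns -ENOSYS */

    static int example_dispatch(unsigned int nr)
    {
        if (nr >= 256 || example_call_table[nr] == 0)
            return sys_ni_syscall();
        return example_call_table[nr]();
    }
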
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
new file mode 100644
index 000000000..3c165c41d
--- /dev/null
+++ b/arch/arm/kernel/dma.c
@@ -0,0 +1,199 @@
+/*
+ * linux/arch/arm/kernel/dma.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <linux/mman.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#define KERNEL_ARCH_DMA
+#include <asm/dma.h>
+
+static unsigned long dma_address[8];
+static unsigned long dma_count[8];
+static char dma_direction[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+#if defined(CONFIG_ARCH_A5K) || defined(CONFIG_ARCH_RPC)
+#define DMA_PCIO
+#endif
+#if defined(CONFIG_ARCH_ARC) && defined(CONFIG_BLK_DEV_FD)
+#define DMA_OLD
+#endif
+
+void enable_dma (unsigned int dmanr)
+{
+ switch (dmanr) {
+#ifdef DMA_PCIO
+ case 2: {
+ void *fiqhandler_start;
+ unsigned int fiqhandler_length;
+ extern void floppy_fiqsetup (unsigned long len, unsigned long addr,
+ unsigned long port);
+ switch (dma_direction[dmanr]) {
+ case 1: {
+ extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
+ fiqhandler_start = &floppy_fiqin_start;
+ fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
+ break;
+ }
+ case 0: {
+ extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
+ fiqhandler_start = &floppy_fiqout_start;
+ fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
+ break;
+ }
+ default:
+ printk ("enable_dma: dma%d not initialised\n", dmanr);
+ return;
+ }
+ memcpy ((void *)0x1c, fiqhandler_start, fiqhandler_length);
+ flush_page_to_ram(0);
+ floppy_fiqsetup (dma_count[dmanr], dma_address[dmanr], (int)PCIO_FLOPPYDMABASE);
+ enable_irq (64);
+ return;
+ }
+#endif
+#ifdef DMA_OLD
+ case 0: { /* Data DMA */
+ switch (dma_direction[dmanr]) {
+ case 1: /* read */
+ {
+ extern unsigned char fdc1772_dma_read, fdc1772_dma_read_end;
+ extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
+ unsigned long flags;
+#ifdef DEBUG
+ printk("enable_dma fdc1772 data read\n");
+#endif
+ save_flags(flags);
+ cliIF();
+
+ memcpy ((void *)0x1c, (void *)&fdc1772_dma_read,
+ &fdc1772_dma_read_end - &fdc1772_dma_read);
+ fdc1772_setupdma(dma_count[dmanr],dma_address[dmanr]); /* Sets data pointer up */
+ enable_irq (64);
+ restore_flags(flags);
+ }
+ break;
+
+ case 0: /* write */
+ {
+ extern unsigned char fdc1772_dma_write, fdc1772_dma_write_end;
+ extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
+ unsigned long flags;
+
+#ifdef DEBUG
+ printk("enable_dma fdc1772 data write\n");
+#endif
+ save_flags(flags);
+ cliIF();
+ memcpy ((void *)0x1c, (void *)&fdc1772_dma_write,
+ &fdc1772_dma_write_end - &fdc1772_dma_write);
+ fdc1772_setupdma(dma_count[dmanr],dma_address[dmanr]); /* Sets data pointer up */
+ enable_irq (64);
+
+ restore_flags(flags);
+ }
+ break;
+ default:
+ printk ("enable_dma: dma%d not initialised\n", dmanr);
+ return;
+ }
+ }
+ break;
+
+ case 1: { /* Command end FIQ - actually just sets a flag */
+ /* Need to build a branch at the FIQ address */
+ extern void fdc1772_comendhandler(void);
+ unsigned long flags;
+
+ /*printk("enable_dma fdc1772 command end FIQ\n");*/
+ save_flags(flags);
+ cliIF();
+
+ *((unsigned int *)0x1c)=0xea000000 | (((unsigned int)fdc1772_comendhandler-(0x1c+8))/4); /* B fdc1772_comendhandler */
+
+ restore_flags(flags);
+ }
+ break;
+#endif
+ case DMA_0:
+ case DMA_1:
+ case DMA_2:
+ case DMA_3:
+ case DMA_S0:
+ case DMA_S1:
+ arch_enable_dma (dmanr - DMA_0);
+ break;
+
+ default:
+ printk ("enable_dma: dma %d not supported\n", dmanr);
+ }
+}
+
+void set_dma_mode (unsigned int dmanr, char mode)
+{
+ if (dmanr < 8) {
+ if (mode == DMA_MODE_READ)
+ dma_direction[dmanr] = 1;
+ else if (mode == DMA_MODE_WRITE)
+ dma_direction[dmanr] = 0;
+ else
+ printk ("set_dma_mode: dma%d: invalid mode %02X not supported\n",
+ dmanr, mode);
+ } else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_mode (dmanr - DMA_0, mode);
+ else
+ printk ("set_dma_mode: dma %d not supported\n", dmanr);
+}
+
+void set_dma_addr (unsigned int dmanr, unsigned int addr)
+{
+ if (dmanr < 8)
+ dma_address[dmanr] = (unsigned long)addr;
+ else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_addr (dmanr - DMA_0, addr);
+ else
+ printk ("set_dma_addr: dma %d not supported\n", dmanr);
+}
+
+void set_dma_count (unsigned int dmanr, unsigned int count)
+{
+ if (dmanr < 8)
+ dma_count[dmanr] = (unsigned long)count;
+ else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_count (dmanr - DMA_0, count);
+ else
+ printk ("set_dma_count: dma %d not supported\n", dmanr);
+}
+
+int get_dma_residue (unsigned int dmanr)
+{
+ if (dmanr < 8) {
+ switch (dmanr) {
+#if defined(CONFIG_ARCH_A5K) || defined(CONFIG_ARCH_RPC)
+ case 2: {
+ extern int floppy_fiqresidual (void);
+ return floppy_fiqresidual ();
+ }
+#endif
+#if defined(CONFIG_ARCH_ARC) && defined(CONFIG_BLK_DEV_FD)
+ case 0: {
+ extern unsigned int fdc1772_bytestogo;
+ return fdc1772_bytestogo;
+ }
+#endif
+ default:
+ return -1;
+ }
+ } else if (dmanr < MAX_DMA_CHANNELS)
+ return arch_dma_count (dmanr - DMA_0);
+ return -1;
+}
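
The routines above reproduce the ISA-style DMA programming model: a driver must set the transfer direction, address and count before enabling the channel, otherwise enable_dma() rejects it as uninitialised. A minimal usage sketch (the channel number, buffer and length are placeholders chosen for illustration):

    /* Sketch of a driver programming a transfer through the interface above. */
    #include <asm/dma.h>

    static void example_start_read(void *buf, unsigned int len)
    {
        unsigned int chan = 2;                  /* hypothetical: the FIQ-driven floppy channel */

        set_dma_mode(chan, DMA_MODE_READ);      /* direction first... */
        set_dma_addr(chan, (unsigned int)buf);
        set_dma_count(chan, len);
        enable_dma(chan);                       /* ...then start (on A5K/RPC this installs the floppy FIQ handler) */
    }
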
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
new file mode 100644
index 000000000..cc18252b3
--- /dev/null
+++ b/arch/arm/kernel/ecard.c
@@ -0,0 +1,604 @@
+/*
+ * linux/arch/arm/kernel/ecard.c
+ *
+ * Find all installed expansion cards, and handle interrupts from them.
+ *
+ * Copyright 1995,1996,1997 Russell King
+ *
+ * Created from information from Acorn's RiscOS 3 PRMs
+ *
+ * 08-Dec-1996 RMK Added code for the 9th expansion card - the ether podule slot.
+ * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
+ * 12-Sep-1997 RMK Created new handling of interrupt enables/disables - cards can
+ * now register their own routine to control interrupts (recommended).
+ * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled on reset from
+ * Linux. (Caused cards not to respond under RiscOS without hard reset).
+ */
+
+#define ECARD_C
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+
+#include <asm/irq-no.h>
+#include <asm/ecard.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/arch/irq.h>
+
+#ifdef CONFIG_ARCH_ARC
+#include <asm/arch/oldlatches.h>
+#else
+#define oldlatch_init()
+#endif
+
+#define BLACKLIST_NAME(m,p,s) { m, p, NULL, s }
+#define BLACKLIST_LOADER(m,p,l) { m, p, l, NULL }
+#define BLACKLIST_NOLOADER(m,p) { m, p, noloader, blacklisted_str }
+#define BUS_ADDR(x) ((((unsigned long)(x)) << 2) + IO_BASE)
+
+extern unsigned long atomwide_serial_loader[], oak_scsi_loader[], noloader[];
+static const char blacklisted_str[] = "*loader blacklisted - not 32-bit compliant*";
+
+static const struct expcard_blacklist {
+ unsigned short manufacturer;
+ unsigned short product;
+ const loader_t loader;
+ const char *type;
+} blacklist[] = {
+/* Cards without names */
+ BLACKLIST_NAME(MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1"),
+
+/* Cards with corrected loader */
+ BLACKLIST_LOADER(MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, atomwide_serial_loader),
+ BLACKLIST_LOADER(MANU_OAK, PROD_OAK_SCSI, oak_scsi_loader),
+
+/* Unsupported cards with no loader */
+BLACKLIST_NOLOADER(MANU_ALSYSTEMS, PROD_ALSYS_SCSIATAPI),
+BLACKLIST_NOLOADER(MANU_MCS, PROD_MCS_CONNECT32)
+};
+
+extern int setup_arm_irq(int, struct irqaction *);
+
+/*
+ * from linux/arch/arm/kernel/irq.c
+ */
+extern void do_ecard_IRQ(int irq, struct pt_regs *);
+
+static ecard_t expcard[MAX_ECARDS];
+static signed char irqno_to_expcard[16];
+static unsigned int ecard_numcards, ecard_numirqcards;
+static unsigned int have_expmask;
+static unsigned long kmem;
+
+static void ecard_def_irq_enable (ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask |= 1 << irqnr;
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static void ecard_def_irq_disable (ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask &= ~(1 << irqnr);
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static void ecard_def_fiq_enable (ecard_t *ec, int fiqnr)
+{
+ panic ("ecard_def_fiq_enable called - impossible");
+}
+
+static void ecard_def_fiq_disable (ecard_t *ec, int fiqnr)
+{
+ panic ("ecard_def_fiq_disable called - impossible");
+}
+
+static expansioncard_ops_t ecard_default_ops = {
+ ecard_def_irq_enable,
+ ecard_def_irq_disable,
+ ecard_def_fiq_enable,
+ ecard_def_fiq_disable
+};
+
+/*
+ * Enable and disable interrupts from expansion cards.
+ * (interrupts are disabled for these functions).
+ *
+ * They are not meant to be called directly, but via enable/disable_irq.
+ */
+void ecard_enableirq (unsigned int irqnr)
+{
+ if (irqnr < MAX_ECARDS && irqno_to_expcard[irqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[irqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->irqenable)
+ ec->ops->irqenable (ec, irqnr);
+ else
+ printk (KERN_ERR "ecard: rejecting request to "
+ "enable IRQs for %d\n", irqnr);
+ }
+}
+
+void ecard_disableirq (unsigned int irqnr)
+{
+ if (irqnr < MAX_ECARDS && irqno_to_expcard[irqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[irqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops && ec->ops->irqdisable)
+ ec->ops->irqdisable (ec, irqnr);
+ }
+}
+
+void ecard_enablefiq (unsigned int fiqnr)
+{
+ if (fiqnr < MAX_ECARDS && irqno_to_expcard[fiqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[fiqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->fiqenable)
+ ec->ops->fiqenable (ec, fiqnr);
+ else
+ printk (KERN_ERR "ecard: rejecting request to "
+ "enable FIQs for %d\n", fiqnr);
+ }
+}
+
+void ecard_disablefiq (unsigned int fiqnr)
+{
+ if (fiqnr < MAX_ECARDS && irqno_to_expcard[fiqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[fiqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops->fiqdisable)
+ ec->ops->fiqdisable (ec, fiqnr);
+ }
+}
+
+static void *ecard_malloc(int len)
+{
+ int r;
+
+ len = (len + 3) & ~3;
+
+ if (kmem) {
+ r = kmem;
+ kmem += len;
+ return (void *)r;
+ } else
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static void ecard_irq_noexpmask(int intr_no, void *dev_id, struct pt_regs *regs)
+{
+ const int num_cards = ecard_numirqcards;
+ int i, called = 0;
+
+ mask_irq (IRQ_EXPANSIONCARD);
+ for (i = 0; i < num_cards; i++) {
+ if (expcard[i].claimed && expcard[i].irq &&
+ (!expcard[i].irqmask ||
+ expcard[i].irqaddr[0] & expcard[i].irqmask)) {
+ do_ecard_IRQ(expcard[i].irq, regs);
+ called ++;
+ }
+ }
+ cli ();
+ unmask_irq (IRQ_EXPANSIONCARD);
+ if (called == 0)
+ printk (KERN_WARNING "Wild interrupt from backplane?\n");
+}
+
+#ifdef HAS_EXPMASK
+static unsigned char priority_masks[] =
+{
+ 0xf0, 0xf1, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff
+};
+
+static unsigned char first_set[] =
+{
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00
+};
+
+static void ecard_irq_expmask (int intr_no, void *dev_id, struct pt_regs *regs)
+{
+ const unsigned int statusmask = 15;
+ unsigned int status;
+
+ status = EXPMASK_STATUS & statusmask;
+ if (status) {
+ unsigned int irqno;
+ ecard_t *ec;
+again:
+ irqno = first_set[status];
+ ec = expcard + irqno_to_expcard[irqno];
+ if (ec->claimed) {
+ unsigned int oldexpmask;
+ /*
+ * this ugly code is so that we can operate a prioritising system.
+ * Card 0 highest priority
+ * Card 1
+ * Card 2
+ * Card 3 lowest priority
+ * Serial cards should go in 0/1, ethernet/scsi in 2/3
+ * otherwise you will lose serial data at high speeds!
+ */
+ oldexpmask = have_expmask;
+ EXPMASK_ENABLE = (have_expmask &= priority_masks[irqno]);
+ sti ();
+ do_ecard_IRQ (ec->irq, regs);
+ cli ();
+ EXPMASK_ENABLE = have_expmask = oldexpmask;
+ status = EXPMASK_STATUS & statusmask;
+ if (status)
+ goto again;
+ } else {
+ printk (KERN_WARNING "card%d: interrupt from unclaimed card???\n", irqno);
+ EXPMASK_ENABLE = (have_expmask &= ~(1 << irqno));
+ }
+ } else
+ printk (KERN_WARNING "Wild interrupt from backplane (masks)\n");
+}
+
+static int ecard_checkirqhw (void)
+{
+ int found;
+
+ EXPMASK_ENABLE = 0x00;
+ EXPMASK_STATUS = 0xff;
+ found = ((EXPMASK_STATUS & 15) == 0);
+ EXPMASK_ENABLE = 0xff;
+
+ return found;
+}
+#endif
+
+static void ecard_readbytes (void *addr, ecard_t *ec, int off, int len, int useld)
+{
+ extern int ecard_loader_read(int off, volatile unsigned int pa, loader_t loader);
+ unsigned char *a = (unsigned char *)addr;
+
+ if (ec->slot_no == 8) {
+ static unsigned int lowaddress;
+ unsigned int laddr, haddr;
+ unsigned char byte = 0; /* keep gcc quiet */
+
+ laddr = off & 4095; /* number of bytes to read from offset + base addr */
+ haddr = off >> 12; /* offset into card from base addr */
+
+ if (haddr > 256)
+ return;
+
+ /*
+ * If we require a low address or address 0, then reset, and start again...
+ */
+ if (!off || lowaddress > laddr) {
+ outb (0, ec->podaddr);
+ lowaddress = 0;
+ }
+ while (lowaddress <= laddr) {
+ byte = inb (ec->podaddr + haddr);
+ lowaddress += 1;
+ }
+ while (len--) {
+ *a++ = byte;
+ if (len) {
+ byte = inb (ec->podaddr + haddr);
+ lowaddress += 1;
+ }
+ }
+ } else {
+ if (!useld || !ec->loader) {
+ while(len--)
+ *a++ = inb(ec->podaddr + (off++));
+ } else {
+ while(len--) {
+ *(unsigned long *)0x108 = 0; /* hack for some loaders!!! */
+ *a++ = ecard_loader_read(off++, BUS_ADDR(ec->podaddr), ec->loader);
+ }
+ }
+ }
+}
+
+/*
+ * This is called to reset the loaders for each expansion card on reboot.
+ *
+ * This is required to make sure that the card is in the correct state
+ * that RiscOS expects it to be in.
+ */
+void ecard_reset (int card)
+{
+ extern int ecard_loader_reset (volatile unsigned int pa, loader_t loader);
+
+ if (card >= ecard_numcards)
+ return;
+
+ if (card < 0) {
+ for (card = 0; card < ecard_numcards; card++)
+ if (expcard[card].loader)
+ ecard_loader_reset (BUS_ADDR(expcard[card].podaddr),
+ expcard[card].loader);
+ } else
+ if (expcard[card].loader)
+ ecard_loader_reset (BUS_ADDR(expcard[card].podaddr),
+ expcard[card].loader);
+
+#ifdef HAS_EXPMASK
+ if (have_expmask) {
+ have_expmask |= ~0;
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static unsigned int ecard_startcard;
+
+void ecard_startfind (void)
+{
+ ecard_startcard = 0;
+}
+
+ecard_t *ecard_find (int cld, const card_ids *cids)
+{
+ int card;
+ if (!cids) {
+ for (card = ecard_startcard; card < ecard_numcards; card++)
+ if (!expcard[card].claimed &&
+ ((expcard[card].cld.ecld ^ cld) & 0x78) == 0)
+ break;
+ } else {
+ for (card = ecard_startcard; card < ecard_numcards; card++) {
+ unsigned int manufacturer, product;
+ int i;
+
+ if (expcard[card].claimed)
+ continue;
+
+ manufacturer = expcard[card].cld.manufacturer;
+ product = expcard[card].cld.product;
+
+ for (i = 0; cids[i].manufacturer != 65535; i++)
+ if (manufacturer == cids[i].manufacturer &&
+ product == cids[i].product)
+ break;
+
+ if (cids[i].manufacturer != 65535)
+ break;
+ }
+ }
+ ecard_startcard = card + 1;
+ return card < ecard_numcards ? &expcard[card] : NULL;
+}
+
+int ecard_readchunk (struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
+{
+ struct ex_chunk_dir excd;
+ int index = 16;
+ int useld = 0;
+
+ while(1) {
+ ecard_readbytes(&excd, ec, index, 8, useld);
+ index += 8;
+ if (c_id(&excd) == 0) {
+ if (!useld && ec->loader) {
+ useld = 1;
+ index = 0;
+ continue;
+ }
+ return 0;
+ }
+ if (c_id(&excd) == 0xf0) { /* link */
+ index = c_start(&excd);
+ continue;
+ }
+ if (c_id(&excd) == 0x80) { /* loader */
+ if (!ec->loader) {
+ ec->loader = (loader_t)ecard_malloc(c_len(&excd));
+ ecard_readbytes(ec->loader, ec, (int)c_start(&excd), c_len(&excd), useld);
+ }
+ continue;
+ }
+ if (c_id(&excd) == id && num-- == 0)
+ break;
+ }
+
+ if (c_id(&excd) & 0x80) {
+ switch (c_id(&excd) & 0x70) {
+ case 0x70:
+ ecard_readbytes((unsigned char *)excd.d.string, ec,
+ (int)c_start(&excd), c_len(&excd), useld);
+ break;
+ case 0x00:
+ break;
+ }
+ }
+ cd->start_offset = c_start(&excd);
+ memcpy (cd->d.string, excd.d.string, 256);
+ return 1;
+}
+
+unsigned int ecard_address (ecard_t *ec, card_type_t memc, card_speed_t speed)
+{
+ switch (ec->slot_no) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return (memc ? MEMCECIO_BASE : IOCECIO_BASE + (speed << 17)) + (ec->slot_no << 12);
+#ifdef IOCEC4IO_BASE
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ return (memc ? 0 : IOCEC4IO_BASE + (speed << 17)) + ((ec->slot_no - 4) << 12);
+#endif
+#ifdef MEMCEC8IO_BASE
+ case 8:
+ return MEMCEC8IO_BASE;
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Probe for an expansion card.
+ *
+ * If bit 1 of the first byte of the card is set,
+ * then the card does not exist.
+ */
+static int ecard_probe (int card, int freeslot)
+{
+ ecard_t *ec = expcard + freeslot;
+ struct ex_ecld excld;
+ const char *card_desc = NULL;
+ int i;
+
+ irqno_to_expcard[card] = -1;
+
+ ec->slot_no = card;
+ if ((ec->podaddr = ecard_address (ec, 0, ECARD_SYNC)) == 0)
+ return 0;
+
+ excld.r_ecld = 2;
+ ecard_readbytes (&excld, ec, 0, 16, 0);
+ if (excld.r_ecld & 2)
+ return 0;
+
+ irqno_to_expcard[card] = freeslot;
+
+ ec->irq = -1;
+ ec->fiq = -1;
+ ec->cld.ecld = e_ecld(&excld);
+ ec->cld.manufacturer = e_manu(&excld);
+ ec->cld.product = e_prod(&excld);
+ ec->cld.country = e_country(&excld);
+ ec->cld.fiqmask = e_fiqmask(&excld);
+ ec->cld.irqmask = e_irqmask(&excld);
+ ec->cld.fiqaddr = e_fiqaddr(&excld);
+ ec->cld.irqaddr = e_irqaddr(&excld);
+ ec->fiqaddr =
+ ec->irqaddr = (unsigned char *)BUS_ADDR(ec->podaddr);
+ ec->fiqmask = 4;
+ ec->irqmask = 1;
+ ec->ops = &ecard_default_ops;
+
+ for (i = 0; i < sizeof (blacklist) / sizeof (*blacklist); i++)
+ if (blacklist[i].manufacturer == ec->cld.manufacturer &&
+ blacklist[i].product == ec->cld.product) {
+ ec->loader = blacklist[i].loader;
+ card_desc = blacklist[i].type;
+ break;
+ }
+
+ if (card != 8) {
+ ec->irq = 32 + card;
+#if 0
+ ec->fiq = 96 + card;
+#endif
+ } else {
+ ec->irq = 11;
+ ec->fiq = -1;
+ }
+
+ if ((ec->cld.ecld & 0x78) == 0) {
+ struct in_chunk_dir incd;
+ printk ("\n %d: [%04X:%04X] ", card, ec->cld.manufacturer, ec->cld.product);
+ if (e_is (&excld)) {
+ ec->fiqmask = e_fiqmask (&excld);
+ ec->irqmask = e_irqmask (&excld);
+ ec->fiqaddr += e_fiqaddr (&excld);
+ ec->irqaddr += e_irqaddr (&excld);
+ }
+ if (!card_desc && e_cd (&excld) && ecard_readchunk (&incd, ec, 0xf5, 0))
+ card_desc = incd.d.string;
+ if (card_desc)
+ printk ("%s", card_desc);
+ else
+ printk ("*Unknown*");
+ } else
+ printk("\n %d: Simple card %d\n", card, (ec->cld.ecld >> 3) & 15);
+ return 1;
+}
+
+static struct irqaction irqexpansioncard = { ecard_irq_noexpmask, SA_INTERRUPT, 0, "expansion cards", NULL, NULL };
+
+/*
+ * Initialise the expansion card system.
+ * Locate all hardware - interrupt management and
+ * actual cards.
+ */
+unsigned long ecard_init(unsigned long start_mem)
+{
+ int i, nc = 0;
+
+ kmem = (start_mem | 3) & ~3;
+ memset (expcard, 0, sizeof (expcard));
+
+#ifdef HAS_EXPMASK
+ if (ecard_checkirqhw()) {
+ printk (KERN_DEBUG "Expansion card interrupt management hardware found\n");
+ irqexpansioncard.handler = ecard_irq_expmask;
+ have_expmask = -1;
+ }
+#endif
+ printk("Installed expansion cards:");
+
+ /*
+ * First of all, probe all cards on the expansion card interrupt line
+ */
+ for (i = 0; i < 4; i++)
+ if (ecard_probe (i, nc))
+ nc += 1;
+ else
+ have_expmask &= ~(1<<i);
+
+ ecard_numirqcards = nc;
+
+ /*
+ * Now probe other cards with different interrupt lines
+ */
+#ifdef MEMCEC8IO_BASE
+ if (ecard_probe (8, nc))
+ nc += 1;
+#endif
+ printk("\n");
+ ecard_numcards = nc;
+
+ if (nc && setup_arm_irq(IRQ_EXPANSIONCARD, &irqexpansioncard)) {
+ printk ("Could not allocate interrupt for expansion cards\n");
+ return kmem;
+ }
+
+#ifdef HAS_EXPMASK
+ if (nc && have_expmask)
+ EXPMASK_ENABLE = have_expmask;
+#endif
+ oldlatch_init ();
+ start_mem = kmem;
+ kmem = 0;
+ return start_mem;
+}
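
As the comment above ecard_enableirq() notes, a card must be claimed and should normally install its own interrupt-control operations rather than rely on ecard_default_ops. A hedged sketch of what a driver-side expansioncard_ops_t might look like (the handler bodies are placeholders; only the structure layout and the ec->claimed / ec->ops fields come from this file):

    /* Sketch: a driver supplying its own IRQ enable/disable operations. */
    static void example_irqenable(ecard_t *ec, int irqnr)
    {
        /* hypothetical: set a card-specific interrupt-enable bit here */
    }

    static void example_irqdisable(ecard_t *ec, int irqnr)
    {
        /* hypothetical: mask the card's interrupt source here */
    }

    static expansioncard_ops_t example_ops = {
        example_irqenable,
        example_irqdisable,
        NULL,                   /* fiqenable  - unused in this sketch */
        NULL                    /* fiqdisable - unused in this sketch */
    };

    /* After a successful ecard_find():
     *    ec->claimed = 1;
     *    ec->ops = &example_ops;
     */
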
diff --git a/arch/arm/kernel/entry-armo.S b/arch/arm/kernel/entry-armo.S
new file mode 100644
index 000000000..20c1b8e7c
--- /dev/null
+++ b/arch/arm/kernel/entry-armo.S
@@ -0,0 +1,643 @@
+/*
+ * linux/arch/arm/kernel/entry-armo.S
+ *
+ * Copyright (C) 1995,1996,1997,1998 Russell King.
+ *
+ * Low-level vector interface routines
+ *
+ * Design issues:
+ * - We have several modes that each vector can be called from,
+ * each with its own set of registers. On entry to any vector,
+ * we *must* save the registers used in *that* mode.
+ *
+ * - This code must be as fast as possible.
+ *
+ * There are a few restrictions on the vectors:
+ * - the SWI vector cannot be called from *any* non-user mode
+ *
+ * - the FP emulator is *never* called from *any* non-user mode undefined
+ * instruction.
+ *
+ * Ok, so this file may be a mess, but it's as efficient as possible while
+ * adhering to the above criteria.
+ */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/hardware.h>
+
+#include "../lib/constants.h"
+
+ .text
+
+@ Offsets into task structure
+@ ---------------------------
+@
+#define STATE 0
+#define COUNTER 4
+#define PRIORITY 8
+#define FLAGS 12
+#define SIGPENDING 16
+
+#define PF_TRACESYS 0x20
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH 0
+#define BAD_DATA 1
+#define BAD_ADDREXCPTN 2
+#define BAD_IRQ 3
+#define BAD_UNDEFINSTR 4
+
+@ OS version number used in SWIs
+@ RISC OS is 0
+@ RISC iX is 8
+@
+#define OS_NUMBER 9
+
+@
+@ Stack format (ensured by USER_* and SVC_*)
+@
+#define S_OLD_R0 64
+#define S_PSR 60
+#define S_PC 60
+#define S_LR 56
+#define S_SP 52
+#define S_IP 48
+#define S_FP 44
+#define S_R10 40
+#define S_R9 36
+#define S_R8 32
+#define S_R7 28
+#define S_R6 24
+#define S_R5 20
+#define S_R4 16
+#define S_R3 12
+#define S_R2 8
+#define S_R1 4
+#define S_R0 0
+
+#ifdef IOC_BASE
+/* IOC / IOMD based hardware */
+ .equ ioc_base_high, IOC_BASE & 0xff000000
+ .equ ioc_base_low, IOC_BASE & 0x00ff0000
+ .macro disable_fiq
+ mov r12, #ioc_base_high
+ .if ioc_base_low
+ orr r12, r12, #ioc_base_low
+ .endif
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #ioc_base_high @ point at IOC
+ .if ioc_base_low
+ orr r4, r4, #ioc_base_low
+ .endif
+ ldrb \irqnr, [r4, #0x24] @ get high priority first
+ adr \base, irq_prio_h
+ teq \irqnr, #0
+ ldreqb \irqnr, [r4, #0x14] @ get low priority
+ adreq \base, irq_prio_l
+ .endm
+
+/*
+ * Interrupt table (incorporates priority)
+ */
+ .macro irq_prio_table
+irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .endm
+#else
+#error Unknown architecture
+#endif
+
+/*=============================================================================
+ * For entry-common.S
+ */
+
+ .macro save_user_regs
+ str r0, [sp, #-4]!
+ str lr, [sp, #-4]!
+ sub sp, sp, #15*4
+ stmia sp, {r0 - lr}^
+ mov r0, r0
+ .endm
+
+ .macro restore_user_regs
+ ldmia sp, {r0 - lr}^
+ mov r0, r0
+ add sp, sp, #15*4
+ ldr lr, [sp], #8
+ movs pc, lr
+ .endm
+
+ .macro mask_pc, rd, rm
+ bic \rd, \rm, #PCMASK
+ .endm
+
+ .macro arm700_bug_check, instr, temp
+ .endm
+
+ .macro enable_irqs, temp
+ teqp pc, #0x00000003
+ .endm
+
+ .macro initialise_traps_extra
+ .endm
+
+ .macro get_current_task, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+
+ /*
+ * Like adr, but force SVC mode (if required)
+ */
+ .macro adrsvc, cond, reg, label
+ adr\cond \reg, \label
+ orr\cond \reg, \reg, #3
+ .endm
+
+#if 0
+/*
+ * Uncomment these if you wish to get more debugging info about data aborts.
+ */
+#define FAULT_CODE_LDRSTRPOST 0x80
+#define FAULT_CODE_LDRSTRPRE 0x40
+#define FAULT_CODE_LDRSTRREG 0x20
+#define FAULT_CODE_LDMSTM 0x10
+#define FAULT_CODE_LDCSTC 0x08
+#endif
+#define FAULT_CODE_PREFETCH 0x04
+#define FAULT_CODE_WRITE 0x02
+#define FAULT_CODE_USER 0x01
+
+
+#define SVC_SAVE_ALL \
+ str sp, [sp, #-16]! ;\
+ str lr, [sp, #8] ;\
+ str lr, [sp, #4] ;\
+ stmfd sp!, {r0 - r12} ;\
+ mov r0, #-1 ;\
+ str r0, [sp, #S_OLD_R0] ;\
+ mov fp, #0
+
+#define SVC_IRQ_SAVE_ALL \
+ str sp, [sp, #-16]! ;\
+ str lr, [sp, #4] ;\
+ ldr lr, .LCirq ;\
+ ldr lr, [lr] ;\
+ str lr, [sp, #8] ;\
+ stmfd sp!, {r0 - r12} ;\
+ mov r0, #-1 ;\
+ str r0, [sp, #S_OLD_R0] ;\
+ mov fp, #0
+
+#define USER_RESTORE_ALL \
+ ldmia sp, {r0 - lr}^ ;\
+ mov r0, r0 ;\
+ add sp, sp, #15*4 ;\
+ ldr lr, [sp], #8 ;\
+ movs pc, lr
+
+#define SVC_RESTORE_ALL \
+ ldmfd sp, {r0 - pc}^
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ */
+_unexp_fiq: ldr sp, .LCfiq
+ mov r12, #IOC_BASE
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ teqp pc, #0x0c000003
+ mov r0, r0
+ stmfd sp!, {r0 - r3, ip, lr}
+ adr r0, Lfiqmsg
+ bl SYMBOL_NAME(printk)
+ ldmfd sp!, {r0 - r3, ip, lr}
+ teqp pc, #0x0c000001
+ mov r0, r0
+ movs pc, lr
+
+Lfiqmsg: .ascii "*** Unexpected FIQ\n\0"
+ .align
+
+.LCfiq: .word __temp_fiq
+.LCirq: .word __temp_irq
+
+/*=============================================================================
+ * Undefined instruction handler
+ *-----------------------------------------------------------------------------
+ * Handles floating point instructions
+ */
+vector_undefinstr:
+ tst lr,#3
+ bne __und_svc
+ save_user_regs
+ mov fp, #0
+ teqp pc, #I_BIT | MODE_SVC
+.Lbug_undef:
+ adr r1, .LC2
+ ldmia r1, {r1, r4}
+ ldr r1, [r1]
+ get_current_task r2
+ teq r1, r2
+ stmnefd sp!, {ip, lr}
+ blne SYMBOL_NAME(math_state_restore)
+ ldmnefd sp!, {ip, lr}
+ ldr pc, [r4] @ Call FP module USR entry point
+
+ .globl SYMBOL_NAME(fpundefinstr)
+SYMBOL_NAME(fpundefinstr): @ Called by FP module on undefined instr
+SYMBOL_NAME(fpundefinstrsvc):
+ mov r0, lr
+ mov r1, sp
+ teqp pc, #MODE_SVC
+ bl SYMBOL_NAME(do_undefinstr)
+ b ret_from_exception @ Normal FP exit
+
+__und_svc: SVC_SAVE_ALL @ Non-user mode
+ mask_pc r0, lr
+ and r2, lr, #3
+ sub r0, r0, #4
+ mov r1, sp
+ bl SYMBOL_NAME(do_undefinstr)
+ SVC_RESTORE_ALL
+
+.LC2: .word SYMBOL_NAME(last_task_used_math)
+ .word SYMBOL_NAME(fp_enter)
+
+/*=============================================================================
+ * Prefetch abort handler
+ *-----------------------------------------------------------------------------
+ */
+
+vector_prefetch:
+ sub lr, lr, #4
+ tst lr, #3
+ bne __pabt_invalid
+ save_user_regs
+ teqp pc, #0x00000003 @ NOT a problem - doesnt change mode
+ mask_pc r0, lr @ Address of abort
+ mov r1, #FAULT_CODE_PREFETCH|FAULT_CODE_USER @ Error code
+ mov r2, sp @ Tasks registers
+ bl SYMBOL_NAME(do_PrefetchAbort)
+ teq r0, #0 @ If non-zero, we believe this abort..
+ bne ret_from_sys_call
+#ifdef DEBUG_UNDEF
+ adr r0, t
+ bl SYMBOL_NAME(printk)
+#endif
+ ldr lr, [sp,#S_PC] @ program to test this on. I think its
+ b .Lbug_undef @ broken at the moment though!)
+
+__pabt_invalid: SVC_SAVE_ALL
+ mov r0, sp @ Prefetch aborts are definitely *not*
+ mov r1, #BAD_PREFETCH @ allowed in non-user modes. We cant
+ and r2, lr, #3 @ recover from this problem.
+ b SYMBOL_NAME(bad_mode)
+
+#ifdef DEBUG_UNDEF
+t: .ascii "*** undef ***\r\n\0"
+ .align
+#endif
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen).
+ * In order to debug the reason for address exceptions in non-user modes,
+ * we have to obtain all the registers so that we can see what's going on.
+ */
+
+vector_addrexcptn:
+ sub lr, lr, #8
+ tst lr, #3
+ bne Laddrexcptn_not_user
+ save_user_regs
+ teq pc, #0x00000003
+ mask_pc r0, lr @ Point to instruction
+ mov r1, sp @ Point to registers
+ mov r2, #0x400
+ mov lr, pc
+ bl SYMBOL_NAME(do_excpt)
+ b ret_from_exception
+
+Laddrexcptn_not_user:
+ SVC_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne Laddrexcptn_illegal_mode
+ teqp pc, #0x00000003 @ NOT a problem - doesnt change mode
+ mask_pc r0, lr
+ mov r1, sp
+ orr r2, r2, #0x400
+ bl SYMBOL_NAME(do_excpt)
+ ldmia sp, {r0 - lr} @ I cant remember the reason I changed this...
+ add sp, sp, #15*4
+ movs pc, lr
+
+Laddrexcptn_illegal_mode:
+ mov r0, sp
+ str lr, [sp, #-4]!
+ orr r1, r2, #0x0c000000
+ teqp r1, #0 @ change into mode (wont be user mode)
+ mov r0, r0
+ mov r1, r8 @ Any register from r8 - r14 can be banked
+ mov r2, r9
+ mov r3, r10
+ mov r4, r11
+ mov r5, r12
+ mov r6, r13
+ mov r7, r14
+ teqp pc, #0x04000003 @ back to svc
+ mov r0, r0
+ stmfd sp!, {r1-r7}
+ ldmia r0, {r0-r7}
+ stmfd sp!, {r0-r7}
+ mov r0, sp
+ mov r1, #BAD_ADDREXCPTN
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Interrupt (IRQ) handler
+ *-----------------------------------------------------------------------------
+ * Note: if in user mode, then *no* kernel routine is running, so dont have
+ * to save svc lr
+ * (r13 points to irq temp save area)
+ */
+
+vector_IRQ: ldr r13, .LCirq @ Ill leave this one in just in case...
+ sub lr, lr, #4
+ str lr, [r13]
+ tst lr, #3
+ bne __irq_svc
+ teqp pc, #0x08000003
+ mov r0, r0
+ ldr lr, .LCirq
+ ldr lr, [lr]
+ save_user_regs
+
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ orr lr, lr, #3 @ Force SVC
+ bne do_IRQ
+ b ret_with_reschedule
+
+ irq_prio_table
+
+__irq_svc: teqp pc, #0x08000003
+ mov r0, r0
+ SVC_IRQ_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne __irq_invalid
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ orr lr, lr, #3 @ Force SVC
+ bne do_IRQ @ Returns to 1b
+ SVC_RESTORE_ALL
+
+__irq_invalid: mov r0, sp
+ mov r1, #BAD_IRQ
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Data abort handler code
+ *-----------------------------------------------------------------------------
+ *
+ * This handles both exceptions from user and SVC modes, computes the address
+ * range of the problem, and does any correction that is required. It then
+ * calls the kernel data abort routine.
+ *
+ * This is where I wish that the ARM would tell you which address aborted.
+ */
+
+vector_data: sub lr, lr, #8 @ Correct lr
+ tst lr, #3
+ bne Ldata_not_user
+ save_user_regs
+ teqp pc, #0x00000003 @ NOT a problem - doesnt change mode
+ mask_pc r0, lr
+ mov r2, #FAULT_CODE_USER
+ bl Ldata_do
+ b ret_from_exception
+
+Ldata_not_user:
+ SVC_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne Ldata_illegal_mode
+ tst lr, #0x08000000
+ teqeqp pc, #0x00000003 @ NOT a problem - doesnt change mode
+ mask_pc r0, lr
+ mov r2, #0
+ bl Ldata_do
+ SVC_RESTORE_ALL
+
+Ldata_illegal_mode:
+ mov r0, sp
+ mov r1, #BAD_DATA
+ b SYMBOL_NAME(bad_mode)
+
+Ldata_do: mov r3, sp
+ ldr r4, [r0] @ Get instruction
+ tst r4, #1 << 20 @ Check to see if it is a write instruction
+ orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
+ mov r1, r4, lsr #22 @ Now branch to the relevant processing routine
+ and r1, r1, #15 << 2
+ add pc, pc, r1
+ movs pc, lr
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_ldrstr_post @ ldr rd, [rn], #m
+ b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal
+ b Ldata_ldrstr_post @ ldr rd, [rn], rm
+ b Ldata_ldrstr_regindex @ ldr rd, [rn, rm]
+ b Ldata_ldmstm @ ldm*a rn, <rlist>
+ b Ldata_ldmstm @ ldm*b rn, <rlist>
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+ b Ldata_ldcstc_pre @ ldc rd, [rn, #m]
+ b Ldata_unknown
+Ldata_unknown: @ Part of jumptable
+ ldr r3, [sp, #15 * 4]
+ str r3, [sp, #-4]!
+ mov r1, r1, lsr #2
+ mov r2, r0
+ mov r3, r4
+ adr r0, Ltt
+ bl SYMBOL_NAME(printk)
+Llpxx: b Llpxx
+
+Ltt: .ascii "Unknown data abort code %d [pc=%p, *pc=%p]\nLR=%p\0"
+ .align
+
+Ldata_ldrstr_post:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRPOST
+ orr r2, r2, #FAULT_CODE_LDRSTRPOST
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldrstr_numindex:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r4, lsl #20
+ tst r4, #1 << 23
+ addne r0, r0, r1, lsr #20
+ subeq r0, r0, r1, lsr #20
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRPRE
+ orr r2, r2, #FAULT_CODE_LDRSTRPRE
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldrstr_regindex:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ and r7, r4, #15
+ teq r7, #15 @ Check for PC
+ ldr r7, [r3, r7, lsl #2] @ Get Rm
+ biceq r7, r7, #PCMASK
+ and r8, r4, #0x60 @ Get shift types
+ mov r9, r4, lsr #7 @ Get shift amount
+ and r9, r9, #31
+ teq r8, #0
+ moveq r7, r7, lsl r9
+ teq r8, #0x20 @ LSR shift
+ moveq r7, r7, lsr r9
+ teq r8, #0x40 @ ASR shift
+ moveq r7, r7, asr r9
+ teq r8, #0x60 @ ROR shift
+ moveq r7, r7, ror r9
+ tst r4, #1 << 23
+ addne r0, r0, r7
+ subeq r0, r0, r7 @ Apply correction
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRREG
+ orr r2, r2, #FAULT_CODE_LDRSTRREG
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldmstm:
+ mov r7, #0x11
+ orr r7, r7, r7, lsl #8
+ and r0, r4, r7
+ and r1, r4, r7, lsl #1
+ add r0, r0, r1, lsr #1
+ and r1, r4, r7, lsl #2
+ add r0, r0, r1, lsr #2
+ and r1, r4, r7, lsl #3
+ add r0, r0, r1, lsr #3
+ add r0, r0, r0, lsr #8
+ add r0, r0, r0, lsr #4
+ and r7, r0, #15 @ r7 = no. of registers to transfer.
+ mov r5, r4, lsr #14 @ Get Rn
+ and r5, r5, #15 << 2
+ ldr r0, [r3, r5] @ Get reg
+ eor r6, r4, r4, lsl #2
+ tst r6, #1 << 23 @ Check inc/dec ^ writeback
+ rsbeq r7, r7, #0
+ add r7, r0, r7, lsl #2 @ Do correction (signed)
+ subne r1, r7, #1
+ subeq r1, r0, #1
+ moveq r0, r7
+ tst r4, #1 << 21 @ Check writeback
+ strne r7, [r3, r5]
+ eor r6, r4, r4, lsl #1
+ tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec
+ addeq r0, r0, #4
+ addeq r1, r1, #4
+ teq r5, #15*4 @ CHECK FOR PC
+ biceq r1, r1, #PCMASK
+ biceq r0, r0, #PCMASK
+#ifdef FAULT_CODE_LDMSTM
+ orr r2, r2, #FAULT_CODE_LDMSTM
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldcstc_pre:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r4, lsl #24 @ Get offset
+ tst r4, #1 << 23
+ addne r0, r0, r1, lsr #24
+ subeq r0, r0, r1, lsr #24
+ mov r1, r0
+#ifdef FAULT_CODE_LDCSTC
+ orr r2, r2, #FAULT_CODE_LDCSTC
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+#include "entry-common.S"
+
+ .data
+
+__temp_irq: .word 0 @ saved lr_irq
+__temp_fiq: .space 128
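
The Ldata_ldmstm path above works out the base-register correction for an aborted LDM/STM by counting the set bits in the instruction's 16-bit register list; the sequence of 0x1111 masks and shifted adds is a tree-style population count. For reference, the same computation expressed in C (the function and variable names are illustrative):

    /* C rendering of the register-count trick used in Ldata_ldmstm. */
    static unsigned int ldmstm_reg_count(unsigned long instr)
    {
        unsigned long mask = 0x1111;        /* one bit per nibble column of the register list */
        unsigned long n;

        n  =  instr & mask;                 /* column 0 of each nibble */
        n += (instr & (mask << 1)) >> 1;    /* column 1 */
        n += (instr & (mask << 2)) >> 2;    /* column 2 */
        n += (instr & (mask << 3)) >> 3;    /* column 3: n now holds per-nibble counts */
        n += n >> 8;                        /* fold high byte onto low byte */
        n += n >> 4;                        /* fold the two nibbles */
        return n & 15;                      /* like the assembler, 16 registers reads back as 0 */
    }
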
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
new file mode 100644
index 000000000..a2de41f33
--- /dev/null
+++ b/arch/arm/kernel/entry-armv.S
@@ -0,0 +1,671 @@
+/*
+ * linux/arch/arm/kernel/entry-armv.S
+ *
+ * Copyright (C) 1996,1997,1998 Russell King.
+ * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ *
+ * Low-level vector interface routines
+ *
+ * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
+ * it to save wrong values... Be aware!
+ */
+#include <linux/config.h> /* for CONFIG_ARCH_EBSA110 */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/hardware.h>
+
+#include "../lib/constants.h"
+
+ .text
+
+@ Offsets into task structure
+@ ---------------------------
+@
+#define STATE 0
+#define COUNTER 4
+#define PRIORITY 8
+#define FLAGS 12
+#define SIGPENDING 16
+
+#define PF_TRACESYS 0x20
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH 0
+#define BAD_DATA 1
+#define BAD_ADDREXCPTN 2
+#define BAD_IRQ 3
+#define BAD_UNDEFINSTR 4
+
+@ OS version number used in SWIs
+@ RISC OS is 0
+@ RISC iX is 8
+@
+#define OS_NUMBER 9
+
+@
+@ Stack format (ensured by USER_* and SVC_*)
+@
+#define S_FRAME_SIZE 72
+#define S_OLD_R0 68
+#define S_PSR 64
+#define S_PC 60
+#define S_LR 56
+#define S_SP 52
+#define S_IP 48
+#define S_FP 44
+#define S_R10 40
+#define S_R9 36
+#define S_R8 32
+#define S_R7 28
+#define S_R6 24
+#define S_R5 20
+#define S_R4 16
+#define S_R3 12
+#define S_R2 8
+#define S_R1 4
+#define S_R0 0
+
+#ifdef IOC_BASE
+/* IOC / IOMD based hardware */
+ .equ ioc_base_high, IOC_BASE & 0xff000000
+ .equ ioc_base_low, IOC_BASE & 0x00ff0000
+ .macro disable_fiq
+ mov r12, #ioc_base_high
+ .if ioc_base_low
+ orr r12, r12, #ioc_base_low
+ .endif
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #ioc_base_high @ point at IOC
+ .if ioc_base_low
+ orr r4, r4, #ioc_base_low
+ .endif
+ ldrb \irqnr, [r4, #0x24] @ get high priority first
+ adr \base, irq_prio_h
+ teq \irqnr, #0
+#ifdef IOMD_BASE
+ ldreqb \irqnr, [r4, #0x1f4] @ get dma
+ adreq \base, irq_prio_d
+ teqeq \irqnr, #0
+#endif
+ ldreqb \irqnr, [r4, #0x14] @ get low priority
+ adreq \base, irq_prio_l
+ .endm
+
+/*
+ * Interrupt table (incorporates priority)
+ */
+ .macro irq_prio_table
+irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+#ifdef IOMD_BASE
+irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+#endif
+irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .endm
+
+#elif defined(CONFIG_ARCH_EBSA110)
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #0xf3000000
+ ldrb \irqnr, [r4] @ get interrupts
+ adr \base, irq_prio_ebsa110
+ .endm
+
+ .macro irq_prio_table
+irq_prio_ebsa110:
+ .byte 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 4, 4, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+
+ .byte 7, 0, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 4, 4, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .endm
+
+#else
+#error Unknown architecture
+#endif
+
+/*============================================================================
+ * For entry-common.S
+ */
+
+ .macro save_user_regs
+ sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ Calling r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Calling sp, lr
+ mov r7, r0
+ mrs r6, spsr
+ mov r5, lr
+ stmia r8, {r5, r6, r7} @ Save calling PC, CPSR, OLD_R0
+ .endm
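+
+@
+@ The frame built by save_user_regs matches struct pt_regs: r0 - r12, the user
+@ sp and lr (stored with the ^ form from SVC mode), then the exception PC, the
+@ saved PSR and the original r0 (OLD_R0) at the end of the frame.
+@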
+
+ .macro restore_user_regs
+ mrs r0, cpsr @ disable IRQs
+ orr r0, r0, #I_BIT
+ msr cpsr, r0
+ ldr r0, [sp, #S_PSR] @ Get calling cpsr
+ msr spsr, r0 @ save in spsr_svc
+ ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
+	mov	r0, r0				@ nop - don't touch banked regs immediately after ldm {..}^
+ add sp, sp, #S_PC
+ ldr lr, [sp], #S_FRAME_SIZE - S_PC @ Get PC and jump over PC, PSR, OLD_R0
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro mask_pc, rd, rm
+ .endm
+
+ .macro arm700_bug_check, instr, temp
+ and \temp, \instr, #0x0f000000 @ check for SWI
+ teq \temp, #0x0f000000
+ bne .Larm700bug
+ .endm
+
+ .macro enable_irqs, temp
+ mrs \temp, cpsr
+ bic \temp, \temp, #I_BIT
+ msr cpsr, \temp
+ .endm
+
+ .macro initialise_traps_extra
+ mrs r0, cpsr
+ bic r0, r0, #31
+ orr r0, r0, #0xd3
+ msr cpsr, r0
+ .endm
+
+
+.Larm700bug: str lr, [r8]
+ ldr r0, [sp, #S_PSR] @ Get calling cpsr
+ msr spsr, r0
+ ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
+	mov	r0, r0				@ nop - don't touch banked regs immediately after ldm {..}^
+ add sp, sp, #S_PC
+ ldr lr, [sp], #S_FRAME_SIZE - S_PC @ Get PC and jump over PC, PSR, OLD_R0
+ movs pc, lr
+
+
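+@
+@ Kernel stacks are 8K and the task_struct sits at the bottom of that block,
+@ so clearing the low 13 bits of sp yields the current task pointer.
+@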
+ .macro get_current_task, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+
+ /*
+ * Like adr, but force SVC mode (if required)
+ */
+ .macro adrsvc, cond, reg, label
+ adr\cond \reg, \label
+ .endm
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+ * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
+ * Basically to switch modes, we *HAVE* to clobber one register... brain
+ * damage alert! I don't think that we can execute any code in here in any
+ * other mode than FIQ... Ok you can switch to another mode, but you can't
+ * get out of that mode without clobbering one register.
+ */
+_unexp_fiq: disable_fiq
+ subs pc, lr, #4
+
+/*=============================================================================
+ * Interrupt entry dispatcher
+ *-----------------------------------------------------------------------------
+ * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+vector_IRQ: @
+ @ save mode specific registers
+ @
+ ldr r13, .LCirq
+ sub lr, lr, #4
+ str lr, [r13] @ save lr_IRQ
+ mrs lr, spsr
+ str lr, [r13, #4] @ save spsr_IRQ
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr @ switch to SVC mode
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __irq_invalid @ 4 - 15
+ b __irq_usr @ 0 (USR_26 / USR_32)
+ b __irq_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __irq_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __irq_svc @ 3 (SVC_26 / SVC_32)
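+@
+@ The "addlts pc, pc, lr, lsl #2" above does two jobs: pc reads as the address
+@ of the second following instruction, so adding (mode << 2) indexes the branch
+@ table, and the S suffix with pc as destination copies spsr (set to SVC above)
+@ into cpsr - which is why it "changes mode and branches".
+@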
+/*
+ *------------------------------------------------------------------------------------------------
+ * Undef instr entry dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+.LCirq: .word __temp_irq
+.LCund: .word __temp_und
+.LCabt: .word __temp_abt
+
+vector_undefinstr:
+ @
+ @ save mode specific registers
+ @
+ ldr r13, [pc, #.LCund - . - 8]
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __und_invalid @ 4 - 15
+ b __und_usr @ 0 (USR_26 / USR_32)
+ b __und_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __und_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __und_svc @ 3 (SVC_26 / SVC_32)
+/*
+ *------------------------------------------------------------------------------------------------
+ * Prefetch abort dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+vector_prefetch:
+ @
+ @ save mode specific registers
+ @
+ sub lr, lr, #4
+ ldr r13, .LCabt
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ adds pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __pabt_invalid @ 4 - 15
+ b __pabt_usr @ 0 (USR_26 / USR_32)
+ b __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __pabt_invalid @ 3 (SVC_26 / SVC_32)
+/*
+ *------------------------------------------------------------------------------------------------
+ * Data abort dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+vector_data: @
+ @ save mode specific registers
+ @
+ sub lr, lr, #8
+ ldr r13, .LCabt
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode & branches
+ b __dabt_invalid @ 4 - 15
+ b __dabt_usr @ 0 (USR_26 / USR_32)
+ b __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __dabt_svc @ 3 (SVC_26 / SVC_32)
+
+/*=============================================================================
+ * Undefined instruction handler
+ *-----------------------------------------------------------------------------
+ * Handles floating point instructions
+ */
+__und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ Save r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Save user r0 - r12
+ ldr r4, .LCund
+ ldmia r4, {r5 - r7}
+ stmia r8, {r5 - r7} @ Save USR pc, cpsr, old_r0
+ mov fp, #0
+
+ adr r1, .LC2
+ ldmia r1, {r1, r4}
+ ldr r1, [r1]
+ get_current_task r2
+ teq r1, r2
+ blne SYMBOL_NAME(math_state_restore)
+	adrsvc	al, r9, SYMBOL_NAME(fpreturn)
+ adrsvc al, lr, SYMBOL_NAME(fpundefinstr)
+ ldr pc, [r4] @ Call FP module USR entry point
+
+ .globl SYMBOL_NAME(fpundefinstr)
+SYMBOL_NAME(fpundefinstr): @ Called by FP module on undefined instr
+ mov r0, lr
+ mov r1, sp
+ mrs r4, cpsr @ Enable interrupts
+ bic r4, r4, #I_BIT
+ msr cpsr, r4
+ bl SYMBOL_NAME(do_undefinstr)
+ b ret_from_exception @ Normal FP exit
+
+__und_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ mov r6, lr
+ ldr r7, .LCund
+ ldmia r7, {r7 - r9}
+ add r5, sp, #S_FRAME_SIZE
+ add r4, sp, #S_SP
+	stmia	r4, {r5 - r9}			@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+
+ adr r1, .LC2
+ ldmia r1, {r1, r4}
+ ldr r1, [r1]
+ mov r2, sp, lsr #13
+ mov r2, r2, lsl #13
+ teq r1, r2
+ blne SYMBOL_NAME(math_state_restore)
+ adrsvc al, r9, SYMBOL_NAME(fpreturnsvc)
+ adrsvc al, lr, SYMBOL_NAME(fpundefinstrsvc)
+ ldr pc, [r4] @ Call FP module SVC entry point
+
+ .globl SYMBOL_NAME(fpundefinstrsvc)
+SYMBOL_NAME(fpundefinstrsvc):
+ mov r0, r5 @ unsigned long pc
+ mov r1, sp @ struct pt_regs *regs
+ bl SYMBOL_NAME(do_undefinstr)
+
+ .globl SYMBOL_NAME(fpreturnsvc)
+SYMBOL_NAME(fpreturnsvc):
+ ldr lr, [sp, #S_PSR] @ Get SVC cpsr
+ msr spsr, lr
+ ldmia sp, {r0 - pc}^ @ Restore SVC registers
+
+.LC2: .word SYMBOL_NAME(last_task_used_math)
+ .word SYMBOL_NAME(fp_enter)
+
+__und_invalid: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - lr}
+ mov r7, r0
+ ldr r4, .LCund
+ ldmia r4, {r5, r6} @ Get UND/IRQ/FIQ/ABT pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7} @ Save UND/IRQ/FIQ/ABT pc, cpsr, old_r0
+ mov r0, sp @ struct pt_regs *regs
+ mov r1, #BAD_UNDEFINSTR @ int reason
+ and r2, r6, #31 @ int mode
+ b SYMBOL_NAME(bad_mode) @ Does not ever return...
+/*=============================================================================
+ * Prefetch abort handler
+ *-----------------------------------------------------------------------------
+ */
+pabtmsg: .ascii "Pabt: %08lX\n\0"
+ .align
+__pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ Save r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr
+ ldr r4, .LCabt
+ ldmia r4, {r5 - r7} @ Get USR pc, cpsr
+ stmia r8, {r5 - r7} @ Save USR pc, cpsr, old_r0
+
+ mrs r7, cpsr @ Enable interrupts if they were
+ bic r7, r7, #I_BIT @ previously
+ msr cpsr, r7
+ mov r0, r5 @ address (pc)
+ mov r1, sp @ regs
+ bl SYMBOL_NAME(do_PrefetchAbort) @ call abort handler
+ teq r0, #0 @ Does this still apply???
+ bne ret_from_exception @ Return from exception
+#ifdef DEBUG_UNDEF
+ adr r0, t
+ bl SYMBOL_NAME(printk)
+#endif
+ mov r0, r5
+ mov r1, sp
+ and r2, r6, #31
+ bl SYMBOL_NAME(do_undefinstr)
+ ldr lr, [sp, #S_PSR] @ Get USR cpsr
+ msr spsr, lr
+ ldmia sp, {r0 - pc}^ @ Restore USR registers
+
+__pabt_invalid: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - lr} @ Save XXX r0 - lr
+ mov r7, r0 @ OLD R0
+ ldr r4, .LCabt
+ ldmia r4, {r5 - r7} @ Get XXX pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0
+ mov r0, sp @ Prefetch aborts are definitely *not*
+	mov	r1, #BAD_PREFETCH		@ allowed in non-user modes.  We can't
+ and r2, r6, #31 @ recover from this problem.
+ b SYMBOL_NAME(bad_mode)
+
+#ifdef DEBUG_UNDEF
+t: .ascii "*** undef ***\r\n\0"
+ .align
+#endif
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
+ * Interrupt (IRQ) handler
+ *-----------------------------------------------------------------------------
+ */
+__irq_usr: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^
+ ldr r4, .LCirq
+ ldmia r4, {r5 - r7} @ get saved PC, SPSR
+ stmia r8, {r5 - r7} @ save pc, psr, old_r0
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrsvc ne, lr, 1b
+ bne do_IRQ
+ b ret_with_reschedule
+
+ irq_prio_table
+
+__irq_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ mov r6, lr
+ ldr r7, .LCirq
+ ldmia r7, {r7 - r9}
+ add r5, sp, #S_FRAME_SIZE
+ add r4, sp, #S_SP
+	stmia	r4, {r5, r6, r7, r8, r9}	@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrsvc ne, lr, 1b
+ bne do_IRQ
+ ldr r0, [sp, #S_PSR]
+ msr spsr, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+__irq_invalid: sub sp, sp, #S_FRAME_SIZE @ Allocate space on stack for frame
+ stmfd sp, {r0 - lr} @ Save r0 - lr
+ mov r7, #-1
+ ldr r4, .LCirq
+ ldmia r4, {r5, r6} @ get saved pc, psr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7}
+ mov fp, #0
+ mov r0, sp
+ mov r1, #BAD_IRQ
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Data abort handler code
+ *-----------------------------------------------------------------------------
+ */
+.LCprocfns: .word SYMBOL_NAME(processor)
+
+__dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ save r0 - r12
+ add r3, sp, #S_PC
+ stmdb r3, {sp, lr}^
+ ldr r0, .LCabt
+ ldmia r0, {r0 - r2} @ Get USR pc, cpsr
+ stmia r3, {r0 - r2} @ Save USR pc, cpsr, old_r0
+ mov fp, #0
+ mrs r2, cpsr @ Enable interrupts if they were
+ bic r2, r2, #I_BIT @ previously
+ msr cpsr, r2
+ ldr r2, .LCprocfns
+ mov lr, pc
+ ldr pc, [r2, #8] @ call processor specific code
+ mov r3, sp
+ bl SYMBOL_NAME(do_DataAbort)
+ b ret_from_sys_call
+
+__dabt_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ ldr r2, .LCabt
+ add r0, sp, #S_FRAME_SIZE
+ add r5, sp, #S_SP
+ mov r1, lr
+ ldmia r2, {r2 - r4} @ get pc, cpsr
+	stmia	r5, {r0 - r4}			@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+ tst r3, #I_BIT
+ mrseq r0, cpsr @ Enable interrupts if they were
+ biceq r0, r0, #I_BIT @ previously
+ msreq cpsr, r0
+ mov r0, r2
+ ldr r2, .LCprocfns
+ mov lr, pc
+ ldr pc, [r2, #8] @ call processor specific code
+ mov r3, sp
+ bl SYMBOL_NAME(do_DataAbort)
+ ldr r0, [sp, #S_PSR]
+ msr spsr, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+__dabt_invalid: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - lr} @ Save SVC r0 - lr [lr *should* be intact]
+ mov r7, r0
+ ldr r4, .LCabt
+ ldmia r4, {r5, r6} @ Get SVC pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7} @ Save SVC pc, cpsr, old_r0
+ mov r0, sp
+ mov r1, #BAD_DATA
+ and r2, r6, #31
+ b SYMBOL_NAME(bad_mode)
+
+
+#include "entry-common.S"
+
+ .data
+
+__temp_irq: .word 0 @ saved lr_irq
+ .word 0 @ saved spsr_irq
+ .word -1 @ old_r0
+__temp_und: .word 0 @ Saved lr_und
+ .word 0 @ Saved spsr_und
+ .word -1 @ old_r0
+__temp_abt: .word 0 @ Saved lr_abt
+ .word 0 @ Saved spsr_abt
+ .word -1 @ old_r0
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
new file mode 100644
index 000000000..5725e1781
--- /dev/null
+++ b/arch/arm/kernel/entry-common.S
@@ -0,0 +1,283 @@
+/*
+ *=============================================================================
+ * Low-level interface code
+ *-----------------------------------------------------------------------------
+ * Trap initialisation
+ *-----------------------------------------------------------------------------
+ *
+ * Note - FIQ code has changed.  The default is a couple of words at 0x1c, 0x20
+ * that call _unexp_fiq.  However, we now copy the FIQ routine to 0x1c (which
+ * removes some excess cycles).
+ *
+ * What we need to put into 0-0x1c are ldrs to branch to 0xC0000000
+ * (the kernel).
+ * 0x1c onwards is reserved for FIQ, so the actual addresses to jump to are
+ * allocated from 0xe4 onwards.
+ */
+/*
+ * these go into 0x00
+ */
+.Lbranches: swi SYS_ERROR0
+ ldr pc, .Lbranches + 0xe4
+ ldr pc, .Lbranches + 0xe8
+ ldr pc, .Lbranches + 0xec
+ ldr pc, .Lbranches + 0xf0
+ ldr pc, .Lbranches + 0xf4
+ ldr pc, .Lbranches + 0xf8
+ ldr pc, .Lbranches + 0xfc
+/*
+ * this is put into 0xe4 and above
+ */
+.Ljump_addresses:
+ .word vector_undefinstr @ 0xe4
+ .word vector_swi @ 0xe8
+ .word vector_prefetch @ 0xec
+ .word vector_data @ 0xf0
+ .word vector_addrexcptn @ 0xf4
+ .word vector_IRQ @ 0xf8
+ .word _unexp_fiq @ 0xfc
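+/*
+ * The "ldr pc, .Lbranches + 0xe4" (and following) instructions are pc-relative,
+ * so once trap_init copies .Lbranches to address 0 and .Ljump_addresses to 0xe4,
+ * each load picks up the corresponding handler address from 0xe4-0xfc.
+ */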
+/*
+ * initialise the trap system
+ */
+ENTRY(trap_init)
+ stmfd sp!, {r4 - r7, lr}
+ initialise_traps_extra
+ mov r0, #0xe4
+ adr r1, .Ljump_addresses
+ ldmia r1, {r1 - r6}
+ stmia r0, {r1 - r6}
+ mov r0, #0
+ adr r1, .Lbranches
+ ldmia r1, {r1 - r7}
+ stmia r0, {r1 - r7}
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ *
+ * We now handle sys-call tracing, and the errno in the task structure.
+ * Still have a problem with >4 arguments for functions.  There's only
+ * a couple of functions in the code that have 5 arguments, so I'm not
+ * too worried.
+ */
+
+#include "calls.S"
+
+vector_swi: save_user_regs
+ mov fp, #0
+ mask_pc lr, lr
+ ldr r6, [lr, #-4]! @ get SWI instruction
+ arm700_bug_check r6, r7
+ enable_irqs r7
+
+ bic r6, r6, #0xff000000 @ mask off SWI op-code
+ eor r6, r6, #OS_NUMBER<<20 @ check OS number
+ cmp r6, #NR_SYSCALLS @ check upper syscall limit
+ bcs 2f
+
+ get_current_task r5
+ ldr ip, [r5, #FLAGS] @ check for syscall tracing
+ tst ip, #PF_TRACESYS
+ bne 1f
+
+ adr ip, SYMBOL_NAME(sys_call_table)
+ str r4, [sp, #-4]! @ new style: (r0 = arg1, r5 = arg5)
+ mov lr, pc
+ ldr pc, [ip, r6, lsl #2] @ call sys routine
+ add sp, sp, #4
+ str r0, [sp, #S_R0] @ returned r0
+ b ret_from_sys_call
+
+1: ldr r7, [sp, #S_IP] @ save old IP
+ mov r0, #0
+ str r7, [sp, #S_IP] @ trace entry [IP = 0]
+ bl SYMBOL_NAME(syscall_trace)
+ str r7, [sp, #S_IP]
+ ldmia sp, {r0 - r3} @ have to reload r0 - r3
+ adr ip, SYMBOL_NAME(sys_call_table)
+ str r4, [sp, #-4]! @ new style: (r0 = arg1, r5 = arg5)
+ mov lr, pc
+ ldr pc, [ip, r6, lsl #2] @ call sys routine
+ add sp, sp, #4
+ str r0, [sp, #S_R0] @ returned r0
+ mov r0, #1
+ str r0, [sp, #S_IP] @ trace exit [IP = 1]
+ bl SYMBOL_NAME(syscall_trace)
+ str r7, [sp, #S_IP]
+ b ret_from_sys_call
+
+2: tst r6, #0x00f00000 @ is it a Unix SWI?
+ bne 3f
+ cmp r6, #(KSWI_SYS_BASE - KSWI_BASE)
+ bcc 4f @ not private func
+ bic r0, r6, #0x000f0000
+ mov r1, sp
+ bl SYMBOL_NAME(arm_syscall)
+ b ret_from_sys_call
+
+3: eor r0, r6, #OS_NUMBER<<20 @ Put OS number back
+ mov r1, sp
+ bl SYMBOL_NAME(deferred)
+ ldmfd sp, {r0 - r3}
+ b ret_from_sys_call
+
+4: bl SYMBOL_NAME(sys_ni_syscall)
+ str r0, [sp, #0] @ returned r0
+ b ret_from_sys_call
+
+@ r0 = syscall number
+@ r1 = syscall r0
+@ r5 = syscall r4
+@ ip = syscall table
+SYMBOL_NAME(sys_syscall):
+ mov r6, r0
+ eor r6, r6, #OS_NUMBER << 20
+ cmp r6, #NR_SYSCALLS @ check range
+ movgt r0, #-ENOSYS
+ movgt pc, lr
+	add	sp, sp, #4		@ remove the r4 pushed by the SWI entry code
+ ldmib sp, {r0 - r4} @ get our args
+ str r4, [sp, #-4]! @ Put our arg on the stack
+ ldr pc, [ip, r6, lsl #2]
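+
+@
+@ sys_syscall(number, arg1..) re-enters the syscall table: the first argument
+@ selects the call and the remaining arguments are shifted down into r0 - r4
+@ before the indirect jump above.
+@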
+
+ENTRY(sys_call_table)
+#include "calls.S"
+
+/*============================================================================
+ * Special system call wrappers
+ */
+sys_fork_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_fork)
+
+sys_execve_wrapper:
+ add r3, sp, #4
+ b SYMBOL_NAME(sys_execve)
+
+sys_mount_wrapper:
+ mov r6, lr
+ add r5, sp, #4
+ str r5, [sp]
+ str r4, [sp, #-4]!
+ bl SYMBOL_NAME(sys_compat_mount)
+ add sp, sp, #4
+ RETINSTR(mov,pc,r6)
+
+sys_clone_wapper:
+ add r2, sp, #4
+ b SYMBOL_NAME(sys_clone)
+
+sys_llseek_wrapper:
+ mov r6, lr
+ add r5, sp, #4
+ str r5, [sp]
+ str r4, [sp, #-4]!
+ bl SYMBOL_NAME(sys_compat_llseek)
+ add sp, sp, #4
+ RETINSTR(mov,pc,r6)
+
+sys_sigsuspend_wrapper:
+ add r3, sp, #4
+ b SYMBOL_NAME(sys_sigsuspend)
+
+sys_rt_sigsuspend_wrapper:
+ add r2, sp, #4
+ b SYMBOL_NAME(sys_rt_sigsuspend)
+
+sys_sigreturn_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_sigreturn)
+
+sys_rt_sigreturn_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_rt_sigreturn)
+
+/*============================================================================
+ * All exits to user mode from the kernel go through this code.
+ */
+
+ .globl ret_from_sys_call
+
+ .globl SYMBOL_NAME(fpreturn)
+SYMBOL_NAME(fpreturn):
+ret_from_exception:
+ adr r0, 1f
+ ldmia r0, {r0, r1}
+ ldr r0, [r0]
+ ldr r1, [r1]
+ tst r0, r1
+ blne SYMBOL_NAME(do_bottom_half)
+ret_from_intr: ldr r0, [sp, #S_PSR]
+ tst r0, #3
+ beq ret_with_reschedule
+ b ret_from_all
+
+ret_signal: mov r1, sp
+ adrsvc al, lr, ret_from_all
+ b SYMBOL_NAME(do_signal)
+
+2: bl SYMBOL_NAME(schedule)
+
+ret_from_sys_call:
+ adr r0, 1f
+ ldmia r0, {r0, r1}
+ ldr r0, [r0]
+ ldr r1, [r1]
+ tst r0, r1
+ adrsvc ne, lr, ret_from_intr
+ bne SYMBOL_NAME(do_bottom_half)
+
+ret_with_reschedule:
+ ldr r0, 1f + 8
+ ldr r0, [r0]
+ teq r0, #0
+ bne 2b
+
+ get_current_task r1
+ ldr r1, [r1, #SIGPENDING]
+ teq r1, #0
+ bne ret_signal
+
+ret_from_all: restore_user_regs
+
+1: .word SYMBOL_NAME(bh_mask)
+ .word SYMBOL_NAME(bh_active)
+ .word SYMBOL_NAME(need_resched)
+
+/*============================================================================
+ * FP support
+ */
+
+1: .word SYMBOL_NAME(fp_save)
+ .word SYMBOL_NAME(fp_restore)
+
+.Lfpnull: mov pc, lr
+
+
+/*
+ * Function to call when switching tasks to save FP state
+ */
+ENTRY(fpe_save)
+ ldr r1, 1b
+ ldr pc, [r1]
+
+/*
+ * Function to call when switching tasks to restore FP state
+ */
+ENTRY(fpe_restore)
+ ldr r1, 1b + 4
+ ldr pc, [r1]
+
+
+ .data
+
+ENTRY(fp_enter)
+ .word SYMBOL_NAME(fpundefinstr)
+ .word SYMBOL_NAME(fpundefinstrsvc)
+
+ENTRY(fp_save)
+ .word .Lfpnull
+ENTRY(fp_restore)
+ .word .Lfpnull
+
diff --git a/arch/arm/kernel/head-armo.S b/arch/arm/kernel/head-armo.S
new file mode 100644
index 000000000..7bd69ed5f
--- /dev/null
+++ b/arch/arm/kernel/head-armo.S
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/arm/kernel/head.S
+ *
+ * Copyright (C) 1994, 1995, 1996, 1997 Russell King
+ *
+ * 26-bit kernel startup code
+ */
+#include <linux/linkage.h>
+
+ .text
+ .align
+/*
+ * Entry point.
+ */
+ENTRY(stext)
+ENTRY(_stext)
+__entry: cmp pc, #0x02000000
+ ldrlt pc, LC1 @ if 0x01800000, call at 0x02080000
+ teq r0, #0 @ Check for old calling method
+ blne Loldparams @ Move page if old
+ adr r5, LC0
+ ldmia r5, {r5, r6, sl, sp} @ Setup stack
+ mov r4, #0
+1: cmp r5, sl @ Clear BSS
+ strcc r4, [r5], #4
+ bcc 1b
+ mov r0, #0xea000000 @ Point undef instr to continuation
+ adr r5, Lcontinue - 12
+ orr r5, r0, r5, lsr #2
+ str r5, [r4, #4]
+ mov r2, r4
+ ldr r5, Larm2_id
+ swp r0, r0, [r2] @ check for swp (ARM2 can't)
+ ldr r5, Larm250_id
+ mrc 15, 0, r0, c0, c0 @ check for CP#15 (ARM250 can't)
+ mov r5, r0 @ Use processor ID if we do have CP#15
+Lcontinue: str r5, [r6]
+ mov r5, #0xeb000000 @ Point undef instr vector to itself
+ sub r5, r5, #2
+ str r5, [r4, #4]
+ mov fp, #0
+ b SYMBOL_NAME(start_kernel)
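+
+/*
+ * CPU detection above works by pointing the undefined-instruction vector at
+ * Lcontinue: swp is undefined on ARM2 and CP#15 access is undefined on ARM250,
+ * so whichever instruction faults leaves the matching ID value in r5, while a
+ * CPU with CP#15 falls through and uses its real processor ID.
+ */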
+
+LC1: .word SYMBOL_NAME(_stext)
+LC0: .word SYMBOL_NAME(_edata)
+ .word SYMBOL_NAME(arm_id)
+ .word SYMBOL_NAME(_end)
+ .word SYMBOL_NAME(init_task_union)+8192
+Larm2_id: .long 0x41560200
+Larm250_id: .long 0x41560250
+ .align
+
+Loldparams: mov r4, #0x02000000
+ add r3, r4, #0x00080000
+ add r4, r4, #0x0007c000
+1: ldmia r0!, {r5 - r12}
+ stmia r4!, {r5 - r12}
+ cmp r4, r3
+ blt 1b
+ movs pc, lr
+
+ .align 13
+ENTRY(this_must_match_init_task)
diff --git a/arch/arm/kernel/head-armv.S b/arch/arm/kernel/head-armv.S
new file mode 100644
index 000000000..0af401e43
--- /dev/null
+++ b/arch/arm/kernel/head-armv.S
@@ -0,0 +1,312 @@
+/*
+ * linux/arch/arm/kernel/head32.S
+ *
+ * Copyright (C) 1994, 1995, 1996, 1997 Russell King
+ *
+ * Kernel 32 bit startup code for ARM6 / ARM7 / StrongARM
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+ .text
+ .align
+
+ .globl SYMBOL_NAME(swapper_pg_dir)
+ .equ SYMBOL_NAME(swapper_pg_dir), 0xc0004000
+
+ .globl __stext
+/*
+ * Entry point and restart point. Entry *must* be called with r0 == 0,
+ * MMU off.
+ *
+ * r1 = 0 -> ebsa (Ram @ 0x00000000)
+ * r1 = 1 -> RPC (Ram @ 0x10000000)
+ * r1 = 2 -> ebsit (???)
+ * r1 = 3 -> nexuspci
+ */
+ENTRY(stext)
+ENTRY(_stext)
+__entry:
+ teq r0, #0 @ check for illegal entry...
+ bne .Lerror @ loop indefinitely
+ cmp r1, #4 @ Unknown machine architecture
+ bge .Lerror
+@
+@ First thing to do is to get the page tables set up so that we can call the kernel
+@ in the correct place. This is relocatable code...
+@
+ mrc p15, 0, r9, c0, c0 @ get Processor ID
+@
+@ Read processor ID register (CP#15, CR0).
+@ NOTE: ARM2 & ARM250 cause an undefined instruction exception...
+@ Values are:
+@ XX01XXXX = ARMv4 architecture (StrongARM)
+@ XX00XXXX = ARMv3 architecture
+@ 4156061X = ARM 610
+@ 4156030X = ARM 3
+@ 4156025X = ARM 250
+@ 4156020X = ARM 2
+@
+ adr r10, .LCProcTypes
+1: ldmia r10!, {r5, r6, r8} @ Get Set, Mask, MMU Flags
+ teq r5, #0 @ End of list?
+ beq .Lerror
+ eor r5, r5, r9
+ tst r5, r6
+ addne r10, r10, #8
+ bne 1b
+
+ adr r4, .LCMachTypes
+ add r4, r4, r1, lsl #4
+ ldmia r4, {r4, r5, r6} @ r4 = page dir in physical ram
+
+ mov r0, r4
+ mov r1, #0
+ add r2, r0, #0x4000
+1: str r1, [r0], #4 @ Clear page table
+ teq r0, r2
+ bne 1b
+@
+@ Add enough entries to allow the kernel to be called.
+@ It will sort out the real mapping in paging_init
+@
+ add r0, r4, #0x3000
+ mov r1, #0x0000000c @ SECT_CACHEABLE | SECT_BUFFERABLE
+ orr r1, r1, r8
+ add r1, r1, r5
+ str r1, [r0], #4
+ add r1, r1, #1 << 20
+ str r1, [r0], #4
+ add r1, r1, #1 << 20
+@
+@ Map in IO space
+@
+ add r0, r4, #0x3800
+ orr r1, r6, r8
+ add r2, r0, #0x0800
+1: str r1, [r0], #4
+ add r1, r1, #1 << 20
+ teq r0, r2
+ bne 1b
+@
+@ Map in screen at 0x02000000 & SCREEN2_BASE
+@
+ teq r5, #0
+ addne r0, r4, #0x80 @ 02000000
+ movne r1, #0x02000000
+ orrne r1, r1, r8
+ strne r1, [r0]
+ addne r0, r4, #0x3600 @ d8000000
+ strne r1, [r0]
+@
+@ The following should work on both v3 and v4 implementations
+@
+ mov lr, pc
+ mov pc, r10 @ Call processor flush (returns ctrl reg)
+ adr r5, __entry
+ sub r10, r10, r5 @ Make r10 PIC
+ ldr lr, .Lbranch
+ mcr p15, 0, r0, c1, c0 @ Enable MMU & caches. In 3 instructions
+ @ we lose this page!
+ mov pc, lr
+
+.Lerror: mov r0, #0x02000000
+ mov r1, #0x11
+ orr r1, r1, r1, lsl #8
+ orr r1, r1, r1, lsl #16
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ b .Lerror
+
+.Lbranch: .long .Lalready_done_mmap @ Real address of routine
+
+ @ EBSA (pg dir phys, phys ram start, phys i/o)
+.LCMachTypes: .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 @ Address of page tables (physical)
+ .long 0 @ Address of RAM
+ .long 0xe0000000 @ I/O address
+ .long 0
+
+ @ RPC
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 + 0x10000000
+ .long 0x10000000
+ .long 0x03000000
+ .long 0
+
+ @ EBSIT ???
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000
+ .long 0
+ .long 0xe0000000
+ .long 0
+
+ @ NexusPCI
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 + 0x40000000
+ .long 0x40000000
+ .long 0x10000000
+ .long 0
+
+.LCProcTypes: @ ARM6 / 610
+ .long 0x41560600
+ .long 0xffffff00
+ .long 0x00000c12
+ b .Larmv3_flush_early @ arm v3 flush & ctrl early setup
+ mov pc, lr
+
+ @ ARM7 / 710
+ .long 0x41007000
+ .long 0xfffff000
+ .long 0x00000c12
+ b .Larmv3_flush_late @ arm v3 flush & ctrl late setup
+ mov pc, lr
+
+ @ StrongARM
+ .long 0x4401a100
+ .long 0xfffffff0
+ .long 0x00000c02
+ b .Larmv4_flush_early
+ b .Lsa_fastclock
+
+ .long 0
+
+.LC0: .long SYMBOL_NAME(_edata)
+ .long SYMBOL_NAME(arm_id)
+ .long SYMBOL_NAME(_end)
+ .long SYMBOL_NAME(init_task_union)+8192
+ .align
+
+.Larmv3_flush_early:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0 @ flush caches on v3
+ mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mov r0, #0x3d @ ....S..DPWC.M
+ orr r0, r0, #0x100
+ mov pc, lr
+
+.Larmv3_flush_late:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0 @ flush caches on v3
+ mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mov r0, #0x7d @ ....S.LDPWC.M
+ orr r0, r0, #0x100
+ mov pc, lr
+
+.Larmv4_flush_early:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7 @ flush I,D caches on v4
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+ mcr p15, 0, r0, c8, c7 @ flush I,D TLBs on v4
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mrc p15, 0, r0, c1, c0 @ get control register v4
+ bic r0, r0, #0x0e00
+ bic r0, r0, #0x0002
+ orr r0, r0, #0x003d @ I...S..DPWC.M
+ orr r0, r0, #0x1100 @ v4 supports separate I cache
+ mov pc, lr
+
+.Lsa_fastclock: mcr p15, 0, r4, c15, c1, 2 @ Enable clock switching
+ mov pc, lr
+
+.Lalready_done_mmap:
+ adr r5, __entry @ Add base back in
+ add r10, r10, r5
+ adr r5, .LC0
+ ldmia r5, {r5, r6, r8, sp} @ Setup stack
+ mov r4, #0
+1: cmp r5, r8 @ Clear BSS
+ strcc r4, [r5],#4
+ bcc 1b
+
+ str r9, [r6] @ Save processor ID
+ mov lr, pc
+ add pc, r10, #4 @ Call post-processor init
+ mov fp, #0
+ b SYMBOL_NAME(start_kernel)
+
+#if 1
+/*
+ * Useful debugging routines
+ */
+ .globl _printhex8
+_printhex8: mov r1, #8
+ b printhex
+
+ .globl _printhex4
+_printhex4: mov r1, #4
+ b printhex
+
+ .globl _printhex2
+_printhex2: mov r1, #2
+printhex: ldr r2, =hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ strb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0, lsr #4
+ cmp r1, #10
+ addlt r1, r1, #'0'
+ addge r1, r1, #'a' - 10
+ strb r1, [r3, #-1]!
+ teq r3, r2
+ bne 1b
+ mov r0, r2
+
+ .globl _printascii
+_printascii:
+#ifdef CONFIG_ARCH_RPC
+ mov r3, #0xe0000000
+ orr r3, r3, #0x00010000
+ orr r3, r3, #0x00000fe0
+#else
+ mov r3, #0xf0000000
+ orr r3, r3, #0x0be0
+#endif
+ b 3f
+1: ldrb r2, [r3, #0x18]
+ tst r2, #0x10
+ beq 1b
+ strb r1, [r3]
+2: ldrb r2, [r3, #0x14]
+ and r2, r2, #0x60
+ teq r2, #0x60
+ bne 2b
+ teq r1, #'\n'
+ moveq r1, #'\r'
+ beq 1b
+3: teq r0, #0
+ ldrneb r1, [r0], #1
+ teqne r1, #0
+ bne 1b
+ mov pc, lr
+
+ .ltorg
+
+ .globl _printch
+_printch:
+#ifdef CONFIG_ARCH_RPC
+ mov r3, #0xe0000000
+ orr r3, r3, #0x00010000
+ orr r3, r3, #0x00000fe0
+#else
+ mov r3, #0xf0000000
+ orr r3, r3, #0x0be0
+#endif
+ mov r1, r0
+ mov r0, #0
+ b 1b
+
+ .bss
+hexbuf: .space 16
+
+#endif
+
+ .text
+ .align 13
+ENTRY(this_must_match_init_task)
diff --git a/arch/arm/kernel/iic.c b/arch/arm/kernel/iic.c
new file mode 100644
index 000000000..10a25e01b
--- /dev/null
+++ b/arch/arm/kernel/iic.c
@@ -0,0 +1,160 @@
+/*
+ * linux/arch/arm/kernel/iic.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * IIC is used to get the current time from the CMOS rtc.
+ */
+
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+/*
+ * If the delay loop has been calibrated then use that,
+ * otherwise use IOC timer 1.
+ */
+static void iic_delay (void)
+{
+ extern unsigned long loops_per_sec;
+ if (loops_per_sec != (1 << 12)) {
+ udelay(10);
+ return;
+ } else {
+ unsigned long flags;
+ save_flags_cli(flags);
+
+ outb(254, IOC_T1LTCHL);
+ outb(255, IOC_T1LTCHH);
+ outb(0, IOC_T1GO);
+ outb(1<<6, IOC_IRQCLRA); /* clear T1 irq */
+ outb(4, IOC_T1LTCHL);
+ outb(0, IOC_T1LTCHH);
+ outb(0, IOC_T1GO);
+ while ((inb(IOC_IRQSTATA) & (1<<6)) == 0);
+ restore_flags(flags);
+ }
+}
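+
+/*
+ * Before the delay loop is calibrated, the fallback path above programs IOC
+ * timer 1 as a short one-shot and spins on its interrupt status bit, so the
+ * bit-banged bus still gets a sensible delay during early boot.
+ */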
+
+static inline void iic_start (void)
+{
+ unsigned char out;
+
+ out = inb(IOC_CONTROL) | 0xc2;
+
+ outb(out, IOC_CONTROL);
+ iic_delay();
+
+ outb(out ^ 1, IOC_CONTROL);
+ iic_delay();
+}
+
+static inline void iic_stop (void)
+{
+ unsigned char out;
+
+ out = inb(IOC_CONTROL) | 0xc3;
+
+ iic_delay();
+ outb(out ^ 1, IOC_CONTROL);
+
+ iic_delay();
+ outb(out, IOC_CONTROL);
+}
+
+static int iic_sendbyte (unsigned char b)
+{
+ unsigned char out, in;
+ int i;
+
+ out = (inb(IOC_CONTROL) & 0xfc) | 0xc0;
+
+ outb(out, IOC_CONTROL);
+ for (i = 7; i >= 0; i--) {
+ unsigned char c;
+ c = out | ((b & (1 << i)) ? 1 : 0);
+
+ outb(c, IOC_CONTROL);
+ iic_delay();
+
+ outb(c | 2, IOC_CONTROL);
+ iic_delay();
+
+ outb(c, IOC_CONTROL);
+ }
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+
+ outb(out | 3, IOC_CONTROL);
+ iic_delay();
+
+ in = inb(IOC_CONTROL) & 1;
+
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+
+ outb(out, IOC_CONTROL);
+ iic_delay();
+
+ if(in) {
+ printk("No acknowledge from RTC\n");
+ return 1;
+ } else
+ return 0;
+}
+
+static unsigned char iic_recvbyte (void)
+{
+ unsigned char out, in;
+ int i;
+
+ out = (inb(IOC_CONTROL) & 0xfc) | 0xc0;
+
+ outb(out, IOC_CONTROL);
+ in = 0;
+ for (i = 7; i >= 0; i--) {
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+ outb(out | 3, IOC_CONTROL);
+ iic_delay();
+ in = (in << 1) | (inb(IOC_CONTROL) & 1);
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+ }
+ outb(out, IOC_CONTROL);
+ iic_delay();
+ outb(out | 2, IOC_CONTROL);
+ iic_delay();
+
+ return in;
+}
+
+void iic_control (unsigned char addr, unsigned char loc, unsigned char *buf, int len)
+{
+ iic_start();
+
+ if (iic_sendbyte(addr & 0xfe))
+ goto error;
+
+ if (iic_sendbyte(loc))
+ goto error;
+
+ if (addr & 1) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (iic_sendbyte (buf[i]))
+ break;
+ } else {
+ int i;
+
+ iic_stop();
+ iic_start();
+ iic_sendbyte(addr|1);
+ for (i = 0; i < len; i++)
+ buf[i] = iic_recvbyte ();
+ }
+error:
+ iic_stop();
+}
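+
+/*
+ * Illustrative use (the device address below is hypothetical): with bit 0 of
+ * addr set, the routine writes len bytes from buf after sending addr and loc;
+ * with bit 0 clear it issues a repeated start and reads len bytes into buf:
+ *
+ *	unsigned char buf[8];
+ *	iic_control(0x50, 0, buf, 8);	(bit 0 clear: read 8 bytes from reg 0)
+ *	iic_control(0x51, 8, buf, 2);	(bit 0 set: write 2 bytes at reg 8)
+ */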
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
new file mode 100644
index 000000000..acc206942
--- /dev/null
+++ b/arch/arm/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM;
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union task_union init_task_union __attribute__((__section__(".text"))) = { INIT_TASK };
diff --git a/arch/arm/kernel/ioport.c b/arch/arm/kernel/ioport.c
new file mode 100644
index 000000000..defa74335
--- /dev/null
+++ b/arch/arm/kernel/ioport.c
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/arm/kernel/ioport.c
+ *
+ * This contains the io-permission bitmap code - written by obz, with changes
+ * by Linus.
+ *
+ * Modifications for ARM processor Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
+{
+ int mask;
+ unsigned long *bitmap_base = bitmap + (base >> 5);
+ unsigned short low_index = base & 0x1f;
+ int length = low_index + extent;
+
+ if (low_index != 0) {
+ mask = (~0 << low_index);
+ if (length < 32)
+ mask &= ~(~0 << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ length -= 32;
+ }
+
+ mask = (new_value ? ~0 : 0);
+ while (length >= 32) {
+ *bitmap_base++ = mask;
+ length -= 32;
+ }
+
+ if (length > 0) {
+ mask = ~(~0 << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ }
+}
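+
+/*
+ * Example: set_bitmap(map, 30, 4, 1) sets bits 30-33, i.e. the top two bits
+ * of map[0] and the bottom two bits of map[1]; the three cases above handle
+ * the leading partial word, whole words, and the trailing partial word.
+ */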
+
+/*
+ * this changes the io permissions bitmap in the current task.
+ */
+asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ if (from + num <= from)
+ return -EINVAL;
+#ifndef __arm__
+ if (from + num > IO_BITMAP_SIZE*32)
+ return -EINVAL;
+#endif
+ if (!suser())
+ return -EPERM;
+
+#ifdef IODEBUG
+ printk("io: from=%d num=%d %s\n", from, num, (turn_on ? "on" : "off"));
+#endif
+#ifndef __arm__
+ set_bitmap((unsigned long *)current->tss.io_bitmap, from, num, !turn_on);
+#endif
+ return 0;
+}
+
+unsigned int *stack;
+
+/*
+ * sys_iopl has to be used when you want to access the IO ports
+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
+ *
+ * Here we just change the eflags value on the stack: we allow
+ * only the super-user to do it. This depends on the stack-layout
+ * on system-call entry - see also fork() and the signal handling
+ * code.
+ */
+asmlinkage int sys_iopl(long ebx,long ecx,long edx,
+ long esi, long edi, long ebp, long eax, long ds,
+ long es, long fs, long gs, long orig_eax,
+ long eip,long cs,long eflags,long esp,long ss)
+{
+ unsigned int level = ebx;
+
+ if (level > 3)
+ return -EINVAL;
+ if (!suser())
+ return -EPERM;
+ *(&eflags) = (eflags & 0xffffcfff) | (level << 12);
+ return 0;
+}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
new file mode 100644
index 000000000..e0fb7540a
--- /dev/null
+++ b/arch/arm/kernel/irq.c
@@ -0,0 +1,327 @@
+/*
+ * linux/arch/arm/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Modifications for ARM processor Copyright (C) 1995, 1996 Russell King.
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+#include <linux/config.h> /* for CONFIG_DEBUG_ERRORS */
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/irq-no.h>
+#include <asm/arch/irq.h>
+
+unsigned int local_irq_count[NR_CPUS];
+#ifdef __SMP__
+atomic_t __arm_bh_counter;
+#else
+int __arm_bh_counter;
+#endif
+
+spinlock_t irq_controller_lock;
+
+#ifndef SMP
+#define irq_enter(cpu, irq) (++local_irq_count[cpu])
+#define irq_exit(cpu, irq) (--local_irq_count[cpu])
+#else
+#error SMP not supported
+#endif
+
+void disable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+#ifdef cliIF
+ save_flags(flags);
+ cliIF();
+#endif
+ mask_irq(irq_nr);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+void enable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+#ifdef cliIF
+ save_flags (flags);
+ cliIF();
+#endif
+ unmask_irq(irq_nr);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+struct irqaction *irq_action[NR_IRQS];
+
+/*
+ * Bitmask indicating valid interrupt numbers
+ */
+unsigned long validirqs[NR_IRQS / 32] = {
+ 0x003fffff, 0x000001ff, 0x000000ff, 0x00000000
+};
+
+int get_irq_list(char *buf)
+{
+ int i;
+ struct irqaction * action;
+ char *p = buf;
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_action[i];
+ if (!action)
+ continue;
+ p += sprintf(p, "%3d: %10u %s",
+ i, kstat.interrupts[i], action->name);
+ for (action = action->next; action; action = action->next) {
+ p += sprintf(p, ", %s", action->name);
+ }
+ *p++ = '\n';
+ }
+ return p - buf;
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's
+ */
+asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
+{
+ struct irqaction * action;
+ int status, cpu;
+
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+ if (irq != IRQ_EXPANSIONCARD)
+#endif
+ {
+ spin_lock(&irq_controller_lock);
+ mask_and_ack_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+
+ cpu = smp_processor_id();
+ irq_enter(cpu, irq);
+ kstat.interrupts[irq]++;
+
+ /* Return with this interrupt masked if no action */
+ status = 0;
+ action = *(irq + irq_action);
+ if (action) {
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+ if (irq != IRQ_KEYBOARDTX && irq != IRQ_EXPANSIONCARD)
+#endif
+ {
+ spin_lock(&irq_controller_lock);
+ unmask_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+ }
+
+ irq_exit(cpu, irq);
+ /*
+ * This should be conditional: we should really get
+ * a return code from the irq handler to tell us
+ * whether the handler wants us to do software bottom
+ * half handling or not..
+ *
+ * ** IMPORTANT NOTE: do_bottom_half() ENABLES IRQS!!! **
+ * ** WE MUST DISABLE THEM AGAIN, ELSE IDE DISKS GO **
+ * ** AWOL **
+ */
+ if (1) {
+ if (bh_active & bh_mask)
+ do_bottom_half();
+ __cli();
+ }
+}
+
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+void do_ecard_IRQ(int irq, struct pt_regs *regs)
+{
+ struct irqaction * action;
+
+ action = *(irq + irq_action);
+ if (action) {
+ do {
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ } else {
+ spin_lock(&irq_controller_lock);
+ mask_irq (irq);
+ spin_unlock(&irq_controller_lock);
+ }
+}
+#endif
+
+int setup_arm_irq(int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ p = irq_action + irq;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ))
+ return -EBUSY;
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ if (new->flags & SA_SAMPLE_RANDOM)
+ rand_initialize_irq(irq);
+
+ save_flags_cli(flags);
+ *p = new;
+
+ if (!shared) {
+ spin_lock(&irq_controller_lock);
+ unmask_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Using "struct sigaction" is slightly silly, but there
+ * are historical reasons and it works well, so..
+ */
+int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irq_flags, const char * devname, void *dev_id)
+{
+ unsigned long retval;
+ struct irqaction *action;
+
+ if (irq >= NR_IRQS || !(validirqs[irq >> 5] & (1 << (irq & 31))))
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irq_flags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_arm_irq(irq, action);
+
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction * action, **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !(validirqs[irq >> 5] & (1 << (irq & 31)))) {
+ printk(KERN_ERR "Trying to free IRQ%d\n",irq);
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+ return;
+ }
+ for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now free it */
+ save_flags_cli (flags);
+ *p = action->next;
+ restore_flags (flags);
+ kfree(action);
+ return;
+ }
+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+}
+
+unsigned long probe_irq_on (void)
+{
+ unsigned int i, irqs = 0;
+ unsigned long delay;
+
+ /* first snaffle up any unassigned irqs */
+ for (i = 15; i > 0; i--) {
+ if (!irq_action[i]) {
+ enable_irq(i);
+ irqs |= 1 << i;
+ }
+ }
+
+ /* wait for spurious interrupts to mask themselves out again */
+ for (delay = jiffies + HZ/10; delay > jiffies; )
+ /* min 100ms delay */;
+
+ /* now filter out any obviously spurious interrupts */
+ return irqs & get_enabled_irqs();
+}
+
+int probe_irq_off (unsigned long irqs)
+{
+ unsigned int i;
+
+ irqs &= ~get_enabled_irqs();
+ if (!irqs)
+ return 0;
+ i = ffz (~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return i;
+}
+
+__initfunc(void init_IRQ(void))
+{
+ irq_init_irq();
+}
diff --git a/arch/arm/kernel/oldlatches.c b/arch/arm/kernel/oldlatches.c
new file mode 100644
index 000000000..c4674cd35
--- /dev/null
+++ b/arch/arm/kernel/oldlatches.c
@@ -0,0 +1,53 @@
+/* Support for the latches on the old Archimedes which control the floppy,
+ * hard disc and printer
+ *
+ * (c) David Alan Gilbert 1995/1996
+ */
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#ifdef LATCHAADDR
+/*
+ * They are static so that everyone who accesses them has to go through here
+ */
+static unsigned char LatchACopy;
+
+/* newval=(oldval & ~mask)|newdata */
+void oldlatch_aupdate(unsigned char mask,unsigned char newdata)
+{
+ LatchACopy=(LatchACopy & ~mask)|newdata;
+ outb(LatchACopy, LATCHAADDR);
+#ifdef DEBUG
+ printk("oldlatch_A:0x%2x\n",LatchACopy);
+#endif
+
+}
+#endif
+
+#ifdef LATCHBADDR
+static unsigned char LatchBCopy;
+
+/* newval=(oldval & ~mask)|newdata */
+void oldlatch_bupdate(unsigned char mask,unsigned char newdata)
+{
+ LatchBCopy=(LatchBCopy & ~mask)|newdata;
+ outb(LatchBCopy, LATCHBADDR);
+#ifdef DEBUG
+ printk("oldlatch_B:0x%2x\n",LatchBCopy);
+#endif
+}
+#endif
+
+void oldlatch_init(void)
+{
+ printk("oldlatch: init\n");
+#ifdef LATCHAADDR
+ oldlatch_aupdate(0xff,0xff);
+#endif
+#ifdef LATCHBADDR
+	oldlatch_bupdate(0xff,0x8); /* That's no FDC reset... */
+#endif
+ return ;
+}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
new file mode 100644
index 000000000..7f45e7c3c
--- /dev/null
+++ b/arch/arm/kernel/process.c
@@ -0,0 +1,239 @@
+/*
+ * linux/arch/arm/kernel/process.c
+ *
+ * Copyright (C) 1996 Russell King - Converted to ARM.
+ * Original Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+extern void fpe_save(struct fp_soft_struct *);
+extern char *processor_modes[];
+
+asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
+
+static int hlt_counter=0;
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+/*
+ * The idle loop on an arm..
+ */
+asmlinkage int sys_idle(void)
+{
+ int ret = -EPERM;
+
+ lock_kernel();
+ if (current->pid != 0)
+ goto out;
+ /* endless idle loop with no priority at all */
+ current->priority = -100;
+ for (;;)
+ {
+ if (!hlt_counter && !need_resched)
+ proc_idle ();
+ run_task_queue(&tq_scheduler);
+ schedule();
+ }
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+__initfunc(void reboot_setup(char *str, int *ints))
+{
+}
+
+/*
+ * This routine reboots the machine by resetting the expansion cards via
+ * their loaders, turning off the processor cache (if ARM3), copying the
+ * first instruction of the ROM to 0, and executing it there.
+ */
+void machine_restart(char * __unused)
+{
+ proc_hard_reset ();
+ arch_hard_reset ();
+}
+
+void machine_halt(void)
+{
+}
+
+void machine_power_off(void)
+{
+}
+
+
+void show_regs(struct pt_regs * regs)
+{
+ unsigned long flags;
+
+ flags = condition_codes(regs);
+
+ printk("\n"
+ "pc : [<%08lx>]\n"
+ "lr : [<%08lx>]\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ instruction_pointer(regs),
+ regs->ARM_lr, regs->ARM_sp,
+ regs->ARM_ip, regs->ARM_fp);
+ printk( "r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->ARM_r10, regs->ARM_r9,
+ regs->ARM_r8);
+ printk( "r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->ARM_r7, regs->ARM_r6,
+ regs->ARM_r5, regs->ARM_r4);
+ printk( "r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->ARM_r3, regs->ARM_r2,
+ regs->ARM_r1, regs->ARM_r0);
+ printk("Flags: %c%c%c%c",
+ flags & CC_N_BIT ? 'N' : 'n',
+ flags & CC_Z_BIT ? 'Z' : 'z',
+ flags & CC_C_BIT ? 'C' : 'c',
+ flags & CC_V_BIT ? 'V' : 'v');
+ printk(" IRQs %s FIQs %s Mode %s\n",
+ interrupts_enabled(regs) ? "on" : "off",
+ fast_interrupts_enabled(regs) ? "on" : "off",
+ processor_modes[processor_mode(regs)]);
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+{ int ctrl, transbase, dac;
+ __asm__ (
+" mrc p15, 0, %0, c1, c0\n"
+" mrc p15, 0, %1, c2, c0\n"
+" mrc p15, 0, %2, c3, c0\n"
+ : "=r" (ctrl), "=r" (transbase), "=r" (dac));
+ printk("Control: %04X Table: %08X DAC: %08X",
+ ctrl, transbase, dac);
+ }
+#endif
+ printk ("Segment %s\n", get_fs() == get_ds() ? "kernel" : "user");
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+}
+
+void flush_thread(void)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ current->debugreg[i] = 0;
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+ current->used_math = 0;
+ current->flags &= ~PF_USEDFPU;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ struct context_save_struct * save;
+
+ childregs = ((struct pt_regs *)((unsigned long)p + 8192)) - 1;
+ *childregs = *regs;
+ childregs->ARM_r0 = 0;
+
+ save = ((struct context_save_struct *)(childregs)) - 1;
+ copy_thread_css (save);
+ p->tss.save = save;
+ /*
+ * Save current math state in p->tss.fpe_save if not already there.
+ */
+ if (last_task_used_math == current)
+ fpe_save (&p->tss.fpstate.soft);
+
+ return 0;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
+{
+ int fpvalid = 0;
+
+ if (current->used_math) {
+ if (last_task_used_math == current)
+ fpe_save (&current->tss.fpstate.soft);
+
+		memcpy (fp, &current->tss.fpstate.soft, sizeof (*fp));
+		fpvalid = 1;
+	}
+
+ return fpvalid;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ int i;
+
+ dump->magic = CMAGIC;
+ dump->start_code = current->mm->start_code;
+ dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+
+ dump->u_tsize = (current->mm->end_code - current->mm->start_code) >> PAGE_SHIFT;
+ dump->u_dsize = (current->mm->brk - current->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ dump->u_ssize = 0;
+
+ for (i = 0; i < 8; i++)
+ dump->u_debugreg[i] = current->debugreg[i];
+
+ if (dump->start_stack < 0x04000000)
+ dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+
+ dump->regs = *regs;
+ dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
new file mode 100644
index 000000000..5fa67df6c
--- /dev/null
+++ b/arch/arm/kernel/ptrace.c
@@ -0,0 +1,745 @@
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* edited for ARM by Russell King */
+
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Does not yet catch signals sent when the child dies
+ * (in exit.c or in signal.c).
+ */
+
+/*
+ * Breakpoint SWI instruction: SWI &9F0001
+ */
+#define BREAKINST 0xef9f0001
+
+/* change a pid into a task struct. */
+static inline struct task_struct * get_task(int pid)
+{
+ int i;
+
+ for (i = 1; i < NR_TASKS; i++) {
+ if (task[i] != NULL && (task[i]->pid == pid))
+ return task[i];
+ }
+ return NULL;
+}
+
+/*
+ * this routine will get a word off of the processes privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)((unsigned long)task + 8192 - sizeof(struct pt_regs));
+ stack += offset << 2;
+ return *(unsigned long *)stack;
+}
+
+/*
+ * this routine will put a word on the processes privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)((unsigned long)task + 8192 - sizeof(struct pt_regs));
+ stack += offset << 2;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+/*
+ * This routine gets a long from any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ */
+static unsigned long get_long(struct task_struct * tsk,
+ struct vm_area_struct * vma, unsigned long addr)
+{
+ pgd_t *pgdir;
+ pmd_t *pgmiddle;
+ pte_t *pgtable;
+ unsigned long page;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none(*pgdir)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return 0;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return 0;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+
+ if(MAP_NR(page) >= max_mapnr)
+ return 0;
+ page += addr & ~PAGE_MASK;
+ return *(unsigned long *)page;
+}
+
+/*
+ * This routine puts a long into any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ *
+ * Now keeps R/W state of the page so that a text page stays readonly
+ * even if a debugger scribbles breakpoints into it. -M.U-
+ */
+static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
+ unsigned long data)
+{
+ pgd_t *pgdir;
+ pmd_t *pgmiddle;
+ pte_t *pgtable;
+ unsigned long page;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (!pgd_present(*pgdir)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+ if (!pte_write(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+
+ if (MAP_NR(page) < max_mapnr) {
+ page += addr & ~PAGE_MASK;
+ *(unsigned long *)page = data;
+ __flush_entry_to_ram(page);
+ }
+ set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ flush_tlb();
+}
+
+static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
+{
+ struct vm_area_struct * vma;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(tsk->mm,addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
+ return NULL;
+ vma->vm_offset -= vma->vm_start - addr;
+ vma->vm_start = addr;
+ return vma;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls get_long() to read a long.
+ */
+static int read_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long * result)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
+ unsigned long low,high;
+ struct vm_area_struct * vma_high = vma;
+
+ if (addr + sizeof(long) >= vma->vm_end) {
+ vma_high = vma->vm_next;
+ if (!vma_high || vma_high->vm_start != vma->vm_end)
+ return -EIO;
+ }
+ low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
+ high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
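+		/* The long crosses a page boundary: read the aligned long
+		 * on each side and splice the bytes together (little-endian).
+		 */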
+ switch (addr & (sizeof(long)-1)) {
+ case 1:
+ low >>= 8;
+ low |= high << 24;
+ break;
+ case 2:
+ low >>= 16;
+ low |= high << 16;
+ break;
+ case 3:
+ low >>= 24;
+ low |= high << 8;
+ break;
+ }
+ *result = low;
+ } else
+ *result = get_long(tsk, vma, addr);
+ return 0;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls put_long() to write a long.
+ */
+static int write_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long data)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
+ unsigned long low,high;
+ struct vm_area_struct * vma_high = vma;
+
+ if (addr + sizeof(long) >= vma->vm_end) {
+ vma_high = vma->vm_next;
+ if (!vma_high || vma_high->vm_start != vma->vm_end)
+ return -EIO;
+ }
+ low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
+ high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
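+		/* As in read_long(): read both aligned longs, merge the new
+		 * bytes into each half, and write them back separately.
+		 */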
+ switch (addr & (sizeof(long)-1)) {
+ case 0: /* shouldn't happen, but safety first */
+ low = data;
+ break;
+ case 1:
+ low &= 0x000000ff;
+ low |= data << 8;
+ high &= ~0xff;
+ high |= data >> 24;
+ break;
+ case 2:
+ low &= 0x0000ffff;
+ low |= data << 16;
+ high &= ~0xffff;
+ high |= data >> 16;
+ break;
+ case 3:
+ low &= 0x00ffffff;
+ low |= data << 24;
+ high &= ~0xffffff;
+ high |= data >> 8;
+ break;
+ }
+ put_long(tsk, vma, addr & ~(sizeof(long)-1),low);
+ put_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1),high);
+ } else
+ put_long(tsk, vma, addr, data);
+ return 0;
+}
+
+/*
+ * Get value of register `rn' (in the instruction)
+ */
+static unsigned long ptrace_getrn (struct task_struct *child, unsigned long insn)
+{
+ unsigned int reg = (insn >> 16) & 15;
+ unsigned long val;
+
+ if (reg == 15)
+ val = pc_pointer (get_stack_long (child, reg));
+ else
+ val = get_stack_long (child, reg);
+
+printk ("r%02d=%08lX ", reg, val);
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in an ALU instruction)
+ */
+static unsigned long ptrace_getaluop2 (struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+printk ("op2=");
+ if (insn & 1 << 25) {
+ val = insn & 255;
+		shift = ((insn >> 8) & 15) << 1;	/* immediate is rotated right by twice the rotate field */
+ type = 3;
+printk ("(imm)");
+ } else {
+ val = get_stack_long (child, insn & 15);
+
+ if (insn & (1 << 4))
+ shift = (int)get_stack_long (child, (insn >> 8) & 15);
+ else
+ shift = (insn >> 7) & 31;
+
+ type = (insn >> 5) & 3;
+printk ("(r%02ld)", insn & 15);
+ }
+printk ("sh%dx%d", type, shift);
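+	/* Shift types: 0 = LSL, 1 = LSR, 2 = ASR, 3 = ROR */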
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ __asm__ __volatile__("mov %0, %0, ror %1" : "=r" (val) : "0" (val), "r" (shift));
+ break;
+ }
+printk ("=%08lX ", val);
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in a LDR instruction)
+ */
+static unsigned long ptrace_getldrop2 (struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+ val = get_stack_long (child, insn & 15);
+ shift = (insn >> 7) & 31;
+ type = (insn >> 5) & 3;
+
+printk ("op2=r%02ldsh%dx%d", insn & 15, shift, type);
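+	/* Same shift decoding as ptrace_getaluop2() above */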
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ __asm__ __volatile__("mov %0, %0, ror %1" : "=r" (val) : "0" (val), "r" (shift));
+ break;
+ }
+printk ("=%08lX ", val);
+ return val;
+}
+#undef pc_pointer
+#define pc_pointer(x) ((x) & 0x03fffffc)
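+/*
+ * Plant the single-step breakpoint(s): one after the current instruction
+ * and, if the instruction may change the PC, one at the computed target.
+ * debugreg[0..1] hold the breakpoint addresses, debugreg[2..3] the
+ * original instructions they replaced, and debugreg[4] the count in use.
+ */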
+int ptrace_set_bpt (struct task_struct *child)
+{
+ unsigned long insn, pc, alt;
+ int i, nsaved = 0, res;
+
+ pc = pc_pointer (get_stack_long (child, 15/*REG_PC*/));
+
+ res = read_long (child, pc, &insn);
+ if (res < 0)
+ return res;
+
+ child->debugreg[nsaved++] = alt = pc + 4;
+printk ("ptrace_set_bpt: insn=%08lX pc=%08lX ", insn, pc);
+ switch (insn & 0x0e100000) {
+ case 0x00000000:
+ case 0x00100000:
+ case 0x02000000:
+ case 0x02100000: /* data processing */
+ printk ("data ");
+ switch (insn & 0x01e0f000) {
+ case 0x0000f000:
+ alt = ptrace_getrn(child, insn) & ptrace_getaluop2(child, insn);
+ break;
+ case 0x0020f000:
+ alt = ptrace_getrn(child, insn) ^ ptrace_getaluop2(child, insn);
+ break;
+ case 0x0040f000:
+ alt = ptrace_getrn(child, insn) - ptrace_getaluop2(child, insn);
+ break;
+ case 0x0060f000:
+ alt = ptrace_getaluop2(child, insn) - ptrace_getrn(child, insn);
+ break;
+ case 0x0080f000:
+ alt = ptrace_getrn(child, insn) + ptrace_getaluop2(child, insn);
+ break;
+ case 0x00a0f000:
+ alt = ptrace_getrn(child, insn) + ptrace_getaluop2(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x00c0f000:
+ alt = ptrace_getrn(child, insn) - ptrace_getaluop2(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x00e0f000:
+ alt = ptrace_getaluop2(child, insn) - ptrace_getrn(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x0180f000:
+ alt = ptrace_getrn(child, insn) | ptrace_getaluop2(child, insn);
+ break;
+ case 0x01a0f000:
+ alt = ptrace_getaluop2(child, insn);
+ break;
+ case 0x01c0f000:
+ alt = ptrace_getrn(child, insn) & ~ptrace_getaluop2(child, insn);
+ break;
+ case 0x01e0f000:
+ alt = ~ptrace_getaluop2(child, insn);
+ break;
+ }
+ break;
+
+ case 0x04100000: /* ldr */
+ if ((insn & 0xf000) == 0xf000) {
+printk ("ldr ");
+ alt = ptrace_getrn(child, insn);
+ if (insn & 1 << 24) {
+ if (insn & 1 << 23)
+ alt += ptrace_getldrop2 (child, insn);
+ else
+ alt -= ptrace_getldrop2 (child, insn);
+ }
+ if (read_long (child, alt, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ }
+ break;
+
+ case 0x06100000: /* ldr imm */
+ if ((insn & 0xf000) == 0xf000) {
+printk ("ldrimm ");
+ alt = ptrace_getrn(child, insn);
+ if (insn & 1 << 24) {
+ if (insn & 1 << 23)
+ alt += insn & 0xfff;
+ else
+ alt -= insn & 0xfff;
+ }
+ if (read_long (child, alt, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ }
+ break;
+
+ case 0x08100000: /* ldm */
+ if (insn & (1 << 15)) {
+ unsigned long base;
+ int nr_regs;
+printk ("ldm ");
+
+ if (insn & (1 << 23)) {
+ nr_regs = insn & 65535;
+
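+				/* Count the registers in the transfer list
+				 * (parallel bit count) and multiply by four;
+				 * r15 is the highest register, so it is
+				 * loaded from the highest address.
+				 */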
+ nr_regs = (nr_regs & 0x5555) + ((nr_regs & 0xaaaa) >> 1);
+ nr_regs = (nr_regs & 0x3333) + ((nr_regs & 0xcccc) >> 2);
+ nr_regs = (nr_regs & 0x0707) + ((nr_regs & 0x7070) >> 4);
+ nr_regs = (nr_regs & 0x000f) + ((nr_regs & 0x0f00) >> 8);
+ nr_regs <<= 2;
+
+ if (!(insn & (1 << 24)))
+ nr_regs -= 4;
+ } else {
+ if (insn & (1 << 24))
+ nr_regs = -4;
+ else
+ nr_regs = 0;
+ }
+
+ base = ptrace_getrn (child, insn);
+
+ if (read_long (child, base + nr_regs, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ break;
+ }
+ break;
+
+ case 0x0a000000:
+ case 0x0a100000: { /* bl or b */
+ signed long displ;
+printk ("b/bl ");
+ /* It's a branch/branch link: instead of trying to
+ * figure out whether the branch will be taken or not,
+ * we'll put a breakpoint at either location. This is
+ * simpler, more reliable, and probably not a whole lot
+ * slower than the alternative approach of emulating the
+ * branch.
+ */
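+		/* Sign-extend the 24-bit word offset (<< 8 then arithmetic
+		 * >> 6 gives offset * 4) and add 8 for the pipeline prefetch.
+		 */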
+ displ = (insn & 0x00ffffff) << 8;
+ displ = (displ >> 6) + 8;
+ if (displ != 0 && displ != 4)
+ alt = pc + displ;
+ }
+ break;
+ }
+printk ("=%08lX\n", alt);
+ if (alt != pc + 4)
+ child->debugreg[nsaved++] = alt;
+
+ for (i = 0; i < nsaved; i++) {
+ res = read_long (child, child->debugreg[i], &insn);
+ if (res >= 0) {
+ child->debugreg[i + 2] = insn;
+ res = write_long (child, child->debugreg[i], BREAKINST);
+ }
+ if (res < 0) {
+ child->debugreg[4] = 0;
+ return res;
+ }
+ }
+ child->debugreg[4] = nsaved;
+ return 0;
+}
+
+/* Ensure no single-step breakpoint is pending. Returns non-zero
+ * value if child was being single-stepped.
+ */
+int ptrace_cancel_bpt (struct task_struct *child)
+{
+ int i, nsaved = child->debugreg[4];
+
+ child->debugreg[4] = 0;
+
+ if (nsaved > 2) {
+ printk ("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
+ nsaved = 2;
+ }
+ for (i = 0; i < nsaved; i++)
+ write_long (child, child->debugreg[i], child->debugreg[i + 2]);
+ return nsaved != 0;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->flags & PF_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->flags |= PF_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ if (pid == 1) /* you may not mess with init */
+ goto out;
+ ret = -ESRCH;
+ if (!(child = get_task(pid)))
+ goto out;
+ ret = -EPERM;
+ if (request == PTRACE_ATTACH) {
+ if (child == current)
+ goto out;
+ if ((!child->dumpable ||
+ (current->uid != child->euid) ||
+ (current->uid != child->suid) ||
+ (current->uid != child->uid) ||
+ (current->gid != child->egid) ||
+ (current->gid != child->sgid) ||
+ (current->gid != child->gid)) && !suser())
+ goto out;
+ /* the same process cannot be attached many times */
+ if (child->flags & PF_PTRACED)
+ goto out;
+ child->flags |= PF_PTRACED;
+ if (child->p_pptr != current) {
+ REMOVE_LINKS(child);
+ child->p_pptr = current;
+ SET_LINKS(child);
+ }
+ send_sig(SIGSTOP, child, 1);
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ if (!(child->flags & PF_PTRACED))
+ goto out;
+ if (child->state != TASK_STOPPED) {
+ if (request != PTRACE_KILL)
+ goto out;
+ }
+ if (child->p_pptr != current)
+ goto out;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+
+ ret = read_long(child, addr, &tmp);
+ if (ret >= 0)
+ ret = put_user(tmp, (unsigned long *)data);
+ goto out;
+ }
+
+ case PTRACE_PEEKUSR: { /* read the word at location addr in the USER area. */
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user))
+ goto out;
+
+ tmp = 0; /* Default return condition */
+ if (addr < sizeof (struct pt_regs))
+ tmp = get_stack_long(child, (int)addr >> 2);
+ ret = put_user(tmp, (unsigned long *)data);
+ goto out;
+ }
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = write_long(child,addr,data);
+ goto out;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user))
+ goto out;
+
+ if (addr < sizeof (struct pt_regs))
+ ret = put_stack_long(child, (int)addr >> 2, data);
+ goto out;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ if (request == PTRACE_SYSCALL)
+ child->flags |= PF_TRACESYS;
+ else
+ child->flags &= ~PF_TRACESYS;
+ child->exit_code = data;
+ wake_up_process (child);
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt (child);
+ ret = 0;
+ goto out;
+
+ /* make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+	case PTRACE_KILL:
+		ret = 0;
+		if (child->state == TASK_ZOMBIE) /* already dead */
+			goto out;
+		wake_up_process (child);
+		child->exit_code = SIGKILL;
+		/* make sure single-step breakpoint is gone. */
+		ptrace_cancel_bpt (child);
+		goto out;
+
+ case PTRACE_SINGLESTEP: /* execute single instruction. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->debugreg[4] = -1;
+ child->flags &= ~PF_TRACESYS;
+ wake_up_process(child);
+ child->exit_code = data;
+ /* give it a chance to run. */
+ ret = 0;
+ goto out;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ wake_up_process (child);
+ child->exit_code = data;
+ REMOVE_LINKS(child);
+ child->p_pptr = child->p_opptr;
+ SET_LINKS(child);
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt (child);
+ ret = 0;
+ goto out;
+
+ default:
+ ret = -EIO;
+ goto out;
+ }
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if ((current->flags & (PF_PTRACED|PF_TRACESYS))
+ != (PF_PTRACED|PF_TRACESYS))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/arm/kernel/setup-ebsa110.c b/arch/arm/kernel/setup-ebsa110.c
new file mode 100644
index 000000000..285284b7d
--- /dev/null
+++ b/arch/arm/kernel/setup-ebsa110.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/setup-ebsa110.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+/*
+ * This file obtains various parameters about the system that the kernel
+ * is running on.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/ldt.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/utsname.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/pgtable.h>
+
+#ifndef CONFIG_CMDLINE
+#define CONFIG_CMDLINE "root=nfs rw console=ttyS1,38400n8"
+#endif
+#define MEM_SIZE (16*1024*1024)
+
+#define COMMAND_LINE_SIZE 256
+
+unsigned char aux_device_present;
+unsigned long arm_id;
+extern int root_mountflags;
+extern int _etext, _edata, _end;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+static inline void setup_ramdisk (void)
+{
+ rd_image_start = 0;
+ rd_prompt = 1;
+ rd_doload = 1;
+}
+#else
+#define setup_ramdisk()
+#endif
+
+static char default_command_line[] = CONFIG_CMDLINE;
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+ char saved_command_line[COMMAND_LINE_SIZE];
+
+struct processor processor;
+extern const struct processor sa110_processor_functions;
+
+void setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p, unsigned long * memory_end_p)
+{
+ unsigned long memory_start, memory_end;
+ char c = ' ', *to = command_line, *from;
+ int len = 0;
+
+ memory_start = (unsigned long)&_end;
+ memory_end = 0xc0000000 + MEM_SIZE;
+ from = default_command_line;
+
+ processor = sa110_processor_functions;
+ processor._proc_init ();
+
+ ROOT_DEV = 0x00ff;
+ setup_ramdisk();
+
+ init_task.mm->start_code = TASK_SIZE;
+ init_task.mm->end_code = TASK_SIZE + (unsigned long) &_etext;
+ init_task.mm->end_data = TASK_SIZE + (unsigned long) &_edata;
+ init_task.mm->brk = TASK_SIZE + (unsigned long) &_end;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
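+	/* Copy the command line, interpreting any mem=<size>[K|M] option */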
+ for (;;) {
+ if (c == ' ' &&
+ from[0] == 'm' &&
+ from[1] == 'e' &&
+ from[2] == 'm' &&
+ from[3] == '=') {
+ memory_end = simple_strtoul(from+4, &from, 0);
+ if ( *from == 'K' || *from == 'k' ) {
+ memory_end = memory_end << 10;
+ from++;
+ } else if ( *from == 'M' || *from == 'm' ) {
+ memory_end = memory_end << 20;
+ from++;
+ }
+ memory_end = memory_end + PAGE_OFFSET;
+ }
+ c = *from++;
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *to++ = c;
+ }
+
+ *to = '\0';
+ *cmdline_p = command_line;
+ *memory_start_p = memory_start;
+ *memory_end_p = memory_end;
+ strcpy (system_utsname.machine, "sa110");
+}
+
+int get_cpuinfo(char * buffer)
+{
+ int len;
+
+ len = sprintf (buffer, "CPU:\n"
+ "Type\t\t: %s\n"
+ "Revision\t: %d\n"
+ "Manufacturer\t: %s\n"
+ "32bit modes\t: %s\n"
+ "BogoMips\t: %lu.%02lu\n",
+ "sa110",
+ (int)arm_id & 15,
+ "DEC",
+ "yes",
+ (loops_per_sec+2500) / 500000,
+ ((loops_per_sec+2500) / 5000) % 100);
+ return len;
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
new file mode 100644
index 000000000..ac304fb3e
--- /dev/null
+++ b/arch/arm/kernel/setup.c
@@ -0,0 +1,292 @@
+/*
+ * linux/arch/arm/kernel/setup.c
+ *
+ * Copyright (C) 1995, 1996, 1997 Russell King
+ */
+
+/*
+ * This file obtains various parameters about the system that the kernel
+ * is running on.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/utsname.h>
+#include <linux/blk.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/pgtable.h>
+#include <asm/arch/mmu.h>
+#include <asm/procinfo.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct processor processor;
+unsigned char aux_device_present;
+
+extern const struct processor arm2_processor_functions;
+extern const struct processor arm250_processor_functions;
+extern const struct processor arm3_processor_functions;
+extern const struct processor arm6_processor_functions;
+extern const struct processor arm7_processor_functions;
+extern const struct processor sa110_processor_functions;
+
+struct armversions armidlist[] = {
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ { 0x41560200, 0xfffffff0, F_MEMC , "ARM/VLSI", "arm2" , &arm2_processor_functions },
+ { 0x41560250, 0xfffffff0, F_MEMC , "ARM/VLSI", "arm250" , &arm250_processor_functions },
+ { 0x41560300, 0xfffffff0, F_MEMC|F_CACHE, "ARM/VLSI", "arm3" , &arm3_processor_functions },
+#endif
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ { 0x41560600, 0xfffffff0, F_MMU|F_32BIT , "ARM/VLSI", "arm6" , &arm6_processor_functions },
+ { 0x41560610, 0xfffffff0, F_MMU|F_32BIT , "ARM/VLSI", "arm610" , &arm6_processor_functions },
+ { 0x41007000, 0xffffff00, F_MMU|F_32BIT , "ARM/VLSI", "arm7" , &arm7_processor_functions },
+ { 0x41007100, 0xffffff00, F_MMU|F_32BIT , "ARM/VLSI", "arm710" , &arm7_processor_functions },
+ { 0x4401a100, 0xfffffff0, F_MMU|F_32BIT , "DEC", "sa110" , &sa110_processor_functions },
+#endif
+ { 0x00000000, 0x00000000, 0 , "***", "*unknown*" , NULL }
+};
+
+static struct param_struct *params = (struct param_struct *)PARAMS_BASE;
+
+unsigned long arm_id;
+unsigned int vram_half_sam;
+int armidindex;
+int ioebpresent;
+int memc_ctrl_reg;
+int number_ide_drives;
+int number_mfm_drives;
+
+extern int bytes_per_char_h;
+extern int bytes_per_char_v;
+extern int root_mountflags;
+extern int _etext, _edata, _end;
+extern unsigned long real_end_mem;
+
+/*-------------------------------------------------------------------------
+ * Early initialisation routines for various configurable items in the
+ * kernel. Each one either supplies a setup_ function, or defines this
+ * symbol to be empty if not configured.
+ */
+
+/*
+ * Risc-PC specific initialisation
+ */
+#ifdef CONFIG_ARCH_RPC
+
+extern void init_dram_banks(struct param_struct *params);
+
+static void setup_rpc (struct param_struct *params)
+{
+ init_dram_banks(params);
+
+ switch (params->u1.s.pages_in_vram) {
+ case 256:
+ vram_half_sam = 1024;
+ break;
+ case 512:
+ default:
+ vram_half_sam = 2048;
+ }
+
+ /*
+ * Set ROM speed to maximum
+ */
+ outb (0x1d, IOMD_ROMCR0);
+}
+#else
+#define setup_rpc(x)
+#endif
+
+/*
+ * ram disk
+ */
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+static void setup_ramdisk (struct param_struct *params)
+{
+ rd_image_start = params->u1.s.rd_start;
+ rd_prompt = (params->u1.s.flags & FLAG_RDPROMPT) == 0;
+ rd_doload = (params->u1.s.flags & FLAG_RDLOAD) == 0;
+}
+#else
+#define setup_ramdisk(p)
+#endif
+
+/*
+ * initial ram disk
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+static void setup_initrd (struct param_struct *params, unsigned long memory_end)
+{
+ initrd_start = params->u1.s.initrd_start;
+ initrd_end = params->u1.s.initrd_start + params->u1.s.initrd_size;
+
+ if (initrd_end > memory_end) {
+ printk ("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx) - disabling initrd\n",
+ initrd_end, memory_end);
+ initrd_start = 0;
+ }
+}
+#else
+#define setup_initrd(p,m)
+#endif
+
+static inline void check_ioeb_present(void)
+{
+ if (((*IOEB_BASE) & 15) == 5)
+ armidlist[armidindex].features |= F_IOEB;
+}
+
+static void get_processor_type (void)
+{
+ for (armidindex = 0; ; armidindex ++)
+ if (!((armidlist[armidindex].id ^ arm_id) &
+ armidlist[armidindex].mask))
+ break;
+
+ if (armidlist[armidindex].id == 0) {
+ int i;
+
+ for (i = 0; i < 3200; i++)
+ ((unsigned long *)SCREEN2_BASE)[i] = 0x77113322;
+
+ while (1);
+ }
+ processor = *armidlist[armidindex].proc;
+}
+
+#define COMMAND_LINE_SIZE 256
+
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+ char saved_command_line[COMMAND_LINE_SIZE];
+
+void setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p, unsigned long * memory_end_p)
+{
+ static unsigned char smptrap;
+ unsigned long memory_start, memory_end;
+ char c = ' ', *to = command_line, *from;
+ int len = 0;
+
+ if (smptrap == 1)
+ return;
+ smptrap = 1;
+
+ get_processor_type ();
+ check_ioeb_present ();
+ processor._proc_init ();
+
+ bytes_per_char_h = params->u1.s.bytes_per_char_h;
+ bytes_per_char_v = params->u1.s.bytes_per_char_v;
+ from = params->commandline;
+ ROOT_DEV = to_kdev_t (params->u1.s.rootdev);
+ ORIG_X = params->u1.s.video_x;
+ ORIG_Y = params->u1.s.video_y;
+ ORIG_VIDEO_COLS = params->u1.s.video_num_cols;
+ ORIG_VIDEO_LINES = params->u1.s.video_num_rows;
+ memc_ctrl_reg = params->u1.s.memc_control_reg;
+ number_ide_drives = (params->u1.s.adfsdrives >> 6) & 3;
+ number_mfm_drives = (params->u1.s.adfsdrives >> 3) & 3;
+
+ setup_rpc (params);
+ setup_ramdisk (params);
+
+ if (!(params->u1.s.flags & FLAG_READONLY))
+ root_mountflags &= ~MS_RDONLY;
+
+ memory_start = MAPTOPHYS((unsigned long)&_end);
+ memory_end = GET_MEMORY_END(params);
+
+ init_task.mm->start_code = TASK_SIZE;
+ init_task.mm->end_code = TASK_SIZE + (unsigned long) &_etext;
+ init_task.mm->end_data = TASK_SIZE + (unsigned long) &_edata;
+ init_task.mm->brk = TASK_SIZE + (unsigned long) &_end;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c == ' ' &&
+ from[0] == 'm' &&
+ from[1] == 'e' &&
+ from[2] == 'm' &&
+ from[3] == '=') {
+ memory_end = simple_strtoul(from+4, &from, 0);
+ if (*from == 'K' || *from == 'k') {
+ memory_end = memory_end << 10;
+ from++;
+ } else if (*from == 'M' || *from == 'm') {
+ memory_end = memory_end << 20;
+ from++;
+ }
+ memory_end = memory_end + PAGE_OFFSET;
+ }
+ c = *from++;
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *to++ = c;
+ }
+
+ *to = '\0';
+ *cmdline_p = command_line;
+ *memory_start_p = memory_start;
+ *memory_end_p = memory_end;
+
+ setup_initrd (params, memory_end);
+
+ strcpy (system_utsname.machine, armidlist[armidindex].name);
+}
+
+#define ISSET(bit) (armidlist[armidindex].features & bit)
+
+int get_cpuinfo(char * buffer)
+{
+ int len;
+
+ len = sprintf (buffer, "CPU:\n"
+ "Type\t\t: %s\n"
+ "Revision\t: %d\n"
+ "Manufacturer\t: %s\n"
+ "32bit modes\t: %s\n"
+ "BogoMips\t: %lu.%02lu\n",
+ armidlist[armidindex].name,
+ (int)arm_id & 15,
+ armidlist[armidindex].manu,
+ ISSET (F_32BIT) ? "yes" : "no",
+ (loops_per_sec+2500) / 500000,
+ ((loops_per_sec+2500) / 5000) % 100);
+ len += sprintf (buffer + len,
+ "\nHardware:\n"
+ "Mem System\t: %s\n"
+ "IOEB\t\t: %s\n",
+ ISSET(F_MEMC) ? "MEMC" :
+ ISSET(F_MMU) ? "MMU" : "*unknown*",
+ ISSET(F_IOEB) ? "present" : "absent"
+ );
+ return len;
+}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
new file mode 100644
index 000000000..0cba3dd07
--- /dev/null
+++ b/arch/arm/kernel/signal.c
@@ -0,0 +1,515 @@
+/*
+ * linux/arch/arm/kernel/signal.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h> /* for CONFIG_CPU_ARM6 and CONFIG_CPU_SA110 */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
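+/*
+ * 0xef000000 is an always-executed SWI; OR-ing in the syscall number
+ * gives the instruction planted in the signal frame's return stub
+ * (see setup_frame() and setup_rt_frame() below).
+ */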
+#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
+#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
+
+asmlinkage int sys_wait4(pid_t pid, unsigned long * stat_addr,
+ int options, unsigned long *ru);
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs);
+extern int ptrace_cancel_bpt (struct task_struct *);
+extern int ptrace_set_bpt (struct task_struct *);
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
+{
+	sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->ARM_r0;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->ARM_r0;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+struct sigframe
+{
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned long retcode;
+};
+
+struct rt_sigframe
+{
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long retcode;
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ __get_user(regs->ARM_r0, &sc->arm_r0);
+ __get_user(regs->ARM_r1, &sc->arm_r1);
+ __get_user(regs->ARM_r2, &sc->arm_r2);
+ __get_user(regs->ARM_r3, &sc->arm_r3);
+ __get_user(regs->ARM_r4, &sc->arm_r4);
+ __get_user(regs->ARM_r5, &sc->arm_r5);
+ __get_user(regs->ARM_r6, &sc->arm_r6);
+ __get_user(regs->ARM_r7, &sc->arm_r7);
+ __get_user(regs->ARM_r8, &sc->arm_r8);
+ __get_user(regs->ARM_r9, &sc->arm_r9);
+ __get_user(regs->ARM_r10, &sc->arm_r10);
+ __get_user(regs->ARM_fp, &sc->arm_fp);
+ __get_user(regs->ARM_ip, &sc->arm_ip);
+ __get_user(regs->ARM_sp, &sc->arm_sp);
+ __get_user(regs->ARM_lr, &sc->arm_lr);
+ __get_user(regs->ARM_pc, &sc->arm_pc); /* security! */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ __get_user(regs->ARM_cpsr, &sc->arm_cpsr); /* security! */
+#endif
+
+ /* send SIGTRAP if we're single-stepping */
+ if (ptrace_cancel_bpt (current))
+ send_sig (SIGTRAP, current, 1);
+
+ return regs->ARM_r0;
+}
+
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ sigset_t set;
+
+ frame = (struct sigframe *)regs->ARM_sp;
+
+ if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ return restore_sigcontext(regs, &frame->sc);
+
+badframe:
+ lock_kernel();
+ do_exit(SIGSEGV);
+}
+
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ sigset_t set;
+
+ frame = (struct rt_sigframe *)regs->ARM_sp;
+
+ if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ return restore_sigcontext(regs, &frame->uc.uc_mcontext);
+
+badframe:
+ lock_kernel();
+ do_exit(SIGSEGV);
+}
+
+static void
+setup_sigcontext(struct sigcontext *sc, /*struct _fpstate *fpstate,*/
+ struct pt_regs *regs, unsigned long mask)
+{
+ __put_user (regs->ARM_r0, &sc->arm_r0);
+ __put_user (regs->ARM_r1, &sc->arm_r1);
+ __put_user (regs->ARM_r2, &sc->arm_r2);
+ __put_user (regs->ARM_r3, &sc->arm_r3);
+ __put_user (regs->ARM_r4, &sc->arm_r4);
+ __put_user (regs->ARM_r5, &sc->arm_r5);
+ __put_user (regs->ARM_r6, &sc->arm_r6);
+ __put_user (regs->ARM_r7, &sc->arm_r7);
+ __put_user (regs->ARM_r8, &sc->arm_r8);
+ __put_user (regs->ARM_r9, &sc->arm_r9);
+ __put_user (regs->ARM_r10, &sc->arm_r10);
+ __put_user (regs->ARM_fp, &sc->arm_fp);
+ __put_user (regs->ARM_ip, &sc->arm_ip);
+ __put_user (regs->ARM_sp, &sc->arm_sp);
+ __put_user (regs->ARM_lr, &sc->arm_lr);
+ __put_user (regs->ARM_pc, &sc->arm_pc); /* security! */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ __put_user (regs->ARM_cpsr, &sc->arm_cpsr); /* security! */
+#endif
+
+ __put_user (current->tss.trap_no, &sc->trap_no);
+ __put_user (current->tss.error_code, &sc->error_code);
+ __put_user (mask, &sc->oldmask);
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ unsigned long retcode;
+
+ frame = (struct sigframe *)regs->ARM_sp - 1;
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto segv_and_exit;
+
+ setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = (unsigned long)ka->sa.sa_restorer; /* security! */
+ } else {
+ retcode = (unsigned long)&frame->retcode;
+ __put_user(SWI_SYS_SIGRETURN, &frame->retcode);
+ __flush_entry_to_ram (&frame->retcode);
+ }
+
+ if (current->exec_domain && current->exec_domain->signal_invmap && sig < 32)
+ regs->ARM_r0 = current->exec_domain->signal_invmap[sig];
+ else
+ regs->ARM_r0 = sig;
+ regs->ARM_sp = (unsigned long)frame;
+ regs->ARM_lr = retcode;
+ regs->ARM_pc = (unsigned long)ka->sa.sa_handler; /* security! */
+ return;
+
+segv_and_exit:
+ lock_kernel();
+ do_exit (SIGSEGV);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ unsigned long retcode;
+
+ frame = (struct rt_sigframe *)regs->ARM_sp - 1;
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto segv_and_exit;
+
+ __put_user(&frame->info, &frame->pinfo);
+ __put_user(&frame->uc, &frame->puc);
+ __copy_to_user(&frame->info, info, sizeof(*info));
+
+ /* Clear all the bits of the ucontext we don't use. */
+ __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+
+ setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
+ regs, set->sig[0]);
+ __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = (unsigned long)ka->sa.sa_restorer; /* security! */
+ } else {
+ retcode = (unsigned long)&frame->retcode;
+ __put_user(SWI_SYS_RT_SIGRETURN, &frame->retcode);
+ __flush_entry_to_ram (&frame->retcode);
+ }
+
+ if (current->exec_domain && current->exec_domain->signal_invmap && sig < 32)
+ regs->ARM_r0 = current->exec_domain->signal_invmap[sig];
+ else
+ regs->ARM_r0 = sig;
+ regs->ARM_sp = (unsigned long)frame;
+ regs->ARM_lr = retcode;
+ regs->ARM_pc = (unsigned long)ka->sa.sa_handler; /* security! */
+ return;
+
+segv_and_exit:
+ lock_kernel();
+ do_exit (SIGSEGV);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ unsigned long instr, *pc = (unsigned long *)(instruction_pointer(regs)-4);
+ struct k_sigaction *ka;
+ siginfo_t info;
+ int single_stepping, swi_instr;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ single_stepping = ptrace_cancel_bpt (current);
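+	/* Was the instruction before the current pc a SWI (bits 27-24 set)?
+	 * If so, system call restarting may apply below.
+	 */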
+ swi_instr = (!get_user (instr, pc) && (instr & 0x0f000000) == 0x0f000000);
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq (&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq (&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ single_stepping |= ptrace_cancel_bpt (current);
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ lock_kernel();
+ if (current->binfmt
+ && current->binfmt->core_dump
+ && current->binfmt->core_dump(signr, regs))
+ exit_code |= 0x80;
+ unlock_kernel();
+ /* FALLTHRU */
+
+ default:
+ lock_kernel();
+ sigaddset(&current->signal, signr);
+ current->flags |= PF_SIGNALED;
+ do_exit(exit_code);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Are we from a system call? */
+ if (swi_instr) {
+ switch (regs->ARM_r0) {
+ case -ERESTARTNOHAND:
+ regs->ARM_r0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->ARM_r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc -= 4;
+ }
+ }
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ if (single_stepping)
+ ptrace_set_bpt (current);
+ return 1;
+ }
+
+ if (swi_instr &&
+ (regs->ARM_r0 == -ERESTARTNOHAND ||
+ regs->ARM_r0 == -ERESTARTSYS ||
+ regs->ARM_r0 == -ERESTARTNOINTR)) {
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc -= 4;
+ }
+ if (single_stepping)
+ ptrace_set_bpt (current);
+ return 0;
+}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
new file mode 100644
index 000000000..ab514903d
--- /dev/null
+++ b/arch/arm/kernel/sys_arm.c
@@ -0,0 +1,372 @@
+/*
+ * linux/arch/arm/kernel/sys_arm.c
+ *
+ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/arm
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * Constant strings used in inlined functions in header files
+ */
+/* proc/system.h */
+const char xchg_str[] = "xchg";
+/* arch/dma.h */
+const char dma_str[] = "%s: dma %d not supported\n";
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ lock_kernel();
+ error = do_pipe(fd);
+ unlock_kernel();
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls.  ARM Linux used not to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing.
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct a;
+
+ lock_kernel();
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ if (a.fd >= NR_OPEN || !(file = current->files->fd[a.fd]))
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
+out:
+ unlock_kernel();
+ return error;
+}
+
+
+extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ lock_kernel();
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ ret = sys_semop (first, (struct sembuf *)ptr, second);
+ goto out;
+ case SEMGET:
+ ret = sys_semget (first, second, third);
+ goto out;
+ case SEMCTL: {
+ union semun fourth;
+ ret = -EINVAL;
+ if (!ptr)
+ goto out;
+ ret = -EFAULT;
+ if (get_user(fourth.__pad, (void **) ptr))
+ goto out;
+ ret = sys_semctl (first, second, third, fourth);
+ goto out;
+ }
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ ret = sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ goto out;
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ ret = -EINVAL;
+ if (!ptr)
+ goto out;
+ ret = -EFAULT;
+ if (copy_from_user(&tmp,(struct ipc_kludge *) ptr,
+ sizeof (tmp)))
+ goto out;
+ ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
+ goto out;
+ }
+ case 1: default:
+ ret = sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
+ goto out;
+ }
+ case MSGGET:
+ ret = sys_msgget ((key_t) first, second);
+ goto out;
+ case MSGCTL:
+ ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ goto out;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ case 0: default: {
+ ulong raddr;
+ ret = sys_shmat (first, (char *) ptr, second, &raddr);
+ if (ret)
+ goto out;
+ ret = put_user (raddr, (ulong *) third);
+ goto out;
+ }
+ case 1: /* iBCS2 emulator entry point */
+ ret = -EINVAL;
+ if (!segment_eq(get_fs(), get_ds()))
+ goto out;
+ ret = sys_shmat (first, (char *) ptr, second, (ulong *) third);
+ goto out;
+ }
+ case SHMDT:
+ ret = sys_shmdt ((char *)ptr);
+ goto out;
+ case SHMGET:
+ ret = sys_shmget (first, second, third);
+ goto out;
+ case SHMCTL:
+ ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ goto out;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ else
+ ret = -EINVAL;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+/* Fork a new task - this creates a new program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_fork(struct pt_regs *regs)
+{
+ int ret;
+
+ lock_kernel();
+ ret = do_fork(SIGCHLD, regs->ARM_sp, regs);
+ unlock_kernel();
+
+ return ret;
+}
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs)
+{
+ int ret;
+
+ lock_kernel();
+ if (!newsp)
+ newsp = regs->ARM_sp;
+
+ ret = do_fork(clone_flags, newsp, regs);
+ unlock_kernel();
+ return ret;
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_execve(char *filenamei, char **argv, char **envp, struct pt_regs *regs)
+{
+ int error;
+ char * filename;
+
+ lock_kernel();
+ filename = getname(filenamei);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
+
+/*
+ * Detect the old function calling standard
+ */
+static inline unsigned long old_calling_standard (struct pt_regs *regs)
+{
+ unsigned long instr, *pcv = (unsigned long *)(instruction_pointer(regs) - 8);
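+	/* 0xe1a0300d is "mov r3, sp" - presumably the old wrappers pointed
+	 * r3 at the extra parameters on the stack before issuing the SWI.
+	 */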
+ return (!get_user (instr, pcv) && instr == 0xe1a0300d);
+}
+
+/* Compatibility functions - we used to pass 5 parameters as r0, r1, r2, *r3, *(r3+4)
+ * We now use r0 - r4, and return an error if the old style calling standard is used.
+ * Eventually these functions will disappear.
+ */
+asmlinkage int
+sys_compat_llseek (unsigned int fd, unsigned long offset_high, unsigned long offset_low,
+ loff_t *result, unsigned int origin, struct pt_regs *regs)
+{
+ extern int sys_llseek (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int);
+
+ if (old_calling_standard (regs)) {
+ printk (KERN_NOTICE "%s (%d): unsupported llseek call standard\n",
+ current->comm, current->pid);
+ return -EINVAL;
+ }
+ return sys_llseek (fd, offset_high, offset_low, result, origin);
+}
+
+asmlinkage int
+sys_compat_mount (char *devname, char *dirname, char *type, unsigned long flags, void *data,
+ struct pt_regs *regs)
+{
+ extern int sys_mount (char *, char *, char *, unsigned long, void *);
+
+ if (old_calling_standard (regs)) {
+ printk (KERN_NOTICE "%s (%d): unsupported mount call standard\n",
+ current->comm, current->pid);
+ return -EINVAL;
+ }
+ return sys_mount (devname, dirname, type, flags, data);
+}
+
+asmlinkage int sys_uname (struct old_utsname * name)
+{
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete uname call\n",
+ current->comm, current->pid);
+ }
+
+ if (name && !copy_to_user (name, &system_utsname, sizeof (*name)))
+ return 0;
+ return -EFAULT;
+}
+
+asmlinkage int sys_olduname(struct oldold_utsname * name)
+{
+ int error;
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete olduname call\n",
+ current->comm, current->pid);
+ }
+
+ if (!name)
+ return -EFAULT;
+
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ return -EFAULT;
+
+ error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+ error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+ error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+ error -= __put_user(0,name->release+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+ error -= __put_user(0,name->version+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+ error -= __put_user(0,name->machine+__OLD_UTS_LEN);
+ error = error ? -EFAULT : 0;
+
+ return error;
+}
+
+asmlinkage int sys_pause(void)
+{
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete pause call\n",
+ current->comm, current->pid);
+ }
+
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
new file mode 100644
index 000000000..b1c679ec5
--- /dev/null
+++ b/arch/arm/kernel/time.c
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
+ *
+ * This file contains the ARM-specific time handling details:
+ * reading the RTC at bootup, etc...
+ *
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#include <linux/timex.h>
+#include <asm/irq-no.h>
+#include <asm/hardware.h>
+
+extern int setup_arm_irq(int, struct irqaction *);
+extern volatile unsigned long lost_ticks;
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
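+
+/*
+ * Sanity check of the constant: mktime(1970, 1, 1, 0, 0, 0) folds mon to
+ * 11 and year to 1969; 492 - 19 + 4 + 336 + 1 + 1969*365 = 719499 cancels
+ * the constant, giving 0 as expected.
+ */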
+
+#include <asm/arch/time.h>
+
+static unsigned long do_gettimeoffset(void)
+{
+ return gettimeoffset ();
+}
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_flags_cli (flags);
+ *tv = xtime;
+ tv->tv_usec += do_gettimeoffset();
+
+ /*
+ * xtime is atomically updated in timer_bh. lost_ticks is
+	 * nonzero if the timer bottom half hasn't executed yet.
+ */
+ if (lost_ticks)
+ tv->tv_usec += USECS_PER_JIFFY;
+
+ restore_flags(flags);
+
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli ();
+ /* This is revolting. We need to set the xtime.tv_usec
+ * correctly. However, the value in this location is
+	 * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ tv->tv_usec -= do_gettimeoffset();
+
+ if (tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = MAXPHASE;
+ time_esterror = MAXPHASE;
+ sti ();
+}
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick.
+ */
+static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (reset_timer ())
+ do_timer(regs);
+
+ update_rtc ();
+}
+
+static struct irqaction irqtimer0 = { timer_interrupt, 0, 0, "timer", NULL, NULL };
+
+void time_init(void)
+{
+ xtime.tv_sec = setup_timer();
+ xtime.tv_usec = 0;
+
+ setup_arm_irq(IRQ_TIMER0, &irqtimer0);
+}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
new file mode 100644
index 000000000..7ff7436c5
--- /dev/null
+++ b/arch/arm/kernel/traps.c
@@ -0,0 +1,306 @@
+/*
+ * linux/arch/arm/kernel/traps.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
+ */
+
+/*
+ * 'traps.c' handles hardware exceptions after we have saved some state in
+ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
+ * kill the offending process.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/pgtable.h>
+
+extern void fpe_save(struct fp_soft_struct *);
+extern void fpe_restore(struct fp_soft_struct *);
+extern void die_if_kernel(char *str, struct pt_regs *regs, int err, int ret);
+extern void c_backtrace (unsigned long fp, int pmode);
+extern int ptrace_cancel_bpt (struct task_struct *);
+
+char *processor_modes[]=
+{ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
+ "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
+ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
+ "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
+};
+
+static char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+
+static inline void console_verbose(void)
+{
+ extern int console_loglevel;
+ console_loglevel = 15;
+}
+
+int kstack_depth_to_print = 200;
+
+static int verify_stack_pointer (unsigned long stackptr, int size)
+{
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ if (stackptr < 0x02048000 || stackptr + size > 0x03000000)
+ return -EFAULT;
+#else
+ if (stackptr < 0xc0000000 || stackptr + size > (unsigned long)high_memory)
+ return -EFAULT;
+#endif
+ return 0;
+}
+
+static void dump_stack (unsigned long *start, unsigned long *end, int offset, int max)
+{
+ unsigned long *p;
+ int i;
+
+ for (p = start + offset, i = 0; i < max && p < end; i++, p++) {
+ if (i && (i & 7) == 0)
+ printk ("\n ");
+ printk ("%08lx ", *p);
+ }
+ printk ("\n");
+}
+
+/*
+ * These constants are for searching for possible module text
+ * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
+ * a guess of how much space is likely to be vmalloced.
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define MODULE_RANGE (8*1024*1024)
+
+static void dump_instr (unsigned long pc)
+{
+ unsigned long module_start, module_end;
+ int pmin = -2, pmax = 3, ok = 0;
+ extern char start_kernel, _etext;
+
+ module_start = VMALLOC_START;
+ module_end = module_start + MODULE_RANGE;
+
+ if ((pc >= (unsigned long) &start_kernel) &&
+ (pc <= (unsigned long) &_etext)) {
+ if (pc + pmin < (unsigned long) &start_kernel)
+ pmin = ((unsigned long) &start_kernel) - pc;
+ if (pc + pmax > (unsigned long) &_etext)
+ pmax = ((unsigned long) &_etext) - pc;
+ ok = 1;
+ } else if (pc >= module_start && pc <= module_end) {
+ if (pc + pmin < module_start)
+ pmin = module_start - pc;
+ if (pc + pmax > module_end)
+ pmax = module_end - pc;
+ ok = 1;
+ }
+ printk ("Code: ");
+ if (ok) {
+ int i;
+ for (i = pmin; i < pmax; i++)
+ printk("%08lx ", ((unsigned long *)pc)[i]);
+ printk ("\n");
+ } else
+ printk ("pc not in code space\n");
+}
+
+/*
+ * This function is protected against kernel-mode re-entrancy. If it
+ * is re-entered it will hang the system since we can't guarantee in
+ * this case that any of the functions that it calls are safe any more.
+ * Even the panic function could be a problem, but we'll give it a go.
+ */
+void die_if_kernel(char *str, struct pt_regs *regs, int err, int ret)
+{
+ static int died = 0;
+ unsigned long cstack, sstack, frameptr;
+
+ if (user_mode(regs))
+ return;
+
+ switch (died) {
+ case 2:
+ while (1);
+ case 1:
+ died ++;
+ panic ("die_if_kernel re-entered. Major kernel corruption. Please reboot me!");
+ break;
+ case 0:
+ died ++;
+ break;
+ }
+
+ console_verbose ();
+ printk ("Internal error: %s: %x\n", str, err);
+ printk ("CPU: %d", smp_processor_id());
+ show_regs (regs);
+ printk ("Process %s (pid: %d, stackpage=%08lx)\nStack: ",
+ current->comm, current->pid, 4096+(unsigned long)current);
+
+ cstack = (unsigned long)(regs + 1);
+ sstack = 4096+(unsigned long)current;
+
+ if (*(unsigned long *)sstack != STACK_MAGIC)
+ printk ("*** corrupted stack page\n ");
+
+ if (verify_stack_pointer (cstack, 4))
+ printk ("%08lx invalid kernel stack pointer\n", cstack);
+	else if (cstack > sstack + 4096)
+		printk("(sp overflow)\n");
+	else if (cstack < sstack)
+		printk("(sp underflow)\n");
+ else
+ dump_stack ((unsigned long *)sstack, (unsigned long *)sstack + 1024,
+ cstack - sstack, kstack_depth_to_print);
+
+ frameptr = regs->ARM_fp;
+ if (frameptr) {
+ if (verify_stack_pointer (frameptr, 4))
+ printk ("Backtrace: invalid frame pointer\n");
+ else {
+ printk("Backtrace: \n");
+ c_backtrace (frameptr, processor_mode(regs));
+ }
+ }
+
+ dump_instr (instruction_pointer(regs));
+ died = 0;
+ if (ret != -1)
+ do_exit (ret);
+ else {
+ cli ();
+ while (1);
+ }
+}
+
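+/*
+ * Report a misaligned user-access pointer: log the pointer and the
+ * caller's address (recovered from lr), then deliver SIGBUS to the
+ * current task.
+ */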
+void bad_user_access_alignment (const void *ptr)
+{
+ void *pc;
+ __asm__("mov %0, lr\n": "=r" (pc));
+ printk (KERN_ERR "bad_user_access_alignment called: ptr = %p, pc = %p\n", ptr, pc);
+ current->tss.error_code = 0;
+ current->tss.trap_no = 11;
+ force_sig (SIGBUS, current);
+/* die_if_kernel("Oops - bad user access alignment", regs, mode, SIGBUS);*/
+}
+
+asmlinkage void do_undefinstr (int address, struct pt_regs *regs, int mode)
+{
+ current->tss.error_code = 0;
+ current->tss.trap_no = 6;
+ force_sig (SIGILL, current);
+ die_if_kernel("Oops - undefined instruction", regs, mode, SIGILL);
+}
+
+asmlinkage void do_excpt (int address, struct pt_regs *regs, int mode)
+{
+ current->tss.error_code = 0;
+ current->tss.trap_no = 11;
+ force_sig (SIGBUS, current);
+ die_if_kernel("Oops - address exception", regs, mode, SIGBUS);
+}
+
+asmlinkage void do_unexp_fiq (struct pt_regs *regs)
+{
+#ifndef CONFIG_IGNORE_FIQ
+ printk ("Hmm. Unexpected FIQ received, but trying to continue\n");
+ printk ("You may have a hardware problem...\n");
+#endif
+}
+
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
+{
+ printk (KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
+ handler[reason],
+ processor_modes[proc_mode]);
+ die_if_kernel ("Oops", regs, 0, -1);
+}
+
+/*
+ * 'math_state_restore()' saves the current math state into the
+ * previous owner's math state area and loads the new state for the
+ * current task.
+ *
+ * We no longer save/restore the math state on every context switch;
+ * we only do so when the FPU actually gets used.
+ */
+asmlinkage void math_state_restore (void)
+{
+ if (last_task_used_math == current)
+ return;
+ if (last_task_used_math)
+ /*
+ * Save current fp state into last_task_used_math->tss.fpe_save
+ */
+ fpe_save (&last_task_used_math->tss.fpstate.soft);
+ last_task_used_math = current;
+ if (current->used_math) {
+ /*
+ * Restore current fp state from current->tss.fpe_save
+ */
+ fpe_restore (&current->tss.fpstate.soft);
+ } else {
+ /*
+ * initialise fp state
+ */
+ fpe_restore (&init_task.tss.fpstate.soft);
+ current->used_math = 1;
+ }
+}
+
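+/*
+ * Special ARM SWIs trapped by the kernel: number 0 is a branch
+ * through zero, number 1 is the ptrace breakpoint SWI; anything else
+ * is reported and the task is sent SIGILL.
+ */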
+asmlinkage void arm_syscall (int no, struct pt_regs *regs)
+{
+ switch (no) {
+ case 0: /* branch through 0 */
+ printk ("[%d] %s: branch through zero\n", current->pid, current->comm);
+ force_sig (SIGILL, current);
+ if (user_mode(regs)) {
+ show_regs (regs);
+ c_backtrace (regs->ARM_fp, processor_mode(regs));
+ }
+ die_if_kernel ("Oops", regs, 0, SIGILL);
+ break;
+
+ case 1: /* SWI_BREAK_POINT */
+ regs->ARM_pc -= 4; /* Decrement PC by one instruction */
+ ptrace_cancel_bpt (current);
+ force_sig (SIGTRAP, current);
+ break;
+
+ default:
+ printk ("[%d] %s: arm syscall %d\n", current->pid, current->comm, no);
+ force_sig (SIGILL, current);
+ if (user_mode(regs)) {
+ show_regs (regs);
+ c_backtrace (regs->ARM_fp, processor_mode(regs));
+ }
+ die_if_kernel ("Oops", regs, no, SIGILL);
+ break;
+ }
+}
+
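+/*
+ * Obsolete system calls end up here: log the call, dump the
+ * registers and deliver SIGILL to the offending task.
+ */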
+asmlinkage void deferred(int n, struct pt_regs *regs)
+{
+ printk ("[%d] %s: old system call %X\n", current->pid, current->comm, n);
+ show_regs (regs);
+ force_sig (SIGILL, current);
+}
+
+asmlinkage void arm_malalignedptr(const char *str, void *pc, volatile void *ptr)
+{
+ printk ("Mal-aligned pointer in %s: %p (PC=%p)\n", str, ptr, pc);
+}
+
+asmlinkage void arm_invalidptr (const char *function, int size)
+{
+ printk ("Invalid pointer size in %s (PC=%p) size %d\n",
+ function, __builtin_return_address(0), size);
+}