author | Ralf Baechle <ralf@linux-mips.org> | 1998-05-07 02:55:41 +0000
---|---|---
committer | Ralf Baechle <ralf@linux-mips.org> | 1998-05-07 02:55:41 +0000
commit | dcec8a13bf565e47942a1751a9cec21bec5648fe |
tree | 548b69625b18cc2e88c3e68d0923be546c9ebb03 /arch/alpha |
parent | 2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4 |
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'arch/alpha')
30 files changed, 5003 insertions, 726 deletions
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 9cd1ab3e6..ff6e422df 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -10,8 +10,38 @@ NM := nm -B +#LINKFLAGS = -static -T arch/alpha/vmlinux.lds +#CFLAGS := $(CFLAGS) -pipe -mno-fp-regs -ffixed-8 + +ifdef CONFIG_CROSSCOMPILE +# enable this for linking under OSF/1: +LINKFLAGS = -non_shared -T 0xfffffc0000310000 -N +else + elf=$(shell if $(LD) --help | grep elf64alpha >/dev/null; then echo yes; fi) + ifeq ($(elf),yes) +# LINKFLAGS = -static -Ttext 0xfffffc0000310000 -N LINKFLAGS = -static -T arch/alpha/vmlinux.lds -CFLAGS := $(CFLAGS) -pipe -mno-fp-regs -ffixed-8 + else + LINKFLAGS = -static -T arch/alpha/vmlinux.lds -N + endif +# GNU gcc/cc1/as can use pipes instead of temporary files +CFLAGS := $(CFLAGS) -pipe +endif + +CFLAGS := $(CFLAGS) -mno-fp-regs -ffixed-8 -Wno-uninitialized + +# determine if we can use the BWX instructions with GAS +$(shell rm -f ./GAS_VER) +$(shell $(AS) --version >& ./GAS_VER) +OLD_GAS := $(shell if cat ./GAS_VER | grep 'version 2.7' > /dev/null; then echo yes; else echo no; fi) +$(shell rm -f ./GAS_VER) + +ifneq ($(OLD_GAS),yes) +# if PYXIS, then enable use of BWIO space + ifeq ($(CONFIG_ALPHA_PYXIS),y) + CFLAGS := $(CFLAGS) -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED + endif +endif HEAD := arch/alpha/kernel/head.o @@ -23,7 +53,7 @@ ifeq ($(CONFIG_MATHEMU),y) CORE_FILES := $(CORE_FILES) arch/alpha/math-emu/math-emu.o endif -LIBS := arch/alpha/lib/lib.a $(LIBS) arch/alpha/lib/lib.a +LIBS := $(TOPDIR)/arch/alpha/lib/lib.a $(LIBS) $(TOPDIR)/arch/alpha/lib/lib.a MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c index 65829793e..a1dc6d818 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -16,6 +16,7 @@ #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> +#include <asm/io.h> #include <stdarg.h> @@ -156,7 +157,8 @@ void pal_init(void) printk("Ok (rev %lx)\n", rev); /* remove the old virtual page-table mapping */ L1[1] = 0; - flush_tlb_all(); + + tbia(); /* do it directly in case we are SMP */ } static inline long load(unsigned long dst, @@ -189,30 +191,59 @@ extern char _end; void start_kernel(void) { - long i; - int nbytes; - char envval[256]; + static long i; + static int nbytes; + /* + * note that this crufty stuff with static and envval and envbuf + * is because: + * + * 1. frequently, the stack is is short, and we don't want to overrun; + * 2. frequently the stack is where we are going to copy the kernel to; + * 3. a certain SRM console required the GET_ENV output to stack. + */ + static char envval[256]; + char envbuf[256]; printk("Linux/AXP bootp loader for Linux " UTS_RELEASE "\n"); if (hwrpb.pagesize != 8192) { - printk("Expected 8kB pages, got %ldkB\n", hwrpb.pagesize >> 10); + printk("Expected 8kB pages, got %ldkB\n", + hwrpb.pagesize >> 10); return; } pal_init(); nbytes = dispatch(CCB_GET_ENV, ENV_BOOTED_OSFLAGS, - envval, sizeof(envval)); - if (nbytes < 0) { + envbuf, sizeof(envbuf)); + if (nbytes < 0 || nbytes >= sizeof(envbuf)) { nbytes = 0; } - envval[nbytes] = '\0'; - strcpy((char*)ZERO_PAGE, envval); - - printk("Loading the kernel ...\n"); + envbuf[nbytes] = '\0'; + memcpy(envval, envbuf, nbytes+1); + printk("Loading the kernel...'%s'\n", envval); /* NOTE: *no* callbacks or printouts from here on out!!! 
*/ +#if 1 + /* + * this is a hack, as some consoles seem to get virtual 20000000 + * (ie where the SRM console puts the kernel bootp image) memory + * overlapping physical 310000 memory, which causes real problems + * when attempting to copy the former to the latter... :-( + * + * so, we first move the kernel virtual-to-physical way above where + * we physically want the kernel to end up, then copy it from there + * to its final resting place... ;-} + * + * sigh... + */ + + i = load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE); + i = load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE); +#else i = load(START_ADDR, KERNEL_ORIGIN, KERNEL_SIZE); +#endif + + strcpy((char*)ZERO_PAGE, envval); runkernel(); diff --git a/arch/alpha/config.in b/arch/alpha/config.in index 1fefe0c62..fc2546218 100644 --- a/arch/alpha/config.in +++ b/arch/alpha/config.in @@ -6,10 +6,11 @@ mainmenu_name "Kernel configuration of Linux for Alpha machines" # clear all implied options (don't want default values for those): unset CONFIG_CROSSCOMPILE CONFIG_NATIVE -unset CONFIG_ALPHA_EV4 CONFIG_ALPHA_EV5 +unset CONFIG_ALPHA_EV4 CONFIG_ALPHA_EV5 CONFIG_ALPHA_EV6 unset CONFIG_PCI CONFIG_ALPHA_EISA unset CONFIG_ALPHA_LCA CONFIG_ALPHA_APECS CONFIG_ALPHA_CIA unset CONFIG_ALPHA_T2 CONFIG_ALPHA_PYXIS +unset CONFIG_ALPHA_TSUNAMI CONFIG_ALPHA_MCPCIA unset CONFIG_ALPHA_NEED_ROUNDING_EMULATION mainmenu_option next_comment @@ -48,13 +49,16 @@ choice 'Alpha system type' \ PC164 CONFIG_ALPHA_PC164 \ LX164 CONFIG_ALPHA_LX164 \ SX164 CONFIG_ALPHA_SX164 \ + DP264 CONFIG_ALPHA_DP264 \ Jensen CONFIG_ALPHA_JENSEN \ Noname CONFIG_ALPHA_NONAME \ + Takara CONFIG_ALPHA_TAKARA \ Mikasa CONFIG_ALPHA_MIKASA \ Noritake CONFIG_ALPHA_NORITAKE \ Alcor CONFIG_ALPHA_ALCOR \ Miata CONFIG_ALPHA_MIATA \ Sable CONFIG_ALPHA_SABLE \ + Rawhide CONFIG_ALPHA_RAWHIDE \ AlphaBook1 CONFIG_ALPHA_BOOK1 \ Ruffian CONFIG_ALPHA_RUFFIAN \ Platform2000 CONFIG_ALPHA_P2K" Cabriolet @@ -78,7 +82,8 @@ then define_bool CONFIG_ALPHA_APECS y fi if [ "$CONFIG_ALPHA_EB164" = "y" -o "$CONFIG_ALPHA_PC164" = "y" \ - -o "$CONFIG_ALPHA_ALCOR" = "y" -o "$CONFIG_ALPHA_XLT" = "y" ] + -o "$CONFIG_ALPHA_ALCOR" = "y" -o "$CONFIG_ALPHA_XLT" = "y" \ + -o "$CONFIG_ALPHA_TAKARA" = "y" ] then define_bool CONFIG_PCI y define_bool CONFIG_ALPHA_EV5 y @@ -86,9 +91,7 @@ then fi if [ "$CONFIG_ALPHA_MIKASA" = "y" -o "$CONFIG_ALPHA_NORITAKE" = "y" ] then - choice 'CPU daughtercard' \ - "Pinnacle CONFIG_ALPHA_PINNACLE \ - Primo CONFIG_ALPHA_PRIMO" Primo + bool 'EV5 CPU daughtercard (model 5/xxx)?' CONFIG_ALPHA_PRIMO if [ "$CONFIG_ALPHA_PRIMO" = "y" ] then define_bool CONFIG_ALPHA_EV5 y @@ -102,7 +105,13 @@ fi if [ "$CONFIG_ALPHA_SABLE" = "y" ] then define_bool CONFIG_PCI y + bool 'EV5 CPU(s) (model 5/xxx)?' 
CONFIG_ALPHA_GAMMA + if [ "$CONFIG_ALPHA_GAMMA" = "y" ] + then + define_bool CONFIG_ALPHA_EV5 y + else define_bool CONFIG_ALPHA_EV4 y + fi define_bool CONFIG_ALPHA_T2 y fi if [ "$CONFIG_ALPHA_MIATA" = "y" -o "$CONFIG_ALPHA_LX164" = "y" \ @@ -112,6 +121,18 @@ then define_bool CONFIG_ALPHA_EV5 y define_bool CONFIG_ALPHA_PYXIS y fi +if [ "$CONFIG_ALPHA_DP264" = "y" ] +then + define_bool CONFIG_PCI y + define_bool CONFIG_ALPHA_EV6 y + define_bool CONFIG_ALPHA_TSUNAMI y +fi +if [ "$CONFIG_ALPHA_RAWHIDE" = "y" ] +then + define_bool CONFIG_PCI y + define_bool CONFIG_ALPHA_EV5 y + define_bool CONFIG_ALPHA_MCPCIA y +fi if [ "$CONFIG_ALPHA_JENSEN" = "y" ] then define_bool CONFIG_ALPHA_EV4 y @@ -127,12 +148,19 @@ if [ "$CONFIG_ALPHA_CABRIOLET" = "y" -o "$CONFIG_ALPHA_AVANTI" = "y" \ -o "$CONFIG_ALPHA_MIKASA" = "y" -o "$CONFIG_ALPHA_ALCOR" = "y" \ -o "$CONFIG_ALPHA_SABLE" = "y" -o "$CONFIG_ALPHA_MIATA" = "y" \ -o "$CONFIG_ALPHA_NORITAKE" = "y" -o "$CONFIG_ALPHA_PC164" = "y" \ - -o "$CONFIG_ALPHA_LX164" = "y" -o "$CONFIG_ALPHA_SX164" = "y" ] + -o "$CONFIG_ALPHA_LX164" = "y" -o "$CONFIG_ALPHA_SX164" = "y" \ + -o "$CONFIG_ALPHA_DP264" = "y" -o "$CONFIG_ALPHA_RAWHIDE" = "y" ] then - bool 'Using SRM as bootloader' CONFIG_ALPHA_SRM + bool 'Use SRM as bootloader' CONFIG_ALPHA_SRM + if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + if [ "$CONFIG_ALPHA_SRM" = "y" ]; then + bool ' Use SRM PCI setup' CONFIG_ALPHA_SRM_SETUP + fi + fi fi if [ "$CONFIG_ALPHA_ALCOR" = "y" -o "$CONFIG_ALPHA_MIKASA" = "y" \ - -o "$CONFIG_ALPHA_SABLE" = "y" -o "$CONFIG_ALPHA_NORITAKE" = "y" ] + -o "$CONFIG_ALPHA_SABLE" = "y" -o "$CONFIG_ALPHA_NORITAKE" = "y" \ + -o "$CONFIG_ALPHA_RAWHIDE" = "y" ] then define_bool CONFIG_ALPHA_EISA y fi @@ -141,9 +169,15 @@ then define_bool CONFIG_ALPHA_AVANTI y fi +#bool 'Echo console messages on /dev/ttyS0 (COM1)' CONFIG_SERIAL_ECHO + if [ "$CONFIG_PCI" = "y" ]; then bool 'TGA Console Support' CONFIG_TGA_CONSOLE - if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then +# if [ "$CONFIG_TGA_CONSOLE" = "y" ]; then +# bool 'VGA Console Support' CONFIG_VGA_CONSOLE +# fi + bool 'PCI quirks' CONFIG_PCI_QUIRKS + if [ "$CONFIG_PCI_QUIRKS" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then bool 'PCI bridge optimization (experimental)' CONFIG_PCI_OPTIMIZE fi bool 'Backward-compatible /proc/pci' CONFIG_PCI_OLD_PROC @@ -216,17 +250,6 @@ if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then fi endmenu -# Conditionally compile in the Uniform CD-ROM driver -if [ "$CONFIG_BLK_DEV_IDECD" = "y" -o "$CONFIG_BLK_DEV_SR" = "y" -o "$CONFIG_SBPCD" = "y" -o "$CONFIG_MCD" = "y" -o "$CONFIG_CM206" = "y" -o "$CONFIG_CDU31A" = "y" ]; then - define_bool CONFIG_CDROM y -else - if [ "$CONFIG_BLK_DEV_IDECD" = "m" -o "$CONFIG_BLK_DEV_SR" = "m" -o "$CONFIG_SBPCD" = "m" -o "$CONFIG_MCD" = "m" -o "$CONFIG_CM206" = "m" -o "$CONFIG_CDU31A" = "m" ]; then - define_bool CONFIG_CDROM m - else - define_bool CONFIG_CDROM n - fi -fi - source fs/Config.in source fs/nls/Config.in diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig index 1bdecf444..7d78e4b97 100644 --- a/arch/alpha/defconfig +++ b/arch/alpha/defconfig @@ -27,14 +27,20 @@ CONFIG_NATIVE=y # CONFIG_ALPHA_EB64P is not set # CONFIG_ALPHA_EB164 is not set # CONFIG_ALPHA_PC164 is not set +# CONFIG_ALPHA_LX164 is not set +# CONFIG_ALPHA_SX164 is not set +# CONFIG_ALPHA_DP264 is not set # CONFIG_ALPHA_JENSEN is not set # CONFIG_ALPHA_NONAME is not set +# CONFIG_ALPHA_TAKARA is not set # CONFIG_ALPHA_MIKASA is not set # CONFIG_ALPHA_NORITAKE is not set CONFIG_ALPHA_ALCOR=y # CONFIG_ALPHA_MIATA is not set # 
CONFIG_ALPHA_SABLE is not set +# CONFIG_ALPHA_RAWHIDE is not set # CONFIG_ALPHA_BOOK1 is not set +# CONFIG_ALPHA_RUFFIAN is not set # CONFIG_ALPHA_P2K is not set CONFIG_PCI=y CONFIG_ALPHA_EV5=y @@ -214,7 +220,6 @@ CONFIG_DE4X5=y # CD-ROM drivers (not for SCSI or IDE/ATAPI drives) # # CONFIG_CD_NO_IDESCSI is not set -CONFIG_CDROM=y # # Filesystems diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index ba0aee556..8d09ea8c4 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -8,9 +8,9 @@ # Note 2! The CFLAGS definitions are now in the main makefile... .S.s: - $(CPP) -D__ASSEMBLY__ -traditional $< -o $*.s + $(CPP) -D__ASSEMBLY__ $(AFLAGS) -traditional $< -o $*.s .S.o: - $(CC) -D__ASSEMBLY__ -traditional -c $< -o $*.o + $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o all: kernel.o head.o @@ -35,19 +35,29 @@ endif ifdef CONFIG_ALPHA_T2 O_OBJS += t2.o endif +ifdef CONFIG_ALPHA_TSUNAMI +O_OBJS += tsunami.o +endif +ifdef CONFIG_ALPHA_MCPCIA +O_OBJS += mcpcia.o +endif + ifneq ($(CONFIG_ALPHA_PC164)$(CONFIG_ALPHA_LX164),nn) O_OBJS += smc37c93x.o endif -ifdef CONFIG_ALPHA_SX164 +ifneq ($(CONFIG_ALPHA_SX164)$(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_DP264),nnn) O_OBJS += smc37c669.o endif +ifdef SMP +O_OBJS += smp.o +endif all: kernel.o head.o head.o: head.s head.s: head.S $(TOPDIR)/include/asm-alpha/system.h - $(CPP) -traditional -o $*.s $< + $(CPP) -traditional $(AFLAGS) -o $*.s $< include $(TOPDIR)/Rules.make diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index 132175541..c220ccdc6 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -27,7 +27,6 @@ #define __KERNEL_SYSCALLS__ #include <asm/unistd.h> -extern void bcopy (const char *src, char *dst, int len); extern struct hwrpb_struct *hwrpb; extern void dump_thread(struct pt_regs *, struct user *); extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); @@ -134,7 +133,6 @@ EXPORT_SYMBOL(__strlen_user); * interface isn't gonna change any time soon now, so it's OK * to leave it out of version control. */ -# undef bcopy # undef memcpy # undef memset EXPORT_SYMBOL_NOVERS(__divl); @@ -147,7 +145,3 @@ EXPORT_SYMBOL_NOVERS(__remq); EXPORT_SYMBOL_NOVERS(__remqu); EXPORT_SYMBOL_NOVERS(memcpy); EXPORT_SYMBOL_NOVERS(memset); - -#if CONFIG_PCI -EXPORT_SYMBOL(pci_devices); -#endif diff --git a/arch/alpha/kernel/apecs.c b/arch/alpha/kernel/apecs.c index d5e7fa2a7..2bbdf0062 100644 --- a/arch/alpha/kernel/apecs.c +++ b/arch/alpha/kernel/apecs.c @@ -10,7 +10,6 @@ #include <linux/kernel.h> #include <linux/config.h> #include <linux/types.h> -#include <linux/bios32.h> #include <linux/pci.h> #include <asm/system.h> @@ -18,13 +17,14 @@ #include <asm/hwrpb.h> #include <asm/ptrace.h> -/* NOTE: Herein are back-to-back mb insns. They are magic. - A plausible explanation is that the i/o controler does not properly - handle the system transaction. Another involves timing. Ho hum. */ +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. 
+ */ extern struct hwrpb_struct *hwrpb; extern asmlinkage void wrmces(unsigned long mces); -extern int alpha_sys_type; /* * BIOS32-style PCI interface: @@ -36,13 +36,16 @@ extern int alpha_sys_type; # define DBG(args) #endif -#define vulp volatile unsigned long * #define vuip volatile unsigned int * static volatile unsigned int apecs_mcheck_expected = 0; static volatile unsigned int apecs_mcheck_taken = 0; -static unsigned long apecs_jd, apecs_jd1, apecs_jd2; +static unsigned int apecs_jd, apecs_jd1, apecs_jd2; +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int APECS_DMA_WIN_BASE = APECS_DMA_WIN_BASE_DEFAULT; +unsigned int APECS_DMA_WIN_SIZE = APECS_DMA_WIN_SIZE_DEFAULT; +#endif /* SRM_SETUP */ /* * Given a bus, device, and function number, compute resulting @@ -194,7 +197,7 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1) } /* reset error status: */ - *(vulp)APECS_IOC_DCSR = stat0; + *(vuip)APECS_IOC_DCSR = stat0; mb(); wrmces(0x7); /* reset machine check */ value = 0xffffffff; @@ -269,7 +272,7 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char typ } /* reset error status: */ - *(vulp)APECS_IOC_DCSR = stat0; + *(vuip)APECS_IOC_DCSR = stat0; mb(); wrmces(0x7); /* reset machine check */ } @@ -424,6 +427,38 @@ unsigned long apecs_init(unsigned long mem_start, unsigned long mem_end) *(vuip)APECS_IOC_TB2R = 0; #else /* CONFIG_ALPHA_XL */ +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 1 for enabled and mapped to 0 */ + if ((*(vuip)APECS_IOC_PB1R & (1U<<19)) && (*(vuip)APECS_IOC_TB1R == 0)) + { + APECS_DMA_WIN_BASE = *(vuip)APECS_IOC_PB1R & 0xfff00000U; + APECS_DMA_WIN_SIZE = *(vuip)APECS_IOC_PM1R & 0xfff00000U; + APECS_DMA_WIN_SIZE += 0x00100000U; +#if 0 + printk("apecs_init: using Window 1 settings\n"); + printk("apecs_init: PB1R 0x%x PM1R 0x%x TB1R 0x%x\n", + *(vuip)APECS_IOC_PB1R, + *(vuip)APECS_IOC_PM1R, + *(vuip)APECS_IOC_TB1R); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if ((*(vuip)APECS_IOC_PB2R & (1U<<19)) && (*(vuip)APECS_IOC_TB2R == 0)) + { + APECS_DMA_WIN_BASE = *(vuip)APECS_IOC_PB2R & 0xfff00000U; + APECS_DMA_WIN_SIZE = *(vuip)APECS_IOC_PM2R & 0xfff00000U; + APECS_DMA_WIN_SIZE += 0x00100000U; +#if 0 + printk("apecs_init: using Window 2 settings\n"); + printk("apecs_init: PB2R 0x%x PM2R 0x%x TB2R 0x%x\n", + *(vuip)APECS_IOC_PB2R, + *(vuip)APECS_IOC_PM2R, + *(vuip)APECS_IOC_TB2R); +#endif + } + else /* we must use our defaults... */ +#endif /* SRM_SETUP */ + { /* * Set up the PCI->physical memory translation windows. * For now, window 2 is disabled. In the future, we may @@ -435,9 +470,11 @@ unsigned long apecs_init(unsigned long mem_start, unsigned long mem_end) *(vuip)APECS_IOC_PB1R = 1U<<19 | (APECS_DMA_WIN_BASE & 0xfff00000U); *(vuip)APECS_IOC_PM1R = (APECS_DMA_WIN_SIZE - 1) & 0xfff00000U; *(vuip)APECS_IOC_TB1R = 0; + } #endif /* CONFIG_ALPHA_XL */ #ifdef CONFIG_ALPHA_CABRIOLET +#ifdef NO_LONGER_NEEDED_I_HOPE /* * JAE: HACK!!! for now, hardwire if configured... 
* davidm: Older miniloader versions don't set the clock frequency @@ -461,6 +498,7 @@ unsigned long apecs_init(unsigned long mem_start, unsigned long mem_end) sum += *l; hwrpb->chksum = sum; } +#endif /* NO_LONGER_NEEDED_I_HOPE */ #endif /* CONFIG_ALPHA_CABRIOLET */ /* @@ -483,15 +521,15 @@ unsigned long apecs_init(unsigned long mem_start, unsigned long mem_end) int apecs_pci_clr_err(void) { - apecs_jd = *(vulp)APECS_IOC_DCSR; + apecs_jd = *(vuip)APECS_IOC_DCSR; if (apecs_jd & 0xffe0L) { - apecs_jd1 = *(vulp)APECS_IOC_SEAR; - *(vulp)APECS_IOC_DCSR = apecs_jd | 0xffe1L; - apecs_jd = *(vulp)APECS_IOC_DCSR; + apecs_jd1 = *(vuip)APECS_IOC_SEAR; + *(vuip)APECS_IOC_DCSR = apecs_jd | 0xffe1L; + apecs_jd = *(vuip)APECS_IOC_DCSR; mb(); } - *(vulp)APECS_IOC_TBIA = APECS_IOC_TBIA; - apecs_jd2 = *(vulp)APECS_IOC_TBIA; + *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA; + apecs_jd2 = *(vuip)APECS_IOC_TBIA; mb(); return 0; } diff --git a/arch/alpha/kernel/bios32.c b/arch/alpha/kernel/bios32.c index af8971834..46880f11e 100644 --- a/arch/alpha/kernel/bios32.c +++ b/arch/alpha/kernel/bios32.c @@ -25,6 +25,7 @@ */ #include <linux/config.h> #include <linux/kernel.h> +#include <linux/tasks.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/init.h> @@ -55,7 +56,6 @@ asmlinkage int sys_pciconfig_write() #else /* CONFIG_PCI */ -#include <linux/bios32.h> #include <linux/pci.h> #include <linux/malloc.h> #include <linux/mm.h> @@ -63,6 +63,8 @@ asmlinkage int sys_pciconfig_write() #include <asm/hwrpb.h> #include <asm/io.h> #include <asm/uaccess.h> +#include <asm/segment.h> +#include <asm/system.h> #define KB 1024 @@ -70,7 +72,9 @@ asmlinkage int sys_pciconfig_write() #define GB (1024*MB) #define MAJOR_REV 0 -#define MINOR_REV 3 + +/* minor revision 4, add multi-PCI handling */ +#define MINOR_REV 4 /* * Align VAL to ALIGN, which must be a power of two. @@ -78,7 +82,20 @@ asmlinkage int sys_pciconfig_write() #define ALIGN(val,align) (((val) + ((align) - 1)) & ~((align) - 1)) +#if defined(CONFIG_ALPHA_MCPCIA) || defined(CONFIG_ALPHA_TSUNAMI) +/* multiple PCI bus machines */ +/* make handle from bus number */ +extern struct linux_hose_info *bus2hose[256]; +#define HANDLE(b) (((unsigned long)(bus2hose[(b)]->pci_hose_index)&3)<<32) +#define DEV_IS_ON_PRIMARY(dev) \ + (bus2hose[(dev)->bus->number]->pci_first_busno == (dev)->bus->number) +#else /* MCPCIA || TSUNAMI */ +#define HANDLE(b) (0) +#define DEV_IS_ON_PRIMARY(dev) ((dev)->bus->number == 0) +#endif /* MCPCIA || TSUNAMI */ /* + * PCI_MODIFY + * * Temporary internal macro. If this 0, then do not write to any of * the PCI registers, merely read them (i.e., use configuration as * determined by SRM). The SRM seem do be doing a less than perfect @@ -95,7 +112,18 @@ asmlinkage int sys_pciconfig_write() * the graphics card---there have been some rumor that the #9 BIOS * incorrectly resets that address to 0...). 
*/ +#ifdef CONFIG_ALPHA_SRM_SETUP +#define PCI_MODIFY 0 +static struct pci_dev *irq_dev_to_reset[16]; +static unsigned char irq_to_reset[16]; +static int irq_reset_count = 0; +static struct pci_dev *io_dev_to_reset[16]; +static unsigned char io_reg_to_reset[16]; +static unsigned int io_to_reset[16]; +static int io_reset_count = 0; +#else /* SRM_SETUP */ #define PCI_MODIFY 1 +#endif /* SRM_SETUP */ extern struct hwrpb_struct *hwrpb; @@ -103,9 +131,7 @@ extern struct hwrpb_struct *hwrpb; #if defined(CONFIG_ALPHA_PC164) || defined(CONFIG_ALPHA_LX164) extern int SMC93x_Init(void); #endif -#ifdef CONFIG_ALPHA_SX164 extern int SMC669_Init(void); -#endif #ifdef CONFIG_ALPHA_MIATA static int es1888_init(void); #endif @@ -115,7 +141,7 @@ static int es1888_init(void); /* * NOTE: we can't just blindly use 64K for machines with EISA busses; they * may also have PCI-PCI bridges present, and then we'd configure the bridge - * incorrectly + * incorrectly. * * Also, we start at 0x8000 or 0x9000, in hopes to get all devices' * IO space areas allocated *before* 0xC000; this is because certain @@ -123,12 +149,17 @@ static int es1888_init(void); * accesses to probe the bus. If a device's registers appear at 0xC000, * it may see an INx/OUTx at that address during BIOS emulation of the * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense. + * + * Note that we may need this stuff for SRM_SETUP also, since certain + * SRM consoles screw up and allocate I/O space addresses > 64K behind + * PCI-to_PCI bridges, which can't pass I/O addresses larger than 64K, AFAIK. */ #if defined(CONFIG_ALPHA_EISA) -static unsigned int io_base = 0x9000; /* start above 8th slot */ +#define DEFAULT_IO_BASE 0x9000 /* start above 8th slot */ #else -static unsigned int io_base = 0x8000; +#define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */ #endif +static unsigned int io_base; #if defined(CONFIG_ALPHA_XL) /* @@ -142,7 +173,7 @@ static unsigned int io_base = 0x8000; * We accept the risk that a broken Myrinet card will be put into a true XL * and thus can more easily run into the problem described below. */ -static unsigned int mem_base = 16*MB + 2*MB; /* 16M to 64M-1 is avail */ +#define DEFAULT_MEM_BASE (16*MB + 2*MB) /* 16M to 64M-1 is avail */ #elif defined(CONFIG_ALPHA_LCA) || defined(CONFIG_ALPHA_APECS) /* @@ -154,7 +185,7 @@ static unsigned int mem_base = 16*MB + 2*MB; /* 16M to 64M-1 is avail */ * However, APECS and LCA have only 34 bits for physical addresses, thus * limiting PCI bus memory addresses for SPARSE access to be less than 128Mb. */ -static unsigned int mem_base = 64*MB + 2*MB; +#define DEFAULT_MEM_BASE (64*MB + 2*MB) #else /* @@ -166,9 +197,10 @@ static unsigned int mem_base = 64*MB + 2*MB; * Because CIA and PYXIS and T2 have more bits for physical addresses, * they support an expanded range of SPARSE memory addresses. */ -static unsigned int mem_base = 128*MB + 16*MB; +#define DEFAULT_MEM_BASE (128*MB + 16*MB) #endif +static unsigned int mem_base; /* * Disable PCI device DEV so that it does not respond to I/O or memory @@ -179,7 +211,6 @@ static void disable_dev(struct pci_dev *dev) struct pci_bus *bus; unsigned short cmd; -#ifdef CONFIG_ALPHA_EISA /* * HACK: the PCI-to-EISA bridge does not seem to identify * itself as a bridge... 
:-( @@ -189,15 +220,17 @@ static void disable_dev(struct pci_dev *dev) DBG_DEVS(("disable_dev: ignoring PCEB...\n")); return; } -#endif -#ifdef CONFIG_ALPHA_SX164 + + /* + * we don't have code that will init the CYPRESS bridge correctly + * so we do the next best thing, and depend on the previous + * console code to do the right thing, and ignore it here... :-\ + */ if (dev->vendor == PCI_VENDOR_ID_CONTAQ && - /* FIXME: We want a symbolic device name here. */ - dev->device == 0xc693) { + dev->device == PCI_DEVICE_ID_CONTAQ_82C693) { DBG_DEVS(("disable_dev: ignoring CYPRESS bridge...\n")); return; } -#endif bus = dev->bus; pcibios_read_config_word(bus->number, dev->devfn, PCI_COMMAND, &cmd); @@ -211,16 +244,16 @@ static void disable_dev(struct pci_dev *dev) /* * Layout memory and I/O for a device: */ -#define MAX(val1, val2) ((val1) > (val2) ? val1 : val2) +#define MAX(val1, val2) ((val1) > (val2) ? (val1) : (val2)) static void layout_dev(struct pci_dev *dev) { struct pci_bus *bus; unsigned short cmd; - unsigned int base, mask, size, reg; + unsigned int base, mask, size, off, idx; unsigned int alignto; + unsigned long handle; -#ifdef CONFIG_ALPHA_EISA /* * HACK: the PCI-to-EISA bridge does not seem to identify * itself as a bridge... :-( @@ -230,32 +263,40 @@ static void layout_dev(struct pci_dev *dev) DBG_DEVS(("layout_dev: ignoring PCEB...\n")); return; } -#endif -#ifdef CONFIG_ALPHA_SX164 + + /* + * we don't have code that will init the CYPRESS bridge correctly + * so we do the next best thing, and depend on the previous + * console code to do the right thing, and ignore it here... :-\ + */ if (dev->vendor == PCI_VENDOR_ID_CONTAQ && - dev->device == 0xc693) { + dev->device == PCI_DEVICE_ID_CONTAQ_82C693) { DBG_DEVS(("layout_dev: ignoring CYPRESS bridge...\n")); return; } -#endif bus = dev->bus; pcibios_read_config_word(bus->number, dev->devfn, PCI_COMMAND, &cmd); - for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) { + for (idx = 0; idx <= 5; idx++) { + off = PCI_BASE_ADDRESS_0 + 4*idx; /* * Figure out how much space and of what type this * device wants. */ - pcibios_write_config_dword(bus->number, dev->devfn, reg, + pcibios_write_config_dword(bus->number, dev->devfn, off, 0xffffffff); - pcibios_read_config_dword(bus->number, dev->devfn, reg, &base); + pcibios_read_config_dword(bus->number, dev->devfn, off, &base); if (!base) { /* this base-address register is unused */ - dev->base_address[(reg - PCI_BASE_ADDRESS_0)>>2] = 0; + dev->base_address[idx] = 0; continue; } + DBG_DEVS(("layout_dev: slot %d fn %d off 0x%x base 0x%x\n", + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), + off, base)); + /* * We've read the base address register back after * writing all ones and so now we must decode it. 
@@ -281,11 +322,13 @@ static void layout_dev(struct pci_dev *dev) base = ALIGN(io_base, alignto); io_base = base + size; pcibios_write_config_dword(bus->number, dev->devfn, - reg, base | 0x1); - dev->base_address[(reg - PCI_BASE_ADDRESS_0)>>2] - = base | 0x1; - DBG_DEVS(("layout_dev: dev 0x%x IO @ 0x%x (0x%x)\n", - dev->device, base, size)); + off, base | 0x1); + + handle = HANDLE(bus->number) | base | 1; + dev->base_address[idx] = handle; + + DBG_DEVS(("layout_dev: dev 0x%x IO @ 0x%lx (0x%x)\n", + dev->device, handle, size)); } else { unsigned int type; /* @@ -306,7 +349,7 @@ static void layout_dev(struct pci_dev *dev) "slot %d, function %d: \n", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); - reg += 4; /* skip extra 4 bytes */ + idx++; /* skip extra 4 bytes */ continue; case PCI_BASE_ADDRESS_MEM_TYPE_1M: @@ -365,10 +408,11 @@ static void layout_dev(struct pci_dev *dev) } mem_base = base + size; pcibios_write_config_dword(bus->number, dev->devfn, - reg, base); - dev->base_address[(reg-PCI_BASE_ADDRESS_0)>>2] = base; - DBG_DEVS(("layout_dev: dev 0x%x MEM @ 0x%x (0x%x)\n", - dev->device, base, size)); + off, base); + handle = HANDLE(bus->number) | base; + dev->base_address[idx] = handle; + DBG_DEVS(("layout_dev: dev 0x%x MEM @ 0x%lx (0x%x)\n", + dev->device, handle, size)); } } @@ -397,16 +441,17 @@ static void layout_dev(struct pci_dev *dev) } -static void layout_bus(struct pci_bus *bus) +static int layout_bus(struct pci_bus *bus) { unsigned int l, tio, bio, tmem, bmem; struct pci_bus *child; struct pci_dev *dev; + int found_vga = 0; DBG_DEVS(("layout_bus: starting bus %d\n", bus->number)); if (!bus->devices && !bus->children) - return; + return 0; /* * Align the current bases on appropriate boundaries (4K for @@ -424,6 +469,8 @@ static void layout_bus(struct pci_bus *bus) * devices. They'll be re-enabled only once all address * decoders are programmed consistently. */ + DBG_DEVS(("layout_bus: disable_dev for bus %d\n", bus->number)); + for (dev = bus->devices; dev; dev = dev->sibling) { if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) || (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA)) { @@ -441,6 +488,8 @@ static void layout_bus(struct pci_bus *bus) (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA)) { layout_dev(dev); } + if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + found_vga = 1; } /* * Recursively allocate space for all of the sub-buses: @@ -448,7 +497,7 @@ static void layout_bus(struct pci_bus *bus) DBG_DEVS(("layout_bus: starting bus %d children\n", bus->number)); for (child = bus->children; child; child = child->next) { - layout_bus(child); + found_vga += layout_bus(child); } /* * Align the current bases on 4K and 1MB boundaries: @@ -458,6 +507,8 @@ static void layout_bus(struct pci_bus *bus) if (bus->self) { struct pci_dev *bridge = bus->self; + + DBG_DEVS(("layout_bus: config bus %d bridge\n", bus->number)); /* * Set up the top and bottom of the PCI I/O segment * for this bus. @@ -481,10 +532,13 @@ static void layout_bus(struct pci_bus *bus) pcibios_write_config_dword(bridge->bus->number, bridge->devfn, 0x24, 0x0000ffff); /* - * Tell bridge that there is an ISA bus in the system: + * Tell bridge that there is an ISA bus in the system, + * and (possibly) a VGA as well. 
*/ + l = 0x00040000; /* ISA present */ + if (found_vga) l |= 0x00080000; /* VGA present */ pcibios_write_config_dword(bridge->bus->number, bridge->devfn, - 0x3c, 0x00040000); + 0x3c, l); /* * Clear status bits, enable I/O (for downstream I/O), * turn on master enable (for upstream I/O), turn on @@ -494,75 +548,26 @@ static void layout_bus(struct pci_bus *bus) pcibios_write_config_dword(bridge->bus->number, bridge->devfn, 0x4, 0xffff0007); } + DBG_DEVS(("layout_bus: bus %d finished\n", bus->number)); + return found_vga; } #endif /* !PCI_MODIFY */ -/* - * Given the vendor and device ids, find the n'th instance of that device - * in the system. - */ -int pcibios_find_device (unsigned short vendor, unsigned short device_id, - unsigned short index, unsigned char *bus, - unsigned char *devfn) -{ - unsigned int curr = 0; - struct pci_dev *dev; - - for (dev = pci_devices; dev; dev = dev->next) { - if (dev->vendor == vendor && dev->device == device_id) { - if (curr == index) { - *devfn = dev->devfn; - *bus = dev->bus->number; - return PCIBIOS_SUCCESSFUL; - } - ++curr; - } - } - return PCIBIOS_DEVICE_NOT_FOUND; -} - - -/* - * Given the class, find the n'th instance of that device - * in the system. - */ -int pcibios_find_class (unsigned int class_code, unsigned short index, - unsigned char *bus, unsigned char *devfn) -{ - unsigned int curr = 0; - struct pci_dev *dev; - - for (dev = pci_devices; dev; dev = dev->next) { - if (dev->class == class_code) { - if (curr == index) { - *devfn = dev->devfn; - *bus = dev->bus->number; - return PCIBIOS_SUCCESSFUL; - } - ++curr; - } - } - return PCIBIOS_DEVICE_NOT_FOUND; -} - - int pcibios_present(void) { return 1; } -unsigned long pcibios_init(unsigned long mem_start, - unsigned long mem_end) +void __init +pcibios_init(void) { printk("Alpha PCI BIOS32 revision %x.%02x\n", MAJOR_REV, MINOR_REV); - #if !PCI_MODIFY printk("...NOT modifying existing (SRM) PCI configuration\n"); #endif - return mem_start; } /* @@ -633,6 +638,76 @@ bridge_swizzle(unsigned char pin, unsigned int slot) return (((pin-1) + slot) % 4) + 1; } +#ifdef CONFIG_ALPHA_SRM_SETUP +/* look for mis-configured devices' I/O space addresses behind bridges */ +static void check_behind_io(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + unsigned int reg, orig_base, new_base, found_one = 0; + + for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) { + /* read the current setting, check for I/O space and >= 64K */ + pcibios_read_config_dword(bus->number, dev->devfn, reg, &orig_base); + if (!orig_base || !(orig_base & PCI_BASE_ADDRESS_SPACE_IO)) + continue; /* unused or non-IO */ + if (orig_base < 64*1024) { +#if 1 +printk("check_behind_io: ALREADY OK! bus %d slot %d base 0x%x\n", + bus->number, PCI_SLOT(dev->devfn), orig_base); +#endif + if (orig_base & ~1) + continue; /* OK! */ + orig_base = 0x12001; /* HACK! FIXME!! */ + } + + /* HACK ALERT! for now, just subtract 32K from the + original address, which should give us addresses + in the range 0x8000 and up */ + new_base = orig_base - 0x8000; +#if 1 +printk("check_behind_io: ALERT! bus %d slot %d old 0x%x new 0x%x\n", + bus->number, PCI_SLOT(dev->devfn), orig_base, new_base); +#endif + pcibios_write_config_dword(bus->number, dev->devfn, + reg, new_base); + + io_dev_to_reset[io_reset_count] = dev; + io_reg_to_reset[io_reset_count] = reg; + io_to_reset[io_reset_count] = orig_base; + io_reset_count++; + found_one++; + } /* end for-loop */ + + /* if any were modified, gotta hack the bridge IO limits too... 
*/ + if (found_one) { + if (bus->self) { + struct pci_dev *bridge = bus->self; + unsigned int l; + /* + * Set up the top and bottom of the PCI I/O segment + * for this bus. + */ + pcibios_read_config_dword(bridge->bus->number, + bridge->devfn, 0x1c, &l); +#if 1 +printk("check_behind_io: ALERT! bus %d slot %d oldLIM 0x%x\n", + bus->number, PCI_SLOT(bridge->devfn), l); +#endif + l = (l & 0xffff0000U) | 0xf080U; /* give it ALL */ + pcibios_write_config_dword(bridge->bus->number, + bridge->devfn, 0x1c, l); + pcibios_write_config_dword(bridge->bus->number, + bridge->devfn, + 0x3c, 0x00040000); + pcibios_write_config_dword(bridge->bus->number, + bridge->devfn, + 0x4, 0xffff0007); + } else + printk("check_behind_io: WARNING! bus->self NULL\n"); + } +} +#endif /* CONFIG_ALPHA_SRM_SETUP */ + /* * Most evaluation boards share most of the fixup code, which is isolated * here. This function is declared "inline" as only one platform will ever @@ -644,7 +719,7 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, char irq_tab[max_idsel - min_idsel + 1][irqs_per_slot], long ide_base) { - struct pci_dev *dev; + struct pci_dev *dev, *curr; unsigned char pin; unsigned char slot; @@ -652,12 +727,18 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, * Go through all devices, fixing up irqs as we see fit: */ for (dev = pci_devices; dev; dev = dev->next) { - if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE - /* PCEB (PCI to EISA bridge) does not identify - itself as a bridge... :-P */ - && !(dev->vendor == PCI_VENDOR_ID_INTEL && - dev->device == PCI_DEVICE_ID_INTEL_82375)) - || dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA) { + if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE || + dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA) { + /* + * HACK: the PCI-to-EISA bridge appears not to identify + * itself as a bridge... :-( + */ + if (dev->vendor == PCI_VENDOR_ID_INTEL && + dev->device == PCI_DEVICE_ID_INTEL_82375) { + DBG_DEVS(("common_fixup: ignoring PCEB...\n")); + continue; + } + /* * This device is not on the primary bus, we need * to figure out which interrupt pin it will come @@ -668,45 +749,100 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, * the inline static routine above). */ dev->irq = 0; - if (dev->bus->number != 0) { - struct pci_dev *curr = dev; + if (!DEV_IS_ON_PRIMARY(dev)) { /* read the pin and do the PCI-PCI bridge interrupt pin swizzle */ pcibios_read_config_byte(dev->bus->number, dev->devfn, PCI_INTERRUPT_PIN, &pin); - /* cope with 0 */ - if (pin == 0) + /* cope with 0 and illegal */ + if (pin == 0 || pin > 4) pin = 1; /* follow the chain of bridges, swizzling as we go */ + curr = dev; #if defined(CONFIG_ALPHA_MIATA) + /* check first for the built-in bridge */ + if ((PCI_SLOT(dev->bus->self->devfn) == 8) || + (PCI_SLOT(dev->bus->self->devfn) == 20)) { slot = PCI_SLOT(dev->devfn) + 5; DBG_DEVS(("MIATA: bus 1 slot %d pin %d" " irq %d min_idsel %d\n", PCI_SLOT(dev->devfn), pin, irq_tab[slot - min_idsel][pin], min_idsel)); + } + else /* must be a card-based bridge */ + { + do { + if ((PCI_SLOT(curr->bus->self->devfn) == 8) || + (PCI_SLOT(curr->bus->self->devfn) == 20)) + { + slot = PCI_SLOT(curr->devfn) + 5; + break; + } + /* swizzle */ + pin = bridge_swizzle( + pin, PCI_SLOT(curr->devfn)) ; + /* move up the chain of bridges */ + curr = curr->bus->self ; + /* slot of the next bridge. */ + slot = PCI_SLOT(curr->devfn); + } while (curr->bus->self) ; + } #elif defined(CONFIG_ALPHA_NORITAKE) - /* WAG Alert! 
*/ - slot = PCI_SLOT(dev->devfn) + 14; + /* check first for the built-in bridge */ + if (PCI_SLOT(dev->bus->self->devfn) == 8) { + slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */ DBG_DEVS(("NORITAKE: bus 1 slot %d pin %d" - " irq %d min_idsel %d\n", + "irq %d min_idsel %ld\n", PCI_SLOT(dev->devfn), pin, irq_tab[slot - min_idsel][pin], min_idsel)); -#else + } + else /* must be a card-based bridge */ + { do { + if (PCI_SLOT(curr->bus->self->devfn) == 8) { + slot = PCI_SLOT(curr->devfn) + 15; + break; + } /* swizzle */ - pin = bridge_swizzle(pin, PCI_SLOT(curr->devfn)); + pin = bridge_swizzle( + pin, PCI_SLOT(curr->devfn)) ; + /* move up the chain of bridges */ + curr = curr->bus->self ; + /* slot of the next bridge. */ + slot = PCI_SLOT(curr->devfn); + } while (curr->bus->self) ; + } +#else /* everyone but MIATA and NORITAKE */ + DBG_DEVS(("common_fixup: bus %d slot %d pin %d " + "irq %d min_idsel %ld\n", + curr->bus->number, + PCI_SLOT(dev->devfn), pin, + irq_tab[slot - min_idsel][pin], + min_idsel)); + do { + /* swizzle */ + pin = + bridge_swizzle(pin, PCI_SLOT(curr->devfn)); /* move up the chain of bridges */ curr = curr->bus->self; } while (curr->bus->self); /* The slot is the slot of the last bridge. */ slot = PCI_SLOT(curr->devfn); -#endif /* MIATA */ - } else { +#endif +#ifdef CONFIG_ALPHA_SRM_SETUP + /* + * must make sure that SRM didn't screw up + * and allocate an address > 64K for I/O + * space behind a PCI-PCI bridge + */ + check_behind_io(dev); +#endif /* CONFIG_ALPHA_SRM_SETUP */ + } else { /* just a device on a primary bus */ /* work out the slot */ slot = PCI_SLOT(dev->devfn); /* read the pin */ @@ -714,16 +850,48 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, dev->devfn, PCI_INTERRUPT_PIN, &pin); + DBG_DEVS(("common_fixup: bus %d slot %d" + " pin %d irq %d min_idsel %ld\n", + dev->bus->number, slot, pin, + irq_tab[slot - min_idsel][pin], + min_idsel)); + /* cope with 0 and illegal */ + if (pin == 0 || pin > 4) + pin = 1; } if (irq_tab[slot - min_idsel][pin] != -1) dev->irq = irq_tab[slot - min_idsel][pin]; -#if PCI_MODIFY - /* tell the device: */ - pcibios_write_config_byte(dev->bus->number, +#ifdef CONFIG_ALPHA_RAWHIDE + dev->irq += + 24 * bus2hose[dev->bus->number]->pci_hose_index; +#endif /* RAWHIDE */ +#ifdef CONFIG_ALPHA_SRM + { + unsigned char irq_orig; + /* read the original SRM-set IRQ and tell */ + pcibios_read_config_byte(dev->bus->number, dev->devfn, PCI_INTERRUPT_LINE, - dev->irq); -#endif + &irq_orig); + if (irq_orig != dev->irq) { + DBG_DEVS(("common_fixup: bus %d slot 0x%x " + "SRM IRQ 0x%x changed to 0x%x\n", + dev->bus->number,PCI_SLOT(dev->devfn), + irq_orig, dev->irq)); +#ifdef CONFIG_ALPHA_SRM_SETUP + irq_dev_to_reset[irq_reset_count] = dev; + irq_to_reset[irq_reset_count] = irq_orig; + irq_reset_count++; +#endif /* CONFIG_ALPHA_SRM_SETUP */ + } + } +#endif /* SRM */ + + /* always tell the device, so the driver knows what is + * the real IRQ to use; the device does not use it. + */ + pcibios_write_config_byte(dev->bus->number, dev->devfn, + PCI_INTERRUPT_LINE, dev->irq); DBG_DEVS(("common_fixup: bus %d slot 0x%x" " VID 0x%x DID 0x%x\n" @@ -737,11 +905,24 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, * if it's a VGA, enable its BIOS ROM at C0000 */ if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { - pcibios_write_config_dword(dev->bus->number, + /* but if its a Cirrus 543x/544x DISABLE it, */ + /* since enabling ROM disables the memory... 
*/ + if ((dev->vendor == PCI_VENDOR_ID_CIRRUS) && + (dev->device >= 0x00a0) && + (dev->device <= 0x00ac)) { + pcibios_write_config_dword( + dev->bus->number, + dev->devfn, + PCI_ROM_ADDRESS, + 0x00000000); + } else { + pcibios_write_config_dword( + dev->bus->number, dev->devfn, PCI_ROM_ADDRESS, 0x000c0000 | PCI_ROM_ADDRESS_ENABLE); } + } /* * if it's a SCSI, disable its BIOS ROM */ @@ -752,46 +933,6 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, 0x0000000); } } -#ifdef CONFIG_ALPHA_SX164 - /* If it the CYPRESS PCI-ISA bridge, disable IDE - interrupt routing through PCI (ie do through PIC). */ - else if (dev->vendor == PCI_VENDOR_ID_CONTAQ && - dev->device == 0xc693 && - PCI_FUNC(dev->devfn) == 0) { - pcibios_write_config_word(dev->bus->number, - dev->devfn, 0x04, 0x0007); - - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x40, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x41, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x42, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x43, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x44, 0x27); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x45, 0xe0); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x48, 0xf0); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x49, 0x40); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x4a, 0x00); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x4b, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x4c, 0x80); - pcibios_write_config_byte(dev->bus->number, - dev->devfn, 0x4d, 0x70); - - outb(0, DMA1_RESET_REG); - outb(0, DMA2_RESET_REG); - outb(DMA_MODE_CASCADE, DMA2_MODE_REG); - outb(0, DMA2_MASK_REG); - } -#endif /* SX164 */ } if (ide_base) { enable_ide(ide_base); @@ -814,6 +955,7 @@ common_fixup(long min_idsel, long max_idsel, long irqs_per_slot, static inline void eb66p_fixup(void) { static char irq_tab[5][5] __initlocaldata = { + /*INT INTA INTB INTC INTD */ {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ @@ -825,8 +967,8 @@ static inline void eb66p_fixup(void) /* - * The PC164/LX164 has 19 PCI interrupts, four from each of the four PCI - * slots, the SIO, PCI/IDE, and USB. + * The PC164 and LX164 have 19 PCI interrupts, four from each of the four + * PCI slots, the SIO, PCI/IDE, and USB. * * Each of the interrupts can be individually masked. This is * accomplished by setting the appropriate bit in the mask register. @@ -901,6 +1043,7 @@ static inline void alphapc164_fixup(void) static inline void cabriolet_fixup(void) { static char irq_tab[5][5] __initlocaldata = { + /*INT INTA INTB INTC INTD */ { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ { 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */ @@ -957,6 +1100,7 @@ static inline void cabriolet_fixup(void) static inline void eb66_and_eb64p_fixup(void) { static char irq_tab[5][5] __initlocaldata = { + /*INT INTA INTB INTC INTD */ {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? 
*/ @@ -968,7 +1112,7 @@ static inline void eb66_and_eb64p_fixup(void) /* - * Fixup configuration for MIKASA (NORITAKE is different) + * Fixup configuration for MIKASA (AlphaServer 1000) * * Summary @ 0x536: * Bit Meaning @@ -1020,7 +1164,10 @@ static inline void mikasa_fixup(void) } /* - * Fixup configuration for NORITAKE (MIKASA is different) + * Fixup configuration for NORITAKE (AlphaServer 1000A) + * + * This is also used for CORELLE (AlphaServer 800) + * and ALCOR Primo (AlphaStation 600A). * * Summary @ 0x542, summary register #1: * Bit Meaning @@ -1076,8 +1223,11 @@ static inline void mikasa_fixup(void) */ static inline void noritake_fixup(void) { - static char irq_tab[13][5] __initlocaldata = { + static char irq_tab[15][5] __initlocaldata = { /*INT INTA INTB INTC INTD */ + /* note: IDSELs 16, 17, and 25 are CORELLE only */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ + { -1, -1, -1, -1, -1}, /* IdSel 17, S3 Trio64 */ { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ { -1, -1, -1, -1, -1}, /* IdSel 19, PPB */ { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ @@ -1085,18 +1235,20 @@ static inline void noritake_fixup(void) { 16+2, 16+2, 16+3, 32+2, 32+3}, /* IdSel 22, slot 0 */ { 16+4, 16+4, 16+5, 32+4, 32+5}, /* IdSel 23, slot 1 */ { 16+6, 16+6, 16+7, 32+6, 32+7}, /* IdSel 24, slot 2 */ - /* The following are actually on bus 1, across the bridge */ + { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 25, slot 3 */ + /* the following 5 are actually on PCI bus 1, which is */ + /* across the built-in bridge of the NORITAKE only */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 17, slot 3 */ {16+10, 16+10, 16+11, 32+10, 32+11}, /* IdSel 18, slot 4 */ {16+12, 16+12, 16+13, 32+12, 32+13}, /* IdSel 19, slot 5 */ {16+14, 16+14, 16+15, 32+14, 32+15}, /* IdSel 20, slot 6 */ }; - common_fixup(7, 18, 5, irq_tab, 0); + common_fixup(5, 19, 5, irq_tab, 0); } /* - * Fixup configuration for ALCOR + * Fixup configuration for ALCOR and XLT (XL-300/366/433) * * Summary @ GRU_INT_REQ: * Bit Meaning @@ -1126,6 +1278,7 @@ static inline void noritake_fixup(void) * The device to slot mapping looks like: * * Slot Device + * 6 built-in TULIP (XLT only) * 7 PCI on board slot 0 * 8 PCI on board slot 3 * 9 PCI on board slot 4 @@ -1140,8 +1293,10 @@ static inline void noritake_fixup(void) */ static inline void alcor_fixup(void) { - static char irq_tab[6][5] __initlocaldata = { + static char irq_tab[7][5] __initlocaldata = { /*INT INTA INTB INTC INTD */ + /* note: IDSEL 17 is XLT only */ + {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */ @@ -1149,62 +1304,6 @@ static inline void alcor_fixup(void) { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ }; - common_fixup(7, 12, 5, irq_tab, 0); -} - -/* - * Fixup configuration for ALPHA XLT (EV5/EV56) - * - * Summary @ GRU_INT_REQ: - * Bit Meaning - * 0 Interrupt Line A from slot 2 - * 1 Interrupt Line B from slot 2 - * 2 Interrupt Line C from slot 2 - * 3 Interrupt Line D from slot 2 - * 4 Interrupt Line A from slot 1 - * 5 Interrupt line B from slot 1 - * 6 Interrupt Line C from slot 1 - * 7 Interrupt Line D from slot 1 - * 8 Interrupt Line A from slot 0 - * 9 Interrupt Line B from slot 0 - *10 Interrupt Line C from slot 0 - *11 Interrupt Line D from slot 0 - *12 NCR810 SCSI in slot 9 
- *13 DC-21040 (TULIP) in slot 6 - *14-19 Reserved - *20-23 Jumpers (interrupt) - *24-27 Module revision - *28-30 Reserved - *31 EISA interrupt - * - * The device to slot mapping looks like: - * - * Slot Device - * 6 TULIP - * 7 PCI on board slot 0 - * 8 none - * 9 SCSI - * 10 PCI-ISA bridge - * 11 PCI on board slot 2 - * 12 PCI on board slot 1 - * - * - * This two layered interrupt approach means that we allocate IRQ 16 and - * above for PCI interrupts. The IRQ relates to which bit the interrupt - * comes in on. This makes interrupt processing much easier. - */ -static inline void xlt_fixup(void) -{ - static char irq_tab[7][5] __initlocaldata = { - /*INT INTA INTB INTC INTD */ - {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ - { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */ - { -1, -1, -1, -1, -1}, /* IdSel 19, none */ - {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 20, SCSI */ - { -1, -1, -1, -1, -1}, /* IdSel 21, SIO */ - { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */ - { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ - }; common_fixup(6, 12, 5, irq_tab, 0); } @@ -1262,8 +1361,6 @@ static inline void xlt_fixup(void) * with the values in the sable_irq_to_mask[] and sable_mask_to_irq[] tables * in irq.c */ - -#ifdef CONFIG_ALPHA_SABLE static inline void sable_fixup(void) { static char irq_tab[9][5] __initlocaldata = { @@ -1280,7 +1377,6 @@ static inline void sable_fixup(void) }; common_fixup(0, 8, 5, irq_tab, 0); } -#endif /* * Fixup configuration for MIATA (EV56+PYXIS) @@ -1362,7 +1458,8 @@ static inline void miata_fixup(void) { -1, -1, -1, -1, -1}, /* IdSel 21, none */ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */ - /* The following are actually on bus 1, across the bridge */ + /* The following are actually on bus 1, which is */ + /* across the builtin PCI-PCI bridge */ {16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 24, slot 1 */ {16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 25, slot 2 */ {16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 26, slot 3 */ @@ -1373,6 +1470,7 @@ static inline void miata_fixup(void) { -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */ }; common_fixup(3, 20, 5, irq_tab, 0); + SMC669_Init(); /* it might be a GL (fails harmlessly if not) */ es1888_init(); } #endif @@ -1399,7 +1497,6 @@ static inline void miata_fixup(void) *14 Interrupt Line B from slot 1 *15 Interrupt line B from slot 0 *16 Interrupt Line C from slot 3 - *17 Interrupt Line C from slot 2 *18 Interrupt Line C from slot 1 *19 Interrupt Line C from slot 0 @@ -1417,7 +1514,6 @@ static inline void miata_fixup(void) * */ -#ifdef CONFIG_ALPHA_SX164 static inline void sx164_fixup(void) { static char irq_tab[5][5] __initlocaldata = { @@ -1428,12 +1524,154 @@ static inline void sx164_fixup(void) { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */ { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */ }; - common_fixup(5, 9, 5, irq_tab, 0); + SMC669_Init(); +} + +/* + * Fixup configuration for DP264 (EV6+TSUNAMI) + * + * Summary @ TSUNAMI_CSR_DIM0: + * Bit Meaning + * 0-17 Unused + *18 Interrupt SCSI B (Adaptec 7895 builtin) + *19 Interrupt SCSI A (Adaptec 7895 builtin) + *20 Interrupt Line D from slot 2 PCI0 + *21 Interrupt Line C from slot 2 PCI0 + *22 Interrupt Line B from slot 2 PCI0 + *23 Interrupt Line A from slot 2 PCI0 + *24 Interrupt Line D from slot 1 PCI0 + *25 Interrupt Line C from slot 1 PCI0 + *26 Interrupt Line B from slot 1 PCI0 + *27 Interrupt Line A from slot 1 PCI0 + *28 Interrupt Line D from 
slot 0 PCI0 + *29 Interrupt Line C from slot 0 PCI0 + *30 Interrupt Line B from slot 0 PCI0 + *31 Interrupt Line A from slot 0 PCI0 + * + *32 Interrupt Line D from slot 3 PCI1 + *33 Interrupt Line C from slot 3 PCI1 + *34 Interrupt Line B from slot 3 PCI1 + *35 Interrupt Line A from slot 3 PCI1 + *36 Interrupt Line D from slot 2 PCI1 + *37 Interrupt Line C from slot 2 PCI1 + *38 Interrupt Line B from slot 2 PCI1 + *39 Interrupt Line A from slot 2 PCI1 + *40 Interrupt Line D from slot 1 PCI1 + *41 Interrupt Line C from slot 1 PCI1 + *42 Interrupt Line B from slot 1 PCI1 + *43 Interrupt Line A from slot 1 PCI1 + *44 Interrupt Line D from slot 0 PCI1 + *45 Interrupt Line C from slot 0 PCI1 + *46 Interrupt Line B from slot 0 PCI1 + *47 Interrupt Line A from slot 0 PCI1 + *48-52 Unused + *53 PCI0 NMI (from Cypress) + *54 PCI0 SMI INT (from Cypress) + *55 PCI0 ISA Interrupt (from Cypress) + *56-60 Unused + *61 PCI1 Bus Error + *62 PCI0 Bus Error + *63 Reserved + * + * IdSel + * 5 Cypress Bridge I/O + * 6 SCSI Adaptec builtin + * 7 64 bit PCI option slot 0 + * 8 64 bit PCI option slot 1 + * 9 64 bit PCI option slot 2 + * + */ +static inline void dp264_fixup(void) +{ + static char irq_tab[5][5] __initlocaldata = { + /*INT INTA INTB INTC INTD */ + { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ + { 16+ 2, 16+ 2, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin */ + { 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */ + { 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */ + { 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4} /* IdSel 9 slot 2 */ + }; + common_fixup(5, 9, 5, irq_tab, 0); SMC669_Init(); } -#endif + +/* + * Fixup configuration for RAWHIDE + * + * Summary @ MCPCIA_PCI0_INT_REQ: + * Bit Meaning + *0 Interrupt Line A from slot 2 PCI0 + *1 Interrupt Line B from slot 2 PCI0 + *2 Interrupt Line C from slot 2 PCI0 + *3 Interrupt Line D from slot 2 PCI0 + *4 Interrupt Line A from slot 3 PCI0 + *5 Interrupt Line B from slot 3 PCI0 + *6 Interrupt Line C from slot 3 PCI0 + *7 Interrupt Line D from slot 3 PCI0 + *8 Interrupt Line A from slot 4 PCI0 + *9 Interrupt Line B from slot 4 PCI0 + *10 Interrupt Line C from slot 4 PCI0 + *11 Interrupt Line D from slot 4 PCI0 + *12 Interrupt Line A from slot 5 PCI0 + *13 Interrupt Line B from slot 5 PCI0 + *14 Interrupt Line C from slot 5 PCI0 + *15 Interrupt Line D from slot 5 PCI0 + *16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1) + *17-23 NA + * + * IdSel + * 1 EISA bridge (PCI bus 0 only) + * 2 PCI option slot 2 + * 3 PCI option slot 3 + * 4 PCI option slot 4 + * 5 PCI option slot 5 + * + */ + +static inline void rawhide_fixup(void) +{ + static char irq_tab[5][5] __initlocaldata = { + /*INT INTA INTB INTC INTD */ + { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 only */ + { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ + { 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */ + { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */ + { 16+12, 16+12, 16+13, 16+14, 16+15} /* IdSel 5 slot 5 */ + }; + common_fixup(1, 5, 5, irq_tab, 0); +} + +/* + * The Takara has PCI devices 1, 2, and 3 configured to slots 20, + * 19, and 18 respectively, in the default configuration. They can + * also be jumpered to slots 8, 7, and 6 respectively, which is fun + * because the SIO ISA bridge can also be slot 7. However, the SIO + * doesn't explicitly generate PCI-type interrupts, so we can + * assign it whatever the hell IRQ we like and it doesn't matter. 
+ */ +static inline void takara_fixup(void) +{ + static char irq_tab[15][5] __initlocaldata = { + { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ + { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ + { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 12 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 13 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 14 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 15 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 17 == nothing */ + { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 18 == device 3 */ + { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 19 == device 2 */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 20 == device 1 */ + }; + common_fixup(6, 20, 5, irq_tab, 0x26e); +} /* * Fixup configuration for all boards that route the PCI interrupts @@ -1462,6 +1700,7 @@ static inline void sio_fixup(void) * driven at all). */ static const char pirq_tab[][5] __initlocaldata = { + /*INT A B C D */ #ifdef CONFIG_ALPHA_P2K { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ @@ -1497,7 +1736,7 @@ static inline void sio_fixup(void) #if defined(CONFIG_ALPHA_BOOK1) /* for the AlphaBook1, NCR810 SCSI is 14, PCMCIA controller is 15 */ - const unsigned int route_tab = 0x0e0f0a0a; + const unsigned int new_route_tab = 0x0e0f0a0a; #elif defined(CONFIG_ALPHA_NONAME) /* @@ -1510,16 +1749,24 @@ static inline void sio_fixup(void) * they are co-indicated when the platform type "Noname" is * selected... :-( */ - const unsigned int route_tab = 0x0b0a0f09; + const unsigned int new_route_tab = 0x0b0a0f09; #else - const unsigned int route_tab = 0x0b0a090f; + const unsigned int new_route_tab = 0x0b0a090f; #endif - - unsigned int level_bits; + unsigned int route_tab, old_route_tab; + unsigned int level_bits, old_level_bits; unsigned char pin, slot; int pirq; + pcibios_read_config_dword(0, PCI_DEVFN(7, 0), 0x60, &old_route_tab); + DBG_DEVS(("sio_fixup: old pirq route table: 0x%08x\n", + old_route_tab)); +#if PCI_MODIFY + route_tab = new_route_tab; pcibios_write_config_dword(0, PCI_DEVFN(7, 0), 0x60, route_tab); +#else + route_tab = old_route_tab; +#endif /* * Go through all devices, fixing up irqs as we see fit: @@ -1576,20 +1823,33 @@ static inline void sio_fixup(void) * if it's a VGA, enable its BIOS ROM at C0000 */ if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { - pcibios_write_config_dword(dev->bus->number, + /* but if its a Cirrus 543x/544x DISABLE it, */ + /* since enabling ROM disables the memory... 
*/ + if ((dev->vendor == PCI_VENDOR_ID_CIRRUS) && + (dev->device >= 0x00a0) && + (dev->device <= 0x00ac)) { + pcibios_write_config_dword( + dev->bus->number, + dev->devfn, + PCI_ROM_ADDRESS, + 0x00000000); + } else { + pcibios_write_config_dword( + dev->bus->number, dev->devfn, PCI_ROM_ADDRESS, 0x000c0000 | PCI_ROM_ADDRESS_ENABLE); } + } if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { continue; /* for now, displays get no IRQ */ } if (pirq < 0) { - printk("bios32.sio_fixup: " + DBG_DEVS(("bios32.sio_fixup: " "weird, device %04x:%04x coming in on" " slot %d has no irq line!!\n", - dev->vendor, dev->device, slot); + dev->vendor, dev->device, slot)); continue; } @@ -1653,7 +1913,12 @@ static inline void sio_fixup(void) * * Note: we at least preserve any level-set bits on AlphaBook1 */ - level_bits |= ((inb(0x4d0) | (inb(0x4d1) << 8)) & 0x71ff); + old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); + DBG_DEVS(("sio_fixup: old irq level bits: 0x%04x\n", + old_level_bits)); + level_bits |= (old_level_bits & 0x71ff); + DBG_DEVS(("sio_fixup: new irq level bits: 0x%04x\n", + level_bits)); outb((level_bits >> 0) & 0xff, 0x4d0); outb((level_bits >> 8) & 0xff, 0x4d1); @@ -1685,14 +1950,38 @@ static inline void sio_fixup(void) extern void tga_console_init(void); #endif /* CONFIG_TGA_CONSOLE */ -unsigned long __init -pcibios_fixup(unsigned long mem_start, unsigned long mem_end) +void __init +pcibios_fixup(void) { + struct pci_bus *cur; + +#ifdef CONFIG_ALPHA_MCPCIA + /* must do massive setup for multiple PCI busses here... */ + DBG_DEVS(("pcibios_fixup: calling mcpcia_fixup()...\n")); + mcpcia_fixup(); +#endif /* MCPCIA */ + +#ifdef CONFIG_ALPHA_TSUNAMI + /* must do massive setup for multiple PCI busses here... */ + /* tsunami_fixup(); */ +#endif /* TSUNAMI */ + #if PCI_MODIFY && !defined(CONFIG_ALPHA_RUFFIAN) /* * Scan the tree, allocating PCI memory and I/O space. */ - layout_bus(&pci_root); + /* + * Sigh; check_region() will need changing to accept a HANDLE, + * if we allocate I/O space addresses on a per-bus basis. + * For now, make the I/O bases unique across all busses, so + * that check_region() will not get confused... 
;-} + */ + io_base = DEFAULT_IO_BASE; + for (cur = &pci_root; cur; cur = cur->next) { + mem_base = DEFAULT_MEM_BASE; + DBG_DEVS(("pcibios_fixup: calling layout_bus()\n")); + layout_bus(cur); + } #endif /* @@ -1713,10 +2002,8 @@ pcibios_fixup(unsigned long mem_start, unsigned long mem_end) eb66_and_eb64p_fixup(); #elif defined(CONFIG_ALPHA_MIKASA) mikasa_fixup(); -#elif defined(CONFIG_ALPHA_ALCOR) +#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT) alcor_fixup(); -#elif defined(CONFIG_ALPHA_XLT) - xlt_fixup(); #elif defined(CONFIG_ALPHA_SABLE) sable_fixup(); #elif defined(CONFIG_ALPHA_MIATA) @@ -1725,6 +2012,12 @@ pcibios_fixup(unsigned long mem_start, unsigned long mem_end) noritake_fixup(); #elif defined(CONFIG_ALPHA_SX164) sx164_fixup(); +#elif defined(CONFIG_ALPHA_DP264) + dp264_fixup(); +#elif defined(CONFIG_ALPHA_RAWHIDE) + rawhide_fixup(); +#elif defined(CONFIG_ALPHA_TAKARA) + takara_fixup(); #elif defined(CONFIG_ALPHA_RUFFIAN) /* no fixup needed */ #else @@ -1736,8 +2029,6 @@ pcibios_fixup(unsigned long mem_start, unsigned long mem_end) tga_console_init(); #endif #endif - - return mem_start; } @@ -1831,6 +2122,33 @@ asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn, return err; } +#if (defined(CONFIG_ALPHA_PC164) || \ + defined(CONFIG_ALPHA_LX164) || \ + defined(CONFIG_ALPHA_SX164) || \ + defined(CONFIG_ALPHA_EB164) || \ + defined(CONFIG_ALPHA_EB66P) || \ + defined(CONFIG_ALPHA_CABRIOLET)) && defined(CONFIG_ALPHA_SRM) + +/* + on the above machines, under SRM console, we must use the CSERVE PALcode + routine to manage the interrupt mask for us, otherwise, the kernel/HW get + out of sync with what the PALcode thinks it needs to deliver/ignore + */ +void +cserve_update_hw(unsigned long irq, unsigned long mask) +{ + extern void cserve_ena(unsigned long); + extern void cserve_dis(unsigned long); + + if (mask & (1UL << irq)) + /* disable */ + cserve_dis(irq - 16); + else + /* enable */ + cserve_ena(irq - 16); + return; +} +#endif /* (PC164 || LX164 || SX164 || EB164 || CABRIO) && SRM */ #ifdef CONFIG_ALPHA_MIATA /* @@ -1877,4 +2195,49 @@ es1888_init(void) } #endif /* CONFIG_ALPHA_MIATA */ +__initfunc(char *pcibios_setup(char *str)) +{ + return str; +} + +#ifdef CONFIG_ALPHA_SRM_SETUP +void reset_for_srm(void) +{ + extern void scrreset(void); + struct pci_dev *dev; + int i; + + /* reset any IRQs that we changed */ + for (i = 0; i < irq_reset_count; i++) { + dev = irq_dev_to_reset[i]; + + pcibios_write_config_byte(dev->bus->number, dev->devfn, + PCI_INTERRUPT_LINE, irq_to_reset[i]); +#if 1 + printk("reset_for_srm: bus %d slot 0x%x " + "SRM IRQ 0x%x changed back from 0x%x\n", + dev->bus->number, PCI_SLOT(dev->devfn), + irq_to_reset[i], dev->irq); +#endif + } + + /* reset any IO addresses that we changed */ + for (i = 0; i < io_reset_count; i++) { + dev = io_dev_to_reset[i]; + + pcibios_write_config_byte(dev->bus->number, dev->devfn, + io_reg_to_reset[i], io_to_reset[i]); +#if 1 + printk("reset_for_srm: bus %d slot 0x%x " + "SRM IO restored to 0x%x\n", + dev->bus->number, PCI_SLOT(dev->devfn), + io_to_reset[i]); +#endif +} + + /* reset the visible screen to the top of display memory */ + scrreset(); +} +#endif /* CONFIG_ALPHA_SRM_SETUP */ + #endif /* CONFIG_PCI */ diff --git a/arch/alpha/kernel/cia.c b/arch/alpha/kernel/cia.c index 4bebe2732..57fae7d87 100644 --- a/arch/alpha/kernel/cia.c +++ b/arch/alpha/kernel/cia.c @@ -6,8 +6,8 @@ * */ #include <linux/kernel.h> +#include <linux/config.h> #include <linux/types.h> -#include <linux/bios32.h> #include 
<linux/pci.h> #include <linux/sched.h> @@ -17,13 +17,14 @@ #include <asm/ptrace.h> #include <asm/mmu_context.h> -/* NOTE: Herein are back-to-back mb insns. They are magic. - A plausible explanation is that the i/o controler does not properly - handle the system transaction. Another involves timing. Ho hum. */ +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ extern struct hwrpb_struct *hwrpb; extern asmlinkage void wrmces(unsigned long mces); -extern int alpha_sys_type; /* * Machine check reasons. Defined according to PALcode sources @@ -56,13 +57,17 @@ extern int alpha_sys_type; # define DBGC(args) #endif -#define vulp volatile unsigned long * #define vuip volatile unsigned int * static volatile unsigned int CIA_mcheck_expected = 0; static volatile unsigned int CIA_mcheck_taken = 0; static unsigned int CIA_jd; +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int CIA_DMA_WIN_BASE = CIA_DMA_WIN_BASE_DEFAULT; +unsigned int CIA_DMA_WIN_SIZE = CIA_DMA_WIN_SIZE_DEFAULT; +unsigned long cia_sm_base_r1, cia_sm_base_r2, cia_sm_base_r3; +#endif /* SRM_SETUP */ /* * Given a bus, device, and function number, compute resulting @@ -271,7 +276,7 @@ static void conf_write(unsigned long addr, unsigned int value, } /* reset error status: */ - *(vulp)CIA_IOC_CIA_ERR = stat0; + *(vuip)CIA_IOC_CIA_ERR = stat0; mb(); wrmces(0x7); /* reset machine check */ value = 0xffffffff; @@ -442,6 +447,18 @@ unsigned long cia_init(unsigned long mem_start, unsigned long mem_end) printk("CIA_init: CIA_STAT was 0x%x\n", temp); temp = *(vuip)CIA_IOC_MCR; mb(); printk("CIA_init: CIA_MCR was 0x%x\n", temp); + temp = *(vuip)CIA_IOC_CIA_CTRL; mb(); + printk("CIA_init: CIA_CTRL was 0x%x\n", temp); + temp = *(vuip)CIA_IOC_ERR_MASK; mb(); + printk("CIA_init: CIA_ERR_MASK was 0x%x\n", temp); + temp = *((vuip)CIA_IOC_PCI_W0_BASE); mb(); + printk("CIA_init: W0_BASE was 0x%x\n", temp); + temp = *((vuip)CIA_IOC_PCI_W1_BASE); mb(); + printk("CIA_init: W1_BASE was 0x%x\n", temp); + temp = *((vuip)CIA_IOC_PCI_W2_BASE); mb(); + printk("CIA_init: W2_BASE was 0x%x\n", temp); + temp = *((vuip)CIA_IOC_PCI_W3_BASE); mb(); + printk("CIA_init: W3_BASE was 0x%x\n", temp); } #endif /* DEBUG_DUMP_REGS */ @@ -458,6 +475,70 @@ unsigned long cia_init(unsigned long mem_start, unsigned long mem_end) *(vuip)CIA_IOC_CIA_CTRL = cia_tmp; mb(); +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 0 for enabled and mapped to 0 */ + if (((*(vuip)CIA_IOC_PCI_W0_BASE & 3) == 1) && + (*(vuip)CIA_IOC_PCI_T0_BASE == 0)) + { + CIA_DMA_WIN_BASE = *(vuip)CIA_IOC_PCI_W0_BASE & 0xfff00000U; + CIA_DMA_WIN_SIZE = *(vuip)CIA_IOC_PCI_W0_MASK & 0xfff00000U; + CIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("cia_init: using Window 0 settings\n"); + printk("cia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)CIA_IOC_PCI_W0_BASE, + *(vuip)CIA_IOC_PCI_W0_MASK, + *(vuip)CIA_IOC_PCI_T0_BASE); +#endif + } + else /* check window 1 for enabled and mapped to 0 */ + if (((*(vuip)CIA_IOC_PCI_W1_BASE & 3) == 1) && + (*(vuip)CIA_IOC_PCI_T1_BASE == 0)) + { + CIA_DMA_WIN_BASE = *(vuip)CIA_IOC_PCI_W1_BASE & 0xfff00000U; + CIA_DMA_WIN_SIZE = *(vuip)CIA_IOC_PCI_W1_MASK & 0xfff00000U; + CIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("cia_init: using Window 1 settings\n"); + printk("cia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)CIA_IOC_PCI_W1_BASE, + *(vuip)CIA_IOC_PCI_W1_MASK, + *(vuip)CIA_IOC_PCI_T1_BASE); +#endif + } + else /* 
check window 2 for enabled and mapped to 0 */ + if (((*(vuip)CIA_IOC_PCI_W2_BASE & 3) == 1) && + (*(vuip)CIA_IOC_PCI_T2_BASE == 0)) + { + CIA_DMA_WIN_BASE = *(vuip)CIA_IOC_PCI_W2_BASE & 0xfff00000U; + CIA_DMA_WIN_SIZE = *(vuip)CIA_IOC_PCI_W2_MASK & 0xfff00000U; + CIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("cia_init: using Window 2 settings\n"); + printk("cia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)CIA_IOC_PCI_W2_BASE, + *(vuip)CIA_IOC_PCI_W2_MASK, + *(vuip)CIA_IOC_PCI_T2_BASE); +#endif + } + else /* check window 3 for enabled and mapped to 0 */ + if (((*(vuip)CIA_IOC_PCI_W3_BASE & 3) == 1) && + (*(vuip)CIA_IOC_PCI_T3_BASE == 0)) + { + CIA_DMA_WIN_BASE = *(vuip)CIA_IOC_PCI_W3_BASE & 0xfff00000U; + CIA_DMA_WIN_SIZE = *(vuip)CIA_IOC_PCI_W3_MASK & 0xfff00000U; + CIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("cia_init: using Window 3 settings\n"); + printk("cia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)CIA_IOC_PCI_W3_BASE, + *(vuip)CIA_IOC_PCI_W3_MASK, + *(vuip)CIA_IOC_PCI_T3_BASE); +#endif + } + else /* we must use our defaults which were pre-initialized... */ +#endif /* SRM_SETUP */ + { /* * Set up the PCI->physical memory translation windows. * For now, windows 1,2 and 3 are disabled. In the future, we may @@ -472,6 +553,7 @@ unsigned long cia_init(unsigned long mem_start, unsigned long mem_end) *(vuip)CIA_IOC_PCI_W1_BASE = 0x0; *(vuip)CIA_IOC_PCI_W2_BASE = 0x0; *(vuip)CIA_IOC_PCI_W3_BASE = 0x0; + } /* * check ASN in HWRPB for validity, report if bad @@ -483,28 +565,54 @@ unsigned long cia_init(unsigned long mem_start, unsigned long mem_end) } /* - * Finally, clear the CIA_CFG register, which gets used + * Next, clear the CIA_CFG register, which gets used * for PCI Config Space accesses. That is the way * we want to use it, and we do not want to depend on * what ARC or SRM might have left behind... */ { -#if 0 - unsigned int cia_cfg = *(vuip)CIA_IOC_CFG; mb(); - if (cia_cfg) printk("CIA_init: CFG was 0x%x\n", cia_cfg); -#endif - *(vuip)CIA_IOC_CFG = 0; mb(); + unsigned int cia_cfg = *((vuip)CIA_IOC_CFG); mb(); + if (cia_cfg) { + printk("CIA_init: CFG was 0x%x\n", cia_cfg); + *((vuip)CIA_IOC_CFG) = 0; mb(); + } } -#if 0 { - unsigned int temp; - temp = *(vuip)CIA_IOC_CIA_CTRL; mb(); - printk("CIA_init: CIA_CTRL was 0x%x\n", temp); - temp = *(vuip)CIA_IOC_ERR_MASK; mb(); - printk("CIA_init: CIA_ERR_MASK was 0x%x\n", temp); - } + unsigned int cia_hae_mem = *((vuip)CIA_IOC_HAE_MEM); + unsigned int cia_hae_io = *((vuip)CIA_IOC_HAE_IO); +#if 0 + printk("CIA_init: HAE_MEM was 0x%x\n", cia_hae_mem); + printk("CIA_init: HAE_IO was 0x%x\n", cia_hae_io); #endif +#ifdef CONFIG_ALPHA_SRM_SETUP + /* + sigh... For the SRM setup, unless we know apriori what the HAE + contents will be, we need to setup the arbitrary region bases + so we can test against the range of addresses and tailor the + region chosen for the SPARSE memory access. + + see include/asm-alpha/cia.h for the SPARSE mem read/write + */ + cia_sm_base_r1 = (cia_hae_mem ) & 0xe0000000UL; /* region 1 */ + cia_sm_base_r2 = (cia_hae_mem << 16) & 0xf8000000UL; /* region 2 */ + cia_sm_base_r3 = (cia_hae_mem << 24) & 0xfc000000UL; /* region 3 */ + + /* + Set the HAE cache, so that setup_arch() code + will use the SRM setting always. Our readb/writeb + code in cia.h expects never to have to change + the contents of the HAE. 
+ */ + hae.cache = cia_hae_mem; +#else /* SRM_SETUP */ + *((vuip)CIA_IOC_HAE_MEM) = 0; mb(); + cia_hae_mem = *((vuip)CIA_IOC_HAE_MEM); + *((vuip)CIA_IOC_HAE_IO) = 0; mb(); + cia_hae_io = *((vuip)CIA_IOC_HAE_IO); +#endif /* SRM_SETUP */ + } + return mem_start; } @@ -512,7 +620,7 @@ int cia_pci_clr_err(void) { CIA_jd = *(vuip)CIA_IOC_CIA_ERR; DBGM(("CIA_pci_clr_err: CIA ERR after read 0x%x\n", CIA_jd)); - *(vulp)CIA_IOC_CIA_ERR = 0x0180; + *(vuip)CIA_IOC_CIA_ERR = 0x0180; mb(); return 0; } diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index b139d5178..0bbc71926 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -4,6 +4,7 @@ * kernel entry-points */ +#include <linux/config.h> #include <asm/system.h> #define halt .long PAL_halt @@ -48,6 +49,8 @@ * JRP - Save regs 16-18 in a special area of the stack, so that * the palcode-provided values are available to the signal handler. */ +#if defined(CONFIG_ALPHA_TSUNAMI) +/* TSUNAMI has no HAE register to save/restore */ #define SAVE_ALL \ subq $30,184,$30; \ stq $0,0($30); \ @@ -55,6 +58,55 @@ stq $2,16($30); \ stq $3,24($30); \ stq $4,32($30); \ + stq $5,40($30); \ + stq $6,48($30); \ + stq $7,56($30); \ + stq $8,64($30); \ + stq $19,72($30); \ + stq $20,80($30); \ + stq $21,88($30); \ + stq $22,96($30); \ + stq $23,104($30); \ + stq $24,112($30); \ + stq $25,120($30); \ + stq $26,128($30); \ + stq $27,136($30); \ + stq $28,144($30); \ + stq $16,160($30); \ + stq $17,168($30); \ + stq $18,176($30) + +#define RESTORE_ALL \ + ldq $0,0($30); \ + ldq $1,8($30); \ + ldq $2,16($30); \ + ldq $3,24($30); \ + ldq $4,32($30); \ + ldq $5,40($30); \ + ldq $6,48($30); \ + ldq $7,56($30); \ + ldq $8,64($30); \ + ldq $19,72($30); \ + ldq $20,80($30); \ + ldq $21,88($30); \ + ldq $22,96($30); \ + ldq $23,104($30); \ + ldq $24,112($30); \ + ldq $25,120($30); \ + ldq $26,128($30); \ + ldq $27,136($30); \ + ldq $28,144($30); \ + addq $30,184,$30 + +#else /* TSUNAMI */ +#define SAVE_ALL \ + subq $30,184,$30; \ + stq $0,0($30); \ + stq $1,8($30); \ + stq $2,16($30); \ + stq $3,24($30); \ + stq $4,32($30); \ + stq $28,144($30); \ lda $2,hae; \ stq $5,40($30); \ stq $6,48($30); \ @@ -70,7 +122,6 @@ stq $25,120($30); \ stq $26,128($30); \ stq $27,136($30); \ - stq $28,144($30); \ stq $2,152($30); \ stq $16,160($30); \ stq $17,168($30); \ @@ -113,6 +164,8 @@ ldq $28,144($30); \ addq $30,184,$30 +#endif /* TSUNAMI */ + .text .set noat #if defined(__linux__) && !defined(__ELF__) @@ -508,6 +561,8 @@ sys_clone: alpha_switch_to: bsr $1,do_switch_stack call_pal PAL_swpctx + lda $16,-2($31) + call_pal PAL_tbi bsr $1,undo_switch_stack ret $31,($26),1 .end alpha_switch_to @@ -681,6 +736,19 @@ signal_return: br $31,restore_all .end entSys +#ifdef __SMP__ + .globl ret_from_smpfork +.align 3 +.ent ret_from_smpfork +ret_from_smpfork: + .set at + stq $31,scheduler_lock + mb /* ?????????????????? 
*/ + br ret_from_sys_call + .set noat +.end ret_from_smpfork +#endif /* __SMP__ */ + .align 3 .ent reschedule reschedule: diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S index 4a3ec9e7c..a6bcd616d 100644 --- a/arch/alpha/kernel/head.S +++ b/arch/alpha/kernel/head.S @@ -32,6 +32,27 @@ __start: halt .end __start +#ifdef __SMP__ + .align 3 + .globl __start_cpu + .ent __start_cpu + /* on entry here from SRM console, the HWPCB of this processor */ + /* has been loaded, and $27 contains the task pointer */ +__start_cpu: + /* first order of business, load the GP */ + br $26,1f +1: ldgp $29,0($26) + /* We need to get current loaded up with our first task... */ + lda $8,0($27) + /* set FEN */ + lda $16,1($31) + call_pal PAL_wrfen + /* ... and then we can start the processor. */ + jsr $26,start_secondary + halt + .end __start_cpu +#endif /* __SMP__ */ + .align 3 .globl wrent .ent wrent diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 41d5d5f01..bcac2da2b 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -30,6 +30,10 @@ #define vulp volatile unsigned long * #define vuip volatile unsigned int * +extern void timer_interrupt(struct pt_regs * regs); +extern void cserve_update_hw(unsigned long, unsigned long); +extern void handle_ipi(struct pt_regs *); + #define RTC_IRQ 8 #ifdef CONFIG_RTC #define TIMER_IRQ 0 /* timer is the pit */ @@ -45,12 +49,15 @@ #if defined(CONFIG_ALPHA_P2K) /* always mask out unused timer irq 0 and RTC irq 8 */ # define PROBE_MASK (((1UL << NR_IRQS) - 1) & ~0x101UL) -#elif defined(CONFIG_ALPHA_ALCOR) +#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT) /* always mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade: */ # define PROBE_MASK (((1UL << NR_IRQS) - 1) & ~0xfff000000001UL) #elif defined(CONFIG_ALPHA_RUFFIAN) /* must leave timer irq 0 in the mask */ # define PROBE_MASK ((1UL << NR_IRQS) - 1) +#elif NR_IRQS == 64 + /* always mask out unused timer irq 0: */ +# define PROBE_MASK (~1UL) #else /* always mask out unused timer irq 0: */ # define PROBE_MASK (((1UL << NR_IRQS) - 1) & ~1UL) @@ -119,7 +126,7 @@ sable_update_hw(unsigned long irq, unsigned long mask) { /* The "irq" argument is really the mask bit number */ switch (irq) { - default: /* 16 ... 23 */ + case 16 ... 23: outb(mask >> 16, 0x53d); break; case 8 ... 15: @@ -135,7 +142,7 @@ static inline void noritake_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 32 ... 47 */ + case 32 ... 47: outw(~(mask >> 32), 0x54c); break; case 16 ... 31: @@ -155,7 +162,7 @@ static inline void miata_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 16 ... 47 */ + case 16 ... 47: /* Make CERTAIN none of the bogus ints get enabled... */ *(vulp)PYXIS_INT_MASK = ~((long)mask >> 16) & ~0x4000000000000e3bUL; @@ -178,7 +185,7 @@ static inline void alcor_and_xlt_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 16 ... 47 */ + case 16 ... 47: /* On Alcor, at least, lines 20..30 are not connected and can generate spurrious interrupts if we turn them on while IRQ probing. So explicitly mask them out. */ @@ -202,7 +209,7 @@ static inline void mikasa_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 16 ... 31 */ + case 16 ... 31: outw(~(mask >> 16), 0x536); /* note invert */ break; case 8 ... 
15: /* ISA PIC2 */ @@ -214,7 +221,7 @@ mikasa_update_hw(unsigned long irq, unsigned long mask) } } -#ifdef CONFIG_ALPHA_RUFFIAN +#if defined(CONFIG_ALPHA_RUFFIAN) static inline void ruffian_update_hw(unsigned long irq, unsigned long mask) { @@ -223,8 +230,7 @@ ruffian_update_hw(unsigned long irq, unsigned long mask) /* Note inverted sense of mask bits: */ /* Make CERTAIN none of the bogus ints get enabled... */ *(vulp)PYXIS_INT_MASK = - ~((long)mask >> 16) & 0x00000000ffffffbfUL; - mb(); + ~((long)mask >> 16) & 0x00000000ffffffbfUL; mb(); /* ... and read it back to make sure it got written. */ *(vulp)PYXIS_INT_MASK; break; @@ -236,20 +242,23 @@ ruffian_update_hw(unsigned long irq, unsigned long mask) break; } } -#endif +#endif /* RUFFIAN */ -#ifdef CONFIG_ALPHA_SX164 +#if defined(CONFIG_ALPHA_SX164) static inline void sx164_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { case 16 ... 39: - /* Make CERTAIN none of the bogus ints get enabled */ +#if defined(CONFIG_ALPHA_SRM) + cserve_update_hw(irq, mask); +#else + /* make CERTAIN none of the bogus ints get enabled */ *(vulp)PYXIS_INT_MASK = - ~((long)mask >> 16) & ~0x000000000000003bUL; - mb(); + ~((long)mask >> 16) & ~0x000000000000003bUL; mb(); /* ... and read it back to make sure it got written. */ *(vulp)PYXIS_INT_MASK; +#endif /* SRM */ break; case 8 ... 15: /* ISA PIC2 */ outb(mask >> 8, 0xA1); @@ -258,20 +267,23 @@ sx164_update_hw(unsigned long irq, unsigned long mask) outb(mask, 0x21); break; } -} -#endif -/* Unlabeled mechanisms based on the number of irqs. Someone should - probably document and name these. */ +} +#endif /* SX164 */ +#if defined(CONFIG_ALPHA_DP264) static inline void -update_hw_33(unsigned long irq, unsigned long mask) +dp264_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 16 ... 32 */ - outl(mask >> 16, 0x804); + case 16 ... 63: + /* make CERTAIN none of the bogus ints get enabled */ + /* HACK ALERT! only CPU#0 is used currently */ + *(vulp)TSUNAMI_CSR_DIM0 = + ~(mask) & ~0x0000000000000000UL; mb(); + /* ... and read it back to make sure it got written. */ + *(vulp)TSUNAMI_CSR_DIM0; break; - case 8 ... 15: /* ISA PIC2 */ outb(mask >> 8, 0xA1); break; @@ -280,16 +292,24 @@ update_hw_33(unsigned long irq, unsigned long mask) break; } } +#endif /* DP264 */ +#if defined(CONFIG_ALPHA_RAWHIDE) static inline void -update_hw_32(unsigned long irq, unsigned long mask) +rawhide_update_hw(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 24 ... 31 */ - outb(mask >> 24, 0x27); + case 16 ... 39: /* PCI bus 0 with EISA bridge */ + *(vuip)MCPCIA_INT_MASK0(0) = + (~((mask) >> 16) & 0x00ffffffU) | 0x00ff0000U; mb(); + /* ... and read it back to make sure it got written. */ + *(vuip)MCPCIA_INT_MASK0(0); break; - case 16 ... 23: - outb(mask >> 16, 0x26); + case 40 ... 63: /* PCI bus 1 with builtin NCR810 SCSI */ + *(vuip)MCPCIA_INT_MASK0(1) = + (~((mask) >> 40) & 0x00ffffffU) | 0x00fe0000U; mb(); + /* ... and read it back to make sure it got written. */ + *(vuip)MCPCIA_INT_MASK0(1); break; case 8 ... 15: /* ISA PIC2 */ outb(mask >> 8, 0xA1); @@ -299,12 +319,29 @@ update_hw_32(unsigned long irq, unsigned long mask) break; } } +#endif /* RAWHIDE */ +/* + * HW update code for the following platforms: + * + * CABRIOLET (AlphaPC64) + * EB66P + * EB164 + * PC164 + * LX164 + */ static inline void -update_hw_16(unsigned long irq, unsigned long mask) +update_hw_35(unsigned long irq, unsigned long mask) { switch (irq) { - default: /* 8 ... 15, ISA PIC2 */ + case 16 ... 
34: +#if defined(CONFIG_ALPHA_SRM) + cserve_update_hw(irq, mask); +#else /* SRM */ + outl(irq_mask >> 16, 0x804); +#endif /* SRM */ + break; + case 8 ... 15: /* ISA PIC2 */ outb(mask >> 8, 0xA1); break; case 0 ... 7: /* ISA PIC1 */ @@ -313,42 +350,38 @@ update_hw_16(unsigned long irq, unsigned long mask) } } -#if (defined(CONFIG_ALPHA_PC164) || defined(CONFIG_ALPHA_LX164)) \ - && defined(CONFIG_ALPHA_SRM) -/* - * On the pc164, we cannot take over the IRQs from the SRM, - * so we call down to do our dirty work. Too bad the SRM - * isn't consistent across platforms otherwise we could do - * this always. - */ - -extern void cserve_ena(unsigned long); -extern void cserve_dis(unsigned long); - -static inline void mask_irq(unsigned long irq) +static inline void +update_hw_32(unsigned long irq, unsigned long mask) { - irq_mask |= (1UL << irq); - cserve_dis(irq - 16); + switch (irq) { + case 24 ... 31: + outb(mask >> 24, 0x27); + break; + case 16 ... 23: + outb(mask >> 16, 0x26); + break; + case 8 ... 15: /* ISA PIC2 */ + outb(mask >> 8, 0xA1); + break; + case 0 ... 7: /* ISA PIC1 */ + outb(mask, 0x21); + break; } - -static inline void unmask_irq(unsigned long irq) -{ - irq_mask &= ~(1UL << irq); - cserve_ena(irq - 16); } -/* Since we are calling down to PALcode, no need to diddle IPL. */ -void disable_irq(unsigned int irq_nr) +static inline void +update_hw_16(unsigned long irq, unsigned long mask) { - mask_irq(IRQ_TO_MASK(irq_nr)); + switch (irq) { + case 8 ... 15: /* ISA PIC2 */ + outb(mask >> 8, 0xA1); + break; + case 0 ... 7: /* ISA PIC1 */ + outb(mask, 0x21); + break; } - -void enable_irq(unsigned int irq_nr) -{ - unmask_irq(IRQ_TO_MASK(irq_nr)); } -#else /* * We manipulate the hardware ourselves. */ @@ -369,9 +402,18 @@ static void update_hw(unsigned long irq, unsigned long mask) sx164_update_hw(irq, mask); #elif defined(CONFIG_ALPHA_RUFFIAN) ruffian_update_hw(irq, mask); -#elif NR_IRQS == 33 - update_hw_33(irq, mask); -#elif NR_IRQS == 32 +#elif defined(CONFIG_ALPHA_DP264) + dp264_update_hw(irq, mask); +#elif defined(CONFIG_ALPHA_RAWHIDE) + rawhide_update_hw(irq, mask); +#elif defined(CONFIG_ALPHA_CABRIOLET) || \ + defined(CONFIG_ALPHA_EB66P) || \ + defined(CONFIG_ALPHA_EB164) || \ + defined(CONFIG_ALPHA_PC164) || \ + defined(CONFIG_ALPHA_LX164) + update_hw_35(irq, mask); +#elif defined(CONFIG_ALPHA_EB66) || \ + defined(CONFIG_ALPHA_EB64P) update_hw_32(irq, mask); #elif NR_IRQS == 16 update_hw_16(irq, mask); @@ -396,8 +438,7 @@ void disable_irq(unsigned int irq_nr) { unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); mask_irq(IRQ_TO_MASK(irq_nr)); restore_flags(flags); } @@ -406,12 +447,10 @@ void enable_irq(unsigned int irq_nr) { unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); unmask_irq(IRQ_TO_MASK(irq_nr)); restore_flags(flags); } -#endif /* (PC164 || LX164) && SRM */ /* * Initial irq handlers. @@ -423,13 +462,14 @@ int get_irq_list(char *buf) { int i, len = 0; struct irqaction * action; + int cpu = smp_processor_id(); for (i = 0; i < NR_IRQS; i++) { action = irq_action[i]; if (!action) continue; len += sprintf(buf+len, "%2d: %10u %c %s", - i, kstat.irqs[0][i], + i, kstat.irqs[cpu][i], (action->flags & SA_INTERRUPT) ? '+' : ' ', action->name); for (action=action->next; action; action = action->next) { @@ -463,16 +503,18 @@ static inline void ack_irq(int irq) #elif defined(CONFIG_ALPHA_RUFFIAN) if (irq < 16) { /* Ack PYXIS ISA interrupt. */ - *(vulp)PYXIS_INT_REQ = 1 << 7; - mb(); + *(vulp)PYXIS_INT_REQ = 1L << 7; mb(); + /* ... 
and read it back to make sure it got written. */ + *(vulp)PYXIS_INT_REQ; if (irq > 7) { outb(0x20, 0xa0); } outb(0x20, 0x20); } else { - /* Ack PYXIS interrupt. */ + /* Ack PYXIS PCI interrupt. */ *(vulp)PYXIS_INT_REQ = (1UL << (irq - 16)); - mb(); + /* ... and read it back to make sure it got written. */ + *(vulp)PYXIS_INT_REQ; } #else if (irq < 16) { @@ -488,7 +530,7 @@ static inline void ack_irq(int irq) /* on ALCOR/XLT, need to dismiss interrupt via GRU */ *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); *(vuip)GRU_INT_CLEAR = 0x00000000; mb(); -#endif +#endif /* ALCOR || XLT */ } #endif } @@ -556,8 +598,7 @@ int request_irq(unsigned int irq, action->next = NULL; action->dev_id = dev_id; - save_flags(flags); - cli(); + save_and_cli(flags); *p = action; if (!shared) @@ -585,8 +626,7 @@ void free_irq(unsigned int irq, void *dev_id) continue; /* Found it - now free it */ - save_flags(flags); - cli(); + save_and_cli(flags); *p = action->next; if (!irq[irq_action]) mask_irq(IRQ_TO_MASK(irq)); @@ -607,7 +647,277 @@ unsigned int local_irq_count[NR_CPUS]; unsigned int local_bh_count[NR_CPUS]; #ifdef __SMP__ -#error "Me no hablo Alpha SMP" +/* Who has global_irq_lock. */ +unsigned char global_irq_holder = NO_PROC_ID; + +/* This protects IRQ's. */ +spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED; + +/* Global IRQ locking depth. */ +atomic_t global_irq_count = ATOMIC_INIT(0); + +/* This protects BH software state (masks, things like that). */ +atomic_t global_bh_lock = ATOMIC_INIT(0); +atomic_t global_bh_count = ATOMIC_INIT(0); + +static unsigned long previous_irqholder = NO_PROC_ID; + +#undef INIT_STUCK +#define INIT_STUCK 100000000 + +#undef STUCK +#define STUCK \ +if (!--stuck) {printk("wait_on_irq CPU#%d stuck at %08lx, waiting for %08lx (local=%d, global=%d)\n", cpu, where, previous_irqholder, local_count, atomic_read(&global_irq_count)); stuck = INIT_STUCK; } + +static inline void wait_on_irq(int cpu, unsigned long where) +{ + int stuck = INIT_STUCK; + int local_count = local_irq_count[cpu]; + + /* Are we the only one in an interrupt context? */ + while (local_count != atomic_read(&global_irq_count)) { + /* + * No such luck. Now we need to release the lock, + * _and_ release our interrupt context, because + * otherwise we'd have dead-locks and live-locks + * and other fun things. + */ + atomic_sub(local_count, &global_irq_count); + spin_unlock(&global_irq_lock); + + /* + * Wait for everybody else to go away and release + * their things before trying to get the lock again. + */ + for (;;) { + STUCK; + if (atomic_read(&global_irq_count)) + continue; + if (global_irq_lock.lock) + continue; + if (spin_trylock(&global_irq_lock)) + break; + } + atomic_add(local_count, &global_irq_count); + } +} + +#undef INIT_STUCK +#define INIT_STUCK 10000000 + +#undef STUCK +#define STUCK \ +if (!--stuck) {printk("get_irqlock stuck at %08lx, waiting for %08lx\n", where, previous_irqholder); stuck = INIT_STUCK;} + +static inline void get_irqlock(int cpu, unsigned long where) +{ + int stuck = INIT_STUCK; + + if (!spin_trylock(&global_irq_lock)) { + /* do we already hold the lock? */ + if ((unsigned char) cpu == global_irq_holder) { +#if 0 + printk("get_irqlock: already held at %08lx\n", + previous_irqholder); +#endif + return; + } + /* Uhhuh.. Somebody else got it. Wait.. */ + do { + do { + STUCK; + barrier(); + } while (global_irq_lock.lock); + } while (!spin_trylock(&global_irq_lock)); + } + /* + * Ok, we got the lock bit. + * But that's actually just the easy part.. 
Now + * we need to make sure that nobody else is running + * in an interrupt context. + */ + wait_on_irq(cpu, where); + + /* + * Finally. + */ + global_irq_holder = cpu; + previous_irqholder = where; +} + +void __global_cli(void) +{ + int cpu = smp_processor_id(); + unsigned long where; + + __asm__("mov $26, %0" : "=r" (where)); + __cli(); + + if (!local_irq_count[cpu]) + get_irqlock(smp_processor_id(), where); +} + +void __global_sti(void) +{ + int cpu = smp_processor_id(); + + if (!local_irq_count[cpu]) + release_irqlock(smp_processor_id()); + + __sti(); +} + +#if 0 +unsigned long __global_save_flags(void) +{ + return global_irq_holder == (unsigned char) smp_processor_id(); +} +#endif + +void __global_restore_flags(unsigned long flags) +{ + if (flags & 1) { + __global_cli(); + } else { + /* release_irqlock() */ + if (global_irq_holder == smp_processor_id()) { + global_irq_holder = NO_PROC_ID; + spin_unlock(&global_irq_lock); + } + if (!(flags & 2)) + __sti(); + } +} + +#undef INIT_STUCK +#define INIT_STUCK 200000000 + +#undef STUCK +#define STUCK \ +if (!--stuck) {printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n",irq,cpu,global_irq_holder); stuck = INIT_STUCK;} + +#undef VERBOSE_IRQLOCK_DEBUGGING + +void irq_enter(int cpu, int irq) +{ +#ifdef VERBOSE_IRQLOCK_DEBUGGING + extern void smp_show_backtrace_all_cpus(void); +#endif + int stuck = INIT_STUCK; + + hardirq_enter(cpu); + barrier(); + while (global_irq_lock.lock) { + if ((unsigned char) cpu == global_irq_holder) { + int globl_locked = global_irq_lock.lock; + int globl_icount = atomic_read(&global_irq_count); + int local_count = local_irq_count[cpu]; + + /* It is very important that we load the state variables + * before we do the first call to printk() as printk() + * could end up changing them... + */ + +#if 0 + printk("CPU[%d]: BAD! Local IRQ's enabled," + " global disabled interrupt\n", cpu); +#endif + printk("CPU[%d]: where [%08lx] glocked[%d] gicnt[%d]" + " licnt[%d]\n", + cpu, previous_irqholder, globl_locked, + globl_icount, local_count); +#ifdef VERBOSE_IRQLOCK_DEBUGGING + printk("Performing backtrace on all cpus," + " write this down!\n"); + smp_show_backtrace_all_cpus(); +#endif + break; + } + STUCK; + barrier(); + } +} + +void irq_exit(int cpu, int irq) +{ + hardirq_exit(cpu); + release_irqlock(cpu); +} + +static void show(char * str) +{ +#if 0 + int i; + unsigned long *stack; +#endif + int cpu = smp_processor_id(); + + printk("\n%s, CPU %d:\n", str, cpu); + printk("irq: %d [%d %d]\n", + atomic_read(&global_irq_count), local_irq_count[0], + local_irq_count[1]); + printk("bh: %d [%d %d]\n", + atomic_read(&global_bh_count), local_bh_count[0], + local_bh_count[1]); +#if 0 + stack = (unsigned long *) &str; + for (i = 40; i ; i--) { + unsigned long x = *++stack; + if (x > (unsigned long) &init_task_union && + x < (unsigned long) &vsprintf) { + printk("<[%08lx]> ", x); + } + } +#endif +} + +#define MAXCOUNT 100000000 + +static inline void wait_on_bh(void) +{ + int count = MAXCOUNT; + do { + if (!--count) { + show("wait_on_bh"); + count = ~0; + } + /* nothing .. wait for the other bh's to go away */ + } while (atomic_read(&global_bh_count) != 0); +} + +/* + * This is called when we want to synchronize with + * bottom half handlers. We need to wait until + * no other CPU is executing any bottom half handler. + * + * Don't wait if we're already running in an interrupt + * context or are inside a bh handler. 
+ */ +void synchronize_bh(void) +{ + if (atomic_read(&global_bh_count)) { + int cpu = smp_processor_id(); + if (!local_irq_count[cpu] && !local_bh_count[cpu]) { + wait_on_bh(); + } + } +} + +/* There has to be a better way. */ +void synchronize_irq(void) +{ + int cpu = smp_processor_id(); + int local_count = local_irq_count[cpu]; + + if (local_count != atomic_read(&global_irq_count)) { + unsigned long flags; + + /* An infamously unpopular approach. */ + save_and_cli(flags); + restore_flags(flags); + } +} + #else #define irq_enter(cpu, irq) (++local_irq_count[cpu]) #define irq_exit(cpu, irq) (--local_irq_count[cpu]) @@ -647,7 +957,7 @@ static inline void handle_irq(int irq, struct pt_regs * regs) int cpu = smp_processor_id(); irq_enter(cpu, irq); - kstat.irqs[0][irq] += 1; + kstat.irqs[cpu][irq] += 1; if (!action) { unexpected_irq(irq, regs); } else { @@ -670,8 +980,9 @@ static inline void device_interrupt(int irq, int ack, struct pt_regs * regs) } irq_enter(cpu, irq); - kstat.irqs[0][irq] += 1; + kstat.irqs[cpu][irq] += 1; action = irq_action[irq]; + /* * For normal interrupts, we mask it out, and then ACK it. * This way another (more timing-critical) interrupt can @@ -691,6 +1002,10 @@ static inline void device_interrupt(int irq, int ack, struct pt_regs * regs) action = action->next; } while (action); unmask_irq(ack); + } else { +#if 1 + printk("device_interrupt: unexpected interrupt %d\n", irq); +#endif } irq_exit(cpu, irq); } @@ -711,6 +1026,8 @@ static inline void isa_device_interrupt(unsigned long vector, # define IACK_SC CIA_IACK_SC #elif defined(CONFIG_ALPHA_PYXIS) # define IACK_SC PYXIS_IACK_SC +#elif defined(CONFIG_ALPHA_TSUNAMI) +# define IACK_SC TSUNAMI_PCI0_IACK_SC #else /* * This is bogus but necessary to get it to compile @@ -729,7 +1046,7 @@ static inline void isa_device_interrupt(unsigned long vector, * interrupt that is pending. The PALcode sets up the * interrupts vectors such that irq level L generates vector L. 
*/ - j = *(volatile int *) IACK_SC; + j = *(vuip) IACK_SC; j &= 0xff; if (j == 7) { if (!(inb(0x20) & 0x80)) { @@ -775,10 +1092,9 @@ alcor_and_xlt_device_interrupt(unsigned long vector, struct pt_regs *regs) unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary register of the GRU */ + /* Read the interrupt summary register of the GRU */ pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS; #if 0 @@ -810,10 +1126,9 @@ cabriolet_and_eb66p_device_interrupt(unsigned long vector, unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary registers */ + /* Read the interrupt summary registers */ pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16); #if 0 @@ -843,10 +1158,9 @@ mikasa_device_interrupt(unsigned long vector, struct pt_regs *regs) unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary registers */ + /* Read the interrupt summary registers */ pld = (((unsigned long) (~inw(0x534)) & 0x0000ffffUL) << 16) | (((unsigned long) inb(0xa0)) << 8) | ((unsigned long) inb(0x20)); @@ -878,10 +1192,9 @@ eb66_and_eb64p_device_interrupt(unsigned long vector, struct pt_regs *regs) unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary registers */ + /* Read the interrupt summary registers */ pld = inb(0x26) | (inb(0x27) << 8); /* * Now, for every possible bit set, work through @@ -909,30 +1222,34 @@ miata_device_interrupt(unsigned long vector, struct pt_regs *regs) unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary register of PYXIS */ - pld = (*(vulp)PYXIS_INT_REQ); + /* Read the interrupt summary register of PYXIS */ + pld = *(vulp)PYXIS_INT_REQ; #if 0 printk("[0x%08lx/0x%08lx/0x%04x]", pld, *(vulp)PYXIS_INT_MASK, inb(0x20) | (inb(0xA0) << 8)); #endif - /* For now, AND off any bits we are not interested in. */ -#if defined(CONFIG_ALPHA_MIATA) - /* HALT (2), timer (6), ISA Bridge (7), 21142/3 (8), - then all the PCI slots/INTXs (12-31). */ +#ifdef CONFIG_ALPHA_MIATA + /* + * For now, AND off any bits we are not interested in: + * HALT (2), timer (6), ISA Bridge (7), 21142/3 (8) + * then all the PCI slots/INTXs (12-31). + */ /* Maybe HALT should only be used for SRM console boots? */ pld &= 0x00000000fffff1c4UL; #endif -#if defined(CONFIG_ALPHA_SX164) - /* HALT (2), timer (6), ISA Bridge (7), - then all the PCI slots/INTXs (8-23). */ - /* HALT should only be used for SRM console boots. */ +#ifdef CONFIG_ALPHA_SX164 + /* + * For now, AND off any bits we are not interested in: + * HALT (2), timer (6), ISA Bridge (7) + * then all the PCI slots/INTXs (8-23) + */ + /* Maybe HALT should only be used for SRM console boots? 
*/ pld &= 0x0000000000ffffc0UL; -#endif +#endif /* SX164 */ /* * Now for every possible bit set, work through them and call @@ -962,10 +1279,9 @@ noritake_device_interrupt(unsigned long vector, struct pt_regs *regs) unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); - /* read the interrupt summary registers of NORITAKE */ + /* Read the interrupt summary registers of NORITAKE */ pld = ((unsigned long) inw(0x54c) << 32) | ((unsigned long) inw(0x54a) << 16) | ((unsigned long) inb(0xa0) << 8) | @@ -991,16 +1307,76 @@ noritake_device_interrupt(unsigned long vector, struct pt_regs *regs) restore_flags(flags); } +#if defined(CONFIG_ALPHA_DP264) +/* we have to conditionally compile this because of TSUNAMI_xxx symbols */ +static inline void dp264_device_interrupt(unsigned long vector, + struct pt_regs * regs) +{ + unsigned long pld, tmp; + unsigned int i; + unsigned long flags; + + __save_and_cli(flags); + + /* Read the interrupt summary register of TSUNAMI */ + pld = (*(vulp)TSUNAMI_CSR_DIR0); + +#if 0 + printk("[0x%08lx/0x%08lx/0x%04x]", pld, + *(vulp)TSUNAMI_CSR_DIM0, + inb(0x20) | (inb(0xA0) << 8)); +#endif + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 55) { + isa_device_interrupt(vector, regs); + } else { /* if not timer int */ + device_interrupt(16 + i, 16 + i, regs); + } +#if 0 + *(vulp)TSUNAMI_CSR_DIR0 = 1UL << i; mb(); + tmp = *(vulp)TSUNAMI_CSR_DIR0; +#endif + } + __restore_flags(flags); +} +#endif /* DP264 */ + +#if defined(CONFIG_ALPHA_RAWHIDE) +/* we have to conditionally compile this because of MCPCIA_xxx symbols */ +static inline void rawhide_device_interrupt(unsigned long vector, + struct pt_regs * regs) +{ +#if 0 + unsigned long pld; + unsigned int i; + unsigned long flags; + + __save_and_cli(flags); + + /* PLACEHOLDER, perhaps never used if we always do SRM */ + + __restore_flags(flags); +#endif +} +#endif /* RAWHIDE */ + #if defined(CONFIG_ALPHA_RUFFIAN) static inline void ruffian_device_interrupt(unsigned long vector, struct pt_regs *regs) + { unsigned long pld; unsigned int i; unsigned long flags; - save_flags(flags); - cli(); + save_and_cli(flags); /* Read the interrupt summary register of PYXIS */ pld = *(vulp)PYXIS_INT_REQ; @@ -1010,16 +1386,16 @@ ruffian_device_interrupt(unsigned long vector, struct pt_regs *regs) * then all the PCI slots/INTXs (12-31) * flash(5) :DWH: */ - pld &= 0x00000000ffffff9fUL; + pld &= 0x00000000ffffff9fUL;/* was ffff7f */ /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ + while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ - if (i == 7) { /* Copy this bit from isa_device_interrupt cause we need to hook into int 0 for the timer. 
I @@ -1041,19 +1417,57 @@ ruffian_device_interrupt(unsigned long vector, struct pt_regs *regs) } else { device_interrupt(j, j, regs); } - } else { + } else { /* if not timer int */ device_interrupt(16 + i, 16 + i, regs); } - *(vulp)PYXIS_INT_REQ = 1UL << i; - mb(); - *(vulp)PYXIS_INT_REQ; + *(vulp)PYXIS_INT_REQ = 1UL << i; mb(); + *(vulp)PYXIS_INT_REQ; /* read to force the write */ } - restore_flags(flags); } #endif /* RUFFIAN */ +static inline void takara_device_interrupt(unsigned long vector, + struct pt_regs * regs) +{ + unsigned long flags; + unsigned intstatus; + + save_and_cli(flags); + + /* + * The PALcode will have passed us vectors 0x800 or 0x810, + * which are fairly arbitrary values and serve only to tell + * us whether an interrupt has come in on IRQ0 or IRQ1. If + * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's + * probably ISA, but PCI interrupts can come through IRQ0 + * as well if the interrupt controller isn't in accelerated + * mode. + * + * OTOH, the accelerator thing doesn't seem to be working + * overly well, so what we'll do instead is try directly + * examining the Master Interrupt Register to see if it's a + * PCI interrupt, and if _not_ then we'll pass it on to the + * ISA handler. + */ + + intstatus = inw(0x500) & 15; + if (intstatus) { + /* + * This is a PCI interrupt. Check each bit and + * despatch an interrupt if it's set. + */ + if (intstatus & 8) device_interrupt(16+3, 16+3, regs); + if (intstatus & 4) device_interrupt(16+2, 16+2, regs); + if (intstatus & 2) device_interrupt(16+1, 16+1, regs); + if (intstatus & 1) device_interrupt(16+0, 16+0, regs); + } else + isa_device_interrupt (vector, regs); + + restore_flags(flags); +} + #endif /* CONFIG_PCI */ /* @@ -1085,9 +1499,11 @@ srm_device_interrupt(unsigned long vector, struct pt_regs * regs) int irq, ack; unsigned long flags; - save_flags(flags); - cli(); + __save_and_cli(flags); +#ifdef __SMP__ +if (smp_processor_id()) printk("srm_device_interrupt on other CPU\n"); +#endif ack = irq = (vector - 0x800) >> 4; @@ -1131,9 +1547,9 @@ srm_device_interrupt(unsigned long vector, struct pt_regs * regs) #ifdef CONFIG_ALPHA_NORITAKE /* - * I really hate to do this, but the NORITAKE SRM console reports - * PCI vectors *lower* than I expected from the bit numbering in - * the documentation. + * I really hate to do this, too, but the NORITAKE SRM console also + * reports PCI vectors *lower* than I expected from the bit numbers + * in the documentation. * But I really don't want to change the fixup code for allocation * of IRQs, nor the irq_mask maintenance stuff, both of which look * nice and clean now. @@ -1153,9 +1569,41 @@ srm_device_interrupt(unsigned long vector, struct pt_regs * regs) #endif #endif /* CONFIG_ALPHA_SABLE */ +#ifdef CONFIG_ALPHA_DP264 + /* + * the DP264 SRM console reports PCI interrupts with a vector + * 0x100 *higher* than one might expect, as PCI IRQ 0 (ie bit 0) + * shows up as IRQ 16, etc, etc. We adjust it down by 16 to have + * it line up with the actual bit numbers from the DIM registers, + * which is how we manage the interrupts/mask. Sigh... + */ + if (irq >= 32) + ack = irq = irq - 16; +#endif /* DP264 */ + +#ifdef CONFIG_ALPHA_RAWHIDE + /* + * the RAWHIDE SRM console reports PCI interrupts with a vector + * 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0) + * shows up as IRQ 24, etc, etc. We adjust it down by 8 to have + * it line up with the actual bit numbers from the REQ registers, + * which is how we manage the interrupts/mask. Sigh... 
+ * + * also, PCI #1 interrupts are offset some more... :-( + */ + if (irq == 52) + ack = irq = 56; /* SCSI on PCI 1 is special */ + else { + if (irq >= 24) /* adjust all PCI interrupts down 8 */ + ack = irq = irq - 8; + if (irq >= 48) /* adjust PCI bus 1 interrupts down another 8 */ + ack = irq = irq - 8; + } +#endif /* RAWHIDE */ + device_interrupt(irq, ack, regs); - restore_flags(flags); + __restore_flags(flags); } /* @@ -1218,6 +1666,10 @@ extern void pyxis_machine_check(unsigned long vector, unsigned long la, struct pt_regs * regs); extern void t2_machine_check(unsigned long vector, unsigned long la, struct pt_regs * regs); +extern void tsunami_machine_check(unsigned long vector, unsigned long la, + struct pt_regs * regs); +extern void mcpcia_machine_check(unsigned long vector, unsigned long la, + struct pt_regs * regs); static void machine_check(unsigned long vector, unsigned long la, struct pt_regs *regs) @@ -1232,6 +1684,10 @@ machine_check(unsigned long vector, unsigned long la, struct pt_regs *regs) pyxis_machine_check(vector, la, regs); #elif defined(CONFIG_ALPHA_T2) t2_machine_check(vector, la, regs); +#elif defined(CONFIG_ALPHA_TSUNAMI) + tsunami_machine_check(vector, la, regs); +#elif defined(CONFIG_ALPHA_MCPCIA) + mcpcia_machine_check(vector, la, regs); #else printk("Machine check\n"); #endif @@ -1244,7 +1700,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, { switch (type) { case 0: +#ifdef __SMP__ +/* irq_enter(smp_processor_id(), 0); ??????? */ + handle_ipi(&regs); +/* irq_exit(smp_processor_id(), 0); ??????? */ + return; +#else /* __SMP__ */ printk("Interprocessor interrupt? You must be kidding\n"); +#endif /* __SMP__ */ break; case 1: handle_irq(RTC_IRQ, &regs); @@ -1253,23 +1716,38 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, machine_check(vector, la_ptr, &regs); return; case 3: -#if defined(CONFIG_ALPHA_JENSEN) || defined(CONFIG_ALPHA_NONAME) || \ - defined(CONFIG_ALPHA_P2K) || defined(CONFIG_ALPHA_SRM) +#if defined(CONFIG_ALPHA_JENSEN) || \ + defined(CONFIG_ALPHA_NONAME) || \ + defined(CONFIG_ALPHA_P2K) || \ + defined(CONFIG_ALPHA_SRM) srm_device_interrupt(vector, &regs); -#elif defined(CONFIG_ALPHA_MIATA) || defined(CONFIG_ALPHA_SX164) +#elif defined(CONFIG_ALPHA_MIATA) || \ + defined(CONFIG_ALPHA_SX164) miata_device_interrupt(vector, &regs); #elif defined(CONFIG_ALPHA_NORITAKE) noritake_device_interrupt(vector, &regs); -#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT) +#elif defined(CONFIG_ALPHA_ALCOR) || \ + defined(CONFIG_ALPHA_XLT) alcor_and_xlt_device_interrupt(vector, &regs); -#elif defined(CONFIG_ALPHA_RUFFIAN) - ruffian_device_interrupt(vector, &regs); +#elif defined(CONFIG_ALPHA_CABRIOLET) || \ + defined(CONFIG_ALPHA_EB66P) || \ + defined(CONFIG_ALPHA_EB164) || \ + defined(CONFIG_ALPHA_PC164) || \ + defined(CONFIG_ALPHA_LX164) + cabriolet_and_eb66p_device_interrupt(vector, &regs); #elif defined(CONFIG_ALPHA_MIKASA) mikasa_device_interrupt(vector, &regs); -#elif NR_IRQS == 33 - cabriolet_and_eb66p_device_interrupt(vector, &regs); -#elif NR_IRQS == 32 +#elif defined(CONFIG_ALPHA_EB66) || \ + defined(CONFIG_ALPHA_EB64P) eb66_and_eb64p_device_interrupt(vector, &regs); +#elif defined(CONFIG_ALPHA_RUFFIAN) + ruffian_device_interrupt(vector, &regs); +#elif defined(CONFIG_ALPHA_DP264) + dp264_device_interrupt(vector, &regs); +#elif defined(CONFIG_ALPHA_RAWHIDE) + rawhide_device_interrupt(vector, &regs); +#elif defined(CONFIG_ALPHA_TAKARA) + takara_device_interrupt(vector, &regs); #elif NR_IRQS == 16 isa_device_interrupt(vector, &regs); 
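[Editor's note: a minimal illustrative sketch, not part of the patch. srm_device_interrupt() above computes ack = irq = (vector - 0x800) >> 4, and the new DP264/RAWHIDE hunks then renumber the PCI interrupts so they line up with the DIM/REQ mask bits. The helper below merely restates that translation in one place; its name is hypothetical.]

/* Editor's sketch: SRM delivers device interrupts as vector = 0x800 + 0x10*irq;
 * DP264 and RAWHIDE additionally report PCI interrupts offset upward. */
static int srm_vector_to_irq(unsigned long vector)
{
	int irq = (vector - 0x800) >> 4;	/* common SRM encoding */
#if defined(CONFIG_ALPHA_DP264)
	if (irq >= 32)				/* SRM reports PCI irqs 0x100 too high */
		irq -= 16;
#elif defined(CONFIG_ALPHA_RAWHIDE)
	if (irq == 52)				/* SCSI on PCI bus 1 is special */
		irq = 56;
	else {
		if (irq >= 24)			/* all PCI irqs reported 8 too high */
			irq -= 8;
		if (irq >= 48)			/* PCI bus 1 irqs: another 8 */
			irq -= 8;
	}
#endif
	return irq;
}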
#endif @@ -1293,22 +1771,21 @@ static inline void sable_init_IRQ(void) outb(0x44, 0x535); /* enable cascades in master */ } -#ifdef CONFIG_ALPHA_SX164 +#if defined(CONFIG_ALPHA_SX164) static inline void sx164_init_IRQ(void) { +#if !defined(CONFIG_ALPHA_SRM) /* note invert on MASK bits */ *(vulp)PYXIS_INT_MASK = ~((long)irq_mask >> 16); mb(); -#if 0 - *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */ - *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */ -#endif + *(vulp)PYXIS_INT_MASK; +#endif /* !SRM */ enable_irq(16 + 6); /* enable timer */ enable_irq(16 + 7); /* enable ISA PIC cascade */ enable_irq(2); /* enable cascade */ } #endif /* SX164 */ -#ifdef CONFIG_ALPHA_RUFFIAN +#if defined(CONFIG_ALPHA_RUFFIAN) static inline void ruffian_init_IRQ(void) { /* invert 6&7 for i82371 */ @@ -1343,19 +1820,19 @@ static inline void ruffian_init_IRQ(void) } #endif /* RUFFIAN */ - #ifdef CONFIG_ALPHA_MIATA static inline void miata_init_IRQ(void) { /* note invert on MASK bits */ *(vulp)PYXIS_INT_MASK = ~((long)irq_mask >> 16); mb(); /* invert */ +#if 0 + /* these break on MiataGL so we'll try not to do it at all */ *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */ *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */ - *(vulp)PYXIS_INT_REQ = 0x4000000000000000UL; mb(); /* clear upper timer */ -#if 0 - *(vulp)PYXIS_INT_ROUTE = 0UL; mb(); /* all are level */ - *(vulp)PYXIS_INT_CNFG = 0UL; mb(); /* all clear */ #endif + /* clear upper timer */ + *(vulp)PYXIS_INT_REQ = 0x4000000000000000UL; mb(); + enable_irq(16 + 2); /* enable HALT switch - SRM only? */ enable_irq(16 + 6); /* enable timer */ enable_irq(16 + 7); /* enable ISA PIC cascade */ @@ -1381,7 +1858,7 @@ static inline void alcor_and_xlt_init_IRQ(void) enable_irq(16 + 31); /* enable (E)ISA PIC cascade */ enable_irq(2); /* enable cascade */ } -#endif +#endif /* ALCOR || XLT */ static inline void mikasa_init_IRQ(void) { @@ -1389,9 +1866,56 @@ static inline void mikasa_init_IRQ(void) enable_irq(2); /* enable cascade */ } -static inline void init_IRQ_33(void) +#if defined(CONFIG_ALPHA_DP264) +static inline void dp264_init_IRQ(void) +{ + /* note invert on MASK bits */ + *(vulp)TSUNAMI_CSR_DIM0 = + ~(irq_mask) & ~0x0000000000000000UL; mb(); + *(vulp)TSUNAMI_CSR_DIM0; + enable_irq(55); /* enable CYPRESS interrupt controller (ISA) */ + enable_irq(2); +} +#endif /* DP264 */ + +#if defined(CONFIG_ALPHA_RAWHIDE) +static inline void rawhide_init_IRQ(void) { + /* HACK ALERT! only PCI busses 0 and 1 are used currently, + and routing is only to CPU #1*/ + + *(vuip)MCPCIA_INT_MASK0(0) = + (~((irq_mask) >> 16) & 0x00ffffffU) | 0x00ff0000U; mb(); + /* ... and read it back to make sure it got written. */ + *(vuip)MCPCIA_INT_MASK0(0); + + *(vuip)MCPCIA_INT_MASK0(1) = + (~((irq_mask) >> 40) & 0x00ffffffU) | 0x00fe0000U; mb(); + /* ... and read it back to make sure it got written. 
*/ + *(vuip)MCPCIA_INT_MASK0(1); + enable_irq(2); +} +#endif /* RAWHIDE */ + +static inline void takara_init_IRQ(void) +{ + unsigned int ctlreg = inl(0x500); + + ctlreg &= ~0x8000; /* return to non-accelerated mode */ + outw(ctlreg >> 16, 0x502); + outw(ctlreg & 0xFFFF, 0x500); + ctlreg = 0x05107c00; /* enable the PCI interrupt register */ + printk("Setting to 0x%08x\n", ctlreg); + outw(ctlreg >> 16, 0x502); + outw(ctlreg & 0xFFFF, 0x500); + enable_irq(2); +} + +static inline void init_IRQ_35(void) +{ +#if !defined(CONFIG_ALPHA_SRM) outl(irq_mask >> 16, 0x804); +#endif /* !SRM */ enable_irq(16 + 4); /* enable SIO cascade */ enable_irq(2); /* enable cascade */ } @@ -1413,13 +1937,20 @@ void __init init_IRQ(void) { wrent(entInt, 0); - dma_outb(0, DMA1_RESET_REG); - dma_outb(0, DMA2_RESET_REG); -#ifndef CONFIG_ALPHA_SX164 - dma_outb(0, DMA1_CLR_MASK_REG); - /* We need to figure out why this fails on the SX164. */ - dma_outb(0, DMA2_CLR_MASK_REG); -#endif + +/* FIXME FIXME FIXME FIXME FIXME */ +#if !defined(CONFIG_ALPHA_DP264) + /* we need to figure out why these fail on the DP264 */ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); +#endif /* !DP264 */ +/* FIXME FIXME FIXME FIXME FIXME */ +#if !defined(CONFIG_ALPHA_SX164) && !defined(CONFIG_ALPHA_DP264) + outb(0, DMA1_CLR_MASK_REG); + /* we need to figure out why this fails on the SX164 */ + outb(0, DMA2_CLR_MASK_REG); +#endif /* !SX164 && !DP264 */ +/* end FIXMEs */ #if defined(CONFIG_ALPHA_SABLE) sable_init_IRQ(); @@ -1431,17 +1962,21 @@ init_IRQ(void) noritake_init_IRQ(); #elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT) alcor_and_xlt_init_IRQ(); -#elif (defined(CONFIG_ALPHA_PC164) || defined(CONFIG_ALPHA_LX164)) \ - && defined(CONFIG_ALPHA_SRM) - /* Disable all the PCI interrupts? Otherwise, everthing was - done by SRM already. */ #elif defined(CONFIG_ALPHA_MIKASA) mikasa_init_IRQ(); +#elif defined(CONFIG_ALPHA_CABRIOLET) || defined(CONFIG_ALPHA_EB66P) || \ + defined(CONFIG_ALPHA_PC164) || defined(CONFIG_ALPHA_LX164) || \ + defined(CONFIG_ALPHA_EB164) + init_IRQ_35(); #elif defined(CONFIG_ALPHA_RUFFIAN) ruffian_init_IRQ(); -#elif NR_IRQS == 33 - init_IRQ_33(); -#elif NR_IRQS == 32 +#elif defined(CONFIG_ALPHA_DP264) + dp264_init_IRQ(); +#elif defined(CONFIG_ALPHA_RAWHIDE) + rawhide_init_IRQ(); +#elif defined(CONFIG_ALPHA_TAKARA) + takara_init_IRQ(); +#elif defined(CONFIG_ALPHA_EB66) || defined(CONFIG_ALPHA_EB64P) init_IRQ_32(); #elif NR_IRQS == 16 init_IRQ_16(); diff --git a/arch/alpha/kernel/lca.c b/arch/alpha/kernel/lca.c index 2a39a1cf9..a0b8aea8d 100644 --- a/arch/alpha/kernel/lca.c +++ b/arch/alpha/kernel/lca.c @@ -6,8 +6,8 @@ * bios code. 
*/ #include <linux/kernel.h> +#include <linux/config.h> #include <linux/types.h> -#include <linux/bios32.h> #include <linux/pci.h> #include <asm/ptrace.h> @@ -44,6 +44,11 @@ #define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */ #define MCHK_K_DCSR 0x208 /* all but Noname */ +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int LCA_DMA_WIN_BASE = LCA_DMA_WIN_BASE_DEFAULT; +unsigned int LCA_DMA_WIN_SIZE = LCA_DMA_WIN_SIZE_DEFAULT; +#endif /* SRM_SETUP */ + /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the LCA_IOC_CONF register @@ -100,11 +105,11 @@ static int mk_conf_addr(unsigned char bus, unsigned char device_fn, return -1; } - *((vulp) LCA_IOC_CONF) = 0; + *(vulp)LCA_IOC_CONF = 0; addr = (1 << (11 + device)) | (func << 8) | where; } else { /* type 1 configuration cycle: */ - *((vulp) LCA_IOC_CONF) = 1; + *(vulp)LCA_IOC_CONF = 1; addr = (bus << 16) | (device_fn << 8) | where; } *pci_addr = addr; @@ -130,7 +135,7 @@ static unsigned int conf_read(unsigned long addr) value = *(vuip)addr; draina(); - stat0 = *((unsigned long*)LCA_IOC_STAT0); + stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); @@ -167,7 +172,7 @@ static void conf_write(unsigned long addr, unsigned int value) *(vuip)addr = value; draina(); - stat0 = *((unsigned long*)LCA_IOC_STAT0); + stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); @@ -287,6 +292,40 @@ int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn, unsigned long lca_init(unsigned long mem_start, unsigned long mem_end) { +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 0 for enabled and mapped to 0 */ + if ((*(vulp)LCA_IOC_W_BASE0 & (1UL<<33)) && + (*(vulp)LCA_IOC_T_BASE0 == 0)) + { + LCA_DMA_WIN_BASE = *(vulp)LCA_IOC_W_BASE0 & 0xffffffffUL; + LCA_DMA_WIN_SIZE = *(vulp)LCA_IOC_W_MASK0 & 0xffffffffUL; + LCA_DMA_WIN_SIZE += 1; +#if 1 + printk("lca_init: using Window 0 settings\n"); + printk("lca_init: BASE 0x%lx MASK 0x%lx TRANS 0x%lx\n", + *(vulp)LCA_IOC_W_BASE0, + *(vulp)LCA_IOC_W_MASK0, + *(vulp)LCA_IOC_T_BASE0); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if ((*(vulp)LCA_IOC_W_BASE1 & (1UL<<33)) && + (*(vulp)LCA_IOC_T_BASE1 == 0)) + { + LCA_DMA_WIN_BASE = *(vulp)LCA_IOC_W_BASE1 & 0xffffffffUL; + LCA_DMA_WIN_SIZE = *(vulp)LCA_IOC_W_MASK1 & 0xffffffffUL; + LCA_DMA_WIN_SIZE += 1; +#if 1 + printk("lca_init: using Window 1 settings\n"); + printk("lca_init: BASE 0x%lx MASK 0x%lx TRANS 0x%lx\n", + *(vulp)LCA_IOC_W_BASE1, + *(vulp)LCA_IOC_W_MASK1, + *(vulp)LCA_IOC_T_BASE1); +#endif + } + else /* we must use our defaults... */ +#endif /* SRM_SETUP */ + { /* * Set up the PCI->physical memory translation windows. * For now, window 1 is disabled. In the future, we may @@ -294,9 +333,11 @@ unsigned long lca_init(unsigned long mem_start, unsigned long mem_end) * goes at 1 GB and is 1 GB large. */ *(vulp)LCA_IOC_W_BASE1 = 0UL<<33; + *(vulp)LCA_IOC_W_BASE0 = 1UL<<33 | LCA_DMA_WIN_BASE; *(vulp)LCA_IOC_W_MASK0 = LCA_DMA_WIN_SIZE - 1; *(vulp)LCA_IOC_T_BASE0 = 0; + } /* * Disable PCI parity for now. 
The NCR53c810 chip has diff --git a/arch/alpha/kernel/mcpcia.c b/arch/alpha/kernel/mcpcia.c new file mode 100644 index 000000000..6a9dab59a --- /dev/null +++ b/arch/alpha/kernel/mcpcia.c @@ -0,0 +1,977 @@ +/* + * Code common to all MCbus-PCI adaptor chipsets + * + * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). + * + */ +#include <linux/kernel.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/hwrpb.h> +#include <asm/ptrace.h> +#include <asm/mmu_context.h> +#include <asm/delay.h> + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +extern struct hwrpb_struct *hwrpb; +extern asmlinkage void wrmces(unsigned long mces); + +/* + * BIOS32-style PCI interface: + */ + +#ifdef CONFIG_ALPHA_MCPCIA + +#undef DEBUG_CFG + +#ifdef DEBUG_CFG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + +#undef DEBUG_PCI + +#ifdef DEBUG_PCI +# define DBG_PCI(args) printk args +#else +# define DBG_PCI(args) +#endif + +#define DEBUG_MCHECK + +#ifdef DEBUG_MCHECK +# define DBG_MCK(args) printk args +# define DEBUG_MCHECK_DUMP +#else +# define DBG_MCK(args) +#endif + +#define vuip volatile unsigned int * +#define vulp volatile unsigned long * + +static volatile unsigned int MCPCIA_mcheck_expected[NR_CPUS]; +static volatile unsigned int MCPCIA_mcheck_taken[NR_CPUS]; +static unsigned int MCPCIA_jd[NR_CPUS]; + +#define MCPCIA_MAX_HOSES 2 +static int mcpcia_num_hoses = 0; + +static int pci_probe_enabled = 0; /* disable to start */ + +static struct linux_hose_info *mcpcia_root = NULL, *mcpcia_last_hose; + +struct linux_hose_info *bus2hose[256]; + +static inline unsigned long long_align(unsigned long addr) +{ + return ((addr + (sizeof(unsigned long) - 1)) & + ~(sizeof(unsigned long) - 1)); +} + +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int MCPCIA_DMA_WIN_BASE = MCPCIA_DMA_WIN_BASE_DEFAULT; +unsigned int MCPCIA_DMA_WIN_SIZE = MCPCIA_DMA_WIN_SIZE_DEFAULT; +unsigned long mcpcia_sm_base_r1, mcpcia_sm_base_r2, mcpcia_sm_base_r3; +#endif /* SRM_SETUP */ + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address and setup the MCPCIA_HAXR2 register + * accordingly. It is therefore not safe to have concurrent + * invocations to configuration space access routines, but there + * really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bit. 
+ * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., scsi and ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static unsigned int +conf_read(unsigned long addr, unsigned char type1, + struct linux_hose_info *hose) +{ + unsigned long flags; + unsigned long hoseno = hose->pci_hose_index; + unsigned int stat0, value, temp, cpu; + + cpu = smp_processor_id(); + + save_and_cli(flags); + + DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n", + addr, type1, hoseno)); + + /* reset status register to avoid losing errors: */ + stat0 = *(vuip)MCPCIA_CAP_ERR(hoseno); + *(vuip)MCPCIA_CAP_ERR(hoseno) = stat0; mb(); + temp = *(vuip)MCPCIA_CAP_ERR(hoseno); + DBG_CFG(("conf_read: MCPCIA CAP_ERR(%d) was 0x%x\n", hoseno, stat0)); + + mb(); + draina(); + MCPCIA_mcheck_expected[cpu] = 1; + MCPCIA_mcheck_taken[cpu] = 0; + mb(); + /* access configuration space: */ + value = *((vuip)addr); + mb(); + mb(); /* magic */ + if (MCPCIA_mcheck_taken[cpu]) { + MCPCIA_mcheck_taken[cpu] = 0; + value = 0xffffffffU; + mb(); + } + MCPCIA_mcheck_expected[cpu] = 0; + mb(); + + DBG_CFG(("conf_read(): finished\n")); + + restore_flags(flags); + return value; +} + + +static void +conf_write(unsigned long addr, unsigned int value, unsigned char type1, + struct linux_hose_info *hose) +{ + unsigned long flags; + unsigned long hoseno = hose->pci_hose_index; + unsigned int stat0, temp, cpu; + + cpu = smp_processor_id(); + + save_and_cli(flags); /* avoid getting hit by machine check */ + + /* reset status register to avoid losing errors: */ + stat0 = *(vuip)MCPCIA_CAP_ERR(hoseno); + *(vuip)MCPCIA_CAP_ERR(hoseno) = stat0; mb(); + temp = *(vuip)MCPCIA_CAP_ERR(hoseno); + DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", hoseno, stat0)); + + draina(); + MCPCIA_mcheck_expected[cpu] = 1; + mb(); + /* access configuration space: */ + *((vuip)addr) = value; + mb(); + mb(); /* magic */ + temp = *(vuip)MCPCIA_CAP_ERR(hoseno); /* read to force the write */ + MCPCIA_mcheck_expected[cpu] = 0; + mb(); + + DBG_CFG(("conf_write(): finished\n")); + restore_flags(flags); +} + +static int mk_conf_addr(struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned long *pci_addr, + unsigned char *type1) +{ + unsigned long addr; + + if (!pci_probe_enabled) /* if doing standard pci_init(), ignore */ + return -1; + + DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," + " pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + /* type 1 configuration cycle for *ALL* busses */ + *type1 = 1; + + if (hose->pci_first_busno == bus) + bus = 0; + addr = (bus << 16) | (device_fn << 8) | (where); + addr <<= 5; /* swizzle for SPARSE */ + addr |= hose->pci_config_space; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; 
+} + + +int hose_read_config_byte (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned char *value) +{ + unsigned long addr; + unsigned char type1; + + *value = 0xff; + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + addr |= 0x00; /* or in length */ + + *value = conf_read(addr, type1, hose) >> ((where & 3) * 8); + return PCIBIOS_SUCCESSFUL; +} + + +int hose_read_config_word (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned short *value) +{ + unsigned long addr; + unsigned char type1; + + *value = 0xffff; + + if (where & 0x1) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1)) { + return PCIBIOS_SUCCESSFUL; + } + + addr |= 0x08; /* or in length */ + + *value = conf_read(addr, type1, hose) >> ((where & 3) * 8); + return PCIBIOS_SUCCESSFUL; +} + + +int hose_read_config_dword (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned int *value) +{ + unsigned long addr; + unsigned char type1; + + *value = 0xffffffff; + + if (where & 0x3) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1)) { + return PCIBIOS_SUCCESSFUL; + } + addr |= 0x18; /* or in length */ + + *value = conf_read(addr, type1, hose); + return PCIBIOS_SUCCESSFUL; +} + + +int hose_write_config_byte (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned char value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + addr |= 0x00; /* or in length */ + + conf_write(addr, value << ((where & 3) * 8), type1, hose); + return PCIBIOS_SUCCESSFUL; +} + + +int hose_write_config_word (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned short value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + addr |= 0x08; /* or in length */ + + conf_write(addr, value << ((where & 3) * 8), type1, hose); + return PCIBIOS_SUCCESSFUL; +} + + +int hose_write_config_dword (struct linux_hose_info *hose, + unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned int value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(hose, bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + addr |= 0x18; /* or in length */ + + conf_write(addr, value << ((where & 3) * 8), type1, hose); + return PCIBIOS_SUCCESSFUL; +} + +int pcibios_read_config_byte (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned char *value) +{ + return hose_read_config_byte(bus2hose[bus], bus, devfn, where, value); +} + +int pcibios_read_config_word (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned short *value) +{ + return hose_read_config_word(bus2hose[bus], bus, devfn, where, value); +} + +int pcibios_read_config_dword (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned int *value) +{ + return hose_read_config_dword(bus2hose[bus], bus, devfn, where, value); +} + +int pcibios_write_config_byte (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned char value) +{ + return 
hose_write_config_byte(bus2hose[bus], bus, devfn, where, value); +} + +int pcibios_write_config_word (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned short value) +{ + return hose_write_config_word(bus2hose[bus], bus, devfn, where, value); +} + +int pcibios_write_config_dword (unsigned char bus, unsigned char devfn, + unsigned char where, unsigned int value) +{ + return hose_write_config_dword(bus2hose[bus], bus, devfn, where, value); +} + +unsigned long mcpcia_init(unsigned long mem_start, unsigned long mem_end) +{ + struct linux_hose_info *hose; + unsigned int mcpcia_err; + unsigned int pci_rev; + int h; + + mem_start = long_align(mem_start); + + for (h = 0; h < NR_CPUS; h++) { + MCPCIA_mcheck_expected[h] = 0; + MCPCIA_mcheck_taken[h] = 0; + } + + /* first, find how many hoses we have */ + for (h = 0; h < MCPCIA_MAX_HOSES; h++) { + pci_rev = *(vuip)MCPCIA_REV(h); +#if 0 + printk("mcpcia_init: got 0x%x for PCI_REV for hose %d\n", + pci_rev, h); +#endif + if ((pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST) { + mcpcia_num_hoses++; + + hose = (struct linux_hose_info *)mem_start; + mem_start = long_align(mem_start + sizeof(*hose)); + + memset(hose, 0, sizeof(*hose)); + + if (mcpcia_root) + mcpcia_last_hose->next = hose; + else + mcpcia_root = hose; + mcpcia_last_hose = hose; + + hose->pci_io_space = MCPCIA_IO(h); + hose->pci_mem_space = MCPCIA_DENSE(h); + hose->pci_config_space = MCPCIA_CONF(h); + hose->pci_sparse_space = MCPCIA_SPARSE(h); + hose->pci_hose_index = h; + hose->pci_first_busno = 255; + hose->pci_last_busno = 0; + } + } + +#if 1 + printk("mcpcia_init: found %d hoses\n", mcpcia_num_hoses); +#endif + + /* now do init for each hose */ + for (hose = mcpcia_root; hose; hose = hose->next) { + h = hose->pci_hose_index; +#if 0 +#define PRINTK printk +PRINTK("mcpcia_init: -------- hose %d --------\n",h); +PRINTK("mcpcia_init: MCPCIA_REV 0x%x\n", *(vuip)MCPCIA_REV(h)); +PRINTK("mcpcia_init: MCPCIA_WHOAMI 0x%x\n", *(vuip)MCPCIA_WHOAMI(h)); +PRINTK("mcpcia_init: MCPCIA_HAE_MEM 0x%x\n", *(vuip)MCPCIA_HAE_MEM(h)); +PRINTK("mcpcia_init: MCPCIA_HAE_IO 0x%x\n", *(vuip)MCPCIA_HAE_IO(h)); +PRINTK("mcpcia_init: MCPCIA_HAE_DENSE 0x%x\n", *(vuip)MCPCIA_HAE_DENSE(h)); +PRINTK("mcpcia_init: MCPCIA_INT_CTL 0x%x\n", *(vuip)MCPCIA_INT_CTL(h)); +PRINTK("mcpcia_init: MCPCIA_INT_REQ 0x%x\n", *(vuip)MCPCIA_INT_REQ(h)); +PRINTK("mcpcia_init: MCPCIA_INT_TARG 0x%x\n", *(vuip)MCPCIA_INT_TARG(h)); +PRINTK("mcpcia_init: MCPCIA_INT_ADR 0x%x\n", *(vuip)MCPCIA_INT_ADR(h)); +PRINTK("mcpcia_init: MCPCIA_INT_ADR_EXT 0x%x\n", *(vuip)MCPCIA_INT_ADR_EXT(h)); +PRINTK("mcpcia_init: MCPCIA_INT_MASK0 0x%x\n", *(vuip)MCPCIA_INT_MASK0(h)); +PRINTK("mcpcia_init: MCPCIA_INT_MASK1 0x%x\n", *(vuip)MCPCIA_INT_MASK1(h)); +PRINTK("mcpcia_init: MCPCIA_HBASE 0x%x\n", *(vuip)MCPCIA_HBASE(h)); +#endif + + /* + * Set up error reporting. Make sure CPU_PE is OFF in the mask. 
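
The hose_*_config_* helpers above all funnel through the same address recipe from mk_conf_addr(): shift the bus/devfn/register tuple left five bits for the SPARSE space, OR in the hose's config-space base, then OR in a length code (0x00 byte, 0x08 word, 0x18 longword). A small stand-alone sketch of just that arithmetic, with function and variable names of my own choosing:

#include <stdio.h>
#include <stdint.h>

/* model of the MCPCIA sparse config-space address formation */
static uint64_t sparse_cfg_addr(uint64_t cfg_base, unsigned int bus,
                                unsigned int devfn, unsigned int where,
                                unsigned int len_code)
{
        uint64_t addr = ((uint64_t)bus << 16) | (devfn << 8) | where;

        addr <<= 5;             /* swizzle for SPARSE */
        addr |= cfg_base;       /* hose->pci_config_space */
        return addr | len_code; /* 0x00 byte, 0x08 word, 0x18 dword */
}

int main(void)
{
        /* dword read of the vendor/device ID of bus 0, slot 7, function 0
         * (a config-space base of 0 keeps the example easy to read) */
        printf("0x%llx\n", (unsigned long long)
               sparse_cfg_addr(0, 0, (7 << 3) | 0, 0x00, 0x18));
        return 0;
}
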
+ */ +#if 0 + mcpcia_err = *(vuip)MCPCIA_ERR_MASK(h); + mcpcia_err &= ~4; + *(vuip)MCPCIA_ERR_MASK(h) = mcpcia_err; + mb(); + mcpcia_err = *(vuip)MCPCIA_ERR_MASK; +#endif + + mcpcia_err = *(vuip)MCPCIA_CAP_ERR(h); + mcpcia_err |= 0x0006; /* master/target abort */ + *(vuip)MCPCIA_CAP_ERR(h) = mcpcia_err; + mb() ; + mcpcia_err = *(vuip)MCPCIA_CAP_ERR(h); + +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 0 for enabled and mapped to 0 */ + if (((*(vuip)MCPCIA_W0_BASE(h) & 3) == 1) && + (*(vuip)MCPCIA_T0_BASE(h) == 0) && + ((*(vuip)MCPCIA_W0_MASK(h) & 0xfff00000U) > 0x0ff00000U)) + { + MCPCIA_DMA_WIN_BASE = *(vuip)MCPCIA_W0_BASE(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE = *(vuip)MCPCIA_W0_MASK(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("mcpcia_init: using Window 0 settings\n"); + printk("mcpcia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)MCPCIA_W0_BASE(h), + *(vuip)MCPCIA_W0_MASK(h), + *(vuip)MCPCIA_T0_BASE(h)); +#endif + } + else /* check window 1 for enabled and mapped to 0 */ + if (((*(vuip)MCPCIA_W1_BASE(h) & 3) == 1) && + (*(vuip)MCPCIA_T1_BASE(h) == 0) && + ((*(vuip)MCPCIA_W1_MASK(h) & 0xfff00000U) > 0x0ff00000U)) +{ + MCPCIA_DMA_WIN_BASE = *(vuip)MCPCIA_W1_BASE(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE = *(vuip)MCPCIA_W1_MASK(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("mcpcia_init: using Window 1 settings\n"); + printk("mcpcia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)MCPCIA_W1_BASE(h), + *(vuip)MCPCIA_W1_MASK(h), + *(vuip)MCPCIA_T1_BASE(h)); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if (((*(vuip)MCPCIA_W2_BASE(h) & 3) == 1) && + (*(vuip)MCPCIA_T2_BASE(h) == 0) && + ((*(vuip)MCPCIA_W2_MASK(h) & 0xfff00000U) > 0x0ff00000U)) + { + MCPCIA_DMA_WIN_BASE = *(vuip)MCPCIA_W2_BASE(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE = *(vuip)MCPCIA_W2_MASK(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("mcpcia_init: using Window 2 settings\n"); + printk("mcpcia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)MCPCIA_W2_BASE(h), + *(vuip)MCPCIA_W2_MASK(h), + *(vuip)MCPCIA_T2_BASE(h)); +#endif + } + else /* check window 3 for enabled and mapped to 0 */ + if (((*(vuip)MCPCIA_W3_BASE(h) & 3) == 1) && + (*(vuip)MCPCIA_T3_BASE(h) == 0) && + ((*(vuip)MCPCIA_W3_MASK(h) & 0xfff00000U) > 0x0ff00000U)) + { + MCPCIA_DMA_WIN_BASE = *(vuip)MCPCIA_W3_BASE(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE = *(vuip)MCPCIA_W3_MASK(h) & 0xfff00000U; + MCPCIA_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("mcpcia_init: using Window 3 settings\n"); + printk("mcpcia_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)MCPCIA_W3_BASE(h), + *(vuip)MCPCIA_W3_MASK(h), + *(vuip)MCPCIA_T3_BASE(h)); +#endif + } + else /* we must use our defaults which were pre-initialized... */ +#endif /* SRM_SETUP */ + { + /* + * Set up the PCI->physical memory translation windows. + * For now, windows 1,2 and 3 are disabled. In the future, we may + * want to use them to do scatter/gather DMA. Window 0 + * goes at 1 GB and is 1 GB large. 
+ */ + + *(vuip)MCPCIA_W0_BASE(h) = 1U | (MCPCIA_DMA_WIN_BASE & 0xfff00000U); + *(vuip)MCPCIA_W0_MASK(h) = (MCPCIA_DMA_WIN_SIZE - 1) & 0xfff00000U; + *(vuip)MCPCIA_T0_BASE(h) = 0; + + *(vuip)MCPCIA_W1_BASE(h) = 0x0 ; + *(vuip)MCPCIA_W2_BASE(h) = 0x0 ; + *(vuip)MCPCIA_W3_BASE(h) = 0x0 ; + + *(vuip)MCPCIA_HBASE(h) = 0x0 ; + mb(); + } + + /* + * check ASN in HWRPB for validity, report if bad + */ + if (hwrpb->max_asn != MAX_ASN) { + printk("mcpcia_init: max ASN from HWRPB is bad (0x%lx)\n", + hwrpb->max_asn); + hwrpb->max_asn = MAX_ASN; + } + +#if 0 + { + unsigned int mcpcia_int_ctl = *((vuip)MCPCIA_INT_CTL(h)); + printk("mcpcia_init: INT_CTL was 0x%x\n", mcpcia_int_ctl); + *(vuip)MCPCIA_INT_CTL(h) = 1U; mb(); + mcpcia_int_ctl = *(vuip)MCPCIA_INT_CTL(h); + } +#endif + + { + unsigned int mcpcia_hae_mem = *(vuip)MCPCIA_HAE_MEM(h); + unsigned int mcpcia_hae_io = *(vuip)MCPCIA_HAE_IO(h); +#if 0 + printk("mcpcia_init: HAE_MEM was 0x%x\n", mcpcia_hae_mem); + printk("mcpcia_init: HAE_IO was 0x%x\n", mcpcia_hae_io); +#endif +#ifdef CONFIG_ALPHA_SRM_SETUP + /* + sigh... For the SRM setup, unless we know apriori what the HAE + contents will be, we need to setup the arbitrary region bases + so we can test against the range of addresses and tailor the + region chosen for the SPARSE memory access. + + see include/asm-alpha/mcpcia.h for the SPARSE mem read/write + */ + mcpcia_sm_base_r1 = (mcpcia_hae_mem ) & 0xe0000000UL;/* reg 1 */ + mcpcia_sm_base_r2 = (mcpcia_hae_mem << 16) & 0xf8000000UL;/* reg 2 */ + mcpcia_sm_base_r3 = (mcpcia_hae_mem << 24) & 0xfc000000UL;/* reg 3 */ + /* + Set the HAE cache, so that setup_arch() code + will use the SRM setting always. Our readb/writeb + code in mcpcia.h expects never to have to change + the contents of the HAE. + */ + hae.cache = mcpcia_hae_mem; +#else /* SRM_SETUP */ + *(vuip)MCPCIA_HAE_MEM(h) = 0U; mb(); + mcpcia_hae_mem = *(vuip)MCPCIA_HAE_MEM(h); + *(vuip)MCPCIA_HAE_IO(h) = 0; mb(); + mcpcia_hae_io = *(vuip)MCPCIA_HAE_IO(h); +#endif /* SRM_SETUP */ + } + } /* end for-loop on hoses */ + return mem_start; +} + +int mcpcia_pci_clr_err(int h) +{ + unsigned int cpu = smp_processor_id(); + + MCPCIA_jd[cpu] = *(vuip)MCPCIA_CAP_ERR(h); +#if 0 + DBG_MCK(("MCPCIA_pci_clr_err: MCPCIA CAP_ERR(%d) after read 0x%x\n", + h, MCPCIA_jd[cpu])); +#endif + *(vuip)MCPCIA_CAP_ERR(h) = 0xffffffff; mb(); /* clear them all */ + MCPCIA_jd[cpu] = *(vuip)MCPCIA_CAP_ERR(h); + return 0; +} + +static void +mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout) +{ + struct el_common_EV5_uncorrectable_mcheck *frame; + int i; + + frame = &logout->procdata; + + /* Print PAL fields */ + for (i = 0; i < 24; i += 2) { + printk("\tpal temp[%d-%d]\t\t= %16lx %16lx\n\r", + i, i+1, frame->paltemp[i], frame->paltemp[i+1]); + } + for (i = 0; i < 8; i += 2) { + printk("\tshadow[%d-%d]\t\t= %16lx %16lx\n\r", + i, i+1, frame->shadow[i], + frame->shadow[i+1]); + } + printk("\tAddr of excepting instruction\t= %16lx\n\r", + frame->exc_addr); + printk("\tSummary of arithmetic traps\t= %16lx\n\r", + frame->exc_sum); + printk("\tException mask\t\t\t= %16lx\n\r", + frame->exc_mask); + printk("\tBase address for PALcode\t= %16lx\n\r", + frame->pal_base); + printk("\tInterrupt Status Reg\t\t= %16lx\n\r", + frame->isr); + printk("\tCURRENT SETUP OF EV5 IBOX\t= %16lx\n\r", + frame->icsr); + printk("\tI-CACHE Reg %s parity error\t= %16lx\n\r", + (frame->ic_perr_stat & 0x800L) ? 
+ "Data" : "Tag", + frame->ic_perr_stat); + printk("\tD-CACHE error Reg\t\t= %16lx\n\r", + frame->dc_perr_stat); + if (frame->dc_perr_stat & 0x2) { + switch (frame->dc_perr_stat & 0x03c) { + case 8: + printk("\t\tData error in bank 1\n\r"); + break; + case 4: + printk("\t\tData error in bank 0\n\r"); + break; + case 20: + printk("\t\tTag error in bank 1\n\r"); + break; + case 10: + printk("\t\tTag error in bank 0\n\r"); + break; + } + } + printk("\tEffective VA\t\t\t= %16lx\n\r", + frame->va); + printk("\tReason for D-stream\t\t= %16lx\n\r", + frame->mm_stat); + printk("\tEV5 SCache address\t\t= %16lx\n\r", + frame->sc_addr); + printk("\tEV5 SCache TAG/Data parity\t= %16lx\n\r", + frame->sc_stat); + printk("\tEV5 BC_TAG_ADDR\t\t\t= %16lx\n\r", + frame->bc_tag_addr); + printk("\tEV5 EI_ADDR: Phys addr of Xfer\t= %16lx\n\r", + frame->ei_addr); + printk("\tFill Syndrome\t\t\t= %16lx\n\r", + frame->fill_syndrome); + printk("\tEI_STAT reg\t\t\t= %16lx\n\r", + frame->ei_stat); + printk("\tLD_LOCK\t\t\t\t= %16lx\n\r", + frame->ld_lock); +} + +void mcpcia_machine_check(unsigned long type, unsigned long la_ptr, + struct pt_regs * regs) +{ +#if 0 + printk("mcpcia machine check ignored\n") ; +#else + struct el_common *mchk_header; + struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; + unsigned int cpu = smp_processor_id(); + int h = 0; + + mchk_header = (struct el_common *)la_ptr; + mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; + +#if 0 + DBG_MCK(("mcpcia_machine_check: type=0x%lx la_ptr=0x%lx\n", + type, la_ptr)); + DBG_MCK(("\t\t pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n", + regs->pc, mchk_header->size, mchk_header->proc_offset, + mchk_header->sys_offset)); +#endif + /* + * Check if machine check is due to a badaddr() and if so, + * ignore the machine check. + */ + mb(); + mb(); /* magic */ + if (MCPCIA_mcheck_expected[cpu]) { +#if 0 + DBG_MCK(("MCPCIA machine check expected\n")); +#endif + MCPCIA_mcheck_expected[cpu] = 0; + MCPCIA_mcheck_taken[cpu] = 1; + mb(); + mb(); /* magic */ + draina(); + mcpcia_pci_clr_err(h); + wrmces(0x7); + mb(); + } +#if 1 + else { + printk("MCPCIA machine check NOT expected on CPU %d\n", cpu); + DBG_MCK(("mcpcia_machine_check: type=0x%lx pc=0x%lx" + " code=0x%lx\n", + type, regs->pc, mchk_header->code)); + + MCPCIA_mcheck_expected[cpu] = 0; + MCPCIA_mcheck_taken[cpu] = 1; + mb(); + mb(); /* magic */ + draina(); + mcpcia_pci_clr_err(h); + wrmces(0x7); + mb(); +#ifdef DEBUG_MCHECK_DUMP + if (type == 0x620) + printk("MCPCIA machine check: system CORRECTABLE!\n"); + else if (type == 0x630) + printk("MCPCIA machine check: processor CORRECTABLE!\n"); + else + mcpcia_print_uncorrectable(mchk_logout); +#endif /* DEBUG_MCHECK_DUMP */ + } +#endif +#endif +} + +/*==========================================================================*/ + +#define PRIMARY(b) ((b)&0xff) +#define SECONDARY(b) (((b)>>8)&0xff) +#define SUBORDINATE(b) (((b)>>16)&0xff) + +static int +hose_scan_bridges(struct linux_hose_info *hose, unsigned char bus) +{ + unsigned int devfn, l, class; + unsigned char hdr_type = 0; + unsigned int found = 0; + + for (devfn = 0; devfn < 0xff; ++devfn) { + if (PCI_FUNC(devfn) == 0) { + hose_read_config_byte(hose, bus, devfn, + PCI_HEADER_TYPE, &hdr_type); + } else if (!(hdr_type & 0x80)) { + /* not a multi-function device */ + continue; + } + + /* Check if there is anything here. 
*/ + hose_read_config_dword(hose, bus, devfn, PCI_VENDOR_ID, &l); + if (l == 0xffffffff || l == 0x00000000) { + hdr_type = 0; + continue; + } + + /* See if this is a bridge device. */ + hose_read_config_dword(hose, bus, devfn, + PCI_CLASS_REVISION, &class); + + if ((class >> 16) == PCI_CLASS_BRIDGE_PCI) { + unsigned int busses; + + found++; + + hose_read_config_dword(hose, bus, devfn, + PCI_PRIMARY_BUS, &busses); + +DBG_PCI(("hose_scan_bridges: hose %d bus %d slot %d busses 0x%x\n", + hose->pci_hose_index, bus, PCI_SLOT(devfn), busses)); + /* + * do something with first_busno and last_busno + */ + if (hose->pci_first_busno > PRIMARY(busses)) { + hose->pci_first_busno = PRIMARY(busses); +DBG_PCI(("hose_scan_bridges: hose %d bus %d slot %d change first to %d\n", + hose->pci_hose_index, bus, PCI_SLOT(devfn), PRIMARY(busses))); + } + if (hose->pci_last_busno < SUBORDINATE(busses)) { + hose->pci_last_busno = SUBORDINATE(busses); +DBG_PCI(("hose_scan_bridges: hose %d bus %d slot %d change last to %d\n", + hose->pci_hose_index, bus, PCI_SLOT(devfn), SUBORDINATE(busses))); + } + /* + * Now scan everything underneath the bridge. + */ + hose_scan_bridges(hose, SECONDARY(busses)); + } + } + return found; +} + +static void +hose_reconfigure_bridges(struct linux_hose_info *hose, unsigned char bus) +{ + unsigned int devfn, l, class; + unsigned char hdr_type = 0; + + for (devfn = 0; devfn < 0xff; ++devfn) { + if (PCI_FUNC(devfn) == 0) { + hose_read_config_byte(hose, bus, devfn, + PCI_HEADER_TYPE, &hdr_type); + } else if (!(hdr_type & 0x80)) { + /* not a multi-function device */ + continue; + } + + /* Check if there is anything here. */ + hose_read_config_dword(hose, bus, devfn, PCI_VENDOR_ID, &l); + if (l == 0xffffffff || l == 0x00000000) { + hdr_type = 0; + continue; + } + + /* See if this is a bridge device. */ + hose_read_config_dword(hose, bus, devfn, + PCI_CLASS_REVISION, &class); + + if ((class >> 16) == PCI_CLASS_BRIDGE_PCI) { + unsigned int busses; + + hose_read_config_dword(hose, bus, devfn, + PCI_PRIMARY_BUS, &busses); + + /* + * First reconfigure everything underneath the bridge. + */ + hose_reconfigure_bridges(hose, (busses >> 8) & 0xff); + + /* + * Unconfigure this bridges bus numbers, + * pci_scan_bus() will fix this up properly. + */ + busses &= 0xff000000; + hose_write_config_dword(hose, bus, devfn, + PCI_PRIMARY_BUS, busses); + } + } +} + +static void mcpcia_fixup_busno(struct linux_hose_info *hose, unsigned char bus) +{ + unsigned int nbus; + + /* + * First, scan for all bridge devices underneath this hose, + * to determine the first and last busnos. + */ + if (!hose_scan_bridges(hose, 0)) { + /* none found, exit */ + hose->pci_first_busno = bus; + hose->pci_last_busno = bus; + } else { + /* + * Reconfigure all bridge devices underneath this hose. + */ + hose_reconfigure_bridges(hose, hose->pci_first_busno); + } + + /* + * Now reconfigure the hose to it's new bus number and set up + * our bus2hose mapping for this hose. 
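
The bus2hose fill that follows this comment walks every bus number the hose now owns and points it back at the hose, so the pcibios_* wrappers can recover the hose from a bare bus number. A stand-alone model of that loop; it stores hose indices instead of hose pointers, and the array size and example hoses are invented:

#include <stdio.h>

#define MAX_BUS 256

static int bus2hose_idx[MAX_BUS];       /* hose index per bus, -1 = unmapped */

/* same shape as the fill below: nbus is last_busno - first_busno */
static void map_hose(int hose_index, int first_bus, int nbus)
{
        int bus = first_bus;

        do {
                bus2hose_idx[bus++] = hose_index;
        } while (nbus-- > 0);
}

int main(void)
{
        int i;

        for (i = 0; i < MAX_BUS; i++)
                bus2hose_idx[i] = -1;
        map_hose(0, 0, 2);              /* hose 0 owns busses 0, 1, 2 */
        map_hose(1, 3, 0);              /* hose 1 owns bus 3 only */
        for (i = 0; i < 5; i++)
                printf("bus %d -> hose %d\n", i, bus2hose_idx[i]);
        return 0;
}
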
+ */ + nbus = hose->pci_last_busno - hose->pci_first_busno; + + hose->pci_first_busno = bus; + +DBG_PCI(("mcpcia_fixup_busno: hose %d startbus %d nbus %d\n", + hose->pci_hose_index, bus, nbus)); + + do { + bus2hose[bus++] = hose; + } while (nbus-- > 0); +} + +static void mcpcia_probe(struct linux_hose_info *hose, + unsigned long *mem_start) +{ + static struct pci_bus *pchain = NULL; + struct pci_bus *pbus = &hose->pci_bus; + static unsigned char busno = 0; + + /* Hoses include child PCI bridges in bus-range property, + * but we don't scan each of those ourselves, Linux generic PCI + * probing code will find child bridges and link them into this + * hose's root PCI device hierarchy. + */ + + pbus->number = pbus->secondary = busno; + pbus->sysdata = hose; + + mcpcia_fixup_busno(hose, busno); + + pbus->subordinate = pci_scan_bus(pbus, mem_start); /* the original! */ + + /* + * Set the maximum subordinate bus of this hose. + */ + hose->pci_last_busno = pbus->subordinate; +#if 0 + hose_write_config_byte(hose, busno, 0, 0x41, hose->pci_last_busno); +#endif + busno = pbus->subordinate + 1; + + /* + * Fixup the chain of primary PCI busses. + */ + if (pchain) { + pchain->next = &hose->pci_bus; + pchain = pchain->next; + } else { + pchain = &pci_root; + memcpy(pchain, &hose->pci_bus, sizeof(pci_root)); + } +} + +void mcpcia_fixup(void) +{ + struct linux_hose_info *hose; + + /* turn on Config space access finally! */ + pci_probe_enabled = 1; + + /* for each hose, probe and setup the devices on the hose */ + for (hose = mcpcia_root; hose; hose = hose->next) { + mcpcia_probe(hose, &memory_start); + } +} +#endif /* CONFIG_ALPHA_MCPCIA */ diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index f8146c54f..401a7af46 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -66,10 +66,49 @@ asmlinkage int sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, struct pt_regs regs) { +#if !defined(CONFIG_ALPHA_TSUNAMI) (®s)->hae = hae; +#endif return 0; } +#ifdef __SMP__ +/* This is being executed in task 0 'user space'. */ +#define resched_needed() 1 +int cpu_idle(void *unused) +{ + extern volatile int smp_commenced; + + current->priority = -100; + while (1) { + /* + * tq_scheduler currently assumes we're running in a process + * context (ie that we hold the kernel lock..) 
+ */ + if (tq_scheduler) { + lock_kernel(); + run_task_queue(&tq_scheduler); + unlock_kernel(); + } + /* endless idle loop with no priority at all */ + current->counter = -100; + if (!smp_commenced || resched_needed()) { + schedule(); + } + } +} + +asmlinkage int sys_idle(void) +{ + if(current->pid != 0) + return -EPERM; + + cpu_idle(NULL); + return 0; +} + +#else /* __SMP__ */ + asmlinkage int sys_idle(void) { int ret = -EPERM; @@ -88,6 +127,12 @@ out: unlock_kernel(); return ret; } +#endif /* __SMP__ */ + +#if defined(CONFIG_ALPHA_SRM_SETUP) +extern void reset_for_srm(void); +extern unsigned long srm_hae; +#endif static void finish_shutdown(void) { @@ -96,8 +141,8 @@ static void finish_shutdown(void) unsigned long flags; /* i'm not sure if i really need to disable interrupts here */ - save_flags(flags); - cli(); + save_and_cli(flags); + /* reset periodic interrupt frequency */ CMOS_WRITE(0x26, RTC_FREQ_SELECT); @@ -131,6 +176,10 @@ void machine_restart(char * __unused) /* flags |= 0x0000000000030000UL; *//* this is "warm bootstrap" */ cpup->flags = flags; mb(); +#if defined(CONFIG_ALPHA_SRM_SETUP) + reset_for_srm(); + set_hae(srm_hae); +#endif #endif /* SRM */ finish_shutdown(); @@ -150,6 +199,10 @@ void machine_halt(void) flags |= 0x0000000000040000UL; /* this is "remain halted" */ cpup->flags = flags; mb(); +#if defined(CONFIG_ALPHA_SRM_SETUP) + reset_for_srm(); + set_hae(srm_hae); +#endif finish_shutdown(); #endif /* SRM */ @@ -228,6 +281,7 @@ int alpha_clone(unsigned long clone_flags, unsigned long usp, } extern void ret_from_sys_call(void); +extern void ret_from_smpfork(void); /* * Copy an alpha thread.. * @@ -258,7 +312,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, stack = ((struct switch_stack *) regs) - 1; childstack = ((struct switch_stack *) childregs) - 1; *childstack = *stack; +#ifdef __SMP__ + childstack->r26 = (unsigned long) ret_from_smpfork; +#else childstack->r26 = (unsigned long) ret_from_sys_call; +#endif p->tss.usp = usp; p->tss.ksp = (unsigned long) childstack; p->tss.pal_flags = 1; /* set FEN, clear everything else */ diff --git a/arch/alpha/kernel/pyxis.c b/arch/alpha/kernel/pyxis.c index b4c5d188e..de3814b66 100644 --- a/arch/alpha/kernel/pyxis.c +++ b/arch/alpha/kernel/pyxis.c @@ -7,7 +7,6 @@ #include <linux/config.h> /* CONFIG_ALPHA_RUFFIAN. */ #include <linux/kernel.h> #include <linux/types.h> -#include <linux/bios32.h> #include <linux/pci.h> #include <linux/sched.h> @@ -17,13 +16,12 @@ #include <asm/ptrace.h> #include <asm/mmu_context.h> -/* NOTE: Herein are back-to-back mb insns. They are magic. - A plausible explanation is that the i/o controler does not properly +/* NOTE: Herein are back-to-back mb instructions. They are magic. + One plausible explanation is that the I/O controller does not properly handle the system transaction. Another involves timing. Ho hum. 
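
The "magic" referred to here shows up throughout this patch as one recurring idiom: write a chipset CSR, issue mb(), then read the register back so the posted write actually reaches the chip before execution continues (the hunks below comment it as "re-read to force write"). A user-space stand-in for the shape of that idiom; a plain volatile variable and __sync_synchronize() substitute for real MMIO and mb():

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t fake_csr;      /* stands in for a chipset CSR */

/* write, barrier, read back: the dummy read is what flushes the posted
 * write out of the write buffer in the real MMIO case */
static uint32_t csr_write_flush(uint32_t value)
{
        fake_csr = value;
        __sync_synchronize();           /* user-space stand-in for mb() */
        return fake_csr;                /* re-read to force the write */
}

int main(void)
{
        printf("CSR reads back 0x%x\n", (unsigned)csr_write_flush(0x180));
        return 0;
}
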
*/ extern struct hwrpb_struct *hwrpb; extern asmlinkage void wrmces(unsigned long mces); -extern int alpha_sys_type; /* * BIOS32-style PCI interface: @@ -38,6 +36,7 @@ extern int alpha_sys_type; #define DEBUG_MCHECK #ifdef DEBUG_MCHECK # define DBG_MCK(args) printk args +#define DEBUG_MCHECK_DUMP #else # define DBG_MCK(args) #endif @@ -49,6 +48,11 @@ static volatile unsigned int PYXIS_mcheck_expected = 0; static volatile unsigned int PYXIS_mcheck_taken = 0; static unsigned int PYXIS_jd; +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int PYXIS_DMA_WIN_BASE = PYXIS_DMA_WIN_BASE_DEFAULT; +unsigned int PYXIS_DMA_WIN_SIZE = PYXIS_DMA_WIN_SIZE_DEFAULT; +unsigned long pyxis_sm_base_r1, pyxis_sm_base_r2, pyxis_sm_base_r3; +#endif /* SRM_SETUP */ /* * Given a bus, device, and function number, compute resulting @@ -129,24 +133,23 @@ static int mk_conf_addr(unsigned char bus, unsigned char device_fn, static unsigned int conf_read(unsigned long addr, unsigned char type1) { unsigned long flags; - unsigned int stat0, value; + unsigned int stat0, value, temp; unsigned int pyxis_cfg = 0; /* to keep gcc quiet */ - save_flags(flags); /* avoid getting hit by machine check */ - cli(); + save_and_cli(flags); /* avoid getting hit by machine check */ DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1)); /* reset status register to avoid losing errors: */ stat0 = *(vuip)PYXIS_ERR; - *(vuip)PYXIS_ERR = stat0; - mb(); + *(vuip)PYXIS_ERR = stat0; mb(); + temp = *(vuip)PYXIS_ERR; /* re-read to force write */ DBG(("conf_read: PYXIS ERR was 0x%x\n", stat0)); /* if Type1 access, must set PYXIS CFG */ if (type1) { pyxis_cfg = *(vuip)PYXIS_CFG; - *(vuip)PYXIS_CFG = pyxis_cfg | 1; - mb(); + *(vuip)PYXIS_CFG = pyxis_cfg | 1; mb(); + temp = *(vuip)PYXIS_CFG; /* re-read to force write */ DBG(("conf_read: TYPE1 access\n")); } @@ -166,36 +169,11 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1) } PYXIS_mcheck_expected = 0; mb(); - /* - * david.rusling@reo.mts.dec.com. This code is needed for the - * EB64+ as it does not generate a machine check (why I don't - * know). When we build kernels for one particular platform - * then we can make this conditional on the type. - */ -#if 0 - draina(); - - /* now look for any errors */ - stat0 = *(vuip)PYXIS_IOC_PYXIS_ERR; - DBG(("conf_read: PYXIS ERR after read 0x%x\n", stat0)); - if (stat0 & 0x8280U) { /* is any error bit set? 
*/ - /* if not NDEV, print status */ - if (!(stat0 & 0x0080)) { - printk("PYXIS.c:conf_read: got stat0=%x\n", stat0); - } - - /* reset error status: */ - *(vulp)PYXIS_IOC_PYXIS_ERR = stat0; - mb(); - wrmces(0x7); /* reset machine check */ - value = 0xffffffff; - } -#endif /* if Type1 access, must reset IOC CFG so normal IO space ops work */ if (type1) { - *(vuip)PYXIS_CFG = pyxis_cfg & ~1; - mb(); + *(vuip)PYXIS_CFG = pyxis_cfg & ~1; mb(); + temp = *(vuip)PYXIS_CFG; /* re-read to force write */ } DBG(("conf_read(): finished\n")); @@ -209,22 +187,21 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char type1) { unsigned long flags; - unsigned int stat0; + unsigned int stat0, temp; unsigned int pyxis_cfg = 0; /* to keep gcc quiet */ - save_flags(flags); /* avoid getting hit by machine check */ - cli(); + save_and_cli(flags); /* avoid getting hit by machine check */ /* reset status register to avoid losing errors: */ stat0 = *(vuip)PYXIS_ERR; - *(vuip)PYXIS_ERR = stat0; - mb(); + *(vuip)PYXIS_ERR = stat0; mb(); + temp = *(vuip)PYXIS_ERR; /* re-read to force write */ DBG(("conf_write: PYXIS ERR was 0x%x\n", stat0)); /* if Type1 access, must set PYXIS CFG */ if (type1) { pyxis_cfg = *(vuip)PYXIS_CFG; - *(vuip)PYXIS_CFG = pyxis_cfg | 1; - mb(); + *(vuip)PYXIS_CFG = pyxis_cfg | 1; mb(); + temp = *(vuip)PYXIS_CFG; /* re-read to force write */ DBG(("conf_read: TYPE1 access\n")); } @@ -235,13 +212,14 @@ static void conf_write(unsigned long addr, unsigned int value, *(vuip)addr = value; mb(); mb(); /* magic */ + temp = *(vuip)PYXIS_ERR; /* do a PYXIS read to force the write */ PYXIS_mcheck_expected = 0; mb(); /* if Type1 access, must reset IOC CFG so normal IO space ops work */ if (type1) { - *(vuip)PYXIS_CFG = pyxis_cfg & ~1; - mb(); + *(vuip)PYXIS_CFG = pyxis_cfg & ~1; mb(); + temp = *(vuip)PYXIS_CFG; /* re-read to force write */ } DBG(("conf_write(): finished\n")); @@ -367,19 +345,105 @@ unsigned long pyxis_init(unsigned long mem_start, unsigned long mem_end) { unsigned int pyxis_err ; +#if 0 +printk("pyxis_init: PYXIS_ERR_MASK 0x%x\n", *(vuip)PYXIS_ERR_MASK); +printk("pyxis_init: PYXIS_ERR 0x%x\n", *(vuip)PYXIS_ERR); + +printk("pyxis_init: PYXIS_INT_REQ 0x%lx\n", *(vulp)PYXIS_INT_REQ); +printk("pyxis_init: PYXIS_INT_MASK 0x%lx\n", *(vulp)PYXIS_INT_MASK); +printk("pyxis_init: PYXIS_INT_ROUTE 0x%lx\n", *(vulp)PYXIS_INT_ROUTE); +printk("pyxis_init: PYXIS_INT_HILO 0x%lx\n", *(vulp)PYXIS_INT_HILO); +printk("pyxis_init: PYXIS_INT_CNFG 0x%x\n", *(vuip)PYXIS_INT_CNFG); +printk("pyxis_init: PYXIS_RT_COUNT 0x%lx\n", *(vulp)PYXIS_RT_COUNT); +#endif + /* - * Set up error reporting. + * Set up error reporting. Make sure CPU_PE is OFF in the mask. 
*/ + pyxis_err = *(vuip)PYXIS_ERR_MASK; + pyxis_err &= ~4; + *(vuip)PYXIS_ERR_MASK = pyxis_err; mb(); + pyxis_err = *(vuip)PYXIS_ERR_MASK; /* re-read to force write */ + pyxis_err = *(vuip)PYXIS_ERR ; pyxis_err |= 0x180; /* master/target abort */ - *(vuip)PYXIS_ERR = pyxis_err ; - mb() ; - pyxis_err = *(vuip)PYXIS_ERR ; - -#ifdef CONFIG_ALPHA_RUFFIAN - printk("pyxis_init: Skipping window register rewrites --" + *(vuip)PYXIS_ERR = pyxis_err; mb(); + pyxis_err = *(vuip)PYXIS_ERR; /* re-read to force write */ + +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 0 for enabled and mapped to 0 */ + if (((*(vuip)PYXIS_W0_BASE & 3) == 1) && + (*(vuip)PYXIS_T0_BASE == 0) && + ((*(vuip)PYXIS_W0_MASK & 0xfff00000U) > 0x0ff00000U)) + { + PYXIS_DMA_WIN_BASE = *(vuip)PYXIS_W0_BASE & 0xfff00000U; + PYXIS_DMA_WIN_SIZE = *(vuip)PYXIS_W0_MASK & 0xfff00000U; + PYXIS_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("pyxis_init: using Window 0 settings\n"); + printk("pyxis_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)PYXIS_W0_BASE, + *(vuip)PYXIS_W0_MASK, + *(vuip)PYXIS_T0_BASE); +#endif + } + else /* check window 1 for enabled and mapped to 0 */ + if (((*(vuip)PYXIS_W1_BASE & 3) == 1) && + (*(vuip)PYXIS_T1_BASE == 0) && + ((*(vuip)PYXIS_W1_MASK & 0xfff00000U) > 0x0ff00000U)) +{ + PYXIS_DMA_WIN_BASE = *(vuip)PYXIS_W1_BASE & 0xfff00000U; + PYXIS_DMA_WIN_SIZE = *(vuip)PYXIS_W1_MASK & 0xfff00000U; + PYXIS_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("pyxis_init: using Window 1 settings\n"); + printk("pyxis_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)PYXIS_W1_BASE, + *(vuip)PYXIS_W1_MASK, + *(vuip)PYXIS_T1_BASE); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if (((*(vuip)PYXIS_W2_BASE & 3) == 1) && + (*(vuip)PYXIS_T2_BASE == 0) && + ((*(vuip)PYXIS_W2_MASK & 0xfff00000U) > 0x0ff00000U)) + { + PYXIS_DMA_WIN_BASE = *(vuip)PYXIS_W2_BASE & 0xfff00000U; + PYXIS_DMA_WIN_SIZE = *(vuip)PYXIS_W2_MASK & 0xfff00000U; + PYXIS_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("pyxis_init: using Window 2 settings\n"); + printk("pyxis_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)PYXIS_W2_BASE, + *(vuip)PYXIS_W2_MASK, + *(vuip)PYXIS_T2_BASE); +#endif + } + else /* check window 3 for enabled and mapped to 0 */ + if (((*(vuip)PYXIS_W3_BASE & 3) == 1) && + (*(vuip)PYXIS_T3_BASE == 0) && + ((*(vuip)PYXIS_W3_MASK & 0xfff00000U) > 0x0ff00000U)) + { + PYXIS_DMA_WIN_BASE = *(vuip)PYXIS_W3_BASE & 0xfff00000U; + PYXIS_DMA_WIN_SIZE = *(vuip)PYXIS_W3_MASK & 0xfff00000U; + PYXIS_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("pyxis_init: using Window 3 settings\n"); + printk("pyxis_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vuip)PYXIS_W3_BASE, + *(vuip)PYXIS_W3_MASK, + *(vuip)PYXIS_T3_BASE); +#endif + } + else /* we must use our defaults which were pre-initialized... */ +#endif /* SRM_SETUP */ + { +#if defined(CONFIG_ALPHA_RUFFIAN) +#if 1 + printk("pyxis_init: skipping window register rewrites... " " trust DeskStation firmware!\n"); -#else +#endif +#else /* RUFFIAN */ /* * Set up the PCI->physical memory translation windows. * For now, windows 1,2 and 3 are disabled. 
In the future, we may @@ -395,7 +459,8 @@ unsigned long pyxis_init(unsigned long mem_start, unsigned long mem_end) *(vuip)PYXIS_W2_BASE = 0x0 ; *(vuip)PYXIS_W3_BASE = 0x0 ; mb(); -#endif +#endif /* RUFFIAN */ + } /* * check ASN in HWRPB for validity, report if bad @@ -407,18 +472,21 @@ unsigned long pyxis_init(unsigned long mem_start, unsigned long mem_end) } /* - * Finally, clear the PYXIS_CFG register, which gets used + * Next, clear the PYXIS_CFG register, which gets used * for PCI Config Space accesses. That is the way * we want to use it, and we do not want to depend on * what ARC or SRM might have left behind... */ { - unsigned int pyxis_cfg; + unsigned int pyxis_cfg, temp; pyxis_cfg = *(vuip)PYXIS_CFG; mb(); -#if 0 + if (pyxis_cfg != 0) { +#if 1 printk("PYXIS_init: CFG was 0x%x\n", pyxis_cfg); #endif *(vuip)PYXIS_CFG = 0; mb(); + temp = *(vuip)PYXIS_CFG; /* re-read to force write */ + } } { @@ -428,10 +496,48 @@ unsigned long pyxis_init(unsigned long mem_start, unsigned long mem_end) printk("PYXIS_init: HAE_MEM was 0x%x\n", pyxis_hae_mem); printk("PYXIS_init: HAE_IO was 0x%x\n", pyxis_hae_io); #endif - *(vuip)PYXIS_HAE_MEM = 0; mb(); - pyxis_hae_mem = *(vuip)PYXIS_HAE_MEM; +#ifdef CONFIG_ALPHA_SRM_SETUP + /* + * sigh... For the SRM setup, unless we know apriori what the HAE + * contents will be, we need to setup the arbitrary region bases + * so we can test against the range of addresses and tailor the + * region chosen for the SPARSE memory access. + * + * see include/asm-alpha/pyxis.h for the SPARSE mem read/write + */ + pyxis_sm_base_r1 = (pyxis_hae_mem ) & 0xe0000000UL;/* region 1 */ + pyxis_sm_base_r2 = (pyxis_hae_mem << 16) & 0xf8000000UL;/* region 2 */ + pyxis_sm_base_r3 = (pyxis_hae_mem << 24) & 0xfc000000UL;/* region 3 */ + + /* + Set the HAE cache, so that setup_arch() code + will use the SRM setting always. Our readb/writeb + code in pyxis.h expects never to have to change + the contents of the HAE. + */ + hae.cache = pyxis_hae_mem; +#else /* SRM_SETUP */ + *(vuip)PYXIS_HAE_MEM = 0U; mb(); + pyxis_hae_mem = *(vuip)PYXIS_HAE_MEM; /* re-read to force write */ *(vuip)PYXIS_HAE_IO = 0; mb(); - pyxis_hae_io = *(vuip)PYXIS_HAE_IO; + pyxis_hae_io = *(vuip)PYXIS_HAE_IO; /* re-read to force write */ +#endif /* SRM_SETUP */ + } + + /* + * Finally, check that the PYXIS_CTRL1 has IOA_BEN set for + * enabling byte/word PCI bus space(s) access. 
+ */ + { + unsigned int ctrl1; + ctrl1 = *(vuip) PYXIS_CTRL1; + if (!(ctrl1 & 1)) { +#if 1 + printk("PYXIS_init: enabling byte/word PCI space\n"); +#endif + *(vuip) PYXIS_CTRL1 = ctrl1 | 1; mb(); + ctrl1 = *(vuip)PYXIS_CTRL1; /* re-read to force write */ + } } return mem_start; @@ -441,9 +547,8 @@ int pyxis_pci_clr_err(void) { PYXIS_jd = *(vuip)PYXIS_ERR; DBG(("PYXIS_pci_clr_err: PYXIS ERR after read 0x%x\n", PYXIS_jd)); - *(vuip)PYXIS_ERR = 0x0180; - mb(); - PYXIS_jd = *(vuip)PYXIS_ERR; + *(vuip)PYXIS_ERR = 0x0180; mb(); + PYXIS_jd = *(vuip)PYXIS_ERR; /* re-read to force write */ return 0; } @@ -486,7 +591,7 @@ void pyxis_machine_check(unsigned long vector, unsigned long la_ptr, */ mb(); mb(); /* magic */ - if (PYXIS_mcheck_expected/* && (mchk_sysdata->epic_dcsr && 0x0c00UL)*/) { + if (PYXIS_mcheck_expected) { DBG(("PYXIS machine check expected\n")); PYXIS_mcheck_expected = 0; PYXIS_mcheck_taken = 1; @@ -502,7 +607,8 @@ void pyxis_machine_check(unsigned long vector, unsigned long la_ptr, printk("PYXIS machine check NOT expected\n") ; DBG_MCK(("pyxis_machine_check: vector=0x%lx la_ptr=0x%lx\n", vector, la_ptr)); - DBG_MCK(("\t\t pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n", + DBG_MCK(("\t\t pc=0x%lx size=0x%x procoffset=0x%x" + " sysoffset 0x%x\n", regs->pc, mchk_header->size, mchk_header->proc_offset, mchk_header->sys_offset)); PYXIS_mcheck_expected = 0; diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 9ce9c1f17..bcab8667f 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -22,6 +22,7 @@ #include <linux/delay.h> #include <linux/config.h> /* CONFIG_ALPHA_LCA etc */ #include <linux/ioport.h> +#include <linux/mc146818rtc.h> #ifdef CONFIG_RTC #include <linux/timex.h> @@ -34,11 +35,24 @@ #include <asm/dma.h> #include <asm/io.h> +extern void setup_smp(void); +extern char *smp_info(void); + +#if 1 +# define DBG_SRM(args) printk args +#else +# define DBG_SRM(args) +#endif + struct hae hae = { 0, (unsigned long*) HAE_ADDRESS }; +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned long srm_hae; +#endif + struct hwrpb_struct *hwrpb; unsigned char aux_device_present = 0xaa; @@ -106,12 +120,13 @@ static void init_pit (void) outb(LATCH & 0xff, 0x40); /* LSB */ outb(LATCH >> 8, 0x40); /* MSB */ request_region(0x40, 0x20, "timer"); /* reserve pit */ -#else -#ifndef CONFIG_ALPHA_RUFFIAN +#else /* RTC */ +#if !defined(CONFIG_ALPHA_RUFFIAN) + /* Ruffian depends on the system timer established in MILO!! 
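
On the RTC side, the 0x26 written to RTC_FREQ_SELECT in finish_shutdown() above, and checked again in setup_arch() below, keeps the MC146818 divider at its normal setting with rate code 6. On that part the periodic rate works out as 65536 >> rate, i.e. 1024 interrupts per second, matching the "setting RTC_FREQ to 1024/sec" message below. A one-liner to sanity-check the arithmetic; the formula is standard RTC behaviour, not something this patch itself states:

#include <stdio.h>

int main(void)
{
        unsigned char reg_a = 0x26;     /* divider bits 010, rate code 6 */
        unsigned int rate = reg_a & 0x0f;

        printf("RTC_FREQ_SELECT 0x%02x -> %u interrupts/sec\n",
               reg_a, 65536u >> rate);
        return 0;
}
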
*/ outb(0x36, 0x43); /* counter 0: system timer */ outb(0x00, 0x40); outb(0x00, 0x40); -#endif +#endif /* RUFFIAN */ request_region(0x70, 0x10, "timer"); /* reserve rtc */ #endif /* RTC */ @@ -148,9 +163,21 @@ void setup_arch(char **cmdline_p, init_pit(); + if ((CMOS_READ(RTC_FREQ_SELECT) & 0x3f) != 0x26) { + printk("setup_arch: setting RTC_FREQ to 1024/sec\n"); + CMOS_WRITE(0x26, RTC_FREQ_SELECT); + } + hwrpb = (struct hwrpb_struct*)(IDENT_ADDR + INIT_HWRPB->phys_addr); +#if !defined(CONFIG_ALPHA_TSUNAMI) +#ifdef CONFIG_ALPHA_SRM_SETUP + srm_hae = *hae.reg; /* save SRM setting for restoration */ + DBG_SRM(("setup_arch: old HAE base: 0x%016lx\n", srm_hae)); +#endif /* SRM_SETUP */ set_hae(hae.cache); /* sync HAE register w/hae_cache */ +#endif /* !TSUNAMI */ + wrmces(0x7); /* reset enable correctable error reports */ ROOT_DEV = to_kdev_t(0x0802); /* sda2 */ @@ -185,12 +212,54 @@ void setup_arch(char **cmdline_p, *memory_start_p = pyxis_init(*memory_start_p, *memory_end_p); #elif defined(CONFIG_ALPHA_T2) *memory_start_p = t2_init(*memory_start_p, *memory_end_p); +#elif defined(CONFIG_ALPHA_TSUNAMI) + *memory_start_p = tsunami_init(*memory_start_p, *memory_end_p); +#elif defined(CONFIG_ALPHA_MCPCIA) + *memory_start_p = mcpcia_init(*memory_start_p, *memory_end_p); +#endif + +#ifdef __SMP__ + setup_smp(); #endif } #define N(a) (sizeof(a)/sizeof(a[0])) +/* A change was made to the HWRPB via an ECO and the following code tracks + * a part of the ECO. The HWRPB version must be 5 or higher or the ECO + * was not implemented in the console firmware. If its at rev 5 or greater + * we can get the platform ascii string name from the HWRPB. Thats what this + * function does. It checks the rev level and if the string is in the HWRPB + * it returns the addtess of the string ... a pointer to the platform name. + * + * Returns: + * - Pointer to a ascii string if its in the HWRPB + * - Pointer to a blank string if the data is not in the HWRPB. + */ +static char * +platform_string(void) +{ + struct dsr_struct *dsr; + static char unk_system_string[] = "N/A"; + + /* Go to the console for the string pointer. + * If the rpb_vers is not 5 or greater the rpb + * is old and does not have this data in it. + */ + if (hwrpb->revision < 5) + return (unk_system_string); + else { + /* The Dynamic System Recognition struct + * has the system platform name starting + * after the character count of the string. 
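
platform_string() below fetches the name via hwrpb->dsr_offset and then skips one quadword, because the DSR stores a character count immediately before the ASCII name. A small model of that layout; the offset, buffer and example name are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* the ASCII name sits one quadword (the character count) past sysname_off */
static const char *dsr_sysname(const char *dsr_base, long sysname_off)
{
        return dsr_base + sysname_off + 8;      /* sizeof(long) on Alpha */
}

int main(void)
{
        char dsr[64];
        const char name[] = "AlphaServer 4100";   /* invented example value */
        uint64_t count = sizeof(name) - 1;

        memcpy(dsr + 16, &count, sizeof(count));  /* count at sysname_off 16 */
        memcpy(dsr + 16 + 8, name, sizeof(name)); /* name right after it */
        printf("platform string: %s\n", dsr_sysname(dsr, 16));
        return 0;
}
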
+ */ + dsr = ((struct dsr_struct *) + ((char *)hwrpb + hwrpb->dsr_offset)); + return ((char *)dsr + (dsr->sysname_off + + sizeof(long))); + } +} static void get_sysnames(long type, long variation, @@ -222,6 +291,10 @@ get_sysnames(long type, long variation, static char * eb66_names[] = {"EB66", "EB66+"}; static int eb66_indices[] = {0,0,1}; + static char * rawhide_names[] = {"Dodge", "Wrangler", "Durango", + "Tincup", "DaVinci"}; + static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; + long member; /* Restore real CABRIO and EB66+ family names, ie EB64+ and EB66 */ @@ -249,7 +322,9 @@ get_sysnames(long type, long variation, member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ - switch (type) { + switch (type) { /* select by family */ + default: /* default to variation "0" for now */ + break; case ST_DEC_EB164: if (member < N(eb164_indices)) *variation_name = eb164_names[eb164_indices[member]]; @@ -266,7 +341,11 @@ get_sysnames(long type, long variation, if (member < N(eb66_indices)) *variation_name = eb66_names[eb66_indices[member]]; break; - } + case ST_DEC_RAWHIDE: + if (member < N(rawhide_indices)) + *variation_name = rawhide_names[rawhide_indices[member]]; + break; + } /* end family switch */ } /* @@ -315,7 +394,12 @@ int get_cpuinfo(char *buffer) "max. addr. space #\t: %ld\n" "BogoMIPS\t\t: %lu.%02lu\n" "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n" - "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n", + "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n" + "platform string\t\t: %s\n" +#ifdef __SMP__ + "%s" +#endif + , cpu_name, cpu->variation, cpu->revision, (char*)cpu->serial_no, @@ -329,5 +413,10 @@ int get_cpuinfo(char *buffer) hwrpb->max_asn, loops_per_sec / 500000, (loops_per_sec / 5000) % 100, unaligned[0].count, unaligned[0].pc, unaligned[0].va, - unaligned[1].count, unaligned[1].pc, unaligned[1].va); + unaligned[1].count, unaligned[1].pc, unaligned[1].va, + platform_string() +#ifdef __SMP__ + , smp_info() +#endif + ); } diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c index 6724372fe..bcc4ed212 100644 --- a/arch/alpha/kernel/smc37c669.c +++ b/arch/alpha/kernel/smc37c669.c @@ -3,8 +3,6 @@ */ #include <linux/kernel.h> -#include <linux/bios32.h> -#include <linux/pci.h> #include <linux/malloc.h> #include <linux/mm.h> #include <linux/init.h> @@ -996,7 +994,7 @@ static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL; ** and standard ISA IRQs. ** */ -static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata; +static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata = 0; /* ** The following definition is for the default IRQ @@ -1022,7 +1020,7 @@ __initdata = ** ISA DMA channels. 
** */ -static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata; +static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata = 0; /* ** The following definition is the default DRQ diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c index a75998d7e..b0284b032 100644 --- a/arch/alpha/kernel/smc37c93x.c +++ b/arch/alpha/kernel/smc37c93x.c @@ -5,8 +5,6 @@ #include <linux/config.h> #include <linux/kernel.h> -#include <linux/bios32.h> -#include <linux/pci.h> #include <linux/malloc.h> #include <linux/mm.h> #include <linux/init.h> diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c new file mode 100644 index 000000000..fed91a1c1 --- /dev/null +++ b/arch/alpha/kernel/smp.c @@ -0,0 +1,1097 @@ +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/tasks.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/interrupt.h> +#include <linux/init.h> + +#include <asm/hwrpb.h> +#include <asm/ptrace.h> +#include <asm/atomic.h> + +#include <asm/delay.h> +#include <asm/irq.h> +#include <asm/bitops.h> +#include <asm/pgtable.h> +#include <asm/spinlock.h> +#include <asm/hardirq.h> +#include <asm/softirq.h> + +#define __KERNEL_SYSCALLS__ +#include <asm/unistd.h> + +struct ipi_msg_flush_tb_struct ipi_msg_flush_tb; + +struct cpuinfo_alpha cpu_data[NR_CPUS]; + +/* Processor holding kernel spinlock */ +klock_info_t klock_info = { KLOCK_CLEAR, 0 }; + +spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED; + +unsigned int boot_cpu_id = 0; +static int smp_activated = 0; + +int smp_found_config = 0; /* Have we found an SMP box */ +static int max_cpus = -1; + +unsigned int cpu_present_map = 0; + +int smp_num_cpus = 1; +int smp_num_probed = 0; /* Internal processor count */ + +int smp_threads_ready = 0; +volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; +volatile unsigned long smp_spinning[NR_CPUS] = { 0, }; + +unsigned int prof_multiplier[NR_CPUS]; +unsigned int prof_counter[NR_CPUS]; + +volatile int ipi_bits[NR_CPUS]; + +unsigned long boot_cpu_palrev; + +volatile int smp_commenced = 0; +volatile int smp_processors_ready = 0; + +volatile int cpu_number_map[NR_CPUS]; +volatile int cpu_logical_map[NR_CPUS]; + +extern int cpu_idle(void *unused); +extern void calibrate_delay(void); +extern struct hwrpb_struct *hwrpb; +extern struct thread_struct * original_pcb_ptr; +extern void __start_cpu(unsigned long); + +static void smp_setup_percpu_timer(void); +static void secondary_cpu_start(int, struct task_struct *); +static void send_cpu_msg(char *, int); + +/* process bootcommand SMP options, like "nosmp" and "maxcpus=" */ +__initfunc(void smp_setup(char *str, int *ints)) +{ + if (ints && ints[0] > 0) + max_cpus = ints[1]; + else + max_cpus = 0; +} + +void smp_store_cpu_info(int id) +{ + /* This is it on Alpha, so far. */ + cpu_data[id].loops_per_sec = loops_per_sec; +} + +void smp_commence(void) +{ + /* Lets the callin's below out of their loop. */ + mb(); + smp_commenced = 1; +} + +void smp_callin(void) +{ + int cpuid = hard_smp_processor_id(); + +#if 0 + printk("CALLIN %d state 0x%lx\n", cpuid, current->state); +#endif +#ifdef HUH + local_flush_cache_all(); + local_flush_tlb_all(); +#endif +#if 0 + set_irq_udt(mid_xlate[boot_cpu_id]); +#endif + + /* Get our local ticker going. 
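
For reference, smp_setup() earlier in this file uses the old boot-option convention in which ints[0] carries the number of integers parsed, so "maxcpus=2" arrives as ints[0] == 1, ints[1] == 2. A stand-alone model of just that bookkeeping; the option parsing itself is not shown:

#include <stdio.h>

static int max_cpus = -1;

/* same shape as smp_setup() above: ints[0] is the count of parsed values */
static void smp_setup_model(const char *str, const int *ints)
{
        (void)str;                      /* the option text is unused here */
        if (ints && ints[0] > 0)
                max_cpus = ints[1];
        else
                max_cpus = 0;
}

int main(void)
{
        int ints[] = { 1, 2 };          /* as if "maxcpus=2" had been given */

        smp_setup_model("maxcpus", ints);
        printf("max_cpus = %d\n", max_cpus);
        return 0;
}
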
*/ + smp_setup_percpu_timer(); + +#if 0 + calibrate_delay(); +#endif + smp_store_cpu_info(cpuid); +#ifdef HUH + local_flush_cache_all(); + local_flush_tlb_all(); +#endif + + /* Allow master to continue. */ + set_bit(cpuid, (unsigned long *)&cpu_callin_map[cpuid]); +#ifdef HUH + local_flush_cache_all(); + local_flush_tlb_all(); +#endif + +#ifdef NOT_YET + while(!task[cpuid] || current_set[cpuid] != task[cpuid]) + barrier(); +#endif /* NOT_YET */ + +#if 0 + /* Fix idle thread fields. */ + __asm__ __volatile__("ld [%0], %%g6\n\t" + : : "r" (¤t_set[cpuid]) + : "memory" /* paranoid */); + current->mm->mmap->vm_page_prot = PAGE_SHARED; + current->mm->mmap->vm_start = PAGE_OFFSET; + current->mm->mmap->vm_end = init_task.mm->mmap->vm_end; +#endif + +#ifdef HUH + local_flush_cache_all(); + local_flush_tlb_all(); +#endif +#if 0 + __sti(); +#endif +} + +asmlinkage int start_secondary(void *unused) +{ + extern asmlinkage void entInt(void); + extern void paging_init_secondary(void); + + wrmces(7); + paging_init_secondary(); + trap_init(); + wrent(entInt, 0); + + smp_callin(); + while (!smp_commenced) + barrier(); +#if 1 +printk("start_secondary: commencing CPU %d current %p\n", + hard_smp_processor_id(), current); +#endif + return cpu_idle(NULL); +} + +/* + * Cycle through the processors sending START msgs to boot each. + */ +void smp_boot_cpus(void) +{ + int cpucount = 0; + int i, first, prev; + + printk("smp_boot_cpus: Entering SMP Mode...\n"); + +#if 0 + __sti(); +#endif + + for(i=0; i < NR_CPUS; i++) { + cpu_number_map[i] = -1; + cpu_logical_map[i] = -1; + prof_counter[i] = 1; + prof_multiplier[i] = 1; + ipi_bits[i] = 0; + } + + cpu_number_map[boot_cpu_id] = 0; + cpu_logical_map[0] = boot_cpu_id; + current->processor = boot_cpu_id; /* ??? */ + klock_info.akp = boot_cpu_id; + + smp_store_cpu_info(boot_cpu_id); +#ifdef NOT_YET + printk("CPU%d: ", boot_cpu_id); + print_cpu_info(&cpu_data[boot_cpu_id]); + set_irq_udt(mid_xlate[boot_cpu_id]); +#endif /* NOT_YET */ + smp_setup_percpu_timer(); +#ifdef HUH + local_flush_cache_all(); +#endif + if (smp_num_probed == 1) + return; /* Not an MP box. */ + +#if NOT_YET + /* + * If SMP should be disabled, then really disable it! + */ + if (!max_cpus) + { + smp_found_config = 0; + printk(KERN_INFO "SMP mode deactivated.\n"); + } +#endif /* NOT_YET */ + + for (i = 0; i < NR_CPUS; i++) { + + if (i == boot_cpu_id) + continue; + + if (cpu_present_map & (1 << i)) { + struct task_struct *idle; + int timeout; + + /* Cook up an idler for this guy. */ + kernel_thread(start_secondary, NULL, CLONE_PID); + idle = task[++cpucount]; + if (!idle) + panic("No idle process for CPU %d", i); + idle->processor = i; + +#if 0 +printk("smp_boot_cpus: CPU %d state 0x%lx flags 0x%lx\n", + i, idle->state, idle->flags); +#endif + + /* whirrr, whirrr, whirrrrrrrrr... */ +#ifdef HUH + local_flush_cache_all(); +#endif + secondary_cpu_start(i, idle); + + /* wheee... it's going... wait for 5 secs...*/ + for (timeout = 0; timeout < 50000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(100); + } + if (cpu_callin_map[i]) { + /* Another "Red Snapper". 
*/ + cpu_number_map[i] = cpucount; + cpu_logical_map[cpucount] = i; + } else { + cpucount--; + printk("smp_boot_cpus: Processor %d" + " is stuck 0x%lx.\n", i, idle->flags); + } + } + if (!(cpu_callin_map[i])) { + cpu_present_map &= ~(1 << i); + cpu_number_map[i] = -1; + } + } +#ifdef HUH + local_flush_cache_all(); +#endif + if (cpucount == 0) { + printk("smp_boot_cpus: ERROR - only one Processor found.\n"); + cpu_present_map = (1 << smp_processor_id()); + } else { + unsigned long bogosum = 0; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_present_map & (1 << i)) + bogosum += cpu_data[i].loops_per_sec; + } + printk("smp_boot_cpus: Total of %d Processors activated" + " (%lu.%02lu BogoMIPS).\n", + cpucount + 1, + (bogosum + 2500)/500000, + ((bogosum + 2500)/5000)%100); + smp_activated = 1; + smp_num_cpus = cpucount + 1; + } + + /* Setup CPU list for IRQ distribution scheme. */ + first = prev = -1; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_present_map & (1 << i)) { + if (first == -1) + first = i; + if (prev != -1) + cpu_data[i].next = i; + prev = i; + } + } + cpu_data[prev].next = first; + + /* Ok, they are spinning and ready to go. */ + smp_processors_ready = 1; +} + +__initfunc(void ioapic_pirq_setup(char *str, int *ints)) +{ + /* this is prolly INTEL-specific */ +} + +static void smp_setup_percpu_timer(void) +{ + int cpu = smp_processor_id(); + + prof_counter[cpu] = prof_multiplier[cpu] = 1; +#ifdef NOT_YET + load_profile_irq(mid_xlate[cpu], lvl14_resolution); + if (cpu == boot_cpu_id) + enable_pil_irq(14); +#endif +} + +extern void update_one_process(struct task_struct *p, unsigned long ticks, + unsigned long user, unsigned long system, + int cpu); + +void smp_percpu_timer_interrupt(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + +#ifdef NOT_YET + clear_profile_irq(mid_xlate[cpu]); +#ifdef CONFIG_PROFILE + if(!user_mode(regs)) + sparc_do_profile(regs->pc); +#endif +#endif + + if (!--prof_counter[cpu]) { + int user = user_mode(regs); + if (current->pid) { + update_one_process(current, 1, user, !user, cpu); + + if (--current->counter < 0) { + current->counter = 0; + need_resched = 1; + } + + spin_lock(&ticker_lock); + if (user) { + if (current->priority < DEF_PRIORITY) { + kstat.cpu_nice++; + kstat.per_cpu_nice[cpu]++; + } else { + kstat.cpu_user++; + kstat.per_cpu_user[cpu]++; + } + } else { + kstat.cpu_system++; + kstat.per_cpu_system[cpu]++; + } + spin_unlock(&ticker_lock); + } + prof_counter[cpu] = prof_multiplier[cpu]; + } +} + +int setup_profiling_timer(unsigned int multiplier) +{ +#ifdef NOT_YET + int i; + unsigned long flags; + + /* Prevent level14 ticker IRQ flooding. */ + if((!multiplier) || (lvl14_resolution / multiplier) < 500) + return -EINVAL; + + save_and_cli(flags); + for(i = 0; i < NR_CPUS; i++) { + if(cpu_present_map & (1 << i)) { + load_profile_irq(mid_xlate[i], lvl14_resolution / multip +lier); + prof_multiplier[i] = multiplier; + } + } + restore_flags(flags); + + return 0; + +#endif + return -EINVAL; +} + +/* Only broken Intel needs this, thus it should not even be referenced + * globally... + */ +__initfunc(void initialize_secondary(void)) +{ + printk("initialize_secondary: entry\n"); +} + +static void +secondary_cpu_start(int cpuid, struct task_struct *idle) +{ + struct percpu_struct *cpu; + int timeout; + + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + cpuid * hwrpb->processor_size); + + /* set context to idle thread this CPU will use when running */ + /* assumption is that the idle thread is all set to go... ??? 
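
secondary_cpu_start(), a little further down, patches CPU_restart and CPU_restart_data and must then recompute the HWRPB checksum, which is simply the wrapping 64-bit sum of every quadword preceding the chksum field. A stand-alone model of that fix-up with invented block contents:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* the checksum quadword holds the wrapping sum of all quadwords before it */
static void fix_checksum(uint64_t *blk, size_t chksum_index)
{
        uint64_t sum = 0;
        size_t i;

        for (i = 0; i < chksum_index; i++)
                sum += blk[i];
        blk[chksum_index] = sum;
}

int main(void)
{
        uint64_t fake_hwrpb[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

        fix_checksum(fake_hwrpb, 7);
        printf("checksum = %llu\n", (unsigned long long)fake_hwrpb[7]);
        return 0;
}
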
*/ + memcpy(&cpu->hwpcb[0], &idle->tss, sizeof(struct pcb_struct)); + cpu->hwpcb[4] = cpu->hwpcb[0]; /* UNIQUE set to KSP ??? */ +#if 0 +printk("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx\n", + cpu->hwpcb[0], cpu->hwpcb[2], hwrpb->vptb); +printk("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", + cpuid, idle->state, idle->tss.pal_flags); +#endif + + /* setup HWRPB fields that SRM uses to activate secondary CPU */ + hwrpb->CPU_restart = __start_cpu; + hwrpb->CPU_restart_data = (unsigned long) idle; + + /* recalculate and update the HWRPB checksum */ + { + unsigned long sum, *lp1, *lp2; + sum = 0; + lp1 = (unsigned long *)hwrpb; + lp2 = &hwrpb->chksum; + while (lp1 < lp2) + sum += *lp1++; + *lp2 = sum; + } + + /* + * Send a "start" command to the specified processor. + */ + + /* SRM III 3.4.1.3 */ + cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ + cpu->flags &= ~1;/* turn off Bootstrap In Progress */ + mb(); + + send_cpu_msg("START\r\n", cpuid); + + /* now, we wait... */ + for (timeout = 10000; !(cpu->flags & 1); timeout--) { + if (timeout <= 0) { + printk("Processor %d failed to start\n", cpuid); + /* needed for pset_info to work */ +#if 0 + ipc_processor_enable(cpu_to_processor(cpunum)); +#endif + return; + } + udelay(1000); + } +#if 0 + printk("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid); +#endif +} + +static void +send_cpu_msg(char *str, int cpuid) +{ + struct percpu_struct *cpu; + register char *cp1, *cp2; + unsigned long cpumask; + int timeout; + + + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + cpuid * hwrpb->processor_size); + + cpumask = (1L << cpuid); + for (timeout = 10000; (hwrpb->txrdy & cpumask); timeout--) { + if (timeout <= 0) { + printk("Processor %x not ready\n", cpuid); + return; + } + udelay(1000); + } + + cp1 = (char *) &cpu->ipc_buffer[1]; + cp2 = str; + while (*cp2) *cp1++ = *cp2++; + *(unsigned int *)&cpu->ipc_buffer[0] = cp2 - str; /* hack */ + + /* atomic test and set */ + set_bit(cpuid, &hwrpb->rxrdy); + + for (timeout = 10000; (hwrpb->txrdy & cpumask); timeout--) { + if (timeout <= 0) { + printk("Processor %x not ready\n", cpuid); + return; + } + udelay(1000); + } +} + +/* + * setup_smp() + * + * called from arch/alpha/kernel/setup.c:setup_arch() when __SMP__ defined + */ +__initfunc(void setup_smp(void)) +{ + struct percpu_struct *cpubase, *cpu; + int i; + + boot_cpu_id = hard_smp_processor_id(); + if (boot_cpu_id != 0) { + printk("setup_smp: boot_cpu_id != 0 (%d).\n", boot_cpu_id); + } + + if (hwrpb->nr_processors > 1) { +#if 0 +printk("setup_smp: nr_processors 0x%lx\n", + hwrpb->nr_processors); +#endif + cpubase = (struct percpu_struct *) + ((char*)hwrpb + hwrpb->processor_offset); + boot_cpu_palrev = cpubase->pal_revision; + + for (i = 0; i < hwrpb->nr_processors; i++ ) { + cpu = (struct percpu_struct *) + ((char *)cpubase + i*hwrpb->processor_size); + if ((cpu->flags & 0x1cc) == 0x1cc) { + smp_num_probed++; + /* assume here that "whami" == index */ + cpu_present_map |= (1 << i); + if (i != boot_cpu_id) + cpu->pal_revision = boot_cpu_palrev; + } +#if 0 +printk("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", + i, cpu->flags, cpu->type); + printk("setup_smp: CPU %d: PAL rev 0x%lx\n", + i, cpu->pal_revision); +#endif + } + } else { + smp_num_probed = 1; + cpu_present_map = (1 << boot_cpu_id); + } + printk("setup_smp: %d CPUs probed, cpu_present_map 0x%x," + " boot_cpu_id %d\n", + smp_num_probed, cpu_present_map, boot_cpu_id); +} + +static void +secondary_console_message(void) +{ + int mycpu, i, cnt; + 
unsigned long txrdy = hwrpb->txrdy; + char *cp1, *cp2, buf[80]; + struct percpu_struct *cpu; + + mycpu = hard_smp_processor_id(); + +#if 0 +printk("secondary_console_message: TXRDY 0x%lx.\n", txrdy); +#endif + for (i = 0; i < NR_CPUS; i++) { + if (txrdy & (1L << i)) { +#if 0 +printk("secondary_console_message: TXRDY contains CPU %d.\n", i); +#endif + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + i * hwrpb->processor_size); +#if 1 + printk("secondary_console_message: on %d from %d" + " HALT_REASON 0x%lx FLAGS 0x%lx\n", + mycpu, i, cpu->halt_reason, cpu->flags); +#endif + cnt = cpu->ipc_buffer[0] >> 32; + if (cnt <= 0 || cnt >= 80) + strcpy(buf,"<<< BOGUS MSG >>>"); + else { + cp1 = (char *) &cpu->ipc_buffer[11]; + cp2 = buf; + while (cnt--) { + if (*cp1 == '\r' || *cp1 == '\n') { + *cp2++ = ' '; cp1++; + } else + *cp2++ = *cp1++; + } + *cp2 = 0; + } +#if 1 + printk("secondary_console_message: on %d message is '%s'\n", + mycpu, buf); +#endif + } + } + hwrpb->txrdy = 0; + return; +} + +static int +halt_on_panic(unsigned int this_cpu) +{ + halt(); + return 0; +} + +static int +local_flush_tlb_all(unsigned int this_cpu) +{ + tbia(); + clear_bit(this_cpu, &ipi_msg_flush_tb.flush_tb_mask); + mb(); + return 0; +} + +static int +local_flush_tlb_mm(unsigned int this_cpu) +{ + struct mm_struct * mm = ipi_msg_flush_tb.p.flush_mm; + if (mm != current->mm) + flush_tlb_other(mm); + else + flush_tlb_current(mm); + clear_bit(this_cpu, &ipi_msg_flush_tb.flush_tb_mask); + mb(); + return 0; +} + +static int +local_flush_tlb_page(unsigned int this_cpu) +{ + struct vm_area_struct * vma = ipi_msg_flush_tb.p.flush_vma; + struct mm_struct * mm = vma->vm_mm; + + if (mm != current->mm) + flush_tlb_other(mm); + else + flush_tlb_current_page(mm, vma, ipi_msg_flush_tb.flush_addr); + clear_bit(this_cpu, &ipi_msg_flush_tb.flush_tb_mask); + mb(); + return 0; +} + +static int +wrapper_local_flush_tlb_page(unsigned int this_cpu) +{ +#if 0 + int cpu = smp_processor_id(); + + if (cpu) { + printk("wrapper: ipi_msg_flush_tb.flush_addr 0x%lx [%d]\n", + ipi_msg_flush_tb.flush_addr, atomic_read(&global_irq_count)); + } +#endif + local_flush_tlb_page(this_cpu); + return 0; +} + +static int +unknown_ipi(unsigned int this_cpu) +{ + printk("unknown_ipi() on cpu %d: ", this_cpu); + return 1; +} + +enum ipi_message_type { + CPU_STOP, + TLB_ALL, + TLB_MM, + TLB_PAGE, + TLB_RANGE +}; + +static int (* ipi_func[32])(unsigned int) = { + halt_on_panic, + local_flush_tlb_all, + local_flush_tlb_mm, + wrapper_local_flush_tlb_page, + local_flush_tlb_mm, /* a.k.a. 
local_flush_tlb_range */ + unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, + unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, + unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, + unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, unknown_ipi, + unknown_ipi, unknown_ipi, unknown_ipi +}; + +void +handle_ipi(struct pt_regs *regs) +{ + int this_cpu = smp_processor_id(); + volatile int * pending_ipis = &ipi_bits[this_cpu]; + int ops; + + mb(); +#if 0 + printk("handle_ipi: on CPU %d ops 0x%x PC 0x%lx\n", + this_cpu, *pending_ipis, regs->pc); +#endif + while ((ops = *pending_ipis)) { + int first; + for (first = 0; (ops & 1) == 0; ++first, ops >>= 1) + ; /* look for the first thing to do */ + clear_bit(first, pending_ipis); + mb(); + if ((*ipi_func[first])(this_cpu)) + printk("%d\n", first); + mb(); + } + if (hwrpb->txrdy) + secondary_console_message(); +} + +void +send_ipi_message(long to_whom, enum ipi_message_type operation) +{ + int i; + unsigned int j; + + for (i = 0, j = 1; i < NR_CPUS; ++i, j += j) { + if ((to_whom & j) == 0) + continue; + set_bit(operation, &ipi_bits[i]); + mb(); + wripir(i); + } +} + +static char smp_buf[256]; + +char *smp_info(void) +{ + sprintf(smp_buf, "CPUs probed %d active %d map 0x%x AKP %d\n", + smp_num_probed, smp_num_cpus, cpu_present_map, + klock_info.akp); + + return smp_buf; +} + +/* wrapper for call from panic() */ +void +smp_message_pass(int target, int msg, unsigned long data, int wait) +{ + int me = smp_processor_id(); + + if (msg != MSG_STOP_CPU) + goto barf; + + send_ipi_message(CPU_STOP, cpu_present_map ^ (1 << me)); + return; +barf: + printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me); + panic("Bogon SMP message pass."); +} + +void +flush_tlb_all(void) +{ + unsigned int to_whom = cpu_present_map ^ (1 << smp_processor_id()); + int timeout = 10000; + +#if 1 + if (!kernel_lock_held()) { + printk("flush_tlb_all: kernel_flag %d (cpu %d akp %d)!\n", + klock_info.kernel_flag, smp_processor_id(), klock_info.akp); + } +#endif + ipi_msg_flush_tb.flush_tb_mask = to_whom; + send_ipi_message(to_whom, TLB_ALL); + tbia(); + + while (ipi_msg_flush_tb.flush_tb_mask) { + if (--timeout < 0) { + printk("flush_tlb_all: STUCK on CPU %d mask 0x%x\n", + smp_processor_id(), ipi_msg_flush_tb.flush_tb_mask); + ipi_msg_flush_tb.flush_tb_mask = 0; + break; + } + udelay(100); + ; /* Wait for all clear from other CPUs. */ + } +} + +void +flush_tlb_mm(struct mm_struct *mm) +{ + unsigned int to_whom = cpu_present_map ^ (1 << smp_processor_id()); + int timeout = 10000; + +#if 1 + if (!kernel_lock_held()) { + printk("flush_tlb_mm: kernel_flag %d (cpu %d akp %d)!\n", + klock_info.kernel_flag, smp_processor_id(), klock_info.akp); + } +#endif + ipi_msg_flush_tb.p.flush_mm = mm; + ipi_msg_flush_tb.flush_tb_mask = to_whom; + send_ipi_message(to_whom, TLB_MM); + + if (mm != current->mm) + flush_tlb_other(mm); + else + flush_tlb_current(mm); + + while (ipi_msg_flush_tb.flush_tb_mask) { + if (--timeout < 0) { + printk("flush_tlb_mm: STUCK on CPU %d mask 0x%x\n", + smp_processor_id(), ipi_msg_flush_tb.flush_tb_mask); + ipi_msg_flush_tb.flush_tb_mask = 0; + break; + } + udelay(100); + ; /* Wait for all clear from other CPUs. 
*/ + } +} + +void +flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + int cpu = smp_processor_id(); + unsigned int to_whom = cpu_present_map ^ (1 << cpu); + struct mm_struct * mm = vma->vm_mm; + int timeout = 10000; + +#if 1 + if (!kernel_lock_held()) { + printk("flush_tlb_page: kernel_flag %d (cpu %d akp %d)!\n", + klock_info.kernel_flag, cpu, klock_info.akp); + } +#endif + ipi_msg_flush_tb.p.flush_vma = vma; + ipi_msg_flush_tb.flush_addr = addr; + ipi_msg_flush_tb.flush_tb_mask = to_whom; + send_ipi_message(to_whom, TLB_PAGE); + + if (mm != current->mm) + flush_tlb_other(mm); + else + flush_tlb_current_page(mm, vma, addr); + + while (ipi_msg_flush_tb.flush_tb_mask) { + if (--timeout < 0) { + printk("flush_tlb_page: STUCK on CPU %d [0x%x,0x%lx,%d,%d]\n", + cpu, ipi_msg_flush_tb.flush_tb_mask, addr, + klock_info.akp, global_irq_holder); + ipi_msg_flush_tb.flush_tb_mask = 0; + break; + } + udelay(100); + ; /* Wait for all clear from other CPUs. */ + } +} + +void +flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) +{ +#if 0 + flush_tlb_mm(mm); +#else + unsigned int to_whom; + int timeout; + unsigned long where; + + __asm__("mov $26, %0" : "=r" (where)); + + timeout = 10000; + to_whom = cpu_present_map ^ (1 << smp_processor_id()); + +#if 1 + if (!kernel_lock_held()) { + printk("flush_tlb_range: kernel_flag %d (cpu %d akp %d) @ 0x%lx\n", + klock_info.kernel_flag, smp_processor_id(), klock_info.akp, + where); + } +#endif + ipi_msg_flush_tb.p.flush_mm = mm; + ipi_msg_flush_tb.flush_tb_mask = to_whom; + send_ipi_message(to_whom, TLB_MM); + + if (mm != current->mm) + flush_tlb_other(mm); + else + flush_tlb_current(mm); + + while (ipi_msg_flush_tb.flush_tb_mask) { + if (--timeout < 0) { + printk("flush_tlb_range: STUCK on CPU %d mask 0x%x\n", + smp_processor_id(), ipi_msg_flush_tb.flush_tb_mask); + ipi_msg_flush_tb.flush_tb_mask = 0; + break; + } + udelay(100); + ; /* Wait for all clear from other CPUs. */ + } +#endif +} + +#ifdef DEBUG_KERNEL_LOCK +void ___lock_kernel(klock_info_t *klip, int cpu, long ipl) +{ + long regx; + int stuck_lock; + unsigned long inline_pc; + + __asm__("mov $26, %0" : "=r" (inline_pc)); + + try_again: + + stuck_lock = 1<<26; + + __asm__ __volatile__( + "1: ldl_l %1,%0;" + " blbs %1,6f;" + " or %1,1,%1;" + " stl_c %1,%0;" + " beq %1,6f;" + "4: mb\n" + ".section .text2,\"ax\"\n" + "6: mov %5,$16;" + " call_pal %4;" + "7: ldl %1,%0;" + " blt %2,4b # debug\n" + " subl %2,1,%2 # debug\n" + " blbs %1,7b;" + " bis $31,7,$16;" + " call_pal %4;" + " br 1b\n" + ".previous" + : "=m,=m" (__dummy_lock(klip)), "=&r,=&r" (regx), + "=&r,=&r" (stuck_lock) + : "0,0" (__dummy_lock(klip)), "i,i" (PAL_swpipl), + "i,r" (ipl), "2,2" (stuck_lock) + : "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"); + + if (stuck_lock < 0) { + printk("___kernel_lock stuck at %lx(%d) held %lx(%d)\n", + inline_pc, cpu, klip->pc, klip->cpu); + goto try_again; + } else { + klip->pc = inline_pc; + klip->cpu = cpu; + } +} +#endif + +#ifdef DEBUG_SPINLOCK +void spin_lock(spinlock_t * lock) +{ + long tmp; + long stuck; + unsigned long inline_pc; + + __asm__("mov $26, %0" : "=r" (inline_pc)); + + try_again: + + stuck = 0x10000000; /* was 4G, now 256M */ + + /* Use sub-sections to put the actual loop at the end + of this object file's text section so as to perfect + branch prediction. 
*/ + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " subq %2,1,%2\n" + " blbs %0,2f\n" + " or %0,1,%0\n" + " stq_c %0,%1\n" + " beq %0,3f\n" + "4: mb\n" + ".section .text2,\"ax\"\n" + "2: ldq %0,%1\n" + " subq %2,1,%2\n" + "3: blt %2,4b\n" + " blbs %0,2b\n" + " br 1b\n" + ".previous" + : "=r" (tmp), + "=m" (__dummy_lock(lock)), + "=r" (stuck) + : "2" (stuck)); + + if (stuck < 0) { + printk("spinlock stuck at %lx (cur=%lx, own=%lx)\n", + inline_pc, +#if 0 + lock->previous, lock->task +#else + (unsigned long) current, lock->task +#endif + ); + goto try_again; + } else { + lock->previous = (unsigned long) inline_pc; + lock->task = (unsigned long) current; + } +} +#endif /* DEBUG_SPINLOCK */ + +#ifdef DEBUG_RWLOCK +void write_lock(rwlock_t * lock) +{ + long regx, regy; + int stuck_lock, stuck_reader; + unsigned long inline_pc; + + __asm__("mov $26, %0" : "=r" (inline_pc)); + + try_again: + + stuck_lock = 1<<26; + stuck_reader = 1<<26; + + __asm__ __volatile__( + "1: ldl_l %1,%0;" + " blbs %1,6f;" + " or %1,1,%2;" + " stl_c %2,%0;" + " beq %2,6f;" + " blt %1,8f;" + "4: mb\n" + ".section .text2,\"ax\"\n" + "6: ldl %1,%0;" + " blt %3,4b # debug\n" + " subl %3,1,%3 # debug\n" + " blbs %1,6b;" + " br 1b;" + "8: ldl %1,%0;" + " blt %4,4b # debug\n" + " subl %4,1,%4 # debug\n" + " blt %1,8b;" + "9: br 4b\n" + ".previous" + : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy) + , "=&r" (stuck_lock), "=&r" (stuck_reader) + : "0" (__dummy_lock(lock)) + , "3" (stuck_lock), "4" (stuck_reader) + ); + + if (stuck_lock < 0) { + printk("write_lock stuck at %lx\n", inline_pc); + goto try_again; + } + if (stuck_reader < 0) { + printk("write_lock stuck on readers at %lx\n", inline_pc); + goto try_again; + } +} + +void _read_lock(rwlock_t * lock) +{ + long regx; + int stuck_lock; + unsigned long inline_pc; + + __asm__("mov $26, %0" : "=r" (inline_pc)); + + try_again: + + stuck_lock = 1<<26; + + __asm__ __volatile__( + "1: ldl_l %1,%0;" + " blbs %1,6f;" + " subl %1,2,%1;" + " stl_c %1,%0;" + " beq %1,6f;" + "4: mb\n" + ".section .text2,\"ax\"\n" + "6: ldl %1,%0;" + " blt %2,4b # debug\n" + " subl %2,1,%2 # debug\n" + " blbs %1,6b;" + " br 1b\n" + ".previous" + : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock) + : "0" (__dummy_lock(lock)), "2" (stuck_lock) + ); + + if (stuck_lock < 0) { + printk("_read_lock stuck at %lx\n", inline_pc); + goto try_again; + } +} +#endif /* DEBUG_RWLOCK */ diff --git a/arch/alpha/kernel/t2.c b/arch/alpha/kernel/t2.c index 398aaebb5..69ca71404 100644 --- a/arch/alpha/kernel/t2.c +++ b/arch/alpha/kernel/t2.c @@ -8,8 +8,8 @@ * */ #include <linux/kernel.h> +#include <linux/config.h> #include <linux/types.h> -#include <linux/bios32.h> #include <linux/pci.h> #include <linux/sched.h> @@ -19,17 +19,14 @@ #include <asm/ptrace.h> #include <asm/mmu_context.h> -/* NOTE: Herein are back-to-back mb insns. They are magic. - A plausable explanation is that the i/o controler does not properly - handle the system transaction. Another involves timing. Ho hum. */ +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ extern struct hwrpb_struct *hwrpb; extern asmlinkage void wrmces(unsigned long mces); -extern asmlinkage unsigned long whami(void); -extern int alpha_sys_type; - -#define CPUID whami() - /* * Machine check reasons. 
Defined according to PALcode sources @@ -62,10 +59,14 @@ extern int alpha_sys_type; #define vulp volatile unsigned long * #define vuip volatile unsigned int * -static volatile unsigned int T2_mcheck_expected = 0; -static volatile unsigned int T2_mcheck_taken = 0; -static unsigned long T2_jd; +static volatile unsigned int T2_mcheck_expected[NR_CPUS]; +static volatile unsigned int T2_mcheck_taken[NR_CPUS]; +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int T2_DMA_WIN_BASE = T2_DMA_WIN_BASE_DEFAULT; +unsigned int T2_DMA_WIN_SIZE = T2_DMA_WIN_SIZE_DEFAULT; +unsigned long t2_sm_base; +#endif /* SRM_SETUP */ /* * Given a bus, device, and function number, compute resulting @@ -145,8 +146,10 @@ static int mk_conf_addr(unsigned char bus, unsigned char device_fn, static unsigned int conf_read(unsigned long addr, unsigned char type1) { unsigned long flags; - unsigned int stat0, value; - unsigned int t2_cfg = 0; /* to keep gcc quiet */ + unsigned int stat0, value, cpu; + unsigned long t2_cfg = 0; /* to keep gcc quiet */ + + cpu = smp_processor_id(); save_flags(flags); /* avoid getting hit by machine check */ cli(); @@ -155,43 +158,41 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1) #if 0 /* reset status register to avoid losing errors: */ - stat0 = *(vuip)T2_IOCSR; - *(vuip)T2_IOCSR = stat0; + stat0 = *(vulp)T2_IOCSR; + *(vulp)T2_IOCSR = stat0; mb(); DBG(("conf_read: T2 IOCSR was 0x%x\n", stat0)); +#endif /* if Type1 access, must set T2 CFG */ if (type1) { - t2_cfg = *(vuip)T2_IOC_CFG; + t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; + *(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg; mb(); - *(vuip)T2_IOC_CFG = t2_cfg | 1; DBG(("conf_read: TYPE1 access\n")); } mb(); draina(); -#endif - T2_mcheck_expected = 1; - T2_mcheck_taken = 0; + T2_mcheck_expected[cpu] = 1; + T2_mcheck_taken[cpu] = 0; mb(); /* access configuration space: */ value = *(vuip)addr; mb(); mb(); /* magic */ - if (T2_mcheck_taken) { - T2_mcheck_taken = 0; + if (T2_mcheck_taken[cpu]) { + T2_mcheck_taken[cpu] = 0; value = 0xffffffffU; mb(); } - T2_mcheck_expected = 0; + T2_mcheck_expected[cpu] = 0; mb(); -#if 0 - /* if Type1 access, must reset IOC CFG so normal IO space ops work */ + /* if Type1 access, must reset T2 CFG so normal IO space ops work */ if (type1) { - *(vuip)T2_IOC_CFG = t2_cfg & ~1; + *(vulp)T2_HAE_3 = t2_cfg; mb(); } -#endif DBG(("conf_read(): finished\n")); restore_flags(flags); @@ -203,44 +204,45 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char type1) { unsigned long flags; - unsigned int stat0; - unsigned int t2_cfg = 0; /* to keep gcc quiet */ + unsigned int stat0, cpu; + unsigned long t2_cfg = 0; /* to keep gcc quiet */ + + cpu = smp_processor_id(); save_flags(flags); /* avoid getting hit by machine check */ cli(); #if 0 /* reset status register to avoid losing errors: */ - stat0 = *(vuip)T2_IOCSR; - *(vuip)T2_IOCSR = stat0; + stat0 = *(vulp)T2_IOCSR; + *(vulp)T2_IOCSR = stat0; mb(); DBG(("conf_write: T2 ERR was 0x%x\n", stat0)); +#endif /* if Type1 access, must set T2 CFG */ if (type1) { - t2_cfg = *(vuip)T2_IOC_CFG; + t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; + *(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL; mb(); - *(vuip)T2_IOC_CFG = t2_cfg | 1; DBG(("conf_write: TYPE1 access\n")); } + mb(); draina(); -#endif - T2_mcheck_expected = 1; + T2_mcheck_expected[cpu] = 1; mb(); /* access configuration space: */ *(vuip)addr = value; mb(); mb(); /* magic */ - T2_mcheck_expected = 0; + T2_mcheck_expected[cpu] = 0; mb(); -#if 0 - /* if Type1 access, must reset IOC CFG so normal IO space ops 
work */ + /* if Type1 access, must reset T2 CFG so normal IO space ops work */ if (type1) { - *(vuip)T2_IOC_CFG = t2_cfg & ~1; + *(vulp)T2_HAE_3 = t2_cfg; mb(); } -#endif DBG(("conf_write(): finished\n")); restore_flags(flags); } @@ -362,17 +364,21 @@ int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn, unsigned long t2_init(unsigned long mem_start, unsigned long mem_end) { - unsigned int t2_err; - struct percpu_struct *cpu; - int i; + unsigned long t2_err; + unsigned int i; + + for (i = 0; i < NR_CPUS; i++) { + T2_mcheck_expected[i] = 0; + T2_mcheck_taken[i] = 0; + } #if 0 /* * Set up error reporting. */ - t2_err = *(vuip)T2_IOCSR ; + t2_err = *(vulp)T2_IOCSR ; t2_err |= (0x1 << 7) ; /* master abort */ - *(vuip)T2_IOC_T2_ERR = t2_err ; + *(vulp)T2_IOCSR = t2_err ; mb() ; #endif @@ -388,6 +394,42 @@ unsigned long t2_init(unsigned long mem_start, unsigned long mem_end) *(vulp)T2_TBASE2); #endif +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 1 for enabled and mapped to 0 */ + if (((*(vulp)T2_WBASE1 & (3UL<<18)) == (2UL<<18)) && + (*(vulp)T2_TBASE1 == 0)) + { + T2_DMA_WIN_BASE = *(vulp)T2_WBASE1 & 0xfff00000UL; + T2_DMA_WIN_SIZE = *(vulp)T2_WMASK1 & 0xfff00000UL; + T2_DMA_WIN_SIZE += 0x00100000UL; +/* DISABLE window 2!! ?? */ +#if 1 + printk("t2_init: using Window 1 settings\n"); + printk("t2_init: BASE 0x%lx MASK 0x%lx TRANS 0x%lx\n", + *(vulp)T2_WBASE1, + *(vulp)T2_WMASK1, + *(vulp)T2_TBASE1); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if (((*(vulp)T2_WBASE2 & (3UL<<18)) == (2UL<<18)) && + (*(vulp)T2_TBASE2 == 0)) + { + T2_DMA_WIN_BASE = *(vulp)T2_WBASE2 & 0xfff00000UL; + T2_DMA_WIN_SIZE = *(vulp)T2_WMASK2 & 0xfff00000UL; + T2_DMA_WIN_SIZE += 0x00100000UL; +/* DISABLE window 1!! ?? */ +#if 1 + printk("t2_init: using Window 2 settings\n"); + printk("t2_init: BASE 0x%lx MASK 0x%lx TRANS 0x%lx\n", + *(vulp)T2_WBASE2, + *(vulp)T2_WMASK2, + *(vulp)T2_TBASE2); +#endif + } + else /* we must use our defaults... */ +#endif /* SRM_SETUP */ + { /* * Set up the PCI->physical memory translation windows. * For now, window 2 is disabled. In the future, we may @@ -396,13 +438,13 @@ unsigned long t2_init(unsigned long mem_start, unsigned long mem_end) */ /* WARNING!! must correspond to the DMA_WIN params!!! */ - *(vuip)T2_WBASE1 = 0x400807ffU; - *(vuip)T2_WMASK1 = 0x3ff00000U; - *(vuip)T2_TBASE1 = 0; - - *(vuip)T2_WBASE2 = 0x0; + *(vulp)T2_WBASE1 = 0x400807ffU; + *(vulp)T2_WMASK1 = 0x3ff00000U; + *(vulp)T2_TBASE1 = 0; - *(vuip)T2_HBASE = 0x0; + *(vulp)T2_WBASE2 = 0x0; + *(vulp)T2_HBASE = 0x0; + } /* * check ASN in HWRPB for validity, report if bad @@ -420,41 +462,43 @@ unsigned long t2_init(unsigned long mem_start, unsigned long mem_end) * what ARC or SRM might have left behind... */ { -#if 0 - printk("T2_init: HAE1 was 0x%lx\n", *(vulp)T2_HAE_1); - printk("T2_init: HAE2 was 0x%lx\n", *(vulp)T2_HAE_2); - printk("T2_init: HAE3 was 0x%lx\n", *(vulp)T2_HAE_3); - printk("T2_init: HAE4 was 0x%lx\n", *(vulp)T2_HAE_4); + unsigned long t2_hae_1 = *(vulp)T2_HAE_1; + unsigned long t2_hae_2 = *(vulp)T2_HAE_2; + unsigned long t2_hae_3 = *(vulp)T2_HAE_3; + unsigned long t2_hae_4 = *(vulp)T2_HAE_4; +#if 1 + printk("T2_init: HAE1 was 0x%lx\n", t2_hae_1); + printk("T2_init: HAE2 was 0x%lx\n", t2_hae_2); + printk("T2_init: HAE3 was 0x%lx\n", t2_hae_3); + printk("T2_init: HAE4 was 0x%lx\n", t2_hae_4); #endif +#ifdef CONFIG_ALPHA_SRM_SETUP + /* + * sigh... 
For the SRM setup, unless we know apriori what the HAE + * contents will be, we need to setup the arbitrary region bases + * so we can test against the range of addresses and tailor the + * region chosen for the SPARSE memory access. + * + * see include/asm-alpha/t2.h for the SPARSE mem read/write + */ + t2_sm_base = (t2_hae_1 << 27) & 0xf8000000UL; + /* + Set the HAE cache, so that setup_arch() code + will use the SRM setting always. Our readb/writeb + code in .h expects never to have to change + the contents of the HAE. + */ + hae.cache = t2_hae_1; +#else /* SRM_SETUP */ + *(vulp)T2_HAE_1 = 0; mb(); + *(vulp)T2_HAE_2 = 0; mb(); + *(vulp)T2_HAE_3 = 0; mb(); #if 0 - *(vuip)T2_HAE_1 = 0; mb(); - *(vuip)T2_HAE_2 = 0; mb(); - *(vuip)T2_HAE_3 = 0; mb(); - *(vuip)T2_HAE_4 = 0; mb(); + *(vulp)T2_HAE_4 = 0; mb(); /* do not touch this */ #endif +#endif /* SRM_SETUP */ } -#if 1 - if (hwrpb->nr_processors > 1) { - printk("T2_init: nr_processors 0x%lx\n", - hwrpb->nr_processors); - printk("T2_init: processor_size 0x%lx\n", - hwrpb->processor_size); - printk("T2_init: processor_offset 0x%lx\n", - hwrpb->processor_offset); - - cpu = (struct percpu_struct *) - ((char*)hwrpb + hwrpb->processor_offset); - - for (i = 0; i < hwrpb->nr_processors; i++ ) { - printk("T2_init: CPU 0x%x: flags 0x%lx type 0x%lx\n", - i, cpu->flags, cpu->type); - cpu = (struct percpu_struct *) - ((char *)cpu + hwrpb->processor_size); - } - } -#endif - return mem_start; } @@ -469,17 +513,19 @@ static struct sable_cpu_csr *sable_cpu_regs[4] = { int t2_clear_errors(void) { + unsigned int cpu = smp_processor_id(); + DBGMC(("???????? t2_clear_errors\n")); - sable_cpu_regs[CPUID]->sic &= ~SIC_SEIC; + sable_cpu_regs[cpu]->sic &= ~SIC_SEIC; /* * clear cpu errors */ - sable_cpu_regs[CPUID]->bcce |= sable_cpu_regs[CPUID]->bcce; - sable_cpu_regs[CPUID]->cbe |= sable_cpu_regs[CPUID]->cbe; - sable_cpu_regs[CPUID]->bcue |= sable_cpu_regs[CPUID]->bcue; - sable_cpu_regs[CPUID]->dter |= sable_cpu_regs[CPUID]->dter; + sable_cpu_regs[cpu]->bcce |= sable_cpu_regs[cpu]->bcce; + sable_cpu_regs[cpu]->cbe |= sable_cpu_regs[cpu]->cbe; + sable_cpu_regs[cpu]->bcue |= sable_cpu_regs[cpu]->bcue; + sable_cpu_regs[cpu]->dter |= sable_cpu_regs[cpu]->dter; *(vulp)T2_CERR1 |= *(vulp)T2_CERR1; *(vulp)T2_PERR1 |= *(vulp)T2_PERR1; @@ -499,6 +545,7 @@ void t2_machine_check(unsigned long vector, unsigned long la_ptr, const char * reason; char buf[128]; long i; + unsigned int cpu = smp_processor_id(); DBGMC(("t2_machine_check: vector=0x%lx la_ptr=0x%lx\n", vector, la_ptr)); @@ -516,7 +563,7 @@ void t2_machine_check(unsigned long vector, unsigned long la_ptr, DBGMC((" pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n", regs->pc, mchk_header->elfl_size, mchk_header->elfl_procoffset, mchk_header->elfl_sysoffset)); - DBGMC(("t2_machine_check: expected %d\n", T2_mcheck_expected)); + DBGMC(("t2_machine_check: expected %d\n", T2_mcheck_expected[cpu])); #ifdef DEBUG_DUMP { @@ -537,11 +584,11 @@ void t2_machine_check(unsigned long vector, unsigned long la_ptr, */ mb(); mb(); /* magic */ - if (T2_mcheck_expected/* && (mchk_sysdata->epic_dcsr && 0x0c00UL)*/) { + if (T2_mcheck_expected[cpu]) { DBGMC(("T2 machine check expected\n")); - T2_mcheck_taken = 1; + T2_mcheck_taken[cpu] = 1; t2_clear_errors(); - T2_mcheck_expected = 0; + T2_mcheck_expected[cpu] = 0; mb(); mb(); /* magic */ wrmces(rdmces()|1);/* ??? 
*/ diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 9d8b56dc3..0456eb171 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -82,6 +82,16 @@ void timer_interrupt(int irq, void *dev, struct pt_regs * regs) __u32 now; long nticks; +#ifdef __SMP__ + extern void smp_percpu_timer_interrupt(struct pt_regs *); + extern unsigned int boot_cpu_id; + /* when SMP, do this for *all* CPUs, + but only do the rest for the boot CPU */ + smp_percpu_timer_interrupt(regs); + if (smp_processor_id() != boot_cpu_id) + return; +#endif + /* * Estimate how many ticks have passed since the last update. * Round the result, .5 to even. When we loose ticks due to diff --git a/arch/alpha/kernel/tsunami.c b/arch/alpha/kernel/tsunami.c new file mode 100644 index 000000000..3d0fdde89 --- /dev/null +++ b/arch/alpha/kernel/tsunami.c @@ -0,0 +1,503 @@ +/* + * Code common to all TSUNAMI chips. + * + * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). + * + */ +#include <linux/kernel.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/hwrpb.h> +#include <asm/ptrace.h> +#include <asm/mmu_context.h> + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +extern struct hwrpb_struct *hwrpb; +extern asmlinkage void wrmces(unsigned long mces); + +/* + * BIOS32-style PCI interface: + */ + +#ifdef CONFIG_ALPHA_TSUNAMI + +#ifdef DEBUG +# define DBG(args) printk args +#else +# define DBG(args) +#endif + +#define DEBUG_MCHECK +#ifdef DEBUG_MCHECK +# define DBG_MCK(args) printk args +#define DEBUG_MCHECK_DUMP +#else +# define DBG_MCK(args) +#endif + +#define vuip volatile unsigned int * +#define vulp volatile unsigned long * + +static volatile unsigned int TSUNAMI_mcheck_expected[NR_CPUS]; +static volatile unsigned int TSUNAMI_mcheck_taken[NR_CPUS]; +static unsigned int TSUNAMI_jd[NR_CPUS]; + +#ifdef CONFIG_ALPHA_SRM_SETUP +unsigned int TSUNAMI_DMA_WIN_BASE = TSUNAMI_DMA_WIN_BASE_DEFAULT; +unsigned int TSUNAMI_DMA_WIN_SIZE = TSUNAMI_DMA_WIN_SIZE_DEFAULT; +#endif /* SRM_SETUP */ + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address + * accordingly. It is therefore not safe to have concurrent + * invocations to configuration space access routines, but there + * really shouldn't be any need for this. + * + * Note that all config space accesses use Type 1 address format. + * + * Note also that type 1 is determined by non-zero bus number. + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., scsi and ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. 
+ */ +static int mk_conf_addr(unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned long *pci_addr, + unsigned char *type1) +{ + unsigned long addr; + + DBG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (bus == 0) { + *type1 = 0; + } else { + /* type 1 configuration cycle: */ + *type1 = 1; + } + addr = (bus << 16) | (device_fn << 8) | (where); + *pci_addr = addr; + DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +int pcibios_read_config_byte (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned char *value) +{ + unsigned long addr; + unsigned char type1; + unsigned char result; + + *value = 0xff; + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "ldbu %0,%1" + : "=r" (result) + : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_CONF))); + + *value = result; + return PCIBIOS_SUCCESSFUL; +} + + +int pcibios_read_config_word (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned short *value) +{ + unsigned long addr; + unsigned char type1; + unsigned short result; + + *value = 0xffff; + + if (where & 0x1) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1)) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "ldwu %0,%1" + : "=r" (result) + : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_CONF))); + + *value = result; + return PCIBIOS_SUCCESSFUL; +} + + +int pcibios_read_config_dword (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned int *value) +{ + unsigned long addr; + unsigned char type1; + unsigned int result; + + *value = 0xffffffff; + if (where & 0x3) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1)) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "ldl %0,%1" + : "=r" (result) + : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_CONF))); + + *value = result; + return PCIBIOS_SUCCESSFUL; +} + + +int pcibios_write_config_byte (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned char value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "stb %1,%0\n\t" + "mb" + : : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_CONF)), + "r" (value)); + + return PCIBIOS_SUCCESSFUL; +} + + +int pcibios_write_config_word (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned short value) +{ + unsigned long addr; + unsigned char type1; + + if (where & 0x1) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "stw %1,%0\n\t" + "mb" + : : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_CONF)), + "r" (value)); + + return PCIBIOS_SUCCESSFUL; +} + + +int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn, + unsigned char where, unsigned int value) +{ + unsigned long addr; + unsigned char type1; + + if (where & 0x3) { + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (mk_conf_addr(bus, device_fn, where, &addr, &type1) < 0) { + return PCIBIOS_SUCCESSFUL; + } + + __asm__ __volatile__ ( + "stl %1,%0\n\t" + "mb" + : : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_CONF)), + "r" (value)); + + return PCIBIOS_SUCCESSFUL; +} + + +unsigned 
long tsunami_init(unsigned long mem_start, unsigned long mem_end) +{ + unsigned long tsunami_err; + unsigned int i; + +#if 0 +printk("tsunami_init: CChip registers:\n"); +printk("tsunami_init: CSR_CSC 0x%lx\n", *(vulp)TSUNAMI_CSR_CSC); +printk("tsunami_init: CSR_MTR 0x%lx\n", *(vulp)TSUNAMI_CSR_MTR); +printk("tsunami_init: CSR_MISC 0x%lx\n", *(vulp)TSUNAMI_CSR_MISC); +printk("tsunami_init: CSR_DIM0 0x%lx\n", *(vulp)TSUNAMI_CSR_DIM0); +printk("tsunami_init: CSR_DIM1 0x%lx\n", *(vulp)TSUNAMI_CSR_DIM1); +printk("tsunami_init: CSR_DIR0 0x%lx\n", *(vulp)TSUNAMI_CSR_DIR0); +printk("tsunami_init: CSR_DIR1 0x%lx\n", *(vulp)TSUNAMI_CSR_DIR1); +printk("tsunami_init: CSR_DRIR 0x%lx\n", *(vulp)TSUNAMI_CSR_DRIR); + +printk("tsunami_init: DChip registers:\n"); +printk("tsunami_init: CSR_DSC 0x%lx\n", *(vulp)TSUNAMI_CSR_DSC); +printk("tsunami_init: CSR_STR 0x%lx\n", *(vulp)TSUNAMI_CSR_STR); +printk("tsunami_init: CSR_DREV 0x%lx\n", *(vulp)TSUNAMI_CSR_DREV); + +printk("tsunami_init: PChip registers:\n"); +printk("tsunami_init: PCHIP0_WSBA0 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSBA0); +printk("tsunami_init: PCHIP0_WSBA1 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSBA1); +printk("tsunami_init: PCHIP0_WSBA2 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSBA2); +printk("tsunami_init: PCHIP0_WSBA3 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSBA3); +printk("tsunami_init: PCHIP0_WSM0 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSM0); +printk("tsunami_init: PCHIP0_WSM1 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSM1); +printk("tsunami_init: PCHIP0_WSM2 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSM2); +printk("tsunami_init: PCHIP0_WSM3 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_WSM3); +printk("tsunami_init: PCHIP0_TBA0 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_TBA0); +printk("tsunami_init: PCHIP0_TBA1 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_TBA1); +printk("tsunami_init: PCHIP0_TBA2 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_TBA2); +printk("tsunami_init: PCHIP0_TBA3 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_TBA3); + +printk("tsunami_init: PCHIP0_PCTL 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_PCTL); +printk("tsunami_init: PCHIP0_PLAT 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_PLAT); +printk("tsunami_init: PCHIP0_PERROR 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_PERROR); +printk("tsunami_init: PCHIP0_PERRMASK 0x%lx\n", *(vulp)TSUNAMI_PCHIP0_PERRMASK); + +#endif + + for (i = 0; i < NR_CPUS; i++) { + TSUNAMI_mcheck_expected[i] = 0; + TSUNAMI_mcheck_taken[i] = 0; + } +#ifdef NOT_YET + /* + * Set up error reporting. Make sure CPU_PE is OFF in the mask. 
+ */ + tsunami_err = *(vulp)TSUNAMI_PCHIP0_PERRMASK; + tsunami_err &= ~20; + *(vulp)TSUNAMI_PCHIP0_PERRMASK = tsunami_err; + mb(); + tsunami_err = *(vulp)TSUNAMI_PCHIP0_PERRMASK; + + tsunami_err = *(vulp)TSUNAMI_PCHIP0_PERROR ; + tsunami_err |= 0x40; /* master/target abort */ + *(vulp)TSUNAMI_PCHIP0_PERROR = tsunami_err ; + mb() ; + tsunami_err = *(vulp)TSUNAMI_PCHIP0_PERROR ; +#endif /* NOT_YET */ + +#ifdef CONFIG_ALPHA_SRM_SETUP + /* check window 0 for enabled and mapped to 0 */ + if (((*(vulp)TSUNAMI_PCHIP0_WSBA0 & 3) == 1) && + (*(vulp)TSUNAMI_PCHIP0_TBA0 == 0) && + ((*(vulp)TSUNAMI_PCHIP0_WSM0 & 0xfff00000U) > 0x0ff00000U)) + { + TSUNAMI_DMA_WIN_BASE = *(vulp)TSUNAMI_PCHIP0_WSBA0 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE = *(vulp)TSUNAMI_PCHIP0_WSM0 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("tsunami_init: using Window 0 settings\n"); + printk("tsunami_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vulp)TSUNAMI_PCHIP0_WSBA0, + *(vulp)TSUNAMI_PCHIP0_WSM0, + *(vulp)TSUNAMI_PCHIP0_TBA0); +#endif + } + else /* check window 1 for enabled and mapped to 0 */ + if (((*(vulp)TSUNAMI_PCHIP0_WSBA1 & 3) == 1) && + (*(vulp)TSUNAMI_PCHIP0_TBA1 == 0) && + ((*(vulp)TSUNAMI_PCHIP0_WSM1 & 0xfff00000U) > 0x0ff00000U)) +{ + TSUNAMI_DMA_WIN_BASE = *(vulp)TSUNAMI_PCHIP0_WSBA1 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE = *(vulp)TSUNAMI_PCHIP0_WSM1 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("tsunami_init: using Window 1 settings\n"); + printk("tsunami_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vulp)TSUNAMI_PCHIP0_WSBA1, + *(vulp)TSUNAMI_PCHIP0_WSM1, + *(vulp)TSUNAMI_PCHIP0_TBA1); +#endif + } + else /* check window 2 for enabled and mapped to 0 */ + if (((*(vulp)TSUNAMI_PCHIP0_WSBA2 & 3) == 1) && + (*(vulp)TSUNAMI_PCHIP0_TSB2 == 0) && + ((*(vulp)TSUNAMI_PCHIP0_WSM2 & 0xfff00000U) > 0x0ff00000U)) + { + TSUNAMI_DMA_WIN_BASE = *(vulp)TSUNAMI_PCHIP0_WSBA2 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE = *(vulp)TSUNAMI_PCHIP0_WSM2 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("tsunami_init: using Window 2 settings\n"); + printk("tsunami_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vulp)TSUNAMI_PCHIP0_WSBA2, + *(vulp)TSUNAMI_PCHIP0_WSM2, + *(vulp)TSUNAMI_PCHIP0_TSB2); +#endif + } + else /* check window 3 for enabled and mapped to 0 */ + if (((*(vulp)TSUNAMI_PCHIP0_WSBA3 & 3) == 1) && + (*(vulp)TSUNAMI_PCHIP0_TBA3 == 0) && + ((*(vulp)TSUNAMI_PCHIP0_WSM3 & 0xfff00000U) > 0x0ff00000U)) + { + TSUNAMI_DMA_WIN_BASE = *(vulp)TSUNAMI_PCHIP0_WSBA3 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE = *(vulp)TSUNAMI_PCHIP0_WSM3 & 0xfff00000U; + TSUNAMI_DMA_WIN_SIZE += 0x00100000U; +#if 1 + printk("tsunami_init: using Window 3 settings\n"); + printk("tsunami_init: BASE 0x%x MASK 0x%x TRANS 0x%x\n", + *(vulp)TSUNAMI_PCHIP0_WSBA3, + *(vulp)TSUNAMI_PCHIP0_WSM3, + *(vulp)TSUNAMI_PCHIP0_TBA3); +#endif + } + else /* we must use our defaults which were pre-initialized... */ +#endif /* SRM_SETUP */ + { + /* + * Set up the PCI->physical memory translation windows. + * For now, windows 1,2 and 3 are disabled. In the future, we may + * want to use them to do scatter/gather DMA. Window 0 + * goes at 1 GB and is 1 GB large. 
+ */ + + *(vulp)TSUNAMI_PCHIP0_WSBA0 = 1L | (TSUNAMI_DMA_WIN_BASE & 0xfff00000U); + *(vulp)TSUNAMI_PCHIP0_WSM0 = (TSUNAMI_DMA_WIN_SIZE - 1) & 0xfff00000UL; + *(vulp)TSUNAMI_PCHIP0_TBA0 = 0UL; + + *(vulp)TSUNAMI_PCHIP0_WSBA1 = 0UL; + *(vulp)TSUNAMI_PCHIP0_WSBA2 = 0UL; + *(vulp)TSUNAMI_PCHIP0_WSBA3 = 0UL; + mb(); + } + + /* + * check ASN in HWRPB for validity, report if bad + */ + if (hwrpb->max_asn != MAX_ASN) { + printk("TSUNAMI_init: max ASN from HWRPB is bad (0x%lx)\n", + hwrpb->max_asn); + hwrpb->max_asn = MAX_ASN; + } + + return mem_start; +} + +int tsunami_pci_clr_err(void) +{ + unsigned int cpu = smp_processor_id(); + + TSUNAMI_jd[cpu] = *((vulp)TSUNAMI_PCHIP0_PERROR); + DBG(("TSUNAMI_pci_clr_err: PERROR after read 0x%x\n", TSUNAMI_jd[cpu])); + *((vulp)TSUNAMI_PCHIP0_PERROR) = 0x040; mb(); + TSUNAMI_jd[cpu] = *((vulp)TSUNAMI_PCHIP0_PERROR); + return 0; +} + +void tsunami_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ +#if 1 + printk("TSUNAMI machine check ignored\n") ; +#else + struct el_common *mchk_header; + struct el_TSUNAMI_sysdata_mcheck *mchk_sysdata; + unsigned int cpu = smp_processor_id(); + + mchk_header = (struct el_common *)la_ptr; + + mchk_sysdata = + (struct el_TSUNAMI_sysdata_mcheck *)(la_ptr + mchk_header->sys_offset); + +#if 0 + DBG_MCK(("tsunami_machine_check: vector=0x%lx la_ptr=0x%lx\n", + vector, la_ptr)); + DBG_MCK(("\t\t pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n", + regs->pc, mchk_header->size, mchk_header->proc_offset, + mchk_header->sys_offset)); + DBG_MCK(("tsunami_machine_check: expected %d DCSR 0x%lx PEAR 0x%lx\n", + TSUNAMI_mcheck_expected[cpu], mchk_sysdata->epic_dcsr, + mchk_sysdata->epic_pear)); +#endif +#ifdef DEBUG_MCHECK_DUMP + { + unsigned long *ptr; + int i; + + ptr = (unsigned long *)la_ptr; + for (i = 0; i < mchk_header->size / sizeof(long); i += 2) { + printk(" +%lx %lx %lx\n", i*sizeof(long), ptr[i], ptr[i+1]); + } + } +#endif /* DEBUG_MCHECK_DUMP */ + /* + * Check if machine check is due to a badaddr() and if so, + * ignore the machine check. + */ + mb(); + mb(); /* magic */ + if (TSUNAMI_mcheck_expected[cpu]) { + DBG(("TSUNAMI machine check expected\n")); + TSUNAMI_mcheck_expected[cpu] = 0; + TSUNAMI_mcheck_taken[cpu] = 1; + mb(); + mb(); /* magic */ + draina(); + tsunami_pci_clr_err(); + wrmces(0x7); + mb(); + } +#if 1 + else { + printk("TSUNAMI machine check NOT expected\n") ; + DBG_MCK(("tsunami_machine_check: vector=0x%lx la_ptr=0x%lx\n", + vector, la_ptr)); + DBG_MCK(("\t\t pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n", + regs->pc, mchk_header->size, mchk_header->proc_offset, + mchk_header->sys_offset)); + TSUNAMI_mcheck_expected[cpu] = 0; + TSUNAMI_mcheck_taken[cpu] = 1; + mb(); + mb(); /* magic */ + draina(); + tsunami_pci_clr_err(); + wrmces(0x7); + mb(); + } +#endif +#endif +} + +#endif /* CONFIG_ALPHA_TSUNAMI */ diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index f95b535ca..5165279f0 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -37,6 +37,27 @@ unsigned short int csum_tcpudp_magic(unsigned long saddr, ((unsigned long) proto << 8)); } +unsigned int csum_tcpudp_nofold(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum) +{ + unsigned long result; + + result = (saddr + daddr + sum + + ((unsigned long) ntohs(len) << 16) + + ((unsigned long) proto << 8)); + + /* Fold down to 32-bits so we don't loose in the typedef-less + network stack. 
*/ + /* 64 to 33 */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + return result; +} + /* * Do a 64-bit checksum on an arbitrary memory area.. * diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S index da57fd6d1..aa309b9f5 100644 --- a/arch/alpha/lib/copy_user.S +++ b/arch/alpha/lib/copy_user.S @@ -27,11 +27,18 @@ */ /* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ +#define EXI(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .gprel32 99b; \ - lda $31, $exit-99b($31); \ + lda $31, $exitin-99b($31); \ + .previous + +#define EXO(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .gprel32 99b; \ + lda $31, $exitout-99b($31); \ .previous .set noat @@ -45,14 +52,14 @@ __copy_user: subq $3,8,$3 .align 5 $37: - EX( ldq_u $1,0($7) ) - EX( ldq_u $2,0($6) ) + EXI( ldq_u $1,0($7) ) + EXO( ldq_u $2,0($6) ) extbl $1,$7,$1 mskbl $2,$6,$2 insbl $1,$6,$1 addq $3,1,$3 bis $1,$2,$1 - EX( stq_u $1,0($6) ) + EXO( stq_u $1,0($6) ) subq $0,1,$0 addq $6,1,$6 addq $7,1,$7 @@ -63,10 +70,10 @@ $36: bic $0,7,$4 beq $1,$43 beq $4,$48 - EX( ldq_u $3,0($7) ) + EXI( ldq_u $3,0($7) ) .align 5 $50: - EX( ldq_u $2,8($7) ) + EXI( ldq_u $2,8($7) ) subq $4,8,$4 extql $3,$7,$3 extqh $2,$7,$1 @@ -81,13 +88,13 @@ $48: beq $0,$41 .align 5 $57: - EX( ldq_u $1,0($7) ) - EX( ldq_u $2,0($6) ) + EXI( ldq_u $1,0($7) ) + EXO( ldq_u $2,0($6) ) extbl $1,$7,$1 mskbl $2,$6,$2 insbl $1,$6,$1 bis $1,$2,$1 - EX( stq_u $1,0($6) ) + EXO( stq_u $1,0($6) ) subq $0,1,$0 addq $6,1,$6 addq $7,1,$7 @@ -98,7 +105,7 @@ $43: beq $4,$65 .align 5 $66: - EX( ldq $1,0($7) ) + EXI( ldq $1,0($7) ) subq $4,8,$4 stq $1,0($6) addq $7,8,$7 @@ -107,15 +114,31 @@ $66: bne $4,$66 $65: beq $0,$41 - EX( ldq $2,0($7) ) - EX( ldq $1,0($6) ) + EXI( ldq $2,0($7) ) + EXO( ldq $1,0($6) ) mskql $2,$0,$2 mskqh $1,$0,$1 bis $2,$1,$2 - EX( stq $2,0($6) ) + EXO( stq $2,0($6) ) bis $31,$31,$0 $41: $35: -$exit: +$exitout: ret $31,($28),1 + +$exitin: + /* A stupid byte-by-byte zeroing of the rest of the output + buffer. This cures security holes by never leaving + random kernel data around to be copied elsewhere. 
*/ + + mov $0,$1 +$101: + EXO ( ldq_u $2,0($6) ) + subq $1,1,$1 + mskbl $2,$6,$2 + EXO ( stq_u $2,0($6) ) + addq $6,1,$6 + bgt $1,$101 + ret $31,($28),1 + .end __copy_user diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index 1328eeaba..713081330 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -365,6 +365,12 @@ csum_partial_copy_from_user(const char *src, char *dst, int len, } unsigned int +csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum) +{ + return do_csum_partial_copy_from_user(src, dst, len, sum, NULL); +} + +unsigned int csum_partial_copy (const char *src, char *dst, int len, unsigned int sum) { unsigned int ret; diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c index bcfac1020..dc708c73e 100644 --- a/arch/alpha/lib/memcpy.c +++ b/arch/alpha/lib/memcpy.c @@ -104,7 +104,7 @@ static inline void __memcpy_aligned(unsigned long d, unsigned long s, long n) DO_REST_ALIGNED(d,s,n); } -void * __memcpy(void * dest, const void *src, size_t n) +void * memcpy(void * dest, const void *src, size_t n) { if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) { __memcpy_aligned((unsigned long) dest, (unsigned long) src, n); @@ -114,22 +114,5 @@ void * __memcpy(void * dest, const void *src, size_t n) return dest; } -/* - * Broken compiler uses "bcopy" to do internal - * assignments. Silly OSF/1 BSDism. - */ -char * bcopy(const char * src, char * dest, size_t n) -{ - __memcpy(dest, src, n); - return dest; -} - -/* - * gcc-2.7.1 and newer generate calls to memset and memcpy. So we - * need to define that here: - */ -#ifdef __ELF__ - asm (".weak memcpy; memcpy = __memcpy"); -#else - asm (".weakext memcpy, __memcpy"); -#endif +/* For backward modules compatibility, define __memcpy. */ +asm("__memcpy = memcpy; .globl __memcpy"); diff --git a/arch/alpha/math-emu/ieee-math.c b/arch/alpha/math-emu/ieee-math.c index 59d7dfa6d..b3d896389 100644 --- a/arch/alpha/math-emu/ieee-math.c +++ b/arch/alpha/math-emu/ieee-math.c @@ -733,19 +733,23 @@ ieee_CVTQT (int f, unsigned long a, unsigned long *b) * FPCR_INV if invalid operation occurred, etc. */ unsigned long -ieee_CVTTQ (int f, unsigned long a, unsigned long *b) +ieee_CVTTQ (int f, unsigned long a, unsigned long *pb) { unsigned int midway; - unsigned long ov, uv, res = 0; + unsigned long ov, uv, res, b; fpclass_t a_type; EXTENDED temp; - *b = 0; a_type = extend_ieee(a, &temp, DOUBLE); + + b = 0x7fffffffffffffff; + res = FPCR_INV; if (a_type == NaN || a_type == INFTY) - return FPCR_INV; + goto out; + + res = 0; if (a_type == QNaN) - return 0; + goto out; if (temp.e > 0) { ov = 0; @@ -757,7 +761,7 @@ ieee_CVTTQ (int f, unsigned long a, unsigned long *b) if (ov || (temp.f[1] & 0xffc0000000000000)) res |= FPCR_IOV | FPCR_INE; } - if (temp.e < 0) { + else if (temp.e < 0) { while (temp.e < 0) { ++temp.e; uv = temp.f[0] & 1; /* save sticky bit */ @@ -765,7 +769,8 @@ ieee_CVTTQ (int f, unsigned long a, unsigned long *b) temp.f[0] |= uv; } } - *b = ((temp.f[1] << 9) | (temp.f[0] >> 55)) & 0x7fffffffffffffff; + b = (temp.f[1] << 9) | (temp.f[0] >> 55); + /* * Notice: the fraction is only 52 bits long. Thus, rounding * cannot possibly result in an integer overflow. 
@@ -776,18 +781,18 @@ ieee_CVTTQ (int f, unsigned long a, unsigned long *b) midway = (temp.f[0] & 0x003fffffffffffff) == 0; if ((midway && (temp.f[0] & 0x0080000000000000)) || !midway) - ++*b; + ++b; } break; case ROUND_PINF: if ((temp.f[0] & 0x007fffffffffffff) != 0) - ++*b; + ++b; break; case ROUND_NINF: if ((temp.f[0] & 0x007fffffffffffff) != 0) - --*b; + --b; break; case ROUND_CHOP: @@ -798,8 +803,11 @@ ieee_CVTTQ (int f, unsigned long a, unsigned long *b) res |= FPCR_INE; if (temp.s) { - *b = -*b; + b = -b; } + +out: + *pb = b; return res; } diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index e44639fb2..603b33149 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -14,13 +14,52 @@ #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> +#ifdef __SMP__ +unsigned long last_asn[NR_CPUS] = { /* gag */ + ASN_FIRST_VERSION + (0 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (1 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (2 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (3 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (4 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (5 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (6 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (7 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (8 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (9 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (10 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (11 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (12 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (13 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (14 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (15 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (16 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (17 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (18 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (19 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (20 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (21 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (22 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (23 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (24 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (25 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (26 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (27 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (28 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (29 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (30 << WIDTH_HARDWARE_ASN), + ASN_FIRST_VERSION + (31 << WIDTH_HARDWARE_ASN) +}; +#else unsigned long asn_cache = ASN_FIRST_VERSION; +#endif /* __SMP__ */ #ifndef BROKEN_ASN /* @@ -30,7 +69,8 @@ unsigned long asn_cache = ASN_FIRST_VERSION; */ void get_new_asn_and_reload(struct task_struct *tsk, struct mm_struct *mm) { - get_new_mmu_context(tsk, mm, asn_cache); + mm->context = 0; + get_new_mmu_context(tsk, mm); reload_context(tsk); } #endif @@ -84,6 +124,7 @@ asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, } } + lock_kernel(); down(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) @@ -112,7 +153,7 @@ good_area: } handle_mm_fault(current, vma, address, cause > 0); up(&mm->mmap_sem); - return; + goto out; /* * Something tried to access memory that isn't in our memory map.. @@ -123,16 +164,17 @@ bad_area: if (user_mode(regs)) { force_sig(SIGSEGV, current); - return; + goto out; } /* Are we prepared to handle this fault as an exception? 
*/ if ((fixup = search_exception_table(regs->pc)) != 0) { unsigned long newpc; newpc = fixup_exception(dpf_reg, fixup, regs->pc); - printk("%s: Exception at [<%lx>] (%lx)\n", current->comm, regs->pc, newpc); + printk("%s: Exception at [<%lx>] (%lx)\n", + current->comm, regs->pc, newpc); regs->pc = newpc; - return; + goto out; } /* @@ -143,4 +185,7 @@ bad_area: "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); do_exit(SIGKILL); + out: + unlock_kernel(); } + diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 67faa97d4..7562f6709 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -26,6 +26,8 @@ extern void die_if_kernel(char *,struct pt_regs *,long); extern void show_net_buffers(void); +struct thread_struct * original_pcb_ptr; + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a @@ -81,15 +83,22 @@ void show_mem(void) extern unsigned long free_area_init(unsigned long, unsigned long); -static void load_PCB(struct thread_struct * pcb) +static struct thread_struct * load_PCB(struct thread_struct * pcb) { + struct thread_struct *old_pcb; + __asm__ __volatile__( - "stq $30,0(%0)\n\t" - "bis %0,%0,$16\n\t" - "call_pal %1" - : /* no outputs */ + "stq $30,0(%1)\n\t" + "bis %1,%1,$16\n\t" +#ifdef CONFIG_ALPHA_DP264 + "zap $16,0xe0,$16\n\t" +#endif /* DP264 */ + "call_pal %2\n\t" + "bis $0,$0,%0" + : "=r" (old_pcb) : "r" (pcb), "i" (PAL_swpctx) : "$0", "$1", "$16", "$22", "$23", "$24", "$25"); + return old_pcb; } /* @@ -107,7 +116,8 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem) start_mem = free_area_init(start_mem, end_mem); /* find free clusters, update mem_map[] accordingly */ - memdesc = (struct memdesc_struct *) (INIT_HWRPB->mddt_offset + (unsigned long) INIT_HWRPB); + memdesc = (struct memdesc_struct *) + (INIT_HWRPB->mddt_offset + (unsigned long) INIT_HWRPB); cluster = memdesc->cluster; for (i = memdesc->numclusters ; i > 0; i--, cluster++) { unsigned long pfn, nr; @@ -129,16 +139,47 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem) memset((void *) ZERO_PAGE, 0, PAGE_SIZE); memset(swapper_pg_dir, 0, PAGE_SIZE); newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT; - pgd_val(swapper_pg_dir[1023]) = (newptbr << 32) | pgprot_val(PAGE_KERNEL); + pgd_val(swapper_pg_dir[1023]) = + (newptbr << 32) | pgprot_val(PAGE_KERNEL); init_task.tss.ptbr = newptbr; init_task.tss.pal_flags = 1; /* set FEN, clear everything else */ init_task.tss.flags = 0; - load_PCB(&init_task.tss); + original_pcb_ptr = + phys_to_virt((unsigned long)load_PCB(&init_task.tss)); +#if 0 +printk("OKSP 0x%lx OPTBR 0x%lx\n", + original_pcb_ptr->ksp, original_pcb_ptr->ptbr); +#endif - flush_tlb_all(); + tbia(); return start_mem; } +#ifdef __SMP__ +/* + * paging_init_secondary(), called ONLY by secondary CPUs, + * sets up current->tss contents appropriately and does a load_PCB. + * note that current should be pointing at the idle thread task struct + * for this CPU. + */ +void paging_init_secondary(void) +{ + current->tss.ptbr = init_task.tss.ptbr; + current->tss.pal_flags = 1; + current->tss.flags = 0; + +#if 0 +printk("paging_init_secondary: KSP 0x%lx PTBR 0x%lx\n", + current->tss.ksp, current->tss.ptbr); +#endif + + load_PCB(¤t->tss); + tbia(); + + return; +} +#endif /* __SMP__ */ + void mem_init(unsigned long start_mem, unsigned long end_mem) { unsigned long tmp; |
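
[Editor's note] The secondary-CPU startup hunk in arch/alpha/kernel/smp.c above patches hwrpb->CPU_restart and CPU_restart_data and then recomputes the HWRPB checksum; the rule it follows is simply that the chksum field must equal the wrap-around 64-bit sum of every quadword that precedes it. The stand-alone sketch below illustrates that rule over a stand-in structure (the struct layout and field names here are illustrative only, not the real HWRPB definition):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a console-owned block whose checksum field must equal
   the 64-bit sum (mod 2^64) of all quadwords that precede it. */
struct fake_hwrpb {
	uint64_t restart_pc;
	uint64_t restart_data;
	uint64_t other[4];
	uint64_t chksum;	/* covers everything above this field */
};

static void update_chksum(struct fake_hwrpb *h)
{
	uint64_t sum = 0;
	uint64_t *lp1 = (uint64_t *) h;
	uint64_t *lp2 = &h->chksum;

	while (lp1 < lp2)		/* sum every quadword before chksum */
		sum += *lp1++;
	*lp2 = sum;			/* wrap-around (mod 2^64) sum */
}

int main(void)
{
	struct fake_hwrpb h = { 0x1000, 0x2000, { 1, 2, 3, 4 }, 0 };

	update_chksum(&h);
	printf("chksum = 0x%llx\n", (unsigned long long) h.chksum);
	return 0;
}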
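
[Editor's note] The mk_conf_addr() routine added in arch/alpha/kernel/tsunami.c above packs a Type 1 PCI configuration address as bus<<16 | devfn<<8 | register, matching the bit layout described in its comment block (bits 23:16 bus, 15:11 device, 10:8 function, 7:2 register). A small sketch of that packing, with an illustrative helper name and example values:

#include <stdio.h>

/* Pack a Type 1 PCI config-space offset: bits 23:16 bus,
   15:11 device, 10:8 function, 7:0 register byte offset. */
static unsigned long pack_type1(unsigned bus, unsigned devfn, unsigned where)
{
	return ((unsigned long) bus << 16) | (devfn << 8) | where;
}

int main(void)
{
	/* e.g. bus 1, device 9 function 0, register offset 0x10 (BAR0) */
	unsigned devfn = (9 << 3) | 0;	/* device in bits 7:3, function in 2:0 */

	printf("config addr = 0x%lx\n", pack_type1(1, devfn, 0x10));
	return 0;
}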
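
[Editor's note] The csum_tcpudp_nofold() hunk in arch/alpha/lib/checksum.c above folds a 64-bit accumulator back down to 32 bits in two steps, so that any carry out of bit 31 is re-added to the low word before the 32-bit network stack sees it. The following user-space sketch mirrors that two-step fold (the function name fold64 and the test values are illustrative, not taken from the kernel sources); for the sample accumulator 0x212345676 the fold yields 0x12345678, i.e. the two carry bits are re-added at bit 0:

#include <stdio.h>
#include <stdint.h>

/*
 * Fold a 64-bit ones'-complement accumulator down to 32 bits:
 * the first add reduces 64 bits to at most 33 significant bits,
 * the second absorbs the remaining carry.
 */
static uint32_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 33 -> 32 bits */
	return (uint32_t) sum;
}

int main(void)
{
	/* illustrative accumulator: 0xffffffff + 0xffffffff + 0x12345678 */
	uint64_t sum = (uint64_t) 0xffffffffu + 0xffffffffu + 0x12345678u;

	printf("0x%llx folds to 0x%08x\n",
	       (unsigned long long) sum, fold64(sum));
	return 0;
}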