author     Ralf Baechle <ralf@linux-mips.org>    2000-02-05 06:47:02 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-02-05 06:47:02 +0000
commit     99a7e12f34b3661a0d1354eef83a0eef4df5e34c (patch)
tree       3560aca9ca86792f9ab7bd87861ea143a1b3c7a3 /arch/sparc
parent     e73a04659c0b8cdee4dd40e58630e2cf63afb316 (diff)
Merge with Linux 2.3.38.
Diffstat (limited to 'arch/sparc')
49 files changed, 2961 insertions, 2123 deletions
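Much of this merge converts arch/sparc I/O mapping from the old sparc_alloc_io() interface to ioremap()/sbus_ioremap() driven by struct resource (see the ioport.c, auxio.c and pcic.c hunks below). The following is an editorial sketch only, modelled on the auxio.c hunk: it shows the shape of an OBIO register mapping after this change. The device name "mydev", its probe function and the register pointer are hypothetical placeholders; sbus_ioremap(), prom_getproperty() and prom_apply_obio_ranges() are the interfaces the patch itself uses.

```c
/*
 * Sketch of the post-merge mapping convention (not code from the patch).
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/string.h>		/* memset(), as the auxio.c hunk notes */

static unsigned char *mydev_regs;	/* hypothetical device registers */

static void __init mydev_map(int node)
{
	struct linux_prom_registers regs[1];
	struct resource r;

	/* Fetch the "reg" property and translate it through the OBIO
	 * ranges, exactly as the auxio.c hunks do. */
	prom_getproperty(node, "reg", (char *) regs, sizeof(regs));
	prom_apply_obio_ranges(regs, 0x1);

	/* The old code called
	 *   sparc_alloc_io(phys, 0, size, name, which_io, 0);
	 * the new scheme describes the physical window with a
	 * struct resource and hands it to sbus_ioremap(). */
	memset(&r, 0, sizeof(r));
	r.flags = regs[0].which_io & 0xF;
	r.start = regs[0].phys_addr;
	r.end   = regs[0].phys_addr + regs[0].reg_size - 1;

	mydev_regs = (unsigned char *) sbus_ioremap(&r, 0,
					regs[0].reg_size, "mydev");
}
```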
diff --git a/arch/sparc/ap1000/Makefile b/arch/sparc/ap1000/Makefile index 83df0ef8e..f070f7675 100644 --- a/arch/sparc/ap1000/Makefile +++ b/arch/sparc/ap1000/Makefile @@ -6,9 +6,9 @@ # .S.s: - $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s + $(CPP) -D__ASSEMBLY__ $(AFLAGS) -ansi $< -o $*.s .S.o: - $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o + $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o all: ap1000lib.o diff --git a/arch/sparc/config.in b/arch/sparc/config.in index 27a756a2a..9ed06dc1f 100644 --- a/arch/sparc/config.in +++ b/arch/sparc/config.in @@ -1,4 +1,4 @@ -# $Id: config.in,v 1.73 1999/08/31 10:09:01 davem Exp $ +# $Id: config.in,v 1.79 1999/12/23 01:46:00 davem Exp $ # For a description of the syntax of this configuration file, # see the Configure script. # @@ -57,21 +57,18 @@ else fi fi -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - tristate 'Openprom tree appears in /proc/openprom (EXPERIMENTAL)' CONFIG_SUN_OPENPROMFS -fi +tristate 'Openprom tree appears in /proc/openprom' CONFIG_SUN_OPENPROMFS bool 'Networking support' CONFIG_NET bool 'System V IPC' CONFIG_SYSVIPC bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT bool 'Sysctl support' CONFIG_SYSCTL if [ "$CONFIG_PROC_FS" = "y" ]; then - choice 'Kernel core (/proc/kcore) format' \ - "ELF CONFIG_KCORE_ELF \ - A.OUT CONFIG_KCORE_AOUT" ELF + define_bool CONFIG_KCORE_ELF y fi tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC +bool 'SunOS binary emulation' CONFIG_SUNOS_EMUL source drivers/parport/Config.in dep_tristate ' Parallel printer support' CONFIG_PRINTER $CONFIG_PARPORT endmenu @@ -89,7 +86,7 @@ mainmenu_option next_comment comment 'Floppy, IDE, and other block devices' bool 'Normal floppy disk support' CONFIG_BLK_DEV_FD - +define_bool CONFIG_BLK_DEV_IDE n bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then tristate ' Linear (append) mode' CONFIG_MD_LINEAR @@ -146,7 +143,7 @@ if [ "$CONFIG_SCSI" != "n" ]; then mainmenu_option next_comment comment 'SCSI low-level drivers' - bool 'Sparc ESP Scsi Driver' CONFIG_SCSI_SUNESP $CONFIG_SCSI + tristate 'Sparc ESP Scsi Driver' CONFIG_SCSI_SUNESP $CONFIG_SCSI tristate 'PTI Qlogic,ISP Driver' CONFIG_SCSI_QLOGICPTI $CONFIG_SCSI endmenu fi @@ -173,9 +170,7 @@ if [ "$CONFIG_NET" = "y" ]; then fi tristate ' Sun LANCE support' CONFIG_SUNLANCE tristate ' Sun Happy Meal 10/100baseT support' CONFIG_HAPPYMEAL - if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - tristate ' Sun BigMAC 10/100baseT support (EXPERIMENTAL)' CONFIG_SUNBMAC - fi + tristate ' Sun BigMAC 10/100baseT support (EXPERIMENTAL)' CONFIG_SUNBMAC tristate ' Sun QuadEthernet support' CONFIG_SUNQE tristate ' MyriCOM Gigabit Ethernet support' CONFIG_MYRI_SBUS # bool ' FDDI driver support' CONFIG_FDDI diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig index 1c14e11ff..b02a836b6 100644 --- a/arch/sparc/defconfig +++ b/arch/sparc/defconfig @@ -37,6 +37,7 @@ CONFIG_FB_BWTWO=y CONFIG_FB_CGTHREE=y CONFIG_FB_TCX=y CONFIG_FB_CGFOURTEEN=y +# CONFIG_FB_P9100 is not set CONFIG_FB_LEO=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FBCON_ADVANCED is not set @@ -47,7 +48,7 @@ CONFIG_FONT_SUN8x16=y # CONFIG_FBCON_FONTS is not set CONFIG_SBUS=y CONFIG_SBUSCHAR=y -CONFIG_MOUSE=y +CONFIG_BUSMOUSE=y CONFIG_SUN_MOUSE=y CONFIG_SERIAL=y CONFIG_SUN_SERIAL=y @@ -65,6 +66,7 @@ CONFIG_SUN_MOSTEK_RTC=y # CONFIG_SUN_BPP is not set # CONFIG_SUN_VIDEOPIX is not set CONFIG_SUN_AURORA=m +# 
CONFIG_TADPOLE_TS102_UCTRL is not set # # Linux/SPARC audio subsystem (EXPERIMENTAL) @@ -84,6 +86,9 @@ CONFIG_KCORE_ELF=y CONFIG_BINFMT_AOUT=y CONFIG_BINFMT_ELF=y CONFIG_BINFMT_MISC=m +CONFIG_SUNOS_EMUL=y +# CONFIG_PARPORT is not set +# CONFIG_PRINTER is not set # # Floppy, IDE, and other block devices @@ -103,8 +108,9 @@ CONFIG_BLK_DEV_NBD=m # Networking options # CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set # CONFIG_NETLINK is not set -# CONFIG_FIREWALL is not set +# CONFIG_NETFILTER is not set # CONFIG_FILTER is not set CONFIG_UNIX=y CONFIG_INET=y @@ -120,10 +126,11 @@ CONFIG_INET=y # # (it is safe to leave these untouched) # -CONFIG_INET_RARP=m CONFIG_SKB_LARGE=y CONFIG_IPV6=m # CONFIG_IPV6_EUI64 is not set +# CONFIG_KHTTPD is not set +# CONFIG_ATM is not set # # @@ -144,7 +151,6 @@ CONFIG_DECNET_RAW=y # CONFIG_WAN_ROUTER is not set # CONFIG_NET_FASTROUTE is not set # CONFIG_NET_HW_FLOWCONTROL is not set -# CONFIG_CPU_IS_SLOW is not set # # QoS and/or fair queueing @@ -240,6 +246,7 @@ CONFIG_VFAT_FS=m CONFIG_EFS_FS=m CONFIG_ISO9660_FS=m # CONFIG_JOLIET is not set +# CONFIG_UDF_FS is not set CONFIG_MINIX_FS=m # CONFIG_NTFS_FS is not set CONFIG_HPFS_FS=m @@ -275,13 +282,13 @@ CONFIG_NCP_FS=m # # Partition Types # +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y CONFIG_BSD_DISKLABEL=y -# CONFIG_MAC_PARTITION is not set -CONFIG_SMD_DISKLABEL=y CONFIG_SOLARIS_X86_PARTITION=y -# CONFIG_SGI_DISKLABEL is not set # CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_AMIGA_PARTITION=y +# CONFIG_SGI_PARTITION is not set +CONFIG_SUN_PARTITION=y CONFIG_NLS=y # @@ -312,6 +319,7 @@ CONFIG_NLS=y # CONFIG_NLS_ISO8859_7 is not set # CONFIG_NLS_ISO8859_8 is not set # CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_14 is not set # CONFIG_NLS_ISO8859_15 is not set # CONFIG_NLS_KOI8_R is not set diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 2e2fdef2c..0d93f3a2b 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -1,4 +1,4 @@ -# $Id: Makefile,v 1.50 1999/08/31 13:26:13 anton Exp $ +# $Id: Makefile,v 1.52 1999/12/21 04:02:17 davem Exp $ # Makefile for the linux kernel. # # Note! Dependencies are done automagically by 'make dep', which also @@ -19,9 +19,9 @@ O_TARGET := kernel.o IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o sun4d_irq.o O_OBJS := entry.o wof.o wuf.o etrap.o rtrap.o traps.o ${IRQ_OBJS} \ process.o signal.o ioport.o setup.o idprom.o \ - sys_sparc.o sunos_asm.o sparc-stub.o systbls.o sys_sunos.o \ - sunos_ioctl.o time.o windows.o cpu.o devices.o \ - sclow.o solaris.o tadpole.o tick14.o ptrace.o sys_solaris.o \ + sys_sparc.o sunos_asm.o sparc-stub.o systbls.o \ + time.o windows.o cpu.o devices.o sclow.o solaris.o \ + tadpole.o tick14.o ptrace.o sys_solaris.o \ unaligned.o muldiv.o pcic.o semaphore.o OX_OBJS := sparc_ksyms.o @@ -30,6 +30,10 @@ ifdef CONFIG_SUN4 O_OBJS += sun4setup.o endif +ifdef CONFIG_SUNOS_EMUL +O_OBJS += sys_sunos.o sunos_ioctl.o +endif + ifdef CONFIG_SMP O_OBJS += trampoline.o smp.o sun4m_smp.o sun4d_smp.o endif @@ -55,11 +59,13 @@ check_asm: dummy @echo "#ifndef CONFIG_SMP" >> asm_offsets.h @echo "" >> asm_offsets.h @echo "#include <linux/config.h>" > tmp.c + @echo "#undef __SMP__" >> tmp.c @echo "#undef CONFIG_SMP" >> tmp.c @echo "#include <linux/sched.h>" >> tmp.c - $(CC) -E tmp.c -o tmp.i + $(CC) $(CPPFLAGS) -E tmp.c -o tmp.i @echo "/* Automatically generated. Do not edit. 
*/" > check_asm.c @echo "#include <linux/config.h>" >> check_asm.c + @echo "#undef __SMP__" >> check_asm.c @echo "#undef CONFIG_SMP" >> check_asm.c @echo "#include <linux/sched.h>" >> check_asm.c @echo 'struct task_struct _task;' >> check_asm.c @@ -71,7 +77,7 @@ check_asm: dummy $(SH) ./check_asm.sh thread tmp.i check_asm.c @echo 'return 0; }' >> check_asm.c @rm -f tmp.[ci] - $(CC) -o check_asm check_asm.c + $(CC) $(CFLAGS) -o check_asm check_asm.c ./check_asm >> asm_offsets.h @rm -f check_asm check_asm.c @echo "" >> asm_offsets.h @@ -81,7 +87,7 @@ check_asm: dummy @echo "#undef CONFIG_SMP" >> tmp.c @echo "#define CONFIG_SMP 1" >> tmp.c @echo "#include <linux/sched.h>" >> tmp.c - $(CC) -D__SMP__ -E tmp.c -o tmp.i + $(CC) $(CPPFLAGS) -D__SMP__ -E tmp.c -o tmp.i @echo "/* Automatically generated. Do not edit. */" > check_asm.c @echo "#include <linux/config.h>" >> check_asm.c @echo "#undef CONFIG_SMP" >> check_asm.c @@ -96,7 +102,7 @@ check_asm: dummy $(SH) ./check_asm.sh thread tmp.i check_asm.c @echo 'return 0; }' >> check_asm.c @rm -f tmp.[ci] - $(CC) -D__SMP__ -o check_asm check_asm.c + $(CC) $(CFLAGS) -D__SMP__ -o check_asm check_asm.c ./check_asm >> asm_offsets.h @rm -f check_asm check_asm.c @echo "" >> asm_offsets.h diff --git a/arch/sparc/kernel/auxio.c b/arch/sparc/kernel/auxio.c index 099c14eca..fe34b0900 100644 --- a/arch/sparc/kernel/auxio.c +++ b/arch/sparc/kernel/auxio.c @@ -9,6 +9,7 @@ #include <asm/oplib.h> #include <asm/io.h> #include <asm/auxio.h> +#include <asm/string.h> /* memset(), Linux has no bzero() */ /* Probe and map in the Auxiliary I/O register */ unsigned char *auxio_register; @@ -17,6 +18,7 @@ void __init auxio_probe(void) { int node, auxio_nd; struct linux_prom_registers auxregs[1]; + struct resource r; switch (sparc_cpu_model) { case sun4d: @@ -51,10 +53,11 @@ void __init auxio_probe(void) prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)); prom_apply_obio_ranges(auxregs, 0x1); /* Map the register both read and write */ - auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0, - auxregs[0].reg_size, - "auxiliaryIO", - auxregs[0].which_io, 0x0); + r.flags = auxregs[0].which_io & 0xF; + r.start = auxregs[0].phys_addr; + r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1; + auxio_register = (unsigned char *) sbus_ioremap(&r, 0, + auxregs[0].reg_size, "auxio"); /* Fix the address on sun4m and sun4c. */ if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 || sparc_cpu_model == sun4c) @@ -72,6 +75,7 @@ void __init auxio_power_probe(void) { struct linux_prom_registers regs; int node; + struct resource r; /* Attempt to find the sun4m power control node. */ node = prom_getchild(prom_root_node); @@ -84,9 +88,12 @@ void __init auxio_power_probe(void) /* Map the power control register. */ prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); prom_apply_obio_ranges(®s, 1); - auxio_power_register = (volatile unsigned char *) - sparc_alloc_io(regs.phys_addr, 0, regs.reg_size, - "power off control", regs.which_io, 0); + memset(&r, 0, sizeof(r)); + r.flags = regs.which_io & 0xF; + r.start = regs.phys_addr; + r.end = regs.phys_addr + regs.reg_size - 1; + auxio_power_register = (unsigned char *) sbus_ioremap(&r, 0, + regs.reg_size, "auxpower"); /* Display a quick message on the console. 
*/ if (auxio_power_register) diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c index d0a9c622a..c1ef01be3 100644 --- a/arch/sparc/kernel/ebus.c +++ b/arch/sparc/kernel/ebus.c @@ -1,4 +1,4 @@ -/* $Id: ebus.c,v 1.4 1999/08/31 06:54:19 davem Exp $ +/* $Id: ebus.c,v 1.8 1999/11/27 22:40:38 zaitcev Exp $ * ebus.c: PCI to EBus bridge device. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) @@ -23,9 +23,8 @@ #include <asm/bpp.h> #undef PROM_DEBUG -#undef DEBUG_FILL_EBUS_DEV -#ifdef PROM_DEBUG +#if 0 /* separate from PROM_DEBUG for the sake of PROLL */ #define dprintk prom_printf #else #define dprintk printk @@ -79,7 +78,7 @@ void __init fill_ebus_child(int node, struct linux_prom_registers *preg, dev->prom_name, len, dev->parent->num_addrs); panic(__FUNCTION__); } - dev->base_address[i] = dev->parent->base_address[regs[i]]; + dev->resource[i].start = dev->parent->resource[regs[i]].start; /* XXX resource */ } /* @@ -110,22 +109,8 @@ void __init fill_ebus_child(int node, struct linux_prom_registers *preg, dev->irqs[0] = 0; } else { dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name); -/* P3 remove */ printk("EBUS: dev %s irq %d from PROM\n", dev->prom_name, dev->irqs[0]); } } - -#ifdef DEBUG_FILL_EBUS_DEV - dprintk("child '%s': address%s\n", dev->prom_name, - dev->num_addrs > 1 ? "es" : ""); - for (i = 0; i < dev->num_addrs; i++) - dprintk(" %016lx\n", dev->base_address[i]); - if (dev->num_irqs) { - dprintk(" IRQ%s", dev->num_irqs > 1 ? "s" : ""); - for (i = 0; i < dev->num_irqs; i++) - dprintk(" %08x", dev->irqs[i]); - dprintk("\n"); - } -#endif } void __init fill_ebus_device(int node, struct linux_ebus_device *dev) @@ -135,6 +120,7 @@ void __init fill_ebus_device(int node, struct linux_ebus_device *dev) int irqs[PROMINTR_MAX]; char lbuf[128]; int i, n, len; + unsigned long baseaddr; dev->prom_node = node; prom_getstring(node, "name", lbuf, sizeof(lbuf)); @@ -175,28 +161,20 @@ void __init fill_ebus_device(int node, struct linux_ebus_device *dev) ; } - dev->base_address[i] = dev->bus->self->base_address[n]; - dev->base_address[i] += regs[i].phys_addr; - - if (dev->base_address[i]) { - dev->base_address[i] = - (unsigned long)sparc_alloc_io (dev->base_address[i], 0, - regs[i].reg_size, - dev->prom_name, 0, 0); -#if 0 /* - * This release_region() screwes those who do sparc_alloc_io(). - * Change drivers which do check_region(). See drivers/block/floppy.c. + * XXX Now as we have regions, why don't we make an on-demand allocation... */ - /* Some drivers call 'check_region', so we release it */ - release_region(dev->base_address[i] & PAGE_MASK, PAGE_SIZE); -#endif - - if (dev->base_address[i] == 0 ) { - panic("ebus: unable sparc_alloc_io for dev %s", - dev->prom_name); - } + dev->resource[i].start = 0; + if ((baseaddr = dev->bus->self->resource[n].start + + regs[i].phys_addr) != 0) { + /* dev->resource[i].name = dev->prom_name; */ + if ((baseaddr = (unsigned long) ioremap(baseaddr, + regs[i].reg_size)) == 0) { + panic("ebus: unable to remap dev %s", + dev->prom_name); + } } + dev->resource[i].start = baseaddr; /* XXX Unaligned */ } len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs)); @@ -216,22 +194,9 @@ void __init fill_ebus_device(int node, struct linux_ebus_device *dev) dev->irqs[0] = 0; } else { dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name); -/* P3 remove */ printk("EBUS: child %s irq %d from PROM\n", dev->prom_name, dev->irqs[0]); } } -#ifdef DEBUG_FILL_EBUS_DEV - dprintk("'%s': address%s\n", dev->prom_name, - dev->num_addrs > 1 ? 
"es" : ""); - for (i = 0; i < dev->num_addrs; i++) - dprintk(" %016lx\n", dev->base_address[i]); - if (dev->num_irqs) { - dprintk(" IRQ%s", dev->num_irqs > 1 ? "s" : ""); - for (i = 0; i < dev->num_irqs; i++) - dprintk(" %08x", dev->irqs[i]); - dprintk("\n"); - } -#endif if ((node = prom_getchild(node))) { dev->children = (struct linux_ebus_child *) ebus_alloc(sizeof(struct linux_ebus_child)); @@ -312,7 +277,7 @@ void __init ebus_init(void) } nreg = len / sizeof(struct linux_prom_pci_registers); - base = &ebus->self->base_address[0]; + base = &ebus->self->resource[0].start; for (reg = 0; reg < nreg; reg++) { if (!(regs[reg].which_io & 0x03000000)) continue; diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 0f1d1122e..226e53897 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1,4 +1,4 @@ -/* $Id: entry.S,v 1.161 1999/08/14 03:51:05 anton Exp $ +/* $Id: entry.S,v 1.163 1999/11/19 04:11:24 davem Exp $ * arch/sparc/kernel/entry.S: Sparc trap low-level entry points. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -826,13 +826,13 @@ linux_trap_nmi_sun4c: .globl C_LABEL(invalid_segment_patch1_ff) .globl C_LABEL(invalid_segment_patch2_ff) C_LABEL(invalid_segment_patch1_ff): cmp %l4, 0xff -C_LABEL(invalid_segment_patch2_ff): mov 0xff, %l4 +C_LABEL(invalid_segment_patch2_ff): mov 0xff, %l3 .align 4 .globl C_LABEL(invalid_segment_patch1_1ff) .globl C_LABEL(invalid_segment_patch2_1ff) C_LABEL(invalid_segment_patch1_1ff): cmp %l4, 0x1ff -C_LABEL(invalid_segment_patch2_1ff): mov 0x1ff, %l4 +C_LABEL(invalid_segment_patch2_1ff): mov 0x1ff, %l3 .align 4 .globl C_LABEL(num_context_patch1_16), C_LABEL(num_context_patch2_16) @@ -853,7 +853,7 @@ C_LABEL(vac_linesize_patch_32): subcc %l7, 32, %l7 #ifdef CONFIG_SUN4 C_LABEL(vac_hwflush_patch1_on): nop #else -C_LABEL(vac_hwflush_patch1_on): subcc %l7, (PAGE_SIZE - 4), %l7 +C_LABEL(vac_hwflush_patch1_on): addcc %l7, -PAGE_SIZE, %l7 #endif C_LABEL(vac_hwflush_patch2_on): sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG @@ -969,12 +969,12 @@ C_LABEL(invalid_segment_patch1): bne 1f sethi %hi(C_LABEL(sun4c_kfree_ring)), %l4 or %l4, %lo(C_LABEL(sun4c_kfree_ring)), %l4 - ld [%l4 + 0x10], %l3 + ld [%l4 + 0x18], %l3 deccc %l3 ! do we have a free entry? bcs,a 2f ! no, unmap one. sethi %hi(C_LABEL(sun4c_kernel_ring)), %l4 - st %l3, [%l4 + 0x10] ! sun4c_kfree_ring.num_entries-- + st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries-- ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next st %l5, [%l6 + 0x08] ! entry->vaddr = address @@ -997,10 +997,10 @@ C_LABEL(invalid_segment_patch1): st %l6, [%l4 + 0x00] ! head->next = entry - ld [%l4 + 0x10], %l3 + ld [%l4 + 0x18], %l3 inc %l3 ! sun4c_kernel_ring.num_entries++ b 4f - st %l3, [%l4 + 0x10] + ld [%l6 + 0x08], %l5 2: or %l4, %lo(C_LABEL(sun4c_kernel_ring)), %l4 @@ -1020,7 +1020,7 @@ C_LABEL(invalid_segment_patch1): C_LABEL(vac_hwflush_patch1): C_LABEL(vac_linesize_patch): subcc %l7, 16, %l7 - bg 9b + bne 9b C_LABEL(vac_hwflush_patch2): sta %g0, [%l3 + %l7] ASI_FLUSHSEG @@ -1041,47 +1041,36 @@ C_LABEL(vac_hwflush_patch2): mov %l3, %l5 ! address = tmp +4: C_LABEL(num_context_patch1): mov 0x08, %l7 -C_LABEL(invalid_segment_patch2): - mov 0x7f, %l4 + ld [%l6 + 0x08], %l4 + ldub [%l6 + 0x0c], %l3 + or %l4, %l3, %l4 ! encode new vaddr/pseg into l4 sethi %hi(AC_CONTEXT), %l3 lduba [%l3] ASI_CONTROL, %l6 -3: - deccc %l7 - stba %l7, [%l3] ASI_CONTROL - bne 3b - stXa %l4, [%l5] ASI_SEGMAP - - stba %l6, [%l3] ASI_CONTROL - - ! 
reload the entry - - sethi %hi(C_LABEL(sun4c_kernel_ring)), %l4 - ld [%l4 + %lo(C_LABEL(sun4c_kernel_ring))], %l6 - - ld [%l6 + 0x08], %l5 ! restore address from entry->vaddr - -4: -C_LABEL(num_context_patch2): - mov 0x08, %l7 - - ldub [%l6 + 0x0c], %l4 ! entry->pseg - + /* Invalidate old mapping, instantiate new mapping, + * for each context. Registers l6/l7 are live across + * this loop. + */ +3: deccc %l7 sethi %hi(AC_CONTEXT), %l3 - lduba [%l3] ASI_CONTROL, %l6 - -3: - deccc %l7 stba %l7, [%l3] ASI_CONTROL +C_LABEL(invalid_segment_patch2): + mov 0x7f, %l3 + stXa %l3, [%l5] ASI_SEGMAP + andn %l4, 0x1ff, %l3 bne 3b - stXa %l4, [%l5] ASI_SEGMAP + stXa %l4, [%l3] ASI_SEGMAP + sethi %hi(AC_CONTEXT), %l3 stba %l6, [%l3] ASI_CONTROL + andn %l4, 0x1ff, %l5 + 1: sethi %hi(SUN4C_VMALLOC_START), %l4 cmp %l5, %l4 @@ -1149,6 +1138,7 @@ C_LABEL(num_context_patch2): sun4c_fault_fromuser: SAVE_ALL + nop mov %l7, %o1 ! Decode the info from %l7 mov %l7, %o2 @@ -1199,6 +1189,7 @@ C_LABEL(srmmu_fault): RESTORE_ALL +#ifdef CONFIG_SUNOS_EMUL /* SunOS uses syscall zero as the 'indirect syscall' it looks * like indir_syscall(scall_num, arg0, arg1, arg2...); etc. * This is complete brain damage. @@ -1226,6 +1217,7 @@ C_LABEL(sunos_indir): mov %o5, %o4 call %l6 mov %l4, %o7 +#endif .align 4 .globl C_LABEL(sys_nis_syscall) @@ -1639,6 +1631,20 @@ solaris_syscall: b ret_trap_entry st %l1, [%sp + REGWIN_SZ + PT_NPC] +#ifndef CONFIG_SUNOS_EMUL + .align 4 + .globl sunos_syscall +sunos_syscall: + SAVE_ALL_HEAD + rd %wim, %l3 + wr %l0, PSR_ET, %psr + nop + nop + mov %i0, %l5 + call C_LABEL(do_sunos_syscall) + add %sp, REGWIN_SZ, %o0 +#endif + /* {net, open}bsd system calls enter here... */ .align 4 .globl bsd_syscall diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S index fbb2e680d..eb3d007f0 100644 --- a/arch/sparc/kernel/head.S +++ b/arch/sparc/kernel/head.S @@ -1,4 +1,4 @@ -/* $Id: head.S,v 1.97 1999/08/14 03:51:10 anton Exp $ +/* $Id: head.S,v 1.101 1999/12/02 08:34:56 jj Exp $ * head.S: The initial boot code for the Sparc port of Linux. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -78,6 +78,11 @@ sun4e_notsup: .asciz "Sparc-Linux sun4e support does not exist\n\n" .align 4 +#ifndef CONFIG_SUNOS_EMUL +#undef SUNOS_SYSCALL_TRAP +#define SUNOS_SYSCALL_TRAP SUNOS_NO_SYSCALL_TRAP +#endif + /* The Sparc trap table, bootloader gives us control at _start. 
*/ .text .globl start, _stext, _start, __stext @@ -171,7 +176,8 @@ t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9 t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f) t_getcc:GETCC_TRAP /* Get Condition Codes */ t_setcc:SETCC_TRAP /* Set Condition Codes */ -t_bada2:BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) +t_getpsr:GETPSR_TRAP /* Get PSR Register */ +t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) t_slowi:INDIRECT_SOLARIS_SYSCALL(156) t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) @@ -247,8 +253,8 @@ C_LABEL(trapbase_cpu1): LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP - BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) @@ -315,8 +321,8 @@ C_LABEL(trapbase_cpu2): LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP - BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) @@ -383,8 +389,8 @@ C_LABEL(trapbase_cpu3): LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP - BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) @@ -436,7 +442,7 @@ C_LABEL(empty_zero_page): .skip PAGE_SIZE */ .ascii "HdrS" .word LINUX_VERSION_CODE - .half 0x0201 /* HdrS version */ + .half 0x0203 /* HdrS version */ C_LABEL(root_flags): .half 1 C_LABEL(root_dev): @@ -448,6 +454,8 @@ C_LABEL(sparc_ramdisk_image): C_LABEL(sparc_ramdisk_size): .word 0 .word C_LABEL(reboot_command) + .word 0, 0, 0 + .word _end /* Cool, here we go. Pick up the romvec pointer in %o0 and stash it in * %g7 and at prom_vector_p. 
And also quickly check whether we are on diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 74706e79a..e2b6b1ae4 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -1,139 +1,539 @@ -/* $Id: ioport.c,v 1.24 1997/04/10 03:02:32 davem Exp $ +/* $Id: ioport.c,v 1.28 1999/12/27 06:08:28 anton Exp $ * ioport.c: Simple io mapping allocator. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) * - * The routines in this file should be changed for a memory allocator - * that would be setup just like NetBSD does : you create regions that - * are administered by a general purpose allocator, and then you call - * that allocator with your handle and the block size instead of this - * weak stuff. + * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev. */ +#include <linux/config.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/mm.h> +#include <linux/malloc.h> #include <asm/io.h> #include <asm/vaddrs.h> #include <asm/oplib.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> -/* This points to the next to use virtual memory for io mappings */ -static unsigned long dvma_next_free = DVMA_VADDR; -unsigned long sparc_iobase_vaddr = IOBASE_VADDR; +struct resource *sparc_find_resource_bystart(struct resource *, unsigned long); +struct resource *sparc_find_resource_by_hit(struct resource *, unsigned long); + +static void *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); +static void *_sparc_alloc_io(unsigned int busno, unsigned long phys, + unsigned long size, char *name); +static void _sparc_free_io(struct resource *res); + +/* This points to the next to use virtual memory for DVMA mappings */ +static struct resource sparc_dvma = { + "sparc_dvma", DVMA_VADDR, DVMA_VADDR + DVMA_LEN - 1 +}; +/* This points to the start of I/O mappings, cluable from outside. */ + struct resource sparc_iomap = { + "sparc_iomap", IOBASE_VADDR, IOBASE_END-1 +}; /* - * sparc_alloc_io: - * Map and allocates an obio device. - * Implements a simple linear allocator, you can force the function - * to use your own mapping, but in practice this should not be used. - * - * Input: - * address: the obio address to map - * virtual: if non zero, specifies a fixed virtual address where - * the mapping should take place. - * len: the length of the mapping - * bus_type: The bus on which this io area sits. + * Our mini-allocator... + * Boy this is gross! We need it because we must map I/O for + * timers and interrupt controller before the kmalloc is available. 
+ */ + +#define XNMLN 15 +#define XNRES 10 /* SS-10 uses 8 */ + +struct xresource { + struct resource xres; /* Must be first */ + int xflag; /* 1 == used */ + char xname[XNMLN+1]; +}; + +static struct xresource xresv[XNRES]; + +static struct xresource *xres_alloc(void) { + struct xresource *xrp; + int n; + + xrp = xresv; + for (n = 0; n < XNRES; n++) { + if (xrp->xflag == 0) { + xrp->xflag = 1; + return xrp; + } + xrp++; + } + return NULL; +} + +static void xres_free(struct xresource *xrp) { + xrp->xflag = 0; +} + +/* + */ +extern void sun4c_mapioaddr(unsigned long, unsigned long, int bus_type, int rdonly); +extern void srmmu_mapioaddr(unsigned long, unsigned long, int bus_type, int rdonly); + +static void mapioaddr(unsigned long physaddr, unsigned long virt_addr, + int bus, int rdonly) +{ + switch(sparc_cpu_model) { + case sun4c: + case sun4: + sun4c_mapioaddr(physaddr, virt_addr, bus, rdonly); + break; + case sun4m: + case sun4d: + case sun4e: + srmmu_mapioaddr(physaddr, virt_addr, bus, rdonly); + break; + default: + printk("mapioaddr: Trying to map IO space for unsupported machine.\n"); + printk("mapioaddr: sparc_cpu_model = %d\n", sparc_cpu_model); + printk("mapioaddr: Halting...\n"); + halt(); + }; + return; +} + +extern void srmmu_unmapioaddr(unsigned long virt); +extern void sun4c_unmapioaddr(unsigned long virt); + +static void unmapioaddr(unsigned long virt_addr) +{ + switch(sparc_cpu_model) { + case sun4c: + case sun4: + sun4c_unmapioaddr(virt_addr); + break; + case sun4m: + case sun4d: + case sun4e: + srmmu_unmapioaddr(virt_addr); + break; + default: + printk("unmapioaddr: sparc_cpu_model = %d, halt...\n", sparc_cpu_model); + halt(); + }; + return; +} + +/* + * These are typically used in PCI drivers + * which are trying to be cross-platform. * - * Returns: - * The virtual address where the mapping actually took place. + * Bus type is always zero on IIep. */ +void *ioremap(unsigned long offset, unsigned long size) +{ + char name[14]; + + sprintf(name, "phys_%08x", (u32)offset); + return _sparc_alloc_io(0, offset, size, name); +} -void *sparc_alloc_io (u32 address, void *virtual, int len, char *name, - u32 bus_type, int rdonly) +/* + * Comlimentary to ioremap(). + */ +void iounmap(void *virtual) { - unsigned long vaddr, base_address; - unsigned long addr = (unsigned long) address; - unsigned long offset = (addr & (~PAGE_MASK)); + unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; + struct resource *res; - if (virtual) { - vaddr = (unsigned long) virtual; + if ((res = sparc_find_resource_bystart(&sparc_iomap, vaddr)) == NULL) { + printk("free_io/iounmap: cannot free %lx\n", vaddr); + return; + } + _sparc_free_io(res); - len += offset; - if(((unsigned long) virtual + len) > (IOBASE_VADDR + IOBASE_LEN)) { - prom_printf("alloc_io: Mapping outside IOBASE area\n"); - prom_halt(); - } - if(check_region ((vaddr | offset), len)) { - prom_printf("alloc_io: 0x%lx is already in use\n", vaddr); - prom_halt(); - } + if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) { + xres_free((struct xresource *)res); + } else { + kfree(res); + } +} + +/* + * Davem's version of sbus_ioremap. 
+ */ +unsigned long sbus_ioremap(struct resource *phyres, unsigned long offset, + unsigned long size, char *name) +{ + return (unsigned long) _sparc_alloc_io(phyres->flags & 0xF, + phyres->start + offset, size, name); +} - /* Tell Linux resource manager about the mapping */ - request_region ((vaddr | offset), len, name); +/* + */ +void sbus_iounmap(unsigned long addr, unsigned long size) +{ + iounmap((void *)addr); +} + +/* + * Meat of mapping + */ +static void *_sparc_alloc_io(unsigned int busno, unsigned long phys, + unsigned long size, char *name) +{ + static int printed_full = 0; + struct xresource *xres; + struct resource *res; + char *tack; + int tlen; + void *va; /* P3 diag */ + + if (name == NULL) name = "???"; + + if ((xres = xres_alloc()) != 0) { + tack = xres->xname; + res = &xres->xres; } else { - vaddr = occupy_region(sparc_iobase_vaddr, IOBASE_END, - (offset + len + PAGE_SIZE-1) & PAGE_MASK, PAGE_SIZE, name); - if (vaddr == 0) { - /* Usually we cannot see printks in this case. */ - prom_printf("alloc_io: cannot occupy %d region\n", len); - prom_halt(); + if (!printed_full) { + printk("ioremap: done with statics, switching to malloc\n"); + printed_full = 1; } + tlen = strlen(name); + tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL); + if (tack == NULL) return NULL; + res = (struct resource *) tack; + tack += sizeof (struct resource); } - base_address = vaddr; - /* Do the actual mapping */ - for (; len > 0; len -= PAGE_SIZE) { - mapioaddr(addr, vaddr, bus_type, rdonly); - vaddr += PAGE_SIZE; - addr += PAGE_SIZE; - } + strncpy(tack, name, XNMLN); + tack[XNMLN] = 0; + res->name = tack; - return (void *) (base_address | offset); + va = _sparc_ioremap(res, busno, phys, size); + /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */ + return va; } -void sparc_free_io (void *virtual, int len) +/* + * This is called from _sparc_alloc_io only, we left it separate + * in case Davem changes his mind about interface to sbus_ioremap(). + */ +static void * +_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) { - unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; - unsigned long plen = (((unsigned long)virtual & ~PAGE_MASK) + len + PAGE_SIZE-1) & PAGE_MASK; + unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); + unsigned long va; + unsigned int psz; + + if (allocate_resource(&sparc_iomap, res, + (offset + sz + PAGE_SIZE-1) & PAGE_MASK, + sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) { + /* Usually we cannot see printks in this case. */ + prom_printf("alloc_io_res(%s): cannot occupy\n", + (res->name != NULL)? res->name: "???"); + prom_halt(); + } + + va = res->start; + pa &= PAGE_MASK; + for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) { + mapioaddr(pa, va, bus, 0); + va += PAGE_SIZE; + pa += PAGE_SIZE; + } + + /* + * XXX Playing with implementation details here. + * On sparc64 Ebus has resources with precise boundaries. + * We share drivers with sparc64. Too clever drivers use + * start of a resource instead of a base adress. + * + * XXX-2 This may be not valid anymore, clean when + * interface to sbus_ioremap() is resolved. + */ + res->start += offset; + res->end = res->start + sz - 1; /* not strictly necessary.. */ + + return (void *) res->start; +} - release_region(vaddr, plen); +/* + * Comlimentary to _sparc_ioremap(). 
+ */ +static void _sparc_free_io(struct resource *res) +{ + unsigned long plen; - for (; plen != 0;) { + plen = res->end - res->start + 1; + while (plen != 0) { plen -= PAGE_SIZE; - unmapioaddr(vaddr + plen); + unmapioaddr(res->start + plen); + } + + release_resource(res); +} + +#ifdef CONFIG_SBUS + +void sbus_set_sbus64(struct sbus_dev *sdev, int x) { + printk("sbus_set_sbus64: unsupported\n"); +} + +/* + * Allocate a chunk of memory suitable for DMA. + * Typically devices use them for control blocks. + * CPU may access them without any explicit flushing. + */ +void *sbus_alloc_consistant(struct sbus_dev *sdev, long len, u32 *dma_addrp) +{ + unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; + unsigned long va; + struct resource *res; + int order; + + /* XXX why are some lenghts signed, others unsigned? */ + if (len <= 0) { + return NULL; + } + /* XXX So what is maxphys for us and how do drivers know it? */ + if (len > 256*1024) { /* __get_free_pages() limit */ + return NULL; } + + for (order = 0; order < 6; order++) /* 2^6 pages == 256K */ + if ((1 << (order + PAGE_SHIFT)) >= len_total) + break; + va = __get_free_pages(GFP_KERNEL, order); + if (va == 0) { + /* + * printk here may be flooding... Consider removal XXX. + */ + printk("sbus_alloc_consistant: no %ld pages\n", len_total>>PAGE_SHIFT); + return NULL; + } + + if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { + free_pages(va, order); + printk("sbus_alloc_consistant: no core\n"); + return NULL; + } + + if (allocate_resource(&sparc_dvma, res, len_total, + sparc_dvma.start, sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { + printk("sbus_alloc_consistant: cannot occupy 0x%lx", len); + free_pages(va, order); + kfree(res); + return NULL; + } + + *dma_addrp = res->start; + mmu_map_dma_area(va, res->start, len); + + /* + * "Official" or "natural" address of pages we got is va. + * We want to return uncached range. We could make va[len] + * uncached but it's difficult to make cached back [P3: hmm] + * We use the artefact of sun4c, replicated everywhere else, + * that CPU can use bus addresses to access the same memory. + */ + res->name = (void *)va; /* XXX Ouch.. we got to hide it somewhere */ + return (void *)res->start; } -/* Does DVMA allocations with PAGE_SIZE granularity. How this basically - * works is that the ESP chip can do DVMA transfers at ANY address with - * certain size and boundary restrictions. But other devices that are - * attached to it and would like to do DVMA have to set things up in - * a special way, if the DVMA sees a device attached to it transfer data - * at addresses above DVMA_VADDR it will grab them, this way it does not - * now have to know the peculiarities of where to read the Lance data - * from. 
(for example) +void sbus_free_consistant(struct sbus_dev *sdev, long n, void *p, u32 ba) +{ + struct resource *res; + unsigned long pgp; + int order; + + if ((res = sparc_find_resource_bystart(&sparc_dvma, + (unsigned long)p)) == NULL) { + printk("sbus_free_consistant: cannot free %p\n", p); + return; + } + + if (((unsigned long)p & (PAGE_MASK-1)) != 0) { + printk("sbus_free_consistant: unaligned va %p\n", p); + return; + } + + n = (n + PAGE_SIZE-1) & PAGE_MASK; + if ((res->end-res->start)+1 != n) { + printk("sbus_free_consistant: region 0x%lx asked 0x%lx\n", + (long)((res->end-res->start)+1), n); + return; + } + + mmu_inval_dma_area((unsigned long)res->name, n); /* XXX Ouch */ + mmu_unmap_dma_area(ba, n); + release_resource(res); + + pgp = (unsigned long) res->name; /* XXX Ouch */ + for (order = 0; order < 6; order++) + if ((1 << (order + PAGE_SHIFT)) >= n) + break; + free_pages(pgp, order); + + kfree(res); +} + +/* + * Map a chunk of memory so that devices can see it. + * CPU view of this memory may be inconsistent with + * a device view and explicit flushing is necessary. */ -void *_sparc_dvma_malloc (int len, char *name) +u32 sbus_map_single(struct sbus_dev *sdev, void *va, long len) { - unsigned long vaddr, base_address; +#if 0 /* This is the version that abuses consistant space */ + unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; + struct resource *res; - vaddr = dvma_next_free; - if(check_region (vaddr, len)) { - prom_printf("alloc_dma: 0x%lx is already in use\n", vaddr); - prom_halt(); + /* XXX why are some lenghts signed, others unsigned? */ + if (len <= 0) { + return 0; } - if(vaddr + len > (DVMA_VADDR + DVMA_LEN)) { - prom_printf("alloc_dvma: out of dvma memory\n"); - prom_halt(); + /* XXX So what is maxphys for us and how do drivers know it? */ + if (len > 256*1024) { /* __get_free_pages() limit */ + return 0; + } + + if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { + printk("sbus_map_single: no core\n"); + return 0; + } + res->name = va; + + if (allocate_resource(&sparc_dvma, res, len_total, + sparc_dvma.start, sparc_dvma.end, PAGE_SIZE) != 0) { + printk("sbus_map_single: cannot occupy 0x%lx", len); + kfree(res); + return 0; + } + + mmu_map_dma_area(va, res->start, len_total); + mmu_flush_dma_area((unsigned long)va, len_total); /* in all contexts? */ + + return res->start; +#endif +#if 1 /* "trampoline" version */ + /* XXX why are some lenghts signed, others unsigned? */ + if (len <= 0) { + return 0; + } + /* XXX So what is maxphys for us and how do drivers know it? */ + if (len > 256*1024) { /* __get_free_pages() limit */ + return 0; + } +/* BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus) */ + return mmu_get_scsi_one(va, len, sdev->bus); +#endif +} + +void sbus_unmap_single(struct sbus_dev *sdev, u32 ba, long n) +{ +#if 0 /* This is the version that abuses consistant space */ + struct resource *res; + unsigned long va; + + if ((res = sparc_find_resource_bystart(&sparc_dvma, ba)) == NULL) { + printk("sbus_unmap_single: cannot find %08x\n", (unsigned)ba); + return; } - /* Basically these can be mapped just like any old - * IO pages, cacheable bit off, etc. The physical - * pages are now mapped dynamically to save space. + n = (n + PAGE_SIZE-1) & PAGE_MASK; + if ((res->end-res->start)+1 != n) { + printk("sbus_unmap_single: region 0x%lx asked 0x%lx\n", + (long)((res->end-res->start)+1), n); + return; + } + + va = (unsigned long) res->name; /* XXX Ouch */ + mmu_inval_dma_area(va, n); /* in all contexts, mm's?... 
*/ + mmu_unmap_dma_area(ba, n); /* iounit cache flush is here */ + release_resource(res); + kfree(res); +#endif +#if 1 /* "trampoline" version */ +/* BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus) */ + mmu_release_scsi_one(ba, n, sdev->bus); +#endif +} + +int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n) +{ +/* BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus) */ + mmu_get_scsi_sgl(sg, n, sdev->bus); + + /* + * XXX sparc64 can return a partial length here. sun4c should do this + * but it currently panics if it can't fulfill the request - Anton */ - base_address = vaddr; - mmu_map_dma_area(base_address, len); - /* Assign the memory area. */ - dvma_next_free = PAGE_ALIGN(dvma_next_free+len); + return n; +} - request_region(base_address, len, name); +void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n) +{ +/* BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus) */ + mmu_release_scsi_sgl(sg, n, sdev->bus); +} +#endif + +/* + * P3: I think a partial flush is permitted... + * We are not too efficient at doing it though. + * + * If only DaveM understood a concept of an allocation cookie, + * we could avoid find_resource_by_hit() here and a major + * performance hit. + */ +void sbus_dma_sync_single(struct sbus_dev *sdev, u32 ba, long size) +{ + unsigned long va; + struct resource *res; + + res = sparc_find_resource_by_hit(&sparc_dvma, ba); + if (res == NULL) + panic("sbus_dma_sync_single: 0x%x\n", ba); + + va = (unsigned long) res->name; + /* if (va == 0) */ + + mmu_inval_dma_area(va, (res->end - res->start) + 1); +} + +void sbus_dma_sync_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n) +{ + printk("dma_sync_sg: not implemented yet\n"); +} + +/* + * This is a version of find_resource and it belongs to kernel/resource.c. + * Until we have agreement with Linus and Martin, it lingers here. + * + * "same start" is more strict than "hit into" + */ +struct resource * +sparc_find_resource_bystart(struct resource *root, unsigned long start) +{ + struct resource *tmp; + + for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { + if (tmp->start == start) + return tmp; + } + return NULL; +} + +struct resource * +sparc_find_resource_by_hit(struct resource *root, unsigned long hit) +{ + struct resource *tmp; - return (void *) base_address; + for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { + if (tmp->start <= hit && tmp->end >= hit) + return tmp; + } + return NULL; } diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index fb4e0dab1..c89d04872 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -1,4 +1,4 @@ -/* $Id: irq.c,v 1.97 1999/09/10 10:40:21 davem Exp $ +/* $Id: irq.c,v 1.99 1999/12/27 06:08:29 anton Exp $ * arch/sparc/kernel/irq.c: Interrupt request handling routines. 
On the * Sparc the IRQ's are basically 'cast in stone' * and you are supposed to probe the prom's device @@ -40,6 +40,7 @@ #include <asm/traps.h> #include <asm/irq.h> #include <asm/io.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/hardirq.h> #include <asm/softirq.h> @@ -726,7 +727,7 @@ void __init init_IRQ(void) case sun4m: #ifdef CONFIG_PCI pcic_probe(); - if (pci_present()) { + if (pcic_present()) { sun4m_pci_init_IRQ(); break; } diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 53bf4623b..94847460b 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -1,4 +1,4 @@ -/* $Id: pcic.c,v 1.8 1999/08/31 06:54:22 davem Exp $ +/* $Id: pcic.c,v 1.11 1999/11/25 05:22:05 zaitcev Exp $ * pcic.c: Sparc/PCI controller support * * Copyright (C) 1998 V. Roganov and G. Raiko @@ -22,17 +22,6 @@ #include <asm/swift.h> /* for cache flushing. */ #include <asm/io.h> -#undef PROM_DEBUG -#undef FIXUP_REGS_DEBUG -#undef FIXUP_IRQ_DEBUG -#undef FIXUP_VMA_DEBUG - -#ifdef PROM_DEBUG -#define dprintf prom_printf -#else -#define dprintf printk -#endif - #include <linux/ctype.h> #include <linux/pci.h> #include <linux/timex.h> @@ -57,7 +46,7 @@ asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long len, unsigned char *buf) { - return 0; + return -EINVAL; } asmlinkage int sys_pciconfig_write(unsigned long bus, @@ -66,11 +55,19 @@ asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long len, unsigned char *buf) { - return 0; + return -EINVAL; } #else +#ifdef CONFIG_SUN_JSFLASH +extern int jsflash_init(void); +#endif + +struct pci_fixup pcibios_fixups[] = { + { 0 } +}; + unsigned int pcic_pin_to_irq(unsigned int pin, char *name); /* @@ -120,7 +117,8 @@ static struct pcic_ca2irq pcic_i_jse[] = { { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ { 0, 0x01, 1, 6, 0 }, /* hme */ { 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */ - { 0, 0x18, 6, 8, 0 }, /* PCI INTA# in Slot 1 */ + { 0, 0x10, 6, 8, 0 }, /* PCI INTA# in Slot 1 */ + { 0, 0x18, 7, 12, 0 }, /* PCI INTA# in Slot 2, shared w. RTC */ { 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */ { 0, 0x80, 5, 11, 0 }, /* EIDE */ /* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */ @@ -144,6 +142,16 @@ static struct pcic_ca2irq pcic_i_se6[] = { }; /* + * Krups (courtesy of Varol Kaptan) + * No documentation available, so we guess it, based on Espresso layout. + * Since we always run PROLL on Krups we may put map in there. + */ +static struct pcic_ca2irq pcic_i_jk[] = { + { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ + { 0, 0x01, 1, 6, 0 }, /* hme */ +}; + +/* * Several entries in this list may point to the same routing map * as several PROMs may be installed on the same physical board. */ @@ -154,11 +162,17 @@ static struct pcic_sn2list pcic_known_sysnames[] = { SN2L_INIT("JE-1-name", pcic_i_je1), /* XXX Gleb, put name here, pls */ SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */ SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */ + SN2L_INIT("SUNW,JS-NC", pcic_i_jk), /* PROLL JavaStation-NC */ + SN2L_INIT("SUNW,JSIIep", pcic_i_jk), /* OBP JavaStation-NC */ { NULL, NULL, 0 } }; -static struct linux_pcic PCIC; -static struct linux_pcic *pcic = NULL; +/* + * Only one PCIC per IIep, + * and since we have no SMP IIep, only one per system. 
+ */ +static int pcic0_up = 0; +static struct linux_pcic pcic0; unsigned int pcic_regs; volatile int pcic_speculative; @@ -167,23 +181,144 @@ volatile int pcic_trapped; static void pci_do_gettimeofday(struct timeval *tv); static void pci_do_settimeofday(struct timeval *tv); -void __init pcic_probe(void) +#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) + +static int pcic_read_config_dword(struct pci_dev *dev, int where, u32 *value); +static int pcic_write_config_dword(struct pci_dev *dev, int where, u32 value); + +static int pcic_read_config_byte(struct pci_dev *dev, int where, u8 *value) +{ + unsigned int v; + + pcic_read_config_dword(dev, where&~3, &v); + *value = 0xff & (v >> (8*(where & 3))); + return PCIBIOS_SUCCESSFUL; +} + +static int pcic_read_config_word(struct pci_dev *dev, int where, u16 *value) +{ + unsigned int v; + if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER; + + pcic_read_config_dword(dev, where&~3, &v); + *value = 0xffff & (v >> (8*(where & 3))); + return PCIBIOS_SUCCESSFUL; +} + +static int pcic_read_config_dword(struct pci_dev *dev, int where, u32 *value) +{ + unsigned char bus = dev->bus->number; + unsigned char device_fn = dev->devfn; + /* unsigned char where; */ + + struct linux_pcic *pcic; + unsigned long flags; + + if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER; + if (bus != 0) return PCIBIOS_DEVICE_NOT_FOUND; + pcic = &pcic0; + + save_and_cli(flags); +#if 0 /* does not fail here */ + pcic_speculative = 1; + pcic_trapped = 0; +#endif + writel(CONFIG_CMD(bus,device_fn,where), pcic->pcic_config_space_addr); +#if 0 /* does not fail here */ + nop(); + if (pcic_trapped) { + restore_flags(flags); + *value = ~0; + return PCIBIOS_SUCCESSFUL; + } +#endif + pcic_speculative = 2; + pcic_trapped = 0; + *value = readl(pcic->pcic_config_space_data + (where&4)); + nop(); + if (pcic_trapped) { + pcic_speculative = 0; + restore_flags(flags); + *value = ~0; + return PCIBIOS_SUCCESSFUL; + } + pcic_speculative = 0; + restore_flags(flags); + return PCIBIOS_SUCCESSFUL; +} + +static int pcic_write_config_byte(struct pci_dev *dev, int where, u8 value) { + unsigned int v; + + pcic_read_config_dword(dev, where&~3, &v); + v = (v & ~(0xff << (8*(where&3)))) | + ((0xff&(unsigned)value) << (8*(where&3))); + return pcic_write_config_dword(dev, where&~3, v); +} + +static int pcic_write_config_word(struct pci_dev *dev, int where, u16 value) +{ + unsigned int v; + + if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER; + pcic_read_config_dword(dev, where&~3, &v); + v = (v & ~(0xffff << (8*(where&3)))) | + ((0xffff&(unsigned)value) << (8*(where&3))); + return pcic_write_config_dword(dev, where&~3, v); +} + +static int pcic_write_config_dword(struct pci_dev *dev, int where, u32 value) +{ + unsigned char bus = dev->bus->number; + unsigned char devfn = dev->devfn; + struct linux_pcic *pcic; + unsigned long flags; + + if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER; + if (bus != 0) return PCIBIOS_DEVICE_NOT_FOUND; + pcic = &pcic0; + + save_and_cli(flags); + writel(CONFIG_CMD(bus,devfn,where), pcic->pcic_config_space_addr); + writel(value, pcic->pcic_config_space_data + (where&4)); + restore_flags(flags); + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops pcic_ops = { + pcic_read_config_byte, + pcic_read_config_word, + pcic_read_config_dword, + pcic_write_config_byte, + pcic_write_config_word, + pcic_write_config_dword, +}; + +/* + * On sparc64 pcibios_init() calls pci_controller_probe(). 
+ * We want PCIC probed little ahead so that interrupt controller + * would be operational. + */ +int __init pcic_probe(void) +{ + struct linux_pcic *pcic; struct linux_prom_registers regs[PROMREG_MAX]; struct linux_pbm_info* pbm; char namebuf[64]; int node; int err; - if (pcibios_present()) { + if (pcic0_up) { prom_printf("PCIC: called twice!\n"); prom_halt(); } + pcic = &pcic0; node = prom_getchild (prom_root_node); node = prom_searchsiblings (node, "pci"); if (node == 0) - return; + return -ENODEV; /* * Map in PCIC register set, config space, and IO base */ @@ -193,31 +328,27 @@ void __init pcic_probe(void) "from PROM.\n"); prom_halt(); } - - pcic = &PCIC; - pcic->pcic_regs = (unsigned long)sparc_alloc_io(regs[0].phys_addr, NULL, - regs[0].reg_size, - "PCIC Registers", 0, 0); + pcic0_up = 1; + + pcic->pcic_res_regs.name = "pcic_registers"; + pcic->pcic_regs = (unsigned long) + ioremap(regs[0].phys_addr, regs[0].reg_size); if (!pcic->pcic_regs) { prom_printf("PCIC: Error, cannot map PCIC registers.\n"); prom_halt(); } - pcic->pcic_io_phys = regs[1].phys_addr; - pcic->pcic_io = (unsigned long)sparc_alloc_io(regs[1].phys_addr, NULL, - regs[1].reg_size, - "PCIC IO Base", 0, 0); - if (pcic->pcic_io == 0UL) { + pcic->pcic_res_io.name = "pcic_io"; + if ((pcic->pcic_io = (unsigned long) + ioremap(regs[1].phys_addr, 0x10000)) == 0) { prom_printf("PCIC: Error, cannot map PCIC IO Base.\n"); prom_halt(); } - pcic->pcic_config_space_addr = - (unsigned long)sparc_alloc_io (regs[2].phys_addr, NULL, - regs[2].reg_size * 2, - "PCI Config Space Address", 0, 0); - if (pcic->pcic_config_space_addr == 0UL) { + pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; + if ((pcic->pcic_config_space_addr = (unsigned long) + ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) { prom_printf("PCIC: Error, cannot map" "PCI Configuration Space Address.\n"); prom_halt(); @@ -227,11 +358,9 @@ void __init pcic_probe(void) * Docs say three least significant bits in address and data * must be the same. Thus, we need adjust size of data. 
*/ - pcic->pcic_config_space_data = - (unsigned long)sparc_alloc_io (regs[3].phys_addr, NULL, - regs[3].reg_size * 2, - "PCI Config Space Data", 0, 0); - if (pcic->pcic_config_space_data == 0UL) { + pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; + if ((pcic->pcic_config_space_data = (unsigned long) + ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) { prom_printf("PCIC: Error, cannot map" "PCI Configuration Space Data.\n"); prom_halt(); @@ -239,7 +368,7 @@ void __init pcic_probe(void) pbm = &pcic->pbm; pbm->prom_node = node; - prom_getstring(node, "name", namebuf, sizeof(namebuf)); + prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; strcpy(pbm->prom_name, namebuf); { @@ -254,7 +383,7 @@ void __init pcic_probe(void) pcic_regs = pcic->pcic_regs; } - prom_getstring(prom_root_node, "name", namebuf, sizeof(namebuf)); + prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0; { struct pcic_sn2list *p; @@ -272,21 +401,37 @@ void __init pcic_probe(void) printk("PCIC: System %s is unknown, cannot route interrupts\n", namebuf); } + + return 0; } +static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic) +{ + struct linux_pbm_info *pbm = &pcic->pbm; + + pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm); +#if 0 /* deadwood transplanted from sparc64 */ + pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); + pci_record_assignments(pbm, pbm->pci_bus); + pci_assign_unassigned(pbm, pbm->pci_bus); + pci_fixup_irq(pbm, pbm->pci_bus); +#endif +} + +/* + * Main entry point from the PCI subsystem. + */ void __init pcibios_init(void) { + struct linux_pcic *pcic; + /* * PCIC should be initialized at start of the timer. * So, here we report the presence of PCIC and do some magic passes. */ - if(!pcic) + if(!pcic0_up) return; - - printk("PCIC MAP: config addr=0x%lx; config data=0x%lx, " - "regs=0x%lx io=0x%lx\n", - pcic->pcic_config_space_addr, pcic->pcic_config_space_data, - pcic->pcic_regs, pcic->pcic_io); + pcic = &pcic0; /* * Switch off IOTLB translation. @@ -302,11 +447,18 @@ void __init pcibios_init(void) writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0); writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY, pcic->pcic_regs+PCI_BASE_ADDRESS_0); + + pcic_pbm_scan_bus(pcic); + + ebus_init(); +#ifdef CONFIG_SUN_JSFLASH + jsflash_init(); +#endif } -int pcibios_present(void) +int pcic_present(void) { - return pcic != NULL; + return pcic0_up; } static int __init pdev_to_pnode(struct linux_pbm_info *pbm, @@ -334,91 +486,77 @@ static inline struct pcidev_cookie *pci_devcookie_alloc(void) return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC); } -static void pcic_map_pci_device (struct pci_dev *dev, int node) { - struct linux_prom_pci_assigned_addresses addrs[6]; - int addrlen; - int i, j; - - /* Is any valid address present ? 
*/ - i = 0; - for(j = 0; j < 6; j++) - if (dev->base_address[j]) i++; - if (!i) return; /* nothing to do */ +static void pcic_map_pci_device(struct linux_pcic *pcic, + struct pci_dev *dev, int node) +{ + char namebuf[64]; + unsigned long address; + unsigned long flags; + int j; if (node == 0 || node == -1) { - printk("PCIC: no prom node for device ID (%x,%x)\n", - dev->device, dev->vendor); - return; - } - - /* - * find related address and get it's window length - */ - addrlen = prom_getproperty(node,"assigned-addresses", - (char*)addrs, sizeof(addrs)); - if (addrlen == -1) { - printk("PCIC: no \"assigned-addresses\" for device (%x,%x)\n", - dev->device, dev->vendor); - return; + strcpy(namebuf, "???"); + } else { + prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; } - addrlen /= sizeof(struct linux_prom_pci_assigned_addresses); - for (i = 0; i < addrlen; i++ ) - for (j = 0; j < 6; j++) { - if (!dev->base_address[j] || !addrs[i].phys_lo) - continue; - if (addrs[i].phys_lo == dev->base_address[j]) { - unsigned long address = dev->base_address[j]; - int length = addrs[i].size_lo; - char namebuf[128] = { 0, }; - unsigned long mapaddr, addrflags; - - prom_getstring(node, "name", namebuf, sizeof(namebuf)); - - /* - * failure in allocation too large space - */ - if (length > 0x200000) { - length = 0x200000; - prom_printf("PCIC: map window for device '%s' " - "reduced to 2MB !\n", namebuf); - } - - /* - * Be careful with MEM/IO address flags - */ - if ((address & PCI_BASE_ADDRESS_SPACE) == - PCI_BASE_ADDRESS_SPACE_IO) { - mapaddr = address & PCI_BASE_ADDRESS_IO_MASK; + for (j = 0; j < 6; j++) { + address = dev->resource[j].start; + if (address == 0) break; /* are sequential */ + flags = dev->resource[j].flags; + if ((flags & IORESOURCE_IO) != 0) { + if (address < 0x10000) { + /* + * A device responds to I/O cycles on PCI. + * We generate these cycles with memory + * access into the fixed map (phys 0x30000000). + * + * Since a device driver does not want to + * do ioremap() before accessing PC-style I/O, + * we supply virtual, ready to access address. + * + * Ebus devices do not come here even if + * CheerIO makes a similar conversion. + * See ebus.c for details. + * + * Note that check_region()/request_region() + * work for these devices. + * + * XXX Neat trick, but it's a *bad* idea + * to shit into regions like that. + * What if we want to allocate one more + * PCI base address... + */ + dev->resource[j].start = + pcic->pcic_io + address; + dev->resource[j].end = 1; /* XXX */ + dev->resource[j].flags = + (flags & ~IORESOURCE_IO) | IORESOURCE_MEM; } else { - mapaddr = address & PCI_BASE_ADDRESS_MEM_MASK; + /* + * OOPS... PCI Spec allows this. Sun does + * not have any devices getting above 64K + * so it must be user with a weird I/O + * board in a PCI slot. We must remap it + * under 64K but it is not done yet. 
XXX + */ + printk("PCIC: Skipping I/O space at 0x%lx," + "this will Oops if a driver attaches;" + "device '%s' (%x,%x)\n", address, namebuf, + dev->device, dev->vendor); } - addrflags = address ^ mapaddr; - - dev->base_address[j] = - (unsigned long)sparc_alloc_io(address, 0, - length, - namebuf, 0, 0); - if ( dev->base_address[j] == 0 ) - panic("PCIC: failed make mapping for " - "pci device '%s' with address %lx\n", - namebuf, address); - - dev->base_address[j] ^= addrflags; - return; } - } - - printk("PCIC: unable to match addresses for device (%x,%x)\n", - dev->device, dev->vendor); + } } -static void pcic_fill_irq(struct pci_dev *dev, int node) { +static void +pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) +{ struct pcic_ca2irq *p; int i, ivec; char namebuf[64]; /* P3 remove */ - if (node == -1) { + if (node == 0 || node == -1) { strcpy(namebuf, "???"); } else { prom_getstring(node, "name", namebuf, sizeof(namebuf)); /* P3 remove */ @@ -474,53 +612,33 @@ static void pcic_fill_irq(struct pci_dev *dev, int node) { } /* - * Assign IO space for a device. - * This is a chance for devices which have the same IO and Mem Space to - * fork access to IO and Mem. - * - * Now, we assume there is one such device only (IGA 1682) but code below - * should work in cases when space of all such devices is less then 16MB. - */ -unsigned long pcic_alloc_io( unsigned long* addr ) -{ - unsigned long paddr = *addr; - unsigned long offset; - - if(pcic->pcic_mapped_io == 0) { - pcic->pcic_mapped_io = paddr & ~(PCI_SPACE_SIZE-1) ; - writeb((pcic->pcic_mapped_io>>24) & 0xff, - pcic->pcic_regs+PCI_PIBAR); - writeb((pcic->pcic_io_phys>>24) & PCI_SIBAR_ADDRESS_MASK, - pcic->pcic_regs+PCI_SIBAR); - writeb(PCI_ISIZE_16M, pcic->pcic_regs+PCI_ISIZE); - - } - if(paddr < pcic->pcic_mapped_io || - paddr >= pcic->pcic_mapped_io + 0x10000) - return 0; - offset = paddr - pcic->pcic_mapped_io; - *addr = pcic->pcic_io_phys + offset; - return pcic->pcic_io + offset; -} - -/* - * Stolen from both i386 and sparc64 branch + * Normally called from {do_}pci_scan_bus... 
*/ -void __init pcibios_fixup(void) +void __init pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev; - int i, has_io, has_mem; - unsigned short cmd; - struct linux_pbm_info* pbm = &pcic->pbm; + struct pci_dev *dev; + int i, has_io, has_mem; + unsigned short cmd; + struct linux_pcic *pcic; + /* struct linux_pbm_info* pbm = &pcic->pbm; */ int node; struct pcidev_cookie *pcp; - if(pcic == NULL) { - prom_printf("PCI: Error, PCIC not found.\n"); - prom_halt(); + if (!pcic0_up) { + printk("pcibios_fixup_bus: no PCIC\n"); + return; } + pcic = &pcic0; - for (dev = pci_devices; dev; dev=dev->next) { + /* + * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); + */ + if (bus->number != 0) { + printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number); + return; + } + + for (dev = bus->devices; dev; dev = dev->sibling) { /* * Comment from i386 branch: * There are buggy BIOSes that forget to enable I/O and memory @@ -531,44 +649,42 @@ void __init pcibios_fixup(void) */ has_io = has_mem = 0; for(i=0; i<6; i++) { - unsigned long a = dev->base_address[i]; - if (a & PCI_BASE_ADDRESS_SPACE_IO) { + unsigned long f = dev->resource[i].flags; + if (f & IORESOURCE_IO) { has_io = 1; - } else if (a & PCI_BASE_ADDRESS_MEM_MASK) + } else if (f & IORESOURCE_MEM) has_mem = 1; } - pci_read_config_word(dev, PCI_COMMAND, &cmd); + pcic_read_config_word(dev, PCI_COMMAND, &cmd); if (has_io && !(cmd & PCI_COMMAND_IO)) { printk("PCIC: Enabling I/O for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_IO; - pci_write_config_word(dev, PCI_COMMAND, cmd); + pcic_write_config_word(dev, PCI_COMMAND, cmd); } if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { printk("PCIC: Enabling memory for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_MEMORY; - pci_write_config_word(dev, PCI_COMMAND, cmd); + pcic_write_config_word(dev, PCI_COMMAND, cmd); } - node = pdev_to_pnode(pbm, dev); + node = pdev_to_pnode(&pcic->pbm, dev); if(node == 0) node = -1; /* cookies */ pcp = pci_devcookie_alloc(); - pcp->pbm = pbm; + pcp->pbm = &pcic->pbm; pcp->prom_node = node; dev->sysdata = pcp; - /* memory mapping */ + /* fixing I/O to look like memory */ if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE) - pcic_map_pci_device(dev, node); + pcic_map_pci_device(pcic, dev, node); - pcic_fill_irq(dev, node); + pcic_fill_irq(pcic, dev, node); } - - ebus_init(); } /* @@ -577,6 +693,7 @@ void __init pcibios_fixup(void) unsigned int pcic_pin_to_irq(unsigned int pin, char *name) { + struct linux_pcic *pcic = &pcic0; unsigned int irq; unsigned int ivec; @@ -599,7 +716,7 @@ static volatile int pcic_timer_dummy; static void pcic_clear_clock_irq(void) { - pcic_timer_dummy = readl(pcic->pcic_regs+PCI_SYS_LIMIT); + pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT); } static void pcic_timer_handler (int irq, void *h, struct pt_regs *regs) @@ -613,6 +730,7 @@ static void pcic_timer_handler (int irq, void *h, struct pt_regs *regs) void __init pci_time_init(void) { + struct linux_pcic *pcic = &pcic0; unsigned long v; int timer_irq, irq; @@ -620,9 +738,9 @@ void __init pci_time_init(void) /* A hack until do_gettimeofday prototype is moved to arch specific headers and btfixupped. 
Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ ((unsigned int *)do_gettimeofday)[0] = - 0x10800000 | ((((unsigned long)pci_do_gettimeofday - (unsigned long)do_gettimeofday) >> 2) & 0x003fffff); - ((unsigned int *)do_gettimeofday)[1] = - 0x01000000; + 0x10800000 | ((((unsigned long)pci_do_gettimeofday - + (unsigned long)do_gettimeofday) >> 2) & 0x003fffff); + ((unsigned int *)do_gettimeofday)[1] = 0x01000000; BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM); btfixup(); @@ -650,7 +768,7 @@ static __inline__ unsigned long do_gettimeoffset(void) * to have microsecond resolution and to avoid overflow */ unsigned long count = - readl(pcic->pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; + readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); if(test_bit(TIMER_BH, &bh_active)) @@ -705,103 +823,9 @@ static void watchdog_reset() { } #endif -#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) - -int pcibios_read_config_byte(unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned char *value) -{ - unsigned int v; - - pcibios_read_config_dword (bus, device_fn, where&~3, &v); - *value = 0xff & (v >> (8*(where & 3))); - return PCIBIOS_SUCCESSFUL; -} - -int pcibios_read_config_word (unsigned char bus, - unsigned char device_fn, - unsigned char where, unsigned short *value) -{ - unsigned int v; - if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER; - - pcibios_read_config_dword (bus, device_fn, where&~3, &v); - *value = 0xffff & (v >> (8*(where & 3))); - return PCIBIOS_SUCCESSFUL; -} - -int pcibios_read_config_dword (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned int *value) -{ - unsigned long flags; - - if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER; - - save_and_cli(flags); -#if 0 - pcic_speculative = 1; - pcic_trapped = 0; -#endif - writel(CONFIG_CMD(bus,device_fn,where), pcic->pcic_config_space_addr); -#if 0 - nop(); - if (pcic_trapped) { - restore_flags(flags); - *value = ~0; - return PCIBIOS_SUCCESSFUL; - } -#endif - pcic_speculative = 2; - pcic_trapped = 0; - *value = readl(pcic->pcic_config_space_data + (where&4)); - nop(); - if (pcic_trapped) { - pcic_speculative = 0; - restore_flags(flags); - *value = ~0; - return PCIBIOS_SUCCESSFUL; - } - pcic_speculative = 0; - restore_flags(flags); - return PCIBIOS_SUCCESSFUL; -} - -int pcibios_write_config_byte (unsigned char bus, unsigned char devfn, - unsigned char where, unsigned char value) -{ - unsigned int v; - - pcibios_read_config_dword (bus, devfn, where&~3, &v); - v = (v & ~(0xff << (8*(where&3)))) | - ((0xff&(unsigned)value) << (8*(where&3))); - return pcibios_write_config_dword (bus, devfn, where&~3, v); -} - -int pcibios_write_config_word (unsigned char bus, unsigned char devfn, - unsigned char where, unsigned short value) -{ - unsigned int v; - if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER; - - pcibios_read_config_dword (bus, devfn, where&~3, &v); - v = (v & ~(0xffff << (8*(where&3)))) | - ((0xffff&(unsigned)value) << (8*(where&3))); - return pcibios_write_config_dword (bus, devfn, where&~3, v); -} - -int pcibios_write_config_dword (unsigned char bus, unsigned char devfn, - unsigned char where, unsigned int value) -{ - unsigned long flags; - - if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER; - - save_and_cli(flags); - writel(CONFIG_CMD(bus,devfn,where),pcic->pcic_config_space_addr); - 
writel(value, pcic->pcic_config_space_data + (where&4)); - restore_flags(flags); - return PCIBIOS_SUCCESSFUL; -} - +/* + * Other archs parse arguments here. + */ char * __init pcibios_setup(char *str) { return str; @@ -831,6 +855,9 @@ void pcic_nmi(unsigned int pend, struct pt_regs *regs) } /* + * XXX Gleb wrote me that he needs this for X server (only). + * Since we successfuly use XF86_FBDev, we do not need these anymore. + * * Following code added to handle extra PCI-related system calls */ asmlinkage int sys_pciconfig_read(unsigned long bus, @@ -870,7 +897,7 @@ asmlinkage int sys_pciconfig_read(unsigned long bus, return err; } - + asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn, unsigned long off, @@ -936,7 +963,7 @@ static void pcic_disable_irq(unsigned int irq_nr) mask = get_irqmask(irq_nr); save_and_cli(flags); - writel(mask, pcic->pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); + writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); restore_flags(flags); } @@ -946,7 +973,7 @@ static void pcic_enable_irq(unsigned int irq_nr) mask = get_irqmask(irq_nr); save_and_cli(flags); - writel(mask, pcic->pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); + writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); restore_flags(flags); } @@ -965,12 +992,12 @@ static void pcic_load_profile_irq(int cpu, unsigned int limit) */ static void pcic_disable_pil_irq(unsigned int pil) { - writel(get_irqmask(pil), pcic->pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); + writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); } static void pcic_enable_pil_irq(unsigned int pil) { - writel(get_irqmask(pil), pcic->pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); + writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); } void __init sun4m_pci_init_IRQ(void) @@ -985,8 +1012,9 @@ void __init sun4m_pci_init_IRQ(void) BTFIXUPSET_CALL(__irq_itoa, pcic_irq_itoa, BTFIXUPCALL_NORM); } -void __init pcibios_fixup_bus(struct pci_bus *bus) +int pcibios_assign_resource(struct pci_dev *pdev, int resource) { + return -ENXIO; } #endif diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 88db458e0..195f26df5 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c @@ -1,4 +1,4 @@ -/* $Id: process.c,v 1.139 1999/08/14 03:51:14 anton Exp $ +/* $Id: process.c,v 1.142 1999/12/27 06:08:31 anton Exp $ * linux/arch/sparc/kernel/process.c * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -33,6 +33,7 @@ #include <asm/uaccess.h> #include <asm/system.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/delay.h> #include <asm/processor.h> @@ -55,7 +56,6 @@ int cpu_idle(void) { int ret = -EPERM; - lock_kernel(); if (current->pid != 0) goto out; @@ -100,7 +100,6 @@ int cpu_idle(void) } ret = 0; out: - unlock_kernel(); return ret; } @@ -188,9 +187,7 @@ void show_regwindow(struct reg_window *rw) rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); } -#ifdef __SMP__ static spinlock_t sparc_backtrace_lock = SPIN_LOCK_UNLOCKED; -#endif void __show_backtrace(unsigned long fp) { @@ -702,37 +699,3 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) "g1", "g2", "g3", "o0", "o1", "memory", "cc"); return retval; } - -/* - * These bracket the sleeping functions.. 
- */ -extern void scheduling_functions_start_here(void); -extern void scheduling_functions_end_here(void); -#define first_sched ((unsigned long) scheduling_functions_start_here) -#define last_sched ((unsigned long) scheduling_functions_end_here) - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long pc, fp, bias = 0; - unsigned long task_base = (unsigned long) p; - struct reg_window *rw; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - - fp = p->thread.ksp + bias; - do { - /* Bogus frame pointer? */ - if (fp < (task_base + sizeof(struct task_struct)) || - fp >= (task_base + (2 * PAGE_SIZE))) - break; - rw = (struct reg_window *) fp; - pc = rw->ins[7]; - if (pc < first_sched || pc >= last_sched) - return pc; - fp = rw->ins[6] + bias; - } while (++count < 16); - return 0; -} -#undef last_sched -#undef first_sched diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c index 2f2c8ecc7..1dcf35d42 100644 --- a/arch/sparc/kernel/semaphore.c +++ b/arch/sparc/kernel/semaphore.c @@ -1,5 +1,5 @@ -/* $Id: semaphore.c,v 1.1 1999/08/31 13:26:15 anton Exp $ - * Generic semaphore code. Buyer beware. Do your own +/* $Id: semaphore.c,v 1.2 1999/12/28 11:50:37 jj Exp $ + * Generic semaphore code. Buyer beware. Do your own * specific changes in <asm/semaphore-helper.h> */ @@ -62,8 +62,7 @@ void __up(struct semaphore *sem) #define DOWN_VAR \ struct task_struct *tsk = current; \ - wait_queue_t wait; \ - init_waitqueue_entry(&wait, tsk); + DECLARE_WAITQUEUE(wait, tsk); #define DOWN_HEAD(task_state) \ \ @@ -127,3 +126,115 @@ int __down_trylock(struct semaphore * sem) { return waking_non_zero_trylock(sem); } + +/* rw mutexes + * Implemented by Jakub Jelinek (jakub@redhat.com) based on + * i386 implementation by Ben LaHaise (bcrl@redhat.com). + */ + +extern inline int ldstub(unsigned char *p) +{ + int ret; + asm volatile("ldstub %1, %0" : "=r" (ret) : "m" (*p) : "memory"); + return ret; +} + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (!ldstub(&sem->read_not_granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->read_not_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (!ldstub(&sem->write_not_granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + if (sem->write_not_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (sem->count >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. 
+ */ +void down_write_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + if (sem->count >= 0) + break; /* we must attempt to aquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + /* Due to lame ldstub we don't do here + a BUG() consistency check */ + sem->read_not_granted = 0; + wake_up(&sem->wait); + } else { + sem->write_not_granted = 0; + wake_up(&sem->write_bias_wait); + } +} diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c index c812e1b5f..60928d7aa 100644 --- a/arch/sparc/kernel/setup.c +++ b/arch/sparc/kernel/setup.c @@ -1,4 +1,4 @@ -/* $Id: setup.c,v 1.111 1999/09/10 10:40:24 davem Exp $ +/* $Id: setup.c,v 1.113 1999/12/16 14:37:35 anton Exp $ * linux/arch/sparc/kernel/setup.c * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -124,7 +124,9 @@ unsigned int boot_flags; #ifdef CONFIG_SUN_CONSOLE static int console_fb = 0; #endif -static unsigned long memory_size __initdata = 0; + +/* Exported for mm/init.c:paging_init. */ +unsigned long cmdline_memory_size __initdata = 0; void kernel_enter_debugger(void) { @@ -238,13 +240,13 @@ static void __init boot_flags_init(char *commands) * "mem=XXX[kKmM] overrides the PROM-reported * memory size. */ - memory_size = simple_strtoul(commands + 4, + cmdline_memory_size = simple_strtoul(commands + 4, &commands, 0); if (*commands == 'K' || *commands == 'k') { - memory_size <<= 10; + cmdline_memory_size <<= 10; commands++; } else if (*commands=='M' || *commands=='m') { - memory_size <<= 20; + cmdline_memory_size <<= 20; commands++; } } @@ -266,7 +268,7 @@ extern char cputypval; extern unsigned long start, end; extern void panic_setup(char *, int *); extern void srmmu_end_memory(unsigned long, unsigned long *); -extern unsigned long sun_serial_setup(unsigned long); +extern void sun_serial_setup(void); extern unsigned short root_flags; extern unsigned short root_dev; @@ -297,10 +299,10 @@ static struct console prom_console = { "PROM", prom_cons_write, 0, 0, 0, 0, 0, CON_PRINTBUFFER, 0, 0, 0 }; -void __init setup_arch(char **cmdline_p, - unsigned long * memory_start_p, unsigned long * memory_end_p) +void __init setup_arch(char **cmdline_p) { - int total, i, packed; + int i; + unsigned long highest_paddr; sparc_ttable = (struct tt_entry *) &start; @@ -329,27 +331,21 @@ void __init setup_arch(char **cmdline_p, strcpy(&cputypval, "ap+"); #endif printk("ARCH: "); - packed = 0; switch(sparc_cpu_model) { case sun4: printk("SUN4\n"); - packed = 0; break; case sun4c: printk("SUN4C\n"); - packed = 0; break; case sun4m: printk("SUN4M\n"); - packed = 1; break; case sun4d: printk("SUN4D\n"); - packed = 1; break; case sun4e: printk("SUN4E\n"); - packed = 0; break; case sun4u: printk("SUN4U\n"); @@ -357,7 +353,6 @@ void __init setup_arch(char **cmdline_p, case ap1000: register_console(&prom_console); printk("AP1000\n"); - packed = 1; break; default: printk("UNKNOWN!\n"); @@ -375,26 +370,20 @@ void __init setup_arch(char **cmdline_p, if (ARCH_SUN4C_SUN4) sun4c_probe_vac(); load_mmu(); - total = prom_probe_memory(); - *memory_start_p = PAGE_ALIGN(((unsigned long) &end)); - - if(!packed) { - for(i=0; sp_banks[i].num_bytes != 0; i++) { - end_of_phys_memory = sp_banks[i].base_addr + - 
sp_banks[i].num_bytes; - if (memory_size) { - if (end_of_phys_memory > memory_size) { - sp_banks[i].num_bytes -= - (end_of_phys_memory - memory_size); - end_of_phys_memory = memory_size; - sp_banks[++i].base_addr = 0xdeadbeef; - sp_banks[i].num_bytes = 0; - } - } - } - *memory_end_p = (end_of_phys_memory + KERNBASE); - } else - srmmu_end_memory(memory_size, memory_end_p); + (void) prom_probe_memory(); + + phys_base = 0xffffffffUL; + highest_paddr = 0UL; + for (i = 0; sp_banks[i].num_bytes != 0; i++) { + unsigned long top; + + if (sp_banks[i].base_addr < phys_base) + phys_base = sp_banks[i].base_addr; + top = sp_banks[i].base_addr + + sp_banks[i].num_bytes; + if (highest_paddr < top) + highest_paddr = top; + } if (!root_flags) root_mountflags &= ~MS_RDONLY; @@ -405,6 +394,7 @@ void __init setup_arch(char **cmdline_p, rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); #endif #ifdef CONFIG_BLK_DEV_INITRD +// FIXME needs to do the new bootmem alloc stuff if (sparc_ramdisk_image) { initrd_start = sparc_ramdisk_image; if (initrd_start < KERNBASE) initrd_start += KERNBASE; @@ -434,7 +424,10 @@ void __init setup_arch(char **cmdline_p, prom_setsync(prom_sync_me); #ifdef CONFIG_SUN_SERIAL - *memory_start_p = sun_serial_setup(*memory_start_p); /* set this up ASAP */ +#if 0 + /* XXX We can't do this until the bootmem allocator is working. */ + sun_serial_setup(); /* set this up ASAP */ +#endif #endif { #if !CONFIG_SUN_SERIAL @@ -489,11 +482,10 @@ void __init setup_arch(char **cmdline_p, breakpoint(); } - /* Due to stack alignment restrictions and assumptions... */ init_mm.mmap->vm_page_prot = PAGE_SHARED; - init_mm.mmap->vm_start = KERNBASE; - init_mm.mmap->vm_end = *memory_end_p; + init_mm.mmap->vm_start = PAGE_OFFSET; + init_mm.mmap->vm_end = PAGE_OFFSET + highest_paddr; init_mm.context = (unsigned long) NO_CONTEXT; init_task.thread.kregs = &fake_swapper_regs; diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c index f2065d20e..ea0c7e1ff 100644 --- a/arch/sparc/kernel/signal.c +++ b/arch/sparc/kernel/signal.c @@ -1,4 +1,4 @@ -/* $Id: signal.c,v 1.95 1999/08/14 03:51:22 anton Exp $ +/* $Id: signal.c,v 1.99 1999/12/27 06:08:32 anton Exp $ * linux/arch/sparc/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds @@ -22,6 +22,7 @@ #include <asm/bitops.h> #include <asm/ptrace.h> #include <asm/svr4.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index e611cd828..58f1be4e8 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -22,6 +22,7 @@ #include <asm/delay.h> #include <asm/irq.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/atops.h> @@ -163,7 +164,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm(mm); } else { xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm); - if(atomic_read(&mm->count) == 1 && current->mm == mm) + if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) mm->cpu_vm_mask = (1 << smp_processor_id()); } } diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c index 8cd0e8b6d..c9bb60a86 100644 --- a/arch/sparc/kernel/sparc-stub.c +++ b/arch/sparc/kernel/sparc-stub.c @@ -1,4 +1,4 @@ -/* $Id: sparc-stub.c,v 1.25 1999/07/23 01:56:13 davem Exp $ +/* $Id: sparc-stub.c,v 1.26 1999/12/27 06:08:34 anton Exp $ * sparc-stub.c: KGDB support for the Linux kernel. 
* * Modifications to run under Linux @@ -107,6 +107,7 @@ #include <asm/traps.h> #include <asm/vac-ops.h> #include <asm/kgdb.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> /* * diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 95c5e37be..0c955178f 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -1,4 +1,4 @@ -/* $Id: sparc_ksyms.c,v 1.79 1999/09/10 10:40:28 davem Exp $ +/* $Id: sparc_ksyms.c,v 1.84 2000/01/07 18:15:14 jj Exp $ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -50,8 +50,6 @@ struct poll { extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *); extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *); -extern unsigned long sunos_mmap(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, unsigned long); void _sigpause_common (unsigned int set, struct pt_regs *); extern void (*__copy_1page)(void *, const void *); extern void __memmove(void *, const void *, __kernel_size_t); @@ -65,6 +63,7 @@ extern char saved_command_line[]; extern void bcopy (const char *, char *, int); extern int __ashrdi3(int, int); +extern int __ashldi3(int, int); extern int __lshrdi3(int, int); extern void dump_thread(struct pt_regs *, struct user *); @@ -118,6 +117,12 @@ EXPORT_SYMBOL_PRIVATE(_global_cli); #endif #endif +/* rw semaphores */ +EXPORT_SYMBOL_NOVERS(___down_read); +EXPORT_SYMBOL_NOVERS(___down_write); +EXPORT_SYMBOL_NOVERS(___up_read); +EXPORT_SYMBOL_NOVERS(___up_write); + EXPORT_SYMBOL(page_offset); EXPORT_SYMBOL(sparc_valid_addr_bitmap); @@ -157,11 +162,10 @@ EXPORT_SYMBOL(mstk48t02_regs); EXPORT_SYMBOL(auxio_register); #endif EXPORT_SYMBOL(request_fast_irq); -EXPORT_SYMBOL(sparc_alloc_io); -EXPORT_SYMBOL(sparc_free_io); EXPORT_SYMBOL(io_remap_page_range); -EXPORT_SYMBOL(iounit_map_dma_init); -EXPORT_SYMBOL(iounit_map_dma_page); + /* P3: iounit_xxx may be needed, sun4d users */ +/* EXPORT_SYMBOL(iounit_map_dma_init); */ +/* EXPORT_SYMBOL(iounit_map_dma_page); */ /* Btfixup stuff cannot have versions, it would be complicated too much */ #ifndef __SMP__ @@ -180,19 +184,27 @@ EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_get_scsi_one)); EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_sgl)); EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_one)); -EXPORT_SYMBOL(_sparc_dvma_malloc); -EXPORT_SYMBOL(sun4c_unmapioaddr); -EXPORT_SYMBOL(srmmu_unmapioaddr); #if CONFIG_SBUS -EXPORT_SYMBOL(SBus_chain); +EXPORT_SYMBOL(sbus_root); EXPORT_SYMBOL(dma_chain); +EXPORT_SYMBOL(sbus_set_sbus64); +EXPORT_SYMBOL(sbus_alloc_consistant); +EXPORT_SYMBOL(sbus_free_consistant); +EXPORT_SYMBOL(sbus_map_single); +EXPORT_SYMBOL(sbus_unmap_single); +EXPORT_SYMBOL(sbus_map_sg); +EXPORT_SYMBOL(sbus_unmap_sg); +EXPORT_SYMBOL(sbus_dma_sync_single); +EXPORT_SYMBOL(sbus_dma_sync_sg); +#endif +#if CONFIG_PCI +/* We do not have modular drivers for PCI devices yet. 
*/ #endif /* Solaris/SunOS binary compatibility */ EXPORT_SYMBOL(svr4_setcontext); EXPORT_SYMBOL(svr4_getcontext); EXPORT_SYMBOL(_sigpause_common); -EXPORT_SYMBOL(sunos_mmap); /* Should really be in linux/kernel/ksyms.c */ EXPORT_SYMBOL(dump_thread); @@ -215,9 +227,9 @@ EXPORT_SYMBOL(prom_getname); EXPORT_SYMBOL(prom_feval); EXPORT_SYMBOL(prom_getbool); EXPORT_SYMBOL(prom_getstring); -EXPORT_SYMBOL(prom_apply_sbus_ranges); EXPORT_SYMBOL(prom_getint); EXPORT_SYMBOL(prom_getintdefault); +EXPORT_SYMBOL(prom_finddevice); EXPORT_SYMBOL(romvec); EXPORT_SYMBOL(__prom_getchild); EXPORT_SYMBOL(__prom_getsibling); @@ -271,6 +283,7 @@ EXPORT_SYMBOL_NOVERS(memcpy); EXPORT_SYMBOL_NOVERS(memset); EXPORT_SYMBOL_NOVERS(memmove); EXPORT_SYMBOL_NOVERS(__ashrdi3); +EXPORT_SYMBOL_NOVERS(__ashldi3); EXPORT_SYMBOL_NOVERS(__lshrdi3); EXPORT_SYMBOL_DOT(rem); @@ -279,5 +292,3 @@ EXPORT_SYMBOL_DOT(mul); EXPORT_SYMBOL_DOT(umul); EXPORT_SYMBOL_DOT(div); EXPORT_SYMBOL_DOT(udiv); - -EXPORT_SYMBOL(get_wchan); diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c index e954c2ec1..a8efdd12a 100644 --- a/arch/sparc/kernel/sun4c_irq.c +++ b/arch/sparc/kernel/sun4c_irq.c @@ -35,6 +35,11 @@ #include <asm/idprom.h> #include <asm/machines.h> +#if 0 +static struct resource sun4c_timer_eb = { "sun4c_timer" }; +static struct resource sun4c_intr_eb = { "sun4c_intr" }; +#endif + /* Pointer to the interrupt enable byte * * Dave Redman (djhr@tadpole.co.uk) @@ -150,10 +155,9 @@ static void __init sun4c_init_timers(void (*counter_fn)(int, void *, struct pt_r sun4c_timers = &sun4_timer; else #endif - sun4c_timers = sparc_alloc_io (SUN_TIMER_PHYSADDR, 0, - sizeof(struct sun4c_timer_info), - "timer", 0x0, 0x0); - + sun4c_timers = ioremap(SUN_TIMER_PHYSADDR, + sizeof(struct sun4c_timer_info)); + /* Have the level 10 timer tick at 100HZ. We don't touch the * level 14 timer limit since we are letting the prom handle * them until we have a real console driver so L1-A works. @@ -190,13 +194,11 @@ void __init sun4c_init_IRQ(void) int ie_node; if (ARCH_SUN4) { - interrupt_enable = - (char *) sparc_alloc_io(sun4_ie_physaddr, 0, - PAGE_SIZE, - "sun4c_interrupts", - 0x0, 0x0); + interrupt_enable = (char *) + ioremap(sun4_ie_physaddr, PAGE_SIZE); } else { - + struct resource phyres; + ie_node = prom_searchsiblings (prom_getchild(prom_root_node), "interrupt-enable"); if(ie_node == 0) @@ -204,11 +206,11 @@ void __init sun4c_init_IRQ(void) /* Depending on the "address" property is bad news... */ prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs)); - interrupt_enable = - (char *) sparc_alloc_io(int_regs[0].phys_addr, 0, - int_regs[0].reg_size, - "sun4c_interrupts", - int_regs[0].which_io, 0x0); + memset(&phyres, 0, sizeof(struct resource)); + phyres.flags = int_regs[0].which_io; + phyres.start = int_regs[0].phys_addr; + interrupt_enable = (char *) sbus_ioremap(&phyres, 0, + int_regs[0].reg_size, "sun4c_intr"); } BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM); diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index 9b9112d4c..dbbb4edca 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -1,4 +1,4 @@ -/* $Id: sun4d_irq.c,v 1.20 1999/09/10 10:40:30 davem Exp $ +/* $Id: sun4d_irq.c,v 1.24 1999/12/27 06:08:34 anton Exp $ * arch/sparc/kernel/sun4d_irq.c: * SS1000/SC2000 interrupt handling. 
* @@ -32,6 +32,7 @@ #include <asm/traps.h> #include <asm/irq.h> #include <asm/io.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sbus.h> #include <asm/sbi.h> @@ -237,12 +238,12 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs) irq_exit(cpu, irq); } -unsigned int sun4d_build_irq(struct linux_sbus_device *sdev, int irq) +unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq) { int sbusl = pil_to_sbus[irq]; - + if (sbusl) - return ((sdev->my_bus->board + 1) << 5) + (sbusl << 2) + sdev->slot; + return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot; else return irq; } @@ -369,7 +370,7 @@ static void sun4d_set_udt(int cpu) void __init sun4d_distribute_irqs(void) { #ifdef DISTRIBUTE_IRQS - struct linux_sbus *sbus; + struct sbus_bus *sbus; unsigned long sbus_serving_map; sbus_serving_map = cpu_present_map; @@ -401,7 +402,7 @@ void __init sun4d_distribute_irqs(void) set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3); } #else - struct linux_sbus *sbus; + struct sbus_bus *sbus; int cpuid = cpu_logical_map(1); if (cpuid == -1) @@ -436,16 +437,19 @@ static void __init sun4d_init_timers(void (*counter_fn)(int, void *, struct pt_r int irq; extern struct prom_cpuinfo linux_cpus[NR_CPUS]; int cpu; + struct resource r; /* Map the User Timer registers. */ + memset(&r, 0, sizeof(r)); #ifdef __SMP__ - sun4d_timers = sparc_alloc_io(CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT, 0, - PAGE_SIZE, "user timer", 0xf, 0x0); + r.start = CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT; #else - sun4d_timers = sparc_alloc_io(CSR_BASE(0)+BW_TIMER_LIMIT, 0, - PAGE_SIZE, "user timer", 0xf, 0x0); + r.start = CSR_BASE(0)+BW_TIMER_LIMIT; #endif - + r.flags = 0xf; + sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0, + PAGE_SIZE, "user timer"); + sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10); master_l10_counter = &sun4d_timers->l10_cur_count; master_l10_limit = &sun4d_timers->l10_timer_limit; @@ -494,7 +498,7 @@ static void __init sun4d_init_timers(void (*counter_fn)(int, void *, struct pt_r void __init sun4d_init_sbi_irq(void) { - struct linux_sbus *sbus; + struct sbus_bus *sbus; unsigned mask; nsbi = 0; diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index b52cd08a8..ec105ec18 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -25,6 +25,7 @@ #include <asm/delay.h> #include <asm/irq.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/atops.h> @@ -99,6 +100,14 @@ void __init smp4d_callin(void) local_flush_cache_all(); local_flush_tlb_all(); + /* + * Unblock the master CPU _only_ when the scheduler state + * of all secondary CPUs will be up-to-date, so after + * the SMP initialization the master will be just allowed + * to call the scheduler code. + */ + init_idle(); + /* Get our local ticker going. 
*/ smp_setup_percpu_timer(); diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c index 3f2959f16..0f3cf9564 100644 --- a/arch/sparc/kernel/sun4m_irq.c +++ b/arch/sparc/kernel/sun4m_irq.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/malloc.h> #include <linux/init.h> +#include <linux/ioport.h> #include <asm/ptrace.h> #include <asm/processor.h> @@ -29,6 +30,7 @@ #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/traps.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/smp.h> #include <asm/irq.h> @@ -225,6 +227,7 @@ static void __init sun4m_init_timers(void (*counter_fn)(int, void *, struct pt_r int reg_count, irq, cpu; struct linux_prom_registers cnt_regs[PROMREG_MAX]; int obio_node, cnt_node; + struct resource r; cnt_node = 0; if((obio_node = @@ -250,18 +253,19 @@ static void __init sun4m_init_timers(void (*counter_fn)(int, void *, struct pt_r cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size; cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io; } - + + memset((char*)&r, 0, sizeof(struct resource)); /* Map the per-cpu Counter registers. */ - sun4m_timers = sparc_alloc_io(cnt_regs[0].phys_addr, 0, - PAGE_SIZE*SUN4M_NCPUS, "counters_percpu", - cnt_regs[0].which_io, 0x0); - + r.flags = cnt_regs[0].which_io; + r.start = cnt_regs[0].phys_addr; + sun4m_timers = (struct sun4m_timer_regs *) sbus_ioremap(&r, 0, + PAGE_SIZE*SUN4M_NCPUS, "sun4m_cpu_cnt"); /* Map the system Counter register. */ - sparc_alloc_io(cnt_regs[4].phys_addr, 0, - cnt_regs[4].reg_size, - "counters_system", - cnt_regs[4].which_io, 0x0); - + /* XXX Here we expect consequent calls to yeld adjusent maps. */ + r.flags = cnt_regs[4].which_io; + r.start = cnt_regs[4].phys_addr; + sbus_ioremap(&r, 0, cnt_regs[4].reg_size, "sun4m_sys_cnt"); + sun4m_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10); master_l10_counter = &sun4m_timers->l10_cur_count; master_l10_limit = &sun4m_timers->l10_timer_limit; @@ -308,6 +312,7 @@ void __init sun4m_init_IRQ(void) int ie_node,i; struct linux_prom_registers int_regs[PROMREG_MAX]; int num_regs; + struct resource r; __cli(); if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 || @@ -332,16 +337,18 @@ void __init sun4m_init_IRQ(void) int_regs[ie_node].which_io = int_regs[ie_node-1].which_io; } + memset((char *)&r, 0, sizeof(struct resource)); /* Map the interrupt registers for all possible cpus. */ - sun4m_interrupts = sparc_alloc_io(int_regs[0].phys_addr, 0, - PAGE_SIZE*SUN4M_NCPUS, "interrupts_percpu", - int_regs[0].which_io, 0x0); - + r.flags = int_regs[0].which_io; + r.start = int_regs[0].phys_addr; + sun4m_interrupts = (struct sun4m_intregs *) sbus_ioremap(&r, 0, + PAGE_SIZE*SUN4M_NCPUS, "interrupts_percpu"); + /* Map the system interrupt control registers. 
*/ - sparc_alloc_io(int_regs[4].phys_addr, 0, - int_regs[4].reg_size, "interrupts_system", - int_regs[4].which_io, 0x0); - + r.flags = int_regs[4].which_io; + r.start = int_regs[4].phys_addr; + sbus_ioremap(&r, 0, int_regs[4].reg_size, "interrupts_system"); + sun4m_interrupts->set = ~SUN4M_INT_MASKALL; for (i=0; i<linux_num_cpus; i++) sun4m_interrupts->cpu_intregs[i].clear = ~0x17fff; diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index b9990ce0d..2d2d97810 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -21,6 +21,7 @@ #include <asm/delay.h> #include <asm/irq.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/atops.h> @@ -93,6 +94,14 @@ void __init smp4m_callin(void) local_flush_cache_all(); local_flush_tlb_all(); + /* + * Unblock the master CPU _only_ when the scheduler state + * of all secondary CPUs will be up-to-date, so after + * the SMP initialization the master will be just allowed + * to call the scheduler code. + */ + init_idle(); + /* Allow master to continue. */ swap((unsigned long *)&cpu_callin_map[cpuid], 1); local_flush_cache_all(); diff --git a/arch/sparc/kernel/sys_solaris.c b/arch/sparc/kernel/sys_solaris.c index 6bef6e523..6c46c60f0 100644 --- a/arch/sparc/kernel/sys_solaris.c +++ b/arch/sparc/kernel/sys_solaris.c @@ -4,6 +4,7 @@ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) */ +#include <linux/config.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/string.h> @@ -24,7 +25,7 @@ do_solaris_syscall (struct pt_regs *regs) current->exec_domain = lookup_exec_domain(PER_SVR4); if (current->exec_domain && current->exec_domain->handler){ - current->exec_domain->handler (regs); + current->exec_domain->handler (0, regs); /* What is going on here? Why do we do this? 
*/ @@ -39,3 +40,16 @@ do_solaris_syscall (struct pt_regs *regs) unlock_kernel(); return ret; } + +#ifndef CONFIG_SUNOS_EMUL +asmlinkage int +do_sunos_syscall (struct pt_regs *regs) +{ + static int cnt = 0; + if (++cnt < 10) printk ("SunOS binary emulation not compiled in\n"); + lock_kernel(); + force_sig (SIGSEGV, current); + unlock_kernel(); + return 0; +} +#endif diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c index f946b36a2..69379de4a 100644 --- a/arch/sparc/kernel/sys_sparc.c +++ b/arch/sparc/kernel/sys_sparc.c @@ -1,4 +1,4 @@ -/* $Id: sys_sparc.c,v 1.53 1999/08/14 03:51:25 anton Exp $ +/* $Id: sys_sparc.c,v 1.56 2000/01/04 11:01:26 jj Exp $ * linux/arch/sparc/kernel/sys_sparc.c * * This file contains various random system calls that @@ -176,26 +176,34 @@ out: } /* Linux version of mmap */ -asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, +static unsigned long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, - unsigned long off) + unsigned long pgoff) { struct file * file = NULL; unsigned long retval = -EBADF; - down(¤t->mm->mmap_sem); - lock_kernel(); if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) goto out; } + + down(¤t->mm->mmap_sem); + lock_kernel(); retval = -ENOMEM; len = PAGE_ALIGN(len); - if(!(flags & MAP_FIXED) && !addr) { - addr = get_unmapped_area(addr, len); + if(!(flags & MAP_FIXED) && + (!addr || (ARCH_SUN4C_SUN4 && + (addr >= 0x20000000 && addr < 0xe0000000)))) { + addr = get_unmapped_area(0, len); if(!addr) goto out_putf; + if (ARCH_SUN4C_SUN4 && + (addr >= 0x20000000 && addr < 0xe0000000)) { + retval = -EINVAL; + goto out_putf; + } } /* See asm-sparc/uaccess.h */ @@ -203,26 +211,34 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, if((len > (TASK_SIZE - PAGE_SIZE)) || (addr > (TASK_SIZE-len-PAGE_SIZE))) goto out_putf; - if(ARCH_SUN4C_SUN4) { - if(((addr >= 0x20000000) && (addr < 0xe0000000))) { - /* VM hole */ - retval = current->mm->brk; - goto out_putf; - } - } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - retval = do_mmap(file, addr, len, prot, flags, off); + retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_putf: + unlock_kernel(); + up(¤t->mm->mmap_sem); if (file) fput(file); out: - unlock_kernel(); - up(¤t->mm->mmap_sem); return retval; } +asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, unsigned long fd, + unsigned long pgoff) +{ + /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE + we have. */ + return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); +} + +asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, unsigned long fd, + unsigned long off) +{ + return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); +} + /* we come to here via sys_nis_syscall so it can setup the regs argument */ asmlinkage unsigned long c_sys_nis_syscall (struct pt_regs *regs) diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c index 56075e512..ddac348fe 100644 --- a/arch/sparc/kernel/sys_sunos.c +++ b/arch/sparc/kernel/sys_sunos.c @@ -1,4 +1,4 @@ -/* $Id: sys_sunos.c,v 1.104 1999/08/31 12:30:50 anton Exp $ +/* $Id: sys_sunos.c,v 1.108 2000/01/06 23:51:46 davem Exp $ * sys_sunos.c: SunOS specific syscall compatibility support. * * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) @@ -85,10 +85,17 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len, } retval = -ENOMEM; - if(!(flags & MAP_FIXED) && !addr) { - addr = get_unmapped_area(addr, len); + if(!(flags & MAP_FIXED) && + (!addr || (ARCH_SUN4C_SUN4 && + (addr >= 0x20000000 && addr < 0xe0000000)))) { + addr = get_unmapped_area(0, len); if(!addr) goto out_putf; + if (ARCH_SUN4C_SUN4 && + (addr >= 0x20000000 && addr < 0xe0000000)) { + retval = -EINVAL; + goto out_putf; + } } /* If this is ld.so or a shared library doing an mmap * of /dev/zero, transform it into an anonymous mapping. @@ -111,13 +118,6 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len, if((len > (TASK_SIZE - PAGE_SIZE)) || (addr > (TASK_SIZE-len-PAGE_SIZE))) goto out_putf; - if(ARCH_SUN4C_SUN4) { - if(((addr >= 0x20000000) && (addr < 0xe0000000))) { - retval = current->mm->brk; - goto out_putf; - } - } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = do_mmap(file, addr, len, prot, flags, off); if(!ret_type) @@ -195,10 +195,10 @@ asmlinkage int sunos_brk(unsigned long brk) * simple, it hopefully works in most obvious cases.. Easy to * fool it, but this should catch most mistakes. */ - freepages = atomic_read(&buffermem) >> PAGE_SHIFT; + freepages = atomic_read(&buffermem_pages) >> PAGE_SHIFT; freepages += atomic_read(&page_cache_size); freepages >>= 1; - freepages += nr_free_pages; + freepages += nr_free_pages(); freepages += nr_swap_pages; freepages -= num_physpages >> 4; freepages -= (newbrk-oldbrk) >> PAGE_SHIFT; @@ -721,7 +721,7 @@ struct sunos_nfs_mount_args { }; -extern int do_mount(kdev_t, const char *, const char *, char *, int, void *); +extern int do_mount(struct block_device *, const char *, const char *, char *, int, void *); extern dev_t get_unnamed_dev(void); extern void put_unnamed_dev(dev_t); extern asmlinkage int sys_mount(char *, char *, char *, unsigned long, void *); @@ -797,7 +797,6 @@ asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data) char *the_name; struct nfs_mount_data linux_nfs_mount; struct sunos_nfs_mount_args *sunos_mount = data; - dev_t dev; /* Ok, here comes the fun part: Linux's nfs mount needs a * socket connection to the server, but SunOS mount does not @@ -839,13 +838,7 @@ asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data) linux_nfs_mount.hostname [255] = 0; putname (the_name); - dev = get_unnamed_dev (); - - ret = do_mount (dev, "", dir_name, "nfs", linux_flags, &linux_nfs_mount); - if (ret) - put_unnamed_dev(dev); - - return ret; + return do_mount (NULL, "", dir_name, "nfs", linux_flags, &linux_nfs_mount); } asmlinkage int diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S index 97e7df77b..d4be69030 100644 --- a/arch/sparc/kernel/systbls.S +++ b/arch/sparc/kernel/systbls.S @@ -1,4 +1,4 @@ -/* $Id: systbls.S,v 1.84 1999/08/14 03:51:29 anton Exp $ +/* $Id: systbls.S,v 1.88 1999/12/21 14:09:06 jj Exp $ * systbls.S: System call entry point tables for OS compatibility. * The native Linux system call table lives here also. * @@ -9,6 +9,8 @@ * Copyright (C) 1995 Adrian M. 
Rodriguez (adrian@remus.rutgers.edu) */ +#include <linux/config.h> + .data .align 4 @@ -27,12 +29,12 @@ sys_call_table: /*40*/ .long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall /*45*/ .long sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid /*50*/ .long sys_getegid, sys_acct, sys_nis_syscall, sys_nis_syscall, sys_ioctl -/*55*/ .long sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve -/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize +/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve +/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize /*65*/ .long sys_msync, sys_vfork, sys_pread, sys_pwrite, sys_nis_syscall /*70*/ .long sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_munmap, sys_mprotect -/*75*/ .long sys_nis_syscall, sys_vhangup, sys_nis_syscall, sys_nis_syscall, sys_getgroups -/*80*/ .long sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall +/*75*/ .long sys_nis_syscall, sys_vhangup, sys_truncate64, sys_nis_syscall, sys_getgroups +/*80*/ .long sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_ftruncate64 /*85*/ .long sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall /*90*/ .long sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall /*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall @@ -42,8 +44,8 @@ sys_call_table: /*115*/ .long sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd /*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod /*125*/ .long sys_nis_syscall, sys_setreuid, sys_setregid, sys_rename, sys_truncate -/*130*/ .long sys_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall -/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall +/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall +/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 /*140*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getrlimit /*145*/ .long sys_setrlimit, sys_nis_syscall, sys_prctl, sys_pciconfig_read, sys_pciconfig_write /*150*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_nis_syscall @@ -70,6 +72,7 @@ sys_call_table: /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl /*255*/ .long sys_aplib, sys_nis_syscall +#ifdef CONFIG_SUNOS_EMUL /* Now the SunOS syscall table. */ .align 4 @@ -162,3 +165,5 @@ sunos_sys_table: .long sunos_nosys, sunos_nosys /*250*/ .long sunos_nosys, sunos_nosys, sunos_nosys .long sunos_nosys, sunos_nosys, sys_aplib + +#endif diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index 7ee196241..008567aba 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c @@ -1,4 +1,4 @@ -/* $Id: time.c,v 1.46 1999/08/31 13:11:26 anton Exp $ +/* $Id: time.c,v 1.49 1999/11/17 07:34:07 zaitcev Exp $ * linux/arch/sparc/kernel/time.c * * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) @@ -26,6 +26,7 @@ #include <linux/timex.h> #include <linux/init.h> #include <linux/pci.h> +#include <linux/ioport.h> #include <asm/oplib.h> #include <asm/segment.h> @@ -38,6 +39,7 @@ #include <asm/machines.h> #include <asm/sun4paddr.h> #include <asm/page.h> +#include <asm/pcic.h> extern rwlock_t xtime_lock; @@ -207,13 +209,14 @@ static __inline__ void sun4_clock_probe(void) { #ifdef CONFIG_SUN4 int temp; + struct resource r; + memset(&r, 0, sizeof(r)); if( idprom->id_machtype == (SM_SUN4 | SM_4_330) ) { sp_clock_typ = MSTK48T02; - mstk48t02_regs = (unsigned long) - sparc_alloc_io(sun4_clock_physaddr, 0, - sizeof(struct mostek48t02), - "clock", 0x0, 0x0); + r.start = sun4_clock_physaddr; + mstk48t02_regs = sbus_ioremap(&r, 0, + sizeof(struct mostek48t02), 0); mstk48t08_regs = 0; /* To catch weirdness */ intersil_clock = 0; /* just in case */ @@ -224,10 +227,9 @@ static __inline__ void sun4_clock_probe(void) /* intersil setup code */ printk("Clock: INTERSIL at %8x ",sun4_clock_physaddr); sp_clock_typ = INTERSIL; + r.start = sun4_clock_physaddr; intersil_clock = (struct intersil *) - sparc_alloc_io(sun4_clock_physaddr, 0, - sizeof(*intersil_clock), - "clock", 0x0, 0x0); + sparc_ioremap(&r, 0, sizeof(*intersil_clock), "intersil"); mstk48t02_regs = 0; /* just be sure */ mstk48t08_regs = 0; /* ditto */ /* initialise the clock */ @@ -256,8 +258,10 @@ static __inline__ void clock_probe(void) struct linux_prom_registers clk_reg[2]; char model[128]; register int node, cpuunit, bootbus; + struct resource r; cpuunit = bootbus = 0; + memset(&r, 0, sizeof(r)); /* Determine the correct starting PROM node for the probe. */ node = prom_getchild(prom_root_node); @@ -297,10 +301,10 @@ static __inline__ void clock_probe(void) else prom_apply_obio_ranges(clk_reg, 1); /* Map the clock register io area read-only */ - mstk48t02_regs = (unsigned long) - sparc_alloc_io(clk_reg[0].phys_addr, - (void *) 0, sizeof(struct mostek48t02), - "clock", clk_reg[0].which_io, 0x0); + r.flags = clk_reg[0].which_io; + r.start = clk_reg[0].phys_addr; + mstk48t02_regs = sbus_ioremap(&r, 0, + sizeof(struct mostek48t02), "mk48t02"); mstk48t08_regs = 0; /* To catch weirdness */ } else if (strcmp(model, "mk48t08") == 0) { sp_clock_typ = MSTK48T08; @@ -314,10 +318,11 @@ static __inline__ void clock_probe(void) else prom_apply_obio_ranges(clk_reg, 1); /* Map the clock register io area read-only */ - mstk48t08_regs = (struct mostek48t08 *) - sparc_alloc_io(clk_reg[0].phys_addr, - (void *) 0, sizeof(*mstk48t08_regs), - "clock", clk_reg[0].which_io, 0x0); + /* XXX r/o attribute is somewhere in r.flags */ + r.flags = clk_reg[0].which_io; + r.start = clk_reg[0].phys_addr; + mstk48t08_regs = (struct mostek48t08 *) sbus_ioremap(&r, 0, + sizeof(struct mostek48t08), "mk48t08"); mstk48t02_regs = (unsigned long)&mstk48t08_regs->regs; } else { @@ -420,7 +425,7 @@ void __init time_init(void) { #ifdef CONFIG_PCI extern void pci_time_init(void); - if (pci_present()) { + if (pcic_present()) { pci_time_init(); return; } diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index d5b475480..1a8c404e2 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -1,11 +1,12 @@ -# $Id: Makefile,v 1.28 1999/03/21 06:37:44 davem Exp $ +# $Id: Makefile,v 1.31 1999/12/28 11:50:39 jj Exp $ # Makefile for Sparc library files.. 
# OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \ strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \ strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ - copy_user.o locks.o atomic.o bitops.o debuglocks.o lshrdi3.o + copy_user.o locks.o atomic.o bitops.o debuglocks.o lshrdi3.o \ + ashldi3.o rwsem.o ifdef CONFIG_SMP OBJS += irqlock.o @@ -15,82 +16,11 @@ lib.a: $(OBJS) $(AR) rcs lib.a $(OBJS) sync -checksum.o: checksum.S - $(CC) -D__ASSEMBLY__ -ansi -c -o checksum.o checksum.S +.S.s: + $(CPP) -D__ASSEMBLY__ $(AFLAGS) -ansi -DST_DIV0=0x2 $< -o $*.s -memcpy.o: memcpy.S - $(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S - -memcmp.o: memcmp.S - $(CC) -D__ASSEMBLY__ -ansi -c -o memcmp.o memcmp.S - -memscan.o: memscan.S - $(CC) -D__ASSEMBLY__ -ansi -c -o memscan.o memscan.S - -strncmp.o: strncmp.S - $(CC) -D__ASSEMBLY__ -ansi -c -o strncmp.o strncmp.S - -strncpy_from_user.o: strncpy_from_user.S - $(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S - -strlen_user.o: strlen_user.S - $(CC) -D__ASSEMBLY__ -ansi -c -o strlen_user.o strlen_user.S - -copy_user.o: copy_user.S - $(CC) -D__ASSEMBLY__ -ansi -c -o copy_user.o copy_user.S - -blockops.o: blockops.S - $(CC) -D__ASSEMBLY__ -ansi -c -o blockops.o blockops.S - -memset.o: memset.S - $(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S - -locks.o: locks.S - $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o locks.o locks.S - -atomic.o: atomic.S - $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o atomic.o atomic.S - -bitops.o: bitops.S - $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o bitops.o bitops.S - -ifdef CONFIG_SMP -irqlock.o: irqlock.S - $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o irqlock.o irqlock.S -endif - -strlen.o: strlen.S - $(CC) -D__ASSEMBLY__ -ansi -c -o strlen.o strlen.S - -divdi3.o: divdi3.S - $(CC) -D__ASSEMBLY__ -ansi -c -o divdi3.o divdi3.S - -udivdi3.o: udivdi3.S - $(CC) -D__ASSEMBLY__ -ansi -c -o udivdi3.o udivdi3.S - -mul.o: mul.S - $(CC) -D__ASSEMBLY__ -c -o mul.o mul.S - -rem.o: rem.S - $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o rem.o rem.S - -sdiv.o: sdiv.S - $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o sdiv.o sdiv.S - -udiv.o: udiv.S - $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o udiv.o udiv.S - -umul.o: umul.S - $(CC) -D__ASSEMBLY__ -c -o umul.o umul.S - -urem.o: urem.S - $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o urem.o urem.S - -ashrdi3.o: ashrdi3.S - $(CC) -D__ASSEMBLY__ -c -o ashrdi3.o ashrdi3.S - -lshrdi3.o: lshrdi3.S - $(CC) -D__ASSEMBLY__ -c -o lshrdi3.o lshrdi3.S +.S.o: + $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -DST_DIV0=0x2 -c $< -o $*.o dep: diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S new file mode 100644 index 000000000..e3b8e0572 --- /dev/null +++ b/arch/sparc/lib/ashldi3.S @@ -0,0 +1,36 @@ +/* $Id: ashldi3.S,v 1.2 1999/11/19 04:11:46 davem Exp $ + * ashldi3.S: GCC emits these for certain drivers playing + * with long longs. + * + * Copyright (C) 1999 David S. 
Miller (davem@redhat.com) + */ + +#include <asm/cprefix.h> + + .text + .align 4 + .globl C_LABEL(__ashldi3) +C_LABEL(__ashldi3): + cmp %o2, 0 + be 9f + mov 0x20, %g2 + + sub %g2, %o2, %g2 + cmp %g2, 0 + bg 7f + sll %o0, %o2, %g3 + + neg %g2 + clr %o5 + b 8f + sll %o1, %g2, %o4 +7: + srl %o1, %g2, %g2 + sll %o1, %o2, %o5 + or %g3, %g2, %o4 +8: + mov %o4, %o0 + mov %o5, %o1 +9: + retl + nop diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S index bf589c283..871d4521e 100644 --- a/arch/sparc/lib/ashrdi3.S +++ b/arch/sparc/lib/ashrdi3.S @@ -1,4 +1,4 @@ -/* $Id: ashrdi3.S,v 1.3 1996/09/07 23:18:10 davem Exp $ +/* $Id: ashrdi3.S,v 1.4 1999/11/19 04:11:49 davem Exp $ * ashrdi3.S: The filesystem code creates all kinds of references to * this little routine on the sparc with gcc. * @@ -7,7 +7,9 @@ #include <asm/cprefix.h> - .globl C_LABEL(__ashrdi3) + .text + .align 4 + .globl C_LABEL(__ashrdi3) C_LABEL(__ashrdi3): tst %o2 be 3f diff --git a/arch/sparc/lib/rwsem.S b/arch/sparc/lib/rwsem.S new file mode 100644 index 000000000..0d5f74139 --- /dev/null +++ b/arch/sparc/lib/rwsem.S @@ -0,0 +1,191 @@ +/* $Id: rwsem.S,v 1.2 2000/01/05 01:00:38 davem Exp $ + * Assembly part of rw semaphores. + * + * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) + */ + +#include <asm/ptrace.h> +#include <asm/psr.h> + + .text + .align 4 + + .globl ___down_read +___down_read: + rd %psr, %g3 + nop + nop + nop + or %g3, PSR_PIL, %g7 + wr %g7, 0, %psr + nop + nop + nop +#ifdef __SMP__ +1: ldstub [%g1 + 4], %g7 + tst %g7 + bne 1b + ld [%g1], %g7 + subcc %g7, 1, %g7 + st %g7, [%g1] + stb %g0, [%g1 + 4] +#else + ld [%g1], %g7 + subcc %g7, 1, %g7 + st %g7, [%g1] +#endif + wr %g3, 0, %psr + nop + bneg 3f + nop +2: jmpl %o7, %g0 + mov %g4, %o7 +3: save %sp, -64, %sp + mov %g1, %l1 + mov %g4, %l4 + bcs 4f + mov %g5, %l5 + call down_read_failed + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba ___down_read + restore %l5, %g0, %g5 +4: call down_read_failed_biased + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba 2b + restore %l5, %g0, %g5 + + .globl ___down_write +___down_write: + rd %psr, %g3 + nop + nop + nop + or %g3, PSR_PIL, %g7 + wr %g7, 0, %psr + sethi %hi(0x01000000), %g2 + nop + nop +#ifdef __SMP__ +1: ldstub [%g1 + 4], %g7 + tst %g7 + bne 1b + ld [%g1], %g7 + subcc %g7, %g2, %g7 + st %g7, [%g1] + stb %g0, [%g1 + 4] +#else + ld [%g1], %g7 + subcc %g7, %g2, %g7 + st %g7, [%g1] +#endif + wr %g3, 0, %psr + nop + bne 3f + nop +2: jmpl %o7, %g0 + mov %g4, %o7 +3: save %sp, -64, %sp + mov %g1, %l1 + mov %g4, %l4 + bcs 4f + mov %g5, %l5 + call down_write_failed + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba ___down_write + restore %l5, %g0, %g5 +4: call down_write_failed_biased + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba 2b + restore %l5, %g0, %g5 + + .globl ___up_read +___up_read: + rd %psr, %g3 + nop + nop + nop + or %g3, PSR_PIL, %g7 + wr %g7, 0, %psr + nop + nop + nop +#ifdef __SMP__ +1: ldstub [%g1 + 4], %g7 + tst %g7 + bne 1b + ld [%g1], %g7 + addcc %g7, 1, %g7 + st %g7, [%g1] + stb %g0, [%g1 + 4] +#else + ld [%g1], %g7 + addcc %g7, 1, %g7 + st %g7, [%g1] +#endif + wr %g3, 0, %psr + nop + be 3f + nop +2: jmpl %o7, %g0 + mov %g4, %o7 +3: save %sp, -64, %sp + mov %g1, %l1 + mov %g4, %l4 + mov %g5, %l5 + clr %o1 + call __rwsem_wake + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba 2b + restore %l5, %g0, %g5 + + .globl ___up_write +___up_write: + rd %psr, %g3 + nop + nop + nop + or %g3, PSR_PIL, %g7 + wr %g7, 0, %psr + sethi %hi(0x01000000), %g2 + nop + nop +#ifdef __SMP__ +1: ldstub [%g1 + 4], %g7 + tst 
%g7 + bne 1b + ld [%g1], %g7 + addcc %g7, %g2, %g7 + st %g7, [%g1] + stb %g0, [%g1 + 4] +#else + ld [%g1], %g7 + addcc %g7, %g2, %g7 + st %g7, [%g1] +#endif + wr %g3, 0, %psr + nop + bcs 3f + nop +2: jmpl %o7, %g0 + mov %g4, %o7 +3: save %sp, -64, %sp + mov %g1, %l1 + mov %g4, %l4 + mov %g5, %l5 + mov %g7, %o1 + call __rwsem_wake + mov %l1, %o0 + mov %l1, %g1 + mov %l4, %g4 + ba 2b + restore %l5, %g0, %g5 diff --git a/arch/sparc/lib/strlen_user.S b/arch/sparc/lib/strlen_user.S index 6f2328e06..3dc3c3820 100644 --- a/arch/sparc/lib/strlen_user.S +++ b/arch/sparc/lib/strlen_user.S @@ -47,8 +47,11 @@ mov 3, %o0 .align 4 - .global C_LABEL(__strlen_user) + .global C_LABEL(__strlen_user), C_LABEL(__strnlen_user) C_LABEL(__strlen_user): + sethi %hi(32768), %o1 +C_LABEL(__strnlen_user): + mov %o1, %g1 mov %o0, %o1 andcc %o0, 3, %g0 bne 10b @@ -63,11 +66,16 @@ C_LABEL(__strlen_user): 2: sub %o5, %o2, %o4 andcc %o4, %o3, %g0 - be 13b + bne 82f add %o0, 4, %o0 + sub %o0, %o1, %g2 +81: cmp %g2, %g1 + blu 13b + mov %o0, %o4 + ba,a 1f /* Check every byte. */ - srl %o5, 24, %g5 +82: srl %o5, 24, %g5 andcc %g5, 0xff, %g0 be 1f add %o0, -3, %o4 @@ -80,9 +88,9 @@ C_LABEL(__strlen_user): be 1f add %o4, 1, %o4 andcc %o5, 0xff, %g0 - bne,a 2b -14: - ld [%o0], %o5 + bne 81b + sub %o0, %o1, %g2 + add %o4, 1, %o4 1: retl @@ -101,4 +109,3 @@ C_LABEL(__strlen_user): .word 11b, 9b .word 12b, 9b .word 13b, 9b - .word 14b, 9b diff --git a/arch/sparc/math-emu/Makefile b/arch/sparc/math-emu/Makefile index 6e4862a83..e8880cd07 100644 --- a/arch/sparc/math-emu/Makefile +++ b/arch/sparc/math-emu/Makefile @@ -11,10 +11,10 @@ O_TARGET := math-emu.o O_OBJS := math.o ashldi3.o .S.s: - $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s + $(CPP) -D__ASSEMBLY__ $(AFLAGS) -ansi $< -o $*.s .S.o: - $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o + $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o CFLAGS += -I. -I$(TOPDIR)/include/math-emu -w diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 7caf69e90..5e304411c 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -1,4 +1,4 @@ -# $Id: Makefile,v 1.34 1999/08/14 03:51:42 anton Exp $ +# $Id: Makefile,v 1.35 1999/10/09 05:32:01 zaitcev Exp $ # Makefile for the linux Sparc-specific parts of the memory manager. # # Note! Dependencies are done automagically by 'make dep', which also @@ -15,7 +15,7 @@ endif ifeq ($(CONFIG_SUN4),y) O_OBJS += nosrmmu.o else -O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o +O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o endif ifdef CONFIG_SMP O_OBJS += nosun4c.o @@ -33,3 +33,6 @@ viking.o: viking.S tsunami.o: tsunami.S $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o tsunami.o tsunami.S + +swift.o: swift.S + $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o swift.o swift.S diff --git a/arch/sparc/mm/asyncd.c b/arch/sparc/mm/asyncd.c index d17979cd4..6ed8a3c99 100644 --- a/arch/sparc/mm/asyncd.c +++ b/arch/sparc/mm/asyncd.c @@ -1,4 +1,4 @@ -/* $Id: asyncd.c,v 1.17 1999/08/14 03:51:44 anton Exp $ +/* $Id: asyncd.c,v 1.18 1999/12/27 06:30:02 anton Exp $ * The asyncd kernel daemon. This handles paging on behalf of * processes that receive page faults due to remote (async) memory * accesses. 
@@ -25,6 +25,7 @@ #include <asm/system.h> /* for cli()/sti() */ #include <asm/segment.h> /* for memcpy_to/fromfs */ #include <asm/bitops.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #define DEBUG 0 diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c index 4ecf94360..9b766f4eb 100644 --- a/arch/sparc/mm/btfixup.c +++ b/arch/sparc/mm/btfixup.c @@ -1,4 +1,4 @@ -/* $Id: btfixup.c,v 1.8 1999/08/31 06:54:31 davem Exp $ +/* $Id: btfixup.c,v 1.9 1999/12/27 06:30:02 anton Exp $ * btfixup.c: Boot time code fixup and relocator, so that * we can get rid of most indirect calls to achieve single * image sun4c and srmmu kernel. @@ -11,6 +11,7 @@ #include <linux/init.h> #include <asm/btfixup.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/system.h> diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c index c400a0179..ba75681b1 100644 --- a/arch/sparc/mm/fault.c +++ b/arch/sparc/mm/fault.c @@ -1,4 +1,4 @@ -/* $Id: fault.c,v 1.107 1999/08/14 03:51:46 anton Exp $ +/* $Id: fault.c,v 1.111 1999/10/24 13:45:59 anton Exp $ * fault.c: Page fault handlers for the Sparc. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -146,10 +146,11 @@ static void unhandled_fault(unsigned long address, struct task_struct *tsk, printk(KERN_ALERT "Unable to handle kernel paging request " "at virtual address %08lx\n", address); } - printk(KERN_ALERT "tsk->mm->context = %08lx\n", - (unsigned long) tsk->mm->context); - printk(KERN_ALERT "tsk->mm->pgd = %08lx\n", - (unsigned long) tsk->mm->pgd); + printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n", + (tsk->mm ? tsk->mm->context : tsk->active_mm->context)); + printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n", + (tsk->mm ? (unsigned long) tsk->mm->pgd : + (unsigned long) tsk->active_mm->pgd)); die_if_kernel("Oops", regs); } @@ -309,8 +310,18 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write, pgd_t *pgdp; pte_t *ptep; - if (text_fault) + if (text_fault) { address = regs->pc; + } else if (!write && + !(regs->psr & PSR_PS)) { + unsigned int insn, *ip; + + ip = (unsigned int *)regs->pc; + if (! 
get_user(insn, ip)) { + if ((insn & 0xc1680000) == 0xc0680000) + write = 1; + } + } pgdp = sun4c_pgd_offset(mm, address); ptep = sun4c_pte_offset((pmd_t *) pgdp, address); @@ -319,28 +330,36 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write, if (write) { if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) { + unsigned long flags; *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY); + save_and_cli(flags); if (sun4c_get_segmap(address) != invalid_segment) { sun4c_put_pte(address, pte_val(*ptep)); + restore_flags(flags); return; } + restore_flags(flags); } } else { if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) { + unsigned long flags; *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_VALID); + save_and_cli(flags); if (sun4c_get_segmap(address) != invalid_segment) { sun4c_put_pte(address, pte_val(*ptep)); + restore_flags(flags); return; } + restore_flags(flags); } } } @@ -415,31 +434,25 @@ void window_overflow_fault(void) { unsigned long sp; - lock_kernel(); sp = current->thread.rwbuf_stkptrs[0]; if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 1); force_user_fault(sp, 1); - unlock_kernel(); } void window_underflow_fault(unsigned long sp) { - lock_kernel(); if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); - unlock_kernel(); } void window_ret_fault(struct pt_regs *regs) { unsigned long sp; - lock_kernel(); sp = regs->u_regs[UREG_FP]; if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); - unlock_kernel(); } diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c index 9669f5111..9e599fd9d 100644 --- a/arch/sparc/mm/generic.c +++ b/arch/sparc/mm/generic.c @@ -1,4 +1,4 @@ -/* $Id: generic.c,v 1.6 1998/10/27 23:28:00 davem Exp $ +/* $Id: generic.c,v 1.9 1999/12/27 06:30:03 anton Exp $ * generic.c: Generic Sparc mm routines that are not dependent upon * MMU type but are Sparc specific. * @@ -9,46 +9,26 @@ #include <linux/mm.h> #include <linux/swap.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/page.h> - -/* Allocate a block of RAM which is aligned to its size. - * This procedure can be used until the call to mem_init(). - */ -void *sparc_init_alloc(unsigned long *kbrk, unsigned long size) -{ - unsigned long mask = size - 1; - unsigned long ret; - - if(!size) - return 0x0; - if(size & mask) { - prom_printf("panic: sparc_init_alloc botch\n"); - prom_halt(); - } - ret = (*kbrk + mask) & ~mask; - *kbrk = ret + size; - memset((void*) ret, 0, size); - return (void*) ret; -} - static inline void forget_pte(pte_t page) { if (pte_none(page)) return; if (pte_present(page)) { - unsigned long addr = pte_page(page); - if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr))) + unsigned long nr = pte_pagenr(page); + if (nr >= max_mapnr || PageReserved(mem_map+nr)) return; /* * free_page() used to be able to clear swap cache * entries. We may now have to do it manually. 
*/ - free_page_and_swap_cache(addr); + free_page_and_swap_cache(mem_map+nr); return; } - swap_free(pte_val(page)); + swap_free(pte_to_swp_entry(page)); } /* Remap IO memory, the same way as remap_page_range(), but use diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c index 40aab1d66..221496f98 100644 --- a/arch/sparc/mm/init.c +++ b/arch/sparc/mm/init.c @@ -1,4 +1,4 @@ -/* $Id: init.c,v 1.69 1999/09/06 22:56:17 ecd Exp $ +/* $Id: init.c,v 1.72 1999/12/27 06:30:06 anton Exp $ * linux/arch/sparc/mm/init.c * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -22,6 +22,8 @@ #include <linux/blk.h> #endif #include <linux/init.h> +#include <linux/highmem.h> +#include <linux/bootmem.h> #include <asm/system.h> #include <asm/segment.h> @@ -30,22 +32,21 @@ #include <asm/pgtable.h> #include <asm/vaddrs.h> -/* Turn this off if you suspect some place in some physical memory hole - might get into page tables (something would be broken very much). */ - -#define FREE_UNUSED_MEM_MAP - extern void show_net_buffers(void); unsigned long *sparc_valid_addr_bitmap; +unsigned long phys_base; + struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; unsigned long sparc_unmapped_base; struct pgtable_cache_struct pgt_quicklists; /* References to section boundaries */ -extern char __init_begin, __init_end, etext; +extern char __init_begin, __init_end, _start, _end, etext , edata; + +static unsigned long totalram_pages = 0; /* * BAD_PAGE is the page that is used for page faults when linux @@ -62,50 +63,31 @@ extern char __init_begin, __init_end, etext; */ pte_t *__bad_pagetable(void) { - memset((void *) EMPTY_PGT, 0, PAGE_SIZE); - return (pte_t *) EMPTY_PGT; + memset((void *) &empty_bad_page_table, 0, PAGE_SIZE); + return (pte_t *) &empty_bad_page_table; } pte_t __bad_page(void) { - memset((void *) EMPTY_PGE, 0, PAGE_SIZE); - return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)); + memset((void *) &empty_bad_page, 0, PAGE_SIZE); + return pte_mkdirty(mk_pte_phys((((unsigned long) &empty_bad_page) + - PAGE_OFFSET + phys_base), + PAGE_SHARED)); } void show_mem(void) { - int free = 0,total = 0,reserved = 0; - int shared = 0, cached = 0; - struct page *page, *end; - - printk("\nMem-info:\n"); + printk("Mem-info:\n"); show_free_areas(); - printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); - for (page = mem_map, end = mem_map + max_mapnr; - page < end; page++) { - if (PageSkip(page)) { - if (page->next_hash < page) - break; - page = page->next_hash; - } - total++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (!atomic_read(&page->count)) - free++; - else - shared += atomic_read(&page->count) - 1; - } - printk("%d pages of RAM\n",total); - printk("%d free pages\n",free); - printk("%d reserved pages\n",reserved); - printk("%d pages shared\n",shared); - printk("%d pages swap cached\n",cached); - printk("%ld page tables cached\n",pgtable_cache_size); + printk("Free swap: %6dkB\n", + nr_swap_pages << (PAGE_SHIFT-10)); + printk("%ld pages of RAM\n", totalram_pages); + printk("%d free pages\n", nr_free_pages()); + printk("%ld pages in page table cache\n",pgtable_cache_size); +#ifndef __SMP__ if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) - printk("%ld page dirs cached\n", pgd_cache_size); + printk("%ld entries in page dir cache\n",pgd_cache_size); +#endif show_buffers(); #ifdef CONFIG_NET show_net_buffers(); @@ -114,12 +96,12 @@ void show_mem(void) extern pgprot_t protection_map[16]; -unsigned long __init sparc_context_init(unsigned 
long start_mem, int numctx) +void __init sparc_context_init(int numctx) { int ctx; - ctx_list_pool = (struct ctx_list *) start_mem; - start_mem += (numctx * sizeof(struct ctx_list)); + ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); + for(ctx = 0; ctx < numctx; ctx++) { struct ctx_list *clist; @@ -131,7 +113,98 @@ unsigned long __init sparc_context_init(unsigned long start_mem, int numctx) ctx_used.next = ctx_used.prev = &ctx_used; for(ctx = 0; ctx < numctx; ctx++) add_to_free_ctxlist(ctx_list_pool + ctx); - return start_mem; +} + +#undef DEBUG_BOOTMEM + +extern unsigned long cmdline_memory_size; + +unsigned long __init bootmem_init(void) +{ + unsigned long bootmap_size, start_pfn, end_pfn; + unsigned long end_of_phys_memory = 0UL; + int i; + + /* XXX It is a bit ambiguous here, whether we should + * XXX treat the user specified mem=xxx as total wanted + * XXX physical memory, or as a limit to the upper + * XXX physical address we allow. For now it is the + * XXX latter. -DaveM + */ +#ifdef DEBUG_BOOTMEM + prom_printf("bootmem_init: Scan sp_banks, "); +#endif + for (i = 0; sp_banks[i].num_bytes != 0; i++) { + end_of_phys_memory = sp_banks[i].base_addr + + sp_banks[i].num_bytes; + if (cmdline_memory_size) { + if (end_of_phys_memory > cmdline_memory_size) { + if (cmdline_memory_size > sp_banks[i].base_addr) { + end_of_phys_memory = + sp_banks[i-1].base_addr + + sp_banks[i-1].num_bytes; + sp_banks[i].base_addr = 0xdeadbeef; + sp_banks[i].num_bytes = 0; + } else { + sp_banks[i].num_bytes -= + (end_of_phys_memory - + cmdline_memory_size); + end_of_phys_memory = cmdline_memory_size; + sp_banks[++i].base_addr = 0xdeadbeef; + sp_banks[i].num_bytes = 0; + } + break; + } + } + } + + /* Start with page aligned address of last symbol in kernel + * image. + */ + start_pfn = PAGE_ALIGN((unsigned long) &_end) - PAGE_OFFSET; + + /* Adjust up to the physical address where the kernel begins. */ + start_pfn += phys_base; + + /* Now shift down to get the real physical page frame number. */ + start_pfn >>= PAGE_SHIFT; + + end_pfn = end_of_phys_memory >> PAGE_SHIFT; + + /* Initialize the boot-time allocator. */ +#ifdef DEBUG_BOOTMEM + prom_printf("init_bootmem(spfn[%lx],epfn[%lx])\n", + start_pfn, end_pfn); +#endif + bootmap_size = init_bootmem(start_pfn, end_pfn); + + /* Now register the available physical memory with the + * allocator. + */ + for (i = 0; sp_banks[i].num_bytes != 0; i++) { +#ifdef DEBUG_BOOTMEM + prom_printf("free_bootmem: base[%lx] size[%lx]\n", + sp_banks[i].base_addr, + sp_banks[i].num_bytes); +#endif + free_bootmem(sp_banks[i].base_addr, + sp_banks[i].num_bytes); + } + + /* Reserve the kernel text/data/bss and the bootmem bitmap. */ +#ifdef DEBUG_BOOTMEM + prom_printf("reserve_bootmem: base[%lx] size[%lx]\n", + phys_base, + (((start_pfn << PAGE_SHIFT) + + bootmap_size) - phys_base)); +#endif + reserve_bootmem(phys_base, (((start_pfn << PAGE_SHIFT) + + bootmap_size) - phys_base)); + +#ifdef DEBUG_BOOTMEM + prom_printf("init_bootmem: return end_pfn[%lx]\n", end_pfn); +#endif + return end_pfn; } /* @@ -139,31 +212,32 @@ unsigned long __init sparc_context_init(unsigned long start_mem, int numctx) * init routine based upon the Sun model type on the Sparc. 
* */ -extern unsigned long sun4c_paging_init(unsigned long, unsigned long); -extern unsigned long srmmu_paging_init(unsigned long, unsigned long); -extern unsigned long device_scan(unsigned long); +extern void sun4c_paging_init(void); +extern void srmmu_paging_init(void); +extern void device_scan(void); + +unsigned long last_valid_pfn; -unsigned long __init -paging_init(unsigned long start_mem, unsigned long end_mem) +void __init paging_init(void) { switch(sparc_cpu_model) { case sun4c: case sun4e: case sun4: - start_mem = sun4c_paging_init(start_mem, end_mem); + sun4c_paging_init(); sparc_unmapped_base = 0xe0000000; BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000); break; case sun4m: case sun4d: - start_mem = srmmu_paging_init(start_mem, end_mem); + srmmu_paging_init(); sparc_unmapped_base = 0x50000000; BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); break; case ap1000: #if CONFIG_AP1000 - start_mem = apmmu_paging_init(start_mem, end_mem); + apmmu_paging_init(); sparc_unmapped_base = 0x50000000; BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); break; @@ -194,74 +268,121 @@ paging_init(unsigned long start_mem, unsigned long end_mem) protection_map[14] = PAGE_SHARED; protection_map[15] = PAGE_SHARED; btfixup(); - return device_scan(start_mem); + device_scan(); } struct cache_palias *sparc_aliases; -extern void srmmu_frob_mem_map(unsigned long); +static void __init taint_real_pages(void) +{ + int i; -int physmem_mapped_contig __initdata = 1; + for (i = 0; sp_banks[i].num_bytes; i++) { + unsigned long start, end; -static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem) -{ - unsigned long addr, tmp2 = 0; - - if(physmem_mapped_contig) { - for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) { - if(addr >= KERNBASE && addr < start_mem) - addr = start_mem; - for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) { - unsigned long phys_addr = (addr - PAGE_OFFSET); - unsigned long base = sp_banks[tmp2].base_addr; - unsigned long limit = base + sp_banks[tmp2].num_bytes; - - if((phys_addr >= base) && (phys_addr < limit) && - ((phys_addr + PAGE_SIZE) < limit)) { - mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved); - set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap); - } - } + start = sp_banks[i].base_addr; + end = start + + sp_banks[i].num_bytes; + while (start < end) { + set_bit (start >> 20, + sparc_valid_addr_bitmap); + start += PAGE_SIZE; } - } else { - if((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) { - srmmu_frob_mem_map(start_mem); + } +} + +void __init free_mem_map_range(struct page *first, struct page *last) +{ + first = (struct page *) PAGE_ALIGN((unsigned long)first); + last = (struct page *) ((unsigned long)last & PAGE_MASK); +#ifdef DEBUG_BOOTMEM + prom_printf("[%p,%p] ", first, last); +#endif + while (first < last) { + ClearPageReserved(mem_map + MAP_NR(first)); + set_page_count(mem_map + MAP_NR(first), 1); + free_page((unsigned long)first); + totalram_pages++; + num_physpages++; + + first = (struct page *)((unsigned long)first + PAGE_SIZE); + } +} + +/* Walk through holes in sp_banks regions, if the mem_map array + * areas representing those holes consume a page or more, free + * up such pages. This helps a lot on machines where physical + * ram is configured such that it begins at some hugh value. + * + * The sp_banks array is sorted by base address. 
+ */ +void __init free_unused_mem_map(void) +{ + int i; + +#ifdef DEBUG_BOOTMEM + prom_printf("free_unused_mem_map: "); +#endif + for (i = 0; sp_banks[i].num_bytes; i++) { + if (i == 0) { + struct page *first, *last; + + first = mem_map; + last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT]; + free_mem_map_range(first, last); } else { - for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) { - mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved); - set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap); + struct page *first, *last; + unsigned long prev_end; + + prev_end = sp_banks[i-1].base_addr + + sp_banks[i-1].num_bytes; + prev_end = PAGE_ALIGN(prev_end); + first = &mem_map[prev_end >> PAGE_SHIFT]; + last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT]; + + free_mem_map_range(first, last); + + if (!sp_banks[i+1].num_bytes) { + prev_end = sp_banks[i].base_addr + + sp_banks[i].num_bytes; + first = &mem_map[prev_end >> PAGE_SHIFT]; + last = &mem_map[last_valid_pfn]; + free_mem_map_range(first, last); } } } +#ifdef DEBUG_BOOTMEM + prom_printf("\n"); +#endif } -void __init mem_init(unsigned long start_mem, unsigned long end_mem) +void __init mem_init(void) { int codepages = 0; int datapages = 0; int initpages = 0; int i; - unsigned long addr; - struct page *page, *end; + unsigned long addr, last; /* Saves us work later. */ memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE); - end_mem &= PAGE_MASK; - max_mapnr = MAP_NR(end_mem); - high_memory = (void *) end_mem; - - sparc_valid_addr_bitmap = (unsigned long *)start_mem; - i = max_mapnr >> (8 + 5); + i = last_valid_pfn >> (8 + 5); i += 1; - memset(sparc_valid_addr_bitmap, 0, i << 2); - start_mem += i << 2; - start_mem = PAGE_ALIGN(start_mem); - num_physpages = 0; + sparc_valid_addr_bitmap = (unsigned long *) + __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL); + + if (sparc_valid_addr_bitmap == NULL) { + prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); + prom_halt(); + } + memset(sparc_valid_addr_bitmap, 0, i << 2); addr = KERNBASE; - while(addr < start_mem) { + last = PAGE_ALIGN((unsigned long)&_end); + /* fix this */ + while(addr < last) { #ifdef CONFIG_BLK_DEV_INITRD if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end) mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved); @@ -272,69 +393,39 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem) addr += PAGE_SIZE; } - taint_real_pages(start_mem, end_mem); - -#ifdef FREE_UNUSED_MEM_MAP - end = mem_map + max_mapnr; - for (page = mem_map; page < end; page++) { - if (PageSkip(page)) { - unsigned long low, high; - - /* See srmmu_frob_mem_map() for why this is done. 
-DaveM */ - page++; - - low = PAGE_ALIGN((unsigned long)(page+1)); - if (page->next_hash < page) - high = ((unsigned long)end) & PAGE_MASK; - else - high = ((unsigned long)page->next_hash) & PAGE_MASK; - while (low < high) { - mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved); - low += PAGE_SIZE; - } - } - } + taint_real_pages(); + + max_mapnr = last_valid_pfn; + high_memory = __va(last_valid_pfn << PAGE_SHIFT); + +#ifdef DEBUG_BOOTMEM + prom_printf("mem_init: Calling free_all_bootmem().\n"); #endif - - for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) { - if (PageSkip(mem_map + MAP_NR(addr))) { - unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map; + num_physpages = totalram_pages = free_all_bootmem(); - next = (next << PAGE_SHIFT) + PAGE_OFFSET; - if (next < addr || next >= end_mem) - break; - addr = next; - } - num_physpages++; - if(PageReserved(mem_map + MAP_NR(addr))) { - if ((addr < (unsigned long) &etext) && (addr >= KERNBASE)) - codepages++; - else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end)) - initpages++; - else if((addr < start_mem) && (addr >= KERNBASE)) - datapages++; - continue; - } - atomic_set(&mem_map[MAP_NR(addr)].count, 1); -#ifdef CONFIG_BLK_DEV_INITRD - if (!initrd_start || - (addr < initrd_start || addr >= initrd_end)) +#if 0 + free_unused_mem_map(); #endif - free_page(addr); - } + + codepages = (((unsigned long) &etext) - ((unsigned long)&_start)); + codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; + datapages = (((unsigned long) &edata) - ((unsigned long)&etext)); + datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; + initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); + initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n", - nr_free_pages << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10), initpages << (PAGE_SHIFT-10), - (unsigned long)PAGE_OFFSET, end_mem); + (unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); /* NOTE NOTE NOTE NOTE * Please keep track of things and make sure this * always matches the code in mm/page_alloc.c -DaveM */ - i = nr_free_pages >> 7; + i = nr_free_pages() >> 7; if (i < 48) i = 48; if (i > 256) @@ -347,39 +438,34 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem) void free_initmem (void) { unsigned long addr; - + addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved); - atomic_set(&mem_map[MAP_NR(addr)].count, 1); - free_page(addr); + unsigned long page; + struct page *p; + + page = (addr + + ((unsigned long) __va(phys_base)) - + PAGE_OFFSET); + p = mem_map + MAP_NR(page); + + ClearPageReserved(p); + set_page_count(p, 1); + __free_page(p); + totalram_pages++; + num_physpages++; } } void si_meminfo(struct sysinfo *val) { - struct page *page, *end; - - val->totalram = 0; + val->totalram = totalram_pages; val->sharedram = 0; - val->freeram = nr_free_pages << PAGE_SHIFT; - val->bufferram = atomic_read(&buffermem); - for (page = mem_map, end = mem_map + max_mapnr; - page < end; page++) { - if (PageSkip(page)) { - if (page->next_hash < page) - break; - page = page->next_hash; - } - if (PageReserved(page)) - continue; - val->totalram++; - if (!atomic_read(&page->count)) - continue; - val->sharedram += atomic_read(&page->count) - 1; - } - val->totalram <<= PAGE_SHIFT; - val->sharedram <<= 
PAGE_SHIFT; - val->totalbig = 0; - val->freebig = 0; + val->freeram = nr_free_pages(); + val->bufferram = atomic_read(&buffermem_pages); + + val->totalhigh = 0; + val->freehigh = 0; + + val->mem_unit = PAGE_SIZE; } diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 45c74c0aa..1a3476a16 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -1,4 +1,4 @@ -/* $Id: io-unit.c,v 1.15 1999/09/10 10:40:38 davem Exp $ +/* $Id: io-unit.c,v 1.18 1999/12/28 04:28:55 anton Exp $ * io-unit.c: IO-UNIT specific routines for memory management. * * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) @@ -9,6 +9,8 @@ #include <linux/init.h> #include <linux/malloc.h> #include <linux/spinlock.h> +#include <asm/scatterlist.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sbus.h> #include <asm/io.h> @@ -27,14 +29,15 @@ #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM) void __init -iounit_init(int sbi_node, int io_node, struct linux_sbus *sbus) +iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus) { iopte_t *xpt, *xptend; struct iounit_struct *iounit; struct linux_prom_registers iommu_promregs[PROMREG_MAX]; - + struct resource r; + iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC); - + memset(iounit, 0, sizeof(*iounit)); iounit->limit[0] = IOUNIT_BMAP1_START; iounit->limit[1] = IOUNIT_BMAP2_START; @@ -42,13 +45,14 @@ iounit_init(int sbi_node, int io_node, struct linux_sbus *sbus) iounit->limit[3] = IOUNIT_BMAPM_END; iounit->rotor[1] = IOUNIT_BMAP2_START; iounit->rotor[2] = IOUNIT_BMAPM_START; - + prom_getproperty(sbi_node, "reg", (void *) iommu_promregs, sizeof(iommu_promregs)); prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3); - xpt = (iopte_t *) - sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16), - "XPT", iommu_promregs[2].which_io, 0x0); + memset(&r, 0, sizeof(r)); + r.flags = iommu_promregs[2].which_io; + r.start = iommu_promregs[2].phys_addr; + xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT"); if(!xpt) panic("Cannot map External Page Table."); sbus->iommu = (struct iommu_struct *)iounit; @@ -108,7 +112,7 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); return vaddr; } -static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus) +static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus) { unsigned long ret, flags; struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; @@ -119,7 +123,7 @@ static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sb return ret; } -static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { unsigned long flags; struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; @@ -127,12 +131,13 @@ static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ spin_lock_irqsave(&iounit->lock, flags); for (; sz >= 0; sz--) { - sg[sz].dvma_addr = iounit_get_area(iounit, (unsigned long)sg[sz].addr, sg[sz].len); + sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)sg[sz].address, sg[sz].length); + sg[sz].dvma_length = sg[sz].length; } spin_unlock_irqrestore(&iounit->lock, flags); } -static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus) +static void iounit_release_scsi_one(__u32 
vaddr, unsigned long len, struct sbus_bus *sbus) { unsigned long flags; struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; @@ -146,16 +151,16 @@ static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux spin_unlock_irqrestore(&iounit->lock, flags); } -static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { unsigned long flags; unsigned long vaddr, len; struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; - + spin_lock_irqsave(&iounit->lock, flags); for (; sz >= 0; sz--) { - len = ((sg[sz].dvma_addr & ~PAGE_MASK) + sg[sz].len + (PAGE_SIZE-1)) >> PAGE_SHIFT; - vaddr = (sg[sz].dvma_addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; + len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT; + vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); @@ -164,21 +169,18 @@ static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_ } #ifdef CONFIG_SBUS -static void iounit_map_dma_area(unsigned long addr, int len) +static void iounit_map_dma_area(unsigned long va, __u32 addr, int len) { unsigned long page, end; pgprot_t dvma_prot; iopte_t *iopte; - struct linux_sbus *sbus; + struct sbus_bus *sbus; dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); end = PAGE_ALIGN((addr + len)); while(addr < end) { - page = get_free_page(GFP_KERNEL); - if(!page) { - prom_printf("alloc_dvma: Cannot get a dvma page\n"); - prom_halt(); - } else { + page = va; + { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; @@ -200,10 +202,15 @@ static void iounit_map_dma_area(unsigned long addr, int len) } } addr += PAGE_SIZE; + va += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); } + +static void iounit_unmap_dma_area(unsigned long addr, int len) +{ +} #endif static char *iounit_lockarea(char *vaddr, unsigned long len) @@ -229,10 +236,11 @@ void __init ld_mmu_iounit(void) #ifdef CONFIG_SBUS BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM); #endif } -__u32 iounit_map_dma_init(struct linux_sbus *sbus, int size) +__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size) { int i, j, k, npages; unsigned long rotor, scan, limit; @@ -271,7 +279,7 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); return ret; } -__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct linux_sbus *sbus) +__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus) { int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 99f29c655..041e00c6a 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -1,8 +1,8 @@ -/* $Id: iommu.c,v 1.11 1999/08/31 06:54:34 davem Exp $ +/* $Id: iommu.c,v 1.16 1999/12/28 04:28:54 anton Exp $ * iommu.c: IOMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru) + * Copyright (C) 1995 Pete Zaitcev * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ @@ -12,10 +12,13 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/malloc.h> +#include <asm/scatterlist.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sbus.h> #include <asm/io.h> #include <asm/mxcc.h> +#include <asm/mbus.h> /* srmmu.c */ extern int viking_mxcc_present; @@ -45,20 +48,23 @@ static inline void iommu_map_dvma_pages_for_iommu(struct iommu_struct *iommu) } void __init -iommu_init(int iommund, struct linux_sbus *sbus) +iommu_init(int iommund, struct sbus_bus *sbus) { unsigned int impl, vers, ptsize; unsigned long tmp; struct iommu_struct *iommu; struct linux_prom_registers iommu_promregs[PROMREG_MAX]; + struct resource r; int i; iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC); prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs)); + memset(&r, 0, sizeof(r)); + r.flags = iommu_promregs[0].which_io; + r.start = iommu_promregs[0].phys_addr; iommu->regs = (struct iommu_regs *) - sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3), - "IOMMU registers", iommu_promregs[0].which_io, 0x0); + sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs"); if(!iommu->regs) panic("Cannot map IOMMU registers."); impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28; @@ -137,18 +143,18 @@ iommu_init(int iommund, struct linux_sbus *sbus) impl, vers, iommu->page_table, ptsize); } -static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) { return (__u32)vaddr; } -static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) { flush_page_for_dma(0); return (__u32)vaddr; } -static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) { unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; @@ -159,81 +165,110 @@ static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct li return (__u32)vaddr; } -static void iommu_get_scsi_sgl_noflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { - for (; sz >= 0; sz--) - sg[sz].dvma_addr = (__u32) (sg[sz].addr); + for (; sz >= 0; sz--) { + sg[sz].dvma_address = (__u32) (sg[sz].address); + sg[sz].dvma_length = (__u32) (sg[sz].length); + } } -static void iommu_get_scsi_sgl_gflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { flush_page_for_dma(0); - for (; sz >= 0; sz--) - sg[sz].dvma_addr = (__u32) (sg[sz].addr); + for (; sz >= 0; sz--) { + sg[sz].dvma_address = (__u32) (sg[sz].address); + sg[sz].dvma_length = (__u32) (sg[sz].length); + } } -static void iommu_get_scsi_sgl_pflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { unsigned long page, oldpage = 0; while(sz >= 0) { - page = ((unsigned long) sg[sz].addr) & PAGE_MASK; + page = ((unsigned long) sg[sz].address) & PAGE_MASK; if (oldpage == page) page += PAGE_SIZE; /* We flushed that page already */ - while(page < (unsigned 
long)(sg[sz].addr + sg[sz].len)) { + while(page < (unsigned long)(sg[sz].address + sg[sz].length)) { flush_page_for_dma(page); page += PAGE_SIZE; } - sg[sz].dvma_addr = (__u32) (sg[sz].addr); + sg[sz].dvma_address = (__u32) (sg[sz].address); + sg[sz].dvma_length = (__u32) (sg[sz].length); sz--; oldpage = page - PAGE_SIZE; } } -static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus) +static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus) { } -static void iommu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { } #ifdef CONFIG_SBUS -static void iommu_map_dma_area(unsigned long addr, int len) +static void iommu_map_dma_area(unsigned long va, __u32 addr, int len) { - unsigned long page, end; + unsigned long page, end, ipte_cache; pgprot_t dvma_prot; - struct iommu_struct *iommu = SBus_chain->iommu; + struct iommu_struct *iommu = sbus_root->iommu; iopte_t *iopte = iommu->page_table; iopte_t *first; - if(viking_mxcc_present) + if(viking_mxcc_present || srmmu_modtype == HyperSparc) { dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); - else + ipte_cache = 1; + } else { dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV); + ipte_cache = 0; + } iopte += ((addr - iommu->start) >> PAGE_SHIFT); first = iopte; end = PAGE_ALIGN((addr + len)); while(addr < end) { - page = get_free_page(GFP_KERNEL); - if(!page) { - prom_printf("alloc_dvma: Cannot get a dvma page\n"); - prom_halt(); - } else { + page = va; + { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; - pgdp = pgd_offset(init_task.mm, addr); + if (viking_mxcc_present) + viking_mxcc_flush_page(page); + else if (viking_flush) + viking_flush_page(page); + else + flush_page_to_ram(page); + + pgdp = pgd_offset(&init_mm, addr); pmdp = pmd_offset(pgdp, addr); ptep = pte_offset(pmdp, addr); set_pte(ptep, pte_val(mk_pte(page, dvma_prot))); - iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page)); + if (ipte_cache != 0) { + iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page)); + } else { + iopte_val(*iopte++) = + MKIOPTE(mmu_v2p(page)) & ~IOPTE_CACHE; + } } addr += PAGE_SIZE; + va += PAGE_SIZE; } + /* P3: why do we need this? + * + * DAVEM: Because there are several aspects, none of which + * are handled by a single interface. Some cpus are + * completely not I/O DMA coherent, and some have + * virtually indexed caches. The driver DMA flushing + * methods handle the former case, but here during + * IOMMU page table modifications, and usage of non-cacheable + * cpu mappings of pages potentially in the cpu caches, we have + * to handle the latter case as well. 
+ */ flush_cache_all(); if(viking_mxcc_present) { unsigned long start = ((unsigned long) first) & PAGE_MASK; @@ -253,6 +288,10 @@ static void iommu_map_dma_area(unsigned long addr, int len) flush_tlb_all(); iommu_invalidate(iommu->regs); } + +static void iommu_unmap_dma_area(unsigned long addr, int len) +{ +} #endif static char *iommu_lockarea(char *vaddr, unsigned long len) @@ -287,5 +326,6 @@ void __init ld_mmu_iommu(void) #ifdef CONFIG_SBUS BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM); #endif } diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c index bae3bd140..4e49380d2 100644 --- a/arch/sparc/mm/nosrmmu.c +++ b/arch/sparc/mm/nosrmmu.c @@ -1,4 +1,4 @@ -/* $Id: nosrmmu.c,v 1.3 1999/08/31 06:54:35 davem Exp $ +/* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $ * nosrmmu.c: This file is a bunch of dummies for sun4 compiles, * so that it does not need srmmu and avoid ifdefs. * @@ -14,6 +14,8 @@ static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n"; enum mbus_module srmmu_modtype; +int vac_cache_size = 0; + static void __init should_not_happen(void) { prom_printf(shouldnothappen); @@ -49,12 +51,12 @@ void __init srmmu_end_memory(unsigned long memory_size, unsigned long *mem_end_p return 0; } -__u32 iounit_map_dma_init(struct linux_sbus *sbus, int size) +__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size) { return 0; } -__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct linux_sbus *sbus) +__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus) { return 0; } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 5d99b02dd..c365cf0d5 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1,8 +1,8 @@ -/* $Id: srmmu.c,v 1.192 1999/09/10 10:40:40 davem Exp $ +/* $Id: srmmu.c,v 1.199 1999/12/23 02:00:51 davem Exp $ * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru) + * Copyright (C) 1995 Pete Zaitcev * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ @@ -53,7 +53,7 @@ int vac_cache_size; int vac_line_size; int vac_badbits; -extern unsigned long sparc_iobase_vaddr; +extern struct resource sparc_iomap; #ifdef __SMP__ #define FLUSH_BEGIN(mm) @@ -284,7 +284,7 @@ void __init srmmu_frob_mem_map(unsigned long start_mem) } /* The very generic SRMMU page table operations. */ -static inline int srmmu_device_memory(unsigned long x) +static inline int srmmu_device_memory(unsigned long x) { return ((x & 0xF0000000) != 0); } @@ -464,17 +464,6 @@ static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address) return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); } -/* This must update the context table entry for this process. 
*/ -static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) -{ - if(tsk->mm->context != NO_CONTEXT && - tsk->mm->pgd != pgdp) { - flush_cache_mm(tsk->mm); - ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp); - flush_tlb_mm(tsk->mm); - } -} - static inline pte_t *srmmu_get_pte_fast(void) { struct page *ret; @@ -777,11 +766,11 @@ static void srmmu_quick_kernel_fault(unsigned long address) #else printk("Kernel faults at addr=0x%08lx\n", address); printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK))); - die_if_kernel("SRMMU bolixed...", current->tss.kregs); + die_if_kernel("SRMMU bolixed...", current->thread.kregs); #endif } -static inline void alloc_context(struct mm_struct *mm) +static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; @@ -794,7 +783,7 @@ static inline void alloc_context(struct mm_struct *mm) return; } ctxp = ctx_used.next; - if(ctxp->ctx_mm == current->mm) + if(ctxp->ctx_mm == old_mm) ctxp = ctxp->next; if(ctxp == &ctx_used) panic("out of mmu contexts"); @@ -817,29 +806,16 @@ static inline void free_context(int context) } -static void srmmu_switch_to_context(struct task_struct *tsk) +static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, + struct task_struct *tsk, int cpu) { - if(tsk->mm->context == NO_CONTEXT) { + if(mm->context == NO_CONTEXT) { spin_lock(&srmmu_context_spinlock); - alloc_context(tsk->mm); + alloc_context(old_mm, mm); spin_unlock(&srmmu_context_spinlock); - ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd); + ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } - srmmu_set_context(tsk->mm->context); -} - -static void srmmu_init_new_context(struct mm_struct *mm) -{ - spin_lock(&srmmu_context_spinlock); - alloc_context(mm); - spin_unlock(&srmmu_context_spinlock); - - flush_cache_mm(mm); - ctxd_set(&srmmu_context_table[mm->context], mm->pgd); - flush_tlb_mm(mm); - - if(mm == current->mm) - srmmu_set_context(mm->context); + srmmu_set_context(mm->context); } /* Low level IO area allocation on the SRMMU. */ @@ -885,9 +861,6 @@ void srmmu_unmapioaddr(unsigned long virt_addr) flush_tlb_all(); } -/* This is used in many routines below. */ -#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask)) - /* On the SRMMU we do not have the problems with limited tlb entries * for mapping kernel pages, so we just take things from the free page * pool. As a side effect we are putting a little too much pressure @@ -919,110 +892,85 @@ extern void tsunami_flush_tlb_all(void); extern void tsunami_flush_tlb_mm(struct mm_struct *mm); extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end); extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); - -/* Workaround, until we find what's going on with Swift. When low on memory, it sometimes - * loops in fault/handle_mm_fault incl. flush_tlb_page to find out it is already in page tables/ - * fault again on the same instruction. I really don't understand it, have checked it and contexts - * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj +extern void tsunami_setup_blockops(void); + +/* Workaround, until we find what's going on with Swift. When low on memory, + * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find + * out it is already in page tables/ fault again on the same instruction. 
+ * I really don't understand it, have checked it and contexts + * are right, flush_tlb_all is done as well, and it faults again... + * Strange. -jj + * + * The following code is a deadwood that may be necessary when + * we start to make precise page flushes again. --zaitcev */ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) { +#if 0 static unsigned long last; - - if (last == address) viking_hwprobe(address); + unsigned int val; + /* unsigned int n; */ + + if (address == last) { + val = srmmu_hwprobe(address); + if (val != 0 && pte_val(pte) != val) { + printk("swift_update_mmu_cache: " + "addr %lx put %08x probed %08x from %p\n", + address, pte_val(pte), val, + __builtin_return_address(0)); + srmmu_flush_whole_tlb(); + } + } last = address; +#endif } -/* Swift flushes. It has the recommended SRMMU specification flushing - * facilities, so we can do things in a more fine grained fashion than we - * could on the tsunami. Let's watch out for HARDWARE BUGS... - */ - -static void swift_flush_cache_all(void) -{ - flush_user_windows(); - swift_idflash_clear(); -} - -static void swift_flush_cache_mm(struct mm_struct *mm) -{ - FLUSH_BEGIN(mm) - flush_user_windows(); - swift_idflash_clear(); - FLUSH_END -} - -static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end) -{ - FLUSH_BEGIN(mm) - flush_user_windows(); - swift_idflash_clear(); - FLUSH_END -} - -static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page) -{ - FLUSH_BEGIN(vma->vm_mm) - flush_user_windows(); - if(vma->vm_flags & VM_EXEC) - swift_flush_icache(); - swift_flush_dcache(); - FLUSH_END -} - -/* Not copy-back on swift. */ -static void swift_flush_page_to_ram(unsigned long page) -{ -} - -/* But not IO coherent either. */ -static void swift_flush_page_for_dma(unsigned long page) -{ - swift_flush_dcache(); -} - -/* Again, Swift is non-snooping split I/D cache'd just like tsunami, - * so have to punt the icache for on-stack signal insns. Only the - * icache need be flushed since the dcache is write-through. - */ -static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) -{ - swift_flush_icache(); -} - -static void swift_flush_chunk(unsigned long chunk) -{ -} - -static void swift_flush_tlb_all(void) -{ - srmmu_flush_whole_tlb(); - module_stats.invall++; -} - -static void swift_flush_tlb_mm(struct mm_struct *mm) -{ - FLUSH_BEGIN(mm) - srmmu_flush_whole_tlb(); - module_stats.invmm++; - FLUSH_END -} +/* swift.S */ +extern void swift_flush_cache_all(void); +extern void swift_flush_cache_mm(struct mm_struct *mm); +extern void swift_flush_cache_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); +extern void swift_flush_page_to_ram(unsigned long page); +extern void swift_flush_page_for_dma(unsigned long page); +extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); +extern void swift_flush_chunk(unsigned long chunk); +extern void swift_flush_tlb_all(void); +extern void swift_flush_tlb_mm(struct mm_struct *mm); +extern void swift_flush_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); -static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) +#if 0 /* P3: deadwood to debug precise flushes on Swift. 
*/ +void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { - FLUSH_BEGIN(mm) - srmmu_flush_whole_tlb(); - module_stats.invrnge++; - FLUSH_END -} + int cctx, ctx1; -static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) -{ - FLUSH_BEGIN(vma->vm_mm) - srmmu_flush_whole_tlb(); + page &= PAGE_MASK; + if ((ctx1 = vma->vm_mm->context) != -1) { + cctx = srmmu_get_context(); +/* Is context # ever different from current context? P3 */ + if (cctx != ctx1) { + printk("flush ctx %02x curr %02x\n", ctx1, cctx); + srmmu_set_context(ctx1); + swift_flush_page(page); + __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : + "r" (page), "i" (ASI_M_FLUSH_PROBE)); + srmmu_set_context(cctx); + } else { + /* Rm. prot. bits from virt. c. */ + /* swift_flush_cache_all(); */ + /* swift_flush_cache_page(vma, page); */ + swift_flush_page(page); + + __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : + "r" (page), "i" (ASI_M_FLUSH_PROBE)); + /* same as above: srmmu_flush_tlb_page() */ + } + } module_stats.invpg++; - FLUSH_END } +#endif /* The following are all MBUS based SRMMU modules, and therefore could * be found in a multiprocessor configuration. On the whole, these @@ -1333,103 +1281,21 @@ static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) hyper_flush_whole_icache(); } -static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) +static void hypersparc_switch_mm(struct mm_struct *old_mm, + struct mm_struct *mm, struct task_struct *tsk, int cpu) { - unsigned long page = ((unsigned long) pgdp) & PAGE_MASK; - - if(pgdp != swapper_pg_dir) - hypersparc_flush_page_to_ram(page); - - if(tsk->mm->context != NO_CONTEXT && - tsk->mm->pgd != pgdp) { - flush_cache_mm(tsk->mm); - ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp); - flush_tlb_mm(tsk->mm); - } -} - -static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) -{ - if(pgdp != swapper_pg_dir) - flush_chunk((unsigned long)pgdp); - if(tsk->mm->context != NO_CONTEXT && - tsk->mm->pgd != pgdp) { - flush_cache_mm(tsk->mm); - ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp); - flush_tlb_mm(tsk->mm); - } -} - -static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) -{ - register unsigned long a, b, c, d, e, f, g; - unsigned long page = ((unsigned long) pgdp) & PAGE_MASK; - unsigned long line; - - if(pgdp == swapper_pg_dir) - goto skip_flush; - - a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; - page &= PAGE_MASK; - line = (page + PAGE_SIZE) - 0x100; - goto inside; - do { - line -= 0x100; - inside: - __asm__ __volatile__("sta %%g0, [%0] %1\n\t" - "sta %%g0, [%0 + %2] %1\n\t" - "sta %%g0, [%0 + %3] %1\n\t" - "sta %%g0, [%0 + %4] %1\n\t" - "sta %%g0, [%0 + %5] %1\n\t" - "sta %%g0, [%0 + %6] %1\n\t" - "sta %%g0, [%0 + %7] %1\n\t" - "sta %%g0, [%0 + %8] %1\n\t" : : - "r" (line), - "i" (ASI_M_FLUSH_PAGE), - "r" (a), "r" (b), "r" (c), "r" (d), - "r" (e), "r" (f), "r" (g)); - } while(line != page); -skip_flush: - if(tsk->mm->context != NO_CONTEXT && - tsk->mm->pgd != pgdp) { - flush_cache_mm(tsk->mm); - ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp); - flush_tlb_mm(tsk->mm); - } -} - -static void hypersparc_switch_to_context(struct task_struct *tsk) -{ - if(tsk->mm->context == NO_CONTEXT) { + if(mm->context == NO_CONTEXT) { ctxd_t *ctxp; spin_lock(&srmmu_context_spinlock); - alloc_context(tsk->mm); + alloc_context(old_mm, mm); spin_unlock(&srmmu_context_spinlock); - ctxp = &srmmu_context_table[tsk->mm->context]; - srmmu_set_entry((pte_t 
*)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4)))); + ctxp = &srmmu_context_table[mm->context]; + srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4)))); hypersparc_flush_page_to_ram((unsigned long)ctxp); } hyper_flush_whole_icache(); - srmmu_set_context(tsk->mm->context); -} - -static void hypersparc_init_new_context(struct mm_struct *mm) -{ - ctxd_t *ctxp; - - spin_lock(&srmmu_context_spinlock); - alloc_context(mm); - spin_unlock(&srmmu_context_spinlock); - - ctxp = &srmmu_context_table[mm->context]; - srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4)))); - hypersparc_flush_page_to_ram((unsigned long)ctxp); - - if(mm == current->mm) { - hyper_flush_whole_icache(); - srmmu_set_context(mm->context); - } + srmmu_set_context(mm->context); } static unsigned long mempool; @@ -1694,7 +1560,8 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) srmmu_map[srmmu_bank].vbase = vbase; srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr; srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes; - srmmu_bank++; + if (srmmu_map[srmmu_bank].size) + srmmu_bank++; map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE; return vstart; } @@ -1949,8 +1816,8 @@ unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long en int i, cpunode; char node_str[128]; - sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */ - physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */ + sparc_iomap.start = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */ + physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */ if (sparc_cpu_model == sun4d) num_contexts = 65536; /* We know it is Viking */ @@ -1981,7 +1848,7 @@ unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long en srmmu_allocate_ptable_skeleton(KERNBASE, end_mem); #if CONFIG_SUN_IO - srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END); + srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END); srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END); #endif @@ -2051,16 +1918,14 @@ static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad static void srmmu_destroy_context(struct mm_struct *mm) { - if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) { - /* XXX This could be drastically improved. - * XXX We are only called from __exit_mm and it just did - * XXX cache/tlb mm flush and right after this will (re-) - * XXX SET_PAGE_DIR to swapper_pg_dir. 
-DaveM - */ + + if(mm->context != NO_CONTEXT) { flush_cache_mm(mm); ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir); flush_tlb_mm(mm); + spin_lock(&srmmu_context_spinlock); free_context(mm->context); + spin_unlock(&srmmu_context_spinlock); mm->context = NO_CONTEXT; } } @@ -2136,7 +2001,7 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma, static void hypersparc_destroy_context(struct mm_struct *mm) { - if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) { + if(mm->context != NO_CONTEXT) { ctxd_t *ctxp; /* HyperSparc is copy-back, any data for this @@ -2151,7 +2016,9 @@ static void hypersparc_destroy_context(struct mm_struct *mm) hypersparc_flush_page_to_ram((unsigned long)ctxp); flush_tlb_mm(mm); + spin_lock(&srmmu_context_spinlock); free_context(mm->context); + spin_unlock(&srmmu_context_spinlock); mm->context = NO_CONTEXT; } } @@ -2267,11 +2134,9 @@ static void __init init_hypersparc(void) BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_mm, hypersparc_switch_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM); poke_srmmu = poke_hypersparc; hypersparc_setup_blockops(); @@ -2339,7 +2204,6 @@ static void __init init_cypress_common(void) BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM); poke_srmmu = poke_cypress; @@ -2371,12 +2235,14 @@ static void __init init_cypress_605(unsigned long mrev) static void __init poke_swift(void) { - unsigned long mreg = srmmu_get_mmureg(); + unsigned long mreg; /* Clear any crap from the cache or else... */ - swift_idflash_clear(); - mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */ + swift_flush_cache_all(); + /* Enable I & D caches */ + mreg = srmmu_get_mmureg(); + mreg |= (SWIFT_IE | SWIFT_DE); /* The Swift branch folding logic is completely broken. 
At * trap time, if things are just right, if can mistakenly * think that a trap is coming from kernel mode when in fact @@ -2442,19 +2308,21 @@ static void __init init_swift(void) BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */ + BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM); + flush_page_for_dma_global = 0; + /* Are you now convinced that the Swift is one of the * biggest VLSI abortions of all time? Bravo Fujitsu! * Fujitsu, the !#?!%$'d up processor people. I bet if @@ -2611,7 +2479,7 @@ static void __init init_turbosparc(void) BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM); poke_srmmu = poke_turbosparc; } @@ -2642,7 +2510,7 @@ static void __init init_tsunami(void) BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */ + BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM); @@ -2654,6 +2522,8 @@ static void __init init_tsunami(void) BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM); poke_srmmu = poke_tsunami; + + tsunami_setup_blockops(); } static void __init poke_viking(void) @@ -2725,7 +2595,6 @@ static void __init init_viking(void) BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ @@ -2736,8 +2605,7 @@ static void __init init_viking(void) * which we use the IOMMU. 
*/ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); - /* Also, this is so far the only chip which actually uses - the page argument to flush_page_for_dma */ + flush_page_for_dma_global = 0; } else { srmmu_name = "TI Viking/MXCC"; @@ -2928,6 +2796,16 @@ static int srmmu_check_pgt_cache(int low, int high) return freed; } +static void srmmu_flush_dma_area(unsigned long addr, int len) +{ + /* XXX Later */ +} + +static void srmmu_inval_dma_area(unsigned long addr, int len) +{ + /* XXX Later */ +} + extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, tsetup_mmu_patchme, rtrap_mmu_patchme; @@ -2999,21 +2877,18 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM); - + BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM); - + BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1); - BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM); + BTFIXUPSET_SETHI(none_mask, 0xF0000000); /* P3: is it used? */ - BTFIXUPSET_SETHI(none_mask, 0xF0000000); - BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); @@ -3072,6 +2947,11 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); +/* hmm isn't flush_dma_area the same thing as flush_page_for_dma? */ +/* It is, except flush_page_for_dma was local to srmmu.c */ + BTFIXUPSET_CALL(mmu_flush_dma_area, srmmu_flush_dma_area, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_inval_dma_area, srmmu_inval_dma_area, BTFIXUPCALL_NORM); + get_srmmu_type(); patch_window_trap_handlers(); @@ -3104,6 +2984,7 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); #endif + if (sparc_cpu_model == sun4d) ld_mmu_iounit(); else diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index f91ab1ce6..0530e635f 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1,19 +1,24 @@ -/* $Id: sun4c.c,v 1.176 1999/08/31 06:54:42 davem Exp $ +/* $Id: sun4c.c,v 1.182 1999/12/27 06:30:04 anton Exp $ * sun4c.c: Doing in software what should be done in hardware. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au) - * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) + * Copyright (C) 1997,99 Anton Blanchard (anton@progsoc.uts.edu.au) * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ +#define NR_TASK_BUCKETS 512 + #include <linux/config.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> +#include <linux/bootmem.h> +#include <asm/scatterlist.h> #include <asm/page.h> +#include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/vaddrs.h> #include <asm/idprom.h> @@ -27,42 +32,19 @@ #include <asm/mmu_context.h> #include <asm/sun4paddr.h> -/* TODO: Make it such that interrupt handlers cannot dick with - * the user segment lists, most of the cli/sti pairs can - * disappear once that is taken care of. - */ - -/* XXX Ok the real performance win, I figure, will be to use a combined hashing - * XXX and bitmap scheme to keep track of what we have mapped where. The whole - * XXX incentive is to make it such that the range flushes can be serviced - * XXX always in near constant time. --DaveM +/* Because of our dynamic kernel TLB miss strategy, and how + * our DVMA mapping allocation works, you _MUST_: + * + * 1) Disable interrupts _and_ not touch any dynamic kernel + * memory while messing with kernel MMU state. By + * dynamic memory I mean any object which is not in + * the kernel image itself or a task_struct (both of + * which are locked into the MMU). + * 2) Disable interrupts while messing with user MMU state. */ extern int num_segmaps, num_contexts; -/* Define this to get extremely anal debugging, undefine for performance. */ -/* #define DEBUG_SUN4C_MM */ - -#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask)) - -/* This is used in many routines below. */ -#define FUW_INLINE do { \ - register int ctr asm("g5"); \ - ctr = 0; \ - __asm__ __volatile__("\n" \ - "1: ld [%%g6 + %2], %%g4 ! flush user windows\n" \ - " orcc %%g0, %%g4, %%g0\n" \ - " add %0, 1, %0\n" \ - " bne 1b\n" \ - " save %%sp, -64, %%sp\n" \ - "2: subcc %0, 1, %0\n" \ - " bne 2b\n" \ - " restore %%g0, %%g0, %%g0\n" \ - : "=&r" (ctr) \ - : "0" (ctr), "i" (UWINMASK_OFFSET) \ - : "g4", "cc"); \ -} while(0); - #ifdef CONFIG_SUN4 #define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes #else @@ -82,58 +64,21 @@ extern int num_segmaps, num_contexts; #define MIN(a,b) ((a)<(b)?(a):(b)) #endif - -#define KGPROF_PROFILING 0 -#if KGPROF_PROFILING -#define KGPROF_DEPTH 3 /* this needs to match the code below */ -#define KGPROF_SIZE 100 -static struct { - unsigned addr[KGPROF_DEPTH]; - unsigned count; -} kgprof_counters[KGPROF_SIZE]; - -/* just call this function from whatever function you think needs it then - look at /proc/cpuinfo to see where the function is being called from - and how often. 
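The replacement comment above (the two numbered rules) is the invariant the rest of this file is rewritten around: the open-coded FUW_INLINE window-flush macro goes away in favour of flush_user_windows(), and routines that touch sun4c MMU state gain an explicit save_and_cli()/restore_flags() bracket. A minimal sketch of the pattern, using only primitives that already appear in this file; the function name and the elided body are illustrative:

    static void sun4c_touch_user_segments(int ctx)  /* illustrative only */
    {
        unsigned long flags;
        int savectx;

        flush_user_windows();       /* spill user register windows first */
        save_and_cli(flags);        /* rules 1/2: no interrupts while MMU state changes */

        savectx = sun4c_get_context();
        sun4c_set_context(ctx);

        /* ... flush/unmap the user segments owned by 'ctx' here ... */

        sun4c_set_context(savectx);
        restore_flags(flags);
    }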
This gives a type of "kernel gprof" */ -#define NEXT_PROF(prev,lvl) (prev>PAGE_OFFSET?__builtin_return_address(lvl):0) -static inline void kgprof_profile(void) -{ - unsigned ret[KGPROF_DEPTH]; - int i,j; - /* you can't use a variable argument to __builtin_return_address() */ - ret[0] = (unsigned)__builtin_return_address(0); - ret[1] = (unsigned)NEXT_PROF(ret[0],1); - ret[2] = (unsigned)NEXT_PROF(ret[1],2); - - for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) { - for (j=0;j<KGPROF_DEPTH;j++) - if (ret[j] != kgprof_counters[i].addr[j]) break; - if (j==KGPROF_DEPTH) break; - } - if (i<KGPROF_SIZE) { - for (j=0;j<KGPROF_DEPTH;j++) - kgprof_counters[i].addr[j] = ret[j]; - kgprof_counters[i].count++; - } -} -#endif - - /* Flushing the cache. */ struct sun4c_vac_props sun4c_vacinfo; -static int ctxflushes, segflushes, pageflushes; unsigned long sun4c_kernel_faults; /* convert a virtual address to a physical address and vice - versa. Easy on the 4c */ + * versa. Easy on the 4c + */ static unsigned long sun4c_v2p(unsigned long vaddr) { - return(vaddr - PAGE_OFFSET); + return (vaddr - PAGE_OFFSET); } static unsigned long sun4c_p2v(unsigned long vaddr) { - return(vaddr + PAGE_OFFSET); + return (vaddr + PAGE_OFFSET); } @@ -142,44 +87,64 @@ void sun4c_flush_all(void) { unsigned long begin, end; - if(sun4c_vacinfo.on) + if (sun4c_vacinfo.on) panic("SUN4C: AIEEE, trying to invalidate vac while" " it is on."); /* Clear 'valid' bit in all cache line tags */ begin = AC_CACHETAGS; end = (AC_CACHETAGS + SUN4C_VAC_SIZE); - while(begin < end) { + while (begin < end) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (begin), "i" (ASI_CONTROL)); begin += sun4c_vacinfo.linesize; } } -/* Context level flush. */ -static inline void sun4c_flush_context_hw(void) +static __inline__ void sun4c_flush_context_hw(void) { unsigned long end = SUN4C_VAC_SIZE; - unsigned pgsz = PAGE_SIZE; - ctxflushes++; - __asm__ __volatile__(" -1: subcc %0, %2, %0 - bg 1b - sta %%g0, [%0] %3 - nop; nop; nop; ! Weitek hwbug -" : "=&r" (end) - : "0" (end), "r" (pgsz), "i" (ASI_HWFLUSHCONTEXT) + __asm__ __volatile__( + "1: addcc %0, -4096, %0\n\t" + " bne 1b\n\t" + " sta %%g0, [%0] %2" + : "=&r" (end) + : "0" (end), "i" (ASI_HWFLUSHCONTEXT) : "cc"); } +/* Must be called minimally with IRQs disabled. */ +static void sun4c_flush_segment_hw(unsigned long addr) +{ + if (sun4c_get_segmap(addr) != invalid_segment) { + unsigned long vac_size = SUN4C_VAC_SIZE; + + __asm__ __volatile__( + "1: addcc %0, -4096, %0\n\t" + " bne 1b\n\t" + " sta %%g0, [%2 + %0] %3" + : "=&r" (vac_size) + : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG) + : "cc"); + } +} + +/* Must be called minimally with interrupts disabled. */ +static __inline__ void sun4c_flush_page_hw(unsigned long addr) +{ + addr &= PAGE_MASK; + if ((int)sun4c_get_pte(addr) < 0) + __asm__ __volatile__("sta %%g0, [%0] %1" + : : "r" (addr), "i" (ASI_HWFLUSHPAGE)); +} + /* Don't inline the software version as it eats too many cache lines if expanded. */ static void sun4c_flush_context_sw(void) { unsigned long nbytes = SUN4C_VAC_SIZE; unsigned long lsize = sun4c_vacinfo.linesize; - ctxflushes++; __asm__ __volatile__(" add %2, %2, %%g1 add %2, %%g1, %%g2 @@ -203,72 +168,13 @@ static void sun4c_flush_context_sw(void) : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc"); } -/* Scrape the segment starting at ADDR from the virtual cache. 
*/ -static inline void sun4c_flush_segment(unsigned long addr) -{ - if(sun4c_get_segmap(addr) == invalid_segment) - return; - - segflushes++; - if(sun4c_vacinfo.do_hwflushes) { - unsigned long end = (addr + SUN4C_VAC_SIZE); - - for( ; addr < end; addr += PAGE_SIZE) - __asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : : - "r" (addr), "i" (ASI_HWFLUSHSEG)); - } else { - unsigned long nbytes = SUN4C_VAC_SIZE; - unsigned long lsize = sun4c_vacinfo.linesize; - - __asm__ __volatile__("add %2, %2, %%g1\n\t" - "add %2, %%g1, %%g2\n\t" - "add %2, %%g2, %%g3\n\t" - "add %2, %%g3, %%g4\n\t" - "add %2, %%g4, %%g5\n\t" - "add %2, %%g5, %%o4\n\t" - "add %2, %%o4, %%o5\n" - "1:\n\t" - "subcc %1, %%o5, %1\n\t" - "sta %%g0, [%0] %6\n\t" - "sta %%g0, [%0 + %2] %6\n\t" - "sta %%g0, [%0 + %%g1] %6\n\t" - "sta %%g0, [%0 + %%g2] %6\n\t" - "sta %%g0, [%0 + %%g3] %6\n\t" - "sta %%g0, [%0 + %%g4] %6\n\t" - "sta %%g0, [%0 + %%g5] %6\n\t" - "sta %%g0, [%0 + %%o4] %6\n\t" - "bg 1b\n\t" - " add %0, %%o5, %0\n\t" - : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize) - : "0" (addr), "1" (nbytes), "2" (lsize), - "i" (ASI_FLUSHSEG) - : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc"); - } -} - -/* Call this version when you know hardware flushes are available. */ -static inline void sun4c_flush_segment_hw(unsigned long addr) -{ - if(sun4c_get_segmap(addr) != invalid_segment) { - unsigned long end; - - segflushes++; - for(end = addr + SUN4C_VAC_SIZE; addr < end; addr += PAGE_SIZE) - __asm__ __volatile__("sta %%g0, [%0] %1" - : : "r" (addr), "i" (ASI_HWFLUSHSEG)); - /* Weitek POWER-UP hwbug workaround. */ - __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug"); - } -} - /* Don't inline the software version as it eats too many cache lines if expanded. */ static void sun4c_flush_segment_sw(unsigned long addr) { - if(sun4c_get_segmap(addr) != invalid_segment) { + if (sun4c_get_segmap(addr) != invalid_segment) { unsigned long nbytes = SUN4C_VAC_SIZE; unsigned long lsize = sun4c_vacinfo.linesize; - segflushes++; __asm__ __volatile__(" add %2, %2, %%g1 add %2, %%g1, %%g2 @@ -300,12 +206,11 @@ static void sun4c_flush_page(unsigned long addr) { addr &= PAGE_MASK; - if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) != - _SUN4C_PAGE_VALID) + if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) != + _SUN4C_PAGE_VALID) return; - pageflushes++; - if(sun4c_vacinfo.do_hwflushes) { + if (sun4c_vacinfo.do_hwflushes) { __asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : : "r" (addr), "i" (ASI_HWFLUSHPAGE)); } else { @@ -338,30 +243,15 @@ static void sun4c_flush_page(unsigned long addr) } } -/* Again, hw-only and sw-only cache page-level flush variants. */ -static inline void sun4c_flush_page_hw(unsigned long addr) -{ - addr &= PAGE_MASK; - if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) == - _SUN4C_PAGE_VALID) { - pageflushes++; - __asm__ __volatile__("sta %%g0, [%0] %1" - : : "r" (addr), "i" (ASI_HWFLUSHPAGE)); - /* Weitek POWER-UP hwbug workaround. */ - __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug"); - } -} - /* Don't inline the software version as it eats too many cache lines if expanded. 
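The "don't inline" remark above applies to all three software flush variants: without the hardware flush ASIs the virtual cache has to be swept one line at a time over a full SUN4C_VAC_SIZE window, so these routines are kept out of line and hand-unrolled to eight stores per iteration. Stripped of the unrolling and the inline assembly, the loop they implement is roughly the following; sun4c_flush_one_line() stands in for the single "sta %g0, [addr] ASI_FLUSHSEG" store and is not a real kernel function:

    /* Rough C model of sun4c_flush_segment_sw(); illustrative only. */
    static void sun4c_flush_segment_model(unsigned long addr)
    {
        unsigned long nbytes = SUN4C_VAC_SIZE;
        unsigned long lsize = sun4c_vacinfo.linesize;

        while (nbytes) {
            sun4c_flush_one_line(addr); /* stand-in for the ASI_FLUSHSEG store */
            addr += lsize;
            nbytes -= lsize;
        }
    }

The hardware variants defined nearby replace this sweep with one store per page-sized step through the flush ASIs, which is why sun4c_vacinfo.do_hwflushes is consulted wherever a flush has to be issued.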
*/ static void sun4c_flush_page_sw(unsigned long addr) { addr &= PAGE_MASK; - if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) == - _SUN4C_PAGE_VALID) { + if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) == + _SUN4C_PAGE_VALID) { unsigned long left = PAGE_SIZE; unsigned long lsize = sun4c_vacinfo.linesize; - pageflushes++; __asm__ __volatile__(" add %2, %2, %%g1 add %2, %%g1, %%g2 @@ -411,7 +301,7 @@ static inline void sun4c_init_clean_segmap(unsigned char pseg) unsigned long vaddr; sun4c_put_segmap(0, pseg); - for(vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr+=PAGE_SIZE) + for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE) sun4c_put_pte(vaddr, 0); sun4c_put_segmap(0, invalid_segment); } @@ -423,15 +313,15 @@ static inline void sun4c_init_clean_mmu(unsigned long kernel_end) savectx = sun4c_get_context(); kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end); - for(ctx = 0; ctx < num_contexts; ctx++) { + for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); - for(vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE) + for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE) sun4c_put_segmap(vaddr, invalid_segment); - for(vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE) + for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE) sun4c_put_segmap(vaddr, invalid_segment); - for(vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE) + for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE) sun4c_put_segmap(vaddr, invalid_segment); - for(vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE) + for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE) sun4c_put_segmap(vaddr, invalid_segment); } sun4c_set_context(savectx); @@ -442,7 +332,7 @@ void __init sun4c_probe_vac(void) sun4c_disable_vac(); if (ARCH_SUN4) { - switch(idprom->id_machtype) { + switch (idprom->id_machtype) { case (SM_SUN4|SM_4_110): sun4c_vacinfo.type = NONE; @@ -477,12 +367,12 @@ void __init sun4c_probe_vac(void) default: prom_printf("Cannot initialize VAC - wierd sun4 model idprom->id_machtype = %d", idprom->id_machtype); prom_halt(); - } + }; } else { sun4c_vacinfo.type = WRITE_THROUGH; - if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { + if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || + (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { /* PROM on SS1 lacks this info, to be super safe we * hard code it here since this arch is cast in stone. */ @@ -497,7 +387,7 @@ void __init sun4c_probe_vac(void) sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node, "vac-hwflush", 0); - if(sun4c_vacinfo.do_hwflushes == 0) + if (sun4c_vacinfo.do_hwflushes == 0) sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node, "vac_hwflush", 0); @@ -509,7 +399,7 @@ void __init sun4c_probe_vac(void) sun4c_vacinfo.num_lines = (sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize); - switch(sun4c_vacinfo.linesize) { + switch (sun4c_vacinfo.linesize) { case 16: sun4c_vacinfo.log2lsize = 4; break; @@ -566,7 +456,7 @@ static void patch_kernel_fault_handler(void) prom_printf("Unhandled number of segmaps: %d\n", num_segmaps); prom_halt(); - } + }; switch (num_contexts) { case 8: /* Default, nothing to do. 
*/ @@ -574,19 +464,22 @@ static void patch_kernel_fault_handler(void) case 16: PATCH_INSN(num_context_patch1_16, num_context_patch1); +#if 0 PATCH_INSN(num_context_patch2_16, num_context_patch2); +#endif break; default: prom_printf("Unhandled number of contexts: %d\n", num_contexts); prom_halt(); - } - if(sun4c_vacinfo.do_hwflushes != 0) { + }; + + if (sun4c_vacinfo.do_hwflushes != 0) { PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1); PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2); } else { - switch(sun4c_vacinfo.linesize) { + switch (sun4c_vacinfo.linesize) { case 16: /* Default, nothing to do. */ break; @@ -604,7 +497,7 @@ static void patch_kernel_fault_handler(void) static void __init sun4c_probe_mmu(void) { if (ARCH_SUN4) { - switch(idprom->id_machtype) { + switch (idprom->id_machtype) { case (SM_SUN4|SM_4_110): prom_printf("No support for 4100 yet\n"); prom_halt(); @@ -631,10 +524,10 @@ static void __init sun4c_probe_mmu(void) default: prom_printf("Invalid SUN4 model\n"); prom_halt(); - } + }; } else { - if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { + if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || + (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { /* Hardcode these just to be safe, PROM on SS1 does * not have this info available in the root node. */ @@ -658,20 +551,15 @@ void __init sun4c_probe_memerr_reg(void) struct linux_prom_registers regs[1]; if (ARCH_SUN4) { - sun4c_memerr_reg = sparc_alloc_io(sun4_memreg_physaddr, 0, - PAGE_SIZE, - "memory parity error", - 0x0, 0); + sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE); } else { node = prom_getchild(prom_root_node); node = prom_searchsiblings(prom_root_node, "memory-error"); if (!node) return; prom_getproperty(node, "reg", (char *)regs, sizeof(regs)); - sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0, - regs[0].reg_size, - "memory parity error", - regs[0].which_io, 0); + /* hmm I think regs[0].which_io is zero here anyways */ + sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size); } } @@ -679,10 +567,10 @@ static inline void sun4c_init_ss2_cache_bug(void) { extern unsigned long start; - if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) || - (idprom->id_machtype == (SM_SUN4 | SM_4_330)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) { + if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) || + (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) || + (idprom->id_machtype == (SM_SUN4 | SM_4_330)) || + (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) { /* Whee.. */ printk("SS2 cache bug detected, uncaching trap table page\n"); sun4c_flush_page((unsigned int) &start); @@ -692,17 +580,13 @@ static inline void sun4c_init_ss2_cache_bug(void) } /* Addr is always aligned on a page boundry for us already. 
*/ -static void sun4c_map_dma_area(unsigned long addr, int len) +static void sun4c_map_dma_area(unsigned long va, u32 addr, int len) { unsigned long page, end; end = PAGE_ALIGN((addr + len)); - while(addr < end) { - page = get_free_page(GFP_KERNEL); - if(!page) { - prom_printf("alloc_dvma: Cannot get a dvma page\n"); - prom_halt(); - } + while (addr < end) { + page = va; sun4c_flush_page(page); page -= PAGE_OFFSET; page >>= PAGE_SHIFT; @@ -710,9 +594,21 @@ static void sun4c_map_dma_area(unsigned long addr, int len) _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV); sun4c_put_pte(addr, page); addr += PAGE_SIZE; + va += PAGE_SIZE; } } +static void sun4c_unmap_dma_area(unsigned long addr, int len) +{ +} + +static void sun4c_inval_dma_area(unsigned long addr, int len) +{ +} + +static void sun4c_flush_dma_area(unsigned long addr, int len) +{ +} /* TLB management. */ @@ -726,6 +622,13 @@ struct sun4c_mmu_entry { unsigned long vaddr; unsigned char pseg; unsigned char locked; + + /* For user mappings only, and completely hidden from kernel + * TLB miss code. + */ + unsigned char ctx; + struct sun4c_mmu_entry *lru_next; + struct sun4c_mmu_entry *lru_prev; }; static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS]; @@ -734,12 +637,15 @@ static void __init sun4c_init_mmu_entry_pool(void) { int i; - for(i=0; i < SUN4C_MAX_SEGMAPS; i++) { + for (i=0; i < SUN4C_MAX_SEGMAPS; i++) { mmu_entry_pool[i].pseg = i; mmu_entry_pool[i].next = 0; mmu_entry_pool[i].prev = 0; mmu_entry_pool[i].vaddr = 0; mmu_entry_pool[i].locked = 0; + mmu_entry_pool[i].ctx = 0; + mmu_entry_pool[i].lru_next = 0; + mmu_entry_pool[i].lru_prev = 0; } mmu_entry_pool[invalid_segment].locked = 1; } @@ -750,8 +656,8 @@ static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on, unsigned long start, end; end = vaddr + SUN4C_REAL_PGDIR_SIZE; - for(start = vaddr; start < end; start += PAGE_SIZE) - if(sun4c_get_pte(start) & _SUN4C_PAGE_VALID) + for (start = vaddr; start < end; start += PAGE_SIZE) + if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID) sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) & ~bits_off); } @@ -762,16 +668,16 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end) unsigned char pseg, ctx; #ifdef CONFIG_SUN4 /* sun4/110 and 260 have no kadb. 
*/ - if((idprom->id_machtype != (SM_SUN4 | SM_4_260)) && - (idprom->id_machtype != (SM_SUN4 | SM_4_110))) { + if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) && + (idprom->id_machtype != (SM_SUN4 | SM_4_110))) { #endif - for(vaddr = KADB_DEBUGGER_BEGVM; - vaddr < LINUX_OPPROM_ENDVM; - vaddr += SUN4C_REAL_PGDIR_SIZE) { + for (vaddr = KADB_DEBUGGER_BEGVM; + vaddr < LINUX_OPPROM_ENDVM; + vaddr += SUN4C_REAL_PGDIR_SIZE) { pseg = sun4c_get_segmap(vaddr); - if(pseg != invalid_segment) { + if (pseg != invalid_segment) { mmu_entry_pool[pseg].locked = 1; - for(ctx = 0; ctx < num_contexts; ctx++) + for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, vaddr, pseg); fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0); } @@ -779,10 +685,10 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end) #ifdef CONFIG_SUN4 } #endif - for(vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) { + for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) { pseg = sun4c_get_segmap(vaddr); mmu_entry_pool[pseg].locked = 1; - for(ctx = 0; ctx < num_contexts; ctx++) + for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, vaddr, pseg); fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE); } @@ -792,13 +698,13 @@ static void __init sun4c_init_lock_area(unsigned long start, unsigned long end) { int i, ctx; - while(start < end) { - for(i=0; i < invalid_segment; i++) - if(!mmu_entry_pool[i].locked) + while (start < end) { + for (i = 0; i < invalid_segment; i++) + if (!mmu_entry_pool[i].locked) break; mmu_entry_pool[i].locked = 1; sun4c_init_clean_segmap(i); - for(ctx = 0; ctx < num_contexts; ctx++) + for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, start, mmu_entry_pool[i].pseg); start += SUN4C_REAL_PGDIR_SIZE; } @@ -815,13 +721,15 @@ struct sun4c_mmu_ring { static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */ static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */ +static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */ struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */ struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */ -static inline void sun4c_init_rings(unsigned long *mempool) +static inline void sun4c_init_rings(void) { int i; - for(i=0; i<SUN4C_MAX_CONTEXTS; i++) { + + for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) { sun4c_context_ring[i].ringhd.next = sun4c_context_ring[i].ringhd.prev = &sun4c_context_ring[i].ringhd; @@ -830,6 +738,9 @@ static inline void sun4c_init_rings(unsigned long *mempool) sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev = &sun4c_ufree_ring.ringhd; sun4c_ufree_ring.num_entries = 0; + sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev = + &sun4c_ulru_ring.ringhd; + sun4c_ulru_ring.num_entries = 0; sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev = &sun4c_kernel_ring.ringhd; sun4c_kernel_ring.num_entries = 0; @@ -838,8 +749,8 @@ static inline void sun4c_init_rings(unsigned long *mempool) sun4c_kfree_ring.num_entries = 0; } -static inline void add_ring(struct sun4c_mmu_ring *ring, - struct sun4c_mmu_entry *entry) +static void add_ring(struct sun4c_mmu_ring *ring, + struct sun4c_mmu_entry *entry) { struct sun4c_mmu_entry *head = &ring->ringhd; @@ -849,49 +760,58 @@ static inline void add_ring(struct sun4c_mmu_ring *ring, ring->num_entries++; } -static inline void add_ring_ordered(struct sun4c_mmu_ring *ring, - struct sun4c_mmu_entry *entry) +static __inline__ void add_lru(struct 
sun4c_mmu_entry *entry) +{ + struct sun4c_mmu_ring *ring = &sun4c_ulru_ring; + struct sun4c_mmu_entry *head = &ring->ringhd; + + entry->lru_next = head; + (entry->lru_prev = head->lru_prev)->lru_next = entry; + head->lru_prev = entry; +} + +static void add_ring_ordered(struct sun4c_mmu_ring *ring, + struct sun4c_mmu_entry *entry) { struct sun4c_mmu_entry *head = &ring->ringhd; unsigned long addr = entry->vaddr; - if(head->next != &ring->ringhd) { - while((head->next != &ring->ringhd) && (head->next->vaddr < addr)) - head = head->next; - } + while ((head->next != &ring->ringhd) && (head->next->vaddr < addr)) + head = head->next; + entry->prev = head; (entry->next = head->next)->prev = entry; head->next = entry; ring->num_entries++; + + add_lru(entry); } -static inline void remove_ring(struct sun4c_mmu_ring *ring, - struct sun4c_mmu_entry *entry) +static __inline__ void remove_ring(struct sun4c_mmu_ring *ring, + struct sun4c_mmu_entry *entry) { struct sun4c_mmu_entry *next = entry->next; (next->prev = entry->prev)->next = next; ring->num_entries--; -#ifdef DEBUG_SUN4C_MM - if(ring->num_entries < 0) - panic("sun4c: Ring num_entries < 0!"); -#endif } -static inline void free_user_entry(int ctx, struct sun4c_mmu_entry *entry) +static void remove_lru(struct sun4c_mmu_entry *entry) { - remove_ring(sun4c_context_ring+ctx, entry); - add_ring(&sun4c_ufree_ring, entry); + struct sun4c_mmu_entry *next = entry->lru_next; + + (next->lru_prev = entry->lru_prev)->lru_next = next; } -static inline void assign_user_entry(int ctx, struct sun4c_mmu_entry *entry) +static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry) { - remove_ring(&sun4c_ufree_ring, entry); - add_ring_ordered(sun4c_context_ring+ctx, entry); + remove_ring(sun4c_context_ring+ctx, entry); + remove_lru(entry); + add_ring(&sun4c_ufree_ring, entry); } -static inline void free_kernel_entry(struct sun4c_mmu_entry *entry, - struct sun4c_mmu_ring *ring) +static void free_kernel_entry(struct sun4c_mmu_entry *entry, + struct sun4c_mmu_ring *ring) { remove_ring(ring, entry); add_ring(&sun4c_kfree_ring, entry); @@ -901,9 +821,9 @@ static void __init sun4c_init_fill_kernel_ring(int howmany) { int i; - while(howmany) { - for(i=0; i < invalid_segment; i++) - if(!mmu_entry_pool[i].locked) + while (howmany) { + for (i = 0; i < invalid_segment; i++) + if (!mmu_entry_pool[i].locked) break; mmu_entry_pool[i].locked = 1; sun4c_init_clean_segmap(i); @@ -916,54 +836,40 @@ static void __init sun4c_init_fill_user_ring(void) { int i; - for(i=0; i < invalid_segment; i++) { - if(mmu_entry_pool[i].locked) + for (i = 0; i < invalid_segment; i++) { + if (mmu_entry_pool[i].locked) continue; sun4c_init_clean_segmap(i); add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]); } } -static inline void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry) +static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry) { int savectx, ctx; savectx = sun4c_get_context(); - for(ctx = 0; ctx < num_contexts; ctx++) { + for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); sun4c_put_segmap(kentry->vaddr, invalid_segment); } sun4c_set_context(savectx); } -static inline void sun4c_kernel_map(struct sun4c_mmu_entry *kentry) +static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry) { int savectx, ctx; savectx = sun4c_get_context(); - for(ctx = 0; ctx < num_contexts; ctx++) { + for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); sun4c_put_segmap(kentry->vaddr, kentry->pseg); } sun4c_set_context(savectx); } -static inline void sun4c_user_unmap(struct 
sun4c_mmu_entry *uentry) -{ - sun4c_put_segmap(uentry->vaddr, invalid_segment); -} - -static inline void sun4c_user_map(struct sun4c_mmu_entry *uentry) -{ - unsigned long start = uentry->vaddr; - unsigned long end = start + SUN4C_REAL_PGDIR_SIZE; - - sun4c_put_segmap(uentry->vaddr, uentry->pseg); - while(start < end) { - sun4c_put_pte(start, 0); - start += PAGE_SIZE; - } -} +#define sun4c_user_unmap(__entry) \ + sun4c_put_segmap((__entry)->vaddr, invalid_segment) static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx) { @@ -971,11 +877,11 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx unsigned long flags; save_and_cli(flags); - if(head->next != head) { + if (head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); - FUW_INLINE + flush_user_windows(); sun4c_set_context(ctx); sun4c_flush_context_hw(); do { @@ -985,7 +891,7 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx free_user_entry(ctx, entry); entry = next; - } while(entry != head); + } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags); @@ -997,11 +903,11 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx unsigned long flags; save_and_cli(flags); - if(head->next != head) { + if (head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); - FUW_INLINE + flush_user_windows(); sun4c_set_context(ctx); sun4c_flush_context_sw(); do { @@ -1011,49 +917,31 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx free_user_entry(ctx, entry); entry = next; - } while(entry != head); + } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags); } -static inline void sun4c_demap_one(struct sun4c_mmu_ring *crp, unsigned char ctx) -{ - /* by using .prev we get a kind of "lru" algorithm */ - struct sun4c_mmu_entry *entry = crp->ringhd.prev; - unsigned long flags; - int savectx = sun4c_get_context(); - -#ifdef DEBUG_SUN4C_MM - if(entry == &crp->ringhd) - panic("sun4c_demap_one: Freeing from empty ctx ring."); -#endif - FUW_INLINE - save_and_cli(flags); - sun4c_set_context(ctx); - sun4c_flush_segment(entry->vaddr); - sun4c_user_unmap(entry); - free_user_entry(ctx, entry); - sun4c_set_context(savectx); - restore_flags(flags); -} - static int sun4c_user_taken_entries = 0; /* This is how much we have. */ static int max_user_taken_entries = 0; /* This limits us and prevents deadlock. */ -static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void) +static struct sun4c_mmu_entry *sun4c_kernel_strategy(void) { struct sun4c_mmu_entry *this_entry; /* If some are free, return first one. */ - if(sun4c_kfree_ring.num_entries) { + if (sun4c_kfree_ring.num_entries) { this_entry = sun4c_kfree_ring.ringhd.next; return this_entry; } /* Else free one up. */ this_entry = sun4c_kernel_ring.ringhd.prev; - sun4c_flush_segment(this_entry->vaddr); + if (sun4c_vacinfo.do_hwflushes) + sun4c_flush_segment_hw(this_entry->vaddr); + else + sun4c_flush_segment_sw(this_entry->vaddr); sun4c_kernel_unmap(this_entry); free_kernel_entry(this_entry, &sun4c_kernel_ring); this_entry = sun4c_kfree_ring.ringhd.next; @@ -1061,141 +949,73 @@ static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void) return this_entry; } -void sun4c_shrink_kernel_ring(void) -{ - struct sun4c_mmu_entry *entry; - unsigned long flags; - - /* If an interrupt comes in here, we die... 
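With the ctx, lru_next and lru_prev fields added to struct sun4c_mmu_entry, every in-use user segment now sits on two circular doubly linked lists at once: its per-context ring, kept ordered by vaddr in add_ring_ordered(), and one global LRU ring (sun4c_ulru_ring) that the reclaim path can scan without walking contexts. The splices are the usual two-pointer operations; a standalone illustration (field names follow the patch, but the struct and helpers below are not kernel code):

    struct seg {
        struct seg *next, *prev;            /* per-context ring, ordered by vaddr */
        struct seg *lru_next, *lru_prev;    /* global LRU ring */
    };

    /* Mirrors remove_lru(): unlink from the LRU ring. */
    static void lru_unlink(struct seg *e)
    {
        e->lru_next->lru_prev = e->lru_prev;
        e->lru_prev->lru_next = e->lru_next;
    }

    /* Mirrors add_lru(): insert just before the ring head. */
    static void lru_insert(struct seg *head, struct seg *e)
    {
        e->lru_next = head;
        e->lru_prev = head->lru_prev;
        head->lru_prev->lru_next = e;
        head->lru_prev = e;
    }

Because add_lru() always inserts just before the ring head, the oldest allocation ends up at ringhd.lru_next, which is exactly where the rewritten sun4c_user_strategy() starts when it has to steal a segment from another context.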
*/ - save_and_cli(flags); - - if (sun4c_user_taken_entries) { - entry = sun4c_kernel_strategy(); - remove_ring(&sun4c_kfree_ring, entry); - add_ring(&sun4c_ufree_ring, entry); - sun4c_user_taken_entries--; -#if 0 - printk("shrink: ufree= %d, kfree= %d, kernel= %d\n", - sun4c_ufree_ring.num_entries, - sun4c_kfree_ring.num_entries, - sun4c_kernel_ring.num_entries); -#endif -#ifdef DEBUG_SUN4C_MM - if(sun4c_user_taken_entries < 0) - panic("sun4c_shrink_kernel_ring: taken < 0."); -#endif - } - restore_flags(flags); -} - /* Using this method to free up mmu entries eliminates a lot of * potential races since we have a kernel that incurs tlb * replacement faults. There may be performance penalties. + * + * NOTE: Must be called with interrupts disabled. */ -static inline struct sun4c_mmu_entry *sun4c_user_strategy(void) +static struct sun4c_mmu_entry *sun4c_user_strategy(void) { - struct ctx_list *next_one; - struct sun4c_mmu_ring *rp = 0; + struct sun4c_mmu_entry *entry; unsigned char ctx; -#ifdef DEBUG_SUN4C_MM - int lim = num_contexts; -#endif + int savectx; /* If some are free, return first one. */ - if(sun4c_ufree_ring.num_entries) { -#ifdef DEBUG_SUN4C_MM - if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd) - panic("sun4c_user_strategy: num_entries!=0 but ring empty."); -#endif - return sun4c_ufree_ring.ringhd.next; + if (sun4c_ufree_ring.num_entries) { + entry = sun4c_ufree_ring.ringhd.next; + goto unlink_out; } if (sun4c_user_taken_entries) { - sun4c_shrink_kernel_ring(); -#ifdef DEBUG_SUN4C_MM - if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd) - panic("sun4c_user_strategy: kernel shrunk but ufree empty."); -#endif - return sun4c_ufree_ring.ringhd.next; + entry = sun4c_kernel_strategy(); + sun4c_user_taken_entries--; + goto kunlink_out; } - /* Grab one from the LRU context. */ - next_one = ctx_used.next; - while ((sun4c_context_ring[next_one->ctx_number].num_entries == 0) -#ifdef DEBUG_SUN4C_MM - && (--lim >= 0) -#endif - ) - next_one = next_one->next; + /* Grab from the beginning of the LRU list. */ + entry = sun4c_ulru_ring.ringhd.lru_next; + ctx = entry->ctx; -#ifdef DEBUG_SUN4C_MM - if(lim < 0) - panic("No user segmaps!"); -#endif + savectx = sun4c_get_context(); + flush_user_windows(); + sun4c_set_context(ctx); + if (sun4c_vacinfo.do_hwflushes) + sun4c_flush_segment_hw(entry->vaddr); + else + sun4c_flush_segment_sw(entry->vaddr); + sun4c_user_unmap(entry); + remove_ring(sun4c_context_ring + ctx, entry); + remove_lru(entry); + sun4c_set_context(savectx); - ctx = next_one->ctx_number; - rp = &sun4c_context_ring[ctx]; + return entry; - sun4c_demap_one(rp, ctx); -#ifdef DEBUG_SUN4C_MM - if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd) - panic("sun4c_user_strategy: demapped one but ufree empty."); -#endif - return sun4c_ufree_ring.ringhd.next; +unlink_out: + remove_ring(&sun4c_ufree_ring, entry); + return entry; +kunlink_out: + remove_ring(&sun4c_kfree_ring, entry); + return entry; } +/* NOTE: Must be called with interrupts disabled. */ void sun4c_grow_kernel_ring(void) { struct sun4c_mmu_entry *entry; -#if 0 - printk("grow: "); -#endif - /* Prevent deadlock condition. 
*/ - if(sun4c_user_taken_entries >= max_user_taken_entries) { -#if 0 - printk("deadlock avoidance, taken= %d max= %d\n", - sun4c_user_taken_entries, max_user_taken_entries); -#endif + if (sun4c_user_taken_entries >= max_user_taken_entries) return; - } if (sun4c_ufree_ring.num_entries) { entry = sun4c_ufree_ring.ringhd.next; -#ifdef DEBUG_SUN4C_MM - if(entry == &sun4c_ufree_ring.ringhd) - panic("\nsun4c_grow_kernel_ring: num_entries!=0, ring empty."); -#endif remove_ring(&sun4c_ufree_ring, entry); add_ring(&sun4c_kfree_ring, entry); -#ifdef DEBUG_SUN4C_MM - if(sun4c_user_taken_entries < 0) - panic("\nsun4c_grow_kernel_ring: taken < 0."); -#endif sun4c_user_taken_entries++; -#if 0 - printk("ufree= %d, kfree= %d, kernel= %d\n", - sun4c_ufree_ring.num_entries, - sun4c_kfree_ring.num_entries, - sun4c_kernel_ring.num_entries); -#endif } } -static inline void alloc_user_segment(unsigned long address, unsigned char ctx) -{ - struct sun4c_mmu_entry *entry; - unsigned long flags; - - save_and_cli(flags); - entry = sun4c_user_strategy(); - entry->vaddr = (address & SUN4C_REAL_PGDIR_MASK); - assign_user_entry(ctx, entry); - sun4c_user_map(entry); - restore_flags(flags); -} - /* This is now a fast in-window trap handler to avoid any and all races. */ static void sun4c_quick_kernel_fault(unsigned long address) { @@ -1209,8 +1029,8 @@ static void sun4c_quick_kernel_fault(unsigned long address) * bucket[0] * bucket[1] * [ ... ] - * bucket[NR_TASKS-1] - * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASKS) + * bucket[NR_TASK_BUCKETS-1] + * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS) * * Each slot looks like: * @@ -1218,7 +1038,7 @@ static void sun4c_quick_kernel_fault(unsigned long address) * page 2 -- rest of kernel stack */ -union task_union *sun4c_bucket[NR_TASKS]; +union task_union *sun4c_bucket[NR_TASK_BUCKETS]; static int sun4c_lowbucket_avail; @@ -1232,7 +1052,7 @@ static int sun4c_lowbucket_avail; #define BUCKET_PTE_PAGE(pte) \ (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT)) -static inline void get_locked_segment(unsigned long addr) +static void get_locked_segment(unsigned long addr) { struct sun4c_mmu_entry *stolen; unsigned long flags; @@ -1240,19 +1060,14 @@ static inline void get_locked_segment(unsigned long addr) save_and_cli(flags); addr &= SUN4C_REAL_PGDIR_MASK; stolen = sun4c_user_strategy(); - remove_ring(&sun4c_ufree_ring, stolen); max_user_taken_entries--; -#ifdef DEBUG_SUN4C_MM - if(max_user_taken_entries < 0) - panic("get_locked_segment: max_user_taken < 0."); -#endif stolen->vaddr = addr; - FUW_INLINE + flush_user_windows(); sun4c_kernel_map(stolen); restore_flags(flags); } -static inline void free_locked_segment(unsigned long addr) +static void free_locked_segment(unsigned long addr) { struct sun4c_mmu_entry *entry; unsigned long flags; @@ -1263,14 +1078,13 @@ static inline void free_locked_segment(unsigned long addr) pseg = sun4c_get_segmap(addr); entry = &mmu_entry_pool[pseg]; - FUW_INLINE - sun4c_flush_segment(addr); + flush_user_windows(); + if (sun4c_vacinfo.do_hwflushes) + sun4c_flush_segment_hw(addr); + else + sun4c_flush_segment_sw(addr); sun4c_kernel_unmap(entry); add_ring(&sun4c_ufree_ring, entry); -#ifdef DEBUG_SUN4C_MM - if(max_user_taken_entries < 0) - panic("free_locked_segment: max_user_taken < 0."); -#endif max_user_taken_entries++; restore_flags(flags); } @@ -1282,8 +1096,8 @@ static inline void garbage_collect(int entry) /* 32 buckets per segment... 
*/ entry &= ~31; start = entry; - for(end = (start + 32); start < end; start++) - if(sun4c_bucket[start] != BUCKET_EMPTY) + for (end = (start + 32); start < end; start++) + if (sun4c_bucket[start] != BUCKET_EMPTY) return; /* Entire segment empty, release it. */ @@ -1302,23 +1116,39 @@ static struct task_struct *sun4c_alloc_task_struct(void) int entry; pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER); - if(!pages) + if (!pages) return (struct task_struct *) 0; - for(entry = sun4c_lowbucket_avail; entry < NR_TASKS; entry++) - if(sun4c_bucket[entry] == BUCKET_EMPTY) + for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++) + if (sun4c_bucket[entry] == BUCKET_EMPTY) break; - if(entry == NR_TASKS) { + if (entry == NR_TASK_BUCKETS) { free_pages(pages, TASK_STRUCT_ORDER); return (struct task_struct *) 0; } - if(entry >= sun4c_lowbucket_avail) + if (entry >= sun4c_lowbucket_avail) sun4c_lowbucket_avail = entry + 1; addr = BUCKET_ADDR(entry); sun4c_bucket[entry] = (union task_union *) addr; if(sun4c_get_segmap(addr) == invalid_segment) get_locked_segment(addr); + + /* We are changing the virtual color of the page(s) + * so we must flush the cache to guarentee consistancy. + */ + if (sun4c_vacinfo.do_hwflushes) { + sun4c_flush_page_hw(pages); +#ifndef CONFIG_SUN4 + sun4c_flush_page_hw(pages + PAGE_SIZE); +#endif + } else { + sun4c_flush_page_sw(pages); +#ifndef CONFIG_SUN4 + sun4c_flush_page_sw(pages + PAGE_SIZE); +#endif + } + sun4c_put_pte(addr, BUCKET_PTE(pages)); #ifndef CONFIG_SUN4 sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE)); @@ -1342,7 +1172,7 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk) sun4c_put_pte(tsaddr + PAGE_SIZE, 0); #endif sun4c_bucket[entry] = BUCKET_EMPTY; - if(entry < sun4c_lowbucket_avail) + if (entry < sun4c_lowbucket_avail) sun4c_lowbucket_avail = entry; free_pages(pages, TASK_STRUCT_ORDER); @@ -1365,7 +1195,7 @@ static void sun4c_free_task_struct_sw(struct task_struct *tsk) sun4c_put_pte(tsaddr + PAGE_SIZE, 0); #endif sun4c_bucket[entry] = BUCKET_EMPTY; - if(entry < sun4c_lowbucket_avail) + if (entry < sun4c_lowbucket_avail) sun4c_lowbucket_avail = entry; free_pages(pages, TASK_STRUCT_ORDER); @@ -1376,10 +1206,10 @@ static void __init sun4c_init_buckets(void) { int entry; - if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) { + if (sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) { prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER); } - for(entry = 0; entry < NR_TASKS; entry++) + for (entry = 0; entry < NR_TASK_BUCKETS; entry++) sun4c_bucket[entry] = BUCKET_EMPTY; sun4c_lowbucket_avail = 0; } @@ -1494,37 +1324,38 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size) * by implication and fool the page locking code above * if passed to by mistake. 
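The flush newly added to sun4c_alloc_task_struct() above is a virtually-indexed-cache rule rather than anything bucket-specific: the pages were just obtained and written through their normal PAGE_OFFSET mapping, and they are about to become visible at a bucket address with a different virtual colour, so any dirty lines must be pushed out through the old alias first. The shape of the rule, as applied in that hunk (all names as in the patch):

    /* Flush through the alias the data was last written through ... */
    if (sun4c_vacinfo.do_hwflushes)
        sun4c_flush_page_hw(pages);     /* 'pages' is the PAGE_OFFSET alias */
    else
        sun4c_flush_page_sw(pages);

    /* ... and only then install the new, differently coloured mapping. */
    sun4c_put_pte(addr, BUCKET_PTE(pages));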
*/ -static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus) +static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus) { unsigned long page; page = ((unsigned long)bufptr) & PAGE_MASK; - if(MAP_NR(page) > max_mapnr) { + if (MAP_NR(page) > max_mapnr) { sun4c_flush_page(page); return (__u32)bufptr; /* already locked */ } return (__u32)sun4c_lockarea(bufptr, len); } -static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { - while(sz >= 0) { - sg[sz].dvma_addr = (__u32)sun4c_lockarea(sg[sz].addr, sg[sz].len); + while (sz >= 0) { + sg[sz].dvma_address = (__u32)sun4c_lockarea(sg[sz].address, sg[sz].length); + sg[sz].dvma_length = sg[sz].length; sz--; } } -static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct linux_sbus *sbus) +static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus) { - if(bufptr < sun4c_iobuffer_start) + if (bufptr < sun4c_iobuffer_start) return; /* On kernel stack or similar, see above */ sun4c_unlockarea((char *)bufptr, len); } -static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) { - while(sz >= 0) { - sun4c_unlockarea((char *)sg[sz].dvma_addr, sg[sz].len); + while (sz >= 0) { + sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length); sz--; } } @@ -1534,7 +1365,7 @@ static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_s struct vm_area_struct sun4c_kstack_vma; -static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem) +static void __init sun4c_init_lock_areas(void) { unsigned long sun4c_taskstack_start; unsigned long sun4c_taskstack_end; @@ -1543,9 +1374,9 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem) sun4c_init_buckets(); sun4c_taskstack_start = SUN4C_LOCK_VADDR; sun4c_taskstack_end = (sun4c_taskstack_start + - (TASK_ENTRY_SIZE * NR_TASKS)); - if(sun4c_taskstack_end >= SUN4C_LOCK_END) { - prom_printf("Too many tasks, decrease NR_TASKS please.\n"); + (TASK_ENTRY_SIZE * NR_TASK_BUCKETS)); + if (sun4c_taskstack_end >= SUN4C_LOCK_END) { + prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n"); prom_halt(); } @@ -1556,9 +1387,8 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem) bitmap_size = (bitmap_size + 7) >> 3; bitmap_size = LONG_ALIGN(bitmap_size); iobuffer_map_size = bitmap_size << 3; - sun4c_iobuffer_map = (unsigned long *) start_mem; - memset((void *) start_mem, 0, bitmap_size); - start_mem += bitmap_size; + sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL); + memset((void *) sun4c_iobuffer_map, 0, bitmap_size); sun4c_kstack_vma.vm_mm = &init_mm; sun4c_kstack_vma.vm_start = sun4c_taskstack_start; @@ -1566,7 +1396,6 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem) sun4c_kstack_vma.vm_page_prot = PAGE_SHARED; sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC; insert_vm_struct(&init_mm, &sun4c_kstack_vma); - return start_mem; } /* Cache flushing on the sun4c. 
*/ @@ -1574,12 +1403,12 @@ static void sun4c_flush_cache_all(void) { unsigned long begin, end; - FUW_INLINE + flush_user_windows(); begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE); end = (begin + SUN4C_VAC_SIZE); - if(sun4c_vacinfo.linesize == 32) { - while(begin < end) { + if (sun4c_vacinfo.linesize == 32) { + while (begin < end) { __asm__ __volatile__(" ld [%0 + 0x00], %%g0 ld [%0 + 0x20], %%g0 @@ -1601,7 +1430,7 @@ static void sun4c_flush_cache_all(void) begin += 512; } } else { - while(begin < end) { + while (begin < end) { __asm__ __volatile__(" ld [%0 + 0x00], %%g0 ld [%0 + 0x10], %%g0 @@ -1629,29 +1458,31 @@ static void sun4c_flush_cache_mm_hw(struct mm_struct *mm) { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) { - struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; - unsigned long flags; + if (new_ctx != NO_CONTEXT) { + flush_user_windows(); + if (sun4c_context_ring[new_ctx].num_entries) { + struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; + unsigned long flags; - save_and_cli(flags); - if(head->next != head) { - struct sun4c_mmu_entry *entry = head->next; - int savectx = sun4c_get_context(); + save_and_cli(flags); + if (head->next != head) { + struct sun4c_mmu_entry *entry = head->next; + int savectx = sun4c_get_context(); - FUW_INLINE - sun4c_set_context(new_ctx); - sun4c_flush_context_hw(); - do { - struct sun4c_mmu_entry *next = entry->next; + sun4c_set_context(new_ctx); + sun4c_flush_context_hw(); + do { + struct sun4c_mmu_entry *next = entry->next; - sun4c_user_unmap(entry); - free_user_entry(new_ctx, entry); + sun4c_user_unmap(entry); + free_user_entry(new_ctx, entry); - entry = next; - } while(entry != head); - sun4c_set_context(savectx); + entry = next; + } while (entry != head); + sun4c_set_context(savectx); + } + restore_flags(flags); } - restore_flags(flags); } } @@ -1659,29 +1490,28 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start { int new_ctx = mm->context; -#if KGPROF_PROFILING - kgprof_profile(); -#endif - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; struct sun4c_mmu_entry *entry; unsigned long flags; - FUW_INLINE + flush_user_windows(); + save_and_cli(flags); /* All user segmap chains are ordered on entry->vaddr. */ - for(entry = head->next; - (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); - entry = entry->next) + for (entry = head->next; + (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); + entry = entry->next) ; /* Tracing various job mixtures showed that this conditional * only passes ~35% of the time for most worse case situations, * therefore we avoid all of this gross overhead ~65% of the time. 
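The percentages quoted above justify the early exit; when the conditional does pass, the loop still makes a per-segment cost decision. The context ring is ordered by vaddr, so segments ending before start are skipped, and each overlapping segment is flushed page by page only when the overlap is small (at most PAGE_SIZE << 3, i.e. eight pages). Reduced to that decision (illustrative wrapper function; the whole-segment branch sits outside the context lines shown in this hunk, so its exact body is hedged here):

    /* Illustrative reduction of the per-segment decision above. */
    static void flush_overlap(struct sun4c_mmu_entry *entry, unsigned long end)
    {
        unsigned long realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;

        if (end < realend)
            realend = end;      /* clip to the end of the range being flushed */

        if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
            unsigned long page;

            /* overlap of eight pages or less: per-page flushes win */
            for (page = entry->vaddr; page < realend; page += PAGE_SIZE)
                sun4c_flush_page_hw(page);
        } else {
            /* otherwise pay for one whole-segment flush */
            sun4c_flush_segment_hw(entry->vaddr);
        }
    }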
*/ - if((entry != head) && (entry->vaddr < end)) { + if ((entry != head) && (entry->vaddr < end)) { int octx = sun4c_get_context(); + sun4c_set_context(new_ctx); /* At this point, always, (start >= entry->vaddr) and @@ -1696,11 +1526,11 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start /* "realstart" is always >= entry->vaddr */ realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE; - if(end < realend) + if (end < realend) realend = end; - if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) { + if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) { unsigned long page = entry->vaddr; - while(page < realend) { + while (page < realend) { sun4c_flush_page_hw(page); page += PAGE_SIZE; } @@ -1710,14 +1540,13 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start free_user_entry(new_ctx, entry); } entry = next; - } while((entry != head) && (entry->vaddr < end)); + } while ((entry != head) && (entry->vaddr < end)); sun4c_set_context(octx); } restore_flags(flags); } } -/* XXX no save_and_cli/restore_flags needed, but put here if darkside still crashes */ static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page) { struct mm_struct *mm = vma->vm_mm; @@ -1726,76 +1555,85 @@ static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long /* Sun4c has no separate I/D caches so cannot optimize for non * text page flushes. */ - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { int octx = sun4c_get_context(); + unsigned long flags; - FUW_INLINE + flush_user_windows(); + save_and_cli(flags); sun4c_set_context(new_ctx); sun4c_flush_page_hw(page); sun4c_set_context(octx); + restore_flags(flags); } } -static void sun4c_flush_page_to_ram_hw(unsigned long page) +static void sun4c_flush_page_to_ram_hw(struct page *page) { - sun4c_flush_page_hw(page); + unsigned long flags; + unsigned long addr = page_address(page); + + save_and_cli(flags); + sun4c_flush_page_hw(addr); + restore_flags(flags); } static void sun4c_flush_cache_mm_sw(struct mm_struct *mm) { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) { - struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; - unsigned long flags; + if (new_ctx != NO_CONTEXT) { + flush_user_windows(); - save_and_cli(flags); - if(head->next != head) { - struct sun4c_mmu_entry *entry = head->next; - int savectx = sun4c_get_context(); + if (sun4c_context_ring[new_ctx].num_entries) { + struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; + unsigned long flags; - FUW_INLINE - sun4c_set_context(new_ctx); - sun4c_flush_context_sw(); - do { - struct sun4c_mmu_entry *next = entry->next; + save_and_cli(flags); + if (head->next != head) { + struct sun4c_mmu_entry *entry = head->next; + int savectx = sun4c_get_context(); - sun4c_user_unmap(entry); - free_user_entry(new_ctx, entry); + sun4c_set_context(new_ctx); + sun4c_flush_context_sw(); + do { + struct sun4c_mmu_entry *next = entry->next; - entry = next; - } while(entry != head); - sun4c_set_context(savectx); + sun4c_user_unmap(entry); + free_user_entry(new_ctx, entry); + + entry = next; + } while (entry != head); + sun4c_set_context(savectx); + } + restore_flags(flags); } - restore_flags(flags); } } static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end) { int new_ctx = mm->context; - -#if KGPROF_PROFILING - kgprof_profile(); -#endif - if(new_ctx != NO_CONTEXT) { + + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry 
*head = &sun4c_context_ring[new_ctx].ringhd; struct sun4c_mmu_entry *entry; unsigned long flags; - FUW_INLINE + flush_user_windows(); + save_and_cli(flags); /* All user segmap chains are ordered on entry->vaddr. */ - for(entry = head->next; - (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); - entry = entry->next) + for (entry = head->next; + (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); + entry = entry->next) ; /* Tracing various job mixtures showed that this conditional * only passes ~35% of the time for most worse case situations, * therefore we avoid all of this gross overhead ~65% of the time. */ - if((entry != head) && (entry->vaddr < end)) { + if ((entry != head) && (entry->vaddr < end)) { int octx = sun4c_get_context(); sun4c_set_context(new_ctx); @@ -1811,11 +1649,11 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start /* "realstart" is always >= entry->vaddr */ realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE; - if(end < realend) + if (end < realend) realend = end; - if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) { + if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) { unsigned long page = entry->vaddr; - while(page < realend) { + while (page < realend) { sun4c_flush_page_sw(page); page += PAGE_SIZE; } @@ -1825,7 +1663,7 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start free_user_entry(new_ctx, entry); } entry = next; - } while((entry != head) && (entry->vaddr < end)); + } while ((entry != head) && (entry->vaddr < end)); sun4c_set_context(octx); } restore_flags(flags); @@ -1840,19 +1678,27 @@ static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long /* Sun4c has no separate I/D caches so cannot optimize for non * text page flushes. 
*/ - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { int octx = sun4c_get_context(); + unsigned long flags; - FUW_INLINE + flush_user_windows(); + save_and_cli(flags); sun4c_set_context(new_ctx); sun4c_flush_page_sw(page); sun4c_set_context(octx); + restore_flags(flags); } } -static void sun4c_flush_page_to_ram_sw(unsigned long page) +static void sun4c_flush_page_to_ram_sw(struct page *page) { - sun4c_flush_page_sw(page); + unsigned long flags; + unsigned long addr = page_address(page); + + save_and_cli(flags); + sun4c_flush_page_sw(addr); + restore_flags(flags); } /* Sun4c cache is unified, both instructions and data live there, so @@ -1879,8 +1725,11 @@ static void sun4c_flush_tlb_all(void) flush_user_windows(); while (sun4c_kernel_ring.num_entries) { next_entry = this_entry->next; - sun4c_flush_segment(this_entry->vaddr); - for(ctx = 0; ctx < num_contexts; ctx++) { + if (sun4c_vacinfo.do_hwflushes) + sun4c_flush_segment_hw(this_entry->vaddr); + else + sun4c_flush_segment_sw(this_entry->vaddr); + for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); sun4c_put_segmap(this_entry->vaddr, invalid_segment); } @@ -1895,16 +1744,15 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm) { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; unsigned long flags; save_and_cli(flags); - if(head->next != head) { + if (head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); - FUW_INLINE sun4c_set_context(new_ctx); sun4c_flush_context_hw(); do { @@ -1914,7 +1762,7 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm) free_user_entry(new_ctx, entry); entry = next; - } while(entry != head); + } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags); @@ -1925,26 +1773,21 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start, { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; struct sun4c_mmu_entry *entry; unsigned long flags; -#if KGPROF_PROFILING - kgprof_profile(); -#endif save_and_cli(flags); /* See commentary in sun4c_flush_cache_range_*(). */ - for(entry = head->next; - (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); - entry = entry->next) + for (entry = head->next; + (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); + entry = entry->next) ; - if((entry != head) && (entry->vaddr < end)) { + if ((entry != head) && (entry->vaddr < end)) { int octx = sun4c_get_context(); - /* This window flush is paranoid I think... 
-DaveM */ - FUW_INLINE sun4c_set_context(new_ctx); do { struct sun4c_mmu_entry *next = entry->next; @@ -1954,7 +1797,7 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start, free_user_entry(new_ctx, entry); entry = next; - } while((entry != head) && (entry->vaddr < end)); + } while ((entry != head) && (entry->vaddr < end)); sun4c_set_context(octx); } restore_flags(flags); @@ -1966,15 +1809,17 @@ static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long pa struct mm_struct *mm = vma->vm_mm; int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { int savectx = sun4c_get_context(); + unsigned long flags; - FUW_INLINE + save_and_cli(flags); sun4c_set_context(new_ctx); page &= PAGE_MASK; sun4c_flush_page_hw(page); sun4c_put_pte(page, 0); sun4c_set_context(savectx); + restore_flags(flags); } } @@ -1982,16 +1827,15 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm) { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; unsigned long flags; save_and_cli(flags); - if(head->next != head) { + if (head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); - FUW_INLINE sun4c_set_context(new_ctx); sun4c_flush_context_sw(); do { @@ -2001,7 +1845,7 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm) free_user_entry(new_ctx, entry); entry = next; - } while(entry != head); + } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags); @@ -2012,27 +1856,21 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start, { int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; struct sun4c_mmu_entry *entry; unsigned long flags; -#if KGPROF_PROFILING - kgprof_profile(); -#endif - save_and_cli(flags); /* See commentary in sun4c_flush_cache_range_*(). */ - for(entry = head->next; - (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); - entry = entry->next) + for (entry = head->next; + (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); + entry = entry->next) ; - if((entry != head) && (entry->vaddr < end)) { + if ((entry != head) && (entry->vaddr < end)) { int octx = sun4c_get_context(); - /* This window flush is paranoid I think... 
-DaveM */ - FUW_INLINE sun4c_set_context(new_ctx); do { struct sun4c_mmu_entry *next = entry->next; @@ -2042,7 +1880,7 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start, free_user_entry(new_ctx, entry); entry = next; - } while((entry != head) && (entry->vaddr < end)); + } while ((entry != head) && (entry->vaddr < end)); sun4c_set_context(octx); } restore_flags(flags); @@ -2054,15 +1892,17 @@ static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long pa struct mm_struct *mm = vma->vm_mm; int new_ctx = mm->context; - if(new_ctx != NO_CONTEXT) { + if (new_ctx != NO_CONTEXT) { int savectx = sun4c_get_context(); + unsigned long flags; - FUW_INLINE + save_and_cli(flags); sun4c_set_context(new_ctx); page &= PAGE_MASK; sun4c_flush_page_sw(page); sun4c_put_pte(page, 0); sun4c_set_context(savectx); + restore_flags(flags); } } @@ -2075,7 +1915,6 @@ static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp) { } - void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly) { @@ -2083,7 +1922,7 @@ void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK); page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT)); - if(rdonly) + if (rdonly) page_entry &= ~_SUN4C_WRITEABLE; sun4c_put_pte(virt_addr, page_entry); } @@ -2093,12 +1932,12 @@ void sun4c_unmapioaddr(unsigned long virt_addr) sun4c_put_pte(virt_addr, 0); } -static void sun4c_alloc_context_hw(struct mm_struct *mm) +static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; - if(ctxp != &ctx_free) { + if (ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; @@ -2106,40 +1945,33 @@ static void sun4c_alloc_context_hw(struct mm_struct *mm) return; } ctxp = ctx_used.next; - if(ctxp->ctx_mm == current->mm) + if (ctxp->ctx_mm == old_mm) ctxp = ctxp->next; -#ifdef DEBUG_SUN4C_MM - if(ctxp == &ctx_used) - panic("out of mmu contexts"); -#endif remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number], - ctxp->ctx_number); + ctxp->ctx_number); } -static void sun4c_switch_to_context_hw(struct task_struct *tsk) +/* Switch the current MM context. */ +static void sun4c_switch_mm_hw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) { struct ctx_list *ctx; + int dirty = 0; - if(tsk->mm->context == NO_CONTEXT) { - sun4c_alloc_context_hw(tsk->mm); + if (mm->context == NO_CONTEXT) { + dirty = 1; + sun4c_alloc_context_hw(old_mm, mm); } else { /* Update the LRU ring of contexts. 
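The hunks around this point replace the old switch_to_context()/init_new_context() pair with a single switch_mm(old_mm, mm, tsk, cpu) operation: a hardware context is allocated only when mm has none, the allocator now receives old_mm explicitly so that stealing the least recently used context can never rob the mm being switched away from, and the context register is written only when the context is fresh or the mm actually changed. The control flow, reduced to a sketch (the function name is illustrative; everything it calls appears in the patch):

    static void switch_mm_sketch(struct mm_struct *old_mm, struct mm_struct *mm)
    {
        int dirty = 0;

        if (mm->context == NO_CONTEXT) {
            dirty = 1;
            sun4c_alloc_context_hw(old_mm, mm); /* may steal another mm's context */
        } else {
            /* refresh mm's position in the LRU list of contexts
             * (remove_from_ctx_list() + add_to_used_ctxlist() above) */
        }

        if (dirty || old_mm != mm)
            sun4c_set_context(mm->context);
    }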
*/ - ctx = ctx_list_pool + tsk->mm->context; + ctx = ctx_list_pool + mm->context; remove_from_ctx_list(ctx); add_to_used_ctxlist(ctx); } - sun4c_set_context(tsk->mm->context); -} - -static void sun4c_init_new_context_hw(struct mm_struct *mm) -{ - sun4c_alloc_context_hw(mm); - if(mm == current->mm) + if (dirty || old_mm != mm) sun4c_set_context(mm->context); } @@ -2147,7 +1979,7 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm) { struct ctx_list *ctx_old; - if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) { + if (mm->context != NO_CONTEXT) { sun4c_demap_context_hw(&sun4c_context_ring[mm->context], mm->context); ctx_old = ctx_list_pool + mm->context; remove_from_ctx_list(ctx_old); @@ -2156,12 +1988,12 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm) } } -static void sun4c_alloc_context_sw(struct mm_struct *mm) +static void sun4c_alloc_context_sw(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; - if(ctxp != &ctx_free) { + if (ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; @@ -2169,40 +2001,34 @@ static void sun4c_alloc_context_sw(struct mm_struct *mm) return; } ctxp = ctx_used.next; - if(ctxp->ctx_mm == current->mm) + if(ctxp->ctx_mm == old_mm) ctxp = ctxp->next; -#ifdef DEBUG_SUN4C_MM - if(ctxp == &ctx_used) - panic("out of mmu contexts"); -#endif remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; sun4c_demap_context_sw(&sun4c_context_ring[ctxp->ctx_number], - ctxp->ctx_number); + ctxp->ctx_number); } -static void sun4c_switch_to_context_sw(struct task_struct *tsk) +/* Switch the current MM context. */ +static void sun4c_switch_mm_sw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) { struct ctx_list *ctx; + int dirty = 0; - if(tsk->mm->context == NO_CONTEXT) { - sun4c_alloc_context_sw(tsk->mm); + if (mm->context == NO_CONTEXT) { + dirty = 1; + sun4c_alloc_context_sw(old_mm, mm); } else { /* Update the LRU ring of contexts. */ - ctx = ctx_list_pool + tsk->mm->context; + ctx = ctx_list_pool + mm->context; remove_from_ctx_list(ctx); add_to_used_ctxlist(ctx); } - sun4c_set_context(tsk->mm->context); -} -static void sun4c_init_new_context_sw(struct mm_struct *mm) -{ - sun4c_alloc_context_sw(mm); - if(mm == current->mm) + if (dirty || old_mm != mm) sun4c_set_context(mm->context); } @@ -2210,7 +2036,7 @@ static void sun4c_destroy_context_sw(struct mm_struct *mm) { struct ctx_list *ctx_old; - if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) { + if (mm->context != NO_CONTEXT) { sun4c_demap_context_sw(&sun4c_context_ring[mm->context], mm->context); ctx_old = ctx_list_pool + mm->context; remove_from_ctx_list(ctx_old); @@ -2225,7 +2051,7 @@ static int sun4c_mmu_info(char *buf) int len; used_user_entries = 0; - for(i=0; i < num_contexts; i++) + for (i = 0; i < num_contexts; i++) used_user_entries += sun4c_context_ring[i].num_entries; len = sprintf(buf, @@ -2239,10 +2065,7 @@ static int sun4c_mmu_info(char *buf) "usedpsegs\t: %d\n" "ufreepsegs\t: %d\n" "user_taken\t: %d\n" - "max_taken\t: %d\n" - "context\t\t: %d flushes\n" - "segment\t\t: %d flushes\n" - "page\t\t: %d flushes\n", + "max_taken\t: %d\n", sun4c_vacinfo.num_bytes, (sun4c_vacinfo.do_hwflushes ? 
"yes" : "no"), sun4c_vacinfo.linesize, @@ -2253,22 +2076,7 @@ static int sun4c_mmu_info(char *buf) used_user_entries, sun4c_ufree_ring.num_entries, sun4c_user_taken_entries, - max_user_taken_entries, - ctxflushes, segflushes, pageflushes); - -#if KGPROF_PROFILING - { - int i,j; - len += sprintf(buf + len,"kgprof profiling:\n"); - for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) { - len += sprintf(buf + len,"%5d ",kgprof_counters[i].count); - for (j=0;j<KGPROF_DEPTH;j++) { - len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]); - } - len += sprintf(buf + len,"\n"); - } - } -#endif + max_user_taken_entries); return len; } @@ -2277,13 +2085,6 @@ static int sun4c_mmu_info(char *buf) * data structures. */ -#if 0 /* Not used due to BTFIXUPs */ -static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); } -#endif -#if 0 /* Not used due to BTFIXUPs */ -static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); } -#endif - /* First the functions which the mid-level code uses to directly * manipulate the software page tables. Some defines since we are * emulating the i386 page directory layout. @@ -2295,17 +2096,6 @@ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_AL #define PGD_DIRTY 0x040 #define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY) -#if 0 /* Not used due to BTFIXUPs */ -static unsigned long sun4c_vmalloc_start(void) -{ - return SUN4C_VMALLOC_START; -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); } -#endif - static int sun4c_pte_present(pte_t pte) { return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0); @@ -2334,48 +2124,6 @@ static void sun4c_pgd_clear(pgd_t * pgdp) { } * The following only work if pte_present() is true. * Undefined behaviour if not.. */ -#if 0 /* Not used due to BTFIXUPs */ -static int sun4c_pte_write(pte_t pte) -{ - return pte_val(pte) & _SUN4C_PAGE_WRITE; -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static int sun4c_pte_dirty(pte_t pte) -{ - return pte_val(pte) & _SUN4C_PAGE_MODIFIED; -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static int sun4c_pte_young(pte_t pte) -{ - return pte_val(pte) & _SUN4C_PAGE_ACCESSED; -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static pte_t sun4c_pte_wrprotect(pte_t pte) -{ - return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE)); -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static pte_t sun4c_pte_mkclean(pte_t pte) -{ - return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE)); -} -#endif - -#if 0 /* Not used due to BTFIXUPs */ -static pte_t sun4c_pte_mkold(pte_t pte) -{ - return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ)); -} -#endif - static pte_t sun4c_pte_mkwrite(pte_t pte) { pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE); @@ -2404,9 +2152,9 @@ static pte_t sun4c_pte_mkyoung(pte_t pte) * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ -static pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot) +static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot) { - return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot)); + return __pte((page - mem_map) | pgprot_val(pgprot)); } static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot) @@ -2419,17 +2167,9 @@ static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot)); } -#if 0 /* Not used due to BTFIXUPs */ -static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot) -{ - return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) | - pgprot_val(newprot)); -} -#endif - -static unsigned long sun4c_pte_page(pte_t pte) +static unsigned long sun4c_pte_pagenr(pte_t pte) { - return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT))); + return (pte_val(pte) & SUN4C_PFN_MASK); } static inline unsigned long sun4c_pmd_page(pmd_t pmd) @@ -2460,11 +2200,6 @@ pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address) return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1)); } -/* Update the root mmu directory. */ -static void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir) -{ -} - /* Please take special note on the foo_kernel() routines below, our * fast in window fault handler wants to get at the pte's for vmalloc * area with traps off, therefore they _MUST_ be locked down to prevent @@ -2487,7 +2222,7 @@ static void sun4c_pte_free_kernel(pte_t *pte) static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address) { - if(address >= SUN4C_LOCK_VADDR) + if (address >= SUN4C_LOCK_VADDR) return NULL; address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1); if (sun4c_pmd_none(*pmd)) @@ -2527,7 +2262,7 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void) { unsigned long *ret; - if((ret = pgd_quicklist) != NULL) { + if ((ret = pgd_quicklist) != NULL) { pgd_quicklist = (unsigned long *)(*ret); ret[0] = ret[1]; pgtable_cache_size--; @@ -2546,15 +2281,15 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void) static int sun4c_check_pgt_cache(int low, int high) { int freed = 0; - if(pgtable_cache_size > high) { + if (pgtable_cache_size > high) { do { - if(pgd_quicklist) + if (pgd_quicklist) free_pgd_slow(get_pgd_fast()), freed++; - if(pmd_quicklist) + if (pmd_quicklist) free_pmd_slow(get_pmd_fast()), freed++; - if(pte_quicklist) + if (pte_quicklist) free_pte_slow(get_pte_fast()), freed++; - } while(pgtable_cache_size > low); + } while (pgtable_cache_size > low); } return freed; } @@ -2575,7 +2310,7 @@ extern __inline__ pte_t *sun4c_get_pte_fast(void) { unsigned long *ret; - if((ret = (unsigned long *)pte_quicklist) != NULL) { + if ((ret = (unsigned long *)pte_quicklist) != NULL) { pte_quicklist = (unsigned long *)(*ret); ret[0] = ret[1]; pgtable_cache_size--; @@ -2691,19 +2426,21 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr unsigned long start; /* Do not mistake ourselves as another mapping. 
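The mk_pte/pte_pagenr hunk above switches sun4c from kernel virtual addresses to mem_map-relative page frame numbers: mk_pte takes a struct page * and stores its index in mem_map, and pte_pagenr masks that index back out. A toy model of the arithmetic; the mask width and names are stand-ins:

#include <stdio.h>

#define MODEL_PFN_MASK  0xffffUL        /* stand-in for SUN4C_PFN_MASK */

struct model_page { int dummy; };
typedef unsigned long model_pte_t;

static struct model_page model_mem_map[4];      /* stand-in for the global mem_map[] */

/* The pte records a page frame number: the page's index in mem_map. */
static model_pte_t model_mk_pte(struct model_page *page, unsigned long prot)
{
        return (model_pte_t)(page - model_mem_map) | prot;
}

/* The reverse lookup just masks the pfn back out of the pte. */
static unsigned long model_pte_pagenr(model_pte_t pte)
{
        return pte & MODEL_PFN_MASK;
}

int main(void)
{
        model_pte_t pte = model_mk_pte(&model_mem_map[2], 0x100000UL);

        printf("pfn=%lu page=%p\n", model_pte_pagenr(pte),
               (void *)&model_mem_map[model_pte_pagenr(pte)]);
        return 0;
}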
*/ - if(vmaring == vma) + if (vmaring == vma) continue; if (S4CVAC_BADALIAS(vaddr, address)) { alias_found++; start = vmaring->vm_start; - while(start < vmaring->vm_end) { + while (start < vmaring->vm_end) { pgdp = sun4c_pgd_offset(vmaring->vm_mm, start); - if(!pgdp) goto next; + if (!pgdp) + goto next; ptep = sun4c_pte_offset((pmd_t *) pgdp, start); - if(!ptep) goto next; + if (!ptep) + goto next; - if(pte_val(*ptep) & _SUN4C_PAGE_PRESENT) { + if (pte_val(*ptep) & _SUN4C_PAGE_PRESENT) { flush_cache_page(vmaring, start); *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE); @@ -2716,54 +2453,112 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr } while ((vmaring = vmaring->vm_next_share) != NULL); spin_unlock(&inode->i_shared_lock); - if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) { + if (alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) { pgdp = sun4c_pgd_offset(vma->vm_mm, address); ptep = sun4c_pte_offset((pmd_t *) pgdp, address); *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE); - pte = pte_val(*ptep); + pte = *ptep; } } } +/* An experiment, turn off by default for now... -DaveM */ +#define SUN4C_PRELOAD_PSEG + void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long flags; + int pseg; save_and_cli(flags); address &= PAGE_MASK; - if(sun4c_get_segmap(address) == invalid_segment) - alloc_user_segment(address, sun4c_get_context()); + if ((pseg = sun4c_get_segmap(address)) == invalid_segment) { + struct sun4c_mmu_entry *entry = sun4c_user_strategy(); + struct mm_struct *mm = vma->vm_mm; + unsigned long start, end; + + entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK); + entry->ctx = mm->context; + add_ring_ordered(sun4c_context_ring + mm->context, entry); + sun4c_put_segmap(entry->vaddr, entry->pseg); + end = start + SUN4C_REAL_PGDIR_SIZE; + while (start < end) { +#ifdef SUN4C_PRELOAD_PSEG + pgd_t *pgdp = sun4c_pgd_offset(mm, start); + pte_t *ptep; + + if (!pgdp) + goto no_mapping; + ptep = sun4c_pte_offset((pmd_t *) pgdp, start); + if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT)) + goto no_mapping; + sun4c_put_pte(start, pte_val(*ptep)); + goto next; + + no_mapping: +#endif + sun4c_put_pte(start, 0); +#ifdef SUN4C_PRELOAD_PSEG + next: +#endif + start += PAGE_SIZE; + } + if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) + sun4c_vac_alias_fixup(vma, address, pte); +#ifndef SUN4C_PRELOAD_PSEG + sun4c_put_pte(address, pte_val(pte)); +#endif + restore_flags(flags); + return; + } else { + struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg]; + + remove_lru(entry); + add_lru(entry); + } - if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) + if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) sun4c_vac_alias_fixup(vma, address, pte); sun4c_put_pte(address, pte_val(pte)); restore_flags(flags); } -extern unsigned long free_area_init(unsigned long, unsigned long); -extern unsigned long sparc_context_init(unsigned long, int); +extern void sparc_context_init(int); extern unsigned long end; +extern unsigned long bootmem_init(void); +extern unsigned long last_valid_pfn; +extern void sun_serial_setup(void); -unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem) +void __init sun4c_paging_init(void) { int i, cnt; unsigned long kernel_end, vaddr; - extern unsigned long sparc_iobase_vaddr; + extern struct resource sparc_iomap; + unsigned long end_pfn; kernel_end = (unsigned long) &end; kernel_end += 
(SUN4C_REAL_PGDIR_SIZE * 4); kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end); + + last_valid_pfn = end_pfn = bootmem_init(); + + /* This does not logically belong here, but we need to + * call it at the moment we are able to use the bootmem + * allocator. + */ + sun_serial_setup(); + sun4c_probe_mmu(); invalid_segment = (num_segmaps - 1); sun4c_init_mmu_entry_pool(); - sun4c_init_rings(&start_mem); + sun4c_init_rings(); sun4c_init_map_kernelprom(kernel_end); sun4c_init_clean_mmu(kernel_end); sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS); - sun4c_init_lock_area(sparc_iobase_vaddr, IOBASE_END); + sun4c_init_lock_area(sparc_iomap.start, IOBASE_END); sun4c_init_lock_area(DVMA_VADDR, DVMA_END); - start_mem = sun4c_init_lock_areas(start_mem); + sun4c_init_lock_areas(); sun4c_init_fill_user_ring(); sun4c_set_context(0); @@ -2783,18 +2578,23 @@ unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long en vaddr += SUN4C_PGDIR_SIZE; swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3); sun4c_init_ss2_cache_bug(); - start_mem = PAGE_ALIGN(start_mem); - start_mem = sparc_context_init(start_mem, num_contexts); - start_mem = free_area_init(start_mem, end_mem); + sparc_context_init(num_contexts); + + { + unsigned int zones_size[MAX_NR_ZONES] = { 0, 0, 0}; + + zones_size[ZONE_DMA] = end_pfn; + free_area_init(zones_size); + } + cnt = 0; - for(i = 0; i < num_segmaps; i++) - if(mmu_entry_pool[i].locked) + for (i = 0; i < num_segmaps; i++) + if (mmu_entry_pool[i].locked) cnt++; max_user_taken_entries = num_segmaps - cnt - 40 - 1; printk("SUN4C: %d mmu entries for the kernel\n", cnt); - return start_mem; } /* Load up routines and constants for sun4c mmu */ @@ -2839,7 +2639,7 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM); - if(sun4c_vacinfo.do_hwflushes) { + if (sun4c_vacinfo.do_hwflushes) { BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM); @@ -2848,9 +2648,8 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_hw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM); } else { BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM); @@ -2860,9 +2659,8 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_sw, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM); } BTFIXUPSET_CALL(flush_tlb_all, 
sun4c_flush_tlb_all, BTFIXUPCALL_NORM); @@ -2871,15 +2669,13 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0); - BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM); #if PAGE_SHIFT <= 12 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1)); #else BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM); #endif - BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0); @@ -2931,6 +2727,9 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_flush_dma_area, sun4c_flush_dma_area, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(mmu_inval_dma_area, sun4c_inval_dma_area, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM); diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S new file mode 100644 index 000000000..e9fe43293 --- /dev/null +++ b/arch/sparc/mm/swift.S @@ -0,0 +1,275 @@ +/* $Id: swift.S,v 1.3 1999/11/14 06:13:56 zaitcev Exp $ + * swift.S: MicroSparc-II mmu/cache operations. + * + * Copyright (C) 1999 David S. Miller (davem@redhat.com) + */ + +#include <asm/psr.h> +#include <asm/asi.h> +#include <asm/page.h> +#include <asm/pgtsrmmu.h> +#include <asm/asm_offsets.h> + +#define WINDOW_FLUSH(tmp1, tmp2) \ + mov 0, tmp1; \ +98: ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], tmp2; \ + orcc %g0, tmp2, %g0; \ + add tmp1, 1, tmp1; \ + bne 98b; \ + save %sp, -64, %sp; \ +99: subcc tmp1, 1, tmp1; \ + bne 99b; \ + restore %g0, %g0, %g0; + + .text + .align 4 + +#if 1 /* XXX screw this, I can't get the VAC flushes working + * XXX reliably... -DaveM + */ + .globl swift_flush_cache_all, swift_flush_cache_mm + .globl swift_flush_cache_range, swift_flush_cache_page + .globl swift_flush_page_for_dma, swift_flush_chunk + .globl swift_flush_page_to_ram + +swift_flush_cache_all: +swift_flush_cache_mm: +swift_flush_cache_range: +swift_flush_cache_page: +swift_flush_page_for_dma: +swift_flush_chunk: +swift_flush_page_to_ram: + sethi %hi(0x2000), %o0 +1: subcc %o0, 0x10, %o0 + sta %g0, [%o0] ASI_M_TXTC_TAG + sta %g0, [%o0] ASI_M_DATAC_TAG + bne 1b + nop + retl + nop +#else + + .globl swift_flush_cache_all +swift_flush_cache_all: + WINDOW_FLUSH(%g4, %g5) + + /* Just clear out all the tags. 
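ld_mmu_sun4c() above wires either the *_hw or the *_sw flush family into place once at boot, depending on whether the VAC can flush in hardware; BTFIXUPSET_CALL patches the call sites directly rather than dispatching through pointers. The sketch below illustrates only the selection step, with ordinary function pointers standing in for BTFIXUP and all names invented:

#include <stdio.h>

/* Per-MMU operations chosen once at boot.  The real kernel patches call
 * sites with BTFIXUP; a function-pointer table is the simpler moral
 * equivalent and is only a model here. */
struct model_mmu_ops {
        void (*flush_tlb_page)(unsigned long page);
        void (*destroy_context)(int ctx);
};

static void flush_tlb_page_hw(unsigned long page) { printf("hw flush %#lx\n", page); }
static void flush_tlb_page_sw(unsigned long page) { printf("sw flush %#lx\n", page); }
static void destroy_context_common(int ctx)       { printf("free ctx %d\n", ctx); }

static struct model_mmu_ops mmu_ops;

static void model_ld_mmu_sun4c(int do_hwflushes)
{
        /* Pick the hardware-assisted or the software flush family once. */
        mmu_ops.flush_tlb_page = do_hwflushes ? flush_tlb_page_hw
                                              : flush_tlb_page_sw;
        mmu_ops.destroy_context = destroy_context_common;
}

int main(void)
{
        model_ld_mmu_sun4c(1);
        mmu_ops.flush_tlb_page(0x40000000UL);
        mmu_ops.destroy_context(3);
        return 0;
}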
*/ + sethi %hi(16 * 1024), %o0 +1: subcc %o0, 16, %o0 + sta %g0, [%o0] ASI_M_TXTC_TAG + bne 1b + sta %g0, [%o0] ASI_M_DATAC_TAG + retl + nop + + .globl swift_flush_cache_mm +swift_flush_cache_mm: +#ifndef __SMP__ + ld [%o0 + AOFF_mm_context], %g2 + cmp %g2, -1 + be swift_flush_cache_mm_out +#endif + WINDOW_FLUSH(%g4, %g5) + rd %psr, %g1 + andn %g1, PSR_ET, %g3 + wr %g3, 0x0, %psr + nop + nop + mov SRMMU_CTX_REG, %g7 + lda [%g7] ASI_M_MMUREGS, %g5 + sta %g2, [%g7] ASI_M_MMUREGS + +#if 1 + sethi %hi(0x2000), %o0 +1: subcc %o0, 0x10, %o0 + sta %g0, [%o0] ASI_M_FLUSH_CTX + bne 1b + nop +#else + clr %o0 + or %g0, 2048, %g7 + or %g0, 2048, %o1 + add %o1, 2048, %o2 + add %o2, 2048, %o3 + mov 16, %o4 + add %o4, 2048, %o5 + add %o5, 2048, %g2 + add %g2, 2048, %g3 +1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX + sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX + subcc %g7, 32, %g7 + bne 1b + add %o0, 32, %o0 +#endif + + mov SRMMU_CTX_REG, %g7 + sta %g5, [%g7] ASI_M_MMUREGS + wr %g1, 0x0, %psr + nop + nop +swift_flush_cache_mm_out: + retl + nop + + .globl swift_flush_cache_range +swift_flush_cache_range: + sub %o2, %o1, %o2 + sethi %hi(4096), %o3 + cmp %o2, %o3 + bgu swift_flush_cache_mm + nop + b 70f + nop + + .globl swift_flush_cache_page +swift_flush_cache_page: + ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ +70: +#ifndef __SMP__ + ld [%o0 + AOFF_mm_context], %g2 + cmp %g2, -1 + be swift_flush_cache_page_out +#endif + WINDOW_FLUSH(%g4, %g5) + rd %psr, %g1 + andn %g1, PSR_ET, %g3 + wr %g3, 0x0, %psr + nop + nop + mov SRMMU_CTX_REG, %g7 + lda [%g7] ASI_M_MMUREGS, %g5 + sta %g2, [%g7] ASI_M_MMUREGS + + andn %o1, (PAGE_SIZE - 1), %o1 +#if 1 + sethi %hi(0x1000), %o0 +1: subcc %o0, 0x10, %o0 + sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE + bne 1b + nop +#else + or %g0, 512, %g7 + or %g0, 512, %o0 + add %o0, 512, %o2 + add %o2, 512, %o3 + add %o3, 512, %o4 + add %o4, 512, %o5 + add %o5, 512, %g3 + add %g3, 512, %g4 +1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE + subcc %g7, 16, %g7 + bne 1b + add %o1, 16, %o1 +#endif + + mov SRMMU_CTX_REG, %g7 + sta %g5, [%g7] ASI_M_MMUREGS + wr %g1, 0x0, %psr + nop + nop +swift_flush_cache_page_out: + retl + nop + + /* Swift is write-thru, however it is not + * I/O nor TLB-walk coherent. Also it has + * caches which are virtually indexed and tagged. 
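The Swift flush routines above invalidate the virtually indexed caches by storing %g0 through the instruction- and data-cache tag ASIs, one 16-byte line at a time, until the whole tag space has been walked. Roughly the same loop in C, with the cache geometry and names as stand-ins:

#include <stdio.h>
#include <string.h>

#define MODEL_CACHE_BYTES       0x2000  /* matches the sethi %hi(0x2000) loop bound */
#define MODEL_LINE_BYTES        0x10    /* one tag per 16-byte line */
#define MODEL_NR_TAGS           (MODEL_CACHE_BYTES / MODEL_LINE_BYTES)

static unsigned int model_itag[MODEL_NR_TAGS]; /* stands in for ASI_M_TXTC_TAG space  */
static unsigned int model_dtag[MODEL_NR_TAGS]; /* stands in for ASI_M_DATAC_TAG space */

/* Walk the tag space from the top down, zeroing every line's tag, which is
 * what the sta %g0, [...] ASI_M_*_TAG loops do in swift.S. */
static void model_swift_flush_cache_all(void)
{
        unsigned long off = MODEL_CACHE_BYTES;

        while (off != 0) {
                off -= MODEL_LINE_BYTES;
                model_itag[off / MODEL_LINE_BYTES] = 0;
                model_dtag[off / MODEL_LINE_BYTES] = 0;
        }
}

int main(void)
{
        memset(model_itag, 0xff, sizeof(model_itag));
        memset(model_dtag, 0xff, sizeof(model_dtag));
        model_swift_flush_cache_all();
        printf("first itag now %#x\n", model_itag[0]);
        return 0;
}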
+ */ + .globl swift_flush_page_for_dma + .globl swift_flush_chunk + .globl swift_flush_page_to_ram +swift_flush_page_for_dma: +swift_flush_chunk: +swift_flush_page_to_ram: + andn %o0, (PAGE_SIZE - 1), %o1 +#if 1 + sethi %hi(0x1000), %o0 +1: subcc %o0, 0x10, %o0 + sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE + bne 1b + nop +#else + or %g0, 512, %g7 + or %g0, 512, %o0 + add %o0, 512, %o2 + add %o2, 512, %o3 + add %o3, 512, %o4 + add %o4, 512, %o5 + add %o5, 512, %g3 + add %g3, 512, %g4 +1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE + sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE + subcc %g7, 16, %g7 + bne 1b + add %o1, 16, %o1 +#endif + retl + nop +#endif + + .globl swift_flush_sig_insns +swift_flush_sig_insns: + flush %o1 + retl + flush %o1 + 4 + + .globl swift_flush_tlb_mm + .globl swift_flush_tlb_range + .globl swift_flush_tlb_all +swift_flush_tlb_mm: +swift_flush_tlb_range: +#ifndef __SMP__ + ld [%o0 + AOFF_mm_context], %g2 + cmp %g2, -1 + be swift_flush_tlb_all_out +#endif +swift_flush_tlb_all: + mov 0x400, %o1 + sta %g0, [%o1] ASI_M_FLUSH_PROBE +swift_flush_tlb_all_out: + retl + nop + + .globl swift_flush_tlb_page +swift_flush_tlb_page: + ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + mov SRMMU_CTX_REG, %g1 + ld [%o0 + AOFF_mm_context], %o3 + andn %o1, (PAGE_SIZE - 1), %o1 +#ifndef __SMP__ + cmp %o3, -1 + be swift_flush_tlb_page_out + nop +#endif +#if 1 + mov 0x400, %o1 + sta %g0, [%o1] ASI_M_FLUSH_PROBE +#else + lda [%g1] ASI_M_MMUREGS, %g5 + sta %o3, [%g1] ASI_M_MMUREGS + sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */ + sta %g0, [%o1] ASI_M_FLUSH_PROBE + sta %g5, [%g1] ASI_M_MMUREGS +#endif +swift_flush_tlb_page_out: + retl + nop diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 1c4356fa0..07c5ed620 100644 --- a/arch/sparc/mm/tsunami.S +++ b/arch/sparc/mm/tsunami.S @@ -1,4 +1,4 @@ -/* $Id: tsunami.S,v 1.2 1999/08/14 03:51:48 anton Exp $ +/* $Id: tsunami.S,v 1.3 1999/10/09 05:32:19 zaitcev Exp $ * tsunami.S: High speed MicroSparc-I mmu/cache operations. * * Copyright (C) 1997 David S. 
Miller (davem@caip.rutgers.edu) @@ -44,11 +44,11 @@ tsunami_flush_cache_range: tsunami_flush_cache_all: WINDOW_FLUSH(%g4, %g5) tsunami_flush_page_for_dma: - sta %g0, [%g0] ASI_M_DC_FLCLEAR sta %g0, [%g0] ASI_M_IC_FLCLEAR +tsunami_flush_chunk: + sta %g0, [%g0] ASI_M_DC_FLCLEAR tsunami_flush_cache_out: tsunami_flush_page_to_ram: -tsunami_flush_chunk: retl nop @@ -68,6 +68,11 @@ tsunami_flush_tlb_range: tsunami_flush_tlb_all: mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE + nop + nop + nop + nop + nop tsunami_flush_tlb_out: retl nop @@ -85,6 +90,59 @@ tsunami_flush_tlb_page: lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE + nop + nop + nop + nop + nop tsunami_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS + +#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \ + ldd [src + offset + 0x18], t0; \ + std t0, [dst + offset + 0x18]; \ + ldd [src + offset + 0x10], t2; \ + std t2, [dst + offset + 0x10]; \ + ldd [src + offset + 0x08], t0; \ + std t0, [dst + offset + 0x08]; \ + ldd [src + offset + 0x00], t2; \ + std t2, [dst + offset + 0x00]; + + .globl tsunami_copy_1page +tsunami_copy_1page: +/* NOTE: This routine has to be shorter than 70insns --jj */ + or %g0, (PAGE_SIZE >> 8), %g1 +1: + MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5) + MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5) + subcc %g1, 1, %g1 + add %o0, 0x100, %o0 + bne 1b + add %o1, 0x100, %o1 + + .globl tsunami_setup_blockops +tsunami_setup_blockops: + sethi %hi(__copy_1page), %o0 + or %o0, %lo(__copy_1page), %o0 + sethi %hi(tsunami_copy_1page), %o1 + or %o1, %lo(tsunami_copy_1page), %o1 + sethi %hi(tsunami_setup_blockops), %o2 + or %o2, %lo(tsunami_setup_blockops), %o2 + ld [%o1], %o4 +1: add %o1, 4, %o1 + st %o4, [%o0] + add %o0, 4, %o0 + cmp %o1, %o2 + bne 1b + ld [%o1], %o4 + sta %g0, [%g0] ASI_M_IC_FLCLEAR + sta %g0, [%g0] ASI_M_DC_FLCLEAR + retl + nop diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile index 917aa9ad7..f8c41ccdf 100644 --- a/arch/sparc/prom/Makefile +++ b/arch/sparc/prom/Makefile @@ -1,4 +1,4 @@ -# $Id: Makefile,v 1.6 1998/01/30 10:58:59 jj Exp $ +# $Id: Makefile,v 1.7 1999/12/21 04:02:21 davem Exp $ # Makefile for the Sun Boot PROM interface library under # Linux. # @@ -22,6 +22,6 @@ promlib.a: $(OBJS) sync dep: - $(CPP) -M *.c > .depend + $(CPP) $(CPPFLAGS) -M *.c > .depend include $(TOPDIR)/Rules.make diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c index 89748c49c..244619eb6 100644 --- a/arch/sparc/prom/ranges.c +++ b/arch/sparc/prom/ranges.c @@ -1,4 +1,4 @@ -/* $Id: ranges.c,v 1.12 1999/08/31 06:54:47 davem Exp $ +/* $Id: ranges.c,v 1.14 1999/10/06 19:28:54 zaitcev Exp $ * ranges.c: Handle ranges in newer proms for obio/sbus. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -8,6 +8,7 @@ #include <linux/init.h> #include <asm/openprom.h> #include <asm/oplib.h> +#include <asm/types.h> #include <asm/sbus.h> #include <asm/system.h> @@ -64,24 +65,6 @@ prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs) prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges); } -/* Apply probed sbus ranges to registers passed, if no ranges return. 
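tsunami_copy_1page above streams a page with ldd/std pairs: MIRROR_BLOCK moves 32 bytes as four doublewords and the outer loop advances 256 bytes per iteration, after which tsunami_setup_blockops copies those instructions over __copy_1page. A C model of just the copy pattern, with sizes and names as stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096

/* Copy one 32-byte block with four doubleword moves, like MIRROR_BLOCK. */
static void model_mirror_block(uint64_t *dst, const uint64_t *src)
{
        dst[3] = src[3];
        dst[2] = src[2];
        dst[1] = src[1];
        dst[0] = src[0];
}

/* Copy a page 256 bytes (eight blocks) per outer iteration, like tsunami_copy_1page. */
static void model_copy_1page(void *to, const void *from)
{
        uint64_t *dst = to;
        const uint64_t *src = from;
        int chunks = MODEL_PAGE_SIZE >> 8;

        while (chunks--) {
                int b;

                for (b = 0; b < 8; b++)
                        model_mirror_block(dst + 4 * b, src + 4 * b);
                dst += 32;      /* 0x100 bytes == 32 doublewords */
                src += 32;
        }
}

int main(void)
{
        static uint64_t a[MODEL_PAGE_SIZE / 8], b[MODEL_PAGE_SIZE / 8];

        memset(a, 0x5a, sizeof(a));
        model_copy_1page(b, a);
        printf("copies match: %d\n", memcmp(a, b, sizeof(a)) == 0);
        return 0;
}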
*/ -void prom_apply_sbus_ranges(struct linux_sbus *sbus, struct linux_prom_registers *regs, - int nregs, struct linux_sbus_device *sdev) -{ - if(sbus && sbus->num_sbus_ranges) { - if(sdev && (sdev->ranges_applied == 0)) { - sdev->ranges_applied = 1; - prom_adjust_regs(regs, nregs, sbus->sbus_ranges, - sbus->num_sbus_ranges); - } else if(!sdev) { - printk("PROMLIB: Aieee, old SBUS driver, update it to use new " - "prom_apply_sbus_ranges interface now!\n"); - prom_adjust_regs(regs, nregs, sbus->sbus_ranges, - sbus->num_sbus_ranges); - } - } -} - void __init prom_ranges_init(void) { int node, obio_node; @@ -107,32 +90,6 @@ void __init prom_ranges_init(void) return; } -void __init prom_sbus_ranges_init(int parentnd, struct linux_sbus *sbus) -{ - int success; - - sbus->num_sbus_ranges = 0; - if(sparc_cpu_model == sun4c) - return; - success = prom_getproperty(sbus->prom_node, "ranges", - (char *) sbus->sbus_ranges, - sizeof (sbus->sbus_ranges)); - if (success != -1) - sbus->num_sbus_ranges = (success/sizeof(struct linux_prom_ranges)); - if (sparc_cpu_model == sun4d) { - struct linux_prom_ranges iounit_ranges[PROMREG_MAX]; - int num_iounit_ranges; - - success = prom_getproperty(parentnd, "ranges", - (char *) iounit_ranges, - sizeof (iounit_ranges)); - if (success != -1) { - num_iounit_ranges = (success/sizeof(struct linux_prom_ranges)); - prom_adjust_ranges (sbus->sbus_ranges, sbus->num_sbus_ranges, iounit_ranges, num_iounit_ranges); - } - } -} - void prom_apply_generic_ranges (int node, int parent, struct linux_prom_registers *regs, int nregs) { |
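ranges.c drops the SBUS-specific appliers and keeps only the generic path: a node's "ranges" property is a table of (child space, child base) to (parent space, parent base) triples with a size, and prom_adjust_regs() rewrites each register property through the matching triple so drivers see parent-bus addresses. A hedged sketch of that translation; the struct layouts and names below are invented, not the kernel's:

#include <stdio.h>

/* Stand-in layouts; the kernel's linux_prom_registers/linux_prom_ranges differ
 * in naming but carry the same child/parent/size information. */
struct model_reg   { unsigned int space, addr; };
struct model_range { unsigned int child_space, child_base;
                     unsigned int parent_space, parent_base;
                     unsigned int size; };

/* Rewrite child-bus register addresses into the parent bus's space, the
 * job prom_adjust_regs() does with a node's "ranges" property. */
static void model_adjust_regs(struct model_reg *regs, int nregs,
                              const struct model_range *rng, int nrng)
{
        int r, i;

        for (r = 0; r < nregs; r++) {
                for (i = 0; i < nrng; i++) {
                        if (regs[r].space != rng[i].child_space)
                                continue;
                        if (regs[r].addr - rng[i].child_base >= rng[i].size)
                                continue;
                        regs[r].addr += rng[i].parent_base - rng[i].child_base;
                        regs[r].space = rng[i].parent_space;
                        break;
                }
        }
}

int main(void)
{
        struct model_range rng = { 1, 0x0, 0, 0xf0000000u, 0x10000000u };
        struct model_reg   reg = { 1, 0x4000 };

        model_adjust_regs(&reg, 1, &rng, 1);
        printf("reg now space %u addr %#x\n", reg.space, reg.addr);
        return 0;
}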