Diffstat (limited to 'arch/arm'): 29 files changed, 1082 insertions, 1110 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 0895fc0ba..f8c91aca3 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -16,10 +16,10 @@ LD := $(CROSS_COMPILE)ld OBJCOPY := $(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S CPP := $(CC) -E PERL := perl -LINKFLAGS := -X -T arch/arm/vmlinux.lds +LINKFLAGS := -p -X -T arch/arm/vmlinux.lds ARCHCC := $(word 1,$(CC)) - +AFLAGS += -mno-fpu CFLAGS_PIPE := -pipe CFLAGS := $(CFLAGS) $(CFLAGS_PIPE) @@ -31,6 +31,15 @@ ifdef CONFIG_DEBUG_INFO CFLAGS += -g endif +# Ensure this is ld "2.9.4" or later +NEW_LINKER := $(shell if $(LD) --gc-sections --version >/dev/null 2>&1; then echo y; else echo n; fi) + +ifneq ($(NEW_LINKER),y) +dummy:; @echo '*** 2.3 kernels no longer build correctly with old versions of binutils.' + @echo '*** Please upgrade your binutils to 2.9.5.' + @false +endif + # GCC 2.7 uses different options to later compilers; sort out which we have NEW_GCC := $(shell if $(CC) --version 2>&1 | grep '^2\.7' > /dev/null; then echo n; else echo y; fi) @@ -55,21 +64,6 @@ CFLAGS_ARM7 := -m6 CFLAGS_SA110 := -m6 endif -# See if this is ld "2.9.4" or later -NEW_LINKER := $(shell if $(LD) --gc-sections --version >/dev/null 2>&1; then echo y; else echo n; fi) - -ifeq ($(NEW_LINKER),y) -AFLAGS += -mno-fpu -AFLAGS_PROC_CPU_26 := -mapcs-26 -AFLAGS_PROC_CPU_32v3 := -mapcs-32 -marmv3m -AFLAGS_PROC_CPU_32v4 := -mapcs-32 -marmv4t -LINKFLAGS := -p $(LINKFLAGS) -else -AFLAGS_PROC_CPU_26 := -m3 -AFLAGS_PROC_CPU_32v3 := -m6 -AFLAGS_PROC_CPU_32v4 := -m6 -endif - # # Select CPU dependent flags # @@ -77,7 +71,7 @@ ifeq ($(CONFIG_CPU_26),y) PROCESSOR = armo TEXTADDR = 0x02080000 CFLAGS += $(CFLAGS_PROC_CPU_26) - AFLAGS += $(AFLAGS_PROC_CPU_26) + AFLAGS += -mapcs-26 endif ifeq ($(CONFIG_CPU_32),y) @@ -85,10 +79,10 @@ ifeq ($(CONFIG_CPU_32),y) TEXTADDR = 0xC0008000 ifeq ($(CONFIG_CPU_32v4),y) CFLAGS += $(CFLAGS_PROC_CPU_32v4) - AFLAGS += $(AFLAGS_PROC_CPU_32v4) + AFLAGS += -mapcs-32 -marmv4 else CFLAGS += $(CFLAGS_PROC_CPU_32v3) - AFLAGS += $(AFLAGS_PROC_CPU_32v3) + AFLAGS += -mapcs-32 -marmv3m endif # # Exactly one of the following must be selected @@ -156,7 +150,7 @@ LIBS := arch/arm/lib/lib.a $(LIBS) $(GCCLIB) DRIVERS += arch/arm/special/special.a ifeq ($(CONFIG_NWFPE),y) -CORE_FILES += arch/arm/nwfpe/math-emu.o +LIBS := arch/arm/nwfpe/math-emu.o $(LIBS) endif ifeq ($(CONFIG_ARCH_ACORN),y) @@ -173,7 +167,7 @@ MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot # to date before starting compilation CONSTANTS := constants -constants: dummy +constants: $(TOPDIR)/include/asm-arm/proc-fns.h dummy @$(MAKE) -C arch/arm/lib constants.h symlinks: archsymlinks diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 3c0478ab3..a9d71cb7b 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -8,7 +8,7 @@ OBJS = misc.o SYSTEM = $(TOPDIR)/vmlinux CFLAGS = -O2 -DSTDC_HEADERS $(CFLAGS_PROC) FONTC = $(TOPDIR)/drivers/video/font_acorn_8x8.c -ZLDFLAGS = -X -T vmlinux.lds +ZLDFLAGS = -p -X -T vmlinux.lds # # Architecture dependencies diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 721967e4b..e87c0a72e 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -96,6 +96,8 @@ start: */ reloc_start: add r8, r5, r0 #if 0 + mov r0, #'\n' + bl putc mov r0, r6 mov r1, #8 bl phex @@ -139,8 +141,8 @@ reloc_start: add r8, r5, r0 bl phex mov r0, #'\n' bl putc - mov r0, r4 - bl memdump + mov r0, r4 + bl memdump #endif eor r0, r6, #0x44 << 24 @ SA-110? 
eor r0, r0, #0x01 << 16 @@ -155,6 +157,25 @@ call_kernel: mov r0, #0 phexbuf: .space 12 +#if 0 + .macro loadsp, rb + mov \rb, #0x7c000000 + .endm + + .macro writeb, rb + strb \rb, [r3, #0x3f8] + .endm +#else + .macro loadsp, rb + mov \rb, #0x03000000 + orr \rb, \rb, #0x00010000 + .endm + + .macro writeb, rb + strb \rb, [r3, #0x3f8 << 2] + .endm +#endif + phex: adr r3, phexbuf mov r2, #0 strb r2, [r3, r1] @@ -169,11 +190,11 @@ phex: adr r3, phexbuf strb r2, [r3, r1] b 1b -puts: mov r3, #0x7c000000 +puts: loadsp r3 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr -2: strb r2, [r3, #0x3f8] +2: writeb r2 mov r1, #0x00020000 3: subs r1, r1, #1 bne 3b @@ -186,7 +207,7 @@ puts: mov r3, #0x7c000000 putc: mov r2, r0 mov r0, #0 - mov r3, #0x7c000000 + loadsp r3 b 2b memdump: mov r12, r0 diff --git a/arch/arm/config.in b/arch/arm/config.in index 3e0bcd0f3..249759e1a 100644 --- a/arch/arm/config.in +++ b/arch/arm/config.in @@ -21,6 +21,7 @@ choice 'ARM system type' \ EBSA-110 CONFIG_ARCH_EBSA110 \ FootBridge-based CONFIG_FOOTBRIDGE" RiscPC # SA1100-based CONFIG_ARCH_SA1100 + if [ "$CONFIG_FOOTBRIDGE" = "y" ]; then bool 'FootBridge in HOST mode' CONFIG_HOST_FOOTBRIDGE if [ "$CONFIG_HOST_FOOTBRIDGE" = "y" ]; then @@ -45,13 +46,13 @@ fi if [ "$CONFIG_ARCH_SA1100" = "y" ]; then define_bool CONFIG_CPU_SA1100 y choice 'SA1100 implementation' \ - "Brutus CONFIG_SA1100_BRUTUS \ - empeg CONFIG_SA1100_EMPEG \ - Itsy CONFIG_SA1100_ITSY \ - LART CONFIG_SA1100_LART \ - PLEB CONFIG_SA1100_PLEB \ - Victor CONFIG_SA1100_VICTOR \ - Tifon CONFIG_SA1100_TIFON" Brutus + "Brutus CONFIG_SA1100_BRUTUS \ + Empeg CONFIG_SA1100_EMPEG \ + Itsy CONFIG_SA1100_ITSY \ + LART CONFIG_SA1100_LART \ + PLEB CONFIG_SA1100_PLEB \ + Victor CONFIG_SA1100_VICTOR \ + Tifon CONFIG_SA1100_TIFON" Brutus fi # @@ -121,8 +122,6 @@ else define_bool CONFIG_ISA_DMA n fi -endmenu - if [ "$CONFIG_CPU_32" = "y" -a "$CONFIG_ARCH_EBSA110" != "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then bool 'Enable kernel-mode alignment trap handler (EXPERIMENTAL)' CONFIG_ALIGNMENT_TRAP fi @@ -145,6 +144,11 @@ bool 'System V IPC' CONFIG_SYSVIPC bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT bool 'Sysctl support' CONFIG_SYSCTL tristate 'Math emulation' CONFIG_NWFPE +if [ "$CONFIG_PROC_FS" = "y" ]; then + choice 'Kernel core (/proc/kcore) format' \ + "ELF CONFIG_KCORE_ELF \ + A.OUT CONFIG_KCORE_AOUT" ELF +fi tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC diff --git a/arch/arm/defconfig b/arch/arm/defconfig index d289870ea..ef7d1e315 100644 --- a/arch/arm/defconfig +++ b/arch/arm/defconfig @@ -49,6 +49,8 @@ CONFIG_SYSVIPC=y # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_SYSCTL=y CONFIG_NWFPE=y +CONFIG_KCORE_ELF=y +# CONFIG_KCORE_AOUT is not set CONFIG_BINFMT_AOUT=y CONFIG_BINFMT_ELF=y # CONFIG_BINFMT_MISC is not set diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index ebb2f150d..43cc0574f 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -12,6 +12,7 @@ #include <asm/byteorder.h> #include <asm/elf.h> #include <asm/io.h> +#include <asm/irq.h> #include <asm/dma.h> #include <asm/pgtable.h> #include <asm/proc-fns.h> @@ -98,7 +99,8 @@ EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(system_rev); EXPORT_SYMBOL(system_serial_low); EXPORT_SYMBOL(system_serial_high); - +EXPORT_SYMBOL(__bug); +EXPORT_SYMBOL(__readwrite_bug); EXPORT_SYMBOL(enable_irq); EXPORT_SYMBOL(disable_irq); @@ -152,8 +154,8 @@ 
EXPORT_SYMBOL(__bus_to_virt); #ifndef CONFIG_NO_PGT_CACHE EXPORT_SYMBOL(quicklists); #endif -EXPORT_SYMBOL(__bad_pmd); -EXPORT_SYMBOL(__bad_pmd_kernel); +EXPORT_SYMBOL(__handle_bad_pmd); +EXPORT_SYMBOL(__handle_bad_pmd_kernel); /* string / mem functions */ EXPORT_SYMBOL_NOVERS(strcpy); @@ -235,3 +237,5 @@ EXPORT_SYMBOL_NOVERS(__down_failed); EXPORT_SYMBOL_NOVERS(__down_interruptible_failed); EXPORT_SYMBOL_NOVERS(__down_trylock_failed); EXPORT_SYMBOL_NOVERS(__up_wakeup); + +EXPORT_SYMBOL(get_wchan); diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 3bd7a7358..28c12d3e8 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -13,16 +13,14 @@ #include <asm/irq.h> #include <asm/system.h> -int have_isa_bridge; +#include "bios32.h" -int (*pci_irq_fixup)(struct pci_dev *dev); +static int debug_pci; +int have_isa_bridge; -extern struct pci_ops *dc21285_init(int pass); -extern void pcibios_fixup_ebsa285(struct pci_dev *dev); extern void hw_init(void); -void -pcibios_report_device_errors(void) +void pcibios_report_device_errors(void) { struct pci_dev *dev; @@ -31,16 +29,17 @@ pcibios_report_device_errors(void) pci_read_config_word(dev, PCI_STATUS, &status); - if (status & 0xf900) { - pci_write_config_word(dev, PCI_STATUS, status & 0xf900); - printk(KERN_DEBUG "PCI: %02x:%02x status = %X\n", - dev->bus->number, dev->devfn, status); - } + if ((status & 0xf900) == 0) + continue; + + pci_write_config_word(dev, PCI_STATUS, status & 0xf900); + printk(KERN_DEBUG "PCI: status %04X on %s\n", + status, dev->name); } } /* - * We don't use this to fix the device, but more our initialisation. + * We don't use this to fix the device, but initialisation of it. * It's not the correct use for this, but it works. The actions we * take are: * - enable only IO @@ -68,196 +67,108 @@ static void __init pci_fixup_83c553(struct pci_dev *dev) pci_write_config_byte(dev, 0x81, 0x01); } -struct pci_fixup pcibios_fixups[] = { - { PCI_FIXUP_HEADER, PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553 }, - { 0 } -}; - -/* - * Assign new address to PCI resource. We hope our resource information - * is complete. On the PC, we don't re-assign resources unless we are - * forced to do so. - * - * Expects start=0, end=size-1, flags=resource type. - */ - -int __init pcibios_assign_resource(struct pci_dev *dev, int i) +static void __init pci_fixup_unassign(struct pci_dev *dev) { - struct resource *r = &dev->resource[i]; - struct resource *pr = pci_find_parent_resource(dev, r); - unsigned long size = r->end + 1; - unsigned long flags = 0; - - if (!pr) - return -EINVAL; - if (r->flags & IORESOURCE_IO) { - if (size > 0x100) - return -EFBIG; - if (allocate_resource(pr, r, size, 0x9000, ~0, 1024)) - return -EBUSY; - flags = PCI_BASE_ADDRESS_SPACE_IO; - } else { - if (allocate_resource(pr, r, size, 0x00100000, 0x7fffffff, size)) - return -EBUSY; - } - if (i < 6) - pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4*i, r->start | flags); - return 0; + dev->resource[0].end -= dev->resource[0].start; + dev->resource[0].start = 0; } /* - * Assign an address to an I/O range. + * PCI IDE controllers use non-standard I/O port + * decoding, respect it. 
*/ -static void __init pcibios_fixup_io_addr(struct pci_dev *dev, struct resource *r, int idx) +static void __init pci_fixup_ide_bases(struct pci_dev *dev) { - unsigned int reg = PCI_BASE_ADDRESS_0 + (idx << 2); - unsigned int size = r->end - r->start + 1; - u32 try; + struct resource *r; + int i; - /* - * We need to avoid collisions with `mirrored' VGA ports and other strange - * ISA hardware, so we always want the addresses kilobyte aligned. - */ - if (!size || size > 256) { - printk(KERN_ERR "PCI: Cannot assign I/O space to %s, " - "%d bytes are too much.\n", dev->name, size); + if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) return; - } - - if (allocate_resource(&ioport_resource, r, size, 0x9000, ~0, 1024)) { - printk(KERN_ERR "PCI: Unable to find free %d bytes of I/O " - "space for %s.\n", size, dev->name); - return; - } - printk("PCI: Assigning I/O space %04lx-%04lx to %s\n", - r->start, r->end, dev->name); - - pci_write_config_dword(dev, reg, r->start | PCI_BASE_ADDRESS_SPACE_IO); - pci_read_config_dword(dev, reg, &try); - - if ((try & PCI_BASE_ADDRESS_IO_MASK) != r->start) { - r->start = 0; - pci_write_config_dword(dev, reg, 0); - printk(KERN_ERR "PCI: I/O address setup failed, got %04x\n", try); + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + r = dev->resource + i; + if ((r->start & ~0x80) == 0x374) { + r->start |= 2; + r->end = r->start; + } } } +struct pci_fixup pcibios_fixups[] = { + { + PCI_FIXUP_HEADER, + PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, + pci_fixup_83c553 + }, { + PCI_FIXUP_HEADER, + PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, + pci_fixup_unassign + }, { + PCI_FIXUP_HEADER, + PCI_ANY_ID, PCI_ANY_ID, + pci_fixup_ide_bases + }, { 0 } +}; + /* - * Assign an address to an memory range. + * Allocate resources for all PCI devices that have been enabled. + * We need to do that before we try to fix up anything. */ -static void __init pcibios_fixup_mem_addr(struct pci_dev *dev, struct resource *r, int idx) +static void __init pcibios_claim_resources(void) { - unsigned int reg = PCI_BASE_ADDRESS_0 + (idx << 2); - unsigned int size = r->end - r->start + 1; - u32 try; - - if (!size) { - printk(KERN_ERR "PCI: Cannot assign memory space to %s, " - "%d bytes are too much.\n", dev->name, size); - return; - } - - if (allocate_resource(&iomem_resource, r, size, - 0x00100000, 0x0fffffff, 1024)) { - printk(KERN_ERR "PCI: Unable to find free %d bytes of memory " - "space for %s.\n", size, dev->name); - return; - } - - printk("PCI: Assigning memory space %08lx-%08lx to %s\n", - r->start, r->end, dev->name); - - pci_write_config_dword(dev, reg, r->start); - pci_read_config_dword(dev, reg, &try); + struct pci_dev *dev; + int idx; - if (try != r->start) { - r->start = 0; - pci_write_config_dword(dev, reg, 0); - printk(KERN_ERR "PCI: memory address setup failed, " - "got %08x\n", try); - } + for (dev = pci_devices; dev; dev = dev->next) + for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) + if (dev->resource[idx].flags && + dev->resource[idx].start) + pci_claim_resource(dev, idx); } -#define _PCI_REGION_IO 1 -#define _PCI_REGION_MEM 2 - -/* - * Fix up one PCI devices regions, enables and interrupt lines - */ -static void __init pcibios_fixup_device(struct pci_dev *dev, u16 *cmd) +void __init +pcibios_update_resource(struct pci_dev *dev, struct resource *root, + struct resource *res, int resource) { - int i, has_regions = 0; - - /* - * Fix up the regions. Any regions which aren't allocated - * are given a free region. 
- */ - for (i = 0; i < 6; i++) { - struct resource *r = dev->resource + i; + unsigned long where, size; + u32 reg; - if (r->flags & IORESOURCE_IO) { - has_regions |= _PCI_REGION_IO; + if (debug_pci) + printk("PCI: Assigning %3s %08lx to %s\n", + res->flags & IORESOURCE_IO ? "IO" : "MEM", + res->start, dev->name); - if (!r->start || r->end == 0xffffffff) - pcibios_fixup_io_addr(dev, r, i); - } else if (r->end) { - has_regions |= _PCI_REGION_MEM; + where = PCI_BASE_ADDRESS_0 + resource * 4; + size = res->end - res->start; - if (!r->start) - pcibios_fixup_mem_addr(dev, r, i); - } - } - - switch (dev->class >> 8) { - case PCI_CLASS_BRIDGE_ISA: - case PCI_CLASS_BRIDGE_EISA: - /* - * If this device is an ISA bridge, set the have_isa_bridge - * flag. We will then go looking for things like keyboard, - * etc - */ - have_isa_bridge = !0; - /* FALL THROUGH */ - - default: - /* - * Don't enable VGA-compatible cards since they have - * fixed I/O and memory space. - * - * Don't enabled disabled IDE interfaces either because - * some BIOSes may reallocate the same address when they - * find that no devices are attached. - */ - if (has_regions & _PCI_REGION_IO && - !((*cmd) & PCI_COMMAND_IO)) { - printk("PCI: Enabling I/O for %s\n", dev->name); - *cmd |= PCI_COMMAND_IO; - } + pci_read_config_dword(dev, where, ®); + reg = (reg & size) | (((u32)(res->start - root->start)) & ~size); + pci_write_config_dword(dev, where, reg); +} - if (has_regions & _PCI_REGION_MEM && - !((*cmd) & PCI_COMMAND_MEMORY)) { - printk("PCI: Enabling memory for %s\n", dev->name); - *cmd |= PCI_COMMAND_MEMORY; - } - } +void __init pcibios_update_irq(struct pci_dev *dev, int irq) +{ + if (debug_pci) + printk("PCI: Assigning IRQ %02d to %s\n", irq, dev->name); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } /* - * Fix base addresses, I/O and memory enables and IRQ's + * Called after each bus is probed, but before its children + * are examined. */ -static void __init pcibios_fixup_devices(void) +void __init pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; - for (dev = pci_devices; dev; dev = dev->next) { + for (dev = bus->devices; dev; dev = dev->sibling) { u16 cmd; /* - * architecture specific hacks. - * I don't really want this here, - * but I don't see any other place - * for it to live. + * architecture specific hacks. I don't really want + * this here, but I don't see any other place for it + * to live. Shame the device doesn't support + * capabilities */ if (machine_is_netwinder() && dev->vendor == PCI_VENDOR_ID_DEC && @@ -266,119 +177,165 @@ static void __init pcibios_fixup_devices(void) pci_write_config_dword(dev, 0x40, 0x80000000); /* + * If this device is an ISA bridge, set the have_isa_bridge + * flag. We will then go looking for things like keyboard, + * etc + */ + if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA || + dev->class >> 8 == PCI_CLASS_BRIDGE_EISA) + have_isa_bridge = !0; + + /* * Set latency timer to 32, and a cache line size to 32 bytes. * Also, set system error enable, parity error enable, and * fast back to back transaction enable. Disable ROM. 
*/ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 32); pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8); - pci_write_config_dword(dev, PCI_ROM_ADDRESS, 0); pci_read_config_word(dev, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_FAST_BACK | PCI_COMMAND_SERR | PCI_COMMAND_PARITY; - pcibios_fixup_device(dev, &cmd); - pci_write_config_word(dev, PCI_COMMAND, cmd); pci_read_config_word(dev, PCI_COMMAND, &cmd); + pci_write_config_dword(dev, PCI_ROM_ADDRESS, 0); + } +} - /* - * now fixup the IRQs, if required - */ - if (pci_irq_fixup) - dev->irq = pci_irq_fixup(dev); +static u8 __init no_swizzle(struct pci_dev *dev, u8 *pin) +{ + return 0; +} - /* - * If any remaining IRQs are weird, fix it now. - */ - if (dev->irq >= NR_IRQS) - dev->irq = 0; +/* ebsa285 host-specific stuff */ +static int irqmap_ebsa285[] __initdata = { IRQ_IN1, IRQ_IN0, IRQ_PCI, IRQ_IN3 }; - /* - * catch any drivers still reading this from the - * device itself. This can be removed once - * all drivers are fixed. (are there any?) - */ - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); - } +static u8 __init ebsa285_swizzle(struct pci_dev *dev, u8 *pin) +{ + return PCI_SLOT(dev->devfn); } -/* - * Allocate resources for all PCI devices that have been enabled. - * We need to do that before we try to fix up anything. - */ -static void __init pcibios_claim_resources(void) +static int __init ebsa285_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { - struct pci_dev *dev; - int idx; + return irqmap_ebsa285[(slot + pin) & 3]; +} - for (dev = pci_devices; dev; dev = dev->next) - for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { - struct resource *a, *r = &dev->resource[idx]; - - /* - * Ignore regions that start at 0 or - * end at 0xffffffff - */ - if (!r->start || r->end == 0xffffffff) - continue; - - if (r->flags & IORESOURCE_IO) - a = &ioport_resource; - else - a = &iomem_resource; - - if (request_resource(a, r) < 0) - printk(KERN_ERR "PCI: Address space collision " - "on region %d of %s\n", - idx, dev->name); - /* We probably should disable the region, - * shouldn't we? - */ - } +static struct hw_pci ebsa285_pci __initdata = { + dc21285_init, + 0x9000, + 0x00100000, + ebsa285_swizzle, + ebsa285_map_irq +}; + +/* cats host-specific stuff */ +static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; + +static int __init cats_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + if (dev->irq >= 128) + return 16 + (dev->irq & 0x1f); + + if (dev->irq >= 1 && dev->irq <= 4) + return irqmap_cats[dev->irq - 1]; + + if (dev->irq != 0) + printk("PCI: device %02x:%02x has unknown irq line %x\n", + dev->bus->number, dev->devfn, dev->irq); + + return -1; } -/* - * Called after each bus is probed, but before its children - * are examined. 
- * - * No fixup of bus required - */ -void __init pcibios_fixup_bus(struct pci_bus *bus) +static struct hw_pci cats_pci __initdata = { + dc21285_init, + 0x9000, + 0x00100000, + no_swizzle, + cats_map_irq +}; + +/* netwinder host-specific stuff */ +static int __init netwinder_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { +#define DEV(v,d) ((v)<<16|(d)) + switch (DEV(dev->vendor, dev->device)) { + case DEV(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142): + return IRQ_NETWINDER_ETHER100; + + case DEV(PCI_VENDOR_ID_WINBOND2, 0x5a5a): + return IRQ_NETWINDER_ETHER10; + + case DEV(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553): + return 0; + + case DEV(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105): + return IRQ_ISA_HARDDISK1; + + case DEV(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_2000): + return IRQ_NETWINDER_VGA; + + default: + printk(KERN_ERR "PCI: %02X:%02X [%04X:%04X] unknown device\n", + dev->bus->number, dev->devfn, + dev->vendor, dev->device); + return 0; + } } +static struct hw_pci netwinder_pci __initdata = { + dc21285_init, + 0x9000, + 0x00100000, + no_swizzle, + netwinder_map_irq +}; + void __init pcibios_init(void) { - struct pci_ops *ops; + struct hw_pci *hw_pci = NULL; + + if (machine_is_ebsa285()) + hw_pci = &ebsa285_pci; + else if (machine_is_cats()) + hw_pci = &cats_pci; + else if (machine_is_netwinder()) + hw_pci = &netwinder_pci; + + if (hw_pci == NULL) + return; /* - * Pre-initialisation. Set up the host bridge. + * Set up the host bridge, and scan the bus. */ - ops = dc21285_init(0); + hw_pci->init(); - printk("PCI: Probing PCI hardware\n"); - - pci_scan_bus(0, ops, NULL); + /* + * Other architectures don't seem to do this... should we? + */ pcibios_claim_resources(); - pcibios_fixup_devices(); /* - * Now clear down any PCI error IRQs and - * register the error handler + * Assign any unassigned resources. Note that we really ought to + * have min/max stuff here - max mem address is 0x0fffffff */ - dc21285_init(1); + pci_assign_unassigned_resources(hw_pci->io_start, hw_pci->mem_start); + pci_fixup_irqs(hw_pci->swizzle, hw_pci->map_irq); + pci_set_bus_ranges(); /* - * Initialise any other hardware after we've - * got the PCI bus initialised. We may need - * the PCI bus to talk to this other hardware. + * Initialise any other hardware after we've got the PCI bus + * initialised. We may need the PCI bus to talk to this other + * hardware. 
*/ hw_init(); } char * __init pcibios_setup(char *str) { + if (!strcmp(str, "debug")) { + debug_pci = 1; + return NULL; + } return str; } diff --git a/arch/arm/kernel/dec21285.c b/arch/arm/kernel/dec21285.c index 42a9a616f..2622dec25 100644 --- a/arch/arm/kernel/dec21285.c +++ b/arch/arm/kernel/dec21285.c @@ -18,11 +18,12 @@ #include <asm/irq.h> #include <asm/system.h> +#include "bios32.h" + #define MAX_SLOTS 21 extern int setup_arm_irq(int, struct irqaction *); extern void pcibios_report_device_errors(void); -extern int (*pci_irq_fixup)(struct pci_dev *dev); static unsigned long dc21285_base_address(struct pci_dev *dev, int where) @@ -202,129 +203,53 @@ static struct irqaction dc21285_error_action = { dc21285_error, SA_INTERRUPT, 0, "PCI error", NULL, NULL }; -static int irqmap_ebsa[] __initdata = { IRQ_IN1, IRQ_IN0, IRQ_PCI, IRQ_IN3 }; - -static int __init ebsa_irqval(struct pci_dev *dev) -{ - u8 pin; - - pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); - - return irqmap_ebsa[(PCI_SLOT(dev->devfn) + pin) & 3]; -} - -static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; - -static int __init cats_irqval(struct pci_dev *dev) -{ - if (dev->irq >= 128) - return 16 + (dev->irq & 0x1f); - - switch (dev->irq) { - case 1 ... 4: - return irqmap_cats[dev->irq - 1]; - - default: - printk("PCI: device %02x:%02x has unknown irq line %x\n", - dev->bus->number, dev->devfn, dev->irq); - case 0: - break; - } - return 0; -} - -static int __init netwinder_irqval(struct pci_dev *dev) -{ -#define DEV(v,d) ((v)<<16|(d)) - switch (DEV(dev->vendor, dev->device)) { - case DEV(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142): - return IRQ_NETWINDER_ETHER100; - - case DEV(PCI_VENDOR_ID_WINBOND2, 0x5a5a): - return IRQ_NETWINDER_ETHER10; - - case DEV(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553): - return 0; - - case DEV(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105): - return IRQ_ISA_HARDDISK1; - - case DEV(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_2000): - return IRQ_NETWINDER_VGA; - - default: - printk(KERN_ERR "PCI: %02X:%02X [%04X:%04X] unknown device\n", - dev->bus->number, dev->devfn, - dev->vendor, dev->device); - return 0; - } -} - -struct pci_ops * __init dc21285_init(int pass) +void __init dc21285_init(void) { unsigned int mem_size; unsigned long cntl; - if (pass == 0) { - mem_size = (unsigned int)high_memory - PAGE_OFFSET; - *CSR_SDRAMBASEMASK = (mem_size - 1) & 0x0ffc0000; - *CSR_SDRAMBASEOFFSET = 0; - *CSR_ROMBASEMASK = 0x80000000; - *CSR_CSRBASEMASK = 0; - *CSR_CSRBASEOFFSET = 0; - *CSR_PCIADDR_EXTN = 0; + mem_size = (unsigned int)high_memory - PAGE_OFFSET; + *CSR_SDRAMBASEMASK = (mem_size - 1) & 0x0ffc0000; + *CSR_SDRAMBASEOFFSET = 0; + *CSR_ROMBASEMASK = 0x80000000; + *CSR_CSRBASEMASK = 0; + *CSR_CSRBASEOFFSET = 0; + *CSR_PCIADDR_EXTN = 0; #ifdef CONFIG_HOST_FOOTBRIDGE - /* - * Map our SDRAM at a known address in PCI space, just in case - * the firmware had other ideas. Using a nonzero base is - * necessary, since some VGA cards forcefully use PCI addresses - * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). 
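Each machine now supplies a hw_pci record (host bridge init hook, I/O and memory allocation bases, a swizzle routine and a map_irq routine), and pcibios_init() simply hands the last two to pci_fixup_irqs(). A much-simplified sketch of how such a pair could be consumed; this is not the generic pci_fixup_irqs() implementation, bridge swizzling is ignored, and only interfaces already visible in this patch are used:

	static void __init sketch_fixup_irqs(struct hw_pci *hw)
	{
		struct pci_dev *dev;

		for (dev = pci_devices; dev; dev = dev->next) {
			u8 pin, slot;

			pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
			if (pin == 0)
				continue;	/* device uses no INTx line */

			slot = hw->swizzle(dev, &pin);	/* PCI_SLOT(dev->devfn) on ebsa285 */
			dev->irq = hw->map_irq(dev, slot, pin);
		}
	}

On the EBSA-285, for example, a device in slot 8 raising INTA (pin 1) lands on irqmap_ebsa285[(8 + 1) & 3], i.e. IRQ_IN0.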
- */ - *CSR_PCICACHELINESIZE = 0x00002008; - *CSR_PCICSRBASE = 0; - *CSR_PCICSRIOBASE = 0; - *CSR_PCISDRAMBASE = virt_to_bus((void *)PAGE_OFFSET); - *CSR_PCIROMBASE = 0; - *CSR_PCICMD = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER | PCI_COMMAND_FAST_BACK | - PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | - (1 << 31) | (1 << 29) | (1 << 28) | (1 << 24); + /* + * Map our SDRAM at a known address in PCI space, just in case + * the firmware had other ideas. Using a nonzero base is + * necessary, since some VGA cards forcefully use PCI addresses + * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). + */ + *CSR_PCICACHELINESIZE = 0x00002008; + *CSR_PCICSRBASE = 0; + *CSR_PCICSRIOBASE = 0; + *CSR_PCISDRAMBASE = virt_to_bus((void *)PAGE_OFFSET); + *CSR_PCIROMBASE = 0; + *CSR_PCICMD = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_FAST_BACK | + PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | + (1 << 31) | (1 << 29) | (1 << 28) | (1 << 24); #endif - printk(KERN_DEBUG"PCI: DC21285 footbridge, revision %02lX\n", - *CSR_CLASSREV & 0xff); - - switch (machine_arch_type) { - case MACH_TYPE_EBSA285: - pci_irq_fixup = ebsa_irqval; - break; - - case MACH_TYPE_CATS: - pci_irq_fixup = cats_irqval; - break; - - case MACH_TYPE_NETWINDER: - pci_irq_fixup = netwinder_irqval; - break; - } - - return &dc21285_ops; - } else { - /* - * Clear any existing errors - we aren't - * interested in historical data... - */ - cntl = *CSR_SA110_CNTL & 0xffffde07; - *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR; - cntl = *CSR_PCICMD & 0x0000ffff; - *CSR_PCICMD = cntl | 1 << 31 | 1 << 29 | 1 << 28 | 1 << 24; - - /* - * Initialise PCI error IRQ after we've finished probing - */ - setup_arm_irq(IRQ_PCI_ERR, &dc21285_error_action); - - return NULL; - } + printk(KERN_DEBUG"PCI: DC21285 footbridge, revision %02lX\n", + *CSR_CLASSREV & 0xff); + + pci_scan_bus(0, &dc21285_ops, NULL); + + /* + * Clear any existing errors - we aren't + * interested in historical data... + */ + cntl = *CSR_SA110_CNTL & 0xffffde07; + *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR; + cntl = *CSR_PCICMD & 0x0000ffff; + *CSR_PCICMD = cntl | 1 << 31 | 1 << 29 | 1 << 28 | 1 << 24; + + /* + * Initialise PCI error IRQ after we've finished probing + */ + setup_arm_irq(IRQ_PCI_ERR, &dc21285_error_action); } diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 99b2b2b1d..c777db993 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c @@ -888,19 +888,13 @@ int get_ecard_dev_info(char *buf, char **start, off_t pos, int count, int wr) return (count > cnt) ? cnt : count; } -static struct proc_dir_entry proc_ecard_devices = { - PROC_BUS_ECARD_DEVICES, 7, "devices", - S_IFREG | S_IRUGO, 1, 0, 0, - 0, &proc_array_inode_operations, - get_ecard_dev_info -}; - -static struct proc_dir_entry *proc_bus_ecard_dir; +static struct proc_dir_entry *proc_bus_ecard_dir = NULL; static void ecard_proc_init(void) { proc_bus_ecard_dir = create_proc_entry("ecard", S_IFDIR, proc_bus); - proc_register(proc_bus_ecard_dir, &proc_ecard_devices); + create_proc_info_entry("devices", 0, proc_bus_ecard_dir, + get_ecard_dev_info); } /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index ab92aae52..3c82ee68c 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -337,3 +337,31 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) return __ret; } +/* + * These bracket the sleeping functions.. 
+ */ +extern void scheduling_functions_start_here(void); +extern void scheduling_functions_end_here(void); +#define first_sched ((unsigned long) scheduling_functions_start_here) +#define last_sched ((unsigned long) scheduling_functions_end_here) + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long fp, lr; + unsigned long stack_page; + int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + stack_page = 4096 + (unsigned long)p; + fp = get_css_fp(&p->thread); + do { + if (fp < stack_page || fp > 4092+stack_page) + return 0; + lr = pc_pointer (((unsigned long *)fp)[-1]); + if (lr < first_sched || lr > last_sched) + return lr; + fp = *(unsigned long *) (fp - 12); + } while (count ++ < 16); + return 0; +} diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b09b3798b..340700dbe 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -20,6 +20,7 @@ #include <linux/blk.h> #include <linux/console.h> #include <linux/init.h> +#include <linux/bootmem.h> #include <asm/elf.h> #include <asm/hardware.h> @@ -42,50 +43,74 @@ extern void reboot_setup(char *str, int *ints); extern void disable_hlt(void); - -struct drive_info_struct { char dummy[32]; } drive_info; -struct screen_info screen_info = { - orig_video_lines: 30, - orig_video_cols: 80, - orig_video_mode: 0, - orig_video_ega_bx: 0, - orig_video_isVGA: 1, - orig_video_points: 8 -}; - extern int root_mountflags; -extern int _text, _etext, _edata, _end; - -unsigned char aux_device_present; - char elf_platform[ELF_PLATFORM_SIZE]; -unsigned int elf_hwcap; +extern int _stext, _text, _etext, _edata, _end; -/* - * From head-armv.S - */ unsigned int processor_id; unsigned int __machine_arch_type; unsigned int vram_size; unsigned int system_rev; unsigned int system_serial_low; unsigned int system_serial_high; -#ifdef MULTI_CPU -struct processor processor; -#endif +unsigned int elf_hwcap; + #ifdef CONFIG_ARCH_ACORN unsigned int memc_ctrl_reg; unsigned int number_mfm_drives; #endif +struct meminfo meminfo; + +#ifdef MULTI_CPU +struct processor processor; +#endif + +struct drive_info_struct { char dummy[32]; } drive_info; + +struct screen_info screen_info = { + orig_video_lines: 30, + orig_video_cols: 80, + orig_video_mode: 0, + orig_video_ega_bx: 0, + orig_video_isVGA: 1, + orig_video_points: 8 +}; + +unsigned char aux_device_present; +char elf_platform[ELF_PLATFORM_SIZE]; +char saved_command_line[COMMAND_LINE_SIZE]; + static struct proc_info_item proc_info; +static char command_line[COMMAND_LINE_SIZE] = { 0, }; + +static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; #define ENDIANNESS ((char)endian_test.l) -/*------------------------------------------------------------------------- - * Early initialisation routines for various configurable items in the - * kernel. Each one either supplies a setup_ function, or defines this - * symbol to be empty if not configured. 
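The get_wchan() walker added to process.c above follows saved APCS frame pointers: the word at fp - 4 (read as fp[-1]) is the saved return address, and the word at fp - 12 is the caller's frame pointer. The layout it relies on, written out as a struct purely for illustration (the saved-sp slot is inferred from the usual APCS frame, not from this patch):

	struct apcs_frame {
		unsigned long caller_fp;	/* fp - 12: caller's frame pointer */
		unsigned long caller_sp;	/* fp -  8: caller's stack pointer (assumed) */
		unsigned long saved_lr;		/* fp -  4: return address, read as fp[-1] */
		unsigned long saved_pc;		/* fp -  0: fp itself points at this word */
	};

The loop gives up once fp wanders outside the 4 kB region starting 4 kB above the task structure, and returns the first return address that falls outside the scheduling_functions_start_here/end_here bracket, i.e. the caller that put the task to sleep.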
+/* + * Standard memory resources */ +static struct resource mem_res[] = { + { "System RAM", 0, 0, IORESOURCE_MEM | IORESOURCE_BUSY }, + { "Video RAM", 0, 0, IORESOURCE_MEM }, + { "Kernel code", 0, 0, IORESOURCE_MEM }, + { "Kernel data", 0, 0, IORESOURCE_MEM } +}; + +#define system_ram mem_res[0] +#define video_ram mem_res[1] +#define kernel_code mem_res[2] +#define kernel_data mem_res[3] + +static struct resource io_res[] = { + { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY }, + { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY }, + { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY } +}; + +#define lp0 io_res[0] +#define lp1 io_res[1] +#define lp2 io_res[2] static void __init setup_processor(void) { @@ -124,55 +149,69 @@ static void __init setup_processor(void) cpu_proc_init(); } -static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; -static char command_line[COMMAND_LINE_SIZE] = { 0, }; - char saved_command_line[COMMAND_LINE_SIZE]; +static unsigned long __init memparse(char *ptr, char **retptr) +{ + unsigned long ret = simple_strtoul(ptr, retptr, 0); + + switch (**retptr) { + case 'M': + case 'm': + ret <<= 10; + case 'K': + case 'k': + ret <<= 10; + (*retptr)++; + default: + break; + } + return ret; +} +/* + * Initial parsing of the command line. We need to pick out the + * memory size. We look for mem=size@start, where start and size + * are "size[KkMm]" + */ static void __init -setup_mem(char *cmd_line, unsigned long *mem_sz) +parse_cmdline(char **cmdline_p, char *from) { char c = ' ', *to = command_line; - int len = 0; - - if (!*mem_sz) - *mem_sz = MEM_SIZE; + int usermem = 0, len = 0; for (;;) { - if (c == ' ') { - if (cmd_line[0] == 'm' && - cmd_line[1] == 'e' && - cmd_line[2] == 'm' && - cmd_line[3] == '=') { - *mem_sz = simple_strtoul(cmd_line+4, &cmd_line, 0); - switch(*cmd_line) { - case 'M': - case 'm': - *mem_sz <<= 10; - case 'K': - case 'k': - *mem_sz <<= 10; - cmd_line++; - } - } - /* if there are two spaces, remove one */ - if (*cmd_line == ' ') { - cmd_line++; - continue; + if (c == ' ' && !memcmp(from, "mem=", 4)) { + unsigned long size, start; + + if (to != command_line) + to -= 1; + + /* If the user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + usermem = 1; + meminfo.nr_banks = 0; } + + start = 0; + size = memparse(from + 4, &from); + if (*from == '@') + start = memparse(from + 1, &from); + + meminfo.bank[meminfo.nr_banks].start = start; + meminfo.bank[meminfo.nr_banks].size = size; + meminfo.nr_banks += 1; } - c = *cmd_line++; + c = *from++; if (!c) break; if (COMMAND_LINE_SIZE <= ++len) break; *to++ = c; } - *to = '\0'; - - /* remove trailing spaces */ - while (*--to == ' ' && to != command_line) - *to = '\0'; + *cmdline_p = command_line; } static void __init @@ -199,51 +238,90 @@ setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) static void __init setup_initrd(unsigned int start, unsigned int size) { #ifdef CONFIG_BLK_DEV_INITRD - if (start) { - initrd_start = start; - initrd_end = start + size; - } else { - initrd_start = 0; - initrd_end = 0; - } + if (start == 0) + size = 0; + initrd_start = start; + initrd_end = start + size; #endif } -static void __init check_initrd(unsigned long mem_end) +/* + * Work out our memory regions. Note that "pfn" is the physical page number + * relative to the first physical page, not the physical address itself. 
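parse_cmdline() above picks mem=<size>[KkMm][@<start>[KkMm]] out of the command line and appends one meminfo bank per occurrence; the first user-supplied mem= also discards whatever banks the boot code had registered. A userspace rendering of the same parsing, with strtoul() standing in for the kernel's simple_strtoul() (everything else mirrors the hunk above):

	#include <stdio.h>
	#include <stdlib.h>

	static unsigned long memparse(char *ptr, char **retptr)
	{
		unsigned long ret = strtoul(ptr, retptr, 0);

		switch (**retptr) {
		case 'M':
		case 'm':
			ret <<= 10;	/* fall through: M is K times 1024 */
		case 'K':
		case 'k':
			ret <<= 10;
			(*retptr)++;
		default:
			break;
		}
		return ret;
	}

	int main(void)
	{
		static char arg[] = "mem=64M@0xc0000000";
		char *p = arg + 4;
		unsigned long size, start = 0;

		size = memparse(p, &p);
		if (*p == '@')
			start = memparse(p + 1, &p);

		/* prints: bank start 0xc0000000 size 0x04000000 */
		printf("bank start 0x%08lx size 0x%08lx\n", start, size);
		return 0;
	}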
+ */ +static void __init setup_bootmem(void) { + unsigned int end_pfn, bootmem_end; + int bank; + + /* + * Calculate the end of memory. + */ + for (bank = 0; bank < meminfo.nr_banks; bank++) { + if (meminfo.bank[bank].size) { + unsigned long end; + + end = meminfo.bank[bank].start + + meminfo.bank[bank].size; + if (meminfo.end < end) + meminfo.end = end; + } + } + + bootmem_end = __pa(PAGE_ALIGN((unsigned long)&_end)); + end_pfn = meminfo.end >> PAGE_SHIFT; + + /* + * Initialise the boot-time allocator + */ + bootmem_end += init_bootmem(bootmem_end >> PAGE_SHIFT, end_pfn); + + /* + * Register all available RAM with the bootmem allocator. + * The address is relative to the start of physical memory. + */ + for (bank = 0; bank < meminfo.nr_banks; bank ++) + free_bootmem(meminfo.bank[bank].start, meminfo.bank[bank].size); + + /* + * reserve the following regions: + * physical page 0 - it contains the exception vectors + * kernel and the bootmem structure + * swapper page directory (if any) + * initrd (if any) + */ + reserve_bootmem(0, PAGE_SIZE); +#ifdef CONFIG_CPU_32 + reserve_bootmem(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(void *)); +#endif + reserve_bootmem(__pa(&_stext), bootmem_end - __pa(&_stext)); #ifdef CONFIG_BLK_DEV_INITRD - if (initrd_end > mem_end) { + if (__pa(initrd_end) > (end_pfn << PAGE_SHIFT)) { printk ("initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx) - disabling initrd\n", - initrd_end, mem_end); + "(0x%08lx > 0x%08x) - disabling initrd\n", + __pa(initrd_end), end_pfn << PAGE_SHIFT); initrd_start = 0; } + + if (initrd_start) + reserve_bootmem(__pa(initrd_start), + initrd_end - initrd_start); #endif } -/* - * Standard memory resources - */ -static struct resource system_ram = { "System RAM", 0, 0, IORESOURCE_MEM | IORESOURCE_BUSY }; -static struct resource video_ram = { "Video RAM", 0, 0, IORESOURCE_MEM }; -static struct resource kernel_code = { "Kernel code", 0, 0, IORESOURCE_MEM }; -static struct resource kernel_data = { "Kernel data", 0, 0, IORESOURCE_MEM }; -static struct resource lpt1 = { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY }; -static struct resource lpt2 = { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY }; -static struct resource lpt3 = { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }; - -static void __init request_standard_resources(unsigned long end) +static void __init request_standard_resources(void) { kernel_code.start = __virt_to_bus((unsigned long) &_text); kernel_code.end = __virt_to_bus((unsigned long) &_etext - 1); kernel_data.start = __virt_to_bus((unsigned long) &_etext); kernel_data.end = __virt_to_bus((unsigned long) &_edata - 1); system_ram.start = __virt_to_bus(PAGE_OFFSET); - system_ram.end = __virt_to_bus(end - 1); + system_ram.end = __virt_to_bus(meminfo.end + PAGE_OFFSET - 1); request_resource(&iomem_resource, &system_ram); request_resource(&system_ram, &kernel_code); request_resource(&system_ram, &kernel_data); + if (video_ram.start != video_ram.end) request_resource(&iomem_resource, &video_ram); @@ -253,17 +331,16 @@ static void __init request_standard_resources(unsigned long end) */ if (machine_is_ebsa110() || machine_is_riscpc() || machine_is_netwinder()) - request_resource(&ioport_resource, &lpt1); + request_resource(&ioport_resource, &lp0); if (machine_is_riscpc()) - request_resource(&ioport_resource, &lpt2); + request_resource(&ioport_resource, &lp1); if (machine_is_ebsa110() || machine_is_netwinder()) - request_resource(&ioport_resource, &lpt3); + 
request_resource(&ioport_resource, &lp2); } -void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigned long * memory_end_p) +void __init setup_arch(char **cmdline_p) { struct param_struct *params = (struct param_struct *)PARAMS_BASE; - unsigned long memory_end = 0; char *from = default_command_line; #if defined(CONFIG_ARCH_ARC) @@ -296,10 +373,6 @@ void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigne case MACH_TYPE_RISCPC: /* RiscPC can't handle half-word loads and stores */ elf_hwcap &= ~HWCAP_HALF; - { - extern void init_dram_banks(struct param_struct *); - init_dram_banks(params); - } switch (params->u1.s.pages_in_vram) { case 512: @@ -309,6 +382,17 @@ void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigne default: break; } + { + int i; + + for (i = 0; i < 4; i++) { + meminfo.bank[i].start = i << 26; + meminfo.bank[i].size = + params->u1.s.pages_in_bank[i] * + params->u1.s.page_size; + } + meminfo.nr_banks = 4; + } #endif case MACH_TYPE_ARCHIMEDES: case MACH_TYPE_A5K: @@ -347,7 +431,7 @@ void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigne */ reboot_setup("s", NULL); params = NULL; - ORIG_VIDEO_LINES = 25; + ORIG_VIDEO_LINES = 25; ORIG_VIDEO_POINTS = 16; ORIG_Y = 24; video_ram.start = 0x0a0000; @@ -393,7 +477,11 @@ void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigne } if (params) { - memory_end = PAGE_SIZE * params->u1.s.nr_pages; + if (meminfo.nr_banks == 0) { + meminfo.nr_banks = 1; + meminfo.bank[0].start = 0; + meminfo.bank[0].size = params->u1.s.nr_pages << PAGE_SHIFT; + } ROOT_DEV = to_kdev_t(params->u1.s.rootdev); system_rev = params->u1.s.system_rev; system_serial_low = params->u1.s.system_serial_low; @@ -413,24 +501,23 @@ void __init setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigne from = params->commandline; } - /* Save unparsed command line copy for /proc/cmdline */ - memcpy(saved_command_line, from, COMMAND_LINE_SIZE); - saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; - - setup_mem(from, &memory_end); - - memory_end += PAGE_OFFSET; + if (meminfo.nr_banks == 0) { + meminfo.nr_banks = 1; + meminfo.bank[0].start = 0; + meminfo.bank[0].size = MEM_SIZE; + } - *cmdline_p = command_line; init_mm.start_code = (unsigned long) &_text; init_mm.end_code = (unsigned long) &_etext; init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; - *memory_start_p = (unsigned long) &_end; - *memory_end_p = memory_end; - request_standard_resources(memory_end); - check_initrd(memory_end); + /* Save unparsed command line copy for /proc/cmdline */ + memcpy(saved_command_line, from, COMMAND_LINE_SIZE); + saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; + parse_cmdline(cmdline_p, from); + setup_bootmem(); + request_standard_resources(); #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 3d39c8d39..f25544c14 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -29,8 +29,6 @@ /* * Constant strings used in inlined functions in header files */ -/* proc/system.h */ -const char xchg_str[] = "xchg"; /* * sys_pipe() is the normal C calling standard for creating diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index da0d464f6..038946e9c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -34,7 +34,10 @@ char *processor_modes[]= "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; 
-static char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; +/* proc/system.h */ +const char xchg_str[] = "xchg"; + +static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; static inline void console_verbose(void) { @@ -335,10 +338,11 @@ asmlinkage void deferred(int n, struct pt_regs *regs) } #ifdef CONFIG_DEBUG_USER - printk(KERN_ERR "[%d] %s: old system call.\n", current->pid, - current->comm); + printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", current->pid, + current->comm, n); #endif force_sig(SIGILL, current); + die_if_kernel("Oops", regs, n); } asmlinkage void arm_malalignedptr(const char *str, void *pc, volatile void *ptr) @@ -385,8 +389,37 @@ asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs } #endif +void __bug(const char *file, int line, void *data) +{ + printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line); + if (data) + printk(KERN_CRIT"extra data = %p\n", data); + *(int *)0 = 0; +} + +void __readwrite_bug(const char *fn) +{ + printk("%s called, but not implemented", fn); + *(int *)0 = 0; +} + +void __pte_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pte %08lx.\n", file, line, val); +} + +void __pmd_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pmd %08lx.\n", file, line, val); +} + +void __pgd_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pgd %08lx.\n", file, line, val); +} + asmlinkage void __div0(void) { - printk("Awooga, division by zero in kernel.\n"); + printk("Division by zero in kernel.\n"); __backtrace(); } diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 8c2df7f5e..8715ea271 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -90,6 +90,7 @@ static unsigned long ai_half; static unsigned long ai_word; static unsigned long ai_multi; +#ifdef CONFIG_SYSCTL static int proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -113,23 +114,18 @@ static int proc_alignment_read(char *page, char **start, off_t off, return len; } -#ifdef CONFIG_SYSCTL /* * This needs to be done after sysctl_init, otherwise sys/ * will be overwritten. */ void __init alignment_init(void) { - struct proc_dir_entry *e; - - e = create_proc_entry("sys/debug/alignment", S_IFREG | S_IRUGO, NULL); - - if (e) - e->read_proc = proc_alignment_read; + create_proc_read_entry("sys/debug/alignment", 0, NULL, + proc_alignment_read); } __initcall(alignment_init); -#endif +#endif /* CONFIG_SYSCTL */ static int do_alignment_exception(struct pt_regs *regs) diff --git a/arch/arm/mm/fault-common.c b/arch/arm/mm/fault-common.c index c87fa760e..e516261ed 100644 --- a/arch/arm/mm/fault-common.c +++ b/arch/arm/mm/fault-common.c @@ -8,24 +8,6 @@ extern void die(char *msg, struct pt_regs *regs, unsigned int err); -void __bad_pmd(pmd_t *pmd) -{ - printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); -#ifdef CONFIG_DEBUG_ERRORS - __backtrace(); -#endif - set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE)); -} - -void __bad_pmd_kernel(pmd_t *pmd) -{ - printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd)); -#ifdef CONFIG_DEBUG_ERRORS - __backtrace(); -#endif - set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE)); -} - /* * This is useful to dump out the page tables associated with * 'addr' in mm 'mm'. 
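The new __bug() and __readwrite_bug() helpers print the failure location and then deliberately dereference a NULL pointer so the resulting abort produces a register dump and backtrace; likewise __pte_error()/__pmd_error()/__pgd_error() back the p*_ERROR() reporting used elsewhere in this patch (see __handle_bad_pmd() in mm/init.c). The callers are not part of this diff; the wrappers presumably look something like the following, which is a guess at the header side rather than anything shown here:

	/* hypothetical header-side wrappers, for illustration only */
	#define BUG()		__bug(__FILE__, __LINE__, NULL)
	#define pmd_ERROR(pmd)	__pmd_error(__FILE__, __LINE__, pmd_val(pmd))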
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 115cec885..229b4dcd7 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -1,7 +1,7 @@ /* * linux/arch/arm/mm/init.c * - * Copyright (C) 1995-1999 Russell King + * Copyright (C) 1995-1999 Russell King */ #include <linux/config.h> @@ -18,6 +18,7 @@ #include <linux/swapctl.h> #include <linux/smp.h> #include <linux/init.h> +#include <linux/bootmem.h> #ifdef CONFIG_BLK_DEV_INITRD #include <linux/blk.h> #endif @@ -27,73 +28,94 @@ #include <asm/pgtable.h> #include <asm/dma.h> #include <asm/hardware.h> +#include <asm/setup.h> #include "map.h" +static unsigned long totalram_pages; pgd_t swapper_pg_dir[PTRS_PER_PGD]; -#ifndef CONFIG_NO_PGT_CACHE -struct pgtable_cache_struct quicklists; -#endif -extern unsigned long free_area_init(unsigned long, unsigned long); extern void show_net_buffers(void); -extern char _etext, _text, _edata, __bss_start, _end; -extern char __init_begin, __init_end; - -int do_check_pgt_cache(int low, int high) -{ - int freed = 0; -#ifndef CONFIG_NO_PGT_CACHE - if(pgtable_cache_size > high) { - do { - if(pgd_quicklist) - free_pgd_slow(get_pgd_fast()), freed++; - if(pmd_quicklist) - free_pmd_slow(get_pmd_fast()), freed++; - if(pte_quicklist) - free_pte_slow(get_pte_fast()), freed++; - } while(pgtable_cache_size > low); - } -#endif - return freed; -} - /* - * BAD_PAGE is the page that is used for page faults when linux - * is out-of-memory. Older versions of linux just did a + * empty_bad_page is the page that is used for page faults when + * linux is out-of-memory. Older versions of linux just did a * do_exit(), but using this instead means there is less risk * for a process dying in kernel mode, possibly leaving a inode * unused etc.. * - * BAD_PAGETABLE is the accompanying page-table: it is initialized - * to point to BAD_PAGE entries. + * empty_bad_pte_table is the accompanying page-table: it is + * initialized to point to BAD_PAGE entries. * - * ZERO_PAGE is a special page that is used for zero-initialized - * data and COW. + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
*/ -pte_t *empty_bad_page_table; +struct page *empty_zero_page; +struct page *empty_bad_page; +pte_t *empty_bad_pte_table; -pte_t *__bad_pagetable(void) +pte_t *get_bad_pte_table(void) { - pte_t bad_page; + pte_t v; int i; - bad_page = BAD_PAGE; + v = pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED)); + for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(empty_bad_page_table + i, bad_page); + set_pte(empty_bad_pte_table + i, v); + + return empty_bad_pte_table; +} + +void __handle_bad_pmd(pmd_t *pmd) +{ + pmd_ERROR(*pmd); +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif + set_pmd(pmd, mk_user_pmd(get_bad_pte_table())); +} - return empty_bad_page_table; +void __handle_bad_pmd_kernel(pmd_t *pmd) +{ + pmd_ERROR(*pmd); +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif + set_pmd(pmd, mk_kernel_pmd(get_bad_pte_table())); } -unsigned long *empty_zero_page; -unsigned long *empty_bad_page; +#ifndef CONFIG_NO_PGT_CACHE +struct pgtable_cache_struct quicklists; + +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; -pte_t __bad_page(void) + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) { + free_pgd_slow(get_pgd_fast()); + freed++; + } + if(pmd_quicklist) { + free_pmd_slow(get_pmd_fast()); + freed++; + } + if(pte_quicklist) { + free_pte_slow(get_pte_fast()); + freed++; + } + } while(pgtable_cache_size > low); + } + return freed; +} +#else +int do_check_pgt_cache(int low, int high) { - memzero (empty_bad_page, PAGE_SIZE); - return pte_nocache(pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED))); + return 0; } +#endif void show_mem(void) { @@ -104,23 +126,28 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); - for (page = mem_map, end = mem_map + max_mapnr; - page < end; page++) { + + page = mem_map; + end = mem_map + max_mapnr; + + do { if (PageSkip(page)) { - if (page->next_hash < page) - break; page = page->next_hash; + if (page == NULL) + break; } total++; if (PageReserved(page)) reserved++; else if (PageSwapCache(page)) cached++; - else if (!atomic_read(&page->count)) + else if (!page_count(page)) free++; else shared += atomic_read(&page->count) - 1; - } + page++; + } while (page < end); + printk("%d pages of RAM\n", total); printk("%d free pages\n", free); printk("%d reserved pages\n", reserved); @@ -138,31 +165,42 @@ void show_mem(void) /* * paging_init() sets up the page tables... 
*/ -unsigned long __init paging_init(unsigned long start_mem, unsigned long end_mem) +void __init paging_init(void) { - start_mem = PAGE_ALIGN(start_mem); - - empty_zero_page = (unsigned long *)start_mem; - memzero(empty_zero_page, PAGE_SIZE); - start_mem += PAGE_SIZE; - - empty_bad_page = (unsigned long *)start_mem; - start_mem += PAGE_SIZE; + void *zero_page, *bad_page, *bad_table; #ifdef CONFIG_CPU_32 - start_mem += PTRS_PER_PTE * BYTES_PER_PTR; +#define TABLE_OFFSET (PTRS_PER_PTE) +#else +#define TABLE_OFFSET 0 #endif - empty_bad_page_table = (pte_t *)start_mem; - start_mem += PTRS_PER_PTE * BYTES_PER_PTR; - - start_mem = setup_page_tables(start_mem, end_mem); - +#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(void *)) + + /* + * allocate what we need for the bad pages + */ + zero_page = alloc_bootmem_low_pages(PAGE_SIZE); + bad_page = alloc_bootmem_low_pages(PAGE_SIZE); + bad_table = alloc_bootmem_low_pages(TABLE_SIZE); + + /* + * initialise the page tables + */ + pagetable_init(); flush_tlb_all(); - end_mem &= PAGE_MASK; - high_memory = (void *)end_mem; + free_area_init(max_low_pfn); + + /* + * finish off the bad pages once + * the mem_map is initialised + */ + memzero(zero_page, PAGE_SIZE); + memzero(bad_page, PAGE_SIZE); - return free_area_init(start_mem, end_mem); + empty_zero_page = mem_map + MAP_NR(zero_page); + empty_bad_page = mem_map + MAP_NR(bad_page); + empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET; } static inline void free_unused_mem_map(void) @@ -184,7 +222,7 @@ static inline void free_unused_mem_map(void) high = ((unsigned long)page->next_hash) & PAGE_MASK; while (low < high) { - clear_bit(PG_reserved, &mem_map[MAP_NR(low)].flags); + ClearPageReserved(mem_map + MAP_NR(low)); low += PAGE_SIZE; } } @@ -195,67 +233,35 @@ static inline void free_unused_mem_map(void) * memory is free. This is done after various parts of the system have * claimed their memory after the kernel image. 
*/ -void __init mem_init(unsigned long start_mem, unsigned long end_mem) +void __init mem_init(void) { int codepages = 0; int reservedpages = 0; int datapages = 0; int initpages = 0, i, min_nr; - unsigned long tmp; - end_mem &= PAGE_MASK; - high_memory = (void *)end_mem; - max_mapnr = MAP_NR(end_mem); - num_physpages = 0; - - /* setup address validity bitmap */ - start_mem = create_mem_holes(start_mem, end_mem); - - start_mem = PAGE_ALIGN(start_mem); - - /* mark usable pages in the mem_map[] */ - mark_usable_memory_areas(start_mem, end_mem); - - /* free unused mem_map[] entries */ - free_unused_mem_map(); + max_mapnr = max_low_pfn; + high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); -#define BETWEEN(w,min,max) ((w) >= (unsigned long)(min) && \ - (w) < (unsigned long)(max)) - - for (tmp = PAGE_OFFSET; tmp < end_mem ; tmp += PAGE_SIZE) { - if (PageSkip(mem_map+MAP_NR(tmp))) { - unsigned long next; - - next = mem_map[MAP_NR(tmp)].next_hash - mem_map; - - next = (next << PAGE_SHIFT) + PAGE_OFFSET; - - if (next < tmp || next >= end_mem) - break; - tmp = next; - } - num_physpages++; - if (PageReserved(mem_map+MAP_NR(tmp))) { - if (BETWEEN(tmp, &__init_begin, &__init_end)) - initpages++; - else if (BETWEEN(tmp, &_text, &_etext)) - codepages++; - else if (BETWEEN(tmp, &_etext, &_edata)) - datapages++; - else if (BETWEEN(tmp, &__bss_start, start_mem)) - datapages++; - else - reservedpages++; - continue; - } - atomic_set(&mem_map[MAP_NR(tmp)].count, 1); -#ifdef CONFIG_BLK_DEV_INITRD - if (!initrd_start || !BETWEEN(tmp, initrd_start, initrd_end)) +#ifdef CONFIG_CPU_32 + /* + * We may have non-contiguous memory. Setup the PageSkip stuff, + * and mark the areas of mem_map which can be freed + */ + if (meminfo.nr_banks != 1) + create_memmap_holes(); #endif - free_page(tmp); - } -#undef BETWEEN + /* this will put all unused low memory onto the freelists */ + totalram_pages += free_all_bootmem(); + + /* + * Since our memory may not be contiguous, calculate the + * real number of pages we have in this system + */ + num_physpages = 0; + for (i = 0; i < meminfo.nr_banks; i++) + num_physpages += meminfo.bank[i].size >> PAGE_SHIFT; printk ("Memory: %luk/%luM available (%dk code, %dk reserved, %dk data, %dk init)\n", (unsigned long) nr_free_pages << (PAGE_SHIFT-10), @@ -265,6 +271,9 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem) datapages << (PAGE_SHIFT-10), initpages << (PAGE_SHIFT-10)); + /* + * Correct freepages watermarks + */ i = nr_free_pages >> 7; if (PAGE_SIZE < 32768) min_nr = 10; @@ -288,22 +297,26 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem) #endif } -static void free_area(unsigned long addr, unsigned long end, char *s) +static inline void free_area(unsigned long addr, unsigned long end, char *s) { unsigned int size = (end - addr) >> 10; + struct page *page = mem_map + MAP_NR(addr); - for (; addr < end; addr += PAGE_SIZE) { - mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved); - atomic_set(&mem_map[MAP_NR(addr)].count, 1); + for (; addr < end; addr += PAGE_SIZE, page ++) { + ClearPageReserved(page); + set_page_count(page, 1); free_page(addr); + totalram_pages++; } if (size) printk(" %dk %s", size, s); } -void free_initmem (void) +void free_initmem(void) { + extern char __init_begin, __init_end; + printk("Freeing unused kernel memory:"); free_area((unsigned long)(&__init_begin), @@ -333,28 +346,11 @@ void free_initmem (void) void si_meminfo(struct sysinfo *val) { - struct page *page, *end; - - val->totalram = 0; + val->totalram = totalram_pages; 
val->sharedram = 0; - val->freeram = nr_free_pages << PAGE_SHIFT; - val->bufferram = atomic_read(&buffermem); - for (page = mem_map, end = mem_map + max_mapnr; - page < end; page++) { - if (PageSkip(page)) { - if (page->next_hash < page) - break; - page = page->next_hash; - } - if (PageReserved(page)) - continue; - val->totalram++; - if (!atomic_read(&page->count)) - continue; - val->sharedram += atomic_read(&page->count) - 1; - } - val->totalram <<= PAGE_SHIFT; - val->sharedram <<= PAGE_SHIFT; - val->totalbig = 0; - val->freebig = 0; + val->freeram = nr_free_pages; + val->bufferram = atomic_read(&buffermem_pages); + val->totalhigh = 0; + val->freehigh = 0; + val->mem_unit = PAGE_SIZE; } diff --git a/arch/arm/mm/map.h b/arch/arm/mm/map.h index a1fc92b2c..0a3ee8b4d 100644 --- a/arch/arm/mm/map.h +++ b/arch/arm/mm/map.h @@ -16,17 +16,9 @@ struct map_desc { bufferable:1; }; -struct mem_desc { - unsigned long virt_start; - unsigned long virt_end; -}; - extern struct map_desc io_desc[]; extern unsigned int io_desc_size; -extern struct mem_desc mem_desc[]; -extern unsigned int mem_desc_size; -extern void mark_usable_memory_areas(unsigned long start, unsigned long end); -extern unsigned long create_mem_holes(unsigned long start, unsigned long end); -extern unsigned long setup_page_tables(unsigned long start, unsigned long end); +extern void create_memmap_holes(void); +extern void pagetable_init(void); diff --git a/arch/arm/mm/mm-armo.c b/arch/arm/mm/mm-armo.c index 55245f4e8..5ee95ea45 100644 --- a/arch/arm/mm/mm-armo.c +++ b/arch/arm/mm/mm-armo.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/init.h> +#include <linux/bootmem.h> #include <asm/pgtable.h> #include <asm/page.h> @@ -18,40 +19,54 @@ #define MEMC_TABLE_SIZE (256*sizeof(unsigned long)) #define PGD_TABLE_SIZE (PTRS_PER_PGD * BYTES_PER_PTR) -/* - * FIXME: the following over-allocates by 6400% - */ -static inline void *alloc_table(int size, int prio) -{ - if (size != 128) - printk("invalid table size\n"); - return (void *)get_page_8k(prio); -} +int page_nr; + +extern unsigned long get_page_2k(int prio); +extern void free_page_2k(unsigned long); +extern pte_t *get_bad_pte_table(void); /* * Allocate a page table. Note that we place the MEMC * table before the page directory. This means we can * easily get to both tightly-associated data structures - * with a single pointer. This function is slightly - * better - it over-allocates by only 711% + * with a single pointer. + * + * We actually only need 1152 bytes, 896 bytes is wasted. + * We could try to fit 7 PTEs into that slot somehow. 
*/ static inline void *alloc_pgd_table(int priority) { - unsigned long pg8k; + unsigned long pg2k; - pg8k = get_page_8k(priority); - if (pg8k) - pg8k += MEMC_TABLE_SIZE; + pg2k = get_page_2k(priority); + if (pg2k) + pg2k += MEMC_TABLE_SIZE; - return (void *)pg8k; + return (void *)pg2k; } -void free_table(void *table) +void free_pgd_slow(pgd_t *pgd) { - unsigned long tbl = (unsigned long)table; + unsigned long tbl = (unsigned long)pgd; + + tbl -= MEMC_TABLE_SIZE; + free_page_2k(tbl); +} - tbl &= ~8191; - free_page_8k(tbl); +/* + * FIXME: the following over-allocates by 1600% + */ +static inline void *alloc_pte_table(int size, int prio) +{ + if (size != 128) + printk("invalid table size\n"); + return (void *)get_page_2k(prio); +} + +void free_pte_slow(pte_t *pte) +{ + unsigned long tbl = (unsigned long)pte; + free_page_2k(tbl); } pgd_t *get_pgd_slow(void) @@ -62,9 +77,9 @@ pgd_t *get_pgd_slow(void) if (pgd) { pgd_t *init = pgd_offset(&init_mm, 0); - memzero(pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR); + memzero(pgd, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * BYTES_PER_PTR); + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); /* * On ARM, first page must always be allocated @@ -92,7 +107,7 @@ pgd_t *get_pgd_slow(void) nomem_pmd: pmd_free(new_pmd); nomem: - free_table(pgd); + free_pgd_slow(pgd); return NULL; } @@ -100,19 +115,19 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) { pte_t *pte; - pte = (pte_t *)alloc_table(PTRS_PER_PTE * BYTES_PER_PTR, GFP_KERNEL); + pte = (pte_t *)alloc_pte_table(PTRS_PER_PTE * sizeof(pte_t), GFP_KERNEL); if (pmd_none(*pmd)) { if (pte) { - memzero(pte, PTRS_PER_PTE * BYTES_PER_PTR); - set_pmd(pmd, mk_pmd(pte)); + memzero(pte, PTRS_PER_PTE * sizeof(pte_t)); + set_pmd(pmd, mk_user_pmd(pte)); return pte + offset; } - set_pmd(pmd, mk_pmd(BAD_PAGETABLE)); + set_pmd(pmd, mk_user_pmd(get_bad_pte_table())); return NULL; } - free_table((void *)pte); + free_pte_slow(pte); if (pmd_bad(*pmd)) { - __bad_pmd(pmd); + __handle_bad_pmd(pmd); return NULL; } return (pte_t *) pmd_page(*pmd) + offset; @@ -124,47 +139,22 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) * some more work to get it to fit into our separate processor and * architecture structure. 
*/ -int page_nr; - -#define PTE_SIZE (PTRS_PER_PTE * BYTES_PER_PTR) - -static inline void setup_swapper_dir (int index, pte_t *ptep) +void __init pagetable_init(void) { - set_pmd (pmd_offset (swapper_pg_dir + index, 0), mk_pmd (ptep)); -} - -unsigned long __init -setup_page_tables(unsigned long start_mem, unsigned long end_mem) -{ - unsigned int i; - union { unsigned long l; pte_t *pte; } u; + pte_t *pte; + int i; - page_nr = MAP_NR(end_mem); + page_nr = max_low_pfn; - /* map in pages for (0x0000 - 0x8000) */ - u.l = ((start_mem + (PTE_SIZE-1)) & ~(PTE_SIZE-1)); - start_mem = u.l + PTE_SIZE; - memzero (u.pte, PTE_SIZE); - u.pte[0] = mk_pte(PAGE_OFFSET + 491520, PAGE_READONLY); - setup_swapper_dir (0, u.pte); + pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t)); + memzero(pte, PTRS_PER_PTE * sizeof(pte_t)); + pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY); + set_pmd(pmd_offset(swapper_pg_dir, 0), mk_kernel_pmd(pte)); for (i = 1; i < PTRS_PER_PGD; i++) pgd_val(swapper_pg_dir[i]) = 0; - - return start_mem; } -unsigned long __init -create_mem_holes(unsigned long start, unsigned long end) +void __init create_memmap_holes(void) { - return start; -} - -void __init -mark_usable_memory_areas(unsigned long start_mem, unsigned long end_mem) -{ - while (start_mem < end_mem) { - clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags); - start_mem += PAGE_SIZE; - } } diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c index d52c21cc4..3df6c13b5 100644 --- a/arch/arm/mm/mm-armv.c +++ b/arch/arm/mm/mm-armv.c @@ -1,22 +1,28 @@ /* - * arch/arm/mm/mm-armv.c + * linux/arch/arm/mm/mm-armv.c * - * Page table sludge for ARM v3 and v4 processor architectures. + * Page table sludge for ARM v3 and v4 processor architectures. * - * Copyright (C) 1998-1999 Russell King + * Copyright (C) 1998-1999 Russell King */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/init.h> +#include <linux/bootmem.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/io.h> +#include <asm/setup.h> #include "map.h" unsigned long *valid_addr_bitmap; +extern unsigned long get_page_2k(int priority); +extern void free_page_2k(unsigned long page); +extern pte_t *get_bad_pte_table(void); + /* * need to get a 16k page for level 1 */ @@ -26,12 +32,12 @@ pgd_t *get_pgd_slow(void) pmd_t *new_pmd; if (pgd) { - pgd_t *init = pgd_offset(&init_mm, 0); + pgd_t *init = pgd_offset_k(0); - memzero(pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR); + memzero(pgd, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * BYTES_PER_PTR); - clean_cache_area(pgd, PTRS_PER_PGD * BYTES_PER_PTR); + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + clean_cache_area(pgd, PTRS_PER_PGD * sizeof(pgd_t)); /* * On ARM, first page must always be allocated @@ -48,7 +54,7 @@ pgd_t *get_pgd_slow(void) pte_t *new_pte = pte_offset(new_pmd, 0); pte_t *old_pte = pte_offset(old_pmd, 0); - set_pte (new_pte, *old_pte); + set_pte(new_pte, *old_pte); } } } @@ -61,6 +67,31 @@ nomem: return NULL; } +void free_pgd_slow(pgd_t *pgd) +{ + if (pgd) { /* can pgd be NULL? 
*/ + pmd_t *pmd; + pte_t *pte; + + /* pgd is always present and good */ + pmd = (pmd_t *)pgd; + if (pmd_none(*pmd)) + goto free; + if (pmd_bad(*pmd)) { + pmd_ERROR(*pmd); + pmd_clear(pmd); + goto free; + } + + pte = pte_offset(pmd, 0); + pmd_clear(pmd); + pte_free(pte); + pmd_free(pmd); + } +free: + free_pages((unsigned long) pgd, 2); +} + pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) { pte_t *pte; @@ -68,18 +99,18 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) pte = (pte_t *)get_page_2k(GFP_KERNEL); if (pmd_none(*pmd)) { if (pte) { - memzero(pte, 2 * PTRS_PER_PTE * BYTES_PER_PTR); - clean_cache_area(pte, PTRS_PER_PTE * BYTES_PER_PTR); + memzero(pte, 2 * PTRS_PER_PTE * sizeof(pte_t)); + clean_cache_area(pte, PTRS_PER_PTE * sizeof(pte_t)); pte += PTRS_PER_PTE; set_pmd(pmd, mk_user_pmd(pte)); return pte + offset; } - set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE)); + set_pmd(pmd, mk_user_pmd(get_bad_pte_table())); return NULL; } free_page_2k((unsigned long)pte); if (pmd_bad(*pmd)) { - __bad_pmd(pmd); + __handle_bad_pmd(pmd); return NULL; } return (pte_t *) pmd_page(*pmd) + offset; @@ -92,23 +123,28 @@ pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset) pte = (pte_t *)get_page_2k(GFP_KERNEL); if (pmd_none(*pmd)) { if (pte) { - memzero(pte, 2 * PTRS_PER_PTE * BYTES_PER_PTR); - clean_cache_area(pte, PTRS_PER_PTE * BYTES_PER_PTR); + memzero(pte, 2 * PTRS_PER_PTE * sizeof(pte_t)); + clean_cache_area(pte, PTRS_PER_PTE * sizeof(pte_t)); pte += PTRS_PER_PTE; set_pmd(pmd, mk_kernel_pmd(pte)); return pte + offset; } - set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE)); + set_pmd(pmd, mk_kernel_pmd(get_bad_pte_table())); return NULL; } free_page_2k((unsigned long)pte); if (pmd_bad(*pmd)) { - __bad_pmd_kernel(pmd); + __handle_bad_pmd_kernel(pmd); return NULL; } return (pte_t *) pmd_page(*pmd) + offset; } +void free_pte_slow(pte_t *pte) +{ + free_page_2k((unsigned long)(pte - PTRS_PER_PTE)); +} + /* * Create a SECTION PGD between VIRT and PHYS in domain * DOMAIN with protection PROT @@ -131,34 +167,22 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot) * the hardware pte table. */ static inline void -alloc_init_page(unsigned long *mem, unsigned long virt, unsigned long phys, int domain, int prot) +alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot) { pmd_t *pmdp; pte_t *ptep; pmdp = pmd_offset(pgd_offset_k(virt), virt); -#define PTE_SIZE (PTRS_PER_PTE * BYTES_PER_PTR) - if (pmd_none(*pmdp)) { - unsigned long memory = *mem; - - memory = (memory + PTE_SIZE - 1) & ~(PTE_SIZE - 1); + pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * + sizeof(pte_t)); - ptep = (pte_t *)memory; - memzero(ptep, PTE_SIZE); - memory += PTE_SIZE; - - ptep = (pte_t *)memory; - memzero(ptep, PTE_SIZE); + memzero(ptep, 2 * PTRS_PER_PTE * sizeof(pte_t)); + ptep += PTRS_PER_PTE; set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain))); - - *mem = memory + PTE_SIZE; } - -#undef PTE_SIZE - ptep = pte_offset(pmdp, virt); set_pte(ptep, mk_pte_phys(phys, __pgprot(prot))); @@ -169,8 +193,7 @@ alloc_init_page(unsigned long *mem, unsigned long virt, unsigned long phys, int * the clearance is done by the middle-level functions (pmd) * rather than the top-level (pgd) functions. 
*/ -static inline void -free_init_section(unsigned long virt) +static inline void free_init_section(unsigned long virt) { pmd_clear(pmd_offset(pgd_offset_k(virt), virt)); } @@ -181,8 +204,7 @@ free_init_section(unsigned long virt) * are able to cope here with varying sizes and address * offsets, and we take full advantage of sections. */ -static void __init -create_mapping(unsigned long *mem_ptr, struct map_desc *md) +static void __init create_mapping(struct map_desc *md) { unsigned long virt, length; int prot_sect, prot_pte; @@ -205,7 +227,7 @@ create_mapping(unsigned long *mem_ptr, struct map_desc *md) length = md->length; while ((virt & 1048575 || (virt + off) & 1048575) && length >= PAGE_SIZE) { - alloc_init_page(mem_ptr, virt, virt + off, md->domain, prot_pte); + alloc_init_page(virt, virt + off, md->domain, prot_pte); virt += PAGE_SIZE; length -= PAGE_SIZE; @@ -219,7 +241,7 @@ create_mapping(unsigned long *mem_ptr, struct map_desc *md) } while (length >= PAGE_SIZE) { - alloc_init_page(mem_ptr, virt, virt + off, md->domain, prot_pte); + alloc_init_page(virt, virt + off, md->domain, prot_pte); virt += PAGE_SIZE; length -= PAGE_SIZE; @@ -227,17 +249,15 @@ create_mapping(unsigned long *mem_ptr, struct map_desc *md) } /* - * Initial boot-time mapping. This covers just the - * zero page, kernel and the flush area. NB: it - * must be sorted by virtual address, and no + * Initial boot-time mapping. This covers just the zero page, kernel and + * the flush area. NB: it must be sorted by virtual address, and no * virtual address overlaps. - * init_map[2..4] are for architectures with small - * amounts of banked memory. + * init_map[2..4] are for architectures with banked memory. */ static struct map_desc init_map[] __initdata = { { 0, 0, PAGE_SIZE, DOMAIN_USER, 0, 0, 1, 0 }, /* zero page */ { 0, 0, 0, DOMAIN_KERNEL, 0, 1, 1, 1 }, /* kernel memory */ - { 0, 0, 0, DOMAIN_KERNEL, 0, 1, 1, 1 }, + { 0, 0, 0, DOMAIN_KERNEL, 0, 1, 1, 1 }, /* (4 banks) */ { 0, 0, 0, DOMAIN_KERNEL, 0, 1, 1, 1 }, { 0, 0, 0, DOMAIN_KERNEL, 0, 1, 1, 1 }, { 0, 0, PGDIR_SIZE, DOMAIN_KERNEL, 1, 0, 1, 1 }, /* cache flush 1 */ @@ -246,19 +266,15 @@ static struct map_desc init_map[] __initdata = { #define NR_INIT_MAPS (sizeof(init_map) / sizeof(init_map[0])) -unsigned long __init -setup_page_tables(unsigned long start_mem, unsigned long end_mem) +void __init pagetable_init(void) { unsigned long address = 0; - int idx = 0; + int i; /* - * Correct the above mappings + * Setup the above mappings */ - init_map[0].physical = - init_map[1].physical = __virt_to_phys(PAGE_OFFSET); - init_map[1].virtual = PAGE_OFFSET; - init_map[1].length = end_mem - PAGE_OFFSET; + init_map[0].physical = PHYS_OFFSET; init_map[5].physical = FLUSH_BASE_PHYS; init_map[5].virtual = FLUSH_BASE; #ifdef FLUSH_BASE_MINICACHE @@ -267,109 +283,108 @@ setup_page_tables(unsigned long start_mem, unsigned long end_mem) init_map[6].length = PGDIR_SIZE; #endif + for (i = 0; i < meminfo.nr_banks; i++) { + init_map[i+1].physical = PHYS_OFFSET + meminfo.bank[i].start; + init_map[i+1].virtual = PAGE_OFFSET + meminfo.bank[i].start; + init_map[i+1].length = meminfo.bank[i].size; + } + /* - * Firstly, go through the initial mappings, - * but clear out any pgdir entries that are - * not in the description. + * Go through the initial mappings, but clear out any + * pgdir entries that are not in the description. 
*/ + i = 0; do { - if (address < init_map[idx].virtual || idx == NR_INIT_MAPS) { + if (address < init_map[i].virtual || i == NR_INIT_MAPS) { free_init_section(address); address += PGDIR_SIZE; } else { - create_mapping(&start_mem, init_map + idx); + create_mapping(init_map + i); - address = init_map[idx].virtual + init_map[idx].length; + address = init_map[i].virtual + init_map[i].length; address = (address + PGDIR_SIZE - 1) & PGDIR_MASK; do { - idx += 1; - } while (init_map[idx].length == 0 && idx < NR_INIT_MAPS); + i += 1; + } while (init_map[i].length == 0 && i < NR_INIT_MAPS); } } while (address != 0); /* - * Now, create the architecture specific mappings + * Create the architecture specific mappings */ - for (idx = 0; idx < io_desc_size; idx++) - create_mapping(&start_mem, io_desc + idx); + for (i = 0; i < io_desc_size; i++) + create_mapping(io_desc + i); flush_cache_all(); - - return start_mem; } /* - * The mem_map array can get very big. Mark the end of the - * valid mem_map banks with PG_skip, and setup the address - * validity bitmap. + * The mem_map array can get very big. Mark the end of the valid mem_map + * banks with PG_skip, and setup the address validity bitmap. */ -unsigned long __init -create_mem_holes(unsigned long start_mem, unsigned long end_mem) +void __init create_memmap_holes(void) { + unsigned int start_pfn, end_pfn = -1; struct page *pg = NULL; unsigned int sz, i; - if (!machine_is_riscpc()) - return start_mem; + for (i = 0; i < meminfo.nr_banks; i++) { + if (meminfo.bank[i].size == 0) + continue; - sz = (end_mem - PAGE_OFFSET) >> 20; - sz = (sz + 31) >> 3; - - valid_addr_bitmap = (unsigned long *)start_mem; - start_mem += sz; + start_pfn = meminfo.bank[i].start >> PAGE_SHIFT; - memset(valid_addr_bitmap, 0, sz); + /* + * subtle here - if we have a full bank, then + * start_pfn == end_pfn, and we don't want to + * set PG_skip, or next_hash + */ + if (pg && start_pfn != end_pfn) { + set_bit(PG_skip, &pg->flags); + pg->next_hash = mem_map + start_pfn; - if (start_mem > mem_desc[0].virt_end) - printk(KERN_CRIT "*** Error: RAM bank 0 too small\n"); + start_pfn = PAGE_ALIGN(__pa(pg + 1)); + end_pfn = __pa(pg->next_hash) & PAGE_MASK; - for (i = 0; i < mem_desc_size; i++) { - unsigned int idx, end; + if (end_pfn != start_pfn) + free_bootmem(start_pfn, end_pfn - start_pfn); - if (pg) { - pg->next_hash = mem_map + - MAP_NR(mem_desc[i].virt_start); pg = NULL; } - idx = __kern_valid_idx(mem_desc[i].virt_start); - end = __kern_valid_idx(mem_desc[i].virt_end); - - do - set_bit(idx, valid_addr_bitmap); - while (++idx < end); - - if (mem_desc[i].virt_end < end_mem) { - pg = mem_map + MAP_NR(mem_desc[i].virt_end); + end_pfn = (meminfo.bank[i].start + + meminfo.bank[i].size) >> PAGE_SHIFT; - set_bit(PG_skip, &pg->flags); - } + if (end_pfn != meminfo.end >> PAGE_SHIFT) + pg = mem_map + end_pfn; } - if (pg) + if (pg) { + set_bit(PG_skip, &pg->flags); pg->next_hash = NULL; - - return start_mem; -} - -void __init -mark_usable_memory_areas(unsigned long start_mem, unsigned long end_mem) -{ - /* - * Mark all of memory from the end of kernel to end of memory - */ - while (start_mem < end_mem) { - clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags); - start_mem += PAGE_SIZE; } +#if 0 /* - * Mark memory from page 1 to start of the swapper page directory + * setup address validity map + * - don't think this is used anymore? 
*/ - start_mem = PAGE_OFFSET + PAGE_SIZE; - while (start_mem < (unsigned long)&swapper_pg_dir) { - clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags); - start_mem += PAGE_SIZE; + sz = meminfo.end >> (PAGE_SHIFT + 8); /* in MB */ + sz = (sz + 31) >> 3; + + valid_addr_bitmap = alloc_bootmem(sz); + memzero(valid_addr_bitmap, sz); + + for (i = 0; i < meminfo.nr_banks; i++) { + int idx, end; + + idx = meminfo.bank[i].start >> 20; + end = (meminfo.bank[i].start + + meminfo.bank[i].size) >> 20; + do + set_bit(idx, valid_addr_bitmap); + while (++idx < end); } -} +#endif +} diff --git a/arch/arm/mm/mm-ebsa110.c b/arch/arm/mm/mm-ebsa110.c index 8086bbc08..a1172b1f1 100644 --- a/arch/arm/mm/mm-ebsa110.c +++ b/arch/arm/mm/mm-ebsa110.c @@ -13,17 +13,11 @@ #include "map.h" -struct mem_desc mem_desc[] __initdata = { - 0, 0 -}; - -unsigned int __initdata mem_desc_size = 0; +#define SIZE(x) (sizeof(x) / sizeof(x[0])) const struct map_desc io_desc[] __initdata = { { IO_BASE - PGDIR_SIZE, 0xc0000000, PGDIR_SIZE, DOMAIN_IO, 0, 1, 0, 0 }, { IO_BASE , IO_START , IO_SIZE , DOMAIN_IO, 0, 1, 0, 0 } }; -#define SIZEOFMAP (sizeof(mapping) / sizeof(mapping[0])) - -unsigned int __initdata io_desc_size = SIZEOFMAP; +unsigned int __initdata io_desc_size = SIZE(io_desc); diff --git a/arch/arm/mm/mm-footbridge.c b/arch/arm/mm/mm-footbridge.c index 74bac27ea..b67cdec33 100644 --- a/arch/arm/mm/mm-footbridge.c +++ b/arch/arm/mm/mm-footbridge.c @@ -17,6 +17,8 @@ #include "map.h" +#define SIZE(x) (sizeof(x) / sizeof(x[0])) + /* * The first entry allows us to fiddle with the EEPROM from user-space. * This entry will go away in time, once the fmu32 can mmap() the @@ -89,17 +91,9 @@ unsigned long __bus_to_virt(unsigned long res) #endif -struct mem_desc mem_desc[] __initdata = { - 0, 0 -}; - -unsigned int __initdata mem_desc_size = 0; - struct map_desc io_desc[] __initdata = { MAPPING }; -#define SIZE(x) (sizeof(x) / sizeof(x[0])) - unsigned int __initdata io_desc_size = SIZE(io_desc); diff --git a/arch/arm/mm/mm-nexuspci.c b/arch/arm/mm/mm-nexuspci.c index a4ee48f8d..5b66faa51 100644 --- a/arch/arm/mm/mm-nexuspci.c +++ b/arch/arm/mm/mm-nexuspci.c @@ -18,11 +18,7 @@ #include "map.h" -struct mem_desc mem_desc[] __initdata = { - 0, 0 -}; - -unsigned int __initdata mem_desc_size = 0; +#define SIZE(x) (sizeof(x) / sizeof(x[0])) const struct map_desc io_desc[] __initdata = { { 0xfff00000, 0x10000000, 0x00001000, DOMAIN_IO, 0, 1, 0, 0 }, @@ -32,6 +28,4 @@ const struct map_desc io_desc[] __initdata = { { 0xfd000000, 0x88000000, 0x00100000, DOMAIN_IO, 0, 1, 0, 0 } }; -#define SIZEOFMAP (sizeof(mapping) / sizeof(mapping[0])) - -unsigned int __initdata io_desc_size = SIZEOFMAP; +unsigned int __initdata io_desc_size = SIZE(io_desc); diff --git a/arch/arm/mm/mm-rpc.c b/arch/arm/mm/mm-rpc.c index 634bb3c8f..0490cbdd0 100644 --- a/arch/arm/mm/mm-rpc.c +++ b/arch/arm/mm/mm-rpc.c @@ -16,28 +16,6 @@ #define SIZE(x) (sizeof(x) / sizeof(x[0])) -struct mem_desc mem_desc[] __initdata = { - { 0xc0000000, 0xc0000000 }, - { 0xc4000000, 0xc4000000 }, - { 0xc8000000, 0xc8000000 }, - { 0xcc000000, 0xcc000000 } -}; - -unsigned int __initdata mem_desc_size = SIZE(mem_desc); - -void __init -init_dram_banks(struct param_struct *params) -{ - unsigned int bank; - - for (bank = 0; bank < mem_desc_size; bank++) - mem_desc[bank].virt_end += PAGE_SIZE * - params->u1.s.pages_in_bank[bank]; - - params->u1.s.nr_pages = mem_desc[3].virt_end - PAGE_OFFSET; - params->u1.s.nr_pages /= PAGE_SIZE; -} - struct map_desc io_desc[] __initdata = { /* VRAM */ { 
SCREEN2_BASE, SCREEN_START, 2*1048576, DOMAIN_IO, 0, 1, 0, 0 }, diff --git a/arch/arm/mm/mm-tbox.c b/arch/arm/mm/mm-tbox.c index a6dd2a28f..78250336e 100644 --- a/arch/arm/mm/mm-tbox.c +++ b/arch/arm/mm/mm-tbox.c @@ -18,11 +18,7 @@ #include "map.h" -struct mem_desc mem_desc[] __initdata = { - 0, 0 -}; - -unsigned int __initdata mem_desc_size = 0; +#define SIZE(x) (sizeof(x) / sizeof(x[0])) /* Logical Physical * 0xffff1000 0x00100000 DMA registers @@ -60,6 +56,4 @@ const struct map_desc io_desc[] __initdata = { { 0xffffe000, 0x00e00000, 0x00001000, DOMAIN_IO, 0, 1, 0, 0 } }; -#define SIZEOFMAP (sizeof(mapping) / sizeof(mapping[0])) - -unsigned int __initdata io_desc_size = SIZEOFMAP; +unsigned int __initdata io_desc_size = SIZE(io_desc); diff --git a/arch/arm/mm/proc-arm2,3.S b/arch/arm/mm/proc-arm2,3.S index df2e13357..dcb5c10dc 100644 --- a/arch/arm/mm/proc-arm2,3.S +++ b/arch/arm/mm/proc-arm2,3.S @@ -327,7 +327,7 @@ arm2_elf_name: .asciz "v1" arm3_elf_name: .asciz "v2" .align - .section ".proc.info", #alloc + .section ".proc.info", #alloc, #execinstr .long 0x41560200 .long 0xfffffff0 diff --git a/arch/arm/mm/proc-arm6,7.S b/arch/arm/mm/proc-arm6,7.S index f3819fa01..b085c3c4e 100644 --- a/arch/arm/mm/proc-arm6,7.S +++ b/arch/arm/mm/proc-arm6,7.S @@ -532,7 +532,7 @@ cpu_elf_name: .asciz "v3" .size cpu_elf_name, . - cpu_elf_name .align - .section ".proc.info", #alloc + .section ".proc.info", #alloc, #execinstr .type __arm6_proc_info, #object __arm6_proc_info: diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 73c0f83df..266d960b5 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -446,7 +446,8 @@ ENTRY(cpu_sa1100_reset) bl cpu_sa110_flush_tlb_all mcr p15, 0, ip, c7, c7, 0 @ flush I,D caches mrc p15, 0, r0, c1, c0, 0 @ ctrl register - bic r0, r0, #1 @ ...............m + bic r0, r0, #0x000f @ ............wcam + bic r0, r0, #0x1100 @ ...i...s........ ldmfd sp!, {r1, pc} /* * Purpose : Function pointers used to access above functions - all calls @@ -546,6 +547,7 @@ cpu_elf_name: .asciz "v4" .align .section ".proc.info", #alloc, #execinstr + .type __sa110_proc_info,#object __sa110_proc_info: .long 0x4401a100 diff --git a/arch/arm/mm/small_page.c b/arch/arm/mm/small_page.c index 6bdc6cfc7..ac303d45e 100644 --- a/arch/arm/mm/small_page.c +++ b/arch/arm/mm/small_page.c @@ -21,205 +21,201 @@ #include <linux/swap.h> #include <linux/smp.h> -#if PAGE_SIZE == 4096 -/* 2K blocks */ -#define SMALL_ALLOC_SHIFT (11) -#define NAME(x) x##_2k -#elif PAGE_SIZE == 32768 || PAGE_SIZE == 16384 -/* 8K blocks */ -#define SMALL_ALLOC_SHIFT (13) -#define NAME(x) x##_8k -#endif +#include <asm/bitops.h> +#include <asm/pgtable.h> -#define SMALL_ALLOC_SIZE (1 << SMALL_ALLOC_SHIFT) -#define NR_BLOCKS (PAGE_SIZE / SMALL_ALLOC_SIZE) -#define BLOCK_MASK ((1 << NR_BLOCKS) - 1) +#define PEDANTIC -#define USED(pg) ((atomic_read(&(pg)->count) >> 8) & BLOCK_MASK) -#define SET_USED(pg,off) (atomic_read(&(pg)->count) |= 256 << off) -#define CLEAR_USED(pg,off) (atomic_read(&(pg)->count) &= ~(256 << off)) -#define ALL_USED BLOCK_MASK -#define IS_FREE(pg,off) (!(atomic_read(&(pg)->count) & (256 << off))) -#define SM_PAGE_PTR(page,block) ((struct free_small_page *)((page) + \ - ((block) << SMALL_ALLOC_SHIFT))) - -#if NR_BLOCKS != 2 && NR_BLOCKS != 4 -#error I only support 2 or 4 blocks per page -#endif +/* + * Requirement: + * We need to be able to allocate naturally aligned memory of finer + * granularity than the page size. 
This is typically used for the + * second level page tables on 32-bit ARMs. + * + * Theory: + * We "misuse" the Linux memory management system. We use alloc_page + * to allocate a page and then mark it as reserved. The Linux memory + * management system will then ignore the "offset", "next_hash" and + * "pprev_hash" entries in the mem_map for this page. + * + * We then use a bitstring in the "offset" field to mark which segments + * of the page are in use, and manipulate this as required during the + * allocation and freeing of these small pages. + * + * We also maintain a queue of pages being used for this purpose using + * the "next_hash" and "pprev_hash" entries of mem_map; + */ -struct free_small_page { - unsigned long next; - unsigned long prev; +struct order { + struct page *queue; + unsigned int mask; /* (1 << shift) - 1 */ + unsigned int shift; /* (1 << shift) size of page */ + unsigned int block_mask; /* nr_blocks - 1 */ + unsigned int all_used; /* (1 << nr_blocks) - 1 */ }; -/* - * To handle allocating small pages, we use the main get_free_page routine, - * and split the page up into 4. The page is marked in mem_map as reserved, - * so it can't be free'd by free_page. The count field is used to keep track - * of which sections of this page are allocated. - */ -static unsigned long small_page_ptr; - -static unsigned char offsets[1<<NR_BLOCKS] = { - 0, /* 0000 */ - 1, /* 0001 */ - 0, /* 0010 */ - 2, /* 0011 */ -#if NR_BLOCKS == 4 - 0, /* 0100 */ - 1, /* 0101 */ - 0, /* 0110 */ - 3, /* 0111 */ - 0, /* 1000 */ - 1, /* 1001 */ - 0, /* 1010 */ - 2, /* 1011 */ - 0, /* 1100 */ - 1, /* 1101 */ - 0, /* 1110 */ - 4 /* 1111 */ + +static struct order orders[] = { +#if PAGE_SIZE == 4096 + { NULL, 2047, 11, 1, 0x00000003 } +#elif PAGE_SIZE == 32768 + { NULL, 2047, 11, 15, 0x0000ffff }, + { NULL, 8191, 13, 3, 0x0000000f } +#else +#error unsupported page size #endif }; -static inline void clear_page_links(unsigned long page) -{ - struct free_small_page *fsp; - int i; +#define USED_MAP(pg) ((pg)->offset) +#define TEST_AND_CLEAR_USED(pg,off) (test_and_clear_bit(off, &(pg)->offset)) +#define SET_USED(pg,off) (set_bit(off, &(pg)->offset)) - for (i = 0; i < NR_BLOCKS; i++) { - fsp = SM_PAGE_PTR(page, i); - fsp->next = fsp->prev = 0; - } -} - -static inline void set_page_links_prev(unsigned long page, unsigned long prev) +static void add_page_to_queue(struct page *page, struct page **p) { - struct free_small_page *fsp; - unsigned int mask; - int i; - - if (!page) - return; - - mask = USED(&mem_map[MAP_NR(page)]); - for (i = 0; i < NR_BLOCKS; i++) { - if (mask & (1 << i)) - continue; - fsp = SM_PAGE_PTR(page, i); - fsp->prev = prev; - } +#ifdef PEDANTIC + if (page->pprev_hash) + PAGE_BUG(page); +#endif + page->next_hash = *p; + if (*p) + (*p)->pprev_hash = &page->next_hash; + *p = page; + page->pprev_hash = p; } -static inline void set_page_links_next(unsigned long page, unsigned long next) +static void remove_page_from_queue(struct page *page) { - struct free_small_page *fsp; - unsigned int mask; - int i; - - if (!page) - return; - - mask = USED(&mem_map[MAP_NR(page)]); - for (i = 0; i < NR_BLOCKS; i++) { - if (mask & (1 << i)) - continue; - fsp = SM_PAGE_PTR(page, i); - fsp->next = next; + if (page->pprev_hash) { + if (page->next_hash) + page->next_hash->pprev_hash = page->pprev_hash; + *page->pprev_hash = page->next_hash; + page->pprev_hash = NULL; } } -unsigned long NAME(get_page)(int priority) +static unsigned long __get_small_page(int priority, struct order *order) { - struct free_small_page *fsp; 
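/*
 * Hedged sketch of the bookkeeping described in the "Theory" comment of
 * small_page.c above: one unsigned long acts as the per-page used-block
 * bitstring (the patch keeps it in page->offset).  A free block is found
 * with ffz(), claimed with a set-bit, and released with a test-and-clear.
 * The tiny ffz() below is only a stand-in for the asm/bitops.h version,
 * and the 2-blocks-per-4K-page geometry is taken from orders[] above.
 */
#include <stdio.h>

static int ffz(unsigned long word)		/* index of first zero bit */
{
	int bit = 0;

	while (word & 1) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long used_map = 0;		/* page->offset in the patch */
	unsigned long all_used = 0x3;		/* two 2K blocks in a 4K page */
	int blk;

	/* claim both blocks, as repeated get_page_2k() calls would */
	while (used_map != all_used) {
		blk = ffz(used_map);
		used_map |= 1UL << blk;
		printf("allocated block %d, map now %#lx\n", blk, used_map);
	}

	/* release block 0, as free_page_2k() would */
	used_map &= ~1UL;
	printf("after free, map %#lx (page %s)\n", used_map,
	       used_map ? "still partly used" : "completely free");
	return 0;
}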
- unsigned long new_page; unsigned long flags; struct page *page; int offset; save_flags(flags); - if (!small_page_ptr) + if (!order->queue) goto need_new_page; + cli(); + page = order->queue; again: - page = mem_map + MAP_NR(small_page_ptr); - offset = offsets[USED(page)]; +#ifdef PEDANTIC + if (USED_MAP(page) & ~order->all_used) + PAGE_BUG(page); +#endif + offset = ffz(USED_MAP(page)); SET_USED(page, offset); - new_page = (unsigned long)SM_PAGE_PTR(small_page_ptr, offset); - if (USED(page) == ALL_USED) { - fsp = (struct free_small_page *)new_page; - set_page_links_prev (fsp->next, 0); - small_page_ptr = fsp->next; - } + if (USED_MAP(page) == order->all_used) + remove_page_from_queue(page); restore_flags(flags); - return new_page; + + return page_address(page) + (offset << order->shift); need_new_page: - new_page = __get_free_page(priority); - if (!small_page_ptr) { - if (new_page) { - set_bit (PG_reserved, &mem_map[MAP_NR(new_page)].flags); - clear_page_links (new_page); - cli(); - small_page_ptr = new_page; - goto again; - } - restore_flags(flags); - return 0; + page = alloc_page(priority); + if (!order->queue) { + if (!page) + goto no_page; + SetPageReserved(page); + USED_MAP(page) = 0; + cli(); + add_page_to_queue(page, &order->queue); + } else { + __free_page(page); + cli(); + page = order->queue; } - free_page(new_page); - cli(); goto again; + +no_page: + restore_flags(flags); + return 0; } -void NAME(free_page)(unsigned long spage) +static void __free_small_page(unsigned long spage, struct order *order) { - struct free_small_page *ofsp, *cfsp; unsigned long flags; + unsigned long nr; struct page *page; - int offset, oldoffset; - - if (!spage) - goto none; - - offset = (spage >> SMALL_ALLOC_SHIFT) & (NR_BLOCKS - 1); - spage -= offset << SMALL_ALLOC_SHIFT; - - page = mem_map + MAP_NR(spage); - if (!PageReserved(page) || !USED(page)) - goto non_small; - - if (IS_FREE(page, offset)) - goto free; - - save_flags_cli (flags); - oldoffset = offsets[USED(page)]; - CLEAR_USED(page, offset); - ofsp = SM_PAGE_PTR(spage, oldoffset); - cfsp = SM_PAGE_PTR(spage, offset); - - if (oldoffset == NR_BLOCKS) { /* going from totally used to mostly used */ - cfsp->prev = 0; - cfsp->next = small_page_ptr; - set_page_links_prev (small_page_ptr, spage); - small_page_ptr = spage; - } else if (!USED(page)) { - set_page_links_prev (ofsp->next, ofsp->prev); - set_page_links_next (ofsp->prev, ofsp->next); - if (spage == small_page_ptr) - small_page_ptr = ofsp->next; - clear_bit (PG_reserved, &page->flags); + + nr = MAP_NR(spage); + if (nr < max_mapnr) { + page = mem_map + nr; + + /* + * The container-page must be marked Reserved + */ + if (!PageReserved(page) || spage & order->mask) + goto non_small; + +#ifdef PEDANTIC + if (USED_MAP(page) & ~order->all_used) + PAGE_BUG(page); +#endif + + spage = spage >> order->shift; + spage &= order->block_mask; + + /* + * the following must be atomic wrt get_page + */ + save_flags_cli(flags); + + if (USED_MAP(page) == order->all_used) + add_page_to_queue(page, &order->queue); + + if (!TEST_AND_CLEAR_USED(page, spage)) + goto already_free; + + if (USED_MAP(page) == 0) + goto free_page; + restore_flags(flags); - free_page (spage); - } else - *cfsp = *ofsp; + } + return; + +free_page: + /* + * unlink the page from the small page queue and free it + */ + remove_page_from_queue(page); restore_flags(flags); + ClearPageReserved(page); + __free_page(page); return; non_small: - printk ("Trying to free non-small page from %p\n", __builtin_return_address(0)); - return; -free: - printk 
("Trying to free free small page from %p\n", __builtin_return_address(0)); -none: + printk("Trying to free non-small page from %p\n", __builtin_return_address(0)); return; +already_free: + printk("Trying to free free small page from %p\n", __builtin_return_address(0)); } + +unsigned long get_page_2k(int priority) +{ + return __get_small_page(priority, orders+0); +} + +void free_page_2k(unsigned long spage) +{ + __free_small_page(spage, orders+0); +} + +#if PAGE_SIZE > 8192 +unsigned long get_page_8k(int priority) +{ + return __get_small_page(priority, orders+1); +} + +void free_page_8k(unsigned long spage) +{ + __free_small_page(spage, orders+1); +} +#endif diff --git a/arch/arm/vmlinux-armo.lds.in b/arch/arm/vmlinux-armo.lds.in index 446f49924..5fbafe77a 100644 --- a/arch/arm/vmlinux-armo.lds.in +++ b/arch/arm/vmlinux-armo.lds.in @@ -7,7 +7,8 @@ ENTRY(stext) SECTIONS { . = TEXTADDR; - __init_begin = .; + + __init_begin = .; /* Init code and data */ .text.init : { *(.text.init) } __proc_info_begin = .; .proc.info : { *(.proc.info) } @@ -27,43 +28,44 @@ SECTIONS *(.init.task) } - _text = .; /* Text and read-only data */ + _text = .; /* Text and read-only data */ .text : { *(.text) *(.fixup) *(.gnu.warning) } + .text.lock : { *(.text.lock) } /* out-of-line lock text */ .rodata : { *(.rodata) } .kstrtab : { *(.kstrtab) } - . = ALIGN(16); /* Exception table */ + . = ALIGN(16); /* Exception table */ __start___ex_table = .; __ex_table : { *(__ex_table) } __stop___ex_table = .; - __start___ksymtab = .; /* Kernel symbol table */ + __start___ksymtab = .; /* Kernel symbol table */ __ksymtab : { *(__ksymtab) } __stop___ksymtab = .; - .got : { *(.got) } /* Global offset table */ + .got : { *(.got) } /* Global offset table */ - _etext = .; /* End of text section */ + _etext = .; /* End of text section */ - .data : { /* Data */ + .data : { /* Data */ *(.data) CONSTRUCTORS } - _edata = .; /* End of data section */ + _edata = .; /* End of data section */ - __bss_start = .; /* BSS */ + __bss_start = .; /* BSS */ .bss : { *(.bss) } _end = . ; - /* Stabs debugging sections. */ + /* Stabs debugging sections. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } |
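/*
 * Usage sketch for the 2K small-page interface the pte allocators above
 * rely on: each get_page_2k() block is zeroed as two PTRS_PER_PTE-entry
 * halves, the pmd is pointed at the second half (pte += PTRS_PER_PTE),
 * and free_pte_slow() steps back by PTRS_PER_PTE before releasing the
 * block.  malloc()/free() stand in for get_page_2k()/free_page_2k()
 * here, and PTRS_PER_PTE == 256 is assumed purely for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long pte_t;
#define PTRS_PER_PTE	256

static pte_t *get_pte_sketch(void)
{
	/* one block covering 2 * PTRS_PER_PTE entries */
	pte_t *pte = malloc(2 * PTRS_PER_PTE * sizeof(pte_t));

	if (!pte)
		return NULL;
	memset(pte, 0, 2 * PTRS_PER_PTE * sizeof(pte_t));
	return pte + PTRS_PER_PTE;	/* the half handed to set_pmd() */
}

static void free_pte_sketch(pte_t *pte)
{
	free(pte - PTRS_PER_PTE);	/* step back to the start of the block */
}

int main(void)
{
	pte_t *pte = get_pte_sketch();

	if (!pte)
		return 1;
	printf("table at %p, block begins %lu bytes earlier\n",
	       (void *)pte, (unsigned long)(PTRS_PER_PTE * sizeof(pte_t)));
	free_pte_sketch(pte);
	return 0;
}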