diff options
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/config.in            |   3
-rw-r--r--  arch/sparc/kernel/irq.c         |  23
-rw-r--r--  arch/sparc/kernel/rtrap.S       |   6
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c |   2
-rw-r--r--  arch/sparc/mm/init.c            | 135
-rw-r--r--  arch/sparc/mm/srmmu.c           |  29
6 files changed, 140 insertions, 58 deletions
diff --git a/arch/sparc/config.in b/arch/sparc/config.in index e79dfd803..570f86325 100644 --- a/arch/sparc/config.in +++ b/arch/sparc/config.in @@ -1,10 +1,11 @@ -# $Id: config.in,v 1.98 2000/07/06 01:41:29 davem Exp $ +# $Id: config.in,v 1.99 2000/08/01 04:53:58 anton Exp $ # For a description of the syntax of this configuration file, # see Documentation/kbuild/config-language.txt. # mainmenu_name "Linux/SPARC Kernel Configuration" define_bool CONFIG_UID16 y +define_bool CONFIG_HIGHMEM y mainmenu_option next_comment comment 'Code maturity level options' diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index 65e95afdb..a6fd32fa4 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -195,16 +195,9 @@ void free_irq(unsigned int irq, void *dev_id) restore_flags(flags); } -#ifndef CONFIG_SMP -unsigned int __local_bh_count; -unsigned int __local_irq_count; - -#else +#ifdef CONFIG_SMP /* SMP interrupt locking on Sparc. */ -unsigned int __local_bh_count[NR_CPUS]; -unsigned int __local_irq_count[NR_CPUS]; - /* Who has global_irq_lock. */ unsigned char global_irq_holder = NO_PROC_ID; @@ -229,14 +222,14 @@ static void show(char * str) printk("irq: %d [ ", atomic_read(&global_irq_count)); for (i = 0; i < NR_CPUS; i++) { - printk("%d ", __local_irq_count[i]); + printk("%d ", local_irq_count(i)); } printk("]\n"); printk("bh: %d [ ", (spin_is_locked(&global_bh_lock) ? 1 : 0)); for (i = 0; i < NR_CPUS; i++) { - printk("%d ", __local_bh_count[cpu]); + printk("%d ", local_bh_count(cpu)); } printk("]\n"); @@ -263,7 +256,7 @@ static inline void wait_on_irq(int cpu) * already executing in one.. 
*/ if (!atomic_read(&global_irq_count)) { - if (__local_bh_count[cpu] || !spin_is_locked(&global_bh_lock)) + if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock)) break; } @@ -282,7 +275,7 @@ static inline void wait_on_irq(int cpu) continue; if (spin_is_locked (&global_irq_lock)) continue; - if (!__local_bh_count[cpu] && spin_is_locked(&global_bh_lock)) + if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock)) continue; if (spin_trylock(&global_irq_lock)) break; @@ -358,7 +351,7 @@ void __global_cli(void) if ((flags & PSR_PIL) != PSR_PIL) { int cpu = smp_processor_id(); __cli(); - if (!__local_irq_count[cpu]) + if (!local_irq_count(cpu)) get_irqlock(cpu); } } @@ -367,7 +360,7 @@ void __global_sti(void) { int cpu = smp_processor_id(); - if (!__local_irq_count[cpu]) + if (!local_irq_count(cpu)) release_irqlock(cpu); __sti(); } @@ -394,7 +387,7 @@ unsigned long __global_save_flags(void) retval = 2 + local_enabled; /* check for global flags if we're not in an interrupt */ - if (!__local_irq_count[smp_processor_id()]) { + if (!local_irq_count(smp_processor_id())) { if (local_enabled) retval = 1; if (global_irq_holder == (unsigned char) smp_processor_id()) diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S index dcfc2dc78..c44040bde 100644 --- a/arch/sparc/kernel/rtrap.S +++ b/arch/sparc/kernel/rtrap.S @@ -50,10 +50,10 @@ rtrap_7win_patch5: and %g1, 0x7f, %g1 ret_trap_entry: ld [%curptr + AOFF_task_processor], %l3 sll %l3, 5, %l3 - sethi %hi(C_LABEL(softirq_state)), %l4 + sethi %hi(C_LABEL(irq_stat)), %l4 ! &softirq_active add %l4, %l3, %l4 - ld [%l4 + %lo(C_LABEL(softirq_state))], %g5 - ld [%l4 + %lo(C_LABEL(softirq_state) + 4)], %g4 + ld [%l4 + %lo(C_LABEL(irq_stat))], %g5 ! softirq_active + ld [%l4 + %lo(C_LABEL(irq_stat) + 4)], %g4 ! 
softirq_mask andcc %g4, %g5, %g0 be C_LABEL(ret_trap_lockless_ipi) nop diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index a51995713..37c276c7f 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -146,8 +146,6 @@ EXPORT_SYMBOL(global_bh_lock); EXPORT_SYMBOL(global_irq_count); EXPORT_SYMBOL(synchronize_irq); #endif -EXPORT_SYMBOL(__local_irq_count); -EXPORT_SYMBOL(__local_bh_count); EXPORT_SYMBOL(udelay); EXPORT_SYMBOL(mstk48t02_regs); diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c index 5ac1845a3..ced31b91f 100644 --- a/arch/sparc/mm/init.c +++ b/arch/sparc/mm/init.c @@ -1,4 +1,4 @@ -/* $Id: init.c,v 1.88 2000/07/10 20:56:53 anton Exp $ +/* $Id: init.c,v 1.89 2000/08/01 04:53:58 anton Exp $ * linux/arch/sparc/mm/init.c * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -51,7 +51,9 @@ extern char __init_begin, __init_end, _start, _end, etext , edata; extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; -unsigned long totalram_pages = 0; +unsigned long highstart_pfn, highend_pfn; +unsigned long totalram_pages; +static unsigned long totalhigh_pages; /* * BAD_PAGE is the page that is used for page faults when linux @@ -79,6 +81,21 @@ pte_t __bad_page(void) PAGE_SHARED)); } +pte_t *kmap_pte; +pgprot_t kmap_prot; + +#define kmap_get_fixed_pte(vaddr) \ + pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) + +void __init kmap_init(void) +{ + unsigned long pteval; + + /* cache the first kmap pte */ + kmap_pte = kmap_get_fixed_pte(FIX_KMAP_BEGIN); + kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); +} + void show_mem(void) { printk("Mem-info:\n"); @@ -119,22 +136,15 @@ void __init sparc_context_init(int numctx) #define DEBUG_BOOTMEM extern unsigned long cmdline_memory_size; +extern unsigned long last_valid_pfn; -unsigned long __init bootmem_init(void) +void __init bootmem_init(void) { - unsigned long bootmap_size, start_pfn, end_pfn; 
+ unsigned long bootmap_size, start_pfn, max_pfn; unsigned long end_of_phys_memory = 0UL; unsigned long bootmap_pfn; int i; - /* - * XXX Limit maximum memory until we implement highmem for sparc. - * The nocache region has taken up some room but I'll rearrange - * the virtual address regions soon - Anton - */ - if (!cmdline_memory_size || cmdline_memory_size > 0x0c000000) - cmdline_memory_size = 0x0c000000; - /* XXX It is a bit ambiguous here, whether we should * XXX treat the user specified mem=xxx as total wanted * XXX physical memory, or as a limit to the upper @@ -181,7 +191,16 @@ unsigned long __init bootmem_init(void) bootmap_pfn = start_pfn; - end_pfn = end_of_phys_memory >> PAGE_SHIFT; + max_pfn = end_of_phys_memory >> PAGE_SHIFT; + + max_low_pfn = max_pfn; + highstart_pfn = highend_pfn = max_pfn; + + if (max_low_pfn > (SRMMU_MAXMEM >> PAGE_SHIFT)) { + highstart_pfn = max_low_pfn = (SRMMU_MAXMEM >> PAGE_SHIFT); + printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", + (highend_pfn - highstart_pfn) >> (20-PAGE_SHIFT)); + } #ifdef CONFIG_BLK_DEV_INITRD /* Now have to check initial ramdisk, so that bootmap does not overwrite it */ @@ -205,22 +224,41 @@ unsigned long __init bootmem_init(void) #endif /* Initialize the boot-time allocator. */ #ifdef DEBUG_BOOTMEM - prom_printf("init_bootmem(spfn[%lx],bpfn[%lx],epfn[%lx])\n", - start_pfn, bootmap_pfn, end_pfn); + prom_printf("init_bootmem(spfn[%lx],bpfn[%lx],mlpfn[%lx])\n", + start_pfn, bootmap_pfn, max_low_pfn); #endif - bootmap_size = init_bootmem(bootmap_pfn, end_pfn); + bootmap_size = init_bootmem(bootmap_pfn, max_low_pfn); /* Now register the available physical memory with the * allocator. 
*/ for (i = 0; sp_banks[i].num_bytes != 0; i++) { + unsigned long curr_pfn, last_pfn, size; + + curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; + if (curr_pfn >= max_low_pfn) + break; + + last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; + if (last_pfn > max_low_pfn) + last_pfn = max_low_pfn; + + /* + * .. finally, did all the rounding and playing + * around just make the area go away? + */ + if (last_pfn <= curr_pfn) + continue; + + size = (last_pfn - curr_pfn) << PAGE_SHIFT; + #ifdef DEBUG_BOOTMEM prom_printf("free_bootmem: base[%lx] size[%lx]\n", sp_banks[i].base_addr, - sp_banks[i].num_bytes); + size); #endif free_bootmem(sp_banks[i].base_addr, - sp_banks[i].num_bytes); + size); } /* Reserve the kernel text/data/bss, the bootmem bitmap and initrd. */ @@ -245,10 +283,7 @@ unsigned long __init bootmem_init(void) reserve_bootmem(phys_base, (start_pfn << PAGE_SHIFT) - phys_base); reserve_bootmem((bootmap_pfn << PAGE_SHIFT), bootmap_size); -#ifdef DEBUG_BOOTMEM - prom_printf("init_bootmem: return end_pfn[%lx]\n", end_pfn); -#endif - return end_pfn; + last_valid_pfn = max_pfn; } /* @@ -391,6 +426,25 @@ void __init free_unused_mem_map(void) #endif } +void map_high_region(unsigned long start_pfn, unsigned long end_pfn) +{ + unsigned long tmp; + +#ifdef DEBUG_HIGHMEM + printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); +#endif + + for (tmp = start_pfn; tmp < end_pfn; tmp++) { + struct page *page = mem_map + tmp; + + ClearPageReserved(page); + set_bit(PG_highmem, &page->flags); + atomic_set(&page->count, 1); + __free_page(page); + totalhigh_pages++; + } +} + void __init mem_init(void) { int codepages = 0; @@ -401,6 +455,10 @@ void __init mem_init(void) unsigned long addr, last; #endif + highmem_start_page = mem_map + highstart_pfn; + /* cache the highmem_mapnr */ + highmem_mapnr = highstart_pfn; + /* Saves us work later. 
*/ memset((void *)&empty_zero_page, 0, PAGE_SIZE); @@ -419,7 +477,7 @@ void __init mem_init(void) taint_real_pages(); max_mapnr = last_valid_pfn; - high_memory = __va(last_valid_pfn << PAGE_SHIFT); + high_memory = __va(max_low_pfn << PAGE_SHIFT); #ifdef DEBUG_BOOTMEM prom_printf("mem_init: Calling free_all_bootmem().\n"); @@ -430,6 +488,21 @@ void __init mem_init(void) free_unused_mem_map(); #endif + for (i = 0; sp_banks[i].num_bytes != 0; i++) { + unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; + unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; + + if (end_pfn <= highstart_pfn) + continue; + + if (start_pfn < highstart_pfn) + start_pfn = highstart_pfn; + + map_high_region(start_pfn, end_pfn); + } + + totalram_pages += totalhigh_pages; + codepages = (((unsigned long) &etext) - ((unsigned long)&_start)); codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; datapages = (((unsigned long) &edata) - ((unsigned long)&etext)); @@ -437,11 +510,12 @@ void __init mem_init(void) initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; - printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n", + printk("Memory: %dk available (%dk kernel code, %dk data, %dk init, %ldk highmem) [%08lx,%08lx]\n", nr_free_pages() << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10), initpages << (PAGE_SHIFT-10), + totalhigh_pages << (PAGE_SHIFT-10), (unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); /* NOTE NOTE NOTE NOTE @@ -501,9 +575,16 @@ void si_meminfo(struct sysinfo *val) val->sharedram = 0; val->freeram = nr_free_pages(); val->bufferram = atomic_read(&buffermem_pages); - - val->totalhigh = 0; - val->freehigh = 0; + val->totalhigh = totalhigh_pages; + val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } + +void flush_page_to_ram(struct page *page) +{ + unsigned long vaddr; + vaddr = kmap(page); + 
__flush_page_to_ram(page_address(page)); + kunmap(page); +} diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 612436b47..c7f839f28 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1,4 +1,4 @@ -/* $Id: srmmu.c,v 1.218 2000/07/10 23:22:32 anton Exp $ +/* $Id: srmmu.c,v 1.219 2000/08/01 04:53:58 anton Exp $ * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -87,7 +87,7 @@ char *srmmu_name; ctxd_t *srmmu_ctx_table_phys; ctxd_t *srmmu_context_table; -int viking_mxcc_present = 0; +int viking_mxcc_present; spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED; int is_hypersparc; @@ -117,10 +117,6 @@ static inline int srmmu_device_memory(unsigned long x) int srmmu_cache_pagetables; /* XXX Make this dynamic based on ram size - Anton */ -#define SRMMU_NOCACHE_NPAGES 256 -#define SRMMU_NOCACHE_VADDR 0xfc000000 -#define SRMMU_NOCACHE_SIZE (SRMMU_NOCACHE_NPAGES*PAGE_SIZE) -#define SRMMU_NOCACHE_END (SRMMU_NOCACHE_VADDR + SRMMU_NOCACHE_SIZE) #define SRMMU_NOCACHE_BITMAP_SIZE (SRMMU_NOCACHE_NPAGES * 16) #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) @@ -1190,9 +1186,11 @@ void __init srmmu_paging_init(void) { int i, cpunode; char node_str[128]; - unsigned long end_pfn; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; - sparc_iomap.start = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */ + sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. 
*/ if (sparc_cpu_model == sun4d) num_contexts = 65536; /* We know it is Viking */ @@ -1215,7 +1213,7 @@ void __init srmmu_paging_init(void) prom_halt(); } - last_valid_pfn = end_pfn = bootmem_init(); + bootmem_init(); srmmu_nocache_init(); srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE)); @@ -1238,6 +1236,14 @@ void __init srmmu_paging_init(void) srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END); #endif + srmmu_allocate_ptable_skeleton(FIX_KMAP_BEGIN, FIX_KMAP_END); + srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_BASE_END); + + pgd = pgd_offset_k(PKMAP_BASE); + pmd = pmd_offset(pgd, PKMAP_BASE); + pte = pte_offset(pmd, PKMAP_BASE); + pkmap_page_table = pte; + flush_cache_all(); flush_tlb_all(); @@ -1253,10 +1259,13 @@ void __init srmmu_paging_init(void) sparc_context_init(num_contexts); + kmap_init(); + { unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0}; - zones_size[ZONE_DMA] = end_pfn; + zones_size[ZONE_DMA] = max_low_pfn; + zones_size[ZONE_HIGHMEM] = highend_pfn - max_low_pfn; free_area_init(zones_size); } } |