author     Ralf Baechle <ralf@linux-mips.org>     1998-05-07 02:55:41 +0000
committer  Ralf Baechle <ralf@linux-mips.org>     1998-05-07 02:55:41 +0000
commit     dcec8a13bf565e47942a1751a9cec21bec5648fe (patch)
tree       548b69625b18cc2e88c3e68d0923be546c9ebb03 /arch/sparc
parent     2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4 (diff)
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Makefile | 29
-rw-r--r--  arch/sparc/ap1000/apmmu.c | 364
-rw-r--r--  arch/sparc/boot/Makefile | 8
-rw-r--r--  arch/sparc/boot/btfixupprep.c | 345
-rw-r--r--  arch/sparc/config.in | 24
-rw-r--r--  arch/sparc/defconfig | 59
-rw-r--r--  arch/sparc/kernel/Makefile | 62
-rw-r--r--  arch/sparc/kernel/auxio.c | 6
-rw-r--r--  arch/sparc/kernel/cpu.c | 13
-rw-r--r--  arch/sparc/kernel/devices.c | 16
-rw-r--r--  arch/sparc/kernel/entry.S | 204
-rw-r--r--  arch/sparc/kernel/etrap.S | 27
-rw-r--r--  arch/sparc/kernel/head.S | 124
-rw-r--r--  arch/sparc/kernel/init_task.c | 6
-rw-r--r--  arch/sparc/kernel/irq.c | 46
-rw-r--r--  arch/sparc/kernel/process.c | 98
-rw-r--r--  arch/sparc/kernel/rtrap.S | 5
-rw-r--r--  arch/sparc/kernel/sclow.S | 3
-rw-r--r--  arch/sparc/kernel/setup.c | 144
-rw-r--r--  arch/sparc/kernel/signal.c | 127
-rw-r--r--  arch/sparc/kernel/smp.c | 472
-rw-r--r--  arch/sparc/kernel/sparc-stub.c | 15
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 32
-rw-r--r--  arch/sparc/kernel/sun4c_irq.c | 58
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c | 252
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 576
-rw-r--r--  arch/sparc/kernel/sun4m_irq.c | 26
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 545
-rw-r--r--  arch/sparc/kernel/sunos_ioctl.c | 4
-rw-r--r--  arch/sparc/kernel/sys_sparc.c | 26
-rw-r--r--  arch/sparc/kernel/sys_sunos.c | 147
-rw-r--r--  arch/sparc/kernel/systbls.S | 12
-rw-r--r--  arch/sparc/kernel/tadpole.c | 3
-rw-r--r--  arch/sparc/kernel/time.c | 36
-rw-r--r--  arch/sparc/kernel/trampoline.S | 91
-rw-r--r--  arch/sparc/kernel/traps.c | 60
-rw-r--r--  arch/sparc/kernel/wof.S | 17
-rw-r--r--  arch/sparc/kernel/wuf.S | 19
-rw-r--r--  arch/sparc/lib/Makefile | 32
-rw-r--r--  arch/sparc/lib/atomic.S | 14
-rw-r--r--  arch/sparc/lib/blockops.S | 7
-rw-r--r--  arch/sparc/math-emu/.cvsignore | 2
-rw-r--r--  arch/sparc/math-emu/Makefile | 37
-rw-r--r--  arch/sparc/math-emu/ashldi3.S | 36
-rw-r--r--  arch/sparc/math-emu/fabss.c | 6
-rw-r--r--  arch/sparc/math-emu/fcmpd.c | 18
-rw-r--r--  arch/sparc/math-emu/fcmped.c | 18
-rw-r--r--  arch/sparc/math-emu/fcmpeq.c | 18
-rw-r--r--  arch/sparc/math-emu/fcmpes.c | 18
-rw-r--r--  arch/sparc/math-emu/fcmpq.c | 18
-rw-r--r--  arch/sparc/math-emu/fcmps.c | 18
-rw-r--r--  arch/sparc/math-emu/fdmulq.c | 16
-rw-r--r--  arch/sparc/math-emu/fdtoq.c | 13
-rw-r--r--  arch/sparc/math-emu/fdtos.c | 13
-rw-r--r--  arch/sparc/math-emu/fmovs.c | 5
-rw-r--r--  arch/sparc/math-emu/fnegs.c | 7
-rw-r--r--  arch/sparc/math-emu/fqtod.c | 13
-rw-r--r--  arch/sparc/math-emu/fqtos.c | 13
-rw-r--r--  arch/sparc/math-emu/fsmuld.c | 16
-rw-r--r--  arch/sparc/math-emu/fstod.c | 13
-rw-r--r--  arch/sparc/math-emu/fstoq.c | 13
-rw-r--r--  arch/sparc/math-emu/math.c | 416
-rw-r--r--  arch/sparc/math-emu/sfp-machine.h | 363
-rw-r--r--  arch/sparc/mm/Makefile | 16
-rw-r--r--  arch/sparc/mm/btfixup.c | 334
-rw-r--r--  arch/sparc/mm/fault.c | 4
-rw-r--r--  arch/sparc/mm/hypersparc.S | 9
-rw-r--r--  arch/sparc/mm/init.c | 114
-rw-r--r--  arch/sparc/mm/io-unit.c | 152
-rw-r--r--  arch/sparc/mm/iommu.c | 45
-rw-r--r--  arch/sparc/mm/loadmmu.c | 136
-rw-r--r--  arch/sparc/mm/nosrmmu.c | 50
-rw-r--r--  arch/sparc/mm/nosun4c.c | 77
-rw-r--r--  arch/sparc/mm/srmmu.c | 1623
-rw-r--r--  arch/sparc/mm/sun4c.c | 629
-rw-r--r--  arch/sparc/mm/turbosparc.S | 4
-rw-r--r--  arch/sparc/mm/viking.S | 78
-rw-r--r--  arch/sparc/prom/Makefile | 8
-rw-r--r--  arch/sparc/prom/bootstr.c | 8
-rw-r--r--  arch/sparc/prom/console.c | 26
-rw-r--r--  arch/sparc/prom/devmap.c | 13
-rw-r--r--  arch/sparc/prom/devops.c | 17
-rw-r--r--  arch/sparc/prom/init.c | 16
-rw-r--r--  arch/sparc/prom/memory.c | 22
-rw-r--r--  arch/sparc/prom/misc.c | 21
-rw-r--r--  arch/sparc/prom/mp.c | 21
-rw-r--r--  arch/sparc/prom/ranges.c | 4
-rw-r--r--  arch/sparc/prom/segment.c | 9
-rw-r--r--  arch/sparc/prom/sun4prom.c | 161
-rw-r--r--  arch/sparc/prom/tree.c | 30
-rw-r--r--  arch/sparc/vmlinux.lds | 1
91 files changed, 6270 insertions, 2602 deletions
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 7a8d46a07..a4870e117 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.29 1997/07/11 11:05:23 jj Exp $
+# $Id: Makefile,v 1.34 1998/04/06 16:09:34 jj Exp $
# sparc/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
@@ -23,14 +23,18 @@ LINKFLAGS = -T arch/sparc/vmlinux.lds
HEAD := arch/sparc/kernel/head.o arch/sparc/kernel/init_task.o
-SUBDIRS := $(SUBDIRS) arch/sparc/kernel arch/sparc/lib arch/sparc/mm \
- arch/sparc/prom
+# Note arch/sparc/mm has to be the last subdir
+SUBDIRS := $(SUBDIRS) arch/sparc/kernel arch/sparc/lib arch/sparc/prom \
+ arch/sparc/mm
CORE_FILES := arch/sparc/kernel/kernel.o arch/sparc/mm/mm.o $(CORE_FILES)
LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/prom/promlib.a \
$(TOPDIR)/arch/sparc/lib/lib.a
+SUBDIRS += arch/sparc/math-emu
+CORE_FILES += arch/sparc/math-emu/math-emu.o
+
ifdef CONFIG_AP1000
SUBDIRS := $(SUBDIRS) arch/sparc/ap1000 mpp
CORE_FILES := $(TOPDIR)/arch/sparc/ap1000/ap1000lib.o \
@@ -40,11 +44,30 @@ CFLAGS := $(CFLAGS) -D__MPP__=1
endif
archclean:
+ -$(MAKE) -C arch/sparc/boot archclean
+ -$(MAKE) -C arch/sparc/math-emu cleansymlinks
archdep:
+ -$(MAKE) -C arch/sparc/math-emu symlinks
check_asm:
$(MAKE) -C arch/sparc/kernel check_asm
tftpboot.img:
$(MAKE) -C arch/sparc/boot tftpboot.img
+
+vmlinux.o: $(CONFIGURATION) init/main.o init/version.o linuxsubdirs
+ $(LD) -r $(VMLINUX.OBJS) -o vmlinux.o
+
+arch/sparc/boot/btfix.s: arch/sparc/boot/btfixupprep vmlinux.o
+ $(OBJDUMP) -x vmlinux.o | arch/sparc/boot/btfixupprep > arch/sparc/boot/btfix.s
+
+arch/sparc/boot/btfix.o: arch/sparc/boot/btfix.s
+ $(CC) -c -o arch/sparc/boot/btfix.o arch/sparc/boot/btfix.s
+
+arch/sparc/boot/btfixupprep: arch/sparc/boot/btfixupprep.c
+ $(MAKE) -C arch/sparc/boot btfixupprep
+
+vmlinux: arch/sparc/boot/btfix.o
+ $(LD) $(LINKFLAGS) vmlinux.o arch/sparc/boot/btfix.o -o vmlinux
+ $(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aU] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | sort > System.map
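
The five new rules above splice the BTFIXUP pass into the final link: the tree is first pre-linked into vmlinux.o, its relocation records are piped through btfixupprep to produce btfix.s, and the real vmlinux is then linked against the assembled fixup table. As a rough sketch only (the plain tool names stand in for the Makefile's $(LD), $(OBJDUMP), $(CC) and $(NM) variables, and VMLINUX_OBJS for $(VMLINUX.OBJS)), the equivalent manual sequence is:

# Illustrative manual equivalent of the vmlinux/btfix rules above.
ld -r $VMLINUX_OBJS -o vmlinux.o                      # pre-link the whole tree
objdump -x vmlinux.o | arch/sparc/boot/btfixupprep > arch/sparc/boot/btfix.s
cc -c -o arch/sparc/boot/btfix.o arch/sparc/boot/btfix.s
ld -T arch/sparc/vmlinux.lds vmlinux.o arch/sparc/boot/btfix.o -o vmlinux
nm vmlinux | sort > System.map                        # the real rule also filters symbols
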
diff --git a/arch/sparc/ap1000/apmmu.c b/arch/sparc/ap1000/apmmu.c
index 0bafe3fc9..e07b4f4b1 100644
--- a/arch/sparc/ap1000/apmmu.c
+++ b/arch/sparc/ap1000/apmmu.c
@@ -36,16 +36,10 @@
#include <asm/viking.h>
-static unsigned long (*mmu_getpage)(void);
-static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
-static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
-
-static void (*flush_page_for_dma)(unsigned long page);
-static void (*flush_cache_page_to_uncache)(unsigned long page);
-static void (*flush_tlb_page_for_cbit)(unsigned long page);
-
extern void mc_tlb_flush_all(void);
+static void poke_viking(void);
+extern void viking_flush_tlb_page_for_cbit(unsigned long page);
static struct apmmu_stats {
int invall;
@@ -103,11 +97,6 @@ static inline unsigned long apmmu_swap(unsigned long *addr, unsigned long value)
static unsigned int apmmu_pmd_align(unsigned int addr) { return APMMU_PMD_ALIGN(addr); }
static unsigned int apmmu_pgdir_align(unsigned int addr) { return APMMU_PGDIR_ALIGN(addr); }
-static unsigned long apmmu_vmalloc_start(void)
-{
- return APMMU_VMALLOC_START;
-}
-
static inline int apmmu_device_memory(unsigned long x)
{
return ((x & 0xF0000000) != 0);
@@ -152,13 +141,6 @@ static int apmmu_pgd_present(pgd_t pgd)
static void apmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
-static int apmmu_pte_write(pte_t pte) { return pte_val(pte) & APMMU_WRITE; }
-static int apmmu_pte_dirty(pte_t pte) { return pte_val(pte) & APMMU_DIRTY; }
-static int apmmu_pte_young(pte_t pte) { return pte_val(pte) & APMMU_REF; }
-
-static pte_t apmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~APMMU_WRITE);}
-static pte_t apmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~APMMU_DIRTY);}
-static pte_t apmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~APMMU_REF);}
static pte_t apmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | APMMU_WRITE);}
static pte_t apmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | APMMU_DIRTY);}
static pte_t apmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | APMMU_REF);}
@@ -221,7 +203,7 @@ static void apmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
if(tsk->mm->context != NO_CONTEXT) {
flush_cache_mm(current->mm);
- ctxd_set(&apmmu_context_table[tsk->mm->context], pgdp);
+ apmmu_ctxd_set(&apmmu_context_table[tsk->mm->context], pgdp);
flush_tlb_mm(current->mm);
}
}
@@ -311,8 +293,6 @@ static inline unsigned long apmmu_hwprobe(unsigned long vaddr)
return retval;
}
-
-
static inline void apmmu_uncache_page(unsigned long addr)
{
pgd_t *pgdp = apmmu_pgd_offset(init_task.mm, addr);
@@ -330,9 +310,8 @@ static inline void apmmu_uncache_page(unsigned long addr)
}
}
- flush_cache_page_to_uncache(addr);
set_pte(ptep, __pte((pte_val(*ptep) & ~APMMU_CACHE)));
- flush_tlb_page_for_cbit(addr);
+ viking_flush_tlb_page_for_cbit(addr);
}
static inline void apmmu_recache_page(unsigned long addr)
@@ -352,10 +331,10 @@ static inline void apmmu_recache_page(unsigned long addr)
}
}
set_pte(ptep, __pte((pte_val(*ptep) | APMMU_CACHE)));
- flush_tlb_page_for_cbit(addr);
+ viking_flush_tlb_page_for_cbit(addr);
}
-static unsigned long apmmu_getpage(void)
+static inline unsigned long apmmu_getpage(void)
{
unsigned long page = get_free_page(GFP_KERNEL);
@@ -368,13 +347,44 @@ static inline void apmmu_putpage(unsigned long page)
}
/* The easy versions. */
-#define NEW_PGD() (pgd_t *) mmu_getpage()
-#define NEW_PMD() (pmd_t *) mmu_getpage()
-#define NEW_PTE() (pte_t *) mmu_getpage()
+#define NEW_PGD() (pgd_t *) apmmu_getpage()
+#define NEW_PMD() (pmd_t *) apmmu_getpage()
+#define NEW_PTE() (pte_t *) apmmu_getpage()
#define FREE_PGD(chunk) apmmu_putpage((unsigned long)(chunk))
#define FREE_PMD(chunk) apmmu_putpage((unsigned long)(chunk))
#define FREE_PTE(chunk) apmmu_putpage((unsigned long)(chunk))
+static pte_t *apmmu_get_pte_fast(void)
+{
+ return (pte_t *)0;
+}
+
+static pmd_t *apmmu_get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+static pgd_t *apmmu_get_pgd_fast(void)
+{
+ return (pgd_t *)0;
+}
+
+static void apmmu_free_pte_slow(pte_t *pte)
+{
+/* TBD */
+}
+
+static void apmmu_free_pmd_slow(pmd_t *pmd)
+{
+/* TBD */
+}
+
+static void apmmu_free_pgd_slow(pgd_t *pgd)
+{
+/* TBD */
+}
+
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
@@ -392,17 +402,17 @@ static pte_t *apmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
pte_t *page = NEW_PTE();
if(apmmu_pmd_none(*pmd)) {
if(page) {
- pmd_set(pmd, page);
+ apmmu_pmd_set(pmd, page);
return page + address;
}
- pmd_set(pmd, BAD_PAGETABLE);
+ apmmu_pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
FREE_PTE(page);
}
if(apmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
+ apmmu_pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
return (pte_t *) apmmu_pmd_page(*pmd) + address;
@@ -449,17 +459,17 @@ static pte_t *apmmu_pte_alloc(pmd_t * pmd, unsigned long address)
pte_t *page = NEW_PTE();
if(apmmu_pmd_none(*pmd)) {
if(page) {
- pmd_set(pmd, page);
+ apmmu_pmd_set(pmd, page);
return page + address;
}
- pmd_set(pmd, BAD_PAGETABLE);
+ apmmu_pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
FREE_PTE(page);
}
if(apmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
+ apmmu_pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
return ((pte_t *) apmmu_pmd_page(*pmd)) + address;
@@ -525,7 +535,7 @@ static inline void alloc_context(struct task_struct *tsk)
struct mm_struct *mm = tsk->mm;
struct ctx_list *ctxp;
- if (tsk->taskid >= MPP_TASK_BASE) {
+ if (tsk->taskid >= MPP_TASK_BASE) {
mm->context = MPP_CONTEXT_BASE + (tsk->taskid - MPP_TASK_BASE);
return;
}
@@ -570,7 +580,7 @@ static void apmmu_switch_to_context(struct task_struct *tsk)
if(tsk->mm->context == NO_CONTEXT) {
alloc_context(tsk);
flush_cache_mm(current->mm);
- ctxd_set(&apmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ apmmu_ctxd_set(&apmmu_context_table[tsk->mm->context], tsk->mm->pgd);
flush_tlb_mm(current->mm);
}
apmmu_set_context(tsk->mm->context);
@@ -590,29 +600,11 @@ struct task_struct *apmmu_alloc_task_struct(void)
return (struct task_struct *) kmalloc(sizeof(struct task_struct), GFP_KERNEL);
}
-static unsigned long apmmu_alloc_kernel_stack(struct task_struct *tsk)
-{
- unsigned long kstk = __get_free_pages(GFP_KERNEL, 1);
-
- if(!kstk)
- kstk = (unsigned long) vmalloc(PAGE_SIZE << 1);
-
- return kstk;
-}
-
static void apmmu_free_task_struct(struct task_struct *tsk)
{
kfree(tsk);
}
-static void apmmu_free_kernel_stack(unsigned long stack)
-{
- if(stack < VMALLOC_START)
- free_pages(stack, 1);
- else
- vfree((char *)stack);
-}
-
static void apmmu_null_func(void)
{
}
@@ -925,22 +917,20 @@ extern unsigned long sparc_context_init(unsigned long, int);
extern int physmem_mapped_contig;
extern int linux_num_cpus;
-void (*poke_apmmu)(void);
-
__initfunc(unsigned long apmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
{
int i;
physmem_mapped_contig = 1; /* for init.c:taint_real_pages() */
- num_contexts = AP_NUM_CONTEXTS;
+ num_contexts = AP_NUM_CONTEXTS;
mempool = PAGE_ALIGN(start_mem);
memset(swapper_pg_dir, 0, PAGE_SIZE);
apmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
mempool = PAGE_ALIGN(mempool);
map_kernel();
- ap_setup_mappings();
+ ap_setup_mappings();
/* the MSC wants this aligned on a 16k boundary */
apmmu_context_table =
@@ -950,14 +940,14 @@ __initfunc(unsigned long apmmu_paging_init(unsigned long start_mem, unsigned lon
num_contexts*sizeof(ctxd_t));
apmmu_ctx_table_phys = (ctxd_t *) apmmu_v2p((unsigned long) apmmu_context_table);
for(i = 0; i < num_contexts; i++)
- ctxd_set(&apmmu_context_table[i], swapper_pg_dir);
+ apmmu_ctxd_set(&apmmu_context_table[i], swapper_pg_dir);
start_mem = PAGE_ALIGN(mempool);
flush_cache_all();
apmmu_set_ctable_ptr((unsigned long) apmmu_ctx_table_phys);
flush_tlb_all();
- poke_apmmu();
+ poke_viking();
/* on the AP we don't put the top few contexts into the free
context list as these are reserved for parallel tasks */
@@ -967,11 +957,10 @@ __initfunc(unsigned long apmmu_paging_init(unsigned long start_mem, unsigned lon
return PAGE_ALIGN(start_mem);
}
-static char apmmuinfo[512];
-
-static char *apmmu_mmu_info(void)
+static int apmmu_mmu_info(char *buf)
{
- sprintf(apmmuinfo, "MMU type\t: %s\n"
+ return sprintf(buf,
+ "MMU type\t: %s\n"
"invall\t\t: %d\n"
"invmm\t\t: %d\n"
"invrnge\t\t: %d\n"
@@ -984,35 +973,12 @@ static char *apmmu_mmu_info(void)
module_stats.invpg,
num_contexts
);
- return apmmuinfo;
}
static void apmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}
-static void apmmu_exit_hook(void)
-{
- struct mm_struct *mm = current->mm;
-
- if(mm->context != NO_CONTEXT && mm->count == 1) {
- ctxd_set(&apmmu_context_table[mm->context], swapper_pg_dir);
- viking_flush_tlb_mm(mm);
- free_context(mm->context);
- mm->context = NO_CONTEXT;
- }
-}
-
-static void apmmu_flush_hook(void)
-{
- if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- alloc_context(current);
- ctxd_set(&apmmu_context_table[current->mm->context], current->mm->pgd);
- viking_flush_tlb_mm(current->mm);
- apmmu_set_context(current->mm->context);
- }
-}
-
__initfunc(static void poke_viking(void))
{
unsigned long mreg = apmmu_get_mmureg();
@@ -1020,7 +986,7 @@ __initfunc(static void poke_viking(void))
mreg |= VIKING_SPENABLE;
mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
mreg &= ~VIKING_ACENABLE;
- mreg &= ~VIKING_SBENABLE;
+ mreg &= ~VIKING_SBENABLE;
mreg |= VIKING_TCENABLE;
apmmu_set_mmureg(mreg);
}
@@ -1029,24 +995,18 @@ __initfunc(static void init_viking(void))
{
apmmu_name = "TI Viking/AP1000";
- flush_cache_page_to_uncache = apmmu_null_func;
- flush_page_for_dma = apmmu_null_func;
-
- flush_cache_all = apmmu_null_func;
- flush_cache_mm = apmmu_null_func;
- flush_cache_page = apmmu_null_func;
- flush_cache_range = apmmu_null_func;
+ BTFIXUPSET_CALL(flush_cache_all, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_mm, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_page, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_range, apmmu_null_func, BTFIXUPCALL_NOP);
- flush_tlb_all = viking_flush_tlb_all;
- flush_tlb_mm = viking_flush_tlb_mm;
- flush_tlb_page = viking_flush_tlb_page;
- flush_tlb_range = viking_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
- flush_page_to_ram = apmmu_null_func;
- flush_sig_insns = apmmu_null_func;
- flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
-
- poke_apmmu = poke_viking;
+ BTFIXUPSET_CALL(flush_page_to_ram, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, apmmu_null_func, BTFIXUPCALL_NOP);
}
@@ -1062,7 +1022,7 @@ extern unsigned long srmmu_fault;
iaddr = &(insn); \
daddr = &(dest); \
*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
- } while(0);
+ } while(0);
__initfunc(static void patch_window_trap_handlers(void))
{
@@ -1077,113 +1037,109 @@ __initfunc(static void patch_window_trap_handlers(void))
PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}
-/* Load up routines and constants for sun4m mmu */
+/* Load up routines and constants for apmmu */
__initfunc(void ld_mmu_apmmu(void))
{
/* First the constants */
- pmd_shift = APMMU_PMD_SHIFT;
- pmd_size = APMMU_PMD_SIZE;
- pmd_mask = APMMU_PMD_MASK;
- pgdir_shift = APMMU_PGDIR_SHIFT;
- pgdir_size = APMMU_PGDIR_SIZE;
- pgdir_mask = APMMU_PGDIR_MASK;
-
- ptrs_per_pte = APMMU_PTRS_PER_PTE;
- ptrs_per_pmd = APMMU_PTRS_PER_PMD;
- ptrs_per_pgd = APMMU_PTRS_PER_PGD;
-
- page_none = APMMU_PAGE_NONE;
- page_shared = APMMU_PAGE_SHARED;
- page_copy = APMMU_PAGE_COPY;
- page_readonly = APMMU_PAGE_RDONLY;
- page_kernel = APMMU_PAGE_KERNEL;
+ BTFIXUPSET_SIMM13(pmd_shift, APMMU_PMD_SHIFT);
+ BTFIXUPSET_SETHI(pmd_size, APMMU_PMD_SIZE);
+ BTFIXUPSET_SETHI(pmd_mask, APMMU_PMD_MASK);
+ BTFIXUPSET_SIMM13(pgdir_shift, APMMU_PGDIR_SHIFT);
+ BTFIXUPSET_SETHI(pgdir_size, APMMU_PGDIR_SIZE);
+ BTFIXUPSET_SETHI(pgdir_mask, APMMU_PGDIR_MASK);
+
+ BTFIXUPSET_SIMM13(ptrs_per_pte, APMMU_PTRS_PER_PTE);
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, APMMU_PTRS_PER_PMD);
+ BTFIXUPSET_SIMM13(ptrs_per_pgd, APMMU_PTRS_PER_PGD);
+
+ BTFIXUPSET_INT(page_none, pgprot_val(APMMU_PAGE_NONE));
+ BTFIXUPSET_INT(page_shared, pgprot_val(APMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(APMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(APMMU_PAGE_RDONLY));
+ BTFIXUPSET_INT(page_kernel, pgprot_val(APMMU_PAGE_KERNEL));
pg_iobits = APMMU_VALID | APMMU_WRITE | APMMU_REF;
/* Functions */
- mmu_getpage = apmmu_getpage;
- set_pte = apmmu_set_pte_cacheable;
- switch_to_context = apmmu_switch_to_context;
- pmd_align = apmmu_pmd_align;
- pgdir_align = apmmu_pgdir_align;
- vmalloc_start = apmmu_vmalloc_start;
-
- pte_page = apmmu_pte_page;
- pmd_page = apmmu_pmd_page;
- pgd_page = apmmu_pgd_page;
-
- sparc_update_rootmmu_dir = apmmu_update_rootmmu_dir;
-
- pte_none = apmmu_pte_none;
- pte_present = apmmu_pte_present;
- pte_clear = apmmu_pte_clear;
-
- pmd_none = apmmu_pmd_none;
- pmd_bad = apmmu_pmd_bad;
- pmd_present = apmmu_pmd_present;
- pmd_clear = apmmu_pmd_clear;
-
- pgd_none = apmmu_pgd_none;
- pgd_bad = apmmu_pgd_bad;
- pgd_present = apmmu_pgd_present;
- pgd_clear = apmmu_pgd_clear;
-
- mk_pte = apmmu_mk_pte;
- mk_pte_phys = apmmu_mk_pte_phys;
- pgd_set = apmmu_pgd_set;
- mk_pte_io = apmmu_mk_pte_io;
- pte_modify = apmmu_pte_modify;
- pgd_offset = apmmu_pgd_offset;
- pmd_offset = apmmu_pmd_offset;
- pte_offset = apmmu_pte_offset;
- pte_free_kernel = apmmu_pte_free_kernel;
- pmd_free_kernel = apmmu_pmd_free_kernel;
- pte_alloc_kernel = apmmu_pte_alloc_kernel;
- pmd_alloc_kernel = apmmu_pmd_alloc_kernel;
- pte_free = apmmu_pte_free;
- pte_alloc = apmmu_pte_alloc;
- pmd_free = apmmu_pmd_free;
- pmd_alloc = apmmu_pmd_alloc;
- pgd_free = apmmu_pgd_free;
- pgd_alloc = apmmu_pgd_alloc;
- pgd_flush = apmmu_pgd_flush;
-
- pte_write = apmmu_pte_write;
- pte_dirty = apmmu_pte_dirty;
- pte_young = apmmu_pte_young;
- pte_wrprotect = apmmu_pte_wrprotect;
- pte_mkclean = apmmu_pte_mkclean;
- pte_mkold = apmmu_pte_mkold;
- pte_mkwrite = apmmu_pte_mkwrite;
- pte_mkdirty = apmmu_pte_mkdirty;
- pte_mkyoung = apmmu_pte_mkyoung;
- update_mmu_cache = apmmu_update_mmu_cache;
- mmu_exit_hook = apmmu_exit_hook;
- mmu_flush_hook = apmmu_flush_hook;
- mmu_lockarea = apmmu_lockarea;
- mmu_unlockarea = apmmu_unlockarea;
-
- mmu_get_scsi_one = NULL;
- mmu_get_scsi_sgl = NULL;
- mmu_release_scsi_one = NULL;
- mmu_release_scsi_sgl = NULL;
-
- mmu_info = apmmu_mmu_info;
- mmu_v2p = apmmu_v2p;
- mmu_p2v = apmmu_p2v;
+ BTFIXUPSET_CALL(get_pte_fast, apmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(get_pmd_fast, apmmu_get_pmd_fast, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(get_pgd_fast, apmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(free_pte_slow, apmmu_free_pte_slow, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(free_pmd_slow, apmmu_free_pmd_slow, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(free_pgd_slow, apmmu_free_pgd_slow, BTFIXUPCALL_NOP);
- /* Task struct and kernel stack allocating/freeing. */
- alloc_kernel_stack = apmmu_alloc_kernel_stack;
- alloc_task_struct = apmmu_alloc_task_struct;
- free_kernel_stack = apmmu_free_kernel_stack;
- free_task_struct = apmmu_free_task_struct;
+ BTFIXUPSET_CALL(set_pte, apmmu_set_pte_cacheable, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_to_context, apmmu_switch_to_context, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_page, apmmu_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_page, apmmu_pmd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page, apmmu_pgd_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, apmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_SETHI(none_mask, 0xF0000000);
+
+ BTFIXUPSET_CALL(pte_present, apmmu_pte_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, apmmu_pte_clear, BTFIXUPCALL_NORM);
- quick_kernel_fault = apmmu_quick_kernel_fault;
+ BTFIXUPSET_CALL(pmd_bad, apmmu_pmd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_present, apmmu_pmd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, apmmu_pmd_clear, BTFIXUPCALL_NORM);
- ctxd_set = apmmu_ctxd_set;
- pmd_set = apmmu_pmd_set;
+ BTFIXUPSET_CALL(pgd_none, apmmu_pgd_none, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_bad, apmmu_pgd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_present, apmmu_pgd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, apmmu_pgd_clear, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mk_pte, apmmu_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_phys, apmmu_mk_pte_phys, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_io, apmmu_mk_pte_io, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_set, apmmu_pgd_set, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_INT(pte_modify_mask, APMMU_CHG_MASK);
+ BTFIXUPSET_CALL(pgd_offset, apmmu_pgd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_offset, apmmu_pmd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset, apmmu_pte_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free_kernel, apmmu_pte_free_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free_kernel, apmmu_pmd_free_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_kernel, apmmu_pte_alloc_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc_kernel, apmmu_pmd_alloc_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free, apmmu_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc, apmmu_pte_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free, apmmu_pmd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc, apmmu_pmd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_free, apmmu_pgd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_alloc, apmmu_pgd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_flush, apmmu_pgd_flush, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_HALF(pte_writei, APMMU_WRITE);
+ BTFIXUPSET_HALF(pte_dirtyi, APMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_youngi, APMMU_REF);
+ BTFIXUPSET_HALF(pte_wrprotecti, APMMU_WRITE);
+ BTFIXUPSET_HALF(pte_mkcleani, APMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_mkoldi, APMMU_REF);
+ BTFIXUPSET_CALL(pte_mkwrite, apmmu_pte_mkwrite, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkdirty, apmmu_pte_mkdirty, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkyoung, apmmu_pte_mkyoung, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(update_mmu_cache, apmmu_update_mmu_cache, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(mmu_lockarea, apmmu_lockarea, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unlockarea, apmmu_unlockarea, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_get_scsi_one, apmmu_null_func, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(mmu_release_scsi_one, apmmu_null_func, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, apmmu_null_func, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(mmu_info, apmmu_mmu_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_v2p, apmmu_v2p, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_p2v, apmmu_p2v, BTFIXUPCALL_NORM);
+
+ /* Task struct and kernel stack allocating/freeing. */
+ BTFIXUPSET_CALL(alloc_task_struct, apmmu_alloc_task_struct, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_task_struct, apmmu_free_task_struct, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(quick_kernel_fault, apmmu_quick_kernel_fault, BTFIXUPCALL_NORM);
init_viking();
patch_window_trap_handlers();
}
-
-
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index af462db3a..c9301a79e 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.4 1997/07/11 11:05:18 jj Exp $
+# $Id: Makefile,v 1.6 1998/02/23 01:44:39 rth Exp $
# Makefile for the Sparc boot stuff.
#
# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -19,5 +19,11 @@ tftpboot.img: piggyback
piggyback: piggyback.c
$(HOSTCC) $(HOSTCFLAGS) -o piggyback piggyback.c
+btfixupprep: btfixupprep.c
+ $(HOSTCC) $(HOSTCFLAGS) -o btfixupprep btfixupprep.c
+
+archclean:
+ rm -f btfixupprep piggyback tftpboot.img
+
dep:
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
new file mode 100644
index 000000000..1bef965af
--- /dev/null
+++ b/arch/sparc/boot/btfixupprep.c
@@ -0,0 +1,345 @@
+/* $Id: btfixupprep.c,v 1.3 1998/03/09 14:03:10 jj Exp $
+ Simple utility to prepare vmlinux image for sparc.
+ Resolves all BTFIXUP uses and settings and creates
+ a special .s object to link to the image.
+
+ Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <malloc.h>
+
+#define MAXSYMS 1024
+
+static char *relrec = "RELOCATION RECORDS FOR [";
+static int rellen;
+
+struct _btfixup;
+
+typedef struct _btfixuprel {
+ char *sect;
+ unsigned long offset;
+ struct _btfixup *f;
+ int frel;
+ struct _btfixuprel *next;
+} btfixuprel;
+
+typedef struct _btfixup {
+ int type;
+ int setinitval;
+ unsigned int initval;
+ char *initvalstr;
+ char *name;
+ btfixuprel *rel;
+} btfixup;
+
+btfixup array[MAXSYMS];
+int last = 0;
+char buffer[1024];
+unsigned long lastfoffset = -1;
+unsigned long lastfrelno;
+btfixup *lastf;
+
+void fatal(void) __attribute__((noreturn));
+void fatal(void)
+{
+ fprintf(stderr, "Malformed output from objdump\n%s\n", buffer);
+ exit(1);
+}
+
+btfixup *find(int type, char *name)
+{
+ int i;
+ for (i = 0; i < last; i++) {
+ if (array[i].type == type && !strcmp(array[i].name, name))
+ return array + i;
+ }
+ array[last].type = type;
+ array[last].name = strdup(name);
+ array[last].setinitval = 0;
+ if (!array[last].name) fatal();
+ array[last].rel = NULL;
+ last++;
+ if (last >= MAXSYMS) {
+ fprintf(stderr, "Ugh. Something strange. More than %d different BTFIXUP symbols\n", MAXSYMS);
+ exit(1);
+ }
+ return array + last - 1;
+}
+
+int main(int argc,char **argv)
+{
+ char *p, *q;
+ char *sect;
+ int i, j, k;
+ unsigned int initval;
+ int shift;
+ btfixup *f;
+ btfixuprel *r, **rr;
+ unsigned long offset;
+ char *initvalstr;
+
+ rellen = strlen(relrec);
+ while (fgets (buffer, 1024, stdin) != NULL)
+ if (!strncmp (buffer, relrec, rellen))
+ goto main1;
+ fatal();
+main1:
+ sect = malloc(strlen (buffer + rellen) + 1);
+ if (!sect) fatal();
+ strcpy (sect, buffer + rellen);
+ p = strchr (sect, ']');
+ if (!p) fatal();
+ *p = 0;
+ if (fgets (buffer, 1024, stdin) == NULL)
+ fatal();
+ while (fgets (buffer, 1024, stdin) != NULL) {
+ if (!strncmp (buffer, relrec, rellen))
+ goto main1;
+ p = strchr (buffer, '\n');
+ if (p) *p = 0;
+ if (strlen (buffer) < 30)
+ continue;
+ if (strncmp (buffer + 8, " R_SPARC_", 9))
+ continue;
+ if (buffer[27] != '_' || buffer[28] != '_' || buffer[29] != '_')
+ continue;
+ switch (buffer[30]) {
+ case 'f': /* CALL */
+ case 'b': /* BLACKBOX */
+ case 's': /* SIMM13 */
+ case 'a': /* HALF */
+ case 'h': /* SETHI */
+ case 'i': /* INT */
+ break;
+ default:
+ continue;
+ }
+ p = strchr (buffer + 32, '+');
+ if (p) *p = 0;
+ shift = 32;
+ if (buffer[31] == 's' && buffer[32] == '_') {
+ shift = 33;
+ if (strcmp (sect, ".text.init")) {
+ fprintf(stderr, "Wrong use of '%s' BTFIXUPSET.\nBTFIXUPSET_CALL can be used only in __init sections\n", buffer+shift);
+ exit(1);
+ }
+ } else if (buffer[31] != '_')
+ continue;
+ if (strcmp (sect, ".text") && strcmp (sect, ".text.init") && (strcmp (sect, "__ksymtab") || buffer[30] != 'f')) {
+ if (buffer[30] == 'f')
+ fprintf(stderr, "Wrong use of '%s' in '%s' section. It can be only used in .text, .text.init and __ksymtab\n", buffer + shift, sect);
+ else
+ fprintf(stderr, "Wrong use of '%s' in '%s' section. It can be only used in .text and .text.init\n", buffer + shift, sect);
+ exit(1);
+ }
+ p = strstr (buffer + shift, "__btset_");
+ if (p && buffer[31] == 's') {
+ fprintf(stderr, "__btset_ in BTFIXUP name can only be used when defining the variable, not for setting\n%s\n", buffer);
+ exit(1);
+ }
+ initval = 0;
+ initvalstr = NULL;
+ if (p) {
+ if (p[8] != '0' || p[9] != 'x') {
+ fprintf(stderr, "Pre-initialized values can be only initialized with hexadecimal constants starting 0x\n%s\n", buffer);
+ exit(1);
+ }
+ initval = strtoul(p + 10, &q, 16);
+ if (*q || !initval) {
+ fprintf(stderr, "Pre-initialized values can be only in the form name__btset_0xXXXXXXXX where X are hex digits.\nThey cannot be name__btset_0x00000000 though. Use BTFIXUPDEF_XX instead of BTFIXUPDEF_XX_INIT then.\n%s\n", buffer);
+ exit(1);
+ }
+ initvalstr = p + 10;
+ *p = 0;
+ }
+ f = find(buffer[30], buffer + shift);
+ if (buffer[31] == 's')
+ continue;
+ switch (buffer[30]) {
+ case 'f':
+ if (initval) {
+ fprintf(stderr, "Cannot use pre-initalized fixups for calls\n%s\n", buffer);
+ exit(1);
+ }
+ if (!strcmp (sect, "__ksymtab")) {
+ if (strncmp (buffer + 17, "32 ", 10)) {
+ fprintf(stderr, "BTFIXUP_CALL in EXPORT_SYMBOL results in relocation other than R_SPARC_32\n\%s\n", buffer);
+ exit(1);
+ }
+ } else if (strncmp (buffer + 17, "WDISP30 ", 10) &&
+ strncmp (buffer + 17, "HI22 ", 10) &&
+ strncmp (buffer + 17, "LO10 ", 10)) {
+ fprintf(stderr, "BTFIXUP_CALL results in relocation other than R_SPARC_WDISP30, R_SPARC_HI22 or R_SPARC_LO10\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ case 'b':
+ if (initval) {
+ fprintf(stderr, "Cannot use pre-initialized fixups for blackboxes\n%s\n", buffer);
+ exit(1);
+ }
+ if (strncmp (buffer + 17, "HI22 ", 10)) {
+ fprintf(stderr, "BTFIXUP_BLACKBOX results in relocation other than R_SPARC_HI22\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ case 's':
+ if (initval + 0x1000 >= 0x2000) {
+ fprintf(stderr, "Wrong initializer for SIMM13. Has to be from $fffff000 to $00000fff\n%s\n", buffer);
+ exit(1);
+ }
+ if (strncmp (buffer + 17, "13 ", 10)) {
+ fprintf(stderr, "BTFIXUP_SIMM13 results in relocation other than R_SPARC_13\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ case 'a':
+ if (initval + 0x1000 >= 0x2000 && (initval & 0x3ff)) {
+ fprintf(stderr, "Wrong initializer for HALF.\n%s\n", buffer);
+ exit(1);
+ }
+ if (strncmp (buffer + 17, "13 ", 10)) {
+ fprintf(stderr, "BTFIXUP_HALF results in relocation other than R_SPARC_13\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ case 'h':
+ if (initval & 0x3ff) {
+ fprintf(stderr, "Wrong initializer for SETHI. Cannot have set low 10 bits\n%s\n", buffer);
+ exit(1);
+ }
+ if (strncmp (buffer + 17, "HI22 ", 10)) {
+ fprintf(stderr, "BTFIXUP_SETHI results in relocation other than R_SPARC_HI22\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ case 'i':
+ if (initval) {
+ fprintf(stderr, "Cannot use pre-initalized fixups for INT\n%s\n", buffer);
+ exit(1);
+ }
+ if (strncmp (buffer + 17, "HI22 ", 10) && strncmp (buffer + 17, "LO10 ", 10)) {
+ fprintf(stderr, "BTFIXUP_INT results in relocation other than R_SPARC_HI22 and R_SPARC_LO10\n%s\n", buffer);
+ exit(1);
+ }
+ break;
+ }
+ if (!f->setinitval) {
+ f->initval = initval;
+ if (initvalstr) {
+ f->initvalstr = strdup(initvalstr);
+ if (!f->initvalstr) fatal();
+ }
+ f->setinitval = 1;
+ } else if (f->initval != initval) {
+ fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer\n%s\n",
+ f->name, f->initvalstr ? : "0x00000000", buffer);
+ exit(1);
+ } else if (initval && strcmp(f->initvalstr, initvalstr)) {
+ fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer.\n"
+ "Initializers have to match literally as well.\n%s\n",
+ f->name, f->initvalstr, buffer);
+ exit(1);
+ }
+ offset = strtoul(buffer, &q, 16);
+ if (q != buffer + 8 || (!offset && strncmp (buffer, "00000000 ", 9))) {
+ fprintf(stderr, "Malformed relocation address in\n%s\n", buffer);
+ exit(1);
+ }
+ for (k = 0, r = f->rel, rr = &f->rel; r; rr = &r->next, r = r->next, k++)
+ if (r->offset == offset && !strcmp(r->sect, sect)) {
+ fprintf(stderr, "Ugh. One address has two relocation records\n");
+ exit(1);
+ }
+ *rr = malloc(sizeof(btfixuprel));
+ if (!*rr) fatal();
+ (*rr)->offset = offset;
+ (*rr)->f = NULL;
+ if (buffer[30] == 'f') {
+ lastf = f;
+ lastfoffset = offset;
+ lastfrelno = k;
+ } else if (lastfoffset + 4 == offset) {
+ (*rr)->f = lastf;
+ (*rr)->frel = lastfrelno;
+ }
+ (*rr)->sect = sect;
+ (*rr)->next = NULL;
+ }
+ printf("! Generated by btfixupprep. Do not edit.\n\n");
+ printf("\t.section\t\".data.init\",#alloc,#write\n\t.align\t4\n\n");
+ printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
+ for (i = 0; i < last; i++) {
+ f = array + i;
+ printf("\t.global\t___%cs_%s\n", f->type, f->name);
+ if (f->type == 'f')
+ printf("___%cs_%s:\n\t.word 0x%08x,0,0,", f->type, f->name, f->type << 24);
+ else
+ printf("___%cs_%s:\n\t.word 0x%08x,0,", f->type, f->name, f->type << 24);
+ for (j = 0, r = f->rel; r != NULL; j++, r = r->next);
+ if (j)
+ printf("%d\n\t.word\t", j * 2);
+ else
+ printf("0\n");
+ for (r = f->rel, j--; r != NULL; j--, r = r->next) {
+ if (!strcmp (r->sect, ".text"))
+ printf ("_stext+0x%08x", r->offset);
+ else if (!strcmp (r->sect, ".text.init"))
+ printf ("__init_begin+0x%08x", r->offset);
+ else if (!strcmp (r->sect, "__ksymtab"))
+ printf ("__start___ksymtab+0x%08x", r->offset);
+ else
+ fatal();
+ if (f->type == 'f' || !r->f)
+ printf (",0");
+ else
+ printf (",___fs_%s+0x%08x", r->f->name, (4 + r->frel*2)*4 + 4);
+ if (j) printf (",");
+ else printf ("\n");
+ }
+ printf("\n");
+ }
+ printf("\n\t.global\t___btfixup_end\n___btfixup_end:\n");
+ printf("\n\n! Define undefined references\n\n");
+ for (i = 0; i < last; i++) {
+ f = array + i;
+ if (f->type == 'f') {
+ printf("\t.global\t___f_%s\n", f->name);
+ printf("___f_%s:\n", f->name);
+ }
+ }
+ printf("\tretl\n\t nop\n\n");
+ for (i = 0; i < last; i++) {
+ f = array + i;
+ if (f->type != 'f') {
+ if (!f->initval) {
+ printf("\t.global\t___%c_%s\n", f->type, f->name);
+ printf("___%c_%s = 0\n", f->type, f->name);
+ } else {
+ printf("\t.global\t___%c_%s__btset_0x%s\n", f->type, f->name, f->initvalstr);
+ printf("___%c_%s__btset_0x%s = 0x%08x\n", f->type, f->name, f->initvalstr, f->initval);
+ }
+ }
+ }
+ printf("\n\n");
+ exit(0);
+}
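
For orientation, the btfixupprep utility added above is a simple filter: it reads objdump -x output for the pre-linked vmlinux.o on stdin, keeps only the R_SPARC_* relocation records whose symbols carry the ___ BTFIXUP prefix, and writes the fixup table to stdout as SPARC assembly bracketed by ___btfixup_start and ___btfixup_end. A hedged sketch of exercising it by hand (assuming a built vmlinux.o and the cross objdump on the path):

# Sketch: run the new filter standalone and inspect what it works with.
objdump -x vmlinux.o | arch/sparc/boot/btfixupprep > btfix.s
objdump -x vmlinux.o | grep 'R_SPARC_.*___'   # the records the parser acts on
nm vmlinux | grep ___btfixup                  # table markers in the final image
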
diff --git a/arch/sparc/config.in b/arch/sparc/config.in
index 4a087fca2..3f0ab09b6 100644
--- a/arch/sparc/config.in
+++ b/arch/sparc/config.in
@@ -1,4 +1,4 @@
-# $Id: config.in,v 1.51 1998/01/08 04:16:54 baccala Exp $
+# $Id: config.in,v 1.54 1998/03/27 06:59:39 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
@@ -34,6 +34,8 @@ if [ "$CONFIG_AP1000" = "y" ]; then
define_bool CONFIG_APBIF y
tristate 'OPIU DDV Driver' CONFIG_DDV
else
+ bool 'Support for SUN4 machines (disables SUN4[CDM] support)' CONFIG_SUN4
+
# Global things across all Sun machines.
define_bool CONFIG_SBUS y
define_bool CONFIG_SBUSCHAR y
@@ -45,8 +47,13 @@ else
define_bool CONFIG_SUN_CONSOLE y
define_bool CONFIG_SUN_AUXIO y
define_bool CONFIG_SUN_IO y
- source drivers/sbus/char/Config.in
- source drivers/sbus/audio/Config.in
+ if [ "$CONFIG_SUN4" = "y" ]; then
+ bool 'Sun FB drivers appear in PROCFS' SUN_FBS_IN_PROCFS
+ bool 'bwtwo support' SUN_FB_BWTWO
+ else
+ source drivers/sbus/char/Config.in
+ source drivers/sbus/audio/Config.in
+ fi
fi
tristate 'Openprom tree appears in /proc/openprom (EXPERIMENTAL)' CONFIG_SUN_OPENPROMFS
@@ -159,17 +166,6 @@ if [ "$CONFIG_NET" = "y" ]; then
endmenu
fi
-# Conditionally compile in the Uniform CD-ROM driver
-if [ "$CONFIG_BLK_DEV_SR" = "y" ]; then
- define_bool CONFIG_CDROM y
-else
- if [ "$CONFIG_BLK_DEV_SR" = "m" ]; then
- define_bool CONFIG_CDROM m
- else
- define_bool CONFIG_CDROM n
- fi
-fi
-
source fs/Config.in
source fs/nls/Config.in
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
index 38b6096cc..1abeca2d6 100644
--- a/arch/sparc/defconfig
+++ b/arch/sparc/defconfig
@@ -20,11 +20,13 @@ CONFIG_KMOD=y
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
# CONFIG_AP1000 is not set
+# CONFIG_SUN4 is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
CONFIG_SUN_MOUSE=y
CONFIG_SERIAL=y
CONFIG_SUN_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
CONFIG_SUN_KEYBOARD=y
CONFIG_SUN_CONSOLE=y
CONFIG_SUN_AUXIO=y
@@ -80,6 +82,7 @@ CONFIG_MD_RAID5=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_NBD is not set
#
# Networking options
@@ -89,8 +92,8 @@ CONFIG_NETLINK=y
CONFIG_RTNETLINK=y
# CONFIG_NETLINK_DEV is not set
CONFIG_FIREWALL=y
-# CONFIG_NET_SECURITY is not set
CONFIG_NET_ALIAS=y
+# CONFIG_FILTER is not set
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -107,11 +110,18 @@ CONFIG_IP_MASQUERADE=y
#
# Protocol-specific masquerading support will be built as modules.
#
+# CONFIG_IP_MASQUERADE_ICMP is not set
+
+#
+# Protocol-specific masquerading support will be built as modules.
+#
+# CONFIG_IP_MASQUERADE_IPAUTOFW is not set
+# CONFIG_IP_MASQUERADE_IPPORTFW is not set
# CONFIG_IP_ROUTER is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
-CONFIG_IP_ALIAS=m
+CONFIG_IP_ALIAS=y
# CONFIG_ARPD is not set
# CONFIG_SYN_COOKIES is not set
@@ -123,31 +133,39 @@ CONFIG_IP_NOSR=y
CONFIG_SKB_LARGE=y
CONFIG_IPV6=m
# CONFIG_IPV6_EUI64 is not set
-# CONFIG_IPV6_NO_PB is not set
#
#
#
CONFIG_IPX=m
+
+#
+# IPX options
+#
# CONFIG_IPX_INTERN is not set
-# CONFIG_IPX_PPROP_ROUTING is not set
CONFIG_ATALK=m
-# CONFIG_AX25 is not set
CONFIG_X25=m
# CONFIG_LAPB is not set
# CONFIG_BRIDGE is not set
# CONFIG_LLC is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
# CONFIG_CPU_IS_SLOW is not set
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_CSZ=m
-CONFIG_NET_SCH_HFQ=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_PFIFO=y
CONFIG_NET_SCH_PRIO=y
+# CONFIG_NET_PROFILE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
#
# SCSI support
@@ -176,6 +194,21 @@ CONFIG_SCSI_SUNESP=y
CONFIG_SCSI_QLOGICPTI=m
#
+# Fibre Channel support
+#
+CONFIG_FC4=m
+
+#
+# FC4 drivers
+#
+CONFIG_FC4_SOC=m
+
+#
+# FC4 targets
+#
+CONFIG_SCSI_PLUTO=m
+
+#
# Network device support
#
CONFIG_NETDEVICES=y
@@ -193,6 +226,7 @@ CONFIG_SUNLANCE=y
CONFIG_HAPPYMEAL=m
CONFIG_SUNQE=m
CONFIG_MYRI_SBUS=m
+CONFIG_CDROM=y
#
# Filesystems
@@ -211,24 +245,35 @@ CONFIG_NFS_FS=y
CONFIG_NFSD=m
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
+# CONFIG_CODA_FS is not set
CONFIG_SMB_FS=m
CONFIG_SMB_WIN95=y
CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_MOUNT_SUBDIR is not set
CONFIG_HPFS_FS=m
+# CONFIG_NTFS_FS is not set
CONFIG_SYSV_FS=m
CONFIG_AFFS_FS=m
+# CONFIG_HFS_FS is not set
CONFIG_ROMFS_FS=m
CONFIG_AUTOFS_FS=m
CONFIG_AMIGA_PARTITION=y
CONFIG_UFS_FS=y
CONFIG_BSD_DISKLABEL=y
CONFIG_SMD_DISKLABEL=y
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_ADFS_FS is not set
# CONFIG_MAC_PARTITION is not set
+CONFIG_NLS=y
#
# Native Language Support
#
-CONFIG_NLS=y
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 7be8ddd3a..53ed6d340 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.41 1997/11/19 15:11:59 jj Exp $
+# $Id: Makefile,v 1.43 1998/03/09 14:03:34 jj Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -15,8 +15,6 @@ ifdef SMP
.S.o:
$(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o
-CHECKASM_CC = $(CC) -D__SMP__
-
else
.S.s:
@@ -25,7 +23,6 @@ else
.S.o:
$(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
-CHECKASM_CC = $(CC)
endif
all: kernel.o head.o init_task.o
@@ -42,7 +39,7 @@ O_OBJS := entry.o wof.o wuf.o etrap.o rtrap.o traps.o ${IRQ_OBJS} \
OX_OBJS := sparc_ksyms.o
ifdef SMP
-O_OBJS += trampoline.o smp.o
+O_OBJS += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
endif
ifdef CONFIG_SUN_AUXIO
@@ -62,18 +59,61 @@ head.o: head.S
endif
check_asm: dummy
+ @echo "/* Automatically generated. Do not edit. */" > asm_offsets.h
+ @echo "#ifndef __ASM_OFFSETS_H__" >> asm_offsets.h
+ @echo "#define __ASM_OFFSETS_H__" >> asm_offsets.h
+ @echo "" >> asm_offsets.h
+ @echo "#ifndef __SMP__" >> asm_offsets.h
+ @echo "" >> asm_offsets.h
+ @echo "#include <linux/sched.h>" > tmp.c
+ $(CC) -E tmp.c -o tmp.i
+ @echo "/* Automatically generated. Do not edit. */" > check_asm.c
+ @echo "#include <linux/sched.h>" >> check_asm.c
+ @echo 'struct task_struct _task;' >> check_asm.c
+ @echo 'struct mm_struct _mm;' >> check_asm.c
+ @echo 'struct thread_struct _thread;' >> check_asm.c
+ @echo 'int main(void) {' >> check_asm.c
+ $(SH) ./check_asm.sh task tmp.i check_asm.c
+ $(SH) ./check_asm.sh mm tmp.i check_asm.c
+ $(SH) ./check_asm.sh thread tmp.i check_asm.c
+ @echo 'return 0; }' >> check_asm.c
+ @rm -f tmp.[ci]
+ $(CC) -o check_asm check_asm.c
+ ./check_asm >> asm_offsets.h
+ @rm -f check_asm check_asm.c
+ @echo "" >> asm_offsets.h
+ @echo "#else /* __SMP__ */" >> asm_offsets.h
+ @echo "" >> asm_offsets.h
@echo "#include <linux/sched.h>" > tmp.c
- $(CHECKASM_CC) -E tmp.c -o tmp.i
- @echo "/* Automatically generated. Do not edit. */" > check_asm.c; echo "#include <linux/sched.h>" >> check_asm.c; echo 'struct task_struct _task; struct mm_struct _mm; struct thread_struct _thread; int main(void) { printf ("/* Automatically generated. Do not edit. */\n#ifndef __ASM_OFFSETS_H__\n#define __ASM_OFFSETS_H__\n\n");' >> check_asm.c
+ $(CC) -D__SMP__ -E tmp.c -o tmp.i
+ @echo "/* Automatically generated. Do not edit. */" > check_asm.c
+ @echo "#include <linux/sched.h>" >> check_asm.c
+ @echo 'struct task_struct _task;' >> check_asm.c
+ @echo 'struct mm_struct _mm;' >> check_asm.c
+ @echo 'struct thread_struct _thread;' >> check_asm.c
+ @echo 'int main(void) {' >> check_asm.c
$(SH) ./check_asm.sh task tmp.i check_asm.c
$(SH) ./check_asm.sh mm tmp.i check_asm.c
$(SH) ./check_asm.sh thread tmp.i check_asm.c
- @echo 'printf ("\n#endif /* __ASM_OFFSETS_H__ */\n"); return 0; }' >> check_asm.c
+ @echo 'return 0; }' >> check_asm.c
@rm -f tmp.[ci]
- $(CHECKASM_CC) -o check_asm check_asm.c
- ./check_asm > asm_offsets.h
- @if test -r $(HPATH)/asm/asm_offsets.h; then if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then echo $(HPATH)/asm/asm_offsets.h is unchanged; rm -f asm_offsets.h; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi
+ $(CC) -D__SMP__ -o check_asm check_asm.c
+ ./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
+ @echo "" >> asm_offsets.h
+ @echo "#endif /* __SMP__ */" >> asm_offsets.h
+ @echo "" >> asm_offsets.h
+ @echo "#endif /* __ASM_OFFSETS_H__ */" >> asm_offsets.h
+ @if test -r $(HPATH)/asm/asm_offsets.h; then \
+ if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then \
+ echo $(HPATH)/asm/asm_offsets.h is unchanged; \
+ rm -f asm_offsets.h; \
+ else \
+ mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; \
+ fi; \
+ else \
+ mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; \
+ fi
include $(TOPDIR)/Rules.make
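
The reworked check_asm target now generates asm_offsets.h in two passes, a plain one and one with -D__SMP__, and wraps the two offset tables in a single #ifndef __SMP__ / #else guarded header. Condensed into shell, one pass looks roughly like this (cc stands for the kernel's $(CC) with its usual include flags, and check_asm.sh is the existing helper the Makefile already calls; its job of appending the offset-printing statements is inferred here, not shown in this diff):

# One pass of the new check_asm sequence; the SMP pass adds -D__SMP__ throughout.
echo '#include <linux/sched.h>' > tmp.c
cc -E tmp.c -o tmp.i                            # preprocessed struct layouts
{
  echo '/* Automatically generated. Do not edit. */'
  echo '#include <linux/sched.h>'
  echo 'struct task_struct _task;'
  echo 'struct mm_struct _mm;'
  echo 'struct thread_struct _thread;'
  echo 'int main(void) {'
} > check_asm.c
sh ./check_asm.sh task tmp.i check_asm.c        # append offset printfs (assumed)
sh ./check_asm.sh mm tmp.i check_asm.c
sh ./check_asm.sh thread tmp.i check_asm.c
echo 'return 0; }' >> check_asm.c
cc -o check_asm check_asm.c && ./check_asm >> asm_offsets.h
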
diff --git a/arch/sparc/kernel/auxio.c b/arch/sparc/kernel/auxio.c
index 5835347d1..13d34310f 100644
--- a/arch/sparc/kernel/auxio.c
+++ b/arch/sparc/kernel/auxio.c
@@ -17,9 +17,13 @@ __initfunc(void auxio_probe(void))
int node, auxio_nd;
struct linux_prom_registers auxregs[1];
- if (sparc_cpu_model == sun4d) {
+ switch (sparc_cpu_model) {
+ case sun4d:
+ case sun4:
auxio_register = 0;
return;
+ default:
+ break;
}
node = prom_getchild(prom_root_node);
auxio_nd = prom_searchsiblings(node, "auxiliary-io");
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index aeb5a46c8..1ca98407a 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -6,6 +6,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/tasks.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/head.h>
@@ -116,22 +118,25 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
-char *sparc_cpu_type[NCPUS] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
-char *sparc_fpu_type[NCPUS] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
+char *sparc_cpu_type[NR_CPUS] = { 0 };
+char *sparc_fpu_type[NR_CPUS] = { 0 };
unsigned int fsr_storage;
__initfunc(void cpu_probe(void))
{
int psr_impl, psr_vers, fpu_vers;
- int i, cpuid;
+ int i, cpuid, psr;
- cpuid = get_cpuid();
+ cpuid = hard_smp_processor_id();
psr_impl = ((get_psr()>>28)&0xf);
psr_vers = ((get_psr()>>24)&0xf);
+ psr = get_psr();
+ put_psr(psr | PSR_EF);
fpu_vers = ((get_fsr()>>17)&0x7);
+ put_psr(psr);
for(i = 0; i<NSPARCCHIPS; i++) {
if(linux_sparc_chips[i].psr_impl == psr_impl)
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index b9c6495cf..dd4dbb3c6 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -14,7 +14,7 @@
#include <asm/smp.h>
#include <asm/system.h>
-struct prom_cpuinfo linux_cpus[NCPUS];
+struct prom_cpuinfo linux_cpus[NR_CPUS];
int linux_num_cpus;
extern void cpu_probe(void);
@@ -26,7 +26,7 @@ device_scan(unsigned long mem_start))
{
char node_str[128];
int nd, prom_node_cpu, thismid;
- int cpu_nds[NCPUS]; /* One node for each cpu */
+ int cpu_nds[NR_CPUS]; /* One node for each cpu */
int cpu_ctr = 0;
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
@@ -62,11 +62,9 @@ device_scan(unsigned long mem_start))
prom_getstring(node, "device_type", node_str, sizeof(node_str));
if (strcmp(node_str, "cpu") == 0) {
prom_getproperty(node, "cpu-id", (char *) &thismid, sizeof(thismid));
- if (cpu_ctr < NCPUS) {
- cpu_nds[cpu_ctr] = node;
- linux_cpus[cpu_ctr].prom_node = node;
- linux_cpus[cpu_ctr].mid = thismid;
- }
+ cpu_nds[cpu_ctr] = node;
+ linux_cpus[cpu_ctr].prom_node = node;
+ linux_cpus[cpu_ctr].mid = thismid;
prom_printf("Found CPU %d <node=%08lx,mid=%d>\n",
cpu_ctr, (unsigned long) node,
thismid);
@@ -74,8 +72,6 @@ device_scan(unsigned long mem_start))
}
}
}
- if (cpu_ctr > NCPUS)
- cpu_ctr = NCPUS;
}
if(cpu_ctr == 0) {
printk("No CPU nodes found, cannot continue.\n");
@@ -99,7 +95,7 @@ device_scan(unsigned long mem_start))
#endif
clock_stop_probe();
- if (sparc_cpu_model == sun4c)
+ if (ARCH_SUN4C_SUN4)
sun4c_probe_memerr_reg();
return mem_start;
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index d82c098d5..d393a9543 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,10 +1,11 @@
-/* $Id: entry.S,v 1.142 1998/01/07 06:33:47 baccala Exp $
+/* $Id: entry.S,v 1.149 1998/03/19 15:36:30 jj Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
#include <linux/config.h>
@@ -21,9 +22,15 @@
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
+#ifdef CONFIG_SUN4
+#include <asm/pgtsun4.h>
+#else
#include <asm/pgtsun4c.h>
+#endif
#include <asm/winmacro.h>
#include <asm/signal.h>
+#include <asm/obio.h>
+#include <asm/mxcc.h>
#include <asm/asmmacro.h>
@@ -288,10 +295,14 @@ real_irq_entry:
SAVE_ALL
#ifdef __SMP__
+ .globl patchme_maybe_smp_msg
+
cmp %l7, 12
- bgu maybe_smp_msg
+patchme_maybe_smp_msg:
+ bgu maybe_smp4m_msg
nop
#endif
+
real_irq_continue:
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
@@ -309,14 +320,14 @@ patch_handler_irq:
#ifdef __SMP__
/* SMP per-cpu ticker interrupts are handled specially. */
-smp_ticker:
- bne real_irq_continue
+smp4m_ticker:
+ bne real_irq_continue+4
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
- call C_LABEL(smp_percpu_timer_interrupt)
+ call C_LABEL(smp4m_percpu_timer_interrupt)
add %sp, REGWIN_SZ, %o0
wr %l0, PSR_ET, %psr
WRITE_PAUSE
@@ -326,7 +337,7 @@ smp_ticker:
* on some level other than 15 which is the NMI and only used
* for cross calls. That has a seperate entry point below.
*/
-maybe_smp_msg:
+maybe_smp4m_msg:
GET_PROCESSOR_MID(o3, o2)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
@@ -334,7 +345,7 @@ maybe_smp_msg:
sll %o3, 12, %o3
ld [%o5 + %o3], %o1
andcc %o1, %o4, %g0
- be,a smp_ticker
+ be,a smp4m_ticker
cmp %l7, 14
cmp %l7, 13
add %o5, %o3, %o5
@@ -383,7 +394,7 @@ linux_trap_ipi15_sun4m:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
- call C_LABEL(smp_cross_call_irq)
+ call C_LABEL(smp4m_cross_call_irq)
nop
b ret_trap_lockless_ipi
clr %l6
@@ -409,6 +420,64 @@ linux_trap_ipi15_sun4m:
ld [%l5], %g0
WRITE_PAUSE
RESTORE_ALL
+
+ .globl smp4d_ticker
+ /* SMP per-cpu ticker interrupts are handled specially. */
+smp4d_ticker:
+ SAVE_ALL
+ or %l0, PSR_PIL, %g2
+ sethi %hi(CC_ICLR), %o0
+ sethi %hi(1 << 14), %o1
+ or %o0, %lo(CC_ICLR), %o0
+ stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+ wr %g2, PSR_ET, %psr
+ WRITE_PAUSE
+ call C_LABEL(smp4d_percpu_timer_interrupt)
+ add %sp, REGWIN_SZ, %o0
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+ RESTORE_ALL
+
+ .align 4
+ .globl linux_trap_ipi15_sun4d
+linux_trap_ipi15_sun4d:
+ SAVE_ALL
+ sethi %hi(CC_BASE), %o4
+ sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
+ or %o4, (CC_EREG - CC_BASE), %o0
+ ldda [%o0] ASI_M_MXCC, %o0
+ andcc %o0, %o2, %g0
+ bne 1f
+ sethi %hi(BB_STAT2), %o2
+ lduba [%o2] ASI_M_CTL, %o2
+ andcc %o2, BB_STAT2_MASK, %g0
+ bne 2f
+ or %o4, (CC_ICLR - CC_BASE), %o0
+ sethi %hi(1 << 15), %o1
+ stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+ call C_LABEL(smp4d_cross_call_irq)
+ nop
+ b ret_trap_lockless_ipi
+ clr %l6
+
+1: /* MXCC error */
+2: /* BB error */
+ /* Disable PIL 15 */
+ set CC_IMSK, %l4
+ lduha [%l4] ASI_M_MXCC, %l5
+ sethi %hi(1 << 15), %l7
+ or %l5, %l7, %l5
+ stha %l5, [%l4] ASI_M_MXCC
+ /* FIXME */
+1: b,a 1b
+
#endif /* __SMP__ */
/* This routine handles illegal instructions and privileged
@@ -417,6 +486,12 @@ linux_trap_ipi15_sun4m:
.align 4
.globl bad_instruction
bad_instruction:
+ sethi %hi(0xc1f80000), %l4
+ ld [%l1], %l5
+ sethi %hi(0x81d80000), %l7
+ and %l5, %l4, %l5
+ cmp %l5, %l7
+ be 1f
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
@@ -430,6 +505,10 @@ bad_instruction:
RESTORE_ALL
+1: /* unimplemented flush - just skip */
+ jmpl %l2, %g0
+ rett %l2 + 4
+
.align 4
.globl priv_instruction
priv_instruction:
@@ -601,23 +680,6 @@ do_cp_disabled:
RESTORE_ALL
- /* This routine handles Unimplemented FLUSH Exceptions. */
- .align 4
- .globl do_bad_flush
-do_bad_flush:
- SAVE_ALL
-
- wr %l0, PSR_ET, %psr ! re-enable traps
- WRITE_PAUSE
-
- add %sp, REGWIN_SZ, %o0
- mov %l1, %o1
- mov %l2, %o2
- call C_LABEL(handle_bad_flush)
- mov %l0, %o3
-
- RESTORE_ALL
-
/* This routine handles Co-Processor Exceptions. */
.align 4
.globl do_cp_exception
@@ -766,6 +828,12 @@ C_LABEL(invalid_segment_patch1_ff): cmp %l4, 0xff
C_LABEL(invalid_segment_patch2_ff): mov 0xff, %l4
.align 4
+ .globl C_LABEL(invalid_segment_patch1_1ff)
+ .globl C_LABEL(invalid_segment_patch2_1ff)
+C_LABEL(invalid_segment_patch1_1ff): cmp %l4, 0x1ff
+C_LABEL(invalid_segment_patch2_1ff): mov 0x1ff, %l4
+
+ .align 4
.globl C_LABEL(num_context_patch1_16), C_LABEL(num_context_patch2_16)
C_LABEL(num_context_patch1_16): mov 0x10, %l7
C_LABEL(num_context_patch2_16): mov 0x10, %l7
@@ -776,7 +844,17 @@ C_LABEL(vac_linesize_patch_32): subcc %l7, 32, %l7
.align 4
.globl C_LABEL(vac_hwflush_patch1_on), C_LABEL(vac_hwflush_patch2_on)
+
+/*
+ * Ugly, but we can't use hardware flushing on the sun4 and we'd require
+ * two instructions (Anton)
+ */
+#ifdef CONFIG_SUN4
+C_LABEL(vac_hwflush_patch1_on): nop
+#else
C_LABEL(vac_hwflush_patch1_on): subcc %l7, (PAGE_SIZE - 4), %l7
+#endif
+
C_LABEL(vac_hwflush_patch2_on): sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
.globl C_LABEL(invalid_segment_patch1), C_LABEL(invalid_segment_patch2)
@@ -786,11 +864,50 @@ C_LABEL(vac_hwflush_patch2_on): sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
.align 4
.globl sun4c_fault
+
+! %l0 = %psr
+! %l1 = %pc
+! %l2 = %npc
+! %l3 = %wim
+! %l7 = 1 for textfault
+! We want error in %l5, vaddr in %l6
sun4c_fault:
+#ifdef CONFIG_SUN4
+ sethi %hi(C_LABEL(sun4c_memerr_reg)), %l4
+ ld [%l4+%lo(C_LABEL(sun4c_memerr_reg))], %l4 ! memerr ctrl reg addr
+ ld [%l4], %l6 ! memerr ctrl reg
+ ld [%l4 + 4], %l5 ! memerr vaddr reg
+ andcc %l6, 0x80, %g0 ! check for error type
+ st %g0, [%l4 + 4] ! clear the error
+ be 0f ! normal error
+ sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr
+
+ call C_LABEL(prom_halt) ! something weird happened
+ ! what exactly did happen?
+ ! what should we do here?
+
+0: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr
+ lduba [%l4] ASI_CONTROL, %l6 ! bus err reg
+
+ cmp %l7, 1 ! text fault?
+ be 1f ! yes
+ nop
+
+ ld [%l1], %l4 ! load instruction that caused fault
+ srl %l4, 21, %l4
+ andcc %l4, 1, %g0 ! store instruction?
+
+ be 1f ! no
+ sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
+ ! %lo(SUN4C_SYNC_BADWRITE) = 0
+ or %l4, %l6, %l6 ! set write bit to emulate sun4c
+1:
+#else
sethi %hi(AC_SYNC_ERR), %l4
add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
lda [%l6] ASI_CONTROL, %l5 ! Address
lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit
+#endif
andn %l5, 0xfff, %l5 ! Encode all info into l7
srl %l6, 14, %l4
@@ -830,17 +947,21 @@ sun4c_fault:
sethi %hi(SUN4C_VMALLOC_START), %l4
cmp %l5, %l4
blu,a C_LABEL(invalid_segment_patch1)
- lduba [%l5] ASI_SEGMAP, %l4
+ lduXa [%l5] ASI_SEGMAP, %l4
- srl %l5, SUN4C_PGDIR_SHIFT, %l6
sethi %hi(C_LABEL(swapper_pg_dir)), %l4
+ srl %l5, SUN4C_PGDIR_SHIFT, %l6
or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
sll %l6, 2, %l6
ld [%l4 + %l6], %l4
+#ifdef CONFIG_SUN4
+ sethi PAGE_MASK, %l6
+ andcc %l4, %l6, %g0
+#else
andcc %l4, PAGE_MASK, %g0
-
+#endif
be sun4c_fault_fromuser
- lduba [%l5] ASI_SEGMAP, %l4
+ lduXa [%l5] ASI_SEGMAP, %l4
C_LABEL(invalid_segment_patch1):
cmp %l4, 0x7f
@@ -889,7 +1010,11 @@ C_LABEL(invalid_segment_patch1):
ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr
! Flush segment from the cache.
+#ifdef CONFIG_SUN4
+ sethi %hi((128 * 1024)), %l7
+#else
sethi %hi((64 * 1024)), %l7
+#endif
9:
C_LABEL(vac_hwflush_patch1):
C_LABEL(vac_linesize_patch):
@@ -928,7 +1053,7 @@ C_LABEL(invalid_segment_patch2):
deccc %l7
stba %l7, [%l3] ASI_CONTROL
bne 3b
- stba %l4, [%l5] ASI_SEGMAP
+ stXa %l4, [%l5] ASI_SEGMAP
stba %l6, [%l3] ASI_CONTROL
@@ -952,7 +1077,7 @@ C_LABEL(num_context_patch2):
deccc %l7
stba %l7, [%l3] ASI_CONTROL
bne 3b
- stba %l4, [%l5] ASI_SEGMAP
+ stXa %l4, [%l5] ASI_SEGMAP
stba %l6, [%l3] ASI_CONTROL
@@ -988,7 +1113,12 @@ C_LABEL(num_context_patch2):
or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
sll %l3, 2, %l3
ld [%l4 + %l3], %l4
+#ifndef CONFIG_SUN4
and %l4, PAGE_MASK, %l4
+#else
+ sethi PAGE_MASK, %l6
+ and %l4, %l6, %l4
+#endif
srl %l5, (PAGE_SHIFT - 2), %l6
and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
@@ -1640,9 +1770,8 @@ C_LABEL(udelay):
call .umul
ld [%o3 + %lo(C_LABEL(loops_per_sec))], %o1
#else
- GET_PROCESSOR_OFFSET(o4)
+ GET_PROCESSOR_OFFSET(o4, o2)
set C_LABEL(cpu_data), %o3
- sll %o4, 1, %o4
call .umul
ld [%o3 + %o4], %o1
#endif
@@ -1718,4 +1847,11 @@ kuw_patch1:
retl ! return
st %g0, [%g6 + AOFF_task_tss + AOFF_thread_w_saved] ! no windows saved
+ .align 4
+ .globl C_LABEL(restore_current)
+C_LABEL(restore_current):
+ LOAD_CURRENT(g6, o0)
+ retl
+ nop
+
/* End of entry.S */
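Editor's note on the sun4c_fault hunk above: the CONFIG_SUN4 path decides whether the faulting access was a store by loading the instruction at the fault PC and testing bit 21, which separates stores from loads in the basic SPARC format-3 memory opcodes (the srl/andcc pair). A minimal C sketch of that test, purely illustrative and not part of the patch:

	/* Hypothetical helper mirroring the srl/andcc pair in sun4c_fault:
	 * bit 21 of a SPARC format-3 integer load/store opcode is set for
	 * the store variants (st/stb/sth/std) and clear for the loads.
	 */
	static int insn_is_store(unsigned int insn)
	{
		return (insn >> 21) & 1;
	}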
diff --git a/arch/sparc/kernel/etrap.S b/arch/sparc/kernel/etrap.S
index fc6c4cead..5496b061e 100644
--- a/arch/sparc/kernel/etrap.S
+++ b/arch/sparc/kernel/etrap.S
@@ -1,4 +1,4 @@
-/* $Id: etrap.S,v 1.26 1997/05/01 08:53:32 davem Exp $
+/* $Id: etrap.S,v 1.29 1998/02/09 13:48:40 jj Exp $
* etrap.S: Sparc trap window preparation for entry into the
* Linux kernel.
*
@@ -13,6 +13,7 @@
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
/* Registers to not touch at all. */
#define t_psr l0 /* Set by caller */
@@ -126,13 +127,13 @@ tsetup_patch2:
jmpl %t_retpc + 0x8, %g0 ! return to caller
mov %t_kstack, %sp ! and onto new kernel stack
+#define STACK_OFFSET (TASK_UNION_SIZE - (TRACEREG_SZ + REGWIN_SZ))
trap_setup_from_user:
/* We can't use %curptr yet. */
LOAD_CURRENT(t_kstack, t_twinmask)
- mov 1, %t_twinmask
- sll %t_twinmask, (PAGE_SHIFT + 1), %t_twinmask
- sub %t_twinmask, (TRACEREG_SZ + REGWIN_SZ), %t_twinmask
+ sethi %hi(STACK_OFFSET), %t_twinmask
+ or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
add %t_kstack, %t_twinmask, %t_kstack
mov 1, %t_twinmask
@@ -141,11 +142,18 @@ trap_setup_from_user:
/* Build pt_regs frame. */
STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
- /* Clear current->tss.w_saved */
- mov 1, %curptr
- sll %curptr, (PAGE_SHIFT + 1), %curptr
- sub %curptr, (TRACEREG_SZ + REGWIN_SZ), %curptr
+#if 0
+ /* If we're sure every task_struct is TASK_UNION_SIZE aligned,
+ we can speed this up. */
+ sethi %hi(STACK_OFFSET), %curptr
+ or %curptr, %lo(STACK_OFFSET), %curptr
sub %t_kstack, %curptr, %curptr
+#else
+ sethi %hi(~(TASK_UNION_SIZE - 1)), %curptr
+ and %t_kstack, %curptr, %curptr
+#endif
+
+ /* Clear current->tss.w_saved */
st %g0, [%curptr + AOFF_task_tss + AOFF_thread_w_saved]
/* See if we are in the trap window. */
@@ -269,9 +277,8 @@ tsetup_sun4c_onepage:
.globl C_LABEL(tsetup_srmmu_stackchk)
C_LABEL(tsetup_srmmu_stackchk):
/* Check results of callers andcc %sp, 0x7, %g0 */
- sethi %hi(C_LABEL(page_offset)), %glob_tmp
bne trap_setup_user_stack_is_bolixed
- ld [%glob_tmp + %lo(C_LABEL(page_offset))], %glob_tmp
+ GET_PAGE_OFFSET(glob_tmp)
cmp %glob_tmp, %sp
bleu,a 1f
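Editor's note on the trap_setup_from_user change above: the rewritten path recovers the current task pointer by masking the kernel stack address with the task-union alignment, replacing the old shift/subtract sequence. A small C sketch of the idea, assuming the 8 KB TASK_UNION_SIZE mentioned elsewhere in this patch (names are illustrative):

	#define TASK_UNION_SIZE 0x2000UL	/* assumed: 8 KB task_union, per the alignment note */

	/* Any address inside the current kernel stack maps back to the
	 * task_union base with a single mask -- the same effect as the
	 * sethi/and pair in trap_setup_from_user.
	 */
	static void *current_from_kstack(unsigned long kstack_addr)
	{
		return (void *)(kstack_addr & ~(TASK_UNION_SIZE - 1));
	}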
diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S
index 2de97e92b..1536a5e55 100644
--- a/arch/sparc/kernel/head.S
+++ b/arch/sparc/kernel/head.S
@@ -1,10 +1,11 @@
-/* $Id: head.S,v 1.84 1997/11/19 15:12:01 jj Exp $
+/* $Id: head.S,v 1.90 1998/03/24 18:12:05 jj Exp $
* head.S: The initial boot code for the Sparc port of Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Peter Zaitcev (Zaitcev@ipmce.su)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*/
#include <linux/version.h>
@@ -60,9 +61,16 @@ C_LABEL(cputypvar_sun4m):
.asciz "compatible"
.align 4
+
+#ifndef CONFIG_SUN4
sun4_notsup:
- .asciz "Sparc-Linux sun4 support not implemented yet\n\n"
+ .asciz "Sparc-Linux sun4 needs a specially compiled kernel, turn CONFIG_SUN4 on.\n\n"
+ .align 4
+#else
+sun4cdm_notsup:
+ .asciz "Kernel compiled with CONFIG_SUN4 cannot run on SUN4C/SUN4M/SUN4D\nTurn CONFIG_SUN4 off.\n\n"
.align 4
+#endif
sun4e_notsup:
.asciz "Sparc-Linux sun4e support does not exist\n\n"
@@ -111,13 +119,14 @@ t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
#ifndef __SMP__
t_nmi: NMI_TRAP /* Level 15 (NMI) */
#else
- TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ .globl t_nmi
+t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
#endif
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
-t_uflsh:TRAP_ENTRY(0x25, do_bad_flush) /* Unimplemented FLUSH inst. */
+t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
t_dacce:SPARC_DFAULT /* Data Access Error */
@@ -205,7 +214,7 @@ C_LABEL(trapbase_cpu1):
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
- BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
@@ -273,7 +282,7 @@ C_LABEL(trapbase_cpu2):
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
- BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
@@ -341,7 +350,7 @@ C_LABEL(trapbase_cpu3):
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
- BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
@@ -394,28 +403,26 @@ C_LABEL(trapbase_cpu3):
BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
#endif
- .skip 4096
+ .align 4096
/* This was the only reasonable way I could think of to properly align
* these page-table data structures.
*/
.globl C_LABEL(bootup_user_stack)
- .globl C_LABEL(bootup_kernel_stack)
.globl C_LABEL(pg0), C_LABEL(pg1), C_LABEL(pg2), C_LABEL(pg3)
.globl C_LABEL(empty_bad_page)
.globl C_LABEL(empty_bad_page_table)
.globl C_LABEL(empty_zero_page)
.globl C_LABEL(swapper_pg_dir)
C_LABEL(bootup_user_stack): .skip 0x2000
-C_LABEL(bootup_kernel_stack): .skip 0x2000
-C_LABEL(swapper_pg_dir): .skip 0x1000
-C_LABEL(pg0): .skip 0x1000
-C_LABEL(pg1): .skip 0x1000
-C_LABEL(pg2): .skip 0x1000
-C_LABEL(pg3): .skip 0x1000
-C_LABEL(empty_bad_page): .skip 0x1000
-C_LABEL(empty_bad_page_table): .skip 0x1000
-C_LABEL(empty_zero_page): .skip 0x1000
+C_LABEL(swapper_pg_dir): .skip PAGE_SIZE
+C_LABEL(pg0): .skip PAGE_SIZE
+C_LABEL(pg1): .skip PAGE_SIZE
+C_LABEL(pg2): .skip PAGE_SIZE
+C_LABEL(pg3): .skip PAGE_SIZE
+C_LABEL(empty_bad_page): .skip PAGE_SIZE
+C_LABEL(empty_bad_page_table): .skip PAGE_SIZE
+C_LABEL(empty_zero_page): .skip PAGE_SIZE
.global C_LABEL(root_flags)
.global C_LABEL(ram_flags)
@@ -778,18 +785,24 @@ execute_in_high_mem:
* your code. Sun probably still does that because they don't even
* trust their own "OpenBoot" specifications.
*/
-
set LOAD_ADDR, %g6
cmp %o0, %g6 ! an old sun4?
- be no_sun4_here
+ be sun4_init
nop
found_version:
-
+#ifdef CONFIG_SUN4
+/* For people who try sun4 kernels, even though Configure.help advises otherwise. */
+ ld [%g7 + 0x68], %o1
+ set sun4cdm_notsup, %o0
+ call %o1
+ nop
+ b halt_me
+ nop
+#endif
/* Get the machine type via the mysterious romvec node operations. */
- or %g0, %g7, %l1
- add %l1, 0x1c, %l1
+ add %g7, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
call %l0
@@ -825,10 +838,11 @@ got_prop:
set C_LABEL(cputypval), %o2
ldub [%o2 + 0x4], %l1
- cmp %l1, 'c' ! We already know we are not
- be 1f ! on a plain sun4 because of
- ! the check for 0x4000 in %o0
- cmp %l1, 'm' ! at start
+ cmp %l1, ' '
+ be 1f
+ cmp %l1, 'c'
+ be 1f
+ cmp %l1, 'm'
be 1f
cmp %l1, 'd'
be 1f
@@ -853,6 +867,9 @@ got_prop:
b sun4c_continue_boot
nop
+/* CPUID in bootbus can be found at PA 0xff0140000 */
+#define SUN4D_BOOTBUS_CPUID 0xf0140000
+
sun4d_init:
/* Need to patch call to handler_irq */
set C_LABEL(patch_handler_irq), %g4
@@ -862,6 +879,21 @@ sun4d_init:
srl %g5, 2, %g5
or %g5, %g3, %g5
st %g5, [%g4]
+
+#ifdef __SMP__
+ /* Get our CPU id out of bootbus */
+ set SUN4D_BOOTBUS_CPUID, %g3
+ lduba [%g3] ASI_M_CTL, %g3
+ and %g3, 0xf8, %g3
+ srl %g3, 3, %g4
+ sta %g4, [%g0] ASI_M_VIKING_TMP1
+ sethi %hi(boot_cpu_id), %g5
+ stb %g4, [%g5 + %lo(boot_cpu_id)]
+ sll %g4, 2, %g4
+ sethi %hi(boot_cpu_id4), %g5
+ stb %g4, [%g5 + %lo(boot_cpu_id4)]
+#endif
+
/* Fall through to sun4m_init */
sun4m_init:
@@ -974,7 +1006,8 @@ sun4c_continue_boot:
/* I want a kernel stack NOW! */
set C_LABEL(bootup_user_stack), %g1
- add %g1, (PAGE_SIZE - REGWIN_SZ), %sp
+ set (0x2000 - REGWIN_SZ), %g2
+ add %g1, %g2, %sp
mov 0, %fp /* And for good luck */
/* Zero out our BSS section. */
@@ -988,10 +1021,16 @@ sun4c_continue_boot:
add %o0, 0x1, %o0
/* Initialize the umask value for init_task just in case.
- * But first make current_set[0] point to something useful.
+ * But first make current_set[boot_cpu_id] point to something useful.
*/
set C_LABEL(init_task_union), %g6
set C_LABEL(current_set), %g2
+#ifdef __SMP__
+ sethi %hi(C_LABEL(boot_cpu_id4)), %g3
+ ldub [%g3 + %lo(C_LABEL(boot_cpu_id4))], %g3
+ st %g6, [%g2]
+ add %g2, %g3, %g2
+#endif
st %g6, [%g2]
st %g0, [%g6 + AOFF_task_tss + AOFF_thread_uwinmask]
@@ -1114,19 +1153,28 @@ sun4c_continue_boot:
call halt_me
nop
+sun4_init:
+#ifdef CONFIG_SUN4
/* There, happy now Adrian? */
+ set C_LABEL(cputypval), %o2 ! Let everyone know we
+ set ' ', %o0 ! are a "sun4 " architecture
+ stb %o0, [%o2 + 0x4]
- /* XXX Fix this... XXX */
-no_sun4_here:
- sethi %hi(SUN4_PROM_VECTOR+SUN4_PRINTF), %o1
- ld [%o1 + %lo(SUN4_PROM_VECTOR+SUN4_PRINTF)], %o1
- set sun4_notsup, %o0
- call %o1
+ b got_prop
nop
-1:
- ba 1b ! Cannot exit into KMON
+#else
+ sethi %hi(SUN4_PROM_VECTOR+0x84), %o1
+ ld [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1
+ set sun4_notsup, %o0
+ call %o1 /* printf */
nop
-
+ sethi %hi(SUN4_PROM_VECTOR+0xc4), %o1
+ ld [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1
+ call %o1 /* exittomon */
+ nop
+1: ba 1b ! Cannot exit into KMON
+ nop
+#endif
no_sun4e_here:
ld [%g7 + 0x68], %o1
set sun4e_notsup, %o0
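Editor's note on the sun4d_init hunk above: on sun4d the boot CPU id is read from a bootbus register, bits 7..3 of that byte hold the id, and the code also caches id << 2 (boot_cpu_id4) for use as a word-array offset. A short C sketch of the same arithmetic, illustrative only:

	/* Mirrors the and/srl/sll sequence in sun4d_init. */
	static void decode_bootbus_cpuid(unsigned char bootbus_byte,
					 unsigned char *id, unsigned char *id4)
	{
		*id  = (bootbus_byte & 0xf8) >> 3;	/* CPU id lives in bits 7..3 */
		*id4 = (unsigned char)(*id << 2);	/* id * 4: offset into 32-bit slots */
	}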
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index 1829daeea..506a98622 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -11,9 +11,9 @@ static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM;
-/* .text section in head.S is aligned at 2 page boundry and this gets linked
+/* .text section in head.S is aligned at an 8k boundary and this gets linked
* right after that so that the init_task_union is aligned properly as well.
- * We really don't need this special alignment like the Intel does, but
- * I do it anyways for completeness.
+ * If this is not aligned on an 8k boundary, then you should change the code
+ * in etrap.S which assumes it.
*/
union task_union init_task_union __attribute__((__section__(".text"))) = { INIT_TASK };
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 08c0be5c6..b29eca496 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -1,4 +1,4 @@
-/* $Id: irq.c,v 1.77 1997/11/19 15:33:05 jj Exp $
+/* $Id: irq.c,v 1.85 1998/03/09 14:03:40 jj Exp $
* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Sparc the IRQ's are basically 'cast in stone'
* and you are supposed to probe the prom's device
@@ -67,22 +67,9 @@ static void irq_panic(void)
prom_halt();
}
-void (*enable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
-void (*disable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
-void (*enable_pil_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
-void (*disable_pil_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
-void (*clear_clock_irq)(void) = irq_panic;
-void (*clear_profile_irq)(int) = (void (*)(int)) irq_panic;
-void (*load_profile_irq)(int, unsigned int) = (void (*)(int, unsigned int)) irq_panic;
void (*init_timers)(void (*)(int, void *,struct pt_regs *)) =
(void (*)(void (*)(int, void *,struct pt_regs *))) irq_panic;
-#ifdef __SMP__
-void (*set_cpu_int)(int, int);
-void (*clear_cpu_int)(int, int);
-void (*set_irq_udt)(int);
-#endif
-
/*
* Dave Redman (djhr@tadpole.co.uk)
*
@@ -109,6 +96,9 @@ int get_irq_list(char *buf)
{
int i, len = 0;
struct irqaction * action;
+#ifdef __SMP__
+ int j;
+#endif
if (sparc_cpu_model == sun4d) {
extern int sun4d_get_irq_list(char *);
@@ -119,8 +109,15 @@ int get_irq_list(char *buf)
action = *(i + irq_action);
if (!action)
continue;
- len += sprintf(buf+len, "%2d: %8d %c %s",
- i, kstat.interrupts[i],
+ len += sprintf(buf+len, "%3d: ", i);
+#ifndef __SMP__
+ len += sprintf(buf+len, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < smp_num_cpus; j++)
+ len += sprintf(buf+len, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+#endif
+ len += sprintf(buf+len, " %c %s",
(action->flags & SA_INTERRUPT) ? '+' : ' ',
action->name);
for (action=action->next; action; action = action->next) {
@@ -280,7 +277,7 @@ static inline void get_irqlock(int cpu, unsigned long where)
do {
STUCK;
barrier();
- } while (*((unsigned char *)&global_irq_lock));
+ } while (*((volatile unsigned char *)&global_irq_lock));
} while (!spin_trylock(&global_irq_lock));
}
/*
@@ -352,7 +349,7 @@ void irq_enter(int cpu, int irq, void *_opaque)
hardirq_enter(cpu);
barrier();
- while (*((unsigned char *)&global_irq_lock)) {
+ while (*((volatile unsigned char *)&global_irq_lock)) {
if ((unsigned char) cpu == global_irq_holder) {
struct pt_regs *regs = _opaque;
int sbh_cnt = atomic_read(&__sparc_bh_counter);
@@ -436,18 +433,20 @@ void handler_irq(int irq, struct pt_regs * regs)
struct irqaction * action;
int cpu = smp_processor_id();
#ifdef __SMP__
- extern void smp_irq_rotate(int cpu);
+ extern void smp4m_irq_rotate(int cpu);
#endif
-
+
disable_pil_irq(irq);
+#if 0 /* FIXME: rotating IRQs halts the machine during SCSI probe. -ecd */
#ifdef __SMP__
/* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
if(irq < 10)
- smp_irq_rotate(cpu);
+ smp4m_irq_rotate(cpu);
+#endif
#endif
irq_enter(cpu, irq, regs);
action = *(irq + irq_action);
- kstat.interrupts[irq]++;
+ kstat.irqs[cpu][irq]++;
do {
if (!action || !action->handler)
unexpected_irq(irq, 0, regs);
@@ -467,6 +466,7 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
disable_pil_irq(irq);
irq_enter(cpu, irq, regs);
+ kstat.irqs[cpu][irq]++;
floppy_interrupt(irq, dev_id, regs);
irq_exit(cpu, irq);
enable_pil_irq(irq);
@@ -667,6 +667,7 @@ __initfunc(void init_IRQ(void))
switch(sparc_cpu_model) {
case sun4c:
+ case sun4:
sun4c_init_IRQ();
break;
@@ -688,4 +689,5 @@ __initfunc(void init_IRQ(void))
prom_printf("Cannot initialize IRQ's on this Sun machine...");
break;
}
+ btfixup();
}
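Editor's note on the get_irqlock()/irq_enter() hunks above: the added casts make the busy-wait read global_irq_lock through a volatile lvalue, so the compiler reloads the byte on every iteration instead of spinning on a value cached in a register. A minimal standalone sketch of the pattern, with a placeholder lock variable:

	static unsigned char global_irq_lock;	/* placeholder; the real one lives elsewhere */

	static void wait_for_irq_lock_release(void)
	{
		/* Without the volatile-qualified access the load could be
		 * hoisted out of the loop and the release never observed.
		 */
		while (*(volatile unsigned char *)&global_irq_lock)
			;			/* spin until the holder clears the byte */
	}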
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index 32feff3de..8d4c29e62 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -1,4 +1,4 @@
-/* $Id: process.c,v 1.102 1997/12/01 03:36:31 davem Exp $
+/* $Id: process.c,v 1.110 1998/04/08 16:15:51 jj Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -40,6 +40,7 @@
#include <asm/elf.h>
extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
+extern void srmmu_check_pgt_cache(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
@@ -62,7 +63,7 @@ asmlinkage int sys_idle(void)
current->priority = -100;
current->counter = -100;
for (;;) {
- if (sparc_cpu_model == sun4c) {
+ if (ARCH_SUN4C_SUN4) {
static int count = HZ;
static unsigned long last_jiffies = 0;
static unsigned long last_faults = 0;
@@ -91,7 +92,9 @@ asmlinkage int sys_idle(void)
}
}
restore_flags(flags);
- }
+ check_pgt_cache();
+ } else
+ srmmu_check_pgt_cache();
schedule();
}
ret = 0;
@@ -109,6 +112,7 @@ int cpu_idle(void *unused)
current->priority = -100;
while(1) {
+ srmmu_check_pgt_cache();
/*
* tq_scheduler currently assumes we're running in a process
* context (ie that we hold the kernel lock..)
@@ -187,12 +191,12 @@ void machine_power_off(void)
void show_regwindow(struct reg_window *rw)
{
- printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx\n"
+ printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
"l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
- printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx\n"
- "i4: %08lx i5: %08lx i6: %08lx i7: %08lx\n",
+ printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
+ "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}
@@ -201,15 +205,13 @@ void show_regwindow(struct reg_window *rw)
static spinlock_t sparc_backtrace_lock = SPIN_LOCK_UNLOCKED;
#endif
-void show_backtrace(void)
+void __show_backtrace(unsigned long fp)
{
struct reg_window *rw;
unsigned long flags;
- unsigned long fp;
int cpu = smp_processor_id();
spin_lock_irqsave(&sparc_backtrace_lock, flags);
- __asm__ __volatile__("mov %%i6, %0" : "=r" (fp));
rw = (struct reg_window *) fp;
while(rw) {
printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
@@ -223,6 +225,31 @@ void show_backtrace(void)
spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
}
+void show_backtrace(void)
+{
+ unsigned long fp;
+
+ __asm__ __volatile__(
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "save %%sp, -64, %%sp\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "restore\n\t"
+ "mov %%i6, %0" : "=r" (fp));
+ __show_backtrace(fp);
+}
+
#ifdef __SMP__
void smp_show_backtrace_all_cpus(void)
{
@@ -236,15 +263,15 @@ void show_stackframe(struct sparc_stackf *sf)
unsigned long *stk;
int i;
- printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx\n"
+ printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
"l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
- printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx\n"
- "i4: %08lx i5: %08lx fp: %08lx ret_pc: %08lx\n",
+ printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
+ "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
- printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx\n"
+ printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
"x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
(unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
@@ -265,36 +292,32 @@ void show_regs(struct pt_regs * regs)
#endif
printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx\n", regs->psr,
regs->pc, regs->npc, regs->y);
- printk("g0: %08lx g1: %08lx g2: %08lx g3: %08lx\n",
+ printk("g0: %08lx g1: %08lx g2: %08lx g3: %08lx ",
regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
regs->u_regs[3]);
printk("g4: %08lx g5: %08lx g6: %08lx g7: %08lx\n",
regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
regs->u_regs[7]);
- printk("o0: %08lx o1: %08lx o2: %08lx o3: %08lx\n",
+ printk("o0: %08lx o1: %08lx o2: %08lx o3: %08lx ",
regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
regs->u_regs[11]);
- printk("o4: %08lx o5: %08lx sp: %08lx ret_pc: %08lx\n",
+ printk("o4: %08lx o5: %08lx sp: %08lx o7: %08lx\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
show_regwindow((struct reg_window *)regs->u_regs[14]);
}
+#if NOTUSED
void show_thread(struct thread_struct *tss)
{
int i;
- printk("uwinmask: 0x%08lx\n", tss->uwinmask);
- printk("kregs: 0x%08lx\n", (unsigned long)tss->kregs);
+ printk("uwinmask: 0x%08lx kregs: 0x%08lx\n", tss->uwinmask, (unsigned long)tss->kregs);
show_regs(tss->kregs);
- printk("sig_address: 0x%08lx\n", tss->sig_address);
- printk("sig_desc: 0x%08lx\n", tss->sig_desc);
- printk("ksp: 0x%08lx\n", tss->ksp);
- printk("kpc: 0x%08lx\n", tss->kpc);
- printk("kpsr: 0x%08lx\n", tss->kpsr);
- printk("kwim: 0x%08lx\n", tss->kwim);
- printk("fork_kpsr: 0x%08lx\n", tss->fork_kpsr);
- printk("fork_kwim: 0x%08lx\n", tss->fork_kwim);
+ printk("sig_address: 0x%08lx sig_desc: 0x%08lx\n", tss->sig_address, tss->sig_desc);
+ printk("ksp: 0x%08lx kpc: 0x%08lx\n", tss->ksp, tss->kpc);
+ printk("kpsr: 0x%08lx kwim: 0x%08lx\n", tss->kpsr, tss->kwim);
+ printk("fork_kpsr: 0x%08lx fork_kwim: 0x%08lx\n", tss->fork_kpsr, tss->fork_kwim);
for (i = 0; i < NSWINS; i++) {
if (!tss->rwbuf_stkptrs[i])
@@ -306,19 +329,19 @@ void show_thread(struct thread_struct *tss)
printk("w_saved: 0x%08lx\n", tss->w_saved);
/* XXX missing: float_regs */
- printk("fsr: 0x%08lx\n", tss->fsr);
- printk("fpqdepth: 0x%08lx\n", tss->fpqdepth);
+ printk("fsr: 0x%08lx fpqdepth: 0x%08lx\n", tss->fsr, tss->fpqdepth);
/* XXX missing: fpqueue */
- printk("sstk_info.stack: 0x%08lx\n",
- (unsigned long)tss->sstk_info.the_stack);
- printk("sstk_info.status: 0x%08lx\n",
- (unsigned long)tss->sstk_info.cur_status);
- printk("flags: 0x%08lx\n", tss->flags);
- printk("current_ds: 0x%08x\n", tss->current_ds);
+ printk("sstk_info.stack: 0x%08lx sstk_info.status: 0x%08lx\n",
+ (unsigned long)tss->sstk_info.the_stack,
+ (unsigned long)tss->sstk_info.cur_status);
+ printk("flags: 0x%08lx current_ds: 0x%08lx\n", tss->flags, tss->current_ds.seg);
+
+ show_regwindow((struct reg_window *)tss->ksp);
/* XXX missing: core_exec */
}
+#endif
/*
* Free current thread data structures etc..
@@ -367,8 +390,11 @@ void flush_thread(void)
}
/* Now, this task is no longer a kernel thread. */
- current->tss.flags &= ~SPARC_FLAG_KTHREAD;
current->tss.current_ds = USER_DS;
+ if (current->tss.flags & SPARC_FLAG_KTHREAD) {
+ current->tss.flags &= ~SPARC_FLAG_KTHREAD;
+ switch_to_context(current);
+ }
}
static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src)
@@ -475,7 +501,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
}
/* Calculate offset to stack_frame & pt_regs */
- stack_offset = ((PAGE_SIZE<<1) - TRACEREG_SZ);
+ stack_offset = TASK_UNION_SIZE - TRACEREG_SZ;
if(regs->psr & PSR_PS)
stack_offset -= REGWIN_SZ;
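Editor's note on the backtrace rework above: show_backtrace() now performs eight nested save/restore pairs to force the in-register windows out to the stack, then hands %i6 to __show_backtrace(), which walks the chain of saved windows: each frame begins with the caller's eight locals and eight ins, ins[6] is the saved frame pointer and ins[7] the return address. A simplified C sketch of the walk (field layout mirrors struct reg_window; treat it as illustrative):

	struct reg_window_sketch {
		unsigned long locals[8];
		unsigned long ins[8];	/* ins[6] = saved %fp, ins[7] = return address */
	};

	static void walk_stack_frames(unsigned long fp)
	{
		struct reg_window_sketch *rw = (struct reg_window_sketch *)fp;

		while (rw) {
			/* rw->ins[7] holds the caller's return PC; print or record it here. */
			rw = (struct reg_window_sketch *)rw->ins[6];
		}
	}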
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
index 68f3dc9af..3c0d311ba 100644
--- a/arch/sparc/kernel/rtrap.S
+++ b/arch/sparc/kernel/rtrap.S
@@ -1,4 +1,4 @@
-/* $Id: rtrap.S,v 1.49 1997/12/14 23:24:24 ecd Exp $
+/* $Id: rtrap.S,v 1.50 1998/02/05 14:18:43 jj Exp $
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -296,9 +296,8 @@ sun4c_rett_onepage:
.globl C_LABEL(srmmu_rett_stackchk)
C_LABEL(srmmu_rett_stackchk):
- sethi %hi(C_LABEL(page_offset)), %g1
bne ret_trap_user_stack_is_bolixed
- ld [%g1 + %lo(C_LABEL(page_offset))], %g1
+ GET_PAGE_OFFSET(g1)
cmp %g1, %fp
bleu ret_trap_user_stack_is_bolixed
mov AC_M_SFSR, %g1
diff --git a/arch/sparc/kernel/sclow.S b/arch/sparc/kernel/sclow.S
index 06c028395..4da88bd0e 100644
--- a/arch/sparc/kernel/sclow.S
+++ b/arch/sparc/kernel/sclow.S
@@ -11,6 +11,7 @@
#include <asm/errno.h>
#include <asm/winmacro.h>
#include <asm/psr.h>
+#include <asm/page.h>
#define CC_AND_RETT \
set PSR_C, %l4; \
@@ -94,7 +95,7 @@ LABEL(sunossmask):
.globl LABEL(getpagesize)
LABEL(getpagesize):
- set 4096, %i0
+ set PAGE_SIZE, %i0
CC_AND_RETT
/* XXX sys_nice() XXX */
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index e53c343da..b5625cd12 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.87 1997/12/18 02:42:42 ecd Exp $
+/* $Id: setup.c,v 1.93 1998/03/09 14:03:18 jj Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -43,6 +43,7 @@
#include <asm/spinlock.h>
#include <asm/softirq.h>
#include <asm/hardirq.h>
+#include <asm/machines.h>
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
@@ -58,11 +59,6 @@ struct screen_info screen_info = {
unsigned int phys_bytes_of_ram, end_of_phys_memory;
-unsigned long bios32_init(unsigned long memory_start, unsigned long memory_end)
-{
- return memory_start;
-}
-
/* Typing sync at the prom prompt calls the function pointed to by
* romvec->pv_synchook which I set to the following function.
* This should sync all filesystems and return, for now it just
@@ -127,7 +123,7 @@ unsigned int boot_flags;
extern char *console_fb_path;
static int console_fb = 0;
#endif
-static unsigned long memory_size = 0;
+static unsigned long memory_size __initdata = 0;
void kernel_enter_debugger(void)
{
@@ -260,7 +256,7 @@ extern void sun4c_probe_vac(void);
extern char cputypval;
extern unsigned long start, end;
extern void panic_setup(char *, int *);
-extern unsigned long srmmu_endmem_fixup(unsigned long);
+extern void srmmu_end_memory(unsigned long, unsigned long *);
extern unsigned long sun_serial_setup(unsigned long);
extern unsigned short root_flags;
@@ -311,6 +307,13 @@ __initfunc(void setup_arch(char **cmdline_p,
if(!strcmp(&cputypval,"sun4d")) { sparc_cpu_model=sun4d; }
if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }
+
+#ifdef CONFIG_SUN4
+ if (sparc_cpu_model != sun4) {
+ prom_printf("This kernel is for Sun4 architecture only.\n");
+ prom_halt();
+ }
+#endif
#if CONFIG_AP1000
sparc_cpu_model=ap1000;
strcpy(&cputypval, "ap+");
@@ -320,12 +323,10 @@ __initfunc(void setup_arch(char **cmdline_p,
switch(sparc_cpu_model) {
case sun4:
printk("SUN4\n");
- sun4c_probe_vac();
packed = 0;
break;
case sun4c:
printk("SUN4C\n");
- sun4c_probe_vac();
packed = 0;
break;
case sun4m:
@@ -356,6 +357,8 @@ __initfunc(void setup_arch(char **cmdline_p,
boot_flags_init(*cmdline_p);
idprom_init();
+ if (ARCH_SUN4C_SUN4)
+ sun4c_probe_vac();
load_mmu();
total = prom_probe_memory();
*memory_start_p = (((unsigned long) &end));
@@ -374,41 +377,37 @@ __initfunc(void setup_arch(char **cmdline_p,
}
}
}
- } else {
- unsigned int sum = 0;
+ *memory_end_p = (end_of_phys_memory + KERNBASE);
+ } else
+ srmmu_end_memory(memory_size, memory_end_p);
- for(i = 0; sp_banks[i].num_bytes != 0; i++) {
- sum += sp_banks[i].num_bytes;
- if (memory_size) {
- if (sum > memory_size) {
- sp_banks[i].num_bytes -=
- (sum - memory_size);
- sum = memory_size;
- sp_banks[++i].base_addr = 0xdeadbeef;
- sp_banks[i].num_bytes = 0;
- break;
- }
- }
+ if (!root_flags)
+ root_mountflags &= ~MS_RDONLY;
+ ROOT_DEV = to_kdev_t(root_dev);
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (ramdisk_image) {
+ initrd_start = ramdisk_image;
+ if (initrd_start < KERNBASE) initrd_start += KERNBASE;
+ initrd_end = initrd_start + ramdisk_size;
+ if (initrd_end > *memory_end_p) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ initrd_end,*memory_end_p);
+ initrd_start = 0;
+ }
+ if (initrd_start >= *memory_start_p && initrd_start < *memory_start_p + 2 * PAGE_SIZE) {
+ initrd_below_start_ok = 1;
+ *memory_start_p = PAGE_ALIGN (initrd_end);
}
- end_of_phys_memory = sum;
}
-
+#endif
prom_setsync(prom_sync_me);
- *memory_end_p = (end_of_phys_memory + KERNBASE);
- if((sparc_cpu_model == sun4c) ||
- (sparc_cpu_model == sun4))
- goto not_relevant;
- if(end_of_phys_memory >= 0x0d000000) {
- *memory_end_p = 0xfd000000;
- } else {
- if((sparc_cpu_model == sun4m) ||
- (sparc_cpu_model == sun4d) ||
- (sparc_cpu_model == ap1000))
- *memory_end_p = srmmu_endmem_fixup(*memory_end_p);
- }
-not_relevant:
-
#ifdef CONFIG_SUN_SERIAL
*memory_start_p = sun_serial_setup(*memory_start_p); /* set this up ASAP */
#endif
@@ -459,31 +458,6 @@ not_relevant:
breakpoint();
}
- if (!root_flags)
- root_mountflags &= ~MS_RDONLY;
- ROOT_DEV = to_kdev_t(root_dev);
-#ifdef CONFIG_BLK_DEV_RAM
- rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
- rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
- if (ramdisk_image) {
- initrd_start = ramdisk_image;
- if (initrd_start < KERNBASE) initrd_start += KERNBASE;
- initrd_end = initrd_start + ramdisk_size;
- if (initrd_end > *memory_end_p) {
- printk(KERN_CRIT "initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- initrd_end,*memory_end_p);
- initrd_start = 0;
- }
- if (initrd_start >= *memory_start_p && initrd_start < *memory_start_p + 2 * PAGE_SIZE) {
- initrd_below_start_ok = 1;
- *memory_start_p = PAGE_ALIGN (initrd_end);
- }
- }
-#endif
/* Due to stack alignment restrictions and assumptions... */
init_task.mm->mmap->vm_page_prot = PAGE_SHARED;
@@ -504,13 +478,12 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
extern char *sparc_cpu_type[];
extern char *sparc_fpu_type[];
-extern char *smp_info(void);
-
int get_cpuinfo(char *buffer)
{
- int cpuid=get_cpuid();
+ int cpuid=hard_smp_processor_id();
+ int len;
- return sprintf(buffer, "cpu\t\t: %s\n"
+ len = sprintf(buffer, "cpu\t\t: %s\n"
"fpu\t\t: %s\n"
"promlib\t\t: Version %d Revision %d\n"
"prom\t\t: %d.%d\n"
@@ -519,34 +492,23 @@ int get_cpuinfo(char *buffer)
"ncpus active\t: %d\n"
#ifndef __SMP__
"BogoMips\t: %lu.%02lu\n"
-#else
- "Cpu0Bogo\t: %lu.%02lu\n"
- "Cpu1Bogo\t: %lu.%02lu\n"
- "Cpu2Bogo\t: %lu.%02lu\n"
- "Cpu3Bogo\t: %lu.%02lu\n"
-#endif
- "%s"
-#ifdef __SMP__
- "%s"
#endif
,
- sparc_cpu_type[cpuid],
- sparc_fpu_type[cpuid],
+ sparc_cpu_type[cpuid] ? : "undetermined",
+ sparc_fpu_type[cpuid] ? : "undetermined",
romvec->pv_romvers, prom_rev, romvec->pv_printrev >> 16, (short)romvec->pv_printrev,
&cputypval,
- linux_num_cpus, smp_num_cpus,
+ linux_num_cpus, smp_num_cpus
#ifndef __SMP__
- loops_per_sec/500000, (loops_per_sec/5000) % 100,
-#else
- cpu_data[0].udelay_val/500000, (cpu_data[0].udelay_val/5000)%100,
- cpu_data[1].udelay_val/500000, (cpu_data[1].udelay_val/5000)%100,
- cpu_data[2].udelay_val/500000, (cpu_data[2].udelay_val/5000)%100,
- cpu_data[3].udelay_val/500000, (cpu_data[3].udelay_val/5000)%100,
+ , loops_per_sec/500000, (loops_per_sec/5000) % 100
#endif
- mmu_info()
+ );
#ifdef __SMP__
- , smp_info()
+ len += smp_bogo_info(buffer + len);
#endif
- );
-
+ len += mmu_info(buffer + len);
+#ifdef __SMP__
+ len += smp_info(buffer + len);
+#endif
+ return len;
}
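Editor's note on the get_cpuinfo() rework above: the routine now builds its /proc output incrementally so smp_bogo_info(), mmu_info() and smp_info() can each append their own lines and return the bytes written. A minimal standalone sketch of the pattern (values are placeholders):

	#include <stdio.h>

	static int emit_cpuinfo(char *buf)
	{
		int len = 0;

		len += sprintf(buf + len, "cpu\t\t: %s\n", "example-cpu");
		len += sprintf(buf + len, "ncpus probed\t: %d\n", 1);
		/* ...further helpers keep appending at buf + len and add to the total. */
		return len;
	}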
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 5c10faa81..efdb362a4 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.77 1997/12/22 03:06:32 ecd Exp $
+/* $Id: signal.c,v 1.79 1998/04/04 07:11:41 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -78,9 +78,20 @@ struct new_signal_frame {
__siginfo_fpu_t fpu_state;
};
+struct rt_signal_frame {
+ struct sparc_stackf ss;
+ siginfo_t info;
+ struct pt_regs regs;
+ sigset_t mask;
+ __siginfo_fpu_t *fpu_save;
+ unsigned int insns [2];
+ __siginfo_fpu_t fpu_state;
+};
+
/* Align macros */
#define SF_ALIGNEDSZ (((sizeof(struct signal_sframe) + 7) & (~7)))
#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
+#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
/*
* atomically swap in the new signal mask, and wait for a signal.
@@ -318,7 +329,60 @@ segv_and_exit:
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
- printk("XXX: FIXME: write do_rt_sigreturn\n");
+ struct rt_signal_frame *sf;
+ unsigned int psr, pc, npc;
+ __siginfo_fpu_t *fpu_save;
+ sigset_t set;
+
+ synchronize_user_stack();
+ sf = (struct rt_signal_frame *) regs->u_regs[UREG_FP];
+ if(verify_area(VERIFY_READ, sf, sizeof(*sf)) ||
+ (((unsigned long) sf) & 0x03))
+ goto segv;
+
+ get_user(pc, &sf->regs.pc);
+ __get_user(npc, &sf->regs.npc);
+ if((pc | npc) & 0x03)
+ goto segv;
+
+ regs->pc = pc;
+ regs->npc = npc;
+
+ __get_user(regs->y, &sf->regs.y);
+ __get_user(psr, &sf->regs.psr);
+
+ __get_user(regs->u_regs[UREG_G1], &sf->regs.u_regs[UREG_G1]);
+ __get_user(regs->u_regs[UREG_G2], &sf->regs.u_regs[UREG_G2]);
+ __get_user(regs->u_regs[UREG_G3], &sf->regs.u_regs[UREG_G3]);
+ __get_user(regs->u_regs[UREG_G4], &sf->regs.u_regs[UREG_G4]);
+ __get_user(regs->u_regs[UREG_G5], &sf->regs.u_regs[UREG_G5]);
+ __get_user(regs->u_regs[UREG_G6], &sf->regs.u_regs[UREG_G6]);
+ __get_user(regs->u_regs[UREG_G7], &sf->regs.u_regs[UREG_G7]);
+ __get_user(regs->u_regs[UREG_I0], &sf->regs.u_regs[UREG_I0]);
+ __get_user(regs->u_regs[UREG_I1], &sf->regs.u_regs[UREG_I1]);
+ __get_user(regs->u_regs[UREG_I2], &sf->regs.u_regs[UREG_I2]);
+ __get_user(regs->u_regs[UREG_I3], &sf->regs.u_regs[UREG_I3]);
+ __get_user(regs->u_regs[UREG_I4], &sf->regs.u_regs[UREG_I4]);
+ __get_user(regs->u_regs[UREG_I5], &sf->regs.u_regs[UREG_I5]);
+ __get_user(regs->u_regs[UREG_I6], &sf->regs.u_regs[UREG_I6]);
+ __get_user(regs->u_regs[UREG_I7], &sf->regs.u_regs[UREG_I7]);
+
+ regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);
+
+ __get_user(fpu_save, &sf->fpu_save);
+ if(fpu_save)
+ restore_fpu_state(regs, &sf->fpu_state);
+ if(copy_from_user(&set, &sf->mask, sizeof(sigset_t)))
+ goto segv;
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ return;
+segv:
+ lock_kernel();
+ do_exit(SIGSEGV);
}
/* Checks if the fp is valid */
@@ -514,7 +578,63 @@ static inline void
new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
- printk("XXX: FIXME: new_setup_rt_frame unimplemented\n");
+ struct rt_signal_frame *sf;
+ int sigframe_size;
+ unsigned int psr;
+ int i;
+
+ synchronize_user_stack();
+ sigframe_size = RT_ALIGNEDSZ;
+ if(!current->used_math)
+ sigframe_size -= sizeof(__siginfo_fpu_t);
+ sf = (struct rt_signal_frame *)(regs->u_regs[UREG_FP] - sigframe_size);
+ if(invalid_frame_pointer(sf, sigframe_size))
+ goto sigill;
+ if(current->tss.w_saved != 0)
+ goto sigill;
+
+ put_user(regs->pc, &sf->regs.pc);
+ __put_user(regs->npc, &sf->regs.npc);
+ __put_user(regs->y, &sf->regs.y);
+ psr = regs->psr;
+ if(current->used_math)
+ psr |= PSR_EF;
+ __put_user(psr, &sf->regs.psr);
+ for(i = 0; i < 16; i++)
+ __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+ if(psr & PSR_EF) {
+ save_fpu_state(regs, &sf->fpu_state);
+ __put_user(&sf->fpu_state, &sf->fpu_save);
+ } else {
+ __put_user(0, &sf->fpu_save);
+ }
+ __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
+ copy_to_user(sf, (char *) regs->u_regs [UREG_FP],
+ sizeof (struct reg_window));
+
+ regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_I0] = signo;
+ regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->npc = (regs->pc + 4);
+
+ if(ka->ka_restorer)
+ regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+ else {
+ regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
+
+ __put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
+ __put_user(0x91d02010, &sf->insns[1]); /* t 0x10 */
+
+ /* Flush instruction space. */
+ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
+ }
+ return;
+
+sigill:
+ lock_kernel();
+ do_exit(SIGILL);
}
/* Setup a Solaris stack frame */
@@ -783,6 +903,7 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
spin_lock_irq(&current->sigmask_lock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked, signr);
+ recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
}
}
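Editor's note on the rt signal frame code above: RT_ALIGNEDSZ rounds the raw frame size up to the next multiple of eight like the other *_ALIGNEDSZ macros, the FPU area at the tail is dropped when the task never used the FPU, and the frame is carved just below the old user frame pointer before the handler is entered with %i0 = signo and %i1 = &sf->info. A tiny sketch of the size computation, assuming only the 8-byte stack alignment shown in the macros:

	/* Same rounding as RT_ALIGNEDSZ; raw_size is sizeof(struct rt_signal_frame),
	 * optionally minus the FPU state when it is not needed.
	 */
	static unsigned long carve_signal_frame(unsigned long user_fp, unsigned long raw_size)
	{
		unsigned long aligned = (raw_size + 7UL) & ~7UL;

		return user_fp - aligned;	/* new frame/stack pointer for the handler */
	}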
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 15364d44f..c6d86d36c 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -1,9 +1,9 @@
/* smp.c: Sparc SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <linux/config.h> /* for CONFIG_PROFILE */
#include <asm/head.h>
#include <linux/kernel.h>
@@ -13,6 +13,7 @@
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
@@ -34,34 +35,29 @@
#define IRQ_STOP_CPU 14
#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
-extern int linux_num_cpus;
-
-extern void calibrate_delay(void);
-
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
-
volatile int smp_processors_ready = 0;
-
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready=0;
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
-volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
+volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
+#ifdef NOTUSED
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
+#endif
unsigned long smp_proc_in_lock[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
-static unsigned char boot_cpu_id = 0;
-static int smp_activated = 0;
+unsigned long cpu_offset[NR_CPUS];
+unsigned char boot_cpu_id = 0;
+unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
+int smp_activated = 0;
volatile int cpu_number_map[NR_CPUS];
-volatile int cpu_logical_map[NR_CPUS];
+volatile int __cpu_logical_map[NR_CPUS];
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
* places 0xff there afterwards. Pretty lame locking primitive
- * compared to the Alpha and the intel no? Most Sparcs have 'swap'
+ * compared to the Alpha and the Intel no? Most Sparcs have 'swap'
* instruction which is much better...
*/
struct klock_info klock_info = { KLOCK_CLEAR, 0 };
@@ -69,42 +65,11 @@ struct klock_info klock_info = { KLOCK_CLEAR, 0 };
volatile unsigned long ipi_count;
volatile int smp_process_available=0;
-
-/*#define SMP_DEBUG*/
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
-
volatile int smp_commenced = 0;
-static char smp_buf[512];
-
/* Not supported on Sparc yet. */
-void smp_setup(char *str, int *ints)
-{
-}
-
-char *smp_info(void)
-{
- sprintf(smp_buf,
-" CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
-"State: %s\t\t%s\t\t%s\t\t%s\n",
-(cpu_present_map & 1) ? ((klock_info.akp == 0) ? "akp" : "online") : "offline",
-(cpu_present_map & 2) ? ((klock_info.akp == 1) ? "akp" : "online") : "offline",
-(cpu_present_map & 4) ? ((klock_info.akp == 2) ? "akp" : "online") : "offline",
-(cpu_present_map & 8) ? ((klock_info.akp == 3) ? "akp" : "online") : "offline");
- return smp_buf;
-}
-
-static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+__initfunc(void smp_setup(char *str, int *ints))
{
- __asm__ __volatile__("swap [%1], %0\n\t" :
- "=&r" (val), "=&r" (ptr) :
- "0" (val), "1" (ptr));
- return val;
}
/*
@@ -112,12 +77,12 @@ static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
* a given CPU
*/
-void smp_store_cpu_info(int id)
+__initfunc(void smp_store_cpu_info(int id))
{
cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
}
-void smp_commence(void)
+__initfunc(void smp_commence(void))
{
/*
* Lets the callin's below out of their loop.
@@ -129,65 +94,19 @@ void smp_commence(void)
local_flush_tlb_all();
}
-static void smp_setup_percpu_timer(void);
-
-void smp_callin(void)
-{
- int cpuid = hard_smp_processor_id();
-
- local_flush_cache_all();
- local_flush_tlb_all();
- set_irq_udt(mid_xlate[boot_cpu_id]);
-
- /* Get our local ticker going. */
- smp_setup_percpu_timer();
-
- calibrate_delay();
- smp_store_cpu_info(cpuid);
- local_flush_cache_all();
- local_flush_tlb_all();
-
- /* Allow master to continue. */
- swap((unsigned long *)&cpu_callin_map[cpuid], 1);
- local_flush_cache_all();
- local_flush_tlb_all();
-
- while(!task[cpuid] || current_set[cpuid] != task[cpuid])
- barrier();
-
- /* Fix idle thread fields. */
- __asm__ __volatile__("ld [%0], %%g6\n\t"
- : : "r" (&current_set[cpuid])
- : "memory" /* paranoid */);
- current->mm->mmap->vm_page_prot = PAGE_SHARED;
- current->mm->mmap->vm_start = PAGE_OFFSET;
- current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;
-
- while(!smp_commenced)
- barrier();
-
- local_flush_cache_all();
- local_flush_tlb_all();
-
- __sti();
-}
-
-extern int cpu_idle(void *unused);
-extern void init_IRQ(void);
-
/* Only broken Intel needs this, thus it should not even be referenced
* globally...
*/
-void initialize_secondary(void)
+__initfunc(void initialize_secondary(void))
{
}
+extern int cpu_idle(void *unused);
+
/* Activate a secondary processor. */
int start_secondary(void *unused)
{
- trap_init();
- init_IRQ();
- smp_callin();
+ prom_printf("Start secondary called. Should not happen\n");
return cpu_idle(NULL);
}
@@ -201,255 +120,25 @@ void cpu_panic(void)
* Cycle through the processors asking the PROM to start each one.
*/
-extern struct prom_cpuinfo linux_cpus[NCPUS];
-static struct linux_prom_registers penguin_ctable;
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
+struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
-void smp_boot_cpus(void)
+__initfunc(void smp_boot_cpus(void))
{
- int cpucount = 0;
- int i = 0;
- int first, prev;
-
- printk("Entering SMP Mode...\n");
-
- penguin_ctable.which_io = 0;
- penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- penguin_ctable.reg_size = 0;
-
- __sti();
- cpu_present_map = 0;
- for(i=0; i < linux_num_cpus; i++)
- cpu_present_map |= (1<<i);
- for(i=0; i < NR_CPUS; i++)
- cpu_number_map[i] = -1;
- for(i=0; i < NR_CPUS; i++)
- cpu_logical_map[i] = -1;
- mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
- cpu_number_map[boot_cpu_id] = 0;
- cpu_logical_map[0] = boot_cpu_id;
- klock_info.akp = boot_cpu_id;
- current->processor = boot_cpu_id;
- smp_store_cpu_info(boot_cpu_id);
- set_irq_udt(mid_xlate[boot_cpu_id]);
- smp_setup_percpu_timer();
- local_flush_cache_all();
- if(linux_num_cpus == 1)
- return; /* Not an MP box. */
- for(i = 0; i < NR_CPUS; i++) {
- if(i == boot_cpu_id)
- continue;
-
- if(cpu_present_map & (1 << i)) {
- extern unsigned long sparc_cpu_startup;
- unsigned long *entry = &sparc_cpu_startup;
- struct task_struct *p;
- int timeout;
-
- /* Cook up an idler for this guy. */
- kernel_thread(start_secondary, NULL, CLONE_PID);
-
- p = task[++cpucount];
-
- p->processor = i;
- current_set[i] = p;
-
- /* See trampoline.S for details... */
- entry += ((i-1) * 3);
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
- mid_xlate[i] = (linux_cpus[i].mid & ~8);
- local_flush_cache_all();
- prom_startcpu(linux_cpus[i].prom_node,
- &penguin_ctable, 0, (char *)entry);
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 5000000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(100);
- }
- if(cpu_callin_map[i]) {
- /* Another "Red Snapper". */
- cpu_number_map[i] = i;
- cpu_logical_map[i] = i;
- } else {
- cpucount--;
- printk("Processor %d is stuck.\n", i);
- }
- }
- if(!(cpu_callin_map[i])) {
- cpu_present_map &= ~(1 << i);
- cpu_number_map[i] = -1;
- }
- }
- local_flush_cache_all();
- if(cpucount == 0) {
- printk("Error: only one Processor found.\n");
- cpu_present_map = (1 << smp_processor_id());
- } else {
- unsigned long bogosum = 0;
- for(i = 0; i < NR_CPUS; i++) {
- if(cpu_present_map & (1 << i))
- bogosum += cpu_data[i].udelay_val;
- }
- printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
- cpucount + 1,
- (bogosum + 2500)/500000,
- ((bogosum + 2500)/5000)%100);
- smp_activated = 1;
- smp_num_cpus = cpucount + 1;
- }
-
- /* Setup CPU list for IRQ distribution scheme. */
- first = prev = -1;
- for(i = 0; i < NR_CPUS; i++) {
- if(cpu_present_map & (1 << i)) {
- if(first == -1)
- first = i;
- if(prev != -1)
- cpu_data[i].next = i;
- cpu_data[i].mid = mid_xlate[i];
- prev = i;
- }
- }
- cpu_data[prev].next = first;
-
- /* Ok, they are spinning and ready to go. */
- smp_processors_ready = 1;
-}
-
-/* At each hardware IRQ, we get this called to forward IRQ reception
- * to the next processor. The caller must disable the IRQ level being
- * serviced globally so that there are no double interrupts received.
- */
-void smp_irq_rotate(int cpu)
-{
- if(smp_processors_ready)
- set_irq_udt(cpu_data[cpu_data[cpu].next].mid);
-}
-
-/* Cross calls, in order to work efficiently and atomically do all
- * the message passing work themselves, only stopcpu and reschedule
- * messages come through here.
- */
-void smp_message_pass(int target, int msg, unsigned long data, int wait)
-{
- static unsigned long smp_cpu_in_msg[NR_CPUS];
- unsigned long mask;
- int me = smp_processor_id();
- int irq, i;
-
- if(msg == MSG_RESCHEDULE) {
- irq = IRQ_RESCHEDULE;
-
- if(smp_cpu_in_msg[me])
- return;
- } else if(msg == MSG_STOP_CPU) {
- irq = IRQ_STOP_CPU;
- } else {
- goto barf;
- }
-
- smp_cpu_in_msg[me]++;
- if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
- mask = cpu_present_map;
- if(target == MSG_ALL_BUT_SELF)
- mask &= ~(1 << me);
- for(i = 0; i < 4; i++) {
- if(mask & (1 << i))
- set_cpu_int(mid_xlate[i], irq);
- }
- } else {
- set_cpu_int(mid_xlate[target], irq);
- }
- smp_cpu_in_msg[me]--;
-
- return;
-barf:
- printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
- panic("Bogon SMP message pass.");
-}
-
-struct smp_funcall {
- smpfunc_t func;
- unsigned long arg1;
- unsigned long arg2;
- unsigned long arg3;
- unsigned long arg4;
- unsigned long arg5;
- unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
- unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
-} ccall_info;
-
-static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
-
-/* Cross calls must be serialized, at least currently. */
-void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
- if(smp_processors_ready) {
- register int ncpus = smp_num_cpus;
- unsigned long flags;
-
- spin_lock_irqsave(&cross_call_lock, flags);
-
- /* Init function glue. */
- ccall_info.func = func;
- ccall_info.arg1 = arg1;
- ccall_info.arg2 = arg2;
- ccall_info.arg3 = arg3;
- ccall_info.arg4 = arg4;
- ccall_info.arg5 = arg5;
-
- /* Init receive/complete mapping, plus fire the IPI's off. */
- {
- register void (*send_ipi)(int,int) = set_cpu_int;
- register unsigned long mask;
- register int i;
-
- mask = (cpu_present_map & ~(1 << smp_processor_id()));
- for(i = 0; i < ncpus; i++) {
- if(mask & (1 << i)) {
- ccall_info.processors_in[i] = 0;
- ccall_info.processors_out[i] = 0;
- send_ipi(mid_xlate[i], IRQ_CROSS_CALL);
- } else {
- ccall_info.processors_in[i] = 1;
- ccall_info.processors_out[i] = 1;
- }
- }
- }
-
- /* First, run local copy. */
- func(arg1, arg2, arg3, arg4, arg5);
-
- {
- register int i;
-
- i = 0;
- do {
- while(!ccall_info.processors_in[i])
- barrier();
- } while(++i < ncpus);
-
- i = 0;
- do {
- while(!ccall_info.processors_out[i])
- barrier();
- } while(++i < ncpus);
- }
-
- spin_unlock_irqrestore(&cross_call_lock, flags);
- } else
- func(arg1, arg2, arg3, arg4, arg5); /* Just need to run local copy. */
+ extern void smp4m_boot_cpus(void);
+ extern void smp4d_boot_cpus(void);
+
+ if (sparc_cpu_model == sun4m)
+ smp4m_boot_cpus();
+ else
+ smp4d_boot_cpus();
}
void smp_flush_cache_all(void)
-{ xc0((smpfunc_t) local_flush_cache_all); }
+{ xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all)); }
void smp_flush_tlb_all(void)
-{ xc0((smpfunc_t) local_flush_tlb_all); }
+{ xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all)); }
void smp_flush_cache_mm(struct mm_struct *mm)
{
@@ -457,7 +146,7 @@ void smp_flush_cache_mm(struct mm_struct *mm)
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_cache_mm(mm);
else
- xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
+ xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
}
}
@@ -467,7 +156,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
if(mm->cpu_vm_mask == (1 << smp_processor_id())) {
local_flush_tlb_mm(mm);
} else {
- xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
+ xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
if(mm->count == 1 && current->mm == mm)
mm->cpu_vm_mask = (1 << smp_processor_id());
}
@@ -481,7 +170,7 @@ void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_cache_range(mm, start, end);
else
- xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
+ xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) mm,
start, end);
}
}
@@ -493,7 +182,7 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_tlb_range(mm, start, end);
else
- xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
+ xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) mm,
start, end);
}
}
@@ -506,7 +195,7 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_cache_page(vma, page);
else
- xc2((smpfunc_t) local_flush_cache_page,
+ xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page),
(unsigned long) vma, page);
}
}
@@ -519,7 +208,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_tlb_page(vma, page);
else
- xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page);
+ xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
}
}
@@ -532,7 +221,7 @@ void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_flush_page_to_ram, page);
+ xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#else
local_flush_page_to_ram(page);
#endif
@@ -543,7 +232,7 @@ void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
if(mm->cpu_vm_mask == (1 << smp_processor_id()))
local_flush_sig_insns(mm, insn_addr);
else
- xc2((smpfunc_t) local_flush_sig_insns, (unsigned long) mm, insn_addr);
+ xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
}
/* Reschedule call back. */
@@ -552,17 +241,6 @@ void smp_reschedule_irq(void)
need_resched = 1;
}
-/* Running cross calls. */
-void smp_cross_call_irq(void)
-{
- int i = smp_processor_id();
-
- ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
- ccall_info.processors_out[i] = 1;
-}
-
/* Stopping processors. */
void smp_stop_cpu_irq(void)
{
@@ -571,84 +249,10 @@ void smp_stop_cpu_irq(void)
barrier();
}
-/* Protects counters touched during level14 ticker */
-spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
-
-#ifdef CONFIG_PROFILE
-
-/* 32-bit Sparc specific profiling function. */
-static inline void sparc_do_profile(unsigned long pc)
-{
- if(prof_buffer && current->pid) {
- extern int _stext;
-
- pc -= (unsigned long) &_stext;
- pc >>= prof_shift;
-
- spin_lock(&ticker_lock);
- if(pc < prof_len)
- prof_buffer[pc]++;
- else
- prof_buffer[prof_len - 1]++;
- spin_unlock(&ticker_lock);
- }
-}
-
-#endif
-
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
-
-extern void update_one_process(struct task_struct *p, unsigned long ticks,
- unsigned long user, unsigned long system);
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
-
- clear_profile_irq(mid_xlate[cpu]);
-#ifdef CONFIG_PROFILE
- if(!user_mode(regs))
- sparc_do_profile(regs->pc);
-#endif
- if(!--prof_counter[cpu]) {
- int user = user_mode(regs);
- if(current->pid) {
- update_one_process(current, 1, user, !user);
-
- if(--current->counter < 0) {
- current->counter = 0;
- need_resched = 1;
- }
-
- spin_lock(&ticker_lock);
- if(user) {
- if(current->priority < DEF_PRIORITY)
- kstat.cpu_nice++;
- else
- kstat.cpu_user++;
- } else {
- kstat.cpu_system++;
- }
- spin_unlock(&ticker_lock);
- }
- prof_counter[cpu] = prof_multiplier[cpu];
- }
-}
-
extern unsigned int lvl14_resolution;
-static void smp_setup_percpu_timer(void)
-{
- int cpu = smp_processor_id();
-
- prof_counter[cpu] = prof_multiplier[cpu] = 1;
- load_profile_irq(mid_xlate[cpu], lvl14_resolution);
-
- if(cpu == boot_cpu_id)
- enable_pil_irq(14);
-}
-
int setup_profiling_timer(unsigned int multiplier)
{
int i;
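Editor's note on the locking comment retained in the smp.c hunk above: ldstub is the one atomic primitive guaranteed on every 32-bit SPARC; it reads a byte and writes 0xff to it in a single operation. A minimal byte spinlock sketch built on it with GCC inline assembly, offered as an illustration rather than the kernel's own spin_lock implementation:

	static inline unsigned char ldstub(volatile unsigned char *addr)
	{
		unsigned char prev;

		__asm__ __volatile__("ldstub [%1], %0"
				     : "=r" (prev)
				     : "r" (addr)
				     : "memory");
		return prev;			/* old byte; *addr is now 0xff */
	}

	static inline void byte_spin_lock(volatile unsigned char *lock)
	{
		while (ldstub(lock) != 0)	/* nonzero: someone else holds it */
			while (*lock)		/* spin on plain reads to cut bus traffic */
				;
	}

	static inline void byte_spin_unlock(volatile unsigned char *lock)
	{
		*lock = 0;
	}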
diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c
index e259ffade..9426ec0d9 100644
--- a/arch/sparc/kernel/sparc-stub.c
+++ b/arch/sparc/kernel/sparc-stub.c
@@ -1,4 +1,4 @@
-/* $Id: sparc-stub.c,v 1.22 1998/01/07 06:33:48 baccala Exp $
+/* $Id: sparc-stub.c,v 1.24 1998/02/08 07:58:44 ecd Exp $
* sparc-stub.c: KGDB support for the Linux kernel.
*
* Modifications to run under Linux
@@ -165,9 +165,10 @@ unsigned long get_sun4csegmap(unsigned long addr)
return entry;
}
-static void flush_cache_all_nop(void)
-{
-}
+#if 0
+/* Have to sort this out. This cannot be done after initialization. */
+static void flush_cache_all_nop(void) {}
+#endif
/* Place where we save old trap entries for restoration */
struct tt_entry kgdb_savettable[256];
@@ -398,10 +399,12 @@ set_debug_traps(void)
{
struct hard_trap_info *ht;
unsigned long flags;
- unsigned char c;
save_and_cli(flags);
- flush_cache_all = flush_cache_all_nop;
+#if 0
+/* Have to sort this out. This cannot be done after initialization. */
+ BTFIXUPSET_CALL(flush_cache_all, flush_cache_all_nop, BTFIXUPCALL_NOP);
+#endif
/* Initialize our copy of the Linux Sparc trap table */
eh_init();
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 92be6f74d..1690a7c69 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -1,4 +1,4 @@
-/* $Id: sparc_ksyms.c,v 1.61 1997/11/19 07:57:44 jj Exp $
+/* $Id: sparc_ksyms.c,v 1.64 1998/03/19 15:36:43 jj Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/in6.h>
-#include <linux/pci.h>
#include <asm/oplib.h>
#include <asm/delay.h>
@@ -138,7 +137,6 @@ EXPORT_SYMBOL(page_offset);
EXPORT_SYMBOL(stack_top);
/* Atomic operations. */
-EXPORT_SYMBOL_PRIVATE(_xchg32);
EXPORT_SYMBOL_PRIVATE(_atomic_add);
EXPORT_SYMBOL_PRIVATE(_atomic_sub);
@@ -168,13 +166,23 @@ EXPORT_SYMBOL(request_fast_irq);
EXPORT_SYMBOL(sparc_alloc_io);
EXPORT_SYMBOL(sparc_free_io);
EXPORT_SYMBOL(io_remap_page_range);
-EXPORT_SYMBOL(mmu_v2p);
-EXPORT_SYMBOL(mmu_unlockarea);
-EXPORT_SYMBOL(mmu_lockarea);
-EXPORT_SYMBOL(mmu_get_scsi_sgl);
-EXPORT_SYMBOL(mmu_get_scsi_one);
-EXPORT_SYMBOL(mmu_release_scsi_sgl);
-EXPORT_SYMBOL(mmu_release_scsi_one);
+
+/* Btfixup stuff cannot have versions, it would be complicated too much */
+#ifndef __SMP__
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(___xchg32));
+#else
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(__smp_processor_id));
+#endif
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(enable_irq));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(disable_irq));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_v2p));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_unlockarea));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_lockarea));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_get_scsi_sgl));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_get_scsi_one));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_sgl));
+EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(mmu_release_scsi_one));
+
EXPORT_SYMBOL(_sparc_dvma_malloc);
EXPORT_SYMBOL(sun4c_unmapioaddr);
EXPORT_SYMBOL(srmmu_unmapioaddr);
@@ -272,7 +280,3 @@ EXPORT_SYMBOL_DOT(mul);
EXPORT_SYMBOL_DOT(umul);
EXPORT_SYMBOL_DOT(div);
EXPORT_SYMBOL_DOT(udiv);
-
-#if CONFIG_PCI
-EXPORT_SYMBOL(pci_devices);
-#endif
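
The export changes in sparc_ksyms.c above follow the new btfixup scheme: the MMU and IRQ helpers are no longer exported as ordinary (versioned) symbols but as boot-time-patched call slots, which is why they use EXPORT_SYMBOL_NOVERS(BTFIXUP_CALL(...)). The standalone sketch below only illustrates the underlying idea, binding a generic operation to exactly one platform implementation during init; the names mmu_v2p_slot, srmmu_v2p, sun4c_v2p and platform_init are hypothetical, and the real btfixup mechanism patches the call instructions themselves instead of storing a pointer.

#include <stdio.h>

/* Hypothetical platform-specific implementations of a virtual-to-physical
 * address helper. */
static unsigned long srmmu_v2p(unsigned long vaddr) { return vaddr - 0xf0000000UL; }
static unsigned long sun4c_v2p(unsigned long vaddr) { return vaddr & 0x0fffffffUL; }

/* One slot per fixed-up operation, filled exactly once during init.  This is
 * what BTFIXUPSET_CALL() does conceptually; the kernel's version rewrites the
 * call sites at boot rather than dispatching through a stored pointer. */
static unsigned long (*mmu_v2p_slot)(unsigned long);

static void platform_init(int is_srmmu)
{
	mmu_v2p_slot = is_srmmu ? srmmu_v2p : sun4c_v2p;
}

int main(void)
{
	platform_init(1);
	printf("phys = %#lx\n", mmu_v2p_slot(0xf0001000UL));
	return 0;
}

Avoiding the pointer load and indirect jump on every call is the motivation for converting these hooks to btfixup throughout this patch.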
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index bc9569688..cef6370ce 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -30,6 +30,9 @@
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
+#include <asm/sun4paddr.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
/* Pointer to the interrupt enable byte
*
@@ -128,7 +131,7 @@ __initfunc(static void sun4c_init_timers(void (*counter_fn)(int, void *, struct
/* Map the Timer chip, this is implemented in hardware inside
* the cache chip on the sun4c.
*/
- sun4c_timers = sparc_alloc_io (SUN4C_TIMER_PHYSADDR, 0,
+ sun4c_timers = sparc_alloc_io (SUN_TIMER_PHYSADDR, 0,
sizeof(struct sun4c_timer_info),
"timer", 0x0, 0x0);
@@ -160,30 +163,41 @@ __initfunc(void sun4c_init_IRQ(void))
{
struct linux_prom_registers int_regs[2];
int ie_node;
+
+ if (ARCH_SUN4) {
+ interrupt_enable =
+ (char *) sparc_alloc_io(SUN4_IE_PHYSADDR, 0,
+ PAGE_SIZE,
+ "sun4c_interrupts",
+ 0x0, 0x0);
+ } else {
- ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
- "interrupt-enable");
- if(ie_node == 0)
- panic("Cannot find /interrupt-enable node");
-
- /* Depending on the "address" property is bad news... */
- prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs));
- interrupt_enable = (char *) sparc_alloc_io(int_regs[0].phys_addr, 0,
- int_regs[0].reg_size,
- "sun4c_interrupts",
- int_regs[0].which_io, 0x0);
- enable_irq = sun4c_enable_irq;
- disable_irq = sun4c_disable_irq;
- enable_pil_irq = sun4c_enable_irq;
- disable_pil_irq = sun4c_disable_irq;
- clear_clock_irq = sun4c_clear_clock_irq;
- clear_profile_irq = sun4c_clear_profile_irq;
- load_profile_irq = sun4c_load_profile_irq;
+ ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
+ "interrupt-enable");
+ if(ie_node == 0)
+ panic("Cannot find /interrupt-enable node");
+
+ /* Depending on the "address" property is bad news... */
+ prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs));
+ interrupt_enable =
+ (char *) sparc_alloc_io(int_regs[0].phys_addr, 0,
+ int_regs[0].reg_size,
+ "sun4c_interrupts",
+ int_regs[0].which_io, 0x0);
+ }
+
+ BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_profile_irq, sun4c_clear_profile_irq, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
init_timers = sun4c_init_timers;
#ifdef __SMP__
- set_cpu_int = (void (*) (int, int))sun4c_nop;
- clear_cpu_int = (void (*) (int, int))sun4c_nop;
- set_irq_udt = (void (*) (int))sun4c_nop;
+ BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
#endif
*interrupt_enable = (SUN4C_INT_ENABLE);
/* Cannot enable interrupts until OBP ticker is disabled. */
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index f22fe1495..302df86f4 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,8 +1,8 @@
-/* $Id: sun4d_irq.c,v 1.3 1997/12/22 16:09:15 jj Exp $
- * arch/sparc/kernel/sun4d_irq.c:
+/* $Id: sun4d_irq.c,v 1.12 1998/03/19 15:36:36 jj Exp $
+ * arch/sparc/kernel/sun4d_irq.c:
* SS1000/SC2000 interrupt handling.
*
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
@@ -36,32 +36,47 @@
#include <asm/sbus.h>
#include <asm/sbi.h>
+/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
+/* #define DISTRIBUTE_IRQS */
+
struct sun4d_timer_regs *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
+unsigned char cpu_leds[32];
+#ifdef __SMP__
+unsigned char sbus_tid[32];
+#endif
extern struct irqaction *irq_action[];
struct sbus_action {
struct irqaction *action;
- unsigned char lock;
- unsigned char active;
- unsigned char disabled;
+ /* For SMP this needs to be extended */
} *sbus_actions;
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};
+static int sbus_to_pil[] = {
+ 0, 2, 3, 5, 7, 9, 11, 13,
+};
+
static int nsbi;
+#ifdef __SMP__
+spinlock_t sun4d_imsk_lock = SPIN_LOCK_UNLOCKED;
+#endif
int sun4d_get_irq_list(char *buf)
{
int i, j = 0, k = 0, len = 0, sbusl;
struct irqaction * action;
+#ifdef __SMP__
+ int x;
+#endif
for (i = 0 ; i < NR_IRQS ; i++) {
sbusl = pil_to_sbus[i];
@@ -77,8 +92,15 @@ int sun4d_get_irq_list(char *buf)
}
continue;
}
-found_it: len += sprintf(buf+len, "%2d: %8d %c %s",
- i, kstat.interrupts[i],
+found_it: len += sprintf(buf+len, "%3d: ", i);
+#ifndef __SMP__
+ len += sprintf(buf+len, "%10u ", kstat_irqs(i));
+#else
+ for (x = 0; x < smp_num_cpus; x++)
+ len += sprintf(buf+len, "%10u ",
+ kstat.irqs[cpu_logical_map(x)][i]);
+#endif
+ len += sprintf(buf+len, "%c %s",
(action->flags & SA_INTERRUPT) ? '+' : ' ',
action->name);
action = action->next;
@@ -172,7 +194,7 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
cc_set_iclr(1 << irq);
irq_enter(cpu, irq, regs);
- kstat.interrupts[irq]++;
+ kstat.irqs[cpu][irq]++;
if (!sbusl) {
action = *(irq + irq_action);
if (!action)
@@ -183,7 +205,6 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
} while (action);
} else {
int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
- int lock;
int sbino;
struct sbus_action *actionp;
unsigned mask, slot;
@@ -202,19 +223,13 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
if (mask & slot) {
mask &= ~slot;
action = actionp->action;
- __asm__ __volatile__ ("ldstub [%1 + 4], %0"
- : "=r" (lock) : "r" (actionp));
- if (!lock) {
- if (!action)
- unexpected_irq(irq, 0, regs);
- do {
- action->handler(irq, action->dev_id, regs);
- action = action->next;
- } while (action);
- actionp->lock = 0;
- } else
- actionp->active = 1;
+ if (!action)
+ unexpected_irq(irq, 0, regs);
+ do {
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
release_sbi(SBI2DEVID(sbino), slot);
}
}
@@ -305,79 +320,49 @@ int sun4d_request_irq(unsigned int irq,
else
*actionp = action;
- if (ret) irq = *ret;
-
- if (irq > NR_IRQS) {
- struct sbus_action *s = sbus_actions + irq - (1 << 5);
-
- if (s->disabled) {
- s->disabled = 0;
- s->active = 0;
- s->lock = 0;
- }
- }
-
+ enable_irq(irq);
restore_flags(flags);
return 0;
}
static void sun4d_disable_irq(unsigned int irq)
{
- struct sbus_action *s;
-
- if (irq < NR_IRQS) {
- /* FIXME */
- printk ("Unable to disable IRQ %d\n", irq);
- return;
- }
- s = sbus_actions + irq - (1 << 5);
+#ifdef __SMP__
+ int tid = sbus_tid[(irq >> 5) - 1];
+ unsigned long flags;
+#endif
- if (s->disabled) return;
- s->disabled = 1;
- __asm__ __volatile__ ("
-1: ldstub [%0 + 4], %%g1
- orcc %%g1, 0, %%g0
- bne 1b"
- : : "r" (s) : "g1", "cc");
+ if (irq < NR_IRQS) return;
+#ifdef __SMP__
+ spin_lock_irqsave(&sun4d_imsk_lock, flags);
+ cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
+ spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+ cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
+#endif
}
static void sun4d_enable_irq(unsigned int irq)
{
- struct sbus_action *s;
- struct irqaction *action;
-
- if (irq < NR_IRQS)
- /* FIXME */
- return;
- s = sbus_actions + irq - (1 << 5);
+#ifdef __SMP__
+ int tid = sbus_tid[(irq >> 5) - 1];
+ unsigned long flags;
+#endif
- if (!s->disabled) return;
- action = s->action;
- s->disabled = 0;
- while (s->active) {
- s->active = 0;
- while (action) {
- /* FIXME: Hope no sbus intr handler uses regs */
- action->handler(irq, action->dev_id, NULL);
- action = action->next;
- }
- }
- s->lock = 0;
+ if (irq < NR_IRQS) return;
+#ifdef __SMP__
+ spin_lock_irqsave(&sun4d_imsk_lock, flags);
+ cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
+ spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+ cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
+#endif
}
#ifdef __SMP__
-
-/* +-------+-------------+-----------+------------------------------------+
- * | bcast | devid | sid | levels mask |
- * +-------+-------------+-----------+------------------------------------+
- * 31 30 23 22 15 14 0
- */
-#define IGEN_MESSAGE(bcast, devid, sid, levels) \
- (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
-
-static void sun4d_send_ipi(int cpu, int level)
+static void sun4d_set_cpu_int(int cpu, int level)
{
- cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
+ sun4d_send_ipi(cpu, level);
}
static void sun4d_clear_ipi(int cpu, int level)
@@ -387,6 +372,55 @@ static void sun4d_clear_ipi(int cpu, int level)
static void sun4d_set_udt(int cpu)
{
}
+
+/* Setup IRQ distribution scheme. */
+__initfunc(void sun4d_distribute_irqs(void))
+{
+#ifdef DISTRIBUTE_IRQS
+ struct linux_sbus *sbus;
+ unsigned long sbus_serving_map;
+
+ sbus_serving_map = cpu_present_map;
+ for_each_sbus(sbus) {
+ if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1))))
+ sbus_tid[sbus->board] = (sbus->board * 2 + 1);
+ else if (cpu_present_map & (1 << (sbus->board * 2)))
+ sbus_tid[sbus->board] = (sbus->board * 2);
+ else if (cpu_present_map & (1 << (sbus->board * 2 + 1)))
+ sbus_tid[sbus->board] = (sbus->board * 2 + 1);
+ else
+ sbus_tid[sbus->board] = 0xff;
+ if (sbus_tid[sbus->board] != 0xff)
+ sbus_serving_map &= ~(1 << sbus_tid[sbus->board]);
+ }
+ for_each_sbus(sbus)
+ if (sbus_tid[sbus->board] == 0xff) {
+ int i = 31;
+
+ if (!sbus_serving_map)
+ sbus_serving_map = cpu_present_map;
+ while (!(sbus_serving_map & (1 << i)))
+ i--;
+ sbus_tid[sbus->board] = i;
+ sbus_serving_map &= ~(1 << i);
+ }
+ for_each_sbus(sbus) {
+ printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]);
+ set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3);
+ }
+#else
+ struct linux_sbus *sbus;
+ int cpuid = cpu_logical_map(1);
+
+ if (cpuid == -1)
+ cpuid = cpu_logical_map(0);
+ for_each_sbus(sbus) {
+ sbus_tid[sbus->board] = cpuid;
+ set_sbi_tid(sbus->devid, cpuid << 3);
+ }
+ printk("All sbus IRQs directed to CPU%d\n", cpuid);
+#endif
+}
#endif
static void sun4d_clear_clock_irq(void)
@@ -408,7 +442,7 @@ static void sun4d_load_profile_irq(int cpu, unsigned int limit)
__initfunc(static void sun4d_init_timers(void (*counter_fn)(int, void *, struct pt_regs *)))
{
int irq;
- extern struct prom_cpuinfo linux_cpus[NCPUS];
+ extern struct prom_cpuinfo linux_cpus[NR_CPUS];
int cpu;
/* Map the User Timer registers. */
@@ -431,15 +465,39 @@ __initfunc(static void sun4d_init_timers(void (*counter_fn)(int, void *, struct
/* Enable user timer free run for CPU 0 in BW */
/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */
- for(cpu = 0; cpu < NCPUS; cpu++)
- sun4d_load_profile_irq(linux_cpus[cpu].mid, 0);
+ for(cpu = 0; cpu < linux_num_cpus; cpu++)
+ sun4d_load_profile_irq((linux_cpus[cpu].mid >> 3), 0);
+
+#ifdef __SMP__
+ {
+ unsigned long flags;
+ extern unsigned long lvl14_save[4];
+ struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
+ extern unsigned int real_irq_entry[], smp4d_ticker[];
+ extern unsigned int patchme_maybe_smp_msg[];
+
+ /* Adjust so that we jump directly to smp4d_ticker */
+ lvl14_save[2] += smp4d_ticker - real_irq_entry;
+
+	/* For SMP we use the level 14 ticker; however, the bootup code
+	 * has copied the firmware's level 14 vector into the boot CPU's
+	 * trap table, so we must fix this now or we get squashed.
+ */
+ __save_and_cli(flags);
+ patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
+ trap_table->inst_one = lvl14_save[0];
+ trap_table->inst_two = lvl14_save[1];
+ trap_table->inst_three = lvl14_save[2];
+ trap_table->inst_four = lvl14_save[3];
+ local_flush_cache_all();
+ __restore_flags(flags);
+ }
+#endif
}
__initfunc(unsigned long sun4d_init_sbi_irq(unsigned long memory_start))
{
struct linux_sbus *sbus;
- struct sbus_action *s;
- int i;
unsigned mask;
nsbi = 0;
@@ -449,11 +507,13 @@ __initfunc(unsigned long sun4d_init_sbi_irq(unsigned long memory_start))
sbus_actions = (struct sbus_action *)memory_start;
memory_start += (nsbi * 8 * 4 * sizeof(struct sbus_action));
memset (sbus_actions, 0, (nsbi * 8 * 4 * sizeof(struct sbus_action)));
- for (i = 0, s = sbus_actions; i < nsbi * 8 * 4; i++, s++) {
- s->lock = 0xff;
- s->disabled = 1;
- }
for_each_sbus(sbus) {
+#ifdef __SMP__
+ extern unsigned char boot_cpu_id;
+
+ set_sbi_tid(sbus->devid, boot_cpu_id << 3);
+ sbus_tid[sbus->board] = boot_cpu_id;
+#endif
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(sbus->devid, 0xffffffff);
if (mask) {
@@ -468,16 +528,16 @@ __initfunc(void sun4d_init_IRQ(void))
{
__cli();
- enable_irq = sun4d_enable_irq;
- disable_irq = sun4d_disable_irq;
- clear_clock_irq = sun4d_clear_clock_irq;
- clear_profile_irq = sun4d_clear_profile_irq;
- load_profile_irq = sun4d_load_profile_irq;
+ BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
init_timers = sun4d_init_timers;
#ifdef __SMP__
- set_cpu_int = (void (*) (int, int))sun4d_send_ipi;
- clear_cpu_int = (void (*) (int, int))sun4d_clear_ipi;
- set_irq_udt = (void (*) (int))sun4d_set_udt;
+ BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
/* Cannot enable interrupts until OBP ticker is disabled. */
}
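
In the new sun4d_enable_irq()/sun4d_disable_irq() above, masking an extended SBUS interrupt is just setting or clearing one processor-interrupt-level bit in the CPU interrupt mask, with the level looked up through sbus_to_pil[]. The sketch below reproduces only that mask arithmetic as plain user-space C, assuming the same bit layout as the patch; cc_get_imsk()/cc_set_imsk(), the SMP target-CPU handling and the spinlock are deliberately left out, and the sample irq value is made up.

#include <stdio.h>

/* Mirrors the sbus_to_pil[] table from the patch: SBUS level -> PIL. */
static const int sbus_to_pil[] = { 0, 2, 3, 5, 7, 9, 11, 13 };

/* Setting the PIL bit masks (disables) the source, clearing it unmasks it,
 * matching what sun4d_disable_irq()/sun4d_enable_irq() do to the imsk value. */
static unsigned int mask_irq(unsigned int imsk, unsigned int irq)
{
	return imsk | (1u << sbus_to_pil[(irq >> 2) & 7]);
}

static unsigned int unmask_irq(unsigned int imsk, unsigned int irq)
{
	return imsk & ~(1u << sbus_to_pil[(irq >> 2) & 7]);
}

int main(void)
{
	/* Hypothetical extended irq: board 3, SBUS level 4 (PIL 7). */
	unsigned int irq = (3u << 5) | (4u << 2);
	unsigned int imsk = 0;

	imsk = mask_irq(imsk, irq);
	printf("masked:   %#x\n", imsk);
	imsk = unmask_irq(imsk, irq);
	printf("unmasked: %#x\n", imsk);
	return 0;
}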
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
new file mode 100644
index 000000000..46ce7a83f
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -0,0 +1,576 @@
+/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based on sun4m's smp.c, which is:
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h> /* for CONFIG_PROFILE */
+#include <asm/head.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/tasks.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/atops.h>
+#include <asm/spinlock.h>
+#include <asm/hardirq.h>
+#include <asm/softirq.h>
+#include <asm/sbus.h>
+#include <asm/sbi.h>
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+
+#define IRQ_CROSS_CALL 15
+
+extern ctxd_t *srmmu_ctx_table_phys;
+extern int linux_num_cpus;
+
+extern void calibrate_delay(void);
+
+extern struct task_struct *current_set[NR_CPUS];
+extern volatile int smp_processors_ready;
+extern unsigned long cpu_present_map;
+extern int smp_num_cpus;
+static int smp_highest_cpu = 0;
+extern int smp_threads_ready;
+extern unsigned char mid_xlate[NR_CPUS];
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern unsigned long smp_proc_in_lock[NR_CPUS];
+extern struct cpuinfo_sparc cpu_data[NR_CPUS];
+extern unsigned long cpu_offset[NR_CPUS];
+extern unsigned char boot_cpu_id;
+extern int smp_activated;
+extern volatile int cpu_number_map[NR_CPUS];
+extern volatile int __cpu_logical_map[NR_CPUS];
+extern struct klock_info klock_info;
+extern volatile unsigned long ipi_count;
+extern volatile int smp_process_available;
+extern volatile int smp_commenced;
+extern int __smp4d_processor_id(void);
+
+/* #define SMP_DEBUG */
+
+#ifdef SMP_DEBUG
+#define SMP_PRINTK(x) printk x
+#else
+#define SMP_PRINTK(x)
+#endif
+
+int smp4d_bogo_info(char *buf)
+{
+ int len = 0, i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_present_map & (1 << i))
+ len += sprintf(buf + len, "Cpu%dBogo\t: %lu.%02lu\n",
+ i,
+ cpu_data[i].udelay_val/500000,
+ (cpu_data[i].udelay_val/5000)%100);
+ return len;
+}
+
+int smp4d_info(char *buf)
+{
+ int len = 0, i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_present_map & (1 << i))
+ len += sprintf(buf + len, "CPU%d\t\t: %s\n",
+ i,
+ (klock_info.akp == i) ? "akp" : "online");
+ return len;
+}
+
+static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+{
+ __asm__ __volatile__("swap [%1], %0\n\t" :
+ "=&r" (val), "=&r" (ptr) :
+ "0" (val), "1" (ptr));
+ return val;
+}
+
+static void smp_setup_percpu_timer(void);
+extern void cpu_probe(void);
+extern void sun4d_distribute_irqs(void);
+
+__initfunc(void smp4d_callin(void))
+{
+ int cpuid = hard_smp4d_processor_id();
+ extern spinlock_t sun4d_imsk_lock;
+ unsigned long flags;
+
+ /* Show we are alive */
+ cpu_leds[cpuid] = 0x6;
+ show_leds(cpuid);
+
+ /* Enable level15 interrupt, disable level14 interrupt for now */
+ cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ /* Get our local ticker going. */
+ smp_setup_percpu_timer();
+
+ calibrate_delay();
+ smp_store_cpu_info(cpuid);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ /* Allow master to continue. */
+ swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ cpu_probe();
+
+ while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
+ barrier();
+
+ while(current_set[cpuid]->processor != cpuid)
+ barrier();
+
+ /* Fix idle thread fields. */
+ __asm__ __volatile__("ld [%0], %%g6\n\t"
+ "sta %%g6, [%%g0] %1\n\t"
+ : : "r" (&current_set[cpuid]), "i" (ASI_M_VIKING_TMP2)
+ : "memory" /* paranoid */);
+
+ cpu_leds[cpuid] = 0x9;
+ show_leds(cpuid);
+
+ current->mm->mmap->vm_page_prot = PAGE_SHARED;
+ current->mm->mmap->vm_start = PAGE_OFFSET;
+ current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ __sti(); /* We don't allow PIL 14 yet */
+
+ while(!smp_commenced)
+ barrier();
+
+ spin_lock_irqsave(&sun4d_imsk_lock, flags);
+ cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
+ spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+}
+
+extern int cpu_idle(void *unused);
+extern void init_IRQ(void);
+extern void cpu_panic(void);
+extern int start_secondary(void *unused);
+
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
+extern struct linux_prom_registers smp_penguin_ctable;
+extern unsigned long trapbase_cpu1[];
+extern unsigned long trapbase_cpu2[];
+extern unsigned long trapbase_cpu3[];
+
+__initfunc(void smp4d_boot_cpus(void))
+{
+ int cpucount = 0;
+ int i = 0;
+
+ printk("Entering SMP Mode...\n");
+
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_offset[i] = (char *)&cpu_data[i] - (char *)&cpu_data;
+
+ if (boot_cpu_id)
+ current_set[0] = NULL;
+
+ __sti();
+ cpu_present_map = 0;
+ for(i=0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1<<linux_cpus[i].mid);
+ SMP_PRINTK(("cpu_present_map %08lx\n", cpu_present_map));
+ for(i=0; i < NR_CPUS; i++)
+ cpu_number_map[i] = -1;
+ for(i=0; i < NR_CPUS; i++)
+ __cpu_logical_map[i] = -1;
+ for(i=0; i < NR_CPUS; i++)
+ mid_xlate[i] = i;
+ cpu_number_map[boot_cpu_id] = 0;
+ __cpu_logical_map[0] = boot_cpu_id;
+ klock_info.akp = boot_cpu_id;
+ current->processor = boot_cpu_id;
+ smp_store_cpu_info(boot_cpu_id);
+ smp_setup_percpu_timer();
+ local_flush_cache_all();
+ if(linux_num_cpus == 1)
+ return; /* Not an MP box. */
+ SMP_PRINTK(("Iterating over CPUs\n"));
+ for(i = 0; i < NR_CPUS; i++) {
+ if(i == boot_cpu_id)
+ continue;
+
+ if(cpu_present_map & (1 << i)) {
+ extern unsigned long sun4d_cpu_startup;
+ unsigned long *entry = &sun4d_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int no;
+
+ /* Cook up an idler for this guy. */
+ kernel_thread(start_secondary, NULL, CLONE_PID);
+
+ p = task[++cpucount];
+
+ p->processor = i;
+ current_set[i] = p;
+
+ for (no = 0; no < linux_num_cpus; no++)
+ if (linux_cpus[no].mid == i)
+ break;
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, linux_cpus[no].prom_node));
+ local_flush_cache_all();
+ prom_startcpu(linux_cpus[no].prom_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ SMP_PRINTK(("prom_startcpu returned :)\n"));
+
+ /* wheee... it's going... */
+ for(timeout = 0; timeout < 5000000; timeout++) {
+ if(cpu_callin_map[i])
+ break;
+ udelay(100);
+ }
+
+ if(cpu_callin_map[i]) {
+ /* Another "Red Snapper". */
+ cpu_number_map[i] = cpucount;
+ __cpu_logical_map[cpucount] = i;
+ } else {
+ cpucount--;
+ printk("Processor %d is stuck.\n", i);
+ }
+ }
+ if(!(cpu_callin_map[i])) {
+ cpu_present_map &= ~(1 << i);
+ cpu_number_map[i] = -1;
+ }
+ }
+ local_flush_cache_all();
+ if(cpucount == 0) {
+ printk("Error: only one Processor found.\n");
+ cpu_present_map = (1 << hard_smp4d_processor_id());
+ } else {
+ unsigned long bogosum = 0;
+
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i)) {
+ bogosum += cpu_data[i].udelay_val;
+ smp_highest_cpu = i;
+ }
+ }
+ SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, (bogosum + 2500)/500000, ((bogosum + 2500)/5000)%100));
+ printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount + 1,
+ (bogosum + 2500)/500000,
+ ((bogosum + 2500)/5000)%100);
+ smp_activated = 1;
+ smp_num_cpus = cpucount + 1;
+ }
+
+ /* Free unneeded trap tables */
+
+ mem_map[MAP_NR((unsigned long)trapbase_cpu1)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu1);
+ mem_map[MAP_NR((unsigned long)trapbase_cpu2)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu2);
+ mem_map[MAP_NR((unsigned long)trapbase_cpu3)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu3);
+
+ /* Ok, they are spinning and ready to go. */
+ smp_processors_ready = 1;
+ sun4d_distribute_irqs();
+}
+
+static struct smp_funcall {
+ smpfunc_t func;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned char processors_in[NR_CPUS]; /* Set when ipi entered. */
+ unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
+} ccall_info __attribute__((aligned(8)));
+
+static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
+
+/* Cross calls must be serialized, at least currently. */
+void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ if(smp_processors_ready) {
+ register int high = smp_highest_cpu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cross_call_lock, flags);
+
+ {
+ /* If you make changes here, make sure gcc generates proper code... */
+ smpfunc_t f asm("i0") = func;
+ unsigned long a1 asm("i1") = arg1;
+ unsigned long a2 asm("i2") = arg2;
+ unsigned long a3 asm("i3") = arg3;
+ unsigned long a4 asm("i4") = arg4;
+ unsigned long a5 asm("i5") = arg5;
+
+ __asm__ __volatile__("
+ std %0, [%6]
+ std %2, [%6 + 8]
+ std %4, [%6 + 16]" : :
+ "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+ "r" (&ccall_info.func));
+ }
+
+ /* Init receive/complete mapping, plus fire the IPI's off. */
+ {
+ register unsigned long mask;
+ register int i;
+
+ mask = (cpu_present_map & ~(1 << hard_smp4d_processor_id()));
+ for(i = 0; i <= high; i++) {
+ if(mask & (1 << i)) {
+ ccall_info.processors_in[i] = 0;
+ ccall_info.processors_out[i] = 0;
+ sun4d_send_ipi(i, IRQ_CROSS_CALL);
+ }
+ }
+ }
+
+ /* First, run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+
+ {
+ register int i;
+
+ i = 0;
+ do {
+ while(!ccall_info.processors_in[i])
+ barrier();
+ } while(++i <= high);
+
+ i = 0;
+ do {
+ while(!ccall_info.processors_out[i])
+ barrier();
+ } while(++i <= high);
+ }
+
+ spin_unlock_irqrestore(&cross_call_lock, flags);
+ } else
+ func(arg1, arg2, arg3, arg4, arg5); /* Just need to run local copy. */
+}
+
+/* Running cross calls. */
+void smp4d_cross_call_irq(void)
+{
+ int i = hard_smp4d_processor_id();
+
+ ccall_info.processors_in[i] = 1;
+ ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
+ ccall_info.arg4, ccall_info.arg5);
+ ccall_info.processors_out[i] = 1;
+}
+
+static int smp4d_stop_cpu_sender;
+
+static void smp4d_stop_cpu(void)
+{
+ int me = hard_smp4d_processor_id();
+
+ if (me != smp4d_stop_cpu_sender)
+ while(1) barrier();
+}
+
+/* Cross calls do all the message passing work themselves in order
+ * to work efficiently and atomically; only stopcpu and reschedule
+ * messages come through here.
+ */
+void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ int me = hard_smp4d_processor_id();
+
+ SMP_PRINTK(("smp4d_message_pass %d %d %08lx %d\n", target, msg, data, wait));
+ if (msg == MSG_STOP_CPU && target == MSG_ALL_BUT_SELF) {
+ unsigned long flags;
+ static spinlock_t stop_cpu_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_irqsave(&stop_cpu_lock, flags);
+ smp4d_stop_cpu_sender = me;
+ smp4d_cross_call((smpfunc_t)smp4d_stop_cpu, 0, 0, 0, 0, 0);
+ spin_unlock_irqrestore(&stop_cpu_lock, flags);
+ }
+ printk("Yeeee, trying to send SMP msg(%d) to %d on cpu %d\n", msg, target, me);
+ panic("Bogon SMP message pass.");
+}
+
+/* Protects counters touched during level14 ticker */
+static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_PROFILE
+
+/* 32-bit Sparc specific profiling function. */
+static inline void sparc_do_profile(unsigned long pc)
+{
+ if(prof_buffer && current->pid) {
+ extern int _stext;
+
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+
+ spin_lock(&ticker_lock);
+ if(pc < prof_len)
+ prof_buffer[pc]++;
+ else
+ prof_buffer[prof_len - 1]++;
+ spin_unlock(&ticker_lock);
+ }
+}
+
+#endif
+
+extern unsigned int prof_multiplier[NR_CPUS];
+extern unsigned int prof_counter[NR_CPUS];
+
+extern void update_one_process(struct task_struct *p, unsigned long ticks,
+ unsigned long user, unsigned long system,
+ int cpu);
+
+
+void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
+{
+ int cpu = hard_smp4d_processor_id();
+ static int cpu_tick[NR_CPUS];
+ static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
+
+ bw_get_prof_limit(cpu);
+ bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
+
+ cpu_tick[cpu]++;
+ if (!(cpu_tick[cpu] & 15)) {
+ if (cpu_tick[cpu] == 0x60)
+ cpu_tick[cpu] = 0;
+ cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
+ show_leds(cpu);
+ }
+
+#ifdef CONFIG_PROFILE
+ if(!user_mode(regs))
+ sparc_do_profile(regs->pc);
+#endif
+ if(!--prof_counter[cpu]) {
+ int user = user_mode(regs);
+ if(current->pid) {
+ update_one_process(current, 1, user, !user, cpu);
+
+ if(--current->counter < 0) {
+ current->counter = 0;
+ need_resched = 1;
+ }
+
+ spin_lock(&ticker_lock);
+ if(user) {
+ if(current->priority < DEF_PRIORITY) {
+ kstat.cpu_nice++;
+ kstat.per_cpu_nice[cpu]++;
+ } else {
+ kstat.cpu_user++;
+ kstat.per_cpu_user[cpu]++;
+ }
+ } else {
+ kstat.cpu_system++;
+ kstat.per_cpu_system[cpu]++;
+ }
+ spin_unlock(&ticker_lock);
+ }
+ prof_counter[cpu] = prof_multiplier[cpu];
+ }
+}
+
+extern unsigned int lvl14_resolution;
+
+__initfunc(static void smp_setup_percpu_timer(void))
+{
+ int cpu = hard_smp4d_processor_id();
+
+ prof_counter[cpu] = prof_multiplier[cpu] = 1;
+ load_profile_irq(cpu, lvl14_resolution);
+}
+
+__initfunc(void smp4d_blackbox_id(unsigned *addr))
+{
+ int rd = *addr & 0x3e000000;
+
+ addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
+ addr[1] = 0x01000000; /* nop */
+ addr[2] = 0x01000000; /* nop */
+}
+
+__initfunc(void smp4d_blackbox_current(unsigned *addr))
+{
+ /* We have a nice Linux current register :) */
+ int rd = addr[1] & 0x3e000000;
+
+ addr[0] = 0x10800006; /* b .+24 */
+ addr[1] = 0xc0800820 | rd; /* lda [%g0] ASI_M_VIKING_TMP2, reg */
+}
+
+__initfunc(void sun4d_init_smp(void))
+{
+ int i;
+ extern unsigned int patchme_store_new_current[];
+ extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
+
+ /* Store current into Linux current register :) */
+ __asm__ __volatile__("sta %%g6, [%%g0] %0" : : "i"(ASI_M_VIKING_TMP2));
+
+ /* Patch switch_to */
+ patchme_store_new_current[0] = (patchme_store_new_current[0] & 0x3e000000) | 0xc0a00820;
+
+ /* Patch ipi15 trap table */
+ t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
+
+ /* And set btfixup... */
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4d_blackbox_id);
+ BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
+ BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_bogo_info, smp4d_bogo_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_info, smp4d_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ ccall_info.processors_in[i] = 1;
+ ccall_info.processors_out[i] = 1;
+ }
+}
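
The new smp4d_cross_call() above is a two-phase handshake: the caller stores the function and its arguments into ccall_info, clears each target's processors_in/processors_out flags, sends IRQ_CROSS_CALL IPIs, runs the local copy, and then spins until every target has flagged first entry and then completion (smp4d_cross_call_irq() sets those flags on the receiving side). The user-space model below shows the same handshake with pthreads and C11 atomics standing in for CPUs, IPIs and barrier(); every name in it is illustrative, none of it is a kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

/* Simplified ccall_info: the function to run plus per-CPU in/out flags. */
static void (*volatile ccall_func)(int);
static atomic_int processors_in[NCPUS];
static atomic_int processors_out[NCPUS];
static atomic_int kick[NCPUS];			/* stands in for the IPI */

static void do_flush(int cpu) { printf("cpu%d: running cross call\n", cpu); }

static void *target_cpu(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&kick[cpu]))	/* wait for the "IPI" */
		;
	atomic_store(&processors_in[cpu], 1);	/* mark entry */
	ccall_func(cpu);			/* run the call */
	atomic_store(&processors_out[cpu], 1);	/* mark completion */
	return NULL;
}

static void cross_call(void (*func)(int))
{
	int i;

	ccall_func = func;
	for (i = 1; i < NCPUS; i++) {
		atomic_store(&processors_in[i], 0);
		atomic_store(&processors_out[i], 0);
		atomic_store(&kick[i], 1);	/* "send" the IPI */
	}
	func(0);				/* run the local copy first */
	for (i = 1; i < NCPUS; i++)		/* wait for entry ... */
		while (!atomic_load(&processors_in[i]))
			;
	for (i = 1; i < NCPUS; i++)		/* ... then for completion */
		while (!atomic_load(&processors_out[i]))
			;
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	for (i = 1; i < NCPUS; i++)
		pthread_create(&t[i], NULL, target_cpu, (void *)(long)i);
	cross_call(do_flush);
	for (i = 1; i < NCPUS; i++)
		pthread_join(&t[i], NULL);
	return 0;
}

Build with -pthread. The ordering here comes from the seq_cst atomics; the kernel version relies on the IPI delivery and barrier() instead.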
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 81db1a4ce..e55839a7a 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -83,9 +83,9 @@ inline unsigned long sun4m_get_irqmask(unsigned int irq)
if (!mask)
printk("sun4m_get_irqmask: IRQ%d has no valid mask!\n",irq);
} else {
- /* Soft Interrupts will come here
- * Currently there is no way to trigger them but I'm sure something
- * could be cooked up.
+ /* Soft Interrupts will come here.
+ * Currently there is no way to trigger them but I'm sure
+ * something could be cooked up.
*/
irq &= 0xf;
mask = SUN4M_SOFT_INT(irq);
@@ -349,18 +349,18 @@ __initfunc(void sun4m_init_IRQ(void))
&sun4m_interrupts->undirected_target;
sun4m_interrupts->undirected_target = 0;
}
- enable_irq = sun4m_enable_irq;
- disable_irq = sun4m_disable_irq;
- enable_pil_irq = sun4m_enable_pil_irq;
- disable_pil_irq = sun4m_disable_pil_irq;
- clear_clock_irq = sun4m_clear_clock_irq;
- clear_profile_irq = sun4m_clear_profile_irq;
- load_profile_irq = sun4m_load_profile_irq;
+ BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_profile_irq, sun4m_clear_profile_irq, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
init_timers = sun4m_init_timers;
#ifdef __SMP__
- set_cpu_int = (void (*) (int, int))sun4m_send_ipi;
- clear_cpu_int = (void (*) (int, int))sun4m_clear_ipi;
- set_irq_udt = (void (*) (int))sun4m_set_udt;
+ BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
#endif
/* Cannot enable interrupts until OBP ticker is disabled. */
}
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
new file mode 100644
index 000000000..ec1ef424b
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -0,0 +1,545 @@
+/* sun4m_smp.c: Sparc SUN4M SMP support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h> /* for CONFIG_PROFILE */
+#include <asm/head.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/tasks.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/atops.h>
+#include <asm/spinlock.h>
+#include <asm/hardirq.h>
+#include <asm/softirq.h>
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+
+#define IRQ_RESCHEDULE 13
+#define IRQ_STOP_CPU 14
+#define IRQ_CROSS_CALL 15
+
+extern ctxd_t *srmmu_ctx_table_phys;
+extern int linux_num_cpus;
+
+extern void calibrate_delay(void);
+
+extern struct task_struct *current_set[NR_CPUS];
+extern volatile int smp_processors_ready;
+extern unsigned long cpu_present_map;
+extern int smp_num_cpus;
+extern int smp_threads_ready;
+extern unsigned char mid_xlate[NR_CPUS];
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern unsigned long smp_proc_in_lock[NR_CPUS];
+extern struct cpuinfo_sparc cpu_data[NR_CPUS];
+extern unsigned long cpu_offset[NR_CPUS];
+extern unsigned char boot_cpu_id;
+extern int smp_activated;
+extern volatile int cpu_number_map[NR_CPUS];
+extern volatile int __cpu_logical_map[NR_CPUS];
+extern struct klock_info klock_info;
+extern volatile unsigned long ipi_count;
+extern volatile int smp_process_available;
+extern volatile int smp_commenced;
+extern int __smp4m_processor_id(void);
+
+/*#define SMP_DEBUG*/
+
+#ifdef SMP_DEBUG
+#define SMP_PRINTK(x) printk x
+#else
+#define SMP_PRINTK(x)
+#endif
+
+int smp4m_bogo_info(char *buf)
+{
+ return sprintf(buf,
+ "Cpu0Bogo\t: %lu.%02lu\n"
+ "Cpu1Bogo\t: %lu.%02lu\n"
+ "Cpu2Bogo\t: %lu.%02lu\n"
+ "Cpu3Bogo\t: %lu.%02lu\n",
+ cpu_data[0].udelay_val/500000, (cpu_data[0].udelay_val/5000)%100,
+ cpu_data[1].udelay_val/500000, (cpu_data[1].udelay_val/5000)%100,
+ cpu_data[2].udelay_val/500000, (cpu_data[2].udelay_val/5000)%100,
+ cpu_data[3].udelay_val/500000, (cpu_data[3].udelay_val/5000)%100);
+}
+
+int smp4m_info(char *buf)
+{
+ return sprintf(buf,
+" CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
+"State: %s\t\t%s\t\t%s\t\t%s\n",
+(cpu_present_map & 1) ? ((klock_info.akp == 0) ? "akp" : "online") : "offline",
+(cpu_present_map & 2) ? ((klock_info.akp == 1) ? "akp" : "online") : "offline",
+(cpu_present_map & 4) ? ((klock_info.akp == 2) ? "akp" : "online") : "offline",
+(cpu_present_map & 8) ? ((klock_info.akp == 3) ? "akp" : "online") : "offline");
+}
+
+static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+{
+ __asm__ __volatile__("swap [%1], %0\n\t" :
+ "=&r" (val), "=&r" (ptr) :
+ "0" (val), "1" (ptr));
+ return val;
+}
+
+static void smp_setup_percpu_timer(void);
+extern void cpu_probe(void);
+
+__initfunc(void smp4m_callin(void))
+{
+ int cpuid = hard_smp_processor_id();
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ set_irq_udt(mid_xlate[boot_cpu_id]);
+
+ /* Get our local ticker going. */
+ smp_setup_percpu_timer();
+
+ calibrate_delay();
+ smp_store_cpu_info(cpuid);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ /* Allow master to continue. */
+ swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ cpu_probe();
+
+ while(!task[cpuid] || current_set[cpuid] != task[cpuid])
+ barrier();
+
+ /* Fix idle thread fields. */
+ __asm__ __volatile__("ld [%0], %%g6\n\t"
+ : : "r" (&current_set[cpuid])
+ : "memory" /* paranoid */);
+ current->mm->mmap->vm_page_prot = PAGE_SHARED;
+ current->mm->mmap->vm_start = PAGE_OFFSET;
+ current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;
+
+ while(!smp_commenced)
+ barrier();
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ __sti();
+}
+
+extern int cpu_idle(void *unused);
+extern void init_IRQ(void);
+extern void cpu_panic(void);
+extern int start_secondary(void *unused);
+
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
+extern struct linux_prom_registers smp_penguin_ctable;
+extern unsigned long trapbase_cpu1[];
+extern unsigned long trapbase_cpu2[];
+extern unsigned long trapbase_cpu3[];
+
+__initfunc(void smp4m_boot_cpus(void))
+{
+ int cpucount = 0;
+ int i = 0;
+ int first, prev;
+
+ printk("Entering SMP Mode...\n");
+
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_offset[i] = (char *)&cpu_data[i] - (char *)&cpu_data;
+
+ __sti();
+ cpu_present_map = 0;
+ for(i=0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1<<i);
+ for(i=0; i < NR_CPUS; i++)
+ cpu_number_map[i] = -1;
+ for(i=0; i < NR_CPUS; i++)
+ __cpu_logical_map[i] = -1;
+ mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
+ cpu_number_map[boot_cpu_id] = 0;
+ __cpu_logical_map[0] = boot_cpu_id;
+ klock_info.akp = boot_cpu_id;
+ current->processor = boot_cpu_id;
+ smp_store_cpu_info(boot_cpu_id);
+ set_irq_udt(mid_xlate[boot_cpu_id]);
+ smp_setup_percpu_timer();
+ local_flush_cache_all();
+ if(linux_num_cpus == 1)
+ return; /* Not an MP box. */
+ for(i = 0; i < NR_CPUS; i++) {
+ if(i == boot_cpu_id)
+ continue;
+
+ if(cpu_present_map & (1 << i)) {
+ extern unsigned long sun4m_cpu_startup;
+ unsigned long *entry = &sun4m_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+
+ /* Cook up an idler for this guy. */
+ kernel_thread(start_secondary, NULL, CLONE_PID);
+
+ p = task[++cpucount];
+
+ p->processor = i;
+ current_set[i] = p;
+
+ /* See trampoline.S for details... */
+ entry += ((i-1) * 3);
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk("Starting CPU %d at %p\n", i, entry);
+ mid_xlate[i] = (linux_cpus[i].mid & ~8);
+ local_flush_cache_all();
+ prom_startcpu(linux_cpus[i].prom_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ /* wheee... it's going... */
+ for(timeout = 0; timeout < 5000000; timeout++) {
+ if(cpu_callin_map[i])
+ break;
+ udelay(100);
+ }
+ if(cpu_callin_map[i]) {
+ /* Another "Red Snapper". */
+ cpu_number_map[i] = i;
+ __cpu_logical_map[i] = i;
+ } else {
+ cpucount--;
+ printk("Processor %d is stuck.\n", i);
+ }
+ }
+ if(!(cpu_callin_map[i])) {
+ cpu_present_map &= ~(1 << i);
+ cpu_number_map[i] = -1;
+ }
+ }
+ local_flush_cache_all();
+ if(cpucount == 0) {
+ printk("Error: only one Processor found.\n");
+ cpu_present_map = (1 << smp_processor_id());
+ } else {
+ unsigned long bogosum = 0;
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i))
+ bogosum += cpu_data[i].udelay_val;
+ }
+ printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount + 1,
+ (bogosum + 2500)/500000,
+ ((bogosum + 2500)/5000)%100);
+ smp_activated = 1;
+ smp_num_cpus = cpucount + 1;
+ }
+
+ /* Setup CPU list for IRQ distribution scheme. */
+ first = prev = -1;
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i)) {
+ if(first == -1)
+ first = i;
+ if(prev != -1)
+ cpu_data[prev].next = i;
+ cpu_data[i].mid = mid_xlate[i];
+ prev = i;
+ }
+ }
+ cpu_data[prev].next = first;
+
+ /* Free unneeded trap tables */
+
+ if (!(cpu_present_map & (1 << 1))) {
+ mem_map[MAP_NR((unsigned long)trapbase_cpu1)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu1);
+ }
+ if (!(cpu_present_map & (1 << 2))) {
+ mem_map[MAP_NR((unsigned long)trapbase_cpu2)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu2);
+ }
+ if (!(cpu_present_map & (1 << 3))) {
+ mem_map[MAP_NR((unsigned long)trapbase_cpu3)].flags &= ~(1 << PG_reserved);
+ free_page((unsigned long)trapbase_cpu3);
+ }
+
+ /* Ok, they are spinning and ready to go. */
+ smp_processors_ready = 1;
+}
+
+/* At each hardware IRQ, we get this called to forward IRQ reception
+ * to the next processor. The caller must disable the IRQ level being
+ * serviced globally so that there are no double interrupts received.
+ */
+void smp4m_irq_rotate(int cpu)
+{
+ if(smp_processors_ready)
+ set_irq_udt(cpu_data[cpu_data[cpu].next].mid);
+}
+
+/* Cross calls do all the message passing work themselves in order
+ * to work efficiently and atomically; only stopcpu and reschedule
+ * messages come through here.
+ */
+void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ static unsigned long smp_cpu_in_msg[NR_CPUS];
+ unsigned long mask;
+ int me = smp_processor_id();
+ int irq, i;
+
+ if(msg == MSG_RESCHEDULE) {
+ irq = IRQ_RESCHEDULE;
+
+ if(smp_cpu_in_msg[me])
+ return;
+ } else if(msg == MSG_STOP_CPU) {
+ irq = IRQ_STOP_CPU;
+ } else {
+ goto barf;
+ }
+
+ smp_cpu_in_msg[me]++;
+ if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
+ mask = cpu_present_map;
+ if(target == MSG_ALL_BUT_SELF)
+ mask &= ~(1 << me);
+ for(i = 0; i < 4; i++) {
+ if(mask & (1 << i))
+ set_cpu_int(mid_xlate[i], irq);
+ }
+ } else {
+ set_cpu_int(mid_xlate[target], irq);
+ }
+ smp_cpu_in_msg[me]--;
+
+ return;
+barf:
+ printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
+ panic("Bogon SMP message pass.");
+}
+
+static struct smp_funcall {
+ smpfunc_t func;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
+ unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+} ccall_info;
+
+static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
+
+/* Cross calls must be serialized, at least currently. */
+void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ if(smp_processors_ready) {
+ register int ncpus = smp_num_cpus;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cross_call_lock, flags);
+
+ /* Init function glue. */
+ ccall_info.func = func;
+ ccall_info.arg1 = arg1;
+ ccall_info.arg2 = arg2;
+ ccall_info.arg3 = arg3;
+ ccall_info.arg4 = arg4;
+ ccall_info.arg5 = arg5;
+
+ /* Init receive/complete mapping, plus fire the IPI's off. */
+ {
+ register unsigned long mask;
+ register int i;
+
+ mask = (cpu_present_map & ~(1 << smp_processor_id()));
+ for(i = 0; i < ncpus; i++) {
+ if(mask & (1 << i)) {
+ ccall_info.processors_in[i] = 0;
+ ccall_info.processors_out[i] = 0;
+ set_cpu_int(mid_xlate[i], IRQ_CROSS_CALL);
+ } else {
+ ccall_info.processors_in[i] = 1;
+ ccall_info.processors_out[i] = 1;
+ }
+ }
+ }
+
+ /* First, run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+
+ {
+ register int i;
+
+ i = 0;
+ do {
+ while(!ccall_info.processors_in[i])
+ barrier();
+ } while(++i < ncpus);
+
+ i = 0;
+ do {
+ while(!ccall_info.processors_out[i])
+ barrier();
+ } while(++i < ncpus);
+ }
+
+ spin_unlock_irqrestore(&cross_call_lock, flags);
+ } else
+ func(arg1, arg2, arg3, arg4, arg5); /* Just need to run local copy. */
+}
+
+/* Running cross calls. */
+void smp4m_cross_call_irq(void)
+{
+ int i = smp_processor_id();
+
+ ccall_info.processors_in[i] = 1;
+ ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
+ ccall_info.arg4, ccall_info.arg5);
+ ccall_info.processors_out[i] = 1;
+}
+
+/* Protects counters touched during level14 ticker */
+static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_PROFILE
+
+/* 32-bit Sparc specific profiling function. */
+static inline void sparc_do_profile(unsigned long pc)
+{
+ if(prof_buffer && current->pid) {
+ extern int _stext;
+
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+
+ spin_lock(&ticker_lock);
+ if(pc < prof_len)
+ prof_buffer[pc]++;
+ else
+ prof_buffer[prof_len - 1]++;
+ spin_unlock(&ticker_lock);
+ }
+}
+
+#endif
+
+extern unsigned int prof_multiplier[NR_CPUS];
+extern unsigned int prof_counter[NR_CPUS];
+
+extern void update_one_process(struct task_struct *p, unsigned long ticks,
+ unsigned long user, unsigned long system,
+ int cpu);
+
+void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ clear_profile_irq(mid_xlate[cpu]);
+#ifdef CONFIG_PROFILE
+ if(!user_mode(regs))
+ sparc_do_profile(regs->pc);
+#endif
+ if(!--prof_counter[cpu]) {
+ int user = user_mode(regs);
+ if(current->pid) {
+ update_one_process(current, 1, user, !user, cpu);
+
+ if(--current->counter < 0) {
+ current->counter = 0;
+ need_resched = 1;
+ }
+
+ spin_lock(&ticker_lock);
+ if(user) {
+ if(current->priority < DEF_PRIORITY) {
+ kstat.cpu_nice++;
+ kstat.per_cpu_nice[cpu]++;
+ } else {
+ kstat.cpu_user++;
+ kstat.per_cpu_user[cpu]++;
+ }
+ } else {
+ kstat.cpu_system++;
+ kstat.per_cpu_system[cpu]++;
+ }
+ spin_unlock(&ticker_lock);
+ }
+ prof_counter[cpu] = prof_multiplier[cpu];
+ }
+}
+
+extern unsigned int lvl14_resolution;
+
+__initfunc(static void smp_setup_percpu_timer(void))
+{
+ int cpu = smp_processor_id();
+
+ prof_counter[cpu] = prof_multiplier[cpu] = 1;
+ load_profile_irq(mid_xlate[cpu], lvl14_resolution);
+
+ if(cpu == boot_cpu_id)
+ enable_pil_irq(14);
+}
+
+__initfunc(void smp4m_blackbox_id(unsigned *addr))
+{
+ int rd = *addr & 0x3e000000;
+ int rs1 = rd >> 11;
+
+ addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
+ addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
+ addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
+}
+
+__initfunc(void smp4m_blackbox_current(unsigned *addr))
+{
+ int rd = *addr & 0x3e000000;
+ int rs1 = rd >> 11;
+
+ addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
+ addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
+ addr[4] = 0x8008200c | rd | rs1; /* and reg, 3, reg */
+}
+
+__initfunc(void sun4m_init_smp(void))
+{
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
+ BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
+ BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_bogo_info, smp4m_bogo_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_info, smp4m_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+}
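
smp4m_blackbox_id()/smp4d_blackbox_id() and their _current counterparts build SPARC instructions in place: they keep the destination-register field (bits 29..25) of the instruction being patched and OR it into a fixed opcode such as 0x81580000 ("rd %tbr, reg") or 0x01000000 (nop). The fragment below demonstrates nothing beyond that bit manipulation; the opcodes are the ones quoted in the patch comments, while the RD macro and the choice of %g4 as the template register are assumptions made for the example.

#include <stdio.h>

#define RD_MASK		0x3e000000u		/* destination-register field, bits 29..25 */
#define RD(insn)	((insn) & RD_MASK)	/* rd bits, kept in position */

int main(void)
{
	unsigned int templ  = 4u << 25;			/* template whose rd is %g4 */
	unsigned int rd_tbr = 0x81580000u | RD(templ);	/* rd %tbr, %g4 */
	unsigned int nop    = 0x01000000u;		/* nop, as used by the patch */

	printf("template rd field: %#010x\n", RD(templ));
	printf("rd %%tbr, %%g4    : %#010x\n", rd_tbr);
	printf("nop              : %#010x\n", nop);
	return 0;
}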
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
index 4f6b2accc..deb1aa79e 100644
--- a/arch/sparc/kernel/sunos_ioctl.c
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -1,4 +1,4 @@
-/* $Id: sunos_ioctl.c,v 1.29 1997/09/18 10:37:31 rth Exp $
+/* $Id: sunos_ioctl.c,v 1.30 1998/01/21 06:17:32 ecd Exp $
* sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
*
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
@@ -114,7 +114,7 @@ asmlinkage int sunos_ioctl (int fd, unsigned long cmd, unsigned long arg)
ret = sys_ioctl(fd, SIOCGIFBRDADDR, arg);
goto out;
case _IOW('i', 24, struct ifreq):
- ret = sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+ ret = sys_ioctl(fd, SIOCSIFBRDADDR, arg);
goto out;
case _IOWR('i', 25, struct ifreq):
ret = sys_ioctl(fd, SIOCGIFNETMASK, arg);
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 5b82aa8eb..2fe56b344 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc.c,v 1.38 1998/01/09 16:42:48 jj Exp $
+/* $Id: sys_sparc.c,v 1.40 1998/03/28 08:29:26 davem Exp $
* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -10,8 +10,9 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/config.h>
-#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
@@ -39,7 +40,7 @@ asmlinkage unsigned long sparc_brk(unsigned long brk)
unsigned long ret;
lock_kernel();
- if(sparc_cpu_model == sun4c) {
+ if(ARCH_SUN4C_SUN4) {
if(brk >= 0x20000000 && brk < 0xe0000000) {
ret = current->mm->brk;
goto out;
@@ -192,31 +193,34 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
lock_kernel();
if (!(flags & MAP_ANONYMOUS)) {
- if (fd >= NR_OPEN || !(file = current->files->fd[fd])){
+ file = fget(fd);
+ if (!file)
goto out;
- }
}
retval = -ENOMEM;
if(!(flags & MAP_FIXED) && !addr) {
addr = get_unmapped_area(addr, len);
- if(!addr){
- goto out;
- }
+ if(!addr)
+ goto out_putf;
}
/* See asm-sparc/uaccess.h */
retval = -EINVAL;
if((len > (TASK_SIZE - PAGE_SIZE)) || (addr > (TASK_SIZE-len-PAGE_SIZE)))
- goto out;
+ goto out_putf;
- if(sparc_cpu_model == sun4c) {
+ if(ARCH_SUN4C_SUN4) {
if(((addr >= 0x20000000) && (addr < 0xe0000000))) {
retval = current->mm->brk;
- goto out;
+ goto out_putf;
}
}
retval = do_mmap(file, addr, len, prot, flags, off);
+
+out_putf:
+ if (file)
+ fput(file);
out:
unlock_kernel();
return retval;
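
The sys_mmap() change above replaces the raw current->files->fd[fd] lookup with fget(), and funnels every error after that point through the new out_putf label so the reference is always dropped with fput() (the SunOS routines below get the same treatment). The toy program that follows shows only that acquire-once, release-on-every-path discipline; struct file, fget_stub(), fput_stub() and the hard-coded error numbers are stand-ins rather than kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct file + fget()/fput(). */
struct file { int refcount; };

static struct file *fget_stub(int fd)
{
	struct file *f;

	if (fd < 0)			/* "no such descriptor" */
		return NULL;
	f = malloc(sizeof(*f));
	if (f)
		f->refcount = 1;
	return f;
}

static void fput_stub(struct file *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

static long toy_mmap(int fd, unsigned long len, int anonymous)
{
	struct file *file = NULL;
	long retval = -9;		/* -EBADF */

	if (!anonymous) {
		file = fget_stub(fd);
		if (!file)
			goto out;	/* nothing to release yet */
	}
	retval = -22;			/* -EINVAL */
	if (len == 0 || len > (1UL << 30))
		goto out_putf;		/* error after the ref was taken */
	retval = 0x40000000;		/* pretend do_mmap() succeeded */

out_putf:
	if (file)
		fput_stub(file);	/* drop the ref on every path */
out:
	return retval;
}

int main(void)
{
	printf("%ld\n", toy_mmap(3, 4096, 0));	/* mapped */
	printf("%ld\n", toy_mmap(-1, 4096, 0));	/* -EBADF */
	return 0;
}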
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index 29f00f6b8..bd7bf5d77 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sunos.c,v 1.83 1997/12/14 23:24:28 ecd Exp $
+/* $Id: sys_sunos.c,v 1.87 1998/03/29 03:48:16 shadow Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/resource.h>
#include <linux/ipc.h>
#include <linux/shm.h>
@@ -77,14 +78,19 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
flags &= ~MAP_NORESERVE;
}
retval = -EBADF;
- if(!(flags & MAP_ANONYMOUS))
- if (fd >= SUNOS_NR_OPEN || !(file = current->files->fd[fd]))
+ if(!(flags & MAP_ANONYMOUS)) {
+ if (fd >= SUNOS_NR_OPEN)
goto out;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
retval = -ENOMEM;
if(!(flags & MAP_FIXED) && !addr) {
addr = get_unmapped_area(addr, len);
if(!addr)
- goto out;
+ goto out_putf;
}
/* If this is ld.so or a shared library doing an mmap
* of /dev/zero, transform it into an anonymous mapping.
@@ -105,18 +111,22 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
/* See asm-sparc/uaccess.h */
retval = -EINVAL;
if((len > (TASK_SIZE - PAGE_SIZE)) || (addr > (TASK_SIZE-len-PAGE_SIZE)))
- goto out;
+ goto out_putf;
- if(sparc_cpu_model == sun4c) {
+ if(ARCH_SUN4C_SUN4) {
if(((addr >= 0x20000000) && (addr < 0xe0000000))) {
retval = current->mm->brk;
- goto out;
+ goto out_putf;
}
}
retval = do_mmap(file, addr, len, prot, flags, off);
if(!ret_type)
retval = ((retval < PAGE_OFFSET) ? 0 : retval);
+
+out_putf:
+ if (file)
+ fput(file);
out:
unlock_kernel();
return retval;
@@ -139,7 +149,7 @@ asmlinkage int sunos_brk(unsigned long brk)
unsigned long newbrk, oldbrk;
lock_kernel();
- if(sparc_cpu_model == sun4c) {
+ if(ARCH_SUN4C_SUN4) {
if(brk >= 0x20000000 && brk < 0xe0000000) {
goto out;
}
@@ -423,39 +433,48 @@ static int sunos_filldir(void * __buf, const char * name, int namlen,
asmlinkage int sunos_getdents(unsigned int fd, void * dirent, int cnt)
{
struct file * file;
+ struct inode * inode;
struct sunos_dirent * lastdirent;
struct sunos_dirent_callback buf;
int error = -EBADF;
lock_kernel();
- if(fd >= SUNOS_NR_OPEN)
+ if (fd >= SUNOS_NR_OPEN)
goto out;
- file = current->files->fd[fd];
- if(!file)
+ file = fget(fd);
+ if (!file)
goto out;
error = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
- goto out;
+ goto out_putf;
error = -EINVAL;
- if(cnt < (sizeof(struct sunos_dirent) + 255))
- goto out;
+ if (cnt < (sizeof(struct sunos_dirent) + 255))
+ goto out_putf;
buf.curr = (struct sunos_dirent *) dirent;
buf.previous = NULL;
buf.count = cnt;
buf.error = 0;
+
+ inode = file->f_dentry->d_inode;
+ down(&inode->i_sem);
error = file->f_op->readdir(file, &buf, sunos_filldir);
+ up(&inode->i_sem);
if (error < 0)
- goto out;
+ goto out_putf;
+
lastdirent = buf.previous;
error = buf.error;
if (lastdirent) {
put_user(file->f_pos, &lastdirent->d_off);
error = cnt - buf.count;
}
+
+out_putf:
+ fput(file);
out:
unlock_kernel();
return error;
@@ -503,39 +522,48 @@ static int sunos_filldirentry(void * __buf, const char * name, int namlen,
asmlinkage int sunos_getdirentries(unsigned int fd, void * dirent, int cnt, unsigned int *basep)
{
struct file * file;
+ struct inode * inode;
struct sunos_direntry * lastdirent;
struct sunos_direntry_callback buf;
int error = -EBADF;
lock_kernel();
- if(fd >= SUNOS_NR_OPEN)
+ if (fd >= SUNOS_NR_OPEN)
goto out;
- file = current->files->fd[fd];
- if(!file)
+ file = fget(fd);
+ if (!file)
goto out;
error = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
- goto out;
+ goto out_putf;
error = -EINVAL;
if(cnt < (sizeof(struct sunos_direntry) + 255))
- goto out;
+ goto out_putf;
buf.curr = (struct sunos_direntry *) dirent;
buf.previous = NULL;
buf.count = cnt;
buf.error = 0;
+
+ inode = file->f_dentry->d_inode;
+ down(&inode->i_sem);
error = file->f_op->readdir(file, &buf, sunos_filldirentry);
+ up(&inode->i_sem);
if (error < 0)
- goto out;
+ goto out_putf;
+
lastdirent = buf.previous;
error = buf.error;
if (lastdirent) {
put_user(file->f_pos, basep);
error = cnt - buf.count;
}
+
+out_putf:
+ fput(file);
out:
unlock_kernel();
return error;
@@ -669,6 +697,15 @@ asmlinkage int sunos_select(int width, fd_set *inp, fd_set *outp, fd_set *exp, s
lock_kernel();
current->personality |= STICKY_TIMEOUTS;
ret = sys_select (width, inp, outp, exp, tvp);
+ if (ret == -EINTR && tvp) {
+ time_t sec, usec;
+
+ __get_user(sec, &tvp->tv_sec);
+ __get_user(usec, &tvp->tv_usec);
+
+ if (sec == 0 && usec == 0)
+ ret = 0;
+ }
unlock_kernel();
return ret;
}
@@ -720,7 +757,7 @@ extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
/* Bind the socket on a local reserved port and connect it to the
* remote server. This on Linux/i386 is done by the mount program,
- * not by the kernel.
+ * not by the kernel.
*/
static int
sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
@@ -728,16 +765,16 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
struct sockaddr_in local;
struct sockaddr_in server;
int try_port;
- int ret;
struct socket *socket;
- struct dentry *dentry;
struct inode *inode;
struct file *file;
+ int ret, result = 0;
- file = current->files->fd [fd];
- dentry = file->f_dentry;
- if(!dentry || !(inode = dentry->d_inode))
- return 0;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ if (!file->f_dentry || !(inode = file->f_dentry->d_inode))
+ goto out_putf;
socket = &inode->u.socket_i;
local.sin_family = AF_INET;
@@ -752,7 +789,7 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
} while (ret && try_port > (1024 / 2));
if (ret)
- return 0;
+ goto out_putf;
server.sin_family = AF_INET;
server.sin_addr = addr->sin_addr;
@@ -761,9 +798,13 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
/* Call sys_connect */
ret = socket->ops->connect (socket, (struct sockaddr *) &server,
sizeof (server), file->f_flags);
- if (ret < 0)
- return 0;
- return 1;
+ if (ret >= 0)
+ result = 1;
+
+out_putf:
+ fput(file);
+out:
+ return result;
}
static int get_default (int value, int def_value)
@@ -1139,10 +1180,13 @@ asmlinkage int sunos_open(const char *filename, int flags, int mode)
file descriptors that have been set non-blocking
using 4.2BSD style calls. (tridge) */
-static inline int check_nonblock(int ret,int fd)
+static inline int check_nonblock(int ret, int fd)
{
- if (ret == -EAGAIN && (current->files->fd[fd]->f_flags & O_NDELAY))
- return -SUNOS_EWOULDBLOCK;
+ if (ret == -EAGAIN) {
+ struct file * file = fcheck(fd);
+ if (file && (file->f_flags & O_NDELAY))
+ ret = -SUNOS_EWOULDBLOCK;
+ }
return ret;
}
@@ -1215,12 +1259,41 @@ asmlinkage int sunos_send(int fd, void * buff, int len, unsigned flags)
return ret;
}
+extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
+ char *optval, int optlen);
+
+asmlinkage int sunos_socket(int family, int type, int protocol)
+{
+ int ret, one = 1;
+
+ lock_kernel();
+ ret = sys_socket(family, type, protocol);
+ if (ret < 0)
+ goto out;
+
+ sys_setsockopt(ret, SOL_SOCKET, SO_BSDCOMPAT,
+ (char *)&one, sizeof(one));
+out:
+ unlock_kernel();
+ return ret;
+}
+
asmlinkage int sunos_accept(int fd, struct sockaddr *sa, int *addrlen)
{
- int ret;
+ int ret, one = 1;
lock_kernel();
- ret = check_nonblock(sys_accept(fd,sa,addrlen),fd);
+ while (1) {
+ ret = check_nonblock(sys_accept(fd,sa,addrlen),fd);
+ if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
+ break;
+ }
+ if (ret < 0)
+ goto out;
+
+ sys_setsockopt(ret, SOL_SOCKET, SO_BSDCOMPAT,
+ (char *)&one, sizeof(one));
+out:
unlock_kernel();
return ret;
}
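
The new sunos_socket() and the reworked sunos_accept() both switch the returned socket into SO_BSDCOMPAT mode so it behaves more like a 4.xBSD socket, which is what SunOS binaries expect. The equivalent call from user space on kernels of this vintage is a plain boolean setsockopt; a minimal sketch:

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int one = 1;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		/* Same call the compat layer makes on the new descriptor. */
		if (setsockopt(fd, SOL_SOCKET, SO_BSDCOMPAT, &one, sizeof(one)) < 0)
			perror("setsockopt(SO_BSDCOMPAT)");
		return 0;
	}
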
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 5ea64d2a4..320264255 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -1,4 +1,4 @@
-/* $Id: systbls.S,v 1.68 1997/12/24 17:26:38 ecd Exp $
+/* $Id: systbls.S,v 1.71 1998/03/24 06:25:06 ecd Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -23,9 +23,9 @@ C_LABEL(sys_call_table):
/*5*/ .long C_LABEL(sys_open), C_LABEL(sys_close), C_LABEL(sys_wait4)
.long C_LABEL(sys_creat), C_LABEL(sys_link)
/*10*/ .long C_LABEL(sys_unlink), C_LABEL(sunos_execv), C_LABEL(sys_chdir)
- .long C_LABEL(sys_nis_syscall), C_LABEL(sys_mknod)
-/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sparc_brk)
- .long C_LABEL(sys_nis_syscall), C_LABEL(sys_lseek)
+ .long C_LABEL(sys_xstat), C_LABEL(sys_mknod)
+/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_lchown), C_LABEL(sparc_brk)
+ .long C_LABEL(sys_xmknod), C_LABEL(sys_lseek)
/*20*/ .long C_LABEL(sys_getpid), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
.long C_LABEL(sys_setuid), C_LABEL(sys_getuid)
/*25*/ .long C_LABEL(sys_time), C_LABEL(sys_ptrace), C_LABEL(sys_alarm)
@@ -137,7 +137,7 @@ C_LABEL(sunos_sys_table):
.long C_LABEL(sys_close), C_LABEL(sunos_wait4), C_LABEL(sys_creat)
.long C_LABEL(sys_link), C_LABEL(sys_unlink), C_LABEL(sunos_execv)
.long C_LABEL(sys_chdir), C_LABEL(sunos_nosys), C_LABEL(sys_mknod)
- .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sunos_brk)
+ .long C_LABEL(sys_chmod), C_LABEL(sys_lchown), C_LABEL(sunos_brk)
.long C_LABEL(sunos_nosys), C_LABEL(sys_lseek), C_LABEL(sunos_getpid)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_getuid), C_LABEL(sunos_nosys), C_LABEL(sys_ptrace)
@@ -164,7 +164,7 @@ C_LABEL(sunos_sys_table):
.long C_LABEL(sys_getitimer), C_LABEL(sys_gethostname), C_LABEL(sys_sethostname)
.long C_LABEL(sunos_getdtablesize), C_LABEL(sys_dup2), C_LABEL(sunos_nop)
.long C_LABEL(sys_fcntl), C_LABEL(sunos_select), C_LABEL(sunos_nop)
- .long C_LABEL(sys_fsync), C_LABEL(sys_setpriority), C_LABEL(sys_socket)
+ .long C_LABEL(sys_fsync), C_LABEL(sys_setpriority), C_LABEL(sunos_socket)
.long C_LABEL(sys_connect), C_LABEL(sunos_accept)
/*100*/ .long C_LABEL(sys_getpriority), C_LABEL(sunos_send), C_LABEL(sunos_recv)
.long C_LABEL(sunos_nosys), C_LABEL(sys_bind), C_LABEL(sunos_setsockopt)
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
index 03fe3cff9..f618bf53b 100644
--- a/arch/sparc/kernel/tadpole.c
+++ b/arch/sparc/kernel/tadpole.c
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/init.h>
#include <asm/asi.h>
#include <asm/oplib.h>
@@ -94,7 +95,7 @@ static void swift_clockstop(void)
clk_ctrl[0] = 0;
}
-void clock_stop_probe(void)
+__initfunc(void clock_stop_probe(void))
{
unsigned int node, clk_nd;
char name[20];
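
clock_stop_probe() is only needed at boot, so it is now wrapped in __initfunc() and its text can be discarded once init completes. On 2.1-era kernels the macro is essentially a section attribute; roughly (paraphrased, see <linux/init.h> for the real definition):

	/* Approximate shape of the macro this file starts using here. */
	#define __initfunc(__arginit) \
		__arginit __attribute__ ((__section__ (".text.init")))

	/* So the definition above becomes, in effect:
	 *   void clock_stop_probe(void)
	 *       __attribute__ ((__section__ (".text.init"))) { ... }
	 */
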
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 77401391e..5eb49e22c 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.29 1997/04/18 09:48:44 davem Exp $
+/* $Id: time.c,v 1.32 1998/03/23 08:41:13 jj Exp $
* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -24,6 +24,10 @@
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/sun4paddr.h>
+#include <asm/page.h>
enum sparc_clock_type sp_clock_typ;
struct mostek48t02 *mstk48t02_regs = 0;
@@ -88,7 +92,7 @@ static inline unsigned long mktime(unsigned int year, unsigned int mon,
}
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
-static void kick_start_clock(void)
+__initfunc(static void kick_start_clock(void))
{
register struct mostek48t02 *regs = mstk48t02_regs;
unsigned char sec;
@@ -137,7 +141,7 @@ static void kick_start_clock(void)
}
/* Return nonzero if the clock chip battery is low. */
-static int has_low_battery(void)
+static __inline__ int has_low_battery(void)
{
register struct mostek48t02 *regs = mstk48t02_regs;
unsigned char data1, data2;
@@ -150,8 +154,24 @@ static int has_low_battery(void)
return (data1 == data2); /* Was the write blocked? */
}
-/* Probe for the real time clock chip. */
-__initfunc(static void clock_probe(void))
+/* Probe for the real time clock chip on Sun4/300. */
+static __inline__ void sun4_clock_probe(void)
+{
+ sp_clock_typ = MSTK48T02;
+ mstk48t02_regs = (struct mostek48t02 *)
+ sparc_alloc_io(SUN4_300_MOSTEK_PHYSADDR, 0,
+ sizeof(*mstk48t02_regs),
+ "clock", 0x0, 0x0);
+ mstk48t08_regs = 0; /* To catch weirdness */
+ /* Kick start the clock if it is completely stopped. */
+ if (mstk48t02_regs->sec & MSTK_STOP) {
+ kick_start_clock();
+ }
+
+}
+
+/* Probe for the mostek real time clock chip. */
+static __inline__ void clock_probe(void)
{
struct linux_prom_registers clk_reg[2];
char model[128];
@@ -247,7 +267,11 @@ __initfunc(void time_init(void))
return;
#endif
- clock_probe();
+ if (ARCH_SUN4)
+ sun4_clock_probe();
+ else
+ clock_probe();
+
init_timers(timer_interrupt);
mregs = mstk48t02_regs;
diff --git a/arch/sparc/kernel/trampoline.S b/arch/sparc/kernel/trampoline.S
index 9ee5bd14a..3515e265d 100644
--- a/arch/sparc/kernel/trampoline.S
+++ b/arch/sparc/kernel/trampoline.S
@@ -1,7 +1,8 @@
-/* $Id: trampoline.S,v 1.9 1997/05/01 08:53:34 davem Exp $
+/* $Id: trampoline.S,v 1.12 1998/03/19 15:36:38 jj Exp $
* trampoline.S: SMP cpu boot-up trampoline code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/cprefix.h>
@@ -12,9 +13,12 @@
#include <asm/ptrace.h>
#include <asm/vaddrs.h>
#include <asm/contregs.h>
+#include <asm/init.h>
+ .globl C_LABEL(sun4m_cpu_startup), C_LABEL(__smp4m_processor_id)
+ .globl C_LABEL(sun4d_cpu_startup), C_LABEL(__smp4d_processor_id)
- .text
+ __INIT
.align 4
/* When we start up a cpu for the first time it enters this routine.
@@ -22,8 +26,7 @@
* in and sets PIL in %psr to 15, no irqs.
*/
- .globl C_LABEL(sparc_cpu_startup)
-C_LABEL(sparc_cpu_startup):
+C_LABEL(sun4m_cpu_startup):
cpu1_startup:
sethi %hi(C_LABEL(trapbase_cpu1)), %g3
b 1f
@@ -60,9 +63,8 @@ cpu3_startup:
and %g4, 0xc, %g4
ld [%g5 + %g4], %g6
- mov 1, %sp
- sll %sp, (PAGE_SHIFT + 1), %sp
- sub %sp, REGWIN_SZ, %sp
+ sethi %hi(TASK_UNION_SIZE - REGWIN_SZ), %sp
+ or %sp, %lo(TASK_UNION_SIZE - REGWIN_SZ), %sp
add %g6, %sp, %sp
/* Turn on traps (PSR_ET). */
@@ -77,11 +79,84 @@ cpu3_startup:
nop
/* Start this processor. */
- call C_LABEL(smp_callin)
+ call C_LABEL(smp4m_callin)
nop
+ b,a smp_do_cpu_idle
+
+ .text
+ .align 4
+
+smp_do_cpu_idle:
call C_LABEL(cpu_idle)
mov 0, %o0
call C_LABEL(cpu_panic)
nop
+
+C_LABEL(__smp4m_processor_id):
+ rd %tbr, %g2
+ srl %g2, 12, %g2
+ and %g2, 3, %g2
+ retl
+ mov %g1, %o7
+
+C_LABEL(__smp4d_processor_id):
+ lda [%g0] ASI_M_VIKING_TMP1, %g2
+ retl
+ mov %g1, %o7
+
+/* CPUID in bootbus can be found at PA 0xff0140000 */
+#define SUN4D_BOOTBUS_CPUID 0xf0140000
+
+ __INIT
+ .align 4
+
+C_LABEL(sun4d_cpu_startup):
+ /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
+ set (PSR_PIL | PSR_S | PSR_PS), %g1
+ wr %g1, 0x0, %psr ! traps off though
+ WRITE_PAUSE
+
+ /* Our %wim is one behind CWP */
+ mov 2, %g1
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Set tbr - we use just one trap table. */
+ set C_LABEL(trapbase), %g1
+ wr %g1, 0x0, %tbr
+ WRITE_PAUSE
+
+ /* Get our CPU id out of bootbus */
+ set SUN4D_BOOTBUS_CPUID, %g3
+ lduba [%g3] ASI_M_CTL, %g3
+ and %g3, 0xf8, %g3
+ srl %g3, 3, %g1
+ sta %g1, [%g0] ASI_M_VIKING_TMP1
+
+ /* Give ourselves a stack and curptr. */
+ set C_LABEL(current_set), %g5
+ srl %g3, 1, %g4
+ ld [%g5 + %g4], %g6
+
+ sethi %hi(TASK_UNION_SIZE - REGWIN_SZ), %sp
+ or %sp, %lo(TASK_UNION_SIZE - REGWIN_SZ), %sp
+ add %g6, %sp, %sp
+
+ /* Turn on traps (PSR_ET). */
+ rd %psr, %g1
+ wr %g1, PSR_ET, %psr ! traps on
+ WRITE_PAUSE
+
+ /* Init our caches, etc. */
+ set C_LABEL(poke_srmmu), %g5
+ ld [%g5], %g5
+ call %g5
+ nop
+
+ /* Start this processor. */
+ call C_LABEL(smp4d_callin)
+ nop
+
+ b,a smp_do_cpu_idle
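
Both CPU trampolines now form the initial stack pointer as the task pointer plus TASK_UNION_SIZE - REGWIN_SZ instead of hard-coding 1 << (PAGE_SHIFT + 1), so the code no longer assumes the task union is exactly two pages. The sethi/or pair is just a 32-bit constant load; in C the same arithmetic is (the constants below are the usual sparc32 values, assumed here for illustration):

	#define TASK_UNION_SIZE	8192		/* task_struct + kernel stack, two 4 KB pages */
	#define REGWIN_SZ	(16 * 4)	/* one register window: 8 locals + 8 ins */

	/* Initial %sp for a freshly started CPU, as set up by the trampoline. */
	static inline unsigned long initial_sp(unsigned long task)
	{
		return task + TASK_UNION_SIZE - REGWIN_SZ;
	}
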
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 19a3afbd0..015d05357 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -1,4 +1,4 @@
-/* $Id: traps.c,v 1.53 1997/01/25 02:43:05 miguel Exp $
+/* $Id: traps.c,v 1.56 1998/04/06 16:08:32 jj Exp $
* arch/sparc/kernel/traps.c
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -62,6 +62,14 @@ void sun4m_nmi(struct pt_regs *regs)
prom_halt();
}
+void sun4d_nmi(struct pt_regs *regs)
+{
+ printk("Aieee: sun4d NMI received!\n");
+ printk("you lose buddy boy...\n");
+ show_regs(regs);
+ prom_halt();
+}
+
void instruction_dump (unsigned long *pc)
{
int i;
@@ -229,10 +237,13 @@ static unsigned long fake_fsr;
static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
static unsigned long fake_depth;
+extern int do_mathemu(struct pt_regs *, struct task_struct *);
+
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
static calls = 0;
+	int ret = 0;	/* stays zero unless we attempt emulation below */
#ifndef __SMP__
struct task_struct *fpt = last_task_used_math;
#else
@@ -255,6 +266,40 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
}
fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr,
&fpt->tss.fpqueue[0], &fpt->tss.fpqdepth);
+#ifdef DEBUG_FPU
+ printk("Hmm, FP exception, fsr was %016lx\n", fpt->tss.fsr);
+#endif
+
+ switch ((fpt->tss.fsr & 0x1c000)) {
+ /* switch on the contents of the ftt [floating point trap type] field */
+#ifdef DEBUG_FPU
+ case (1 << 14):
+ printk("IEEE_754_exception\n");
+ break;
+#endif
+ case (2 << 14): /* unfinished_FPop (underflow & co) */
+ case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */
+ ret = do_mathemu(regs, fpt);
+ break;
+#ifdef DEBUG_FPU
+ case (4 << 14):
+ printk("sequence_error (OS bug...)\n");
+ break;
+ case (5 << 14):
+ printk("hardware_error (uhoh!)\n");
+ break;
+ case (6 << 14):
+ printk("invalid_fp_register (user error)\n");
+ break;
+#endif /* DEBUG_FPU */
+ }
+ /* If we successfully emulated the FPop, we pretend the trap never happened :-> */
+ if (ret) {
+ fpload(&current->tss.float_regs[0], &current->tss.fsr);
+ return;
+ }
+ /* nope, better SIGFPE the offending process... */
+
fpt->tss.sig_address = pc;
fpt->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */
#ifdef __SMP__
@@ -331,19 +376,6 @@ void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long np
unlock_kernel();
}
-void handle_bad_flush(struct pt_regs *regs, unsigned long pc, unsigned long npc,
- unsigned long psr)
-{
- lock_kernel();
-#ifdef TRAP_DEBUG
- printk("Unimplemented FLUSH Exception at PC %08lx NPC %08lx PSR %08lx\n",
- pc, npc, psr);
-#endif
- printk("INSTRUCTION=%08lx\n", *((unsigned long *) regs->pc));
- send_sig(SIGILL, current, 1);
- unlock_kernel();
-}
-
void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
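
The new code in do_fpe_trap() dispatches on the floating-point trap type (ftt) field, bits 16:14 of the saved %fsr, which is what the 0x1c000 mask and the (n << 14) case labels spell out. A small helper naming the values used above (encoding per the SPARC V8 manual; the identifier names are only for illustration):

	/* %fsr ftt field, bits 16:14 (SPARC V8). */
	enum sparc_ftt {
		FTT_NONE			= 0,
		FTT_IEEE_754_EXCEPTION		= 1,
		FTT_UNFINISHED_FPOP		= 2,	/* sent to do_mathemu() above */
		FTT_UNIMPLEMENTED_FPOP		= 3,	/* sent to do_mathemu() above */
		FTT_SEQUENCE_ERROR		= 4,
		FTT_HARDWARE_ERROR		= 5,
		FTT_INVALID_FP_REGISTER		= 6,
	};

	static inline enum sparc_ftt fsr_ftt(unsigned long fsr)
	{
		return (enum sparc_ftt) ((fsr >> 14) & 0x7);
	}
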
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 890676bfb..1d321d521 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -1,4 +1,4 @@
-/* $Id: wof.S,v 1.36 1997/05/01 08:53:35 davem Exp $
+/* $Id: wof.S,v 1.38 1998/02/06 14:14:22 jj Exp $
* wof.S: Sparc window overflow handler.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -234,9 +234,10 @@ spwin_user_stack_is_bolixed:
spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
st %twin_tmp, [%curptr + AOFF_task_tss + AOFF_thread_uwinmask]
- mov 1, %sp
- sll %sp, (PAGE_SHIFT + 1), %sp
- sub %sp, (TRACEREG_SZ + REGWIN_SZ), %sp
+#define STACK_OFFSET (TASK_UNION_SIZE - TRACEREG_SZ - REGWIN_SZ)
+
+ sethi %hi(STACK_OFFSET), %sp
+ or %sp, %lo(STACK_OFFSET), %sp
add %curptr, %sp, %sp
/* Restore the saved globals and build a pt_regs frame. */
@@ -244,9 +245,8 @@ spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
mov %saved_g6, %g6
STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
- mov 1, %g6
- sll %g6, (PAGE_SHIFT + 1), %g6
- sub %g6, (TRACEREG_SZ + REGWIN_SZ), %g6
+ sethi %hi(STACK_OFFSET), %g6
+ or %g6, %lo(STACK_OFFSET), %g6
sub %sp, %g6, %g6
/* Turn on traps and call c-code to deal with it. */
@@ -394,9 +394,8 @@ C_LABEL(spwin_srmmu_stackchk):
* kernel is page aligned, which should always be the case.
*/
/* Check results of callers andcc %sp, 0x7, %g0 */
- sethi %hi(C_LABEL(page_offset)), %glob_tmp
bne spwin_user_stack_is_bolixed
- ld [%glob_tmp + %lo(C_LABEL(page_offset))], %glob_tmp
+ GET_PAGE_OFFSET(glob_tmp)
cmp %glob_tmp, %sp
bleu spwin_user_stack_is_bolixed
mov AC_M_SFSR, %glob_tmp
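
The replaced instructions built the stack base as 1 << (PAGE_SHIFT + 1) minus the trap frame and one register window, i.e. they baked in an 8 KB, two-page task union. STACK_OFFSET expresses the same offset in terms of TASK_UNION_SIZE, so it keeps working if the union size ever changes. With the usual sparc32 values the constant is unchanged; a compile-time check (the sizes here are assumptions for illustration):

	#define PAGE_SHIFT	12
	#define TASK_UNION_SIZE	(1 << (PAGE_SHIFT + 1))	/* 8192, what the old code assumed */
	#define TRACEREG_SZ	(20 * 4)		/* pt_regs: psr, pc, npc, y + 16 regs */
	#define REGWIN_SZ	(16 * 4)

	#define STACK_OFFSET	(TASK_UNION_SIZE - TRACEREG_SZ - REGWIN_SZ)

	/* 8192 - 80 - 64 = 8048 = 0x1f70; sethi/or loads this as %hi()/%lo() halves. */
	typedef char stack_offset_value[(STACK_OFFSET == 0x1f70) ? 1 : -1];
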
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index cb407aa69..bfa5b5191 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -1,4 +1,4 @@
-/* $Id: wuf.S,v 1.34 1997/05/01 08:53:36 davem Exp $
+/* $Id: wuf.S,v 1.37 1998/02/19 21:25:50 ecd Exp $
* wuf.S: Window underflow trap handler for the Sparc.
*
* Copyright (C) 1995 David S. Miller
@@ -138,6 +138,8 @@ fwin_from_user:
C_LABEL(fwin_mmu_patchme): b C_LABEL(sun4c_fwin_stackchk)
andcc %sp, 0x7, %g0
+#define STACK_OFFSET (TASK_UNION_SIZE - TRACEREG_SZ - REGWIN_SZ)
+
fwin_user_stack_is_bolixed:
/* LOCATION: Window 'W' */
@@ -146,9 +148,8 @@ fwin_user_stack_is_bolixed:
*/
LOAD_CURRENT(l4, l5)
- mov 1, %l5
- sll %l5, (PAGE_SHIFT + 1), %l5
- sub %l5, (TRACEREG_SZ + REGWIN_SZ), %l5
+ sethi %hi(STACK_OFFSET), %l5
+ or %l5, %lo(STACK_OFFSET), %l5
add %l4, %l5, %l5
/* Store globals into pt_regs frame. */
@@ -169,10 +170,9 @@ fwin_user_stack_is_bolixed:
/* LOCATION: Window 'T' */
- mov 1, %sp
- sll %sp, (PAGE_SHIFT + 1), %sp
- sub %sp, (TRACEREG_SZ + REGWIN_SZ), %sp
- add %curptr, %sp, %sp
+ sethi %hi(STACK_OFFSET), %l5
+ or %l5, %lo(STACK_OFFSET), %l5
+ add %curptr, %l5, %sp
/* Build rest of pt_regs. */
STORE_PT_INS(sp)
@@ -299,9 +299,8 @@ C_LABEL(srmmu_fwin_stackchk):
/* LOCATION: Window 'W' */
/* Caller did 'andcc %sp, 0x7, %g0' */
- sethi %hi(C_LABEL(page_offset)), %l5
bne fwin_user_stack_is_bolixed
- ld [%l5 + %lo(C_LABEL(page_offset))], %l5
+ GET_PAGE_OFFSET(l5)
/* Check if the users stack is in kernel vma, then our
* trial and error technique below would succeed for
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index cefe7a851..6ec986c86 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.24 1997/05/08 17:45:26 davem Exp $
+# $Id: Makefile,v 1.25 1998/01/30 10:58:43 jj Exp $
# Makefile for Sparc library files..
#
@@ -16,19 +16,19 @@ lib.a: $(OBJS)
sync
checksum.o: checksum.S
- $(CC) -ansi -c -o checksum.o checksum.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o checksum.o checksum.S
memcpy.o: memcpy.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S
memcmp.o: memcmp.S
- $(CC) -ansi -c -o memcmp.o memcmp.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memcmp.o memcmp.S
memscan.o: memscan.S
- $(CC) -ansi -c -o memscan.o memscan.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memscan.o memscan.S
strncmp.o: strncmp.S
- $(CC) -ansi -c -o strncmp.o strncmp.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strncmp.o strncmp.S
strncpy_from_user.o: strncpy_from_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S
@@ -40,7 +40,7 @@ copy_user.o: copy_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o copy_user.o copy_user.S
blockops.o: blockops.S
- $(CC) -ansi -c -o blockops.o blockops.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o blockops.o blockops.S
memset.o: memset.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
@@ -73,34 +73,34 @@ bitops.o: bitops.S
endif
strlen.o: strlen.S
- $(CC) -ansi -c -o strlen.o strlen.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strlen.o strlen.S
divdi3.o: divdi3.S
- $(CC) -ansi -c -o divdi3.o divdi3.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o divdi3.o divdi3.S
udivdi3.o: udivdi3.S
- $(CC) -ansi -c -o udivdi3.o udivdi3.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o udivdi3.o udivdi3.S
mul.o: mul.S
- $(CC) -c -o mul.o mul.S
+ $(CC) -D__ASSEMBLY__ -c -o mul.o mul.S
rem.o: rem.S
- $(CC) -DST_DIV0=0x2 -c -o rem.o rem.S
+ $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o rem.o rem.S
sdiv.o: sdiv.S
- $(CC) -DST_DIV0=0x2 -c -o sdiv.o sdiv.S
+ $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o sdiv.o sdiv.S
udiv.o: udiv.S
- $(CC) -DST_DIV0=0x2 -c -o udiv.o udiv.S
+ $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o udiv.o udiv.S
umul.o: umul.S
- $(CC) -c -o umul.o umul.S
+ $(CC) -D__ASSEMBLY__ -c -o umul.o umul.S
urem.o: urem.S
- $(CC) -DST_DIV0=0x2 -c -o urem.o urem.S
+ $(CC) -D__ASSEMBLY__ -DST_DIV0=0x2 -c -o urem.o urem.S
ashrdi3.o: ashrdi3.S
- $(CC) -c -o ashrdi3.o ashrdi3.S
+ $(CC) -D__ASSEMBLY__ -c -o ashrdi3.o ashrdi3.S
dep:
diff --git a/arch/sparc/lib/atomic.S b/arch/sparc/lib/atomic.S
index 89bf2c5fb..4e4aa0646 100644
--- a/arch/sparc/lib/atomic.S
+++ b/arch/sparc/lib/atomic.S
@@ -10,8 +10,9 @@
.text
.align 4
- .globl ___xchg32
-___xchg32:
+#ifndef __SMP__
+ .globl ___xchg32_sun4c
+___xchg32_sun4c:
rd %psr, %g3
andcc %g3, PSR_PIL, %g0
bne 1f
@@ -27,9 +28,16 @@ ___xchg32:
nop; nop; nop
1:
mov %g7, %g2
- jmpl %o7, %g0 /* Note, not + 0x8, see call in system.h */
+ jmpl %o7 + 8, %g0
mov %g4, %o7
+ .globl ___xchg32_sun4md
+___xchg32_sun4md:
+ swap [%g1], %g2
+ jmpl %o7 + 8, %g0
+ mov %g4, %o7
+#endif
+
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
* Really, some things here for SMP are overly clever, go read the header.
*/
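
The split means the sun4c variant keeps emulating the exchange with interrupts masked, while sun4m/sun4d systems (the new ___xchg32_sun4md) can rely on the hardware swap instruction. As a stand-alone illustration of that primitive (a sketch, not the kernel's actual xchg() plumbing):

	/* Atomically exchange *ptr and val with the SPARC V8 swap instruction. */
	static inline unsigned long swap32(volatile unsigned long *ptr,
					   unsigned long val)
	{
		__asm__ __volatile__("swap [%2], %0"
				     : "=&r" (val)
				     : "0" (val), "r" (ptr)
				     : "memory");
		return val;	/* previous contents of *ptr */
	}
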
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index a5a4bffad..3f09ec1db 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -1,10 +1,11 @@
-/* $Id: blockops.S,v 1.7 1997/05/20 07:58:28 jj Exp $
+/* $Id: blockops.S,v 1.8 1998/01/30 10:58:44 jj Exp $
* blockops.S: Common block zero optimized routines.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
+#include <asm/page.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
@@ -53,7 +54,7 @@ C_LABEL(bzero_1page):
/* %o0 = buf */
or %g0, %g0, %g1
or %o0, %g0, %o1
- or %g0, 0x10, %g2
+ or %g0, (PAGE_SIZE >> 8), %g2
1:
BLAST_BLOCK(%o0, 0x00)
BLAST_BLOCK(%o0, 0x40)
@@ -70,7 +71,7 @@ C_LABEL(__copy_1page):
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
- or %g0, 0x10, %g1
+ or %g0, (PAGE_SIZE >> 8), %g1
1:
MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
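
The loop count change just spells out where the old 0x10 came from: each pass of these loops handles 256 bytes, so the iteration count is PAGE_SIZE / 256, which PAGE_SIZE >> 8 expresses directly and which equals 16 (0x10) for the 4 KB pages the old literal assumed.

	#define PAGE_SIZE 4096	/* assumed sparc32 page size */
	/* Fails to compile if the rewritten count differs from the old literal. */
	typedef char same_loop_count[((PAGE_SIZE >> 8) == 0x10) ? 1 : -1];
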
diff --git a/arch/sparc/math-emu/.cvsignore b/arch/sparc/math-emu/.cvsignore
new file mode 100644
index 000000000..857dd22e9
--- /dev/null
+++ b/arch/sparc/math-emu/.cvsignore
@@ -0,0 +1,2 @@
+.depend
+.*.flags
diff --git a/arch/sparc/math-emu/Makefile b/arch/sparc/math-emu/Makefile
new file mode 100644
index 000000000..d7642b2e9
--- /dev/null
+++ b/arch/sparc/math-emu/Makefile
@@ -0,0 +1,37 @@
+#
+# Makefile for the FPU instruction emulation.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := math-emu.o
+O_OBJS := math.o ashldi3.o fabss.o faddd.o faddq.o fadds.o \
+ fcmpd.o fcmped.o fcmpeq.o fcmpes.o fcmpq.o fcmps.o \
+ fdivd.o fdivq.o fdivs.o fdmulq.o fdtoi.o fdtoq.o \
+ fdtos.o fitoq.o fmovs.o fmuld.o fmulq.o fmuls.o \
+ fnegs.o fqtod.o fqtoi.o fqtos.o fsmuld.o fsqrtd.o \
+ fsqrtq.o fsqrts.o fstod.o fstoi.o fstoq.o fsubd.o \
+ fsubq.o fsubs.o udivmodti4.o
+
+LINKS := double.h faddd.c faddq.c fadds.c fdivd.c fdivq.c fdivs.c \
+ fdtoi.c fitoq.c fmuld.c fmulq.c fmuls.c fqtoi.c \
+ fsqrtd.c fsqrtq.c fsqrts.c fstoi.c fsubd.c \
+ fsubq.c fsubs.c op-1.h op-2.h op-4.h op-common.h quad.h \
+ single.h soft-fp.h udivmodti4.c
+
+.S.s:
+ $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
+
+include $(TOPDIR)/Rules.make
+
+symlinks:
+ ln -sf $(patsubst %,../../sparc64/math-emu/%,$(LINKS)) .
+
+cleansymlinks:
+ rm -f $(LINKS)
diff --git a/arch/sparc/math-emu/ashldi3.S b/arch/sparc/math-emu/ashldi3.S
new file mode 100644
index 000000000..eab1d0972
--- /dev/null
+++ b/arch/sparc/math-emu/ashldi3.S
@@ -0,0 +1,36 @@
+/* $Id: ashldi3.S,v 1.1 1998/04/06 16:09:28 jj Exp $
+ * ashldi3.S: Math-emu code creates all kinds of references to
+ * this little routine on the sparc with gcc.
+ *
+ * Copyright (C) 1998 Jakub Jelinek(jj@ultra.linux.cz)
+ */
+
+#include <asm/cprefix.h>
+
+ .globl C_LABEL(__ashldi3)
+C_LABEL(__ashldi3):
+ tst %o2
+ be 3f
+ mov 32, %g2
+
+ sub %g2, %o2, %g2
+
+ tst %g2
+ bg 1f
+ srl %o1, %g2, %g3
+
+ clr %o5
+ neg %g2
+ ba 2f
+ sll %o1, %g2, %o4
+
+1:
+ sll %o1, %o2, %o5
+ srl %o0, %o2, %g2
+ or %g2, %g3, %o4
+2:
+ mov %o4, %o0
+ mov %o5, %o1
+3:
+ jmpl %o7 + 8, %g0
+ nop
diff --git a/arch/sparc/math-emu/fabss.c b/arch/sparc/math-emu/fabss.c
new file mode 100644
index 000000000..accfd4f59
--- /dev/null
+++ b/arch/sparc/math-emu/fabss.c
@@ -0,0 +1,6 @@
+int FABSS(unsigned long *rd, unsigned long *rs2)
+{
+ /* Clear the sign bit (high bit of word 0) */
+ rd[0] = rs2[0] & 0x7fffffffUL;
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmpd.c b/arch/sparc/math-emu/fcmpd.c
new file mode 100644
index 000000000..3a9926575
--- /dev/null
+++ b/arch/sparc/math-emu/fcmpd.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "double.h"
+
+int FCMPD(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_D(A); FP_DECL_D(B);
+ long ret;
+ unsigned long *fsr = rd;
+
+ __FP_UNPACK_D(A, rs1);
+ __FP_UNPACK_D(B, rs2);
+ FP_CMP_D(ret, B, A, 2);
+ if (ret == -1)
+ ret = 2;
+
+ *fsr = (*fsr & ~0xc00) | (ret << 10);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmped.c b/arch/sparc/math-emu/fcmped.c
new file mode 100644
index 000000000..a8c188042
--- /dev/null
+++ b/arch/sparc/math-emu/fcmped.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "double.h"
+
+int FCMPED(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_D(A); FP_DECL_D(B);
+ long ret;
+ unsigned long *fsr = rd;
+
+ __FP_UNPACK_D(A, rs1);
+ __FP_UNPACK_D(B, rs2);
+ FP_CMP_D(ret, B, A, 2);
+ if (ret == -1)
+ ret = 2;
+
+ *fsr = (*fsr & ~0xc00) | (ret << 10);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmpeq.c b/arch/sparc/math-emu/fcmpeq.c
new file mode 100644
index 000000000..c109c51ce
--- /dev/null
+++ b/arch/sparc/math-emu/fcmpeq.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "quad.h"
+
+int FCMPEQ(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_Q(A); FP_DECL_Q(B);
+ long ret;
+ unsigned long fsr;
+
+ __FP_UNPACK_Q(A, rs1);
+ __FP_UNPACK_Q(B, rs2);
+ FP_CMP_Q(ret, B, A, 3);
+ if (ret == -1) ret = 2;
+ fsr = *(unsigned long *)rd;
+ fsr &= ~0xc00; fsr |= (ret << 10);
+ *(unsigned long *)rd = fsr;
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmpes.c b/arch/sparc/math-emu/fcmpes.c
new file mode 100644
index 000000000..e20884cfd
--- /dev/null
+++ b/arch/sparc/math-emu/fcmpes.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "single.h"
+
+int FCMPES(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_S(A); FP_DECL_S(B);
+ long ret;
+ unsigned long *fsr = rd;
+
+ __FP_UNPACK_S(A, rs1);
+ __FP_UNPACK_S(B, rs2);
+ FP_CMP_S(ret, B, A, 1);
+ if (ret == -1)
+ ret = 2;
+
+ *fsr = (*fsr & ~0xc00) | (ret << 10);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmpq.c b/arch/sparc/math-emu/fcmpq.c
new file mode 100644
index 000000000..549f02cae
--- /dev/null
+++ b/arch/sparc/math-emu/fcmpq.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "quad.h"
+
+int FCMPQ(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_Q(A); FP_DECL_Q(B);
+ long ret;
+ unsigned long fsr;
+
+ __FP_UNPACK_Q(A, rs1);
+ __FP_UNPACK_Q(B, rs2);
+ FP_CMP_Q(ret, B, A, 3);
+ if (ret == -1) ret = 2;
+ fsr = *(unsigned long *)rd;
+ fsr &= ~0xc00; fsr |= (ret << 10);
+ *(unsigned long *)rd = fsr;
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fcmps.c b/arch/sparc/math-emu/fcmps.c
new file mode 100644
index 000000000..1b53312ae
--- /dev/null
+++ b/arch/sparc/math-emu/fcmps.c
@@ -0,0 +1,18 @@
+#include "soft-fp.h"
+#include "single.h"
+
+int FCMPS(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_S(A); FP_DECL_S(B);
+ long ret;
+ unsigned long *fsr = rd;
+
+ __FP_UNPACK_S(A, rs1);
+ __FP_UNPACK_S(B, rs2);
+ FP_CMP_S(ret, B, A, 1);
+ if (ret == -1)
+ ret = 2;
+
+ *fsr = (*fsr & ~0xc00) | (ret << 10);
+ return 1;
+}
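
All of the compare emulations end the same way: the soft-fp comparison result is folded into the fcc field of the saved %fsr, bits 11:10, which is what the ~0xc00 mask and the << 10 shift select. On SPARC V8 the fcc encoding is 0 = equal, 1 = rs1 < rs2, 2 = rs1 > rs2, 3 = unordered. The store can be written as a helper (names are illustrative):

	/* Put a 2-bit condition code into the fcc field of a saved %fsr image. */
	static inline unsigned long fsr_set_fcc(unsigned long fsr, unsigned int fcc)
	{
		return (fsr & ~0xc00UL) | ((unsigned long)(fcc & 3) << 10);
	}
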
diff --git a/arch/sparc/math-emu/fdmulq.c b/arch/sparc/math-emu/fdmulq.c
new file mode 100644
index 000000000..1d5bc5053
--- /dev/null
+++ b/arch/sparc/math-emu/fdmulq.c
@@ -0,0 +1,16 @@
+#include "soft-fp.h"
+#include "quad.h"
+#include "double.h"
+
+int FDMULQ(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_D(IN); FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(R);
+
+ __FP_UNPACK_D(IN, rs1);
+ FP_CONV(Q,D,4,2,A,IN);
+ __FP_UNPACK_D(IN, rs2);
+ FP_CONV(Q,D,4,2,B,IN);
+ FP_MUL_Q(R, A, B);
+ __FP_PACK_Q(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fdtoq.c b/arch/sparc/math-emu/fdtoq.c
new file mode 100644
index 000000000..84ebcf4a2
--- /dev/null
+++ b/arch/sparc/math-emu/fdtoq.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "quad.h"
+#include "double.h"
+
+int FDTOQ(void *rd, void *rs2)
+{
+ FP_DECL_D(A); FP_DECL_Q(R);
+
+ __FP_UNPACK_D(A, rs2);
+ FP_CONV(Q,D,4,2,R,A);
+ __FP_PACK_Q(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fdtos.c b/arch/sparc/math-emu/fdtos.c
new file mode 100644
index 000000000..83b8a14ed
--- /dev/null
+++ b/arch/sparc/math-emu/fdtos.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "double.h"
+#include "single.h"
+
+int FDTOS(void *rd, void *rs2)
+{
+ FP_DECL_D(A); FP_DECL_S(R);
+
+ __FP_UNPACK_D(A, rs2);
+ FP_CONV(S,D,1,2,R,A);
+ __FP_PACK_S(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fmovs.c b/arch/sparc/math-emu/fmovs.c
new file mode 100644
index 000000000..f113c0bb1
--- /dev/null
+++ b/arch/sparc/math-emu/fmovs.c
@@ -0,0 +1,5 @@
+int FMOVS(unsigned long *rd, unsigned long *rs2)
+{
+ rd[0] = rs2[0];
+ return 0;
+}
diff --git a/arch/sparc/math-emu/fnegs.c b/arch/sparc/math-emu/fnegs.c
new file mode 100644
index 000000000..39188eea6
--- /dev/null
+++ b/arch/sparc/math-emu/fnegs.c
@@ -0,0 +1,7 @@
+int FNEGS(unsigned long *rd, unsigned long *rs2)
+{
+ /* just change the sign bit */
+ rd[0] = rs2[0] ^ 0x80000000UL;
+ return 1;
+}
+
diff --git a/arch/sparc/math-emu/fqtod.c b/arch/sparc/math-emu/fqtod.c
new file mode 100644
index 000000000..dc5b6f9aa
--- /dev/null
+++ b/arch/sparc/math-emu/fqtod.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "quad.h"
+#include "double.h"
+
+int FQTOD(void *rd, void *rs2)
+{
+ FP_DECL_Q(A); FP_DECL_D(R);
+
+ __FP_UNPACK_Q(A, rs2);
+ FP_CONV(D,Q,2,4,R,A);
+ __FP_PACK_D(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fqtos.c b/arch/sparc/math-emu/fqtos.c
new file mode 100644
index 000000000..608f57be0
--- /dev/null
+++ b/arch/sparc/math-emu/fqtos.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "quad.h"
+#include "single.h"
+
+int FQTOS(void *rd, void *rs2)
+{
+ FP_DECL_Q(A); FP_DECL_S(R);
+
+ __FP_UNPACK_Q(A, rs2);
+ FP_CONV(S,Q,1,4,R,A);
+ __FP_PACK_S(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fsmuld.c b/arch/sparc/math-emu/fsmuld.c
new file mode 100644
index 000000000..dead5a042
--- /dev/null
+++ b/arch/sparc/math-emu/fsmuld.c
@@ -0,0 +1,16 @@
+#include "soft-fp.h"
+#include "double.h"
+#include "single.h"
+
+int FSMULD(void *rd, void *rs2, void *rs1)
+{
+ FP_DECL_S(IN); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R);
+
+ __FP_UNPACK_S(IN, rs1);
+ FP_CONV(D,S,2,1,A,IN);
+ __FP_UNPACK_S(IN, rs2);
+ FP_CONV(D,S,2,1,B,IN);
+ FP_MUL_D(R, A, B);
+ __FP_PACK_D(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fstod.c b/arch/sparc/math-emu/fstod.c
new file mode 100644
index 000000000..cb34329c9
--- /dev/null
+++ b/arch/sparc/math-emu/fstod.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "double.h"
+#include "single.h"
+
+int FSTOD(void *rd, void *rs2)
+{
+ FP_DECL_S(A); FP_DECL_D(R);
+
+ __FP_UNPACK_S(A, rs2);
+ FP_CONV(D,S,2,1,R,A);
+ __FP_PACK_D(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/fstoq.c b/arch/sparc/math-emu/fstoq.c
new file mode 100644
index 000000000..081c4d4d0
--- /dev/null
+++ b/arch/sparc/math-emu/fstoq.c
@@ -0,0 +1,13 @@
+#include "soft-fp.h"
+#include "quad.h"
+#include "single.h"
+
+int FSTOQ(void *rd, void *rs2)
+{
+ FP_DECL_S(A); FP_DECL_Q(R);
+
+ __FP_UNPACK_S(A, rs2);
+ FP_CONV(Q,S,4,1,R,A);
+ __FP_PACK_Q(rd, R);
+ return 1;
+}
diff --git a/arch/sparc/math-emu/math.c b/arch/sparc/math-emu/math.c
new file mode 100644
index 000000000..df5c879c5
--- /dev/null
+++ b/arch/sparc/math-emu/math.c
@@ -0,0 +1,416 @@
+/*
+ * arch/sparc/math-emu/math.c
+ *
+ * Copyright (C) 1998 Peter Maydell (pmaydell@chiark.greenend.org.uk)
+ * Based on the sparc64 code by Jakub Jelinek.
+ *
+ * This is a good place to start if you're trying to understand the
+ * emulation code, because it's pretty simple. What we do is
+ * essentially analyse the instruction to work out what the operation
+ * is and which registers are involved. We then execute the appropriate
+ * FXXXX function. [The floating point queue introduces a minor wrinkle;
+ * see below...]
+ * The fxxxxx.c files each emulate a single insn. They look relatively
+ * simple because the complexity is hidden away in an unholy tangle
+ * of preprocessor macros.
+ *
+ * WARNING : don't look at the macro definitions unless you
+ * absolutely have to! They're extremely ugly, rather complicated
+ * and a single line in an fxxxx.c file can expand to the equivalent
+ * of 30 lines or more of C. Of course, any error in those 30 lines
+ * is reported by the compiler as an error in the single line with the
+ * macro usage...
+ * Question: should we replace them with inline functions?
+ *
+ * The first layer of macros is single.h, double.h, quad.h. Generally
+ * these files define macros for working with floating point numbers
+ * of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles,
+ * for instance. These macros are usually defined as calls to more
+ * generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number
+ * of machine words required to store the given IEEE format is passed
+ * as a parameter. [double.h and co check the number of bits in a word
+ * and define FP_ADD_D & co appropriately].
+ * The generic macros are defined in op-common.h. This is where all
+ * the grotty stuff like handling NaNs is coded. To handle the possible
+ * word sizes, macros in op-common.h use macros like _FP_FRAC_SLL_##wc()
+ * where wc is the 'number of machine words' parameter (here 2).
+ * These are defined in the third layer of macros: op-1.h, op-2.h
+ * and op-4.h. These handle operations on floating point numbers composed
+ * of 1,2 and 4 machine words respectively. [For example, on sparc64
+ * doubles are one machine word so macros in double.h eventually use
+ * constructs in op-1.h, but on sparc32 they use op-2.h definitions.]
+ * soft-fp.h is on the same level as op-common.h, and defines some
+ * macros which are independent of both word size and FP format.
+ * Finally, sfp-machine.h is the machine dependent part of the
+ * code: it defines the word size and what type a word is. It also
+ * defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h
+ * provide several possible flavours of multiply algorithm, most
+ * of which require that you supply some form of asm or C primitive to
+ * do the actual multiply. (such asm primitives should be defined
+ * in sfp-machine.h too). udivmodti4.c is the same sort of thing.
+ *
+ * There may be some errors here because I'm working from a
+ * SPARC architecture manual V9, and what I really want is V8...
+ * Also, the insns which can generate exceptions seem to be a
+ * greater subset of the FPops than for V9 (for example, FCMPED
+ * has to be emulated on V8). So I think I'm going to have
+ * to emulate them all just to be on the safe side...
+ *
+ * Emulation routines originate from soft-fp package, which is
+ * part of glibc and has appropriate copyrights in it (allegedly).
+ *
+ * NB: on sparc int == long == 4 bytes, long long == 8 bytes.
+ * Most bits of the kernel seem to go for long rather than int,
+ * so we follow that practice...
+ */
+
+/* WISHLIST:
+ *
+ * + Replace all the macros with inline functions. These should
+ * have the same effect but be much easier to work with.
+ *
+ * + Emulate the IEEE exception flags. We don't currently do this
+ * because a) it would require significant alterations to
+ * the emulation macros [see the comments about _FP_NEG()
+ * in op-common.c and note that we'd need to invent a convention
+ * for passing in the flags to FXXXX fns and returning them] and
+ * b) SPARClinux doesn't let users access the flags anyway
+ * [contrast Solaris, which allows you to examine, clear or set
+ * the flags, and request that exceptions cause SIGFPE
+ * [which you then set up a signal handler for, obviously...]].
+ * Erm, (b) may quite possibly be garbage. %fsr is user-writable
+ * so you don't need a syscall. There may or may not be library
+ * support.
+ *
+ * + Emulation of FMULQ, FDIVQ, FSQRTQ, FDMULQ needs to be
+ * written!
+ *
+ * + reindent code to conform to Linux kernel standard :->
+ *
+ * + work out whether all the compile-time warnings are bogus
+ *
+ * + check that conversion to/from integers works
+ *
+ * + check with the SPARC architecture manual to see if we resolve
+ * the implementation-dependent bits of the IEEE spec in the
+ * same manner as the hardware.
+ *
+ * + more test cases for the test script always welcome!
+ *
+ * + illegal opcodes currently cause SIGFPEs. We should arrange
+ * to tell the traps.c code to SIGILL instead. Currently,
+ * everywhere that we return 0 should cause SIGILL, I think.
+ * SIGFPE should only be caused if we set an IEEE exception bit
+ * and the relevant trap bit is also set. (this means that
+ * traps.c should do this; also it should handle the case of
+ * IEEE exception generated directly by the hardware.)
+ * Should illegal_fp_register (which is a flavour of fp exception)
+ * cause SIGFPE or SIGILL?
+ *
+ * + the test script needs to be extended to handle the quadword
+ * and comparison insns.
+ *
+ * + _FP_DIV_MEAT_2_udiv_64() appears to work but it should be
+ * checked by somebody who understands the algorithm :->
+ *
+ * + fpsave() saves the FP queue but fpload() doesn't reload it.
+ * Therefore when we context switch or change FPU ownership
+ * we have to check to see if the queue had anything in it and
+ * emulate it if it did. This is going to be a pain.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+
+
+#define FLOATFUNC(x) extern int x(void *,void *,void *)
+
+/* Current status: we don't properly emulate the difficult quadword
+ * insns (MUL, DIV, SQRT).
+ * There are also some ops involving the FP registers which we don't
+ * emulate: the branch on FP condition flags and the load/store to
+ * FP regs or FSR. I'm assuming that these will never generate traps
+ * (not unreasonable if there's an FPU at all; comments in the NetBSD
+ * kernel source agree on this point). If we wanted to allow
+ * purely software-emulation of the FPU with FPU totally disabled
+ * or non-existent, we'd have to emulate these as well. We'd also
+ * need to alter the fp_disabled trap handler to call the math-emu
+ * code appropriately. The structure of do_one_mathemu() is also
+ * inappropriate for these ops (as it has no way to alter the pc,
+ * for a start) and it might be better to special-case them in do_mathemu().
+ * Oh, and you'd need to alter the traps.c code so it didn't try to
+ * fpsave() and fpload(). If there's genuinely no FPU then there's
+ * probably bits of kernel stuff that just won't work anyway...
+ */
+
+/* The Vn labels indicate what version of the SPARC architecture gas thinks
+ * each insn is. This is from the binutils source :->
+ */
+/* quadword instructions */
+FLOATFUNC(FSQRTQ); /* v8 NYI */
+FLOATFUNC(FADDQ); /* v8 */
+FLOATFUNC(FSUBQ); /* v8 */
+FLOATFUNC(FMULQ); /* v8 NYI */
+FLOATFUNC(FDIVQ); /* v8 NYI */
+FLOATFUNC(FDMULQ); /* v8 NYI */
+FLOATFUNC(FQTOS); /* v8 */
+FLOATFUNC(FQTOD); /* v8 */
+FLOATFUNC(FITOQ); /* v8 */
+FLOATFUNC(FSTOQ); /* v8 */
+FLOATFUNC(FDTOQ); /* v8 */
+FLOATFUNC(FQTOI); /* v8 */
+FLOATFUNC(FCMPQ); /* v8 */
+FLOATFUNC(FCMPEQ); /* v8 */
+/* single/double instructions (subnormal): should all work */
+FLOATFUNC(FSQRTS); /* v7 */
+FLOATFUNC(FSQRTD); /* v7 */
+FLOATFUNC(FADDS); /* v6 */
+FLOATFUNC(FADDD); /* v6 */
+FLOATFUNC(FSUBS); /* v6 */
+FLOATFUNC(FSUBD); /* v6 */
+FLOATFUNC(FMULS); /* v6 */
+FLOATFUNC(FMULD); /* v6 */
+FLOATFUNC(FDIVS); /* v6 */
+FLOATFUNC(FDIVD); /* v6 */
+FLOATFUNC(FSMULD); /* v8 */
+FLOATFUNC(FDTOS); /* v6 */
+FLOATFUNC(FSTOD); /* v6 */
+FLOATFUNC(FSTOI); /* v6 */
+FLOATFUNC(FDTOI); /* v6 */
+FLOATFUNC(FABSS); /* v6 */
+FLOATFUNC(FCMPS); /* v6 */
+FLOATFUNC(FCMPES); /* v6 */
+FLOATFUNC(FCMPD); /* v6 */
+FLOATFUNC(FCMPED); /* v6 */
+FLOATFUNC(FMOVS); /* v6 */
+FLOATFUNC(FNEGS); /* v6 */
+FLOATFUNC(FITOS); /* v6 */
+FLOATFUNC(FITOD); /* v6 */
+
+static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs);
+
+/* Unlike the Sparc64 version (which has a struct fpustate), we
+ * pass the taskstruct corresponding to the task which currently owns the
+ * FPU. This is partly because we don't have the fpustate struct and
+ * partly because the task owning the FPU isn't always current (as is
+ * the case for the Sparc64 port). This is probably SMP-related...
+ * This function returns 1 if all queued insns were emulated successfully.
+ * The test for unimplemented FPop in kernel mode has been moved into
+ * kernel/traps.c for simplicity.
+ */
+int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
+{
+ /* regs->pc isn't necessarily the PC at which the offending insn is sitting.
+ * The FPU maintains a queue of FPops which cause traps.
+ * When it hits an instruction that requires that the trapped op succeeded
+ * (usually because it reads a reg. that the trapped op wrote) then it
+ * causes this exception. We need to emulate all the insns on the queue
+ * and then allow the op to proceed.
+ * This code should also handle the case where the trap was precise,
+ * in which case the queue length is zero and regs->pc points at the
+ * single FPop to be emulated. (this case is untested, though :->)
+ * You'll need this case if you want to be able to emulate all FPops
+ * because the FPU either doesn't exist or has been software-disabled.
+ * [The UltraSPARC makes FP a precise trap; this isn't as stupid as it
+ * might sound because the Ultra does funky things with a superscalar
+ * architecture.]
+ */
+
+ /* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */
+
+ int i;
+ int retcode = 0; /* assume all succeed */
+ unsigned long insn;
+
+#ifdef DEBUG_MATHEMU
+ printk("In do_mathemu()... pc is %08lx\n", regs->pc);
+ printk("fpqdepth is %ld\n",fpt->tss.fpqdepth);
+ for (i = 0; i < fpt->tss.fpqdepth; i++)
+ printk("%d: %08lx at %08lx\n",i,fpt->tss.fpqueue[i].insn, (unsigned long)fpt->tss.fpqueue[i].insn_addr);
+#endif
+
+ if (fpt->tss.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */
+#ifdef DEBUG_MATHEMU
+ printk("precise trap at %08lx\n", regs->pc);
+#endif
+ if (!get_user(insn, (u32 *)regs->pc)) {
+ retcode = do_one_mathemu(insn, &fpt->tss.fsr, fpt->tss.float_regs);
+ if (retcode) {
+ /* in this case we need to fix up PC & nPC */
+ regs->pc = regs->npc;
+ regs->npc += 4;
+ }
+ }
+ return retcode;
+ }
+
+ /* Normal case: need to empty the queue... */
+ for (i = 0; i < fpt->tss.fpqdepth; i++)
+ {
+ retcode = do_one_mathemu(fpt->tss.fpqueue[i].insn, &(fpt->tss.fsr), fpt->tss.float_regs);
+ if (!retcode) /* insn failed, no point doing any more */
+ break;
+ }
+ /* Now empty the queue and clear the queue_not_empty flag */
+ fpt->tss.fsr &= ~0x3000;
+ fpt->tss.fpqdepth = 0;
+
+ return retcode;
+}
+
+static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs)
+{
+ /* Emulate the given insn, updating fsr and fregs appropriately. */
+ int type = 0;
+ /* 01 is single, 10 is double, 11 is quad,
+ * 000011 is rs1, 001100 is rs2, 110000 is rd (00 in rd is fcc)
+	 * 111100000000 tells which ftt may happen for this op
+	 * (this field is not used in the sparc32 code, as we can't
+	 * extract trap type info for ops on the FP queue)
+ */
+ int freg;
+ int (*func)(void *,void *,void *) = NULL;
+ void *rs1 = NULL, *rs2 = NULL, *rd = NULL;
+
+#ifdef DEBUG_MATHEMU
+ printk("In do_mathemu(), emulating %08lx\n", insn);
+#endif
+
+ if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
+ switch ((insn >> 5) & 0x1ff) {
+ /* QUAD - ftt == 3 */
+ case 0x001: type = 0x314; func = FMOVS; break;
+ case 0x005: type = 0x314; func = FNEGS; break;
+ case 0x009: type = 0x314; func = FABSS; break;
+ case 0x02b: type = 0x33c; func = FSQRTQ; break;
+ case 0x043: type = 0x33f; func = FADDQ; break;
+ case 0x047: type = 0x33f; func = FSUBQ; break;
+ case 0x04b: type = 0x33f; func = FMULQ; break;
+ case 0x04f: type = 0x33f; func = FDIVQ; break;
+ case 0x06e: type = 0x33a; func = FDMULQ; break;
+ case 0x0c7: type = 0x31c; func = FQTOS; break;
+ case 0x0cb: type = 0x32c; func = FQTOD; break;
+ case 0x0cc: type = 0x334; func = FITOQ; break;
+ case 0x0cd: type = 0x334; func = FSTOQ; break;
+ case 0x0ce: type = 0x338; func = FDTOQ; break;
+ case 0x0d3: type = 0x31c; func = FQTOI; break;
+ /* SUBNORMAL - ftt == 2 */
+ case 0x029: type = 0x214; func = FSQRTS; break;
+ case 0x02a: type = 0x228; func = FSQRTD; break;
+ case 0x041: type = 0x215; func = FADDS; break;
+ case 0x042: type = 0x22a; func = FADDD; break;
+ case 0x045: type = 0x215; func = FSUBS; break;
+ case 0x046: type = 0x22a; func = FSUBD; break;
+ case 0x049: type = 0x215; func = FMULS; break;
+ case 0x04a: type = 0x22a; func = FMULD; break;
+ case 0x04d: type = 0x215; func = FDIVS; break;
+ case 0x04e: type = 0x22a; func = FDIVD; break;
+ case 0x069: type = 0x225; func = FSMULD; break;
+ case 0x0c6: type = 0x218; func = FDTOS; break;
+ case 0x0c9: type = 0x224; func = FSTOD; break;
+ case 0x0d1: type = 0x214; func = FSTOI; break;
+ case 0x0d2: type = 0x218; func = FDTOI; break;
+ default:
+#ifdef DEBUG_MATHEMU
+ printk("unknown FPop1: %03lx\n",(insn>>5)&0x1ff);
+#endif
+ }
+ }
+ else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
+ switch ((insn >> 5) & 0x1ff) {
+ case 0x051: type = 0x305; func = FCMPS; break;
+ case 0x052: type = 0x30a; func = FCMPD; break;
+ case 0x053: type = 0x30f; func = FCMPQ; break;
+ case 0x055: type = 0x305; func = FCMPES; break;
+ case 0x056: type = 0x30a; func = FCMPED; break;
+ case 0x057: type = 0x30f; func = FCMPEQ; break;
+ default:
+#ifdef DEBUG_MATHEMU
+ printk("unknown FPop2: %03lx\n",(insn>>5)&0x1ff);
+#endif
+ }
+ }
+
+ if (!type) { /* oops, didn't recognise that FPop */
+ printk("attempt to emulate unrecognised FPop!\n");
+ return 0;
+ }
+
+ /* Decode the registers to be used */
+ freg = (*fsr >> 14) & 0xf;
+
+ *fsr &= ~0x1c000; /* clear the traptype bits */
+
+ freg = ((insn >> 14) & 0x1f);
+ switch (type & 0x3) /* is rs1 single, double or quad? */
+ {
+ case 3:
+		if (freg & 3)	/* quad operands must use a reg number that is */
+		{		/* a multiple of 4, i.e. low two bits clear. */
+ *fsr |= (6 << 14);
+ return 0; /* simulate invalid_fp_register exception */
+ }
+ /* fall through */
+ case 2:
+		if (freg & 1)	/* double operands must use an even reg number (low bit clear) */
+ {
+ *fsr |= (6 << 14);
+ return 0;
+ }
+ }
+ rs1 = (void *)&fregs[freg];
+ freg = (insn & 0x1f);
+ switch ((type >> 2) & 0x3)
+ { /* same again for rs2 */
+ case 3:
+		if (freg & 3)	/* quad operands must use a reg number that is */
+		{		/* a multiple of 4, i.e. low two bits clear. */
+ *fsr |= (6 << 14);
+ return 0; /* simulate invalid_fp_register exception */
+ }
+ /* fall through */
+ case 2:
+		if (freg & 1)	/* double operands must use an even reg number (low bit clear) */
+ {
+ *fsr |= (6 << 14);
+ return 0;
+ }
+ }
+ rs2 = (void *)&fregs[freg];
+ freg = ((insn >> 25) & 0x1f);
+ switch ((type >> 4) & 0x3) /* and finally rd. This one's a bit different */
+ {
+	case 0:	/* dest is fcc (this is one of the FCMP/FCMPE compares) */
+ if (freg) /* V8 has only one set of condition codes, so */
+ { /* anything but 0 in the rd field is an error */
+ *fsr |= (6 << 14); /* (should probably flag as invalid opcode */
+ return 0; /* but SIGFPE will do :-> ) */
+ }
+		rd = (void *)(fsr);	/* the FCMP routines are special and only */
+		break;			/* set the fcc bits they're supposed to :-> */
+ case 3:
+		if (freg & 3)	/* quad operands must use a reg number that is */
+		{		/* a multiple of 4, i.e. low two bits clear. */
+ *fsr |= (6 << 14);
+ return 0; /* simulate invalid_fp_register exception */
+ }
+ /* fall through */
+ case 2:
+		if (freg & 1)	/* double operands must use an even reg number (low bit clear) */
+ {
+ *fsr |= (6 << 14);
+ return 0;
+ }
+ /* fall through */
+ case 1:
+ rd = (void *)&fregs[freg];
+ break;
+ }
+#ifdef DEBUG_MATHEMU
+ printk("executing insn...\n");
+#endif
+ func(rd, rs2, rs1); /* do the Right Thing */
+ return 1; /* success! */
+}
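
None of the arithmetic emulation files (fadds.c, fmuld.c and friends) appear in this hunk because they are symlinked from the sparc64 math-emu directory by the Makefile above. Going by the conversion and compare files that are shown, and by the FLOATFUNC list in math.c, a single-precision add presumably has the following shape; it is a sketch, not text from the patch:

	#include "soft-fp.h"
	#include "single.h"

	int FADDS(void *rd, void *rs2, void *rs1)
	{
		FP_DECL_S(A); FP_DECL_S(B); FP_DECL_S(R);

		__FP_UNPACK_S(A, rs1);
		__FP_UNPACK_S(B, rs2);
		FP_ADD_S(R, A, B);
		__FP_PACK_S(rd, R);
		return 1;
	}
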
diff --git a/arch/sparc/math-emu/sfp-machine.h b/arch/sparc/math-emu/sfp-machine.h
new file mode 100644
index 000000000..eafad4273
--- /dev/null
+++ b/arch/sparc/math-emu/sfp-machine.h
@@ -0,0 +1,363 @@
+/* Machine-dependent software floating-point definitions. Sparc version.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ Actually, this is a sparc (32bit) version, written based on the
+ i386 and sparc64 versions, by me,
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+ Comments are by and large also mine, although they may be inaccurate.
+
+ In picking out asm fragments I've gone with the lowest common
+ denominator, which also happens to be the hardware I have :->
+ That is, a SPARC without hardware multiply and divide.
+ */
+
+
+/* basic word size definitions */
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+/* You can optionally code some things like addition in asm. For
+ * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't
+ * then you get a fragment of C code [if you change an #ifdef 0
+ * in op-2.h] or a call to add_ssaaaa (see below).
+ * Good places to look for asm fragments to use are gcc and glibc.
+ * gcc's longlong.h is useful.
+ */
+
+/* We need to know how to multiply and divide. If the host word size
+ * is >= 2*fracbits you can use FP_MUL_MEAT_n_imm(t,R,X,Y) which
+ * codes the multiply with whatever gcc does to 'a * b'.
+ * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm
+ * function that can multiply two 1W values and get a 2W result.
+ * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which
+ * does bitshifting to avoid overflow.
+ * For division there is FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size
+ * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or
+ * _FP_DIV_HELP_ldiv (see op-1.h).
+ * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W).
+ * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd
+ * to do this.]
+ * In general, 'n' is the number of words required to hold the type,
+ * and 't' is either S, D or Q for single/double/quad.
+ * -- PMM
+ */
+/* Example: SPARC64:
+ * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_imm(S,R,X,Y)
+ * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
+ * #define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+ * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
+ * #define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv_64(Q,R,X,Y)
+ *
+ * Example: i386:
+ * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64)
+ * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32)
+ * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
+ */
+#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,umul_ppmm)
+/* FIXME: This is not implemented, but should be soon */
+#define _FP_MUL_MEAT_Q(R,X,Y) _FP_FRAC_SET_4(R, _FP_ZEROFRAC_4)
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
+/* FIXME: This is not implemented, but should be soon */
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_FRAC_SET_4(R, _FP_ZEROFRAC_4)
+
+/* These macros define what NaN looks like. They're supposed to expand to
+ * a comma-separated set of 32bit unsigned ints that encode NaN.
+ */
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D, 0
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0, 0, 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* This macro appears to be called when both X and Y are NaNs, and
+ * has to choose one and copy it to R. i386 goes for the larger of the
+ * two, sparc64 just picks Y. I don't understand this at all so I'll
+ * go with sparc64 because it's shorter :-> -- PMM
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y) \
+ do { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define __FP_UNPACK_RAW_1(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ X##_f = _flo->bits.frac; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define __FP_PACK_RAW_1(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ _flo->bits.frac = X##_f; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+#define __FP_UNPACK_RAW_2(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ X##_f0 = _flo->bits.frac0; \
+ X##_f1 = _flo->bits.frac1; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define __FP_PACK_RAW_2(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ _flo->bits.frac0 = X##_f0; \
+ _flo->bits.frac1 = X##_f1; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+#define __FP_UNPACK_RAW_4(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ X##_f[0] = _flo->bits.frac0; \
+ X##_f[1] = _flo->bits.frac1; \
+ X##_f[2] = _flo->bits.frac2; \
+ X##_f[3] = _flo->bits.frac3; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define __FP_PACK_RAW_4(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)val; \
+ \
+ _flo->bits.frac0 = X##_f[0]; \
+ _flo->bits.frac1 = X##_f[1]; \
+ _flo->bits.frac2 = X##_f[2]; \
+ _flo->bits.frac3 = X##_f[3]; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+#define __FP_UNPACK_S(X,val) \
+ do { \
+ __FP_UNPACK_RAW_1(S,X,val); \
+ _FP_UNPACK_CANONICAL(S,1,X); \
+ } while (0)
+
+#define __FP_PACK_S(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(S,1,X); \
+ __FP_PACK_RAW_1(S,val,X); \
+ } while (0)
+
+#define __FP_UNPACK_D(X,val) \
+ do { \
+ __FP_UNPACK_RAW_2(D,X,val); \
+ _FP_UNPACK_CANONICAL(D,2,X); \
+ } while (0)
+
+#define __FP_PACK_D(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D,2,X); \
+ __FP_PACK_RAW_2(D,val,X); \
+ } while (0)
+
+#define __FP_UNPACK_Q(X,val) \
+ do { \
+ __FP_UNPACK_RAW_4(Q,X,val); \
+ _FP_UNPACK_CANONICAL(Q,4,X); \
+ } while (0)
+
+#define __FP_PACK_Q(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(Q,4,X); \
+ __FP_PACK_RAW_4(Q,val,X); \
+ } while (0)
+
+/* the asm fragments go here: all these are taken from glibc-2.0.5's stdlib/longlong.h */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* add_ssaaaa is used in op-2.h and should be equivalent to
+ * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
+ * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ * high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ * (i.e. carry out) is not stored anywhere, and is lost.
+ */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1
+ addx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ : "cc")
+
+
+/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
+ * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
+ * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ * LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
+ * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ * and is lost.
+ */
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1
+ subx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ : "cc")
+
+
+/* asm fragments for mul and div */
+/* umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+ * word product in HIGH_PROD and LOW_PROD.
+ * These look ugly because the sun4/4c don't have umul/udiv/smul/sdiv in
+ * hardware.
+ */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm
+ wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
+ sra %3,31,%%g2 ! Don't move this insn
+ and %2,%%g2,%%g2 ! Don't move this insn
+ andcc %%g0,0,%%g1 ! Don't move this insn
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,0,%%g1
+ add %%g1,%%g2,%0
+ rd %%y,%1" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "%rI" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "%g1", "%g2", "cc")
+
+/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator) divides a UDWtype, composed by the UWtype integers
+ * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ * than DENOMINATOR for correct operation. If, in addition, the most
+ * significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+ * UDIV_NEEDS_NORMALIZATION is defined to 1.
+ */
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd
+ mov 32,%%g1
+ subcc %1,%2,%%g0
+1: bcs 5f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ sub %1,%2,%1 ! this kills msb of n
+ addx %1,%1,%1 ! so this can't give carry
+ subcc %%g1,1,%%g1
+2: bne 1b
+ subcc %1,%2,%%g0
+ bcs 3f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ b 3f
+ sub %1,%2,%1 ! this kills msb of n
+4: sub %1,%2,%1
+5: addxcc %1,%1,%1
+ bcc 2b
+ subcc %%g1,1,%%g1
+! Got carry from n. Subtract next step to cancel this carry.
+ bne 4b
+ addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
+ sub %1,%2,%1
+3: xnor %0,0,%0
+ ! End of inline udiv_qrnnd" \
+ : "=&r" ((USItype) (q)), \
+ "=&r" ((USItype) (r)) \
+ : "r" ((USItype) (d)), \
+ "1" ((USItype) (n1)), \
+ "0" ((USItype) (n0)) : "%g1", "cc")
+
+#define UDIV_NEEDS_NORMALIZATION 0
+
+#define abort() \
+ return 0
+
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
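
Likewise, a portable sketch of the udiv_qrnnd() contract used above: a 64-by-32-bit division that requires HIGH_NUMERATOR < DENOMINATOR so the quotient fits in one word. Illustrative model only, not part of the patch:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Illustrative model of udiv_qrnnd(q, r, n1, n0, d). */
static void udiv_qrnnd_model(uint32_t *q, uint32_t *r,
			     uint32_t n1, uint32_t n0, uint32_t d)
{
	uint64_t n = ((uint64_t)n1 << 32) | n0;

	assert(d != 0 && n1 < d);	/* precondition stated in the comment above */
	*q = (uint32_t)(n / d);
	*r = (uint32_t)(n % d);
}

int main(void)
{
	uint32_t q, r;

	udiv_qrnnd_model(&q, &r, 0x1u, 0x00000000u, 0x10u);
	printf("q=%08x r=%08x\n", q, r);	/* q=10000000 r=00000000 */
	return 0;
}
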
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index bae5d323a..18eeb1f52 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.27 1997/11/07 15:01:27 jj Exp $
+# $Id: Makefile,v 1.30 1998/03/09 14:03:53 jj Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -8,9 +8,17 @@
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o
-O_OBJS := fault.o init.o sun4c.o srmmu.o hypersparc.o viking.o \
- tsunami.o loadmmu.o generic.o asyncd.o extable.o \
- turbosparc.o iommu.o io-unit.o
+O_OBJS := fault.o init.o loadmmu.o generic.o asyncd.o extable.o btfixup.o
+ifeq ($(CONFIG_SUN4),y)
+O_OBJS += nosrmmu.o
+else
+O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o turbosparc.o
+endif
+ifdef SMP
+O_OBJS += nosun4c.o
+else
+O_OBJS += sun4c.o
+endif
include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
new file mode 100644
index 000000000..e61ccc158
--- /dev/null
+++ b/arch/sparc/mm/btfixup.c
@@ -0,0 +1,334 @@
+/* $Id: btfixup.c,v 1.7 1998/03/09 14:03:56 jj Exp $
+ * btfixup.c: Boot time code fixup and relocator, so that
+ * we can get rid of most indirect calls to achieve single
+ * image sun4c and srmmu kernel.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/btfixup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+
+#define BTFIXUP_OPTIMIZE_NOP
+#define BTFIXUP_OPTIMIZE_OTHER
+
+extern char *srmmu_name;
+static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
+#ifdef CONFIG_SUN4
+static char str_sun4c[] __initdata = "sun4\n";
+#else
+static char str_sun4c[] __initdata = "sun4c\n";
+#endif
+static char str_srmmu[] __initdata = "srmmu[%s]/";
+static char str_iommu[] __initdata = "iommu\n";
+static char str_iounit[] __initdata = "io-unit\n";
+
+static int visited __initdata = 0;
+extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
+extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
+static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
+static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
+static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
+static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
+static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
+static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
+static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
+static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
+static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
+static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
+static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
+static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
+
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+__initfunc(static void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value))
+{
+ if (!fmangled)
+ *addr = value;
+ else {
+ unsigned int *q = (unsigned int *)q1;
+ if (*addr == 0x01000000) {
+ /* Noped */
+ *q = value;
+ } else if (addr[-1] == *q) {
+ /* Moved */
+ addr[-1] = value;
+ *q = value;
+ } else {
+ prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
+ prom_halt();
+ }
+ }
+}
+#else
+static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
+{
+ *addr = value;
+}
+#endif
+
+__initfunc(void btfixup(void))
+{
+ unsigned int *p, *q;
+ int type, count;
+ unsigned insn;
+ unsigned *addr;
+ int fmangled = 0;
+ void (*flush_cacheall)(void);
+
+ if (!visited) {
+ visited++;
+ printk(version);
+ if (ARCH_SUN4C_SUN4)
+ printk(str_sun4c);
+ else {
+ printk(str_srmmu, srmmu_name);
+ if (sparc_cpu_model == sun4d)
+ printk(str_iounit);
+ else
+ printk(str_iommu);
+ }
+ }
+ for (p = ___btfixup_start; p < ___btfixup_end; ) {
+ count = p[2];
+ q = p + 3;
+ switch (type = *(unsigned char *)p) {
+ case 'f':
+ count = p[3];
+ q = p + 4;
+ if (((p[0] & 1) || p[1])
+ && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
+ prom_printf(wrong_f, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'b':
+ if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
+ prom_printf(wrong_b, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 's':
+ if (p[1] + 0x1000 >= 0x2000) {
+ prom_printf(wrong_s, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'h':
+ if (p[1] & 0x3ff) {
+ prom_printf(wrong_h, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'a':
+ if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
+ prom_printf(wrong_a, p, p[1]);
+ prom_halt();
+ }
+ break;
+ }
+ if (p[0] & 1) {
+ p[0] &= ~1;
+ while (count) {
+ fmangled = 0;
+ addr = (unsigned *)*q;
+ if (addr < _stext || addr >= _end) {
+ prom_printf(wrong, type, p);
+ prom_halt();
+ }
+ insn = *addr;
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+ if (type != 'f' && q[1]) {
+ insn = *(unsigned int *)q[1];
+ if (!insn || insn == 1)
+ insn = *addr;
+ else
+ fmangled = 1;
+ }
+#endif
+ switch (type) {
+ case 'f': /* CALL */
+ if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
+ *addr = p[1];
+ break;
+ } else if (!q[1]) {
+ if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
+ *addr = (insn & 0xffc00000) | (p[1] >> 10); break;
+ } else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
+ *addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
+ } else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
+ bad_f:
+ prom_printf(insn_f, p, addr, insn, addr[1]);
+ prom_halt();
+ }
+ } else if (q[1] != 1)
+ addr[1] = q[1];
+ if (p[2] == BTFIXUPCALL_NORM) {
+ norm_f:
+ *addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
+ q[1] = 0;
+ break;
+ }
+#ifndef BTFIXUP_OPTIMIZE_NOP
+ goto norm_f;
+#else
+ if (!(addr[1] & 0x80000000)) {
+ if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */
+ goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
+ } else {
+ if ((addr[1] & 0x01800000) == 0x01800000) {
+ if ((addr[1] & 0x01f80000) == 0x01e80000) {
+ /* RESTORE */
+ goto norm_f; /* It is dangerous to patch that */
+ }
+ goto bad_f;
+ }
+ if ((addr[1] & 0xffffe003) == 0x9e03e000) {
+ /* ADD %O7, XX, %o7 */
+ int displac = (addr[1] << 19);
+
+ displac = (displac >> 21) + 2;
+ *addr = (0x10800000) + (displac & 0x3fffff);
+ q[1] = addr[1];
+ addr[1] = p[2];
+ break;
+ }
+ if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
+ goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
+ if ((addr[1] & 0x3e000000) == 0x1e000000)
+ goto norm_f; /* rd is %o7. We'd better take care. */
+ }
+ if (p[2] == BTFIXUPCALL_NOP) {
+ *addr = 0x01000000;
+ q[1] = 1;
+ break;
+ }
+#ifndef BTFIXUP_OPTIMIZE_OTHER
+ goto norm_f;
+#else
+ if (addr[1] == 0x01000000) { /* NOP in the delay slot */
+ q[1] = addr[1];
+ *addr = p[2];
+ break;
+ }
+ if ((addr[1] & 0xc0000000) != 0xc0000000) {
+ /* Not a memory operation */
+ if ((addr[1] & 0x30000000) == 0x10000000) {
+ /* Ok, non-memory op with rd %oX */
+ if ((addr[1] & 0x3e000000) == 0x1c000000)
+ goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
+ if ((addr[1] & 0x3e000000) > 0x12000000 ||
+ ((addr[1] & 0x3e000000) == 0x12000000 &&
+ p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
+ ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
+ /* Nobody uses the result. We can nop it out. */
+ *addr = p[2];
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
+ /* MOV %reg, %Ox */
+ if ((addr[1] & 0x3e000000) == 0x10000000 &&
+ (p[2] & 0x7c000) == 0x20000) {
+ /* Ok, it is call xx; mov reg, %o0 and call optimizes
+ to doing something on %o0. Patch the patch. */
+ *addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ if ((addr[1] & 0x3e000000) == 0x12000000 &&
+ p[2] == BTFIXUPCALL_STO1O0) {
+ *addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ }
+ }
+ }
+ *addr = addr[1];
+ q[1] = addr[1];
+ addr[1] = p[2];
+ break;
+#endif /* BTFIXUP_OPTIMIZE_OTHER */
+#endif /* BTFIXUP_OPTIMIZE_NOP */
+ case 'b': /* BLACKBOX */
+ /* Has to be sethi i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000) {
+ prom_printf(insn_b, p, addr, insn);
+ prom_halt();
+ } else {
+ void (*do_fixup)(unsigned *);
+
+ do_fixup = (void (*)(unsigned *))p[1];
+ do_fixup(addr);
+ }
+ break;
+ case 's': /* SIMM13 */
+ /* Has to be or %g0, i, xx */
+ if ((insn & 0xc1ffe000) != 0x80102000) {
+ prom_printf(insn_s, p, addr, insn);
+ prom_halt();
+ }
+ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
+ break;
+ case 'h': /* SETHI */
+ /* Has to be sethi i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000) {
+ prom_printf(insn_h, p, addr, insn);
+ prom_halt();
+ }
+ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+ break;
+ case 'a': /* HALF */
+ /* Has to be sethi i, xx or or %g0, i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000 &&
+ (insn & 0xc1ffe000) != 0x80102000) {
+ prom_printf(insn_a, p, addr, insn);
+ prom_halt();
+ }
+ if (p[1] & 0x3ff)
+ set_addr(addr, q[1], fmangled,
+ (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
+ else
+ set_addr(addr, q[1], fmangled,
+ (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
+ break;
+ case 'i': /* INT */
+ if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
+ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+ else if ((insn & 0x80002000) == 0x80002000 &&
+ (insn & 0x01800000) != 0x01800000) /* %LO */
+ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
+ else {
+ prom_printf(insn_i, p, addr, insn);
+ prom_halt();
+ }
+ break;
+ }
+ count -= 2;
+ q += 2;
+ }
+ } else
+ p = q + count;
+ }
+#ifdef __SMP__
+ flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
+#else
+ flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
+#endif
+ if (!flush_cacheall) {
+ prom_printf(fca_und);
+ prom_halt();
+ }
+ (*flush_cacheall)();
+}
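
The fixup loop above rewrites instructions in place using three SPARC encodings: a PC-relative CALL (0x40000000 plus the word displacement), the imm22 field of a SETHI for %hi() values, and the simm13 field of an OR for %lo() values. A self-contained sketch of those encodings (hypothetical helper names, illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* CALL: 0x40000000 | ((target - site) >> 2), as in the norm_f: case above. */
static uint32_t mk_call(uint32_t site, uint32_t target)
{
	return 0x40000000u | (((target - site) >> 2) & 0x3fffffffu);
}

/* Rewrite the imm22 field of an existing SETHI with %hi(value). */
static uint32_t set_hi(uint32_t sethi_insn, uint32_t value)
{
	return (sethi_insn & 0xffc00000u) | (value >> 10);
}

/* Rewrite the low immediate field of an existing OR with %lo(value). */
static uint32_t set_lo(uint32_t or_insn, uint32_t value)
{
	return (or_insn & 0xffffe000u) | (value & 0x3ffu);
}

int main(void)
{
	printf("%08x %08x %08x\n",
	       mk_call(0xf0004000u, 0xf0008000u),
	       set_hi(0x03000000u, 0xf0123456u),	/* sethi 0, %g1 patched to %hi() */
	       set_lo(0x82106000u, 0xf0123456u));	/* or %g1, 0, %g1 patched to %lo() */
	return 0;
}
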
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 0d6490860..274b9eebf 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.92 1997/05/15 21:14:21 davem Exp $
+/* $Id: fault.c,v 1.93 1998/03/25 10:43:16 jj Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -271,7 +271,7 @@ bad_area:
#endif
tsk->tss.sig_address = address;
tsk->tss.sig_desc = SUBSIG_NOMAPPING;
- send_sig(SIGSEGV, tsk, 1);
+ force_sig(SIGSEGV, tsk);
goto out;
}
unhandled_fault (address, tsk, regs);
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 2c27bfdab..dcf3fd990 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -1,4 +1,4 @@
-/* $Id: hypersparc.S,v 1.12 1997/11/27 15:42:30 jj Exp $
+/* $Id: hypersparc.S,v 1.13 1998/02/13 15:35:09 jj Exp $
* hypersparc.S: High speed Hypersparc mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -301,14 +301,13 @@ hypersparc_flush_tlb_range:
cmp %o3, -1
be hypersparc_flush_tlb_range_out
#endif
- srl %o1, SRMMU_PGDIR_SHIFT, %o1
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
- sll %o1, SRMMU_PGDIR_SHIFT, %o1
- sethi %hi(1 << SRMMU_PGDIR_SHIFT), %o4
+ and %o1, %o4, %o1
add %o1, 0x200, %o1
sta %g0, [%o1] ASI_M_FLUSH_PROBE
1:
- add %o1, %o4, %o1
+ sub %o1, %o4, %o1
cmp %o1, %o2
blu,a 1b
sta %g0, [%o1] ASI_M_FLUSH_PROBE
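
In the rewritten hypersparc_flush_tlb_range loop, %o4 holds ~(SRMMU_PGDIR_SIZE - 1): and-ing with it aligns the address down to a pgdir boundary, and subtracting it later advances by SRMMU_PGDIR_SIZE, because ~(size - 1) equals -size in two's complement, so one register serves as both mask and stride. A tiny sketch of that identity (the 24-bit shift is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 24;		/* stand-in for SRMMU_PGDIR_SIZE */
	unsigned long mask = ~(size - 1);	/* what sethi %hi(~(...)) loads into %o4 */
	unsigned long addr = 0xf2345678UL;

	addr &= mask;				/* align down: and %o1, %o4, %o1 */
	addr -= mask;				/* step forward: sub %o1, %o4, %o1 */
	printf("%lx\n", addr);			/* 0xf3000000, i.e. aligned base + size */
	return 0;
}
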
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index aa85666c6..db6559214 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,8 +1,9 @@
-/* $Id: init.c,v 1.50 1998/01/10 18:19:42 ecd Exp $
+/* $Id: init.c,v 1.59 1998/03/27 06:59:57 davem Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -30,11 +31,18 @@
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
+/* Turn this off if you suspect that addresses inside a physical memory hole
+   might end up in the page tables (which would mean something is badly broken). */
+
+#define FREE_UNUSED_MEM_MAP
+
extern void show_net_buffers(void);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
unsigned long sparc_unmapped_base;
+struct pgtable_cache_struct pgt_quicklists;
+
/* References to section boundaries */
extern char __init_begin, __init_end, etext;
@@ -65,26 +73,38 @@ pte_t __bad_page(void)
void show_mem(void)
{
- int i,free = 0,total = 0,reserved = 0;
- int shared = 0;
+ int free = 0,total = 0,reserved = 0;
+ int shared = 0, cached = 0;
+ struct page *page, *end;
printk("\nMem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
+ for (page = mem_map, end = mem_map + max_mapnr;
+ page < end; page++) {
+ if (PageSkip(page)) {
+ if (page->next_hash < page)
+ break;
+ page = page->next_hash;
+ }
total++;
- if (PageReserved(mem_map + i))
+ if (PageReserved(page))
reserved++;
- else if (!atomic_read(&mem_map[i].count))
+ else if (PageSwapCache(page))
+ cached++;
+ else if (!atomic_read(&page->count))
free++;
else
- shared += atomic_read(&mem_map[i].count) - 1;
+ shared += atomic_read(&page->count) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%ld page tables cached\n",pgtable_cache_size);
+ if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
+ printk("%ld page dirs cached\n", pgd_cache_size);
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
@@ -128,19 +148,23 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
switch(sparc_cpu_model) {
case sun4c:
case sun4e:
+ case sun4:
start_mem = sun4c_paging_init(start_mem, end_mem);
sparc_unmapped_base = 0xe0000000;
+ BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
break;
case sun4m:
case sun4d:
start_mem = srmmu_paging_init(start_mem, end_mem);
sparc_unmapped_base = 0x50000000;
+ BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
break;
case ap1000:
#if CONFIG_AP1000
start_mem = apmmu_paging_init(start_mem, end_mem);
sparc_unmapped_base = 0x50000000;
+ BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
break;
#endif
@@ -168,6 +192,7 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
protection_map[13] = PAGE_READONLY;
protection_map[14] = PAGE_SHARED;
protection_map[15] = PAGE_SHARED;
+ btfixup();
return device_scan(start_mem);
}
@@ -175,7 +200,7 @@ struct cache_palias *sparc_aliases;
extern void srmmu_frob_mem_map(unsigned long);
-int physmem_mapped_contig = 1;
+int physmem_mapped_contig __initdata = 1;
__initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
{
@@ -210,7 +235,8 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
int codepages = 0;
int datapages = 0;
int initpages = 0;
- unsigned long tmp2, addr;
+ unsigned long addr;
+ struct page *page, *end;
/* Saves us work later. */
memset((void *) ZERO_PAGE, 0, PAGE_SIZE);
@@ -220,33 +246,60 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
high_memory = (void *) end_mem;
start_mem = PAGE_ALIGN(start_mem);
- num_physpages = (start_mem - KERNBASE) >> PAGE_SHIFT;
+ num_physpages = 0;
addr = KERNBASE;
while(addr < start_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end) {
+ if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
- num_physpages--;
- } else
+ else
#endif
mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
addr += PAGE_SIZE;
}
taint_real_pages(start_mem, end_mem);
+
+#ifdef FREE_UNUSED_MEM_MAP
+ end = mem_map + max_mapnr;
+ for (page = mem_map; page < end; page++) {
+ if (PageSkip(page)) {
+ unsigned long low, high;
+
+ low = PAGE_ALIGN((unsigned long)(page+1));
+ if (page->next_hash < page)
+ high = ((unsigned long)end) & PAGE_MASK;
+ else
+ high = ((unsigned long)page->next_hash) & PAGE_MASK;
+ while (low < high) {
+ mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved);
+ low += PAGE_SIZE;
+ }
+ }
+ }
+#endif
+
for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+ if (PageSkip(mem_map + MAP_NR(addr))) {
+ unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;
+
+ next = (next << PAGE_SHIFT) + PAGE_OFFSET;
+ if (next < addr || next >= end_mem)
+ break;
+ addr = next;
+ }
+ num_physpages++;
if(PageReserved(mem_map + MAP_NR(addr))) {
if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
codepages++;
- else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
- initpages++;
- else if((addr < start_mem) && (addr >= KERNBASE))
+ else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
+ initpages++;
+ else if((addr < start_mem) && (addr >= KERNBASE))
datapages++;
continue;
}
atomic_set(&mem_map[MAP_NR(addr)].count, 1);
- num_physpages++;
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(addr < initrd_start || addr >= initrd_end))
@@ -254,14 +307,12 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
free_page(addr);
}
- tmp2 = nr_free_pages << PAGE_SHIFT;
-
- printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
- tmp2 >> 10,
+ printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
+ nr_free_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
- PAGE_OFFSET, end_mem);
+ (unsigned long)PAGE_OFFSET, end_mem);
freepages.min = nr_free_pages >> 7;
if(freepages.min < 16)
@@ -284,20 +335,25 @@ void free_initmem (void)
void si_meminfo(struct sysinfo *val)
{
- int i;
+ struct page *page, *end;
- i = MAP_NR(high_memory);
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
- while (i-- > 0) {
- if (PageReserved(mem_map + i))
+ for (page = mem_map, end = mem_map + max_mapnr;
+ page < end; page++) {
+ if (PageSkip(page)) {
+ if (page->next_hash < page)
+ break;
+ page = page->next_hash;
+ }
+ if (PageReserved(page))
continue;
val->totalram++;
- if (!atomic_read(&mem_map[i].count))
+ if (!atomic_read(&page->count))
continue;
- val->sharedram += atomic_read(&mem_map[i].count) - 1;
+ val->sharedram += atomic_read(&page->count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
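
The rewritten show_mem() and si_meminfo() above walk mem_map with the new PG_skip convention: a marker page whose next_hash points forward jumps the scan past a physical memory hole, and a backward-pointing link ends the scan. A stand-alone sketch of that traversal over a toy page array (made-up types, illustrative only):

#include <stdio.h>

#define PG_SKIP	0x1	/* toy stand-in for (1 << PG_skip) */

struct toy_page {
	unsigned int flags;
	struct toy_page *next_hash;	/* for PG_skip: first page after the hole */
};

/* Count usable pages the way show_mem()/si_meminfo() iterate mem_map. */
static int count_pages(struct toy_page *map, int n)
{
	struct toy_page *page, *end = map + n;
	int total = 0;

	for (page = map; page < end; page++) {
		if (page->flags & PG_SKIP) {
			if (page->next_hash < page)	/* backward link: end of RAM */
				break;
			page = page->next_hash;		/* jump over the hole */
		}
		total++;
	}
	return total;
}

int main(void)
{
	struct toy_page map[10] = { { 0, 0 } };

	map[3].flags = PG_SKIP;			/* pages 3..6 form a hole */
	map[3].next_hash = &map[7];
	printf("%d\n", count_pages(map, 10));	/* prints 6 (pages 0-2 and 7-9) */
	return 0;
}
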
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 519c124c9..d293fc71c 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -1,7 +1,7 @@
-/* $Id: io-unit.c,v 1.5 1997/12/22 16:09:26 jj Exp $
+/* $Id: io-unit.c,v 1.10 1998/03/03 12:31:14 jj Exp $
* io-unit.c: IO-UNIT specific routines for memory management.
*
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -13,28 +13,41 @@
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
+#include <asm/spinlock.h>
+#include <asm/bitops.h>
+
+/* #define IOUNIT_DEBUG */
+#ifdef IOUNIT_DEBUG
+#define IOD(x) printk(x)
+#else
+#define IOD(x) do { } while (0)
+#endif
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
-#define MKIOPTE(phys) ((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
-unsigned long sun4d_dma_base;
-unsigned long sun4d_dma_vbase;
-unsigned long sun4d_dma_size;
__initfunc(unsigned long
iounit_init(int sbi_node, int io_node, unsigned long memory_start,
unsigned long memory_end, struct linux_sbus *sbus))
{
iopte_t *xpt, *xptend;
- unsigned long paddr;
struct iounit_struct *iounit;
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
memory_start = LONG_ALIGN(memory_start);
iounit = (struct iounit_struct *)memory_start;
- memory_start += sizeof(struct iounit_struct);
-
+ memory_start = LONG_ALIGN(memory_start + sizeof(struct iounit_struct));
+
+ memset(iounit, 0, sizeof(*iounit));
+ iounit->limit[0] = IOUNIT_BMAP1_START;
+ iounit->limit[1] = IOUNIT_BMAP2_START;
+ iounit->limit[2] = IOUNIT_BMAPM_START;
+ iounit->limit[3] = IOUNIT_BMAPM_END;
+ iounit->rotor[1] = IOUNIT_BMAP2_START;
+ iounit->rotor[2] = IOUNIT_BMAPM_START;
+
prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
sizeof(iommu_promregs));
prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
@@ -46,11 +59,6 @@ iounit_init(int sbi_node, int io_node, unsigned long memory_start,
sbus->iommu = (struct iommu_struct *)iounit;
iounit->page_table = xpt;
- /* Initialize new table. */
- paddr = IOUNIT_DMA_BASE - sun4d_dma_base;
- for (xptend = xpt + (sun4d_dma_size >> PAGE_SHIFT);
- xpt < xptend; paddr++)
- *xpt++ = MKIOPTE(paddr);
for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
xpt < xptend;)
*xpt++ = 0;
@@ -58,36 +66,108 @@ iounit_init(int sbi_node, int io_node, unsigned long memory_start,
return memory_start;
}
+/* One has to hold iounit->lock to call this */
+static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+{
+ int i, j, k, npages;
+ unsigned long rotor, scan, limit;
+ iopte_t iopte;
+
+ npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+	/* A tiny bit of magic ingredient :) */
+ switch (npages) {
+ case 1: i = 0x0231; break;
+ case 2: i = 0x0132; break;
+ default: i = 0x0213; break;
+ }
+
+ IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+
+next: j = (i & 15);
+ rotor = iounit->rotor[j - 1];
+ limit = iounit->limit[j];
+ scan = rotor;
+nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
+ if (scan + npages > limit) {
+ if (limit != rotor) {
+ limit = rotor;
+ scan = iounit->limit[j - 1];
+ goto nexti;
+ }
+ i >>= 4;
+ if (!(i & 15))
+ panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+ goto next;
+ }
+ for (k = 1, scan++; k < npages; k++)
+ if (test_bit(scan++, iounit->bmap))
+ goto nexti;
+ iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
+ scan -= npages;
+ iopte = MKIOPTE(mmu_v2p(vaddr & PAGE_MASK));
+ vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+ for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
+ set_bit(scan, iounit->bmap);
+ iounit->page_table[scan] = iopte;
+ }
+ IOD(("%08lx\n", vaddr));
+ return vaddr;
+}
+
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
- /* Viking MXCC is IO coherent, just need to translate the address to DMA handle */
-#ifdef IOUNIT_DEBUG
- if ((((unsigned long) vaddr) & PAGE_MASK) < sun4d_dma_vaddr ||
- (((unsigned long) vaddr) & PAGE_MASK) + len > sun4d_dma_vbase + sun4d_dma_size)
- panic("Using non-DMA memory for iounit_get_scsi_one");
-#endif
- return (__u32)(sun4d_dma_base + mmu_v2p((long)vaddr));
+ unsigned long ret, flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+ spin_unlock_irqrestore(&iounit->lock, flags);
+ return ret;
}
static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
- /* Viking MXCC is IO coherent, just need to translate the address to DMA handle */
+ unsigned long flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+	/* FIXME: Cache some resolved pages - often several sg entries refer to the same page */
+ spin_lock_irqsave(&iounit->lock, flags);
for (; sz >= 0; sz--) {
-#ifdef IOUNIT_DEBUG
- unsigned long page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
- if (page < sun4d_dma_vbase || page + sg[sz].len > sun4d_dma_vbase + sun4d_dma_size)
- panic("Using non-DMA memory for iounit_get_scsi_sgl");
-#endif
- sg[sz].dvma_addr = (__u32) (sun4d_dma_base + mmu_v2p((long)sg[sz].addr));;
+ sg[sz].dvma_addr = iounit_get_area(iounit, (unsigned long)sg[sz].addr, sg[sz].len);
}
+ spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
{
+ unsigned long flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+ for (len += vaddr; vaddr < len; vaddr++)
+ clear_bit(vaddr, iounit->bmap);
+ spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
+ unsigned long flags;
+ unsigned long vaddr, len;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ for (; sz >= 0; sz--) {
+ len = ((sg[sz].dvma_addr & ~PAGE_MASK) + sg[sz].len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (sg[sz].dvma_addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+ for (len += vaddr; vaddr < len; vaddr++)
+ clear_bit(vaddr, iounit->bmap);
+ }
+ spin_unlock_irqrestore(&iounit->lock, flags);
}
#ifdef CONFIG_SBUS
@@ -135,24 +215,26 @@ static void iounit_map_dma_area(unsigned long addr, int len)
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
+/* FIXME: Write this */
return vaddr;
}
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
+/* FIXME: Write this */
}
__initfunc(void ld_mmu_iounit(void))
{
- mmu_lockarea = iounit_lockarea;
- mmu_unlockarea = iounit_unlockarea;
+ BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
- mmu_get_scsi_one = iounit_get_scsi_one;
- mmu_get_scsi_sgl = iounit_get_scsi_sgl;
- mmu_release_scsi_one = iounit_release_scsi_one;
- mmu_release_scsi_sgl = iounit_release_scsi_sgl;
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
- mmu_map_dma_area = iounit_map_dma_area;
+ BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
#endif
}
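
iounit_get_area() above is a rotor-based first-fit allocator over the IOPTE bitmap: it scans from the rotor of the chosen size class for npages consecutive free slots, wraps around once, marks them used, and turns the slot index into a DVMA address. A simplified single-region sketch of that search (made-up names; the real code also chooses among three regions and writes the IOPTEs):

#include <stdio.h>
#include <string.h>

#define SLOTS 64

static char bmap[SLOTS];	/* 0 = free, 1 = used; stand-in for iounit->bmap */

/* First-fit search for npages free slots, starting at *rotor and
 * wrapping around once, roughly as iounit_get_area() does per region. */
static int get_area(int *rotor, int npages)
{
	int start = *rotor, scan = *rotor, run = 0, wrapped = 0;

	while (1) {
		if (scan >= SLOTS) {		/* hit the end: wrap once */
			if (wrapped++)
				return -1;	/* the kernel would panic here */
			scan = 0;
			run = 0;
		}
		if (wrapped && scan + npages > start)
			return -1;
		run = bmap[scan] ? 0 : run + 1;
		scan++;
		if (run == npages) {
			memset(bmap + scan - npages, 1, npages);
			*rotor = scan;		/* next search starts here */
			return scan - npages;	/* slot index -> DVMA offset */
		}
	}
}

int main(void)
{
	int rotor = 0;

	printf("%d %d\n", get_area(&rotor, 3), get_area(&rotor, 2));	/* 0 3 */
	return 0;
}
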
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 301946326..e46216233 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -1,10 +1,10 @@
-/* $Id: iommu.c,v 1.4 1997/11/21 17:31:31 jj Exp $
+/* $Id: iommu.c,v 1.7 1998/02/22 10:32:26 ecd Exp $
* iommu.c: IOMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -18,8 +18,10 @@
/* srmmu.c */
extern int viking_mxcc_present;
-extern void (*flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
+static int viking_flush = 0;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
@@ -113,7 +115,7 @@ iommu_init(int iommund, unsigned long memory_start,
viking_mxcc_flush_page(start);
start += PAGE_SIZE;
}
- } else if(flush_page_for_dma == viking_flush_page) {
+ } else if (viking_flush) {
unsigned long start = (unsigned long) iommu->page_table;
unsigned long end = (start + ptsize);
while(start < end) {
@@ -199,7 +201,7 @@ static void iommu_map_dma_area(unsigned long addr, int len)
pgprot_t dvma_prot;
struct iommu_struct *iommu = SBus_chain->iommu;
iopte_t *iopte = iommu->page_table;
- iopte_t *iopte_first = iopte;
+ iopte_t *first;
if(viking_mxcc_present)
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
@@ -207,6 +209,7 @@ static void iommu_map_dma_area(unsigned long addr, int len)
dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
iopte += ((addr - iommu->start) >> PAGE_SHIFT);
+ first = iopte;
end = PAGE_ALIGN((addr + len));
while(addr < end) {
page = get_free_page(GFP_KERNEL);
@@ -223,21 +226,20 @@ static void iommu_map_dma_area(unsigned long addr, int len)
ptep = pte_offset(pmdp, addr);
set_pte(ptep, pte_val(mk_pte(page, dvma_prot)));
-
iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page));
}
addr += PAGE_SIZE;
}
flush_cache_all();
if(viking_mxcc_present) {
- unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+ unsigned long start = ((unsigned long) first) & PAGE_MASK;
unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
while(start < end) {
viking_mxcc_flush_page(start);
start += PAGE_SIZE;
}
- } else if(flush_page_for_dma == viking_flush_page) {
- unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+ } else if(viking_flush) {
+ unsigned long start = ((unsigned long) first) & PAGE_MASK;
unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
while(start < end) {
viking_flush_page(start);
@@ -260,25 +262,26 @@ static void iommu_unlockarea(char *vaddr, unsigned long len)
__initfunc(void ld_mmu_iommu(void))
{
- mmu_lockarea = iommu_lockarea;
- mmu_unlockarea = iommu_unlockarea;
+ viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
+ BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
- if (!flush_page_for_dma) {
+ if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
/* IO coherent chip */
- mmu_get_scsi_one = iommu_get_scsi_one_noflush;
- mmu_get_scsi_sgl = iommu_get_scsi_sgl_noflush;
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
} else if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter which page it is */
- mmu_get_scsi_one = iommu_get_scsi_one_gflush;
- mmu_get_scsi_sgl = iommu_get_scsi_sgl_gflush;
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
} else {
- mmu_get_scsi_one = iommu_get_scsi_one_pflush;
- mmu_get_scsi_sgl = iommu_get_scsi_sgl_pflush;
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
}
- mmu_release_scsi_one = iommu_release_scsi_one;
- mmu_release_scsi_sgl = iommu_release_scsi_sgl;
+ BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NOP);
#ifdef CONFIG_SBUS
- mmu_map_dma_area = iommu_map_dma_area;
+ BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
#endif
}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index 10eebecce..b38eea6d8 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -1,9 +1,10 @@
-/* $Id: loadmmu.c,v 1.46 1997/04/10 05:12:51 davem Exp $
+/* $Id: loadmmu.c,v 1.50 1998/02/05 14:19:02 jj Exp $
* loadmmu.c: This code loads up all the mm function pointers once the
* machine type has been determined. It also sets the static
* mmu values such as PAGE_NONE, etc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
@@ -16,6 +17,7 @@
#include <asm/pgtable.h>
#include <asm/a.out.h>
#include <asm/mmu_context.h>
+#include <asm/oplib.h>
unsigned long page_offset = 0xf0000000;
unsigned long stack_top = 0xf0000000 - PAGE_SIZE;
@@ -24,132 +26,8 @@ struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;
-unsigned long (*alloc_kernel_stack)(struct task_struct *tsk);
-void (*free_kernel_stack)(unsigned long stack);
-struct task_struct *(*alloc_task_struct)(void);
-void (*free_task_struct)(struct task_struct *tsk);
-
-void (*quick_kernel_fault)(unsigned long);
-
-void (*init_new_context)(struct mm_struct *mm);
-void (*destroy_context)(struct mm_struct *mm);
-
-/* translate between physical and virtual addresses */
-unsigned long (*mmu_v2p)(unsigned long);
-unsigned long (*mmu_p2v)(unsigned long);
-
-char *(*mmu_lockarea)(char *, unsigned long);
-void (*mmu_unlockarea)(char *, unsigned long);
-
-__u32 (*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
-void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
-void (*mmu_release_scsi_one)(__u32, unsigned long, struct linux_sbus *sbus);
-void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
-
-void (*mmu_map_dma_area)(unsigned long addr, int len);
-
-void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
-
-#ifdef __SMP__
-void (*local_flush_cache_all)(void);
-void (*local_flush_cache_mm)(struct mm_struct *);
-void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
- unsigned long end);
-void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
-
-void (*local_flush_tlb_all)(void);
-void (*local_flush_tlb_mm)(struct mm_struct *);
-void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
- unsigned long end);
-void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
-void (*local_flush_page_to_ram)(unsigned long address);
-void (*local_flush_sig_insns)(struct mm_struct *mm, unsigned long insn_addr);
-#endif
-
-void (*flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *);
-void (*flush_cache_range)(struct mm_struct *, unsigned long start,
- unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
-
-void (*flush_tlb_all)(void);
-void (*flush_tlb_mm)(struct mm_struct *);
-void (*flush_tlb_range)(struct mm_struct *, unsigned long start,
- unsigned long end);
-void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
-
-void (*flush_page_to_ram)(unsigned long page);
-
-void (*flush_sig_insns)(struct mm_struct *mm, unsigned long insn_addr);
-
-void (*set_pte)(pte_t *pteptr, pte_t pteval);
-
-unsigned int pmd_shift, pmd_size, pmd_mask;
-unsigned int (*pmd_align)(unsigned int);
-unsigned int pgdir_shift, pgdir_size, pgdir_mask;
-unsigned int (*pgdir_align)(unsigned int);
-unsigned int ptrs_per_pte, ptrs_per_pmd, ptrs_per_pgd;
unsigned int pg_iobits;
-pgprot_t page_none, page_shared, page_copy, page_readonly, page_kernel;
-
-unsigned long (*pte_page)(pte_t);
-unsigned long (*pmd_page)(pmd_t);
-unsigned long (*pgd_page)(pgd_t);
-
-void (*sparc_update_rootmmu_dir)(struct task_struct *, pgd_t *pgdir);
-unsigned long (*(vmalloc_start))(void);
-void (*switch_to_context)(struct task_struct *tsk);
-
-int (*pte_none)(pte_t);
-int (*pte_present)(pte_t);
-void (*pte_clear)(pte_t *);
-
-int (*pmd_none)(pmd_t);
-int (*pmd_bad)(pmd_t);
-int (*pmd_present)(pmd_t);
-void (*pmd_clear)(pmd_t *);
-
-int (*pgd_none)(pgd_t);
-int (*pgd_bad)(pgd_t);
-int (*pgd_present)(pgd_t);
-void (*pgd_clear)(pgd_t *);
-
-pte_t (*mk_pte)(unsigned long, pgprot_t);
-pte_t (*mk_pte_phys)(unsigned long, pgprot_t);
-pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
-void (*pgd_set)(pgd_t *, pmd_t *);
-pte_t (*pte_modify)(pte_t, pgprot_t);
-pgd_t * (*pgd_offset)(struct mm_struct *, unsigned long);
-pmd_t * (*pmd_offset)(pgd_t *, unsigned long);
-pte_t * (*pte_offset)(pmd_t *, unsigned long);
-void (*pte_free_kernel)(pte_t *);
-pte_t * (*pte_alloc_kernel)(pmd_t *, unsigned long);
-
-void (*pmd_free_kernel)(pmd_t *);
-pmd_t * (*pmd_alloc_kernel)(pgd_t *, unsigned long);
-void (*pte_free)(pte_t *);
-pte_t * (*pte_alloc)(pmd_t *, unsigned long);
-
-void (*pmd_free)(pmd_t *);
-pmd_t * (*pmd_alloc)(pgd_t *, unsigned long);
-void (*pgd_free)(pgd_t *);
-
-pgd_t * (*pgd_alloc)(void);
-
-int (*pte_write)(pte_t);
-int (*pte_dirty)(pte_t);
-int (*pte_young)(pte_t);
-
-pte_t (*pte_wrprotect)(pte_t);
-pte_t (*pte_mkclean)(pte_t);
-pte_t (*pte_mkold)(pte_t);
-pte_t (*pte_mkwrite)(pte_t);
-pte_t (*pte_mkdirty)(pte_t);
-pte_t (*pte_mkyoung)(pte_t);
-
-char *(*mmu_info)(void);
-
extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
@@ -157,6 +35,7 @@ __initfunc(void load_mmu(void))
{
switch(sparc_cpu_model) {
case sun4c:
+ case sun4:
ld_mmu_sun4c();
break;
case sun4m:
@@ -169,9 +48,8 @@ __initfunc(void load_mmu(void))
break;
#endif
default:
- printk("load_mmu:MMU support not available for this architecture\n");
- printk("load_mmu:sparc_cpu_model = %d\n", (int) sparc_cpu_model);
- printk("load_mmu:Halting...\n");
- panic("load_mmu()");
+ prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
+ prom_halt();
}
+ btfixup();
}
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
new file mode 100644
index 000000000..f82599f42
--- /dev/null
+++ b/arch/sparc/mm/nosrmmu.c
@@ -0,0 +1,50 @@
+/* $Id: nosrmmu.c,v 1.1 1998/03/09 14:04:15 jj Exp $
+ * nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
+ * so that it does not need srmmu and we avoid ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/mbus.h>
+
+static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";
+
+enum mbus_module srmmu_modtype;
+
+__initfunc(static void should_not_happen(void))
+{
+ prom_printf(shouldnothappen);
+ prom_halt();
+}
+
+__initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
+{
+ should_not_happen();
+}
+
+__initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
+{
+ should_not_happen();
+ return 0;
+}
+
+__initfunc(void ld_mmu_srmmu(void))
+{
+ should_not_happen();
+}
+
+void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+__initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *mem_end_p))
+{
+	return;
+}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
new file mode 100644
index 000000000..7da883a31
--- /dev/null
+++ b/arch/sparc/mm/nosun4c.c
@@ -0,0 +1,77 @@
+/* $Id: nosun4c.c,v 1.1 1998/03/09 14:04:16 jj Exp $
+ * nosun4c.c: This file is a bunch of dummies for SMP compiles,
+ * so that it does not need sun4c and we avoid ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/pgtable.h>
+
+static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
+
+/* Dummies */
+struct sun4c_mmu_ring {
+ unsigned long xxx1[3];
+ unsigned char xxx2[2];
+ int xxx3;
+};
+struct sun4c_mmu_ring sun4c_kernel_ring;
+struct sun4c_mmu_ring sun4c_kfree_ring;
+unsigned long sun4c_kernel_faults;
+unsigned long *sun4c_memerr_reg;
+
+__initfunc(static void should_not_happen(void))
+{
+ prom_printf(shouldnothappen);
+ prom_halt();
+}
+
+__initfunc(unsigned long sun4c_paging_init(unsigned long start_mem, unsigned long end_mem))
+{
+ should_not_happen();
+ return 0;
+}
+
+__initfunc(void ld_mmu_sun4c(void))
+{
+ should_not_happen();
+}
+
+void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void sun4c_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+void sun4c_complete_all_stores(void)
+{
+}
+
+pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+ return NULL;
+}
+
+pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
+{
+ return NULL;
+}
+
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+__initfunc(void sun4c_probe_vac(void))
+{
+ should_not_happen();
+}
+
+__initfunc(void sun4c_probe_memerr_reg(void))
+{
+ should_not_happen();
+}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b16e3cc1e..f9794125d 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,10 +1,10 @@
-/* $Id: srmmu.c,v 1.156 1997/11/28 14:23:42 jj Exp $
+/* $Id: srmmu.c,v 1.170 1998/03/09 14:04:01 jj Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -30,6 +30,7 @@
#include <asm/a.out.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
+#include <asm/spinlock.h>
/* Now the cpu specific definitions. */
#include <asm/viking.h>
@@ -39,6 +40,11 @@
#include <asm/swift.h>
#include <asm/turbosparc.h>
+#include <asm/btfixup.h>
+
+/* #define DEBUG_MAP_KERNEL */
+/* #define PAGESKIP_DEBUG */
+
enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
int vac_cache_size;
@@ -47,10 +53,6 @@ int vac_badbits;
extern unsigned long sparc_iobase_vaddr;
-extern unsigned long sun4d_dma_base;
-extern unsigned long sun4d_dma_size;
-extern unsigned long sun4d_dma_vbase;
-
#ifdef __SMP__
#define FLUSH_BEGIN(mm)
#define FLUSH_END
@@ -60,16 +62,24 @@ extern unsigned long sun4d_dma_vbase;
#endif
static int phys_mem_contig;
-long page_contig_offset;
+BTFIXUPDEF_SETHI(page_contig_offset)
+
+BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
+BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
-static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
-static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
+#define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
+#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
-void (*flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)
+
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
int flush_page_for_dma_global = 1;
-static void (*flush_chunk)(unsigned long chunk);
+#define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
#ifdef __SMP__
-static void (*local_flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
+
+#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif
static struct srmmu_stats {
@@ -79,7 +89,7 @@ static struct srmmu_stats {
int invmm;
} module_stats;
-static char *srmmu_name;
+char *srmmu_name;
ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;
@@ -96,8 +106,8 @@ static struct srmmu_trans {
#define SRMMU_HASHSZ 256
/* Not static, viking.S uses it. */
-struct srmmu_trans *srmmu_v2p_hash[SRMMU_HASHSZ];
-static struct srmmu_trans *srmmu_p2v_hash[SRMMU_HASHSZ];
+unsigned long srmmu_v2p_hash[SRMMU_HASHSZ];
+static unsigned long srmmu_p2v_hash[SRMMU_HASHSZ];
#define srmmu_ahashfn(addr) ((addr) >> 24)
@@ -111,20 +121,17 @@ int viking_mxcc_present = 0;
*/
static inline unsigned long srmmu_v2p(unsigned long vaddr)
{
- struct srmmu_trans *tp = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];
-
- if(tp)
- return (vaddr - tp->vbase + tp->pbase);
- else
- return 0xffffffffUL;
+ unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];
+
+ return (vaddr + off);
}
static inline unsigned long srmmu_p2v(unsigned long paddr)
{
- struct srmmu_trans *tp = srmmu_p2v_hash[srmmu_ahashfn(paddr)];
-
- if(tp)
- return (paddr - tp->pbase + tp->vbase);
+ unsigned long off = srmmu_p2v_hash[srmmu_ahashfn(paddr)];
+
+ if (off != 0xffffffffUL)
+ return (paddr - off);
else
return 0xffffffffUL;
}
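
The hash tables above now cache a per-16MB-region offset (paddr - vaddr) instead of a pointer into srmmu_map[], so srmmu_v2p() becomes one load plus one add, and srmmu_p2v() a load, compare and subtract. A tiny sketch of that scheme with made-up bank values (illustrative only):

#include <stdio.h>

#define HASHSZ		256
#define AHASH(a)	((unsigned long)(a) >> 24)	/* srmmu_ahashfn(): 16MB buckets */

static unsigned long v2p_off[HASHSZ];	/* per-region (paddr - vaddr) offsets */

int main(void)
{
	unsigned long vbase = 0xf0000000UL, pbase = 0x00000000UL, vaddr;
	int i;

	for (i = 0; i < 4; i++)			/* one made-up 64MB bank */
		v2p_off[AHASH(vbase) + i] = pbase - vbase;

	vaddr = 0xf1234567UL;
	printf("%08lx\n", vaddr + v2p_off[AHASH(vaddr)]);	/* -> 01234567 */
	return 0;
}
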
@@ -132,16 +139,47 @@ static inline unsigned long srmmu_p2v(unsigned long paddr)
/* Physical memory on most SS1000/SC2000 can be contiguous, so we handle that case
* as a special case to make things faster.
*/
+/* FIXME: gcc is stupid here and generates very very bad code in this
+ * heavily used routine. So we help it a bit. */
static inline unsigned long srmmu_c_v2p(unsigned long vaddr)
{
+#if KERNBASE != 0xf0000000
if (vaddr >= KERNBASE) return vaddr - KERNBASE;
- return (vaddr - page_contig_offset);
+ return vaddr - BTFIXUP_SETHI(page_contig_offset);
+#else
+ register unsigned long kernbase;
+
+ __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
+ return vaddr - ((vaddr >= kernbase) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
+#endif
}
static inline unsigned long srmmu_c_p2v(unsigned long paddr)
{
+#if KERNBASE != 0xf0000000
if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE;
- return (paddr + page_contig_offset);
+ return (paddr + BTFIXUP_SETHI(page_contig_offset));
+#else
+ register unsigned long kernbase;
+ register unsigned long limit;
+
+ __asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit));
+ __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
+
+ return paddr + ((paddr < limit) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
+#endif
+}
+
+/* On boxes without lots_of_ram, KERNBASE is mapped to PA<0> and the highest
+   PA is below 0x0d000000, so we can optimize even more :) */
+static inline unsigned long srmmu_s_v2p(unsigned long vaddr)
+{
+ return vaddr - PAGE_OFFSET;
+}
+
+static inline unsigned long srmmu_s_p2v(unsigned long paddr)
+{
+ return paddr + PAGE_OFFSET;
}
/* In general all page table modifications should use the V8 atomic
@@ -157,19 +195,43 @@ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
/* Functions really use this, not srmmu_swap directly. */
#define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
+#ifdef PAGESKIP_DEBUG
+#define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
+#else
+#define PGSKIP_DEBUG(from,to) do { } while (0)
+#endif
+
__initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
{
- unsigned long bank_start, bank_end;
+ unsigned long bank_start, bank_end = 0;
unsigned long addr;
int i;
/* First, mark all pages as invalid. */
for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+
+ /* Next, pg[0-3] is sun4c cruft, so we can free it... */
+ mem_map[MAP_NR(pg0)].flags &= ~(1<<PG_reserved);
+ mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
+ mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
+ mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);
start_mem = PAGE_ALIGN(start_mem);
for(i = 0; srmmu_map[i].size; i++) {
bank_start = srmmu_map[i].vbase;
+
+ if (i && bank_start - bank_end > 2 * PAGE_SIZE) {
+ mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+ mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
+ PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
+ if (bank_end > KERNBASE && bank_start < KERNBASE) {
+ mem_map[0].flags |= (1<<PG_skip);
+ mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
+ PGSKIP_DEBUG(0, MAP_NR(bank_start));
+ }
+ }
+
bank_end = bank_start + srmmu_map[i].size;
while(bank_start < bank_end) {
if((bank_start >= KERNBASE) &&
@@ -180,23 +242,28 @@ __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
bank_start += PAGE_SIZE;
}
+
+ if (bank_end == 0xfd000000)
+ bank_end = PAGE_OFFSET;
}
- if (sparc_cpu_model == sun4d) {
- for (addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
- if (addr < sun4d_dma_vbase || addr >= sun4d_dma_vbase + sun4d_dma_size)
- clear_bit(PG_DMA, &mem_map[MAP_NR(addr)].flags);
+
+ if (bank_end < KERNBASE) {
+ mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+ mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
+ PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
+ } else if (MAP_NR(bank_end) < max_mapnr) {
+ mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+ if (mem_map[0].flags & (1 << PG_skip)) {
+ mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
+ PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
+ } else {
+ mem_map[MAP_NR(bank_end)].next_hash = mem_map;
+ PGSKIP_DEBUG(MAP_NR(bank_end), 0);
+ }
}
}
/* The very generic SRMMU page table operations. */
-static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
-static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
-
-static unsigned long srmmu_vmalloc_start(void)
-{
- return SRMMU_VMALLOC_START;
-}
-
static inline int srmmu_device_memory(unsigned long x)
{
return ((x & 0xF0000000) != 0);
@@ -220,44 +287,53 @@ static unsigned long srmmu_c_pmd_page(pmd_t pmd)
static unsigned long srmmu_c_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
-static int srmmu_pte_none(pte_t pte)
+static unsigned long srmmu_s_pgd_page(pgd_t pgd)
+{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_s_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+
+static unsigned long srmmu_s_pmd_page(pmd_t pmd)
+{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_s_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+
+static unsigned long srmmu_s_pte_page(pte_t pte)
+{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_s_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+
+static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }
-static int srmmu_pte_present(pte_t pte)
+static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
+static inline void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
-static int srmmu_pmd_none(pmd_t pmd)
+static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
-static int srmmu_pmd_bad(pmd_t pmd)
+static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-static int srmmu_pmd_present(pmd_t pmd)
+static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
+static inline void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
-static int srmmu_pgd_none(pgd_t pgd)
+static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }
-static int srmmu_pgd_bad(pgd_t pgd)
+static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-static int srmmu_pgd_present(pgd_t pgd)
+static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
+static inline void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
-static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
-static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
-static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
+static inline int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
+static inline int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
+static inline int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
-static pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
-static pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
-static pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);}
-static pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);}
-static pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);}
-static pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);}
+static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
+static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
+static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);}
+static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);}
+static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);}
+static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);}
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -269,6 +345,9 @@ static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); }
+static pte_t srmmu_s_mk_pte(unsigned long page, pgprot_t pgprot)
+{ return __pte(((srmmu_s_v2p(page)) >> 4) | pgprot_val(pgprot)); }
+
static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
@@ -307,41 +386,64 @@ static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep)
set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4)));
}
-static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
+static void srmmu_s_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
+{
+ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pgdp) >> 4)));
+}
+
+static void srmmu_s_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pmdp) >> 4)));
+}
+
+static void srmmu_s_pmd_set(pmd_t * pmdp, pte_t * ptep)
+{
+ set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) ptep) >> 4)));
+}
+
+static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
}
/* to find an entry in a top-level page table... */
-static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{
- return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
+ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
}
/* Find an entry in the second-level page table.. */
-static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}
/* Find an entry in the third-level page table.. */
-static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
-/* Find an entry in the second-level page table.. */
-static pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}
-/* Find an entry in the third-level page table.. */
-static pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
{
return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
+static inline pmd_t *srmmu_s_pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) srmmu_s_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
+}
+
+static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
+{
+ return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+}
+
/* This must update the context table entry for this process. */
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
@@ -352,334 +454,146 @@ static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
}
}
-static inline void srmmu_putpage(unsigned long page)
-{
- free_page(page);
-}
-
-#define LC_HIGH_WATER 128
-#define BC_HIGH_WATER 32
-
-static unsigned long *lcnks = 0;
-static unsigned long *bcnks = 0;
-static int lcwater = 0;
-static int bcwater = 0;
-static int chunk_pages = 0;
-static int clct_pages = 0;
-
-#define RELAX_JIFFIES 16
-
-static int lcjiffies;
-static int bcjiffies;
-
-struct chunk {
- struct chunk *next;
- struct chunk *prev;
- struct chunk *npage;
- struct chunk *ppage;
- int count;
-};
-
-static int garbage_calls = 0;
-
-#define OTHER_PAGE(p,q) (((unsigned long)(p) ^ (unsigned long)(q)) & PAGE_MASK)
-
-static int garbage_collect(unsigned long **cnks, int n, int cpp)
+static inline pte_t *srmmu_get_pte_fast(void)
{
- struct chunk *root = (struct chunk *)*cnks;
- struct chunk *p, *q, *curr, *next;
- int water = n;
-
- next = root->next;
- curr = root->prev = root->next = root->npage = root->ppage = root;
- root->count = 1;
-
- garbage_calls++;
-
- while (--n) {
- p = next;
- next = next->next;
-
- if (OTHER_PAGE(p, curr)) {
-
- q = curr->npage;
- while (q != curr) {
- if (!OTHER_PAGE(p, q))
- break;
- q = q->npage;
- }
-
- if (q == curr) {
-
- (p->npage = curr->npage)->ppage = p;
- curr->npage = p;
- p->ppage = curr;
-
- p->next = p->prev = p;
- p->count = 1;
-
- curr = p;
-
- continue;
- }
- curr = q;
- }
-
- (p->next = curr->next)->prev = p;
- curr->next = p;
- p->prev = curr;
-
- if (++curr->count == cpp) {
-
- q = curr->npage;
- if (curr == q) {
-
- srmmu_putpage((unsigned long)curr & PAGE_MASK);
- water -= cpp;
-
- clct_pages++;
- chunk_pages--;
-
- if (--n) {
- p = next;
- next = next->next;
-
- curr = root->prev =
- root->next = root->npage =
- root->ppage = root = p;
- root->count = 1;
-
- continue;
- }
- return 0;
- }
-
- if (curr == root)
- root = q;
-
- curr->ppage->npage = q;
- q->ppage = curr->ppage;
-
- srmmu_putpage((unsigned long)curr & PAGE_MASK);
- water -= cpp;
-
- clct_pages++;
- chunk_pages--;
-
- curr = q;
- }
- }
-
- p = root;
- while (p->npage != root) {
- p->prev->next = p->npage;
- p = p->npage;
+ struct page *ret;
+
+ spin_lock(&pte_spinlock);
+ if ((ret = (struct page *)pte_quicklist) != NULL) {
+ unsigned int mask = (unsigned int)ret->pprev_hash;
+ unsigned int tmp, off;
+
+ if (mask & 0xff)
+ for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
+ else
+ for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
+ (unsigned int)ret->pprev_hash = mask & ~tmp;
+ if (!(mask & ~tmp))
+ pte_quicklist = (unsigned long *)ret->next_hash;
+ ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ pgtable_cache_size--;
}
-
- *cnks = (unsigned long *)root;
- return water;
+ spin_unlock(&pte_spinlock);
+ return (pte_t *)ret;
}
-static unsigned long *get_small_chunk(void)
+static inline pte_t *srmmu_get_pte_slow(void)
{
- unsigned long *rval;
- unsigned long flags;
-
- save_and_cli(flags);
- if(lcwater) {
- lcwater--;
- rval = lcnks;
- lcnks = (unsigned long *) *rval;
- } else {
- rval = (unsigned long *) __get_free_page(GFP_KERNEL);
-
- if(!rval) {
- restore_flags(flags);
- return 0;
- }
- chunk_pages++;
-
- lcnks = (rval + 64);
-
- /* Cache stomping, I know... */
- *(rval + 64) = (unsigned long) (rval + 128);
- *(rval + 128) = (unsigned long) (rval + 192);
- *(rval + 192) = (unsigned long) (rval + 256);
- *(rval + 256) = (unsigned long) (rval + 320);
- *(rval + 320) = (unsigned long) (rval + 384);
- *(rval + 384) = (unsigned long) (rval + 448);
- *(rval + 448) = (unsigned long) (rval + 512);
- *(rval + 512) = (unsigned long) (rval + 576);
- *(rval + 576) = (unsigned long) (rval + 640);
- *(rval + 640) = (unsigned long) (rval + 704);
- *(rval + 704) = (unsigned long) (rval + 768);
- *(rval + 768) = (unsigned long) (rval + 832);
- *(rval + 832) = (unsigned long) (rval + 896);
- *(rval + 896) = (unsigned long) (rval + 960);
- *(rval + 960) = 0;
- lcwater = 15;
+ pte_t *ret;
+ struct page *page;
+
+ ret = (pte_t *)get_free_page(GFP_KERNEL);
+ if (ret) {
+ page = mem_map + MAP_NR(ret);
+ flush_chunk((unsigned long)ret);
+ (unsigned int)page->pprev_hash = 0xfffe;
+ spin_lock(&pte_spinlock);
+ (unsigned long *)page->next_hash = pte_quicklist;
+ pte_quicklist = (unsigned long *)page;
+ pgtable_cache_size += 15;
}
- lcjiffies = jiffies;
- restore_flags(flags);
- memset(rval, 0, 256);
- flush_chunk((unsigned long)rval);
- return rval;
-}
-
-static inline void free_small_chunk(unsigned long *it)
-{
- unsigned long flags;
-
- save_and_cli(flags);
- *it = (unsigned long) lcnks;
- lcnks = it;
- lcwater++;
-
- if ((lcwater > LC_HIGH_WATER) &&
- (jiffies > lcjiffies + RELAX_JIFFIES))
- lcwater = garbage_collect(&lcnks, lcwater, 16);
-
- restore_flags(flags);
+ return ret;
}
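
The fast path above treats every 4K page on pte_quicklist as sixteen 256-byte page-table chunks: a 16-bit mask kept in the page's pprev_hash field has one bit per free chunk, next_hash chains the partially used pages, and the slow path seeds a fresh page with mask 0xfffe after handing chunk 0 to the caller (the pgd paths below play the same game with four 1K directories per page). A minimal stand-alone sketch of that bitmask bookkeeping, with every name invented here purely for illustration:

	/* Toy model of the quicklist chunk scheme; not kernel code. */
	#include <stdint.h>
	#include <stddef.h>

	#define CHUNK_SHIFT	8				/* 256-byte chunks	 */
	#define CHUNKS		16				/* 16 chunks per 4K page */

	struct chunk_page {
		uint16_t	free_mask;			/* bit i set => chunk i free */
		unsigned char	mem[CHUNKS << CHUNK_SHIFT];
	};

	static void *chunk_alloc(struct chunk_page *pg)
	{
		unsigned int bit, off;

		if (!pg->free_mask)
			return NULL;				/* page exhausted */
		for (bit = 1, off = 0; !(pg->free_mask & bit); bit <<= 1)
			off += 1 << CHUNK_SHIFT;		/* find first free chunk */
		pg->free_mask &= ~bit;				/* mark it in use */
		return pg->mem + off;
	}

	static void chunk_free(struct chunk_page *pg, void *p)
	{
		size_t off = (unsigned char *)p - pg->mem;

		pg->free_mask |= 1 << (off >> CHUNK_SHIFT);	/* mark it free again */
	}
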
-static unsigned long *get_big_chunk(void)
+static inline pgd_t *srmmu_get_pgd_fast(void)
{
- unsigned long *rval;
- unsigned long flags;
-
- save_and_cli(flags);
- if(bcwater) {
- bcwater--;
- rval = bcnks;
- bcnks = (unsigned long *) *rval;
- } else {
- rval = (unsigned long *) __get_free_page(GFP_KERNEL);
-
- if(!rval) {
- restore_flags(flags);
- return 0;
- }
- chunk_pages++;
-
- bcnks = (rval + 256);
+ struct page *ret;
- /* Cache stomping, I know... */
- *(rval + 256) = (unsigned long) (rval + 512);
- *(rval + 512) = (unsigned long) (rval + 768);
- *(rval + 768) = 0;
- bcwater = 3;
+ spin_lock(&pgd_spinlock);
+ if ((ret = (struct page *)pgd_quicklist) != NULL) {
+ unsigned int mask = (unsigned int)ret->pprev_hash;
+ unsigned int tmp, off;
+
+ for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024);
+ (unsigned int)ret->pprev_hash = mask & ~tmp;
+ if (!(mask & ~tmp))
+ pgd_quicklist = (unsigned long *)ret->next_hash;
+ ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ pgd_cache_size--;
}
- bcjiffies = jiffies;
- restore_flags(flags);
- memset(rval, 0, 1024);
- flush_chunk((unsigned long)rval);
- return rval;
+ spin_unlock(&pgd_spinlock);
+ return (pgd_t *)ret;
}
-static inline void free_big_chunk(unsigned long *it)
+static inline pgd_t *srmmu_get_pgd_slow(void)
{
- unsigned long flags;
-
- save_and_cli(flags);
- *it = (unsigned long) bcnks;
- bcnks = it;
- bcwater++;
-
- if ((bcwater > BC_HIGH_WATER) &&
- (jiffies > bcjiffies + RELAX_JIFFIES))
- bcwater = garbage_collect(&bcnks, bcwater, 4);
-
- restore_flags(flags);
+ pgd_t *ret;
+ struct page *page;
+
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (ret) {
+ pgd_t *init = pgd_offset(&init_mm, 0);
+ memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ page = mem_map + MAP_NR(ret);
+ flush_chunk((unsigned long)ret);
+ (unsigned int)page->pprev_hash = 0xe;
+ spin_lock(&pgd_spinlock);
+ (unsigned long *)page->next_hash = pgd_quicklist;
+ pgd_quicklist = (unsigned long *)page;
+ pgd_cache_size += 3;
+ spin_unlock(&pgd_spinlock);
+ }
+ return ret;
}
-#define NEW_PGD() (pgd_t *) get_big_chunk()
-#define NEW_PMD() (pmd_t *) get_small_chunk()
-#define NEW_PTE() (pte_t *) get_small_chunk()
-#define FREE_PGD(chunk) free_big_chunk((unsigned long *)(chunk))
-#define FREE_PMD(chunk) free_small_chunk((unsigned long *)(chunk))
-#define FREE_PTE(chunk) free_small_chunk((unsigned long *)(chunk))
-
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any, and marks the page tables reserved.
- */
-static void srmmu_pte_free_kernel(pte_t *pte)
+static void srmmu_free_pte_slow(pte_t *pte)
{
- FREE_PTE(pte);
}
-static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+static void srmmu_free_pgd_slow(pgd_t *pgd)
{
- address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
- if(srmmu_pmd_none(*pmd)) {
- pte_t *page = NEW_PTE();
- if(srmmu_pmd_none(*pmd)) {
- if(page) {
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- FREE_PTE(page);
- }
- if(srmmu_pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
}
-static void srmmu_pmd_free_kernel(pmd_t *pmd)
+static inline void srmmu_pte_free(pte_t *pte)
{
- FREE_PMD(pmd);
-}
+ struct page *page = mem_map + MAP_NR(pte);
-static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
-{
- address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
- if(srmmu_pgd_none(*pgd)) {
- pmd_t *page;
- page = NEW_PMD();
- if(srmmu_pgd_none(*pgd)) {
- if(page) {
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
- return NULL;
- }
- FREE_PMD(page);
+ spin_lock(&pte_spinlock);
+ if (!page->pprev_hash) {
+ (unsigned long *)page->next_hash = pte_quicklist;
+ pte_quicklist = (unsigned long *)page;
}
- if(srmmu_pgd_bad(*pgd)) {
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
- return NULL;
- }
- return (pmd_t *) pgd_page(*pgd) + address;
-}
-
-static void srmmu_pte_free(pte_t *pte)
-{
- FREE_PTE(pte);
+ (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15));
+ pgtable_cache_size++;
+ spin_unlock(&pte_spinlock);
}
static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
if(srmmu_pmd_none(*pmd)) {
- pte_t *page = NEW_PTE();
+ pte_t *page = srmmu_get_pte_fast();
+
+ if (page) {
+ pmd_set(pmd, page);
+ return page + address;
+ }
+ page = srmmu_get_pte_slow();
if(srmmu_pmd_none(*pmd)) {
if(page) {
+ spin_unlock(&pte_spinlock);
pmd_set(pmd, page);
return page + address;
}
pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
- FREE_PTE(page);
+ if (page) {
+ (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
+ pgtable_cache_size++;
+ spin_unlock(&pte_spinlock);
+ }
}
if(srmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
@@ -692,23 +606,34 @@ static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
/* Real three-level page tables on SRMMU. */
static void srmmu_pmd_free(pmd_t * pmd)
{
- FREE_PMD(pmd);
+ return srmmu_pte_free((pte_t *)pmd);
}
static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = NEW_PMD();
+ pmd_t *page = (pmd_t *)srmmu_get_pte_fast();
+
+ if (page) {
+ pgd_set(pgd, page);
+ return page + address;
+ }
+ page = (pmd_t *)srmmu_get_pte_slow();
if(srmmu_pgd_none(*pgd)) {
if(page) {
+ spin_unlock(&pte_spinlock);
pgd_set(pgd, page);
return page + address;
}
pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
- FREE_PMD(page);
+ if (page) {
+ (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
+ pgtable_cache_size++;
+ spin_unlock(&pte_spinlock);
+ }
}
if(srmmu_pgd_bad(*pgd)) {
printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
@@ -720,12 +645,58 @@ static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
static void srmmu_pgd_free(pgd_t *pgd)
{
- FREE_PGD(pgd);
+ struct page *page = mem_map + MAP_NR(pgd);
+
+ spin_lock(&pgd_spinlock);
+ if (!page->pprev_hash) {
+ (unsigned long *)page->next_hash = pgd_quicklist;
+ pgd_quicklist = (unsigned long *)page;
+ }
+ (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3));
+ pgd_cache_size++;
+ spin_unlock(&pgd_spinlock);
}
static pgd_t *srmmu_pgd_alloc(void)
{
- return NEW_PGD();
+ pgd_t *ret;
+
+ ret = srmmu_get_pgd_fast();
+ if (ret) return ret;
+ return srmmu_get_pgd_slow();
+}
+
+
+static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ struct page *page;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ spin_lock(&pgd_spinlock);
+ address >>= SRMMU_PGDIR_SHIFT;
+ for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
+ pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ unsigned int mask = (unsigned int)page->pprev_hash;
+
+ if (mask & 1)
+ pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry;
+ if (mask & 2)
+ pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry;
+ if (mask & 4)
+ pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry;
+ if (mask & 8)
+ pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry;
+ if (mask)
+ flush_chunk((unsigned long)pgd);
+ }
+ spin_unlock(&pgd_spinlock);
}
static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
@@ -926,6 +897,19 @@ extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+/* Workaround until we find out what is going on with Swift. When low on memory, it sometimes
+ * loops in fault/handle_mm_fault (including flush_tlb_page), only to find the entry is already
+ * in the page tables, and then faults again on the same instruction. I really don't understand
+ * it; the contexts are right and flush_tlb_all is done as well, yet it faults again... Strange. -jj
+ */
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+ static unsigned long last;
+
+ if (last == address) viking_hwprobe(address);
+ last = address;
+}
+
/* Swift flushes. It has the recommended SRMMU specification flushing
* facilities, so we can do things in a more fine grained fashion than we
* could on the tsunami. Let's watch out for HARDWARE BUGS...
@@ -1191,12 +1175,10 @@ static void cypress_flush_chunk(unsigned long chunk)
cypress_flush_page_to_ram(chunk);
}
-#if NOTUSED
/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}
-#endif
/* Cypress has unified L2 VIPT, from which both instructions and data
* are stored. It does not have an onboard icache of any sort, therefore
@@ -1282,9 +1264,8 @@ extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_chunk(unsigned long chunk);
-extern void viking_c_flush_page(unsigned long page);
-extern void viking_c_mxcc_flush_page(unsigned long page);
extern void viking_c_flush_chunk(unsigned long chunk);
+extern void viking_s_flush_chunk(unsigned long chunk);
extern void viking_mxcc_flush_chunk(unsigned long chunk);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
@@ -1481,7 +1462,7 @@ static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned
* looking at the prom's page table directly which is what most
* other OS's do. Yuck... this is much better.
*/
-void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
+__initfunc(void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end))
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -1539,21 +1520,79 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
}
}
-/* #define DEBUG_MAP_KERNEL */
-
#ifdef DEBUG_MAP_KERNEL
#define MKTRACE(foo) prom_printf foo
#else
#define MKTRACE(foo)
#endif
-static int lots_of_ram = 0;
-static int large_pte_optimize = 1;
+static int lots_of_ram __initdata = 0;
+static int srmmu_low_pa __initdata = 0;
+static unsigned long end_of_phys_memory __initdata = 0;
+
+__initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *end_mem_p))
+{
+ unsigned int sum = 0;
+ unsigned long last = 0xff000000;
+ long first, cur;
+ unsigned long pa;
+ unsigned long total = 0;
+ int i;
+
+ pa = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+ pa = (pa & SRMMU_PTE_PMASK) << 4;
+ if (!sp_banks[0].base_addr && pa == PAGE_SIZE) {
+ for(i = 0; sp_banks[i].num_bytes != 0; i++) {
+ if (sp_banks[i].base_addr + sp_banks[i].num_bytes > 0x0d000000)
+ break;
+ }
+ if (!sp_banks[i].num_bytes) {
+ srmmu_low_pa = 1;
+ end_of_phys_memory = SRMMU_PGDIR_ALIGN(sp_banks[i-1].base_addr + sp_banks[i-1].num_bytes);
+ *end_mem_p = KERNBASE + end_of_phys_memory;
+ if (sp_banks[0].num_bytes >= (6 * 1024 * 1024) || end_of_phys_memory <= 0x06000000) {
+ /* Make sure there will be enough memory for the whole mem_map (even if sparse) */
+ return;
+ }
+ }
+ }
+ for(i = 0; sp_banks[i].num_bytes != 0; i++) {
+ pa = sp_banks[i].base_addr;
+ first = (pa & (~SRMMU_PGDIR_MASK));
+ cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
+ if (cur < 0) cur = 0;
+ if (!first || last != (pa & SRMMU_PGDIR_MASK))
+ total += SRMMU_PGDIR_SIZE;
+ sum += sp_banks[i].num_bytes;
+ if (memory_size) {
+ if (sum > memory_size) {
+ sp_banks[i].num_bytes -=
+ (sum - memory_size);
+ cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
+ if (cur < 0) cur = 0;
+ total += SRMMU_PGDIR_ALIGN(cur);
+ sum = memory_size;
+ sp_banks[++i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+ break;
+ }
+ }
+ total += SRMMU_PGDIR_ALIGN(cur);
+ last = (sp_banks[i].base_addr + sp_banks[i].num_bytes - 1) & SRMMU_PGDIR_MASK;
+ }
+ if (total <= 0x0d000000)
+ *end_mem_p = KERNBASE + total;
+ else {
+ *end_mem_p = 0xfd000000;
+ lots_of_ram = 1;
+ }
+ end_of_phys_memory = total;
+}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
/* Create a third-level SRMMU 16MB page mapping. */
-static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base)
+__initfunc(static void do_large_mapping(unsigned long vaddr, unsigned long phys_base))
{
pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
unsigned long big_pte;
@@ -1563,47 +1602,6 @@ static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base
*pgdp = __pgd(big_pte);
}
-/* Create second-level SRMMU 256K medium sized page mappings. */
-static inline void do_medium_mapping(unsigned long vaddr, unsigned long vend,
- unsigned long phys_base)
-{
- pgd_t *pgdp;
- pmd_t *pmdp;
- unsigned long medium_pte;
-
- MKTRACE(("dmm[v<%08lx,%08lx>-->p<%08lx>]", vaddr, vend, phys_base));
- while(vaddr < vend) {
- pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
- pmdp = srmmu_early_pmd_offset(pgdp, vaddr);
- medium_pte = KERNEL_PTE(phys_base >> 4);
- *pmdp = __pmd(medium_pte);
- phys_base += SRMMU_PMD_SIZE;
- vaddr += SRMMU_PMD_SIZE;
- }
-}
-
-/* Create a normal set of SRMMU page mappings for the virtual range
- * START to END, using physical pages beginning at PHYS_BASE.
- */
-static inline void do_small_mapping(unsigned long start, unsigned long end,
- unsigned long phys_base)
-{
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- MKTRACE(("dsm[v<%08lx,%08lx>-->p<%08lx>]", start, end, phys_base));
- while(start < end) {
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- pmdp = srmmu_early_pmd_offset(pgdp, start);
- ptep = srmmu_early_pte_offset(pmdp, start);
-
- *ptep = __pte(KERNEL_PTE(phys_base >> 4));
- phys_base += PAGE_SIZE;
- start += PAGE_SIZE;
- }
-}
-
/* Look in the sp_bank for the given physical page, return the
* index number the entry was found in, or -1 for not found.
*/
@@ -1625,7 +1623,7 @@ static inline int find_in_spbanks(unsigned long phys_page)
* array of char's, each member indicating if that spbank is mapped
* yet or not.
*/
-static inline int find_free_spbank(char *taken_vector)
+__initfunc(static int find_free_spbank(char *taken_vector))
{
int entry;
@@ -1635,78 +1633,28 @@ static inline int find_free_spbank(char *taken_vector)
return entry;
}
-/* Same as above, but with a given bank size limit BLIMIT. */
-static inline int find_free_spbank_limited(char *taken_vector, unsigned long limit)
-{
- int entry;
-
- for(entry = 0; sp_banks[entry].num_bytes; entry++)
- if(!taken_vector[entry] &&
- (sp_banks[entry].num_bytes < limit))
- break;
- return entry;
-}
+static unsigned long map_spbank_last_pa __initdata = 0xff000000;
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
- * This routine is expected to update the srmmu_map and try as
- * hard as possible to use 16MB level-one SRMMU pte's when at all
- * possible to get short termination and faster translations.
*/
-static inline unsigned long map_spbank(unsigned long vbase, int sp_entry)
+__initfunc(static unsigned long map_spbank(unsigned long vbase, int sp_entry))
{
- unsigned long pstart = sp_banks[sp_entry].base_addr;
- unsigned long vstart = vbase;
- unsigned long vend = vbase + sp_banks[sp_entry].num_bytes;
+ unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
+ unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
+ unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
static int srmmu_bank = 0;
- /* If physically not aligned on 16MB boundry, just shortcut
- * right here by mapping them with 4k normal pages, and bumping
- * the next virtual address to the next 16MB boundry. You can
- * get this with various RAM configurations due to the way in
- * which the PROM carves out it's own chunks of memory.
- */
- if(pstart & ~SRMMU_PGDIR_MASK) {
- do_small_mapping(vstart, vend, pstart);
- vstart = SRMMU_PGDIR_ALIGN(vend);
- goto finish_up;
- }
+ MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry, vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
+ MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry, pstart, vstart, vend));
while(vstart < vend) {
- unsigned long coverage, next_aligned;
- if(vstart & ~SRMMU_PMD_MASK) {
- next_aligned = SRMMU_PMD_ALIGN(vstart);
- if(next_aligned <= vend) {
- coverage = (next_aligned - vstart);
- do_small_mapping(vstart, next_aligned, pstart);
- } else {
- coverage = (vend - vstart);
- do_small_mapping(vstart, vend, pstart);
- }
- } else if(vstart & ~SRMMU_PGDIR_MASK) {
- next_aligned = SRMMU_PGDIR_ALIGN(vstart);
- if(next_aligned <= vend) {
- coverage = (next_aligned - vstart);
- do_medium_mapping(vstart, next_aligned, pstart);
- } else {
- coverage = (vend - vstart);
- do_small_mapping(vstart, vend, pstart);
- }
- } else {
- coverage = SRMMU_PGDIR_SIZE;
- if(large_pte_optimize || ((vstart+coverage)<=vend)) {
- do_large_mapping(vstart, pstart);
- } else {
- coverage = (vend - vstart);
- do_small_mapping(vstart, vend, pstart);
- }
- }
- vstart += coverage; pstart += coverage;
+ do_large_mapping(vstart, pstart);
+ vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
}
-finish_up:
srmmu_map[srmmu_bank].vbase = vbase;
srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
- MKTRACE(("SRMMUBANK[v<%08lx>p<%08lx>s<%08lx>]", vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
srmmu_bank++;
+ map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE;
return vstart;
}
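
With the small- and medium-page paths removed, map_spbank() now rounds every bank out to pgdir boundaries and maps it purely with level-one entries, while map_spbank_last_pa lets the next bank notice that its leading, unaligned pgdir has already been covered. A worked example of the rounding, assuming a 16MB SRMMU_PGDIR_SIZE as the (1 << 24) strides elsewhere in this file suggest (the constants below are assumptions for the sketch, not copied from the headers):

	/* Sketch of the pgdir rounding used above. */
	#include <stdio.h>

	#define PGDIR_SIZE	0x1000000UL			/* 16MB per level-one entry */
	#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
	#define PGDIR_ALIGN(a)	(((a) + PGDIR_SIZE - 1) & PGDIR_MASK)

	int main(void)
	{
		unsigned long base = 0x00400000UL;		/* bank starts 4MB into a pgdir */
		unsigned long size = 0x01800000UL;		/* 24MB bank			*/
		unsigned long pstart = base & PGDIR_MASK;	/* rounded down: 0x00000000	*/
		unsigned long pend = PGDIR_ALIGN(base + size);	/* rounded up:   0x02000000	*/

		/* Two 16MB level-one mappings cover the 24MB bank and its ragged edges. */
		printf("%lu pgdir entries\n", (pend - pstart) / PGDIR_SIZE);
		return 0;
	}
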
@@ -1721,10 +1669,10 @@ static inline void memprobe_error(char *msg)
* is part of a full bank which is at least 4MB in size and begins at
* 0xf0000000 (ie. KERNBASE).
*/
-static void map_kernel(void)
+static inline void map_kernel(void)
{
unsigned long raw_pte, physpage;
- unsigned long vaddr, tally, low_base;
+ unsigned long vaddr, low_base;
char etaken[SPARC_PHYS_BANKS];
int entry;
@@ -1735,17 +1683,7 @@ static void map_kernel(void)
low_base = KERNBASE;
- /* Step 2: Calculate 'lots_of_ram'. */
- tally = 0;
- for(entry = 0; sp_banks[entry].num_bytes; entry++)
- tally += sp_banks[entry].num_bytes;
- if(tally > (0xfd000000 - KERNBASE))
- lots_of_ram = 1;
- else
- lots_of_ram = 0;
- MKTRACE(("tally=%08lx lots_of_ram<%d>\n", tally, lots_of_ram));
-
- /* Step 3: Fill in KERNBASE base pgd. Lots of sanity checking here. */
+ /* Step 2: Fill in KERNBASE base pgd. Lots of sanity checking here. */
raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
@@ -1757,11 +1695,10 @@ static void map_kernel(void)
if(entry == -1 || (sp_banks[entry].base_addr != physpage))
memprobe_error("Kernel mapped in non-existant memory.\n");
MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
- if(((KERNBASE + (sp_banks[entry].num_bytes)) > 0xfd000000) ||
- ((KERNBASE + (sp_banks[entry].num_bytes)) < KERNBASE)) {
+ if (sp_banks[entry].num_bytes > 0x0d000000) {
unsigned long orig_base = sp_banks[entry].base_addr;
unsigned long orig_len = sp_banks[entry].num_bytes;
- unsigned long can_map = (0xfd000000 - KERNBASE);
+ unsigned long can_map = 0x0d000000;
/* Map a partial bank in this case, adjust the base
* and the length, but don't mark it used.
@@ -1779,7 +1716,7 @@ static void map_kernel(void)
vaddr = map_spbank(KERNBASE, entry);
etaken[entry] = 1;
- /* Step 4: Map what we can above KERNBASE. */
+ /* Step 3: Map what we can above KERNBASE. */
MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
for(;;) {
unsigned long bank_size;
@@ -1790,8 +1727,14 @@ static void map_kernel(void)
MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
if(!bank_size)
break;
- if(((vaddr + bank_size) > 0xfd000000) ||
- ((vaddr + bank_size) < KERNBASE)) {
+ if (srmmu_low_pa)
+ vaddr = KERNBASE + sp_banks[entry].base_addr;
+ else if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
+ if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
+ vaddr -= SRMMU_PGDIR_SIZE;
+ vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
+ }
+ if ((vaddr + bank_size - KERNBASE) > 0x0d000000) {
unsigned long orig_base = sp_banks[entry].base_addr;
unsigned long orig_len = sp_banks[entry].num_bytes;
unsigned long can_map = (0xfd000000 - vaddr);
@@ -1808,8 +1751,6 @@ static void map_kernel(void)
MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
break;
}
- if(!bank_size)
- break;
/* Ok, we can map this one, do it. */
MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
@@ -1823,22 +1764,16 @@ loop_skip:
if(!lots_of_ram)
goto check_and_return;
- /* Step 5: Map the rest (if any) right below KERNBASE. */
+ /* Step 4: Map the rest (if any) right below KERNBASE. */
MKTRACE(("map_kernel: doing low mappings... "));
- tally = 0;
- for(entry = 0; sp_banks[entry].num_bytes; entry++) {
- if(!etaken[entry])
- tally += SRMMU_PGDIR_ALIGN(sp_banks[entry].num_bytes);
- }
- if(!tally)
- memprobe_error("Whee, lots_of_ram yet no low pages to map.\n");
- low_base = (KERNBASE - tally);
- MKTRACE(("tally=%08lx low_base=%08lx\n", tally, low_base));
+ low_base = (KERNBASE - end_of_phys_memory + 0x0d000000);
+ MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory, low_base));
/* Ok, now map 'em. */
MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
vaddr = low_base;
+ map_spbank_last_pa = 0xff000000;
MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
for(;;) {
unsigned long bank_size;
@@ -1848,19 +1783,22 @@ loop_skip:
MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
if(!bank_size)
break;
+ if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
+ if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
+ vaddr -= SRMMU_PGDIR_SIZE;
+ vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
+ }
if((vaddr + bank_size) > KERNBASE)
memprobe_error("Wheee, kernel low mapping overflow.\n");
MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
vaddr = map_spbank(vaddr, entry);
etaken[entry] = 1;
- tally -= SRMMU_PGDIR_ALIGN(bank_size);
- MKTRACE(("Now, vaddr=%08lx tally=%08lx\n", vaddr, tally));
+ MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr, end_of_phys_memory));
}
MKTRACE(("\n"));
- if(tally)
- memprobe_error("Wheee, did not map all of low mappings.\n");
+
check_and_return:
- /* Step 6: Sanity check, make sure we did it all. */
+ /* Step 5: Sanity check, make sure we did it all. */
MKTRACE(("check_and_return: "));
for(entry = 0; sp_banks[entry].num_bytes; entry++) {
MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
@@ -1872,6 +1810,10 @@ check_and_return:
MKTRACE(("success\n"));
init_task.mm->mmap->vm_start = page_offset = low_base;
stack_top = page_offset - PAGE_SIZE;
+ BTFIXUPSET_SETHI(page_offset, low_base);
+ BTFIXUPSET_SETHI(stack_top, page_offset - PAGE_SIZE);
+ BTFIXUPSET_SIMM13(user_ptrs_per_pgd, page_offset / SRMMU_PGDIR_SIZE);
+
#if 1
for(entry = 0; srmmu_map[entry].size; entry++) {
printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry,
@@ -1884,90 +1826,73 @@ check_and_return:
/* Now setup the p2v/v2p hash tables. */
for(entry = 0; entry < SRMMU_HASHSZ; entry++)
- srmmu_v2p_hash[entry] = srmmu_p2v_hash[entry] = NULL;
+ srmmu_v2p_hash[entry] = ((0xff - entry) << 24);
+ for(entry = 0; entry < SRMMU_HASHSZ; entry++)
+ srmmu_p2v_hash[entry] = 0xffffffffUL;
for(entry = 0; srmmu_map[entry].size; entry++) {
unsigned long addr;
for(addr = srmmu_map[entry].vbase;
addr < (srmmu_map[entry].vbase + srmmu_map[entry].size);
addr += (1 << 24))
- srmmu_v2p_hash[srmmu_ahashfn(addr)] = &srmmu_map[entry];
+ srmmu_v2p_hash[srmmu_ahashfn(addr)] =
+ srmmu_map[entry].pbase - srmmu_map[entry].vbase;
for(addr = srmmu_map[entry].pbase;
addr < (srmmu_map[entry].pbase + srmmu_map[entry].size);
addr += (1 << 24))
- srmmu_p2v_hash[srmmu_ahashfn(addr)] = &srmmu_map[entry];
+ srmmu_p2v_hash[srmmu_ahashfn(addr)] =
+ srmmu_map[entry].pbase - srmmu_map[entry].vbase;
}
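
The v2p/p2v hash tables now hold a per-16MB-slot offset (pbase - vbase) instead of a pointer into srmmu_map, so a translation is just one table lookup plus an add (or subtract); slots with no memory behind them are primed with sentinel values above so that a miss yields an obviously bogus address. A stand-alone sketch of the offset-table idea, with the hash reduced to the slot bits of the address (an assumption for the model, not the kernel's srmmu_ahashfn):

	/* Toy model of offset-based VA<->PA translation over 16MB slots. */
	#include <stdio.h>

	#define SLOT_SHIFT	24				/* one slot per 16MB */
	#define NSLOTS		256

	static unsigned long v2p_off[NSLOTS];			/* pa - va, indexed by VA slot */
	static unsigned long p2v_off[NSLOTS];			/* pa - va, indexed by PA slot */

	static unsigned long v2p(unsigned long va)
	{
		return va + v2p_off[(va >> SLOT_SHIFT) & (NSLOTS - 1)];
	}

	static unsigned long p2v(unsigned long pa)
	{
		return pa - p2v_off[(pa >> SLOT_SHIFT) & (NSLOTS - 1)];
	}

	int main(void)
	{
		/* One bank: 32MB of RAM at physical 0x02000000, mapped at 0xf0000000. */
		unsigned long vbase = 0xf0000000UL, pbase = 0x02000000UL, size = 0x02000000UL;
		unsigned long a;

		for (a = 0; a < size; a += 1UL << SLOT_SHIFT) {
			v2p_off[((vbase + a) >> SLOT_SHIFT) & (NSLOTS - 1)] = pbase - vbase;
			p2v_off[((pbase + a) >> SLOT_SHIFT) & (NSLOTS - 1)] = pbase - vbase;
		}
		printf("v2p(%#lx) = %#lx\n", vbase + 0x1234, v2p(vbase + 0x1234));
		return 0;
	}
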
- page_contig_offset = page_offset - (0xfd000000 - KERNBASE);
- phys_mem_contig = 1;
- for(entry = 0; srmmu_map[entry].size; entry++)
- if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) {
- phys_mem_contig = 0;
- break;
- }
- if (phys_mem_contig) {
- printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes\n");
- pte_page = srmmu_c_pte_page;
- pmd_page = srmmu_c_pmd_page;
- pgd_page = srmmu_c_pgd_page;
- mk_pte = srmmu_c_mk_pte;
- pte_offset = srmmu_c_pte_offset;
- pmd_offset = srmmu_c_pmd_offset;
- if (ctxd_set == srmmu_ctxd_set)
- ctxd_set = srmmu_c_ctxd_set;
- pgd_set = srmmu_c_pgd_set;
- pmd_set = srmmu_c_pmd_set;
- mmu_v2p = srmmu_c_v2p;
- mmu_p2v = srmmu_c_p2v;
- if (flush_chunk == viking_flush_chunk)
- flush_chunk = viking_c_flush_chunk;
- }
-
- if (sparc_cpu_model == sun4d) {
- int i, j = -1;
- unsigned long bank_start, bank_end;
-
- sun4d_dma_vbase = 0;
- sun4d_dma_size = IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE;
- for (i = 0; srmmu_map[i].size; i++) {
- bank_start = srmmu_map[i].vbase;
- bank_end = bank_start + srmmu_map[i].size;
- if (bank_start <= KERNBASE && bank_end > KERNBASE)
- j = i;
- else if (srmmu_map[i].size >= sun4d_dma_size) {
- sun4d_dma_vbase = srmmu_map[i].vbase;
+ BTFIXUPSET_SETHI(page_contig_offset, page_offset - (0xfd000000 - KERNBASE));
+ if (srmmu_low_pa)
+ phys_mem_contig = 0;
+ else {
+ phys_mem_contig = 1;
+ for(entry = 0; srmmu_map[entry].size; entry++)
+ if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) {
+ phys_mem_contig = 0;
break;
}
- }
- if (!sun4d_dma_vbase && j != -1) {
- if (srmmu_map[j].size >= sun4d_dma_size + 0x1000000)
- sun4d_dma_vbase = srmmu_map[j].vbase + 0x1000000;
- else {
- sun4d_dma_vbase = srmmu_map[j].vbase;
- if (srmmu_map[j].size < sun4d_dma_size)
- sun4d_dma_size = srmmu_map[j].size;
- }
- }
- sun4d_dma_base = IOUNIT_DMA_BASE - srmmu_v2p(sun4d_dma_vbase);
}
+ if (phys_mem_contig) {
+ printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n");
+ BTFIXUPSET_CALL(pte_page, srmmu_c_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_page, srmmu_c_pmd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page, srmmu_c_pgd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte, srmmu_c_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset, srmmu_c_pte_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_offset, srmmu_c_pmd_offset, BTFIXUPCALL_NORM);
+ if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
+ BTFIXUPSET_CALL(ctxd_set, srmmu_c_ctxd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_set, srmmu_c_pgd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_set, srmmu_c_pmd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_v2p, srmmu_c_v2p, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_p2v, srmmu_c_p2v, BTFIXUPCALL_NORM);
+ if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
+ BTFIXUPSET_CALL(flush_chunk, viking_c_flush_chunk, BTFIXUPCALL_NORM);
+ } else if (srmmu_low_pa) {
+ printk ("SRMMU: Compact physical memory. Using strightforward VA<->PA translations.\n");
+ BTFIXUPSET_CALL(pte_page, srmmu_s_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_page, srmmu_s_pmd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page, srmmu_s_pgd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte, srmmu_s_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset, srmmu_s_pte_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_offset, srmmu_s_pmd_offset, BTFIXUPCALL_NORM);
+ if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
+ BTFIXUPSET_CALL(ctxd_set, srmmu_s_ctxd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_set, srmmu_s_pgd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_set, srmmu_s_pmd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_v2p, srmmu_s_v2p, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_p2v, srmmu_s_p2v, BTFIXUPCALL_NORM);
+ if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
+ BTFIXUPSET_CALL(flush_chunk, viking_s_flush_chunk, BTFIXUPCALL_NORM);
+ }
+ btfixup();
return; /* SUCCESS! */
}
-unsigned long srmmu_endmem_fixup(unsigned long mem_end_now)
-{
- unsigned long tally = 0;
- int i;
-
- for(i = 0; sp_banks[i].num_bytes; i++)
- tally += SRMMU_PGDIR_ALIGN(sp_banks[i].num_bytes);
- if(tally < (0x0d000000UL)) {
- return KERNBASE + tally;
- } else {
- return 0xfd000000UL;
- }
-}
-
/* Paging initialization on the Sparc Reference MMU. */
extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sparc_context_init(unsigned long, int);
@@ -1975,9 +1900,9 @@ extern unsigned long sparc_context_init(unsigned long, int);
extern int physmem_mapped_contig;
extern int linux_num_cpus;
-void (*poke_srmmu)(void);
+void (*poke_srmmu)(void) __initdata = NULL;
-unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
+__initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
{
unsigned long ptables_start;
int i, cpunode;
@@ -2029,7 +1954,7 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
start_mem = PAGE_ALIGN(mempool);
flush_cache_all();
- if(flush_page_for_dma == viking_flush_page) {
+ if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) {
unsigned long start = ptables_start;
unsigned long end = start_mem;
@@ -2048,37 +1973,22 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
return PAGE_ALIGN(start_mem);
}
-static char srmmuinfo[512];
-
-static char *srmmu_mmu_info(void)
+static int srmmu_mmu_info(char *buf)
{
- sprintf(srmmuinfo, "MMU type\t: %s\n"
+ return sprintf(buf,
+ "MMU type\t: %s\n"
"invall\t\t: %d\n"
"invmm\t\t: %d\n"
"invrnge\t\t: %d\n"
"invpg\t\t: %d\n"
"contexts\t: %d\n"
-#ifdef USE_CHUNK_ALLOC
- "big chunks\t: %d\n"
- "little chunks\t: %d\n"
- "chunk pages\t: %d\n"
- "garbage\t\t: %d\n"
- "garbage hits\t: %d\n"
-#endif
, srmmu_name,
module_stats.invall,
module_stats.invmm,
module_stats.invrnge,
module_stats.invpg,
num_contexts
-#ifdef USE_CHUNK_ALLOC
- , bcwater, lcwater,
- chunk_pages,
- garbage_calls,
- clct_pages
-#endif
- );
- return srmmuinfo;
+ );
}
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
@@ -2242,7 +2152,7 @@ __initfunc(static void init_vac_layout(void))
(int)vac_cache_size, (int)vac_line_size);
}
-static void poke_hypersparc(void)
+__initfunc(static void poke_hypersparc(void))
{
volatile unsigned long clear;
unsigned long mreg = srmmu_get_mmureg();
@@ -2271,35 +2181,38 @@ __initfunc(static void init_hypersparc(void))
init_vac_layout();
- set_pte = srmmu_set_pte_nocache_hyper;
- flush_cache_all = hypersparc_flush_cache_all;
- flush_cache_mm = hypersparc_flush_cache_mm;
- flush_cache_range = hypersparc_flush_cache_range;
- flush_cache_page = hypersparc_flush_cache_page;
-
- flush_tlb_all = hypersparc_flush_tlb_all;
- flush_tlb_mm = hypersparc_flush_tlb_mm;
- flush_tlb_range = hypersparc_flush_tlb_range;
- flush_tlb_page = hypersparc_flush_tlb_page;
-
- flush_page_to_ram = hypersparc_flush_page_to_ram;
- flush_sig_insns = hypersparc_flush_sig_insns;
- flush_page_for_dma = NULL /* hypersparc_flush_page_for_dma */;
-
- flush_chunk = hypersparc_flush_chunk; /* local flush _only_ */
-
- ctxd_set = hypersparc_ctxd_set;
- switch_to_context = hypersparc_switch_to_context;
- init_new_context = hypersparc_init_new_context;
- destroy_context = hypersparc_destroy_context;
- update_mmu_cache = srmmu_vac_update_mmu_cache;
- sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
+ BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
+
+ BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM);
poke_srmmu = poke_hypersparc;
hypersparc_setup_blockops();
}
-static void poke_cypress(void)
+__initfunc(static void poke_cypress(void))
{
unsigned long mreg = srmmu_get_mmureg();
unsigned long faddr, tagval;
@@ -2342,25 +2255,28 @@ __initfunc(static void init_cypress_common(void))
{
init_vac_layout();
- set_pte = srmmu_set_pte_nocache_cypress;
- flush_cache_all = cypress_flush_cache_all;
- flush_cache_mm = cypress_flush_cache_mm;
- flush_cache_range = cypress_flush_cache_range;
- flush_cache_page = cypress_flush_cache_page;
+ BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
- flush_tlb_all = cypress_flush_tlb_all;
- flush_tlb_mm = cypress_flush_tlb_mm;
- flush_tlb_page = cypress_flush_tlb_page;
- flush_tlb_range = cypress_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
- flush_chunk = cypress_flush_chunk; /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
- flush_page_to_ram = cypress_flush_page_to_ram;
- flush_sig_insns = cypress_flush_sig_insns;
- flush_page_for_dma = NULL /* cypress_flush_page_for_dma */;
- sparc_update_rootmmu_dir = cypress_update_rootmmu_dir;
+ BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM);
- update_mmu_cache = srmmu_vac_update_mmu_cache;
+ BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
poke_srmmu = poke_cypress;
}
@@ -2388,7 +2304,7 @@ __initfunc(static void init_cypress_605(unsigned long mrev))
init_cypress_common();
}
-static void poke_swift(void)
+__initfunc(static void poke_swift(void))
{
unsigned long mreg = srmmu_get_mmureg();
@@ -2456,21 +2372,23 @@ __initfunc(static void init_swift(void))
break;
};
- flush_cache_all = swift_flush_cache_all;
- flush_cache_mm = swift_flush_cache_mm;
- flush_cache_page = swift_flush_cache_page;
- flush_cache_range = swift_flush_cache_range;
+ BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
- flush_chunk = swift_flush_chunk; /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
- flush_tlb_all = swift_flush_tlb_all;
- flush_tlb_mm = swift_flush_tlb_mm;
- flush_tlb_page = swift_flush_tlb_page;
- flush_tlb_range = swift_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
- flush_page_to_ram = swift_flush_page_to_ram;
- flush_sig_insns = swift_flush_sig_insns;
- flush_page_for_dma = swift_flush_page_for_dma;
+ BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
/* Are you now convinced that the Swift is one of the
* biggest VLSI abortions of all time? Bravo Fujitsu!
@@ -2484,8 +2402,9 @@ __initfunc(static void init_swift(void))
/* turbosparc.S */
extern void turbosparc_flush_cache_all(void);
extern void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void turbosparc_flush_page_for_dma(unsigned long page);
-static void poke_turbosparc(void)
+__initfunc(static void poke_turbosparc(void))
{
unsigned long mreg = srmmu_get_mmureg();
unsigned long ccreg;
@@ -2529,31 +2448,31 @@ __initfunc(static void init_turbosparc(void))
srmmu_name = "Fujitsu TurboSparc";
srmmu_modtype = TurboSparc;
- flush_cache_all = turbosparc_flush_cache_all;
- flush_cache_mm = hypersparc_flush_cache_mm;
- flush_cache_page = hypersparc_flush_cache_page;
- flush_cache_range = hypersparc_flush_cache_range;
+ BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
- flush_tlb_all = hypersparc_flush_tlb_all;
- flush_tlb_mm = hypersparc_flush_tlb_mm;
- flush_tlb_page = hypersparc_flush_tlb_page;
- flush_tlb_range = hypersparc_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
#ifdef TURBOSPARC_WRITEBACK
- flush_page_to_ram = hypersparc_flush_page_to_ram;
- flush_chunk = hypersparc_flush_chunk;
+ BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM);
#else
- flush_page_to_ram = swift_flush_page_to_ram;
- flush_chunk = swift_flush_chunk;
+ BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP);
#endif
- flush_sig_insns = turbosparc_flush_sig_insns;
- flush_page_for_dma = NULL /* turbosparc_flush_page_for_dma */;
+ BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP);
poke_srmmu = poke_turbosparc;
}
-static void poke_tsunami(void)
+__initfunc(static void poke_tsunami(void))
{
unsigned long mreg = srmmu_get_mmureg();
@@ -2574,26 +2493,26 @@ __initfunc(static void init_tsunami(void))
srmmu_name = "TI Tsunami";
srmmu_modtype = Tsunami;
- flush_cache_all = tsunami_flush_cache_all;
- flush_cache_mm = tsunami_flush_cache_mm;
- flush_cache_page = tsunami_flush_cache_page;
- flush_cache_range = tsunami_flush_cache_range;
+ BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
- flush_chunk = tsunami_flush_chunk; /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
- flush_tlb_all = tsunami_flush_tlb_all;
- flush_tlb_mm = tsunami_flush_tlb_mm;
- flush_tlb_page = tsunami_flush_tlb_page;
- flush_tlb_range = tsunami_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
- flush_page_to_ram = tsunami_flush_page_to_ram;
- flush_sig_insns = tsunami_flush_sig_insns;
- flush_page_for_dma = tsunami_flush_page_for_dma;
+ BTFIXUPSET_CALL(flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
poke_srmmu = poke_tsunami;
}
-static void poke_viking(void)
+__initfunc(static void poke_viking(void))
{
unsigned long mreg = srmmu_get_mmureg();
static int smp_catch = 0;
@@ -2637,13 +2556,14 @@ static void poke_viking(void)
#ifdef __SMP__
/* Avoid unnecessary cross calls. */
- flush_cache_all = local_flush_cache_all;
- flush_cache_mm = local_flush_cache_mm;
- flush_cache_range = local_flush_cache_range;
- flush_cache_page = local_flush_cache_page;
- flush_page_to_ram = local_flush_page_to_ram;
- flush_sig_insns = local_flush_sig_insns;
- flush_page_for_dma = local_flush_page_for_dma;
+ BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
+ BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
+ BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
+ BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
+ BTFIXUPCOPY_CALL(flush_page_to_ram, local_flush_page_to_ram);
+ BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
+ BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+ btfixup();
#endif
}
@@ -2664,10 +2584,13 @@ __initfunc(static void init_viking(void))
msi_set_sync();
- set_pte = srmmu_set_pte_nocache_viking;
- sparc_update_rootmmu_dir = viking_update_rootmmu_dir;
+ BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM);
- flush_chunk = viking_flush_chunk; /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
/* We need this to make sure old viking takes no hits
* on its cache for dma snoops to work around the
@@ -2675,7 +2598,7 @@ __initfunc(static void init_viking(void))
* This is only necessary because of the new way in
* which we use the IOMMU.
*/
- flush_page_for_dma = viking_flush_page;
+ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
/* Also, this is so far the only chip which actually uses
the page argument to flush_page_for_dma */
flush_page_for_dma_global = 0;
@@ -2683,24 +2606,25 @@ __initfunc(static void init_viking(void))
srmmu_name = "TI Viking/MXCC";
viking_mxcc_present = 1;
- flush_chunk = viking_mxcc_flush_chunk; /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
/* MXCC vikings lack the DMA snooping bug. */
- flush_page_for_dma = NULL /* viking_flush_page_for_dma */;
+ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
}
- flush_cache_all = viking_flush_cache_all;
- flush_cache_mm = viking_flush_cache_mm;
- flush_cache_page = viking_flush_cache_page;
- flush_cache_range = viking_flush_cache_range;
+ /* flush_cache_* are nops */
+ BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NOP);
- flush_tlb_all = viking_flush_tlb_all;
- flush_tlb_mm = viking_flush_tlb_mm;
- flush_tlb_page = viking_flush_tlb_page;
- flush_tlb_range = viking_flush_tlb_range;
+ BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
- flush_page_to_ram = viking_flush_page_to_ram;
- flush_sig_insns = viking_flush_sig_insns;
+ BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
poke_srmmu = poke_viking;
}
@@ -2798,6 +2722,67 @@ __initfunc(static void get_srmmu_type(void))
srmmu_is_bad();
}
+/* Low and high watermarks for page table cache.
+ * The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
+ */
+extern int pgt_cache_water[2];
+
+void srmmu_check_pgt_cache(void)
+{
+ struct page *page, *page2;
+
+ if (pgtable_cache_size > pgt_cache_water[0]) {
+ spin_lock(&pte_spinlock);
+ for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
+ if ((unsigned int)page->pprev_hash == 0xffff) {
+ if (page2)
+ page2->next_hash = page->next_hash;
+ else
+ (struct page *)pte_quicklist = page->next_hash;
+ page->next_hash = NULL;
+ page->pprev_hash = NULL;
+ pgtable_cache_size -= 16;
+ free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ if (page2)
+ page = page2->next_hash;
+ else
+ page = (struct page *)pte_quicklist;
+ if (pgtable_cache_size <= pgt_cache_water[1])
+ break;
+ continue;
+ }
+ page2 = page;
+ page = page->next_hash;
+ }
+ spin_unlock(&pte_spinlock);
+ }
+ if (pgd_cache_size > pgt_cache_water[0] / 4) {
+ spin_lock(&pgd_spinlock);
+ for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
+ if ((unsigned int)page->pprev_hash == 0xf) {
+ if (page2)
+ page2->next_hash = page->next_hash;
+ else
+ (struct page *)pgd_quicklist = page->next_hash;
+ page->next_hash = NULL;
+ page->pprev_hash = NULL;
+ pgd_cache_size -= 4;
+ free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ if (page2)
+ page = page2->next_hash;
+ else
+ page = (struct page *)pgd_quicklist;
+ if (pgd_cache_size <= pgt_cache_water[1] / 4)
+ break;
+ continue;
+ }
+ page2 = page;
+ page = page->next_hash;
+ }
+ spin_unlock(&pgd_spinlock);
+ }
+}
+
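
srmmu_check_pgt_cache() is the reclaim side of the watermark comment above: once the caches have grown past the watermark, any cached page whose mask says every chunk is free again (0xffff for pte pages, 0xf for pgd pages) is unlinked from its quicklist and handed back to the page allocator. A minimal sketch of a conventional low/high-watermark trim over the toy chunk pages from the earlier example (invented names; the exact start/stop tests above differ slightly):

	/* Toy trim pass: free fully unused chunk pages above the high watermark. */
	#include <stdlib.h>

	struct cached_page {
		struct cached_page	*next;
		unsigned int		free_mask;	/* 0xffff => all 16 chunks free */
	};

	static struct cached_page	*quicklist;
	static int			cache_size;	/* free chunks currently cached */

	static void check_pgt_cache(int low, int high)
	{
		struct cached_page **pp = &quicklist;

		if (cache_size <= high)
			return;
		while (*pp) {
			struct cached_page *p = *pp;

			if (p->free_mask == 0xffff) {	/* nothing on this page in use	*/
				*pp = p->next;		/* unlink it ...		*/
				cache_size -= 16;
				free(p);		/* ... and give the page back	*/
				if (cache_size <= low)
					break;
				continue;
			}
			pp = &p->next;			/* partly in use, keep it	*/
		}
	}
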
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
tsetup_mmu_patchme, rtrap_mmu_patchme;
@@ -2810,7 +2795,7 @@ extern unsigned long srmmu_fault;
iaddr = &(insn); \
daddr = &(dest); \
*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
- } while(0);
+ } while(0);
__initfunc(static void patch_window_trap_handlers(void))
{
@@ -2829,7 +2814,7 @@ __initfunc(static void patch_window_trap_handlers(void))
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_flush_page_for_dma, page);
+ xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
}
#endif
@@ -2839,98 +2824,107 @@ __initfunc(void ld_mmu_srmmu(void))
{
extern void ld_mmu_iommu(void);
extern void ld_mmu_iounit(void);
+ extern void ___xchg32_sun4md(void);
/* First the constants */
- pmd_shift = SRMMU_PMD_SHIFT;
- pmd_size = SRMMU_PMD_SIZE;
- pmd_mask = SRMMU_PMD_MASK;
- pgdir_shift = SRMMU_PGDIR_SHIFT;
- pgdir_size = SRMMU_PGDIR_SIZE;
- pgdir_mask = SRMMU_PGDIR_MASK;
-
- ptrs_per_pte = SRMMU_PTRS_PER_PTE;
- ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
- ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
-
- page_none = SRMMU_PAGE_NONE;
- page_shared = SRMMU_PAGE_SHARED;
- page_copy = SRMMU_PAGE_COPY;
- page_readonly = SRMMU_PAGE_RDONLY;
- page_kernel = SRMMU_PAGE_KERNEL;
+ BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
+ BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
+ BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
+ BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
+ BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
+ BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
+
+ BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
+ BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
+
+ BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
+ BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
-
+
/* Functions */
- set_pte = srmmu_set_pte_cacheable;
- init_new_context = srmmu_init_new_context;
- switch_to_context = srmmu_switch_to_context;
- pmd_align = srmmu_pmd_align;
- pgdir_align = srmmu_pgdir_align;
- vmalloc_start = srmmu_vmalloc_start;
-
- pte_page = srmmu_pte_page;
- pmd_page = srmmu_pmd_page;
- pgd_page = srmmu_pgd_page;
-
- sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
-
- pte_none = srmmu_pte_none;
- pte_present = srmmu_pte_present;
- pte_clear = srmmu_pte_clear;
-
- pmd_none = srmmu_pmd_none;
- pmd_bad = srmmu_pmd_bad;
- pmd_present = srmmu_pmd_present;
- pmd_clear = srmmu_pmd_clear;
-
- pgd_none = srmmu_pgd_none;
- pgd_bad = srmmu_pgd_bad;
- pgd_present = srmmu_pgd_present;
- pgd_clear = srmmu_pgd_clear;
-
- mk_pte = srmmu_mk_pte;
- mk_pte_phys = srmmu_mk_pte_phys;
- pgd_set = srmmu_pgd_set;
- mk_pte_io = srmmu_mk_pte_io;
- pte_modify = srmmu_pte_modify;
- pgd_offset = srmmu_pgd_offset;
- pmd_offset = srmmu_pmd_offset;
- pte_offset = srmmu_pte_offset;
- pte_free_kernel = srmmu_pte_free_kernel;
- pmd_free_kernel = srmmu_pmd_free_kernel;
- pte_alloc_kernel = srmmu_pte_alloc_kernel;
- pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
- pte_free = srmmu_pte_free;
- pte_alloc = srmmu_pte_alloc;
- pmd_free = srmmu_pmd_free;
- pmd_alloc = srmmu_pmd_alloc;
- pgd_free = srmmu_pgd_free;
- pgd_alloc = srmmu_pgd_alloc;
-
- pte_write = srmmu_pte_write;
- pte_dirty = srmmu_pte_dirty;
- pte_young = srmmu_pte_young;
- pte_wrprotect = srmmu_pte_wrprotect;
- pte_mkclean = srmmu_pte_mkclean;
- pte_mkold = srmmu_pte_mkold;
- pte_mkwrite = srmmu_pte_mkwrite;
- pte_mkdirty = srmmu_pte_mkdirty;
- pte_mkyoung = srmmu_pte_mkyoung;
- update_mmu_cache = srmmu_update_mmu_cache;
- destroy_context = srmmu_destroy_context;
+#ifndef __SMP__
+ BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
+#endif
+ BTFIXUPSET_CALL(get_pte_fast, srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1);
+ BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_SETHI(none_mask, 0xF0000000);
+
+ BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
+
+ BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
+
+ BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
+
+ BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
- mmu_info = srmmu_mmu_info;
- mmu_v2p = srmmu_v2p;
- mmu_p2v = srmmu_p2v;
+ BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
+ BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
+ BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
+ BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
+ BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
+ BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
+ BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
+ BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
+ BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_v2p, srmmu_v2p, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_p2v, srmmu_p2v, BTFIXUPCALL_NORM);
/* Task struct and kernel stack allocating/freeing. */
- alloc_task_struct = srmmu_alloc_task_struct;
- free_task_struct = srmmu_free_task_struct;
+ BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
- quick_kernel_fault = srmmu_quick_kernel_fault;
+ BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM);
/* SRMMU specific. */
- ctxd_set = srmmu_ctxd_set;
- pmd_set = srmmu_pmd_set;
+ BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
get_srmmu_type();
patch_window_trap_handlers();
@@ -2938,33 +2932,38 @@ __initfunc(void ld_mmu_srmmu(void))
#ifdef __SMP__
/* El switcheroo... */
- local_flush_cache_all = flush_cache_all;
- local_flush_cache_mm = flush_cache_mm;
- local_flush_cache_range = flush_cache_range;
- local_flush_cache_page = flush_cache_page;
- local_flush_tlb_all = flush_tlb_all;
- local_flush_tlb_mm = flush_tlb_mm;
- local_flush_tlb_range = flush_tlb_range;
- local_flush_tlb_page = flush_tlb_page;
- local_flush_page_to_ram = flush_page_to_ram;
- local_flush_sig_insns = flush_sig_insns;
- local_flush_page_for_dma = flush_page_for_dma;
-
- flush_cache_all = smp_flush_cache_all;
- flush_cache_mm = smp_flush_cache_mm;
- flush_cache_range = smp_flush_cache_range;
- flush_cache_page = smp_flush_cache_page;
- flush_tlb_all = smp_flush_tlb_all;
- flush_tlb_mm = smp_flush_tlb_mm;
- flush_tlb_range = smp_flush_tlb_range;
- flush_tlb_page = smp_flush_tlb_page;
- flush_page_to_ram = smp_flush_page_to_ram;
- flush_sig_insns = smp_flush_sig_insns;
- if (flush_page_for_dma)
- flush_page_for_dma = smp_flush_page_for_dma;
+ BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
+ BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
+ BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
+ BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
+ BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
+ BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
+ BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
+ BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
+ BTFIXUPCOPY_CALL(local_flush_page_to_ram, flush_page_to_ram);
+ BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
+ BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
+
+ BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif
if (sparc_cpu_model == sun4d)
ld_mmu_iounit();
else
ld_mmu_iommu();
+#ifdef __SMP__
+ if (sparc_cpu_model == sun4d)
+ sun4d_init_smp();
+ else
+ sun4m_init_smp();
+#endif
}
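
The bulk of the ld_mmu_srmmu() changes above replace assignments to global function pointers (set_pte = srmmu_set_pte_cacheable; and friends) with BTFIXUPSET_CALL() entries, so generic code can use direct calls and a boot-time pass patches each call site once the MMU type is known (compare the PATCH_INSN() macro earlier in this diff). A rough sketch of that idea in C — the structures and the encode_call() helper are invented for illustration, not the kernel's btfixup API:

	struct fixup_entry {
		unsigned int *insn;	/* call site to rewrite   */
		void *target;		/* routine it must reach  */
	};

	#define MAX_FIXUPS 128
	static struct fixup_entry fixups[MAX_FIXUPS];
	static int nr_fixups;

	/* registration, roughly what a BTFIXUPSET_CALL() conversion records */
	static void record_fixup(unsigned int *insn, void *target)
	{
		fixups[nr_fixups].insn = insn;
		fixups[nr_fixups].target = target;
		nr_fixups++;
	}

	/* encode_call() stands in for building the branch/call word;
	 * on sparc that is the job SPARC_BRANCH() does in PATCH_INSN(). */
	extern unsigned int encode_call(unsigned int *from, void *to);

	static void apply_fixups(void)
	{
		int i;

		for (i = 0; i < nr_fixups; i++)
			*fixups[i].insn = encode_call(fixups[i].insn, fixups[i].target);
	}
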
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index c70753fa4..d247e1f2d 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1,11 +1,14 @@
-/* $Id: sun4c.c,v 1.149 1997/07/20 05:59:38 davem Exp $
+/* $Id: sun4c.c,v 1.163 1998/03/11 04:08:21 tdyas Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
+ * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -22,6 +25,7 @@
#include <asm/oplib.h>
#include <asm/openprom.h>
#include <asm/mmu_context.h>
+#include <asm/sun4paddr.h>
/* TODO: Make it such that interrupt handlers cannot dick with
* the user segment lists, most of the cli/sti pairs can
@@ -59,11 +63,15 @@ extern int num_segmaps, num_contexts;
: "g4", "cc"); \
} while(0);
-/* That's it, we prom_halt() if the cache size is something other than 65536.
+#ifdef CONFIG_SUN4
+#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
+#else
+/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
* So let's save some cycles and just use that everywhere except for that bootup
* sanity check.
*/
-#define SUN4C_VAC_SIZE 65536
+#define SUN4C_VAC_SIZE 65536
+#endif
#define SUN4C_KERNEL_BUCKETS 32
@@ -427,22 +435,76 @@ static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
sun4c_set_context(savectx);
}
-void sun4c_probe_vac(void)
+__initfunc(void sun4c_probe_vac(void))
{
sun4c_disable_vac();
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
- /* PROM on SS1 lacks this info, to be super safe we
- * hard code it here since this arch is cast in stone.
- */
- sun4c_vacinfo.num_bytes = 65536;
- sun4c_vacinfo.linesize = 16;
+
+ if (ARCH_SUN4) {
+ switch(idprom->id_machtype) {
+
+ case (SM_SUN4|SM_4_110):
+ sun4c_vacinfo.type = NONE;
+ sun4c_vacinfo.num_bytes = 0;
+ sun4c_vacinfo.linesize = 0;
+ sun4c_vacinfo.do_hwflushes = 0;
+ prom_printf("No VAC. Get some bucks and buy a real computer.");
+ prom_halt();
+ break;
+
+ case (SM_SUN4|SM_4_260):
+ sun4c_vacinfo.type = WRITE_BACK;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 16;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ case (SM_SUN4|SM_4_330):
+ sun4c_vacinfo.type = WRITE_THROUGH;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 16;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ case (SM_SUN4|SM_4_470):
+ sun4c_vacinfo.type = WRITE_BACK;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 32;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ default:
+			prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype);
+ prom_halt();
+ }
} else {
- sun4c_vacinfo.num_bytes = prom_getintdefault(prom_root_node,
- "vac-size", 65536);
- sun4c_vacinfo.linesize = prom_getintdefault(prom_root_node,
- "vac-linesize", 16);
+ sun4c_vacinfo.type = WRITE_THROUGH;
+
+ if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ /* PROM on SS1 lacks this info, to be super safe we
+ * hard code it here since this arch is cast in stone.
+ */
+ sun4c_vacinfo.num_bytes = 65536;
+ sun4c_vacinfo.linesize = 16;
+ } else {
+ sun4c_vacinfo.num_bytes =
+ prom_getintdefault(prom_root_node, "vac-size", 65536);
+ sun4c_vacinfo.linesize =
+ prom_getintdefault(prom_root_node, "vac-linesize", 16);
+ }
+ sun4c_vacinfo.do_hwflushes =
+ prom_getintdefault(prom_root_node, "vac-hwflush", 0);
+
+ if(sun4c_vacinfo.do_hwflushes == 0)
+ sun4c_vacinfo.do_hwflushes =
+ prom_getintdefault(prom_root_node, "vac_hwflush", 0);
+
+ if (sun4c_vacinfo.num_bytes != 65536) {
+ prom_printf("WEIRD Sun4C VAC cache size, tell davem");
+ prom_halt();
+ }
}
+
sun4c_vacinfo.num_lines =
(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
switch(sun4c_vacinfo.linesize) {
@@ -458,17 +520,6 @@ void sun4c_probe_vac(void)
prom_halt();
};
- sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node,
- "vac-hwflush", 0);
- if(sun4c_vacinfo.do_hwflushes == 0)
- sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node,
- "vac_hwflush", 0);
-
- if(sun4c_vacinfo.num_bytes != 65536) {
- prom_printf("WEIRD Sun4C VAC cache size, tell davem");
- prom_halt();
- }
-
sun4c_flush_all();
sun4c_enable_vac();
}
@@ -476,6 +527,7 @@ void sun4c_probe_vac(void)
/* Patch instructions for the low level kernel fault handler. */
extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
+extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
extern unsigned long num_context_patch1, num_context_patch1_16;
extern unsigned long num_context_patch2, num_context_patch2_16;
extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
@@ -502,6 +554,12 @@ static void patch_kernel_fault_handler(void)
PATCH_INSN(invalid_segment_patch2_ff,
invalid_segment_patch2);
break;
+ case 512:
+ PATCH_INSN(invalid_segment_patch1_1ff,
+ invalid_segment_patch1);
+ PATCH_INSN(invalid_segment_patch2_1ff,
+ invalid_segment_patch2);
+ break;
default:
prom_printf("Unhandled number of segmaps: %d\n",
num_segmaps);
@@ -541,38 +599,80 @@ static void patch_kernel_fault_handler(void)
}
}
-static void sun4c_probe_mmu(void)
+__initfunc(static void sun4c_probe_mmu(void))
{
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
- /* Hardcode these just to be safe, PROM on SS1 does
- * not have this info available in the root node.
- */
- num_segmaps = 128;
- num_contexts = 8;
+ if (ARCH_SUN4) {
+ switch(idprom->id_machtype) {
+ case (SM_SUN4|SM_4_110):
+ prom_printf("No support for 4100 yet\n");
+ prom_halt();
+ num_segmaps = 256;
+ num_contexts = 8;
+ break;
+
+ case (SM_SUN4|SM_4_260):
+ prom_printf("No support for 4200 yet\n");
+ prom_halt();
+ num_segmaps = 512;
+ num_contexts = 16;
+ break;
+
+ case (SM_SUN4|SM_4_330):
+ num_segmaps = 256;
+ num_contexts = 16;
+ break;
+
+ case (SM_SUN4|SM_4_470):
+ prom_printf("No support for 4400 yet\n");
+ prom_halt();
+ num_segmaps = 1024;
+ num_contexts = 64;
+ break;
+ default:
+ prom_printf("Invalid SUN4 model\n");
+ prom_halt();
+ }
} else {
- num_segmaps = prom_getintdefault(prom_root_node, "mmu-npmg", 128);
- num_contexts = prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
+ if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ /* Hardcode these just to be safe, PROM on SS1 does
+ * not have this info available in the root node.
+ */
+ num_segmaps = 128;
+ num_contexts = 8;
+ } else {
+ num_segmaps =
+ prom_getintdefault(prom_root_node, "mmu-npmg", 128);
+ num_contexts =
+ prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
+ }
}
patch_kernel_fault_handler();
}
volatile unsigned long *sun4c_memerr_reg = 0;
-void sun4c_probe_memerr_reg(void)
+__initfunc(void sun4c_probe_memerr_reg(void))
{
int node;
struct linux_prom_registers regs[1];
- node = prom_getchild(prom_root_node);
- node = prom_searchsiblings(prom_root_node, "memory-error");
- if (!node)
- return;
- prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
- sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
- regs[0].reg_size,
- "memory parity error",
- regs[0].which_io, 0);
+ if (ARCH_SUN4) {
+ sun4c_memerr_reg = sparc_alloc_io(SUN4_MEMREG_PHYSADDR, 0,
+ PAGE_SIZE,
+ "memory parity error",
+ 0x0, 0);
+ } else {
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(prom_root_node, "memory-error");
+ if (!node)
+ return;
+ prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
+ sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
+ regs[0].reg_size,
+ "memory parity error",
+ regs[0].which_io, 0);
+ }
}
static inline void sun4c_init_ss2_cache_bug(void)
@@ -581,6 +681,7 @@ static inline void sun4c_init_ss2_cache_bug(void)
if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
(idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
+ (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
(idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
/* Whee.. */
printk("SS2 cache bug detected, uncaching trap table page\n");
@@ -626,13 +727,14 @@ struct sun4c_mmu_entry {
unsigned char pseg;
unsigned char locked;
};
-static struct sun4c_mmu_entry mmu_entry_pool[256];
+
+static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
__initfunc(static void sun4c_init_mmu_entry_pool(void))
{
int i;
- for(i=0; i < 256; i++) {
+ for(i=0; i < SUN4C_MAX_SEGMAPS; i++) {
mmu_entry_pool[i].pseg = i;
mmu_entry_pool[i].next = 0;
mmu_entry_pool[i].prev = 0;
@@ -703,7 +805,8 @@ struct sun4c_mmu_ring {
struct sun4c_mmu_entry ringhd;
int num_entries;
};
-static struct sun4c_mmu_ring sun4c_context_ring[16]; /* used user entries */
+
+static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
@@ -711,7 +814,7 @@ struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
static inline void sun4c_init_rings(unsigned long *mempool)
{
int i;
- for(i=0; i<16; i++) {
+ for(i=0; i<SUN4C_MAX_CONTEXTS; i++) {
sun4c_context_ring[i].ringhd.next =
sun4c_context_ring[i].ringhd.prev =
&sun4c_context_ring[i].ringhd;
@@ -1120,7 +1223,7 @@ static int sun4c_lowbucket_avail;
#define BUCKET_PTE(page) \
((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
#define BUCKET_PTE_PAGE(pte) \
- (PAGE_OFFSET + (((pte) & 0xffff) << PAGE_SHIFT))
+ (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
static inline void get_locked_segment(unsigned long addr)
{
@@ -1180,12 +1283,18 @@ static inline void garbage_collect(int entry)
free_locked_segment(BUCKET_ADDR(entry));
}
+#ifdef CONFIG_SUN4
+#define TASK_STRUCT_ORDER 0
+#else
+#define TASK_STRUCT_ORDER 1
+#endif
+
static struct task_struct *sun4c_alloc_task_struct(void)
{
unsigned long addr, pages;
int entry;
- pages = __get_free_pages(GFP_KERNEL, 1);
+ pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER);
if(!pages)
return (struct task_struct *) 0;
@@ -1193,7 +1302,7 @@ static struct task_struct *sun4c_alloc_task_struct(void)
if(sun4c_bucket[entry] == BUCKET_EMPTY)
break;
if(entry == NR_TASKS) {
- free_pages(pages, 1);
+ free_pages(pages, TASK_STRUCT_ORDER);
return (struct task_struct *) 0;
}
if(entry >= sun4c_lowbucket_avail)
@@ -1204,8 +1313,9 @@ static struct task_struct *sun4c_alloc_task_struct(void)
if(sun4c_get_segmap(addr) == invalid_segment)
get_locked_segment(addr);
sun4c_put_pte(addr, BUCKET_PTE(pages));
+#ifndef CONFIG_SUN4
sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
-
+#endif
return (struct task_struct *) addr;
}
@@ -1217,15 +1327,18 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk)
/* We are deleting a mapping, so the flush here is mandatory. */
sun4c_flush_page_hw(tsaddr);
+#ifndef CONFIG_SUN4
sun4c_flush_page_hw(tsaddr + PAGE_SIZE);
-
+#endif
sun4c_put_pte(tsaddr, 0);
+#ifndef CONFIG_SUN4
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
+#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
if(entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
- free_pages(pages, 1);
+ free_pages(pages, TASK_STRUCT_ORDER);
garbage_collect(entry);
}
@@ -1237,15 +1350,18 @@ static void sun4c_free_task_struct_sw(struct task_struct *tsk)
/* We are deleting a mapping, so the flush here is mandatory. */
sun4c_flush_page_sw(tsaddr);
+#ifndef CONFIG_SUN4
sun4c_flush_page_sw(tsaddr + PAGE_SIZE);
-
+#endif
sun4c_put_pte(tsaddr, 0);
+#ifndef CONFIG_SUN4
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
+#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
if(entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
- free_pages(pages, 1);
+ free_pages(pages, TASK_STRUCT_ORDER);
garbage_collect(entry);
}
@@ -1253,9 +1369,8 @@ __initfunc(static void sun4c_init_buckets(void))
{
int entry;
- if(sizeof(union task_union) != (PAGE_SIZE << 1)) {
- prom_printf("task union not 2 pages!\n");
- prom_halt();
+ if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
+ prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER);
}
for(entry = 0; entry < NR_TASKS; entry++)
sun4c_bucket[entry] = BUCKET_EMPTY;
@@ -1949,12 +2064,17 @@ static void sun4c_set_pte(pte_t *ptep, pte_t pte)
*ptep = pte;
}
+static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+}
+
+
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus_type, int rdonly)
{
unsigned long page_entry;
- page_entry = ((physaddr >> PAGE_SHIFT) & 0xffff);
+ page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
if(rdonly)
page_entry &= ~_SUN4C_WRITEABLE;
@@ -2092,21 +2212,17 @@ static void sun4c_destroy_context_sw(struct mm_struct *mm)
}
}
-#if KGPROF_PROFILING
-static char s4cinfo[10240];
-#else
-static char s4cinfo[512];
-#endif
-
-static char *sun4c_mmu_info(void)
+static int sun4c_mmu_info(char *buf)
{
int used_user_entries, i;
+ int len;
used_user_entries = 0;
for(i=0; i < num_contexts; i++)
used_user_entries += sun4c_context_ring[i].num_entries;
- sprintf(s4cinfo, "vacsize\t\t: %d bytes\n"
+ len = sprintf(buf,
+ "vacsize\t\t: %d bytes\n"
"vachwflush\t: %s\n"
"vaclinesize\t: %d bytes\n"
"mmuctxs\t\t: %d\n"
@@ -2135,29 +2251,31 @@ static char *sun4c_mmu_info(void)
#if KGPROF_PROFILING
{
- char *p = s4cinfo + strlen(s4cinfo);
int i,j;
- sprintf(p,"kgprof profiling:\n"); p += strlen(p);
+ len += sprintf(buf + len,"kgprof profiling:\n");
for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
- sprintf(p,"%5d ",kgprof_counters[i].count); p += strlen(p);
+ len += sprintf(buf + len,"%5d ",kgprof_counters[i].count);
for (j=0;j<KGPROF_DEPTH;j++) {
- sprintf(p,"%08x ",kgprof_counters[i].addr[j]);
- p += strlen(p);
+ len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]);
}
- sprintf(p,"\n"); p += strlen(p);
+ len += sprintf(buf + len,"\n");
}
}
#endif
- return s4cinfo;
+ return len;
}
/* Nothing below here should touch the mmu hardware nor the mmu_entry
* data structures.
*/
+#if 0 /* Not used due to BTFIXUPs */
static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
+#endif
/* First the functions which the mid-level code uses to directly
* manipulate the software page tables. Some defines since we are
@@ -2170,12 +2288,17 @@ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_AL
#define PGD_DIRTY 0x040
#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
+#if 0 /* Not used due to BTFIXUPs */
static unsigned long sun4c_vmalloc_start(void)
{
return SUN4C_VMALLOC_START;
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
+#endif
+
static int sun4c_pte_present(pte_t pte)
{
return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
@@ -2204,35 +2327,47 @@ static void sun4c_pgd_clear(pgd_t * pgdp) { }
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
+#if 0 /* Not used due to BTFIXUPs */
static int sun4c_pte_write(pte_t pte)
{
return pte_val(pte) & _SUN4C_PAGE_WRITE;
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static int sun4c_pte_dirty(pte_t pte)
{
return pte_val(pte) & _SUN4C_PAGE_MODIFIED;
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static int sun4c_pte_young(pte_t pte)
{
return pte_val(pte) & _SUN4C_PAGE_ACCESSED;
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static pte_t sun4c_pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE));
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static pte_t sun4c_pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE));
}
+#endif
+#if 0 /* Not used due to BTFIXUPs */
static pte_t sun4c_pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ));
}
+#endif
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
@@ -2277,22 +2412,29 @@ static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
+#if 0 /* Not used due to BTFIXUPs */
static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) |
pgprot_val(newprot));
}
+#endif
static unsigned long sun4c_pte_page(pte_t pte)
{
- return (PAGE_OFFSET + ((pte_val(pte) & 0xffff) << (PAGE_SHIFT)));
+ return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT)));
}
-static unsigned long sun4c_pmd_page(pmd_t pmd)
+static inline unsigned long sun4c_pmd_page(pmd_t pmd)
{
return (pmd_val(pmd) & PAGE_MASK);
}
+static unsigned long sun4c_pgd_page(pgd_t pgd)
+{
+ return 0;
+}
+
/* to find an entry in a page-table-directory */
pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
{
@@ -2351,6 +2493,16 @@ static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
return (pte_t *) sun4c_pmd_page(*pmd) + address;
}
+static void sun4c_free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static void sun4c_free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
@@ -2364,16 +2516,73 @@ static pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
return (pmd_t *) pgd;
}
+extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else {
+ pgd_t *init;
+
+ ret = (unsigned long *)__get_free_page(GFP_KERNEL);
+ memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
+ init = pgd_offset(&init_mm, 0);
+ memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return (pgd_t *)ret;
+}
+
+static void sun4c_set_pgdir(unsigned long address, pgd_t entry)
+{
+ /* Nothing to do */
+}
+
+extern __inline__ void sun4c_free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ pte_t *sun4c_get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void sun4c_free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
static void sun4c_pte_free(pte_t *pte)
{
- free_page((unsigned long) pte);
+ sun4c_free_pte_fast(pte);
}
static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
if (sun4c_pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
+ pte_t *page = (pte_t *) sun4c_get_pte_fast();
+
+ if (page) {
+ *pmd = __pmd(PGD_TABLE | (unsigned long) page);
+ return page + address;
+ }
+ page = (pte_t *) get_free_page(GFP_KERNEL);
if (sun4c_pmd_none(*pmd)) {
if (page) {
*pmd = __pmd(PGD_TABLE | (unsigned long) page);
@@ -2392,13 +2601,17 @@ static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
return (pte_t *) sun4c_pmd_page(*pmd) + address;
}
+static pte_t *sun4c_pte_get(void)
+{
+ return sun4c_get_pte_fast();
+}
+
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
static void sun4c_pmd_free(pmd_t * pmd)
{
- *pmd = __pmd(0);
}
static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
@@ -2408,12 +2621,12 @@ static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
static void sun4c_pgd_free(pgd_t *pgd)
{
- free_page((unsigned long) pgd);
+ sun4c_free_pgd_fast(pgd);
}
static pgd_t *sun4c_pgd_alloc(void)
{
- return (pgd_t *) get_free_page(GFP_KERNEL);
+ return sun4c_get_pgd_fast();
}
/* There are really two cases of aliases to watch out for, and these
@@ -2435,12 +2648,13 @@ static pgd_t *sun4c_pgd_alloc(void)
*/
static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
- struct dentry *dentry;
+ struct dentry *dentry = NULL;
struct inode *inode = NULL;
pgd_t *pgdp;
pte_t *ptep;
- dentry = vma->vm_dentry;
+ if (vma->vm_file)
+ dentry = vma->vm_file->f_dentry;
if(dentry)
inode = dentry->d_inode;
if(inode) {
@@ -2556,134 +2770,147 @@ __initfunc(unsigned long sun4c_paging_init(unsigned long start_mem, unsigned lon
/* Load up routines and constants for sun4c mmu */
__initfunc(void ld_mmu_sun4c(void))
{
+ extern void ___xchg32_sun4c(void);
+
printk("Loading sun4c MMU routines\n");
/* First the constants */
- pmd_shift = SUN4C_PMD_SHIFT;
- pmd_size = SUN4C_PMD_SIZE;
- pmd_mask = SUN4C_PMD_MASK;
- pgdir_shift = SUN4C_PGDIR_SHIFT;
- pgdir_size = SUN4C_PGDIR_SIZE;
- pgdir_mask = SUN4C_PGDIR_MASK;
-
- ptrs_per_pte = SUN4C_PTRS_PER_PTE;
- ptrs_per_pmd = SUN4C_PTRS_PER_PMD;
- ptrs_per_pgd = SUN4C_PTRS_PER_PGD;
-
- page_none = SUN4C_PAGE_NONE;
- page_shared = SUN4C_PAGE_SHARED;
- page_copy = SUN4C_PAGE_COPY;
- page_readonly = SUN4C_PAGE_READONLY;
- page_kernel = SUN4C_PAGE_KERNEL;
+ BTFIXUPSET_SIMM13(pmd_shift, SUN4C_PMD_SHIFT);
+ BTFIXUPSET_SETHI(pmd_size, SUN4C_PMD_SIZE);
+ BTFIXUPSET_SETHI(pmd_mask, SUN4C_PMD_MASK);
+ BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
+ BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
+ BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
+
+ BTFIXUPSET_SIMM13(ptrs_per_pte, SUN4C_PTRS_PER_PTE);
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
+ BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
+ BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
+
+ BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
+ BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
/* Functions */
- flush_cache_all = sun4c_flush_cache_all;
+#ifndef __SMP__
+ BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
+#endif
+ BTFIXUPSET_CALL(get_pte_fast, sun4c_pte_get, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
if(sun4c_vacinfo.do_hwflushes) {
- flush_cache_mm = sun4c_flush_cache_mm_hw;
- flush_cache_range = sun4c_flush_cache_range_hw;
- flush_cache_page = sun4c_flush_cache_page_hw;
- flush_page_to_ram = sun4c_flush_page_to_ram_hw;
- flush_tlb_mm = sun4c_flush_tlb_mm_hw;
- flush_tlb_range = sun4c_flush_tlb_range_hw;
- flush_tlb_page = sun4c_flush_tlb_page_hw;
- free_task_struct = sun4c_free_task_struct_hw;
- switch_to_context = sun4c_switch_to_context_hw;
- destroy_context = sun4c_destroy_context_hw;
- init_new_context = sun4c_init_new_context_hw;
+ BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM);
} else {
- flush_cache_mm = sun4c_flush_cache_mm_sw;
- flush_cache_range = sun4c_flush_cache_range_sw;
- flush_cache_page = sun4c_flush_cache_page_sw;
- flush_page_to_ram = sun4c_flush_page_to_ram_sw;
- flush_tlb_mm = sun4c_flush_tlb_mm_sw;
- flush_tlb_range = sun4c_flush_tlb_range_sw;
- flush_tlb_page = sun4c_flush_tlb_page_sw;
- free_task_struct = sun4c_free_task_struct_sw;
- switch_to_context = sun4c_switch_to_context_sw;
- destroy_context = sun4c_destroy_context_sw;
- init_new_context = sun4c_init_new_context_sw;
- }
-
- flush_tlb_all = sun4c_flush_tlb_all;
-
- flush_sig_insns = sun4c_flush_sig_insns;
-
- set_pte = sun4c_set_pte;
- pmd_align = sun4c_pmd_align;
- pgdir_align = sun4c_pgdir_align;
- vmalloc_start = sun4c_vmalloc_start;
-
- pte_page = sun4c_pte_page;
- pmd_page = sun4c_pmd_page;
-
- sparc_update_rootmmu_dir = sun4c_update_rootmmu_dir;
-
- pte_none = sun4c_pte_none;
- pte_present = sun4c_pte_present;
- pte_clear = sun4c_pte_clear;
-
- pmd_none = sun4c_pmd_none;
- pmd_bad = sun4c_pmd_bad;
- pmd_present = sun4c_pmd_present;
- pmd_clear = sun4c_pmd_clear;
-
- pgd_none = sun4c_pgd_none;
- pgd_bad = sun4c_pgd_bad;
- pgd_present = sun4c_pgd_present;
- pgd_clear = sun4c_pgd_clear;
-
- mk_pte = sun4c_mk_pte;
- mk_pte_phys = sun4c_mk_pte_phys;
- mk_pte_io = sun4c_mk_pte_io;
- pte_modify = sun4c_pte_modify;
- pgd_offset = sun4c_pgd_offset;
- pmd_offset = sun4c_pmd_offset;
- pte_offset = sun4c_pte_offset;
- pte_free_kernel = sun4c_pte_free_kernel;
- pmd_free_kernel = sun4c_pmd_free_kernel;
- pte_alloc_kernel = sun4c_pte_alloc_kernel;
- pmd_alloc_kernel = sun4c_pmd_alloc_kernel;
- pte_free = sun4c_pte_free;
- pte_alloc = sun4c_pte_alloc;
- pmd_free = sun4c_pmd_free;
- pmd_alloc = sun4c_pmd_alloc;
- pgd_free = sun4c_pgd_free;
- pgd_alloc = sun4c_pgd_alloc;
-
- pte_write = sun4c_pte_write;
- pte_dirty = sun4c_pte_dirty;
- pte_young = sun4c_pte_young;
- pte_wrprotect = sun4c_pte_wrprotect;
- pte_mkclean = sun4c_pte_mkclean;
- pte_mkold = sun4c_pte_mkold;
- pte_mkwrite = sun4c_pte_mkwrite;
- pte_mkdirty = sun4c_pte_mkdirty;
- pte_mkyoung = sun4c_pte_mkyoung;
- update_mmu_cache = sun4c_update_mmu_cache;
-
- mmu_lockarea = sun4c_lockarea;
- mmu_unlockarea = sun4c_unlockarea;
-
- mmu_get_scsi_one = sun4c_get_scsi_one;
- mmu_get_scsi_sgl = sun4c_get_scsi_sgl;
- mmu_release_scsi_one = sun4c_release_scsi_one;
- mmu_release_scsi_sgl = sun4c_release_scsi_sgl;
-
- mmu_map_dma_area = sun4c_map_dma_area;
-
- mmu_v2p = sun4c_v2p;
- mmu_p2v = sun4c_p2v;
+ BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM);
+ }
+
+ BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
+
+ BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM);
+#if PAGE_SHIFT <= 12
+ BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
+#else
+ BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
+#endif
+
+ BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
+
+ BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);
+
+ BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
+ BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
+ BTFIXUPSET_CALL(pgd_offset, sun4c_pgd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset, sun4c_pte_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free_kernel, sun4c_pte_free_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free_kernel, sun4c_pmd_free_kernel, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pte_alloc_kernel, sun4c_pte_alloc_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc_kernel, sun4c_pmd_alloc_kernel, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc, sun4c_pte_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_free, sun4c_pmd_free, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pmd_alloc, sun4c_pmd_alloc, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(pgd_free, sun4c_pgd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_alloc, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
+ BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
+ BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
+ BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
+ BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
+ BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
+ BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM);
/* Task struct and kernel stack allocating/freeing. */
- alloc_task_struct = sun4c_alloc_task_struct;
+ BTFIXUPSET_CALL(alloc_task_struct, sun4c_alloc_task_struct, BTFIXUPCALL_NORM);
- quick_kernel_fault = sun4c_quick_kernel_fault;
- mmu_info = sun4c_mmu_info;
+ BTFIXUPSET_CALL(quick_kernel_fault, sun4c_quick_kernel_fault, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
/* These should _never_ get called with two level tables. */
- pgd_set = 0;
- pgd_page = 0;
+ BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0);
}
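
sun4c_get_pgd_fast(), sun4c_free_pgd_fast() and their pte counterparts above all follow the same quicklist pattern: a freed table page stores the next-pointer in its own first word, so allocation is a pop and freeing is a push, with the slow path taken only on a miss. A compact sketch, using invented names:

	static unsigned long *quicklist;	/* head of the free-table list */
	static int quicklist_size;

	static void *quick_alloc(void)
	{
		unsigned long *page = quicklist;

		if (page) {
			quicklist = (unsigned long *)*page;	/* pop: next pointer lives in word 0 */
			quicklist_size--;
		}
		return page;		/* NULL means fall back to the slow allocator */
	}

	static void quick_free(void *table)
	{
		unsigned long *page = table;

		*page = (unsigned long)quicklist;	/* push: stash the old head in word 0 */
		quicklist = page;
		quicklist_size++;
	}
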
diff --git a/arch/sparc/mm/turbosparc.S b/arch/sparc/mm/turbosparc.S
index 415f09056..df580a85c 100644
--- a/arch/sparc/mm/turbosparc.S
+++ b/arch/sparc/mm/turbosparc.S
@@ -1,4 +1,4 @@
-/* $Id: turbosparc.S,v 1.2 1998/03/16 08:40:31 ralf Exp $
+/* $Id: turbosparc.S,v 1.3 1998/05/04 12:41:29 ralf Exp $
* turbosparc.S: High speed TurboSparc mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -27,6 +27,7 @@
.globl turbosparc_flush_cache_all
.globl turbosparc_flush_sig_insns
+ .globl turbosparc_flush_page_for_dma
turbosparc_flush_cache_all:
WINDOW_FLUSH(%g4, %g5)
@@ -42,5 +43,6 @@ turbosparc_flush_cache_all:
sta %g0, [%g0] ASI_M_IC_FLCLEAR
turbosparc_flush_sig_insns:
+turbosparc_flush_page_for_dma:
retl
nop
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index b05b7b416..c65f72007 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -1,8 +1,8 @@
-/* $Id: viking.S,v 1.6 1997/11/27 15:42:32 jj Exp $
+/* $Id: viking.S,v 1.11 1998/02/20 18:07:50 jj Exp $
* viking.S: High speed Viking cache/mmu operations
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/ptrace.h>
@@ -13,6 +13,7 @@
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
#include <asm/cprefix.h>
+#include <asm/btfixup.h>
#define WINDOW_FLUSH(tmp1, tmp2) \
mov 0, tmp1; \
@@ -37,40 +38,33 @@
.globl viking_flush_tlb_all, viking_flush_tlb_mm
.globl viking_flush_tlb_range, viking_flush_tlb_page
- .globl viking_c_mxcc_flush_page
- .globl viking_c_flush_page, viking_c_flush_chunk
+ .globl viking_c_flush_chunk, viking_s_flush_chunk
+
+viking_s_flush_chunk:
+ sethi %hi(KERNBASE), %g2
+ ba 2f
+ sub %o0, %g2, %g3
-viking_c_flush_page:
viking_c_flush_chunk:
sethi %hi(KERNBASE), %g2
cmp %o0, %g2
bgeu 2f
sub %o0, %g2, %g3
- sethi %hi(C_LABEL(page_contig_offset)), %g2
- ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2
+ sethi BTFIXUP_SETHI(page_contig_offset), %g2
ba 2f
sub %o0, %g2, %g3
viking_flush_page:
viking_flush_chunk:
sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2
- or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2
srl %o0, 24, %o1
+ or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2
sll %o1, 2, %o1
-
ld [%g2 + %o1], %g3
- cmp %g3, 0
- bne 1f
- and %o0, PAGE_MASK, %o0
-
- retl
- nop
-
-1:
- ld [%g3], %o1
- sub %o0, %o1, %g2
- ld [%g3 + 4], %o0
- add %g2, %o0, %g3
+ and %o0, PAGE_MASK, %o0
+ cmp %g3, -1
+ be 9f
+ add %o0, %g3, %g3
2: srl %g3, 12, %g1 ! ppage >> 12
clr %o1 ! set counter, 0 - 127
@@ -124,41 +118,22 @@ viking_flush_chunk:
ble 5b
clr %o2
- retl
+9: retl
nop
-viking_c_mxcc_flush_page:
- sethi %hi(KERNBASE), %g2
- cmp %o0, %g2
- bgeu 2f
- sub %o0, %g2, %g3
- sethi %hi(C_LABEL(page_contig_offset)), %g2
- ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2
- ba 2f
- sub %o0, %g2, %g3
-
viking_mxcc_flush_page:
sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2
- or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2
srl %o0, 24, %o1
+ or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2
sll %o1, 2, %o1
-
ld [%g2 + %o1], %g3
- cmp %g3, 0
- bne 1f
- and %o0, PAGE_MASK, %o0
-
- retl
- nop
-
-1:
- ld [%g3], %o1
- sub %o0, %o1, %g2
- ld [%g3 + 4], %o0
- add %g2, %o0, %g3
+ and %o0, PAGE_MASK, %o0
+ cmp %g3, -1
+ be 9f
+ add %o0, %g3, %g3
2: sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE
- mov 0x10, %g2 ! set cacheable bit
sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM)
+ mov 0x10, %g2 ! set cacheable bit
or %o3, %lo(MXCC_SRCSTREAM), %o2
or %o3, %lo(MXCC_DESSTREAM), %o3
sub %g3, MXCC_STREAM_SIZE, %g3
@@ -169,7 +144,7 @@ viking_mxcc_flush_page:
bne 6b
sub %g3, MXCC_STREAM_SIZE, %g3
- retl
+9: retl
nop
viking_mxcc_flush_chunk:
@@ -212,13 +187,12 @@ viking_flush_tlb_range:
cmp %o3, -1
be 2f
#endif
- srl %o1, SRMMU_PGDIR_SHIFT, %o1
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
- sll %o1, SRMMU_PGDIR_SHIFT, %o1
- sethi %hi(1 << SRMMU_PGDIR_SHIFT), %o4
+ and %o1, %o4, %o1
add %o1, 0x200, %o1
sta %g0, [%o1] ASI_M_FLUSH_PROBE
-1: add %o1, %o4, %o1
+1: sub %o1, %o4, %o1
cmp %o1, %o2
blu,a 1b
sta %g0, [%o1] ASI_M_FLUSH_PROBE
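
The viking_flush_tlb_range() rewrite above drops the shift-down/shift-up pair and keeps a single value in %o4: ~((1 << SRMMU_PGDIR_SHIFT) - 1), which in two's complement is also -(1 << SRMMU_PGDIR_SHIFT). AND-ing with it aligns the start address down to a region boundary, and subtracting it advances by one region. In C terms (SRMMU_PGDIR_SHIFT assumed to be the srmmu 16 MB region shift, 24):

	#define SRMMU_PGDIR_SHIFT 24	/* assumed value; see pgtsrmmu.h */

	unsigned long mask = ~((1UL << SRMMU_PGDIR_SHIFT) - 1);	/* == -(1UL << SRMMU_PGDIR_SHIFT) */

	start &= mask;		/* "and %o1, %o4, %o1": align down to a region boundary */
	start -= mask;		/* "sub %o1, %o4, %o1": step forward by one region      */
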
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
index 9c820a006..917aa9ad7 100644
--- a/arch/sparc/prom/Makefile
+++ b/arch/sparc/prom/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.5 1995/11/25 00:59:48 davem Exp $
+# $Id: Makefile,v 1.6 1998/01/30 10:58:59 jj Exp $
# Makefile for the Sun Boot PROM interface library under
# Linux.
#
@@ -9,7 +9,11 @@
# Note 2! The CFLAGS definitions are now in the main makefile...
OBJS = bootstr.o devmap.o devops.o init.o memory.o misc.o mp.o \
- palloc.o ranges.o segment.o tree.o console.o printf.o
+ palloc.o ranges.o segment.o console.o printf.o tree.o
+
+ifeq ($(CONFIG_SUN4),y)
+OBJS += sun4prom.o
+endif
all: promlib.a
diff --git a/arch/sparc/prom/bootstr.c b/arch/sparc/prom/bootstr.c
index e7bd9b06d..10a603455 100644
--- a/arch/sparc/prom/bootstr.c
+++ b/arch/sparc/prom/bootstr.c
@@ -1,4 +1,4 @@
-/* $Id: bootstr.c,v 1.14 1997/06/19 16:28:49 jj Exp $
+/* $Id: bootstr.c,v 1.17 1998/02/09 13:26:21 jj Exp $
* bootstr.c: Boot string/argument acquisition from the PROM.
*
* Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -7,12 +7,15 @@
#include <linux/config.h>
#include <linux/string.h>
#include <asm/oplib.h>
+#include <asm/sun4prom.h>
#include <linux/init.h>
#define BARG_LEN 256
-static char barg_buf[BARG_LEN] __initdata = { 0 };
+static char barg_buf[BARG_LEN] = { 0 };
static char fetched __initdata = 0;
+extern linux_sun4_romvec *sun4_romvec;
+
__initfunc(char *
prom_getbootargs(void))
{
@@ -26,6 +29,7 @@ prom_getbootargs(void))
switch(prom_vers) {
case PROM_V0:
+ case PROM_SUN4:
cp = barg_buf;
/* Start from 1 and go over fd(0,0,0)kernel */
for(iter = 1; iter < 8; iter++) {
diff --git a/arch/sparc/prom/console.c b/arch/sparc/prom/console.c
index 4c999477b..3bbc7ade0 100644
--- a/arch/sparc/prom/console.c
+++ b/arch/sparc/prom/console.c
@@ -1,4 +1,4 @@
-/* $Id: console.c,v 1.14 1997/05/14 20:44:58 davem Exp $
+/* $Id: console.c,v 1.17 1998/03/09 14:04:21 jj Exp $
* console.c: Routines that deal with sending and receiving IO
* to/from the current console device using the PROM.
*
@@ -10,12 +10,12 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/openprom.h>
+#include <asm/sun4prom.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <linux/string.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Non blocking get character from console input device, returns -1
* if no input was taken. This can be used for polling.
@@ -30,6 +30,7 @@ prom_nbgetchar(void)
save_flags(flags); cli();
switch(prom_vers) {
case PROM_V0:
+ case PROM_SUN4:
i = (*(romvec->pv_nbgetchar))();
break;
case PROM_V2:
@@ -45,9 +46,7 @@ prom_nbgetchar(void)
i = -1;
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return i; /* Ugh, we could spin forever on unsupported proms ;( */
}
@@ -65,6 +64,7 @@ prom_nbputchar(char c)
save_flags(flags); cli();
switch(prom_vers) {
case PROM_V0:
+ case PROM_SUN4:
i = (*(romvec->pv_nbputchar))(c);
break;
case PROM_V2:
@@ -89,9 +89,7 @@ prom_nbputchar(char c)
i = -1;
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return i; /* Ugh, we could spin forever on unsupported proms ;( */
}
@@ -125,6 +123,7 @@ prom_query_input_device()
switch(prom_vers) {
case PROM_V0:
case PROM_V2:
+ case PROM_SUN4:
default:
switch(*romvec->pv_stdin) {
case PROMDEV_KBD: return PROMDEV_IKBD;
@@ -136,9 +135,7 @@ prom_query_input_device()
case PROM_V3:
save_flags(flags); cli();
st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
if(prom_node_has_property(st_p, "keyboard"))
return PROMDEV_IKBD;
@@ -173,6 +170,7 @@ prom_query_output_device()
switch(prom_vers) {
case PROM_V0:
+ case PROM_SUN4:
switch(*romvec->pv_stdin) {
case PROMDEV_SCREEN: return PROMDEV_OSCREEN;
case PROMDEV_TTYA: return PROMDEV_OTTYA;
@@ -183,9 +181,7 @@ prom_query_output_device()
case PROM_V3:
save_flags(flags); cli();
st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
if (propl >= 0 && propl == sizeof("display") &&
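
Every prom/ file touched below (and console.c above) swaps the copy-pasted inline asm that reloads %g6 from current_set[] for a single restore_current() call after returning from the PROM. The helper's body is not part of this diff; presumably it is just the old asm hoisted into one place, roughly:

	extern struct task_struct *current_set[NR_CPUS];

	/* hypothetical reconstruction of restore_current(), for illustration:
	 * reload the per-cpu `current` register (%g6 on sparc32), which the
	 * PROM may have clobbered while it had the CPU. */
	static inline void restore_current(void)
	{
		__asm__ __volatile__("ld [%0], %%g6\n\t" : :
				     "r" (&current_set[hard_smp_processor_id()]) :
				     "memory");
	}
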
diff --git a/arch/sparc/prom/devmap.c b/arch/sparc/prom/devmap.c
index cd99ac3d6..463b07527 100644
--- a/arch/sparc/prom/devmap.c
+++ b/arch/sparc/prom/devmap.c
@@ -1,4 +1,4 @@
-/* $Id: devmap.c,v 1.5 1997/05/14 20:44:59 davem Exp $
+/* $Id: devmap.c,v 1.6 1998/03/09 14:04:23 jj Exp $
* promdevmap.c: Map device/IO areas to virtual addresses.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -11,8 +11,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Just like the routines in palloc.c, these should not be used
* by the kernel at all. Bootloader facility mainly. And again,
@@ -35,9 +34,7 @@ prom_mapio(char *vhint, int ios, unsigned int paddr, unsigned int num_bytes)
else
ret = (*(romvec->pv_v2devops.v2_dumb_mmap))(vhint, ios, paddr,
num_bytes);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return ret;
}
@@ -51,9 +48,7 @@ prom_unmapio(char *vaddr, unsigned int num_bytes)
if(num_bytes == 0x0) return;
save_flags(flags); cli();
(*(romvec->pv_v2devops.v2_dumb_munmap))(vaddr, num_bytes);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return;
}
diff --git a/arch/sparc/prom/devops.c b/arch/sparc/prom/devops.c
index f7feb0815..c273b9922 100644
--- a/arch/sparc/prom/devops.c
+++ b/arch/sparc/prom/devops.c
@@ -1,4 +1,4 @@
-/* $Id: devops.c,v 1.10 1997/05/14 20:44:59 davem Exp $
+/* $Id: devops.c,v 1.11 1998/03/09 14:04:24 jj Exp $
* devops.c: Device operations using the PROM.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -10,8 +10,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Open the device described by the string 'dstr'. Returns the handle
* to that device used for subsequent operations on that device.
@@ -37,9 +36,7 @@ prom_devopen(char *dstr)
handle = -1;
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return handle;
@@ -63,9 +60,7 @@ prom_devclose(int dhandle)
default:
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return 0;
}
@@ -90,9 +85,7 @@ prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
default:
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return;
diff --git a/arch/sparc/prom/init.c b/arch/sparc/prom/init.c
index 6f691464a..2c70dd95a 100644
--- a/arch/sparc/prom/init.c
+++ b/arch/sparc/prom/init.c
@@ -1,8 +1,9 @@
-/* $Id: init.c,v 1.11 1997/03/18 17:58:24 jj Exp $
+/* $Id: init.c,v 1.12 1998/01/30 10:59:02 jj Exp $
* init.c: Initialize internal variables used by the PROM
* library functions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -11,10 +12,12 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
+#include <asm/sun4prom.h>
struct linux_romvec *romvec;
enum prom_major_version prom_vers;
unsigned int prom_rev, prom_prev;
+linux_sun4_romvec *sun4_romvec;
/* The root node of the prom device tree. */
int prom_root_node;
@@ -34,11 +37,14 @@ extern void prom_ranges_init(void);
__initfunc(void prom_init(struct linux_romvec *rp))
{
+#ifdef CONFIG_SUN4
+ extern struct linux_romvec *sun4_prom_init(void);
+ rp = sun4_prom_init();
+#endif
#if CONFIG_AP1000
extern struct linux_romvec *ap_prom_init(void);
rp = ap_prom_init();
#endif
-
romvec = rp;
switch(romvec->pv_romvers) {
@@ -51,6 +57,9 @@ __initfunc(void prom_init(struct linux_romvec *rp))
case 3:
prom_vers = PROM_V3;
break;
+ case 40:
+ prom_vers = PROM_SUN4;
+ break;
case 42: /* why not :-) */
prom_vers = PROM_AP1000;
break;
@@ -83,8 +92,11 @@ __initfunc(void prom_init(struct linux_romvec *rp))
prom_ranges_init();
+#ifndef CONFIG_SUN4
+ /* SUN4 prints this in sun4_prom_init */
printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
romvec->pv_romvers, prom_rev);
+#endif
/* Initialization successful. */
return;
diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c
index b53bd17ea..af5019eb8 100644
--- a/arch/sparc/prom/memory.c
+++ b/arch/sparc/prom/memory.c
@@ -1,8 +1,9 @@
-/* $Id: memory.c,v 1.12 1997/05/27 06:45:57 davem Exp $
+/* $Id: memory.c,v 1.13 1998/01/30 10:59:03 jj Exp $
* memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*/
#include <linux/config.h>
@@ -10,6 +11,7 @@
#include <linux/init.h>
#include <asm/openprom.h>
+#include <asm/sun4prom.h>
#include <asm/oplib.h>
/* This routine, for consistency, returns the ram parameters in the
@@ -177,6 +179,21 @@ __initfunc(void prom_meminit(void))
prom_sortmemlist(prom_phys_avail);
break;
+ case PROM_SUN4:
+#ifdef CONFIG_SUN4
+ /* how simple :) */
+ prom_phys_total[0].start_adr = 0x0;
+ prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize);
+ prom_phys_total[0].theres_more = 0x0;
+ prom_prom_taken[0].start_adr = 0x0;
+ prom_prom_taken[0].num_bytes = 0x0;
+ prom_prom_taken[0].theres_more = 0x0;
+ prom_phys_avail[0].start_adr = 0x0;
+ prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail);
+ prom_phys_avail[0].theres_more = 0x0;
+#endif
+ break;
+
case PROM_AP1000:
#if CONFIG_AP1000
/* really simple memory map */
@@ -189,9 +206,6 @@ __initfunc(void prom_meminit(void))
prom_phys_avail[0].start_adr = 0x00000000;
prom_phys_avail[0].num_bytes = prom_phys_total[0].num_bytes;
prom_phys_avail[0].theres_more = 0x0;
- prom_sortmemlist(prom_phys_total);
- prom_sortmemlist(prom_prom_taken);
- prom_sortmemlist(prom_phys_avail);
#endif
default:
break;
diff --git a/arch/sparc/prom/misc.c b/arch/sparc/prom/misc.c
index fede033dd..d2ec600e1 100644
--- a/arch/sparc/prom/misc.c
+++ b/arch/sparc/prom/misc.c
@@ -1,4 +1,4 @@
-/* $Id: misc.c,v 1.15 1997/05/14 20:45:00 davem Exp $
+/* $Id: misc.c,v 1.16 1998/03/09 14:04:25 jj Exp $
* misc.c: Miscellaneous prom functions that don't belong
* anywhere else.
*
@@ -13,8 +13,7 @@
#include <asm/oplib.h>
#include <asm/auxio.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Reset and reboot the machine with the command 'bcommand'. */
void
@@ -24,9 +23,7 @@ prom_reboot(char *bcommand)
save_flags(flags); cli();
(*(romvec->pv_reboot))(bcommand);
/* Never get here. */
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
}
@@ -42,9 +39,7 @@ prom_feval(char *fstring)
(*(romvec->pv_fortheval.v0_eval))(strlen(fstring), fstring);
else
(*(romvec->pv_fortheval.v2_eval))(fstring);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
}
@@ -74,9 +69,7 @@ prom_cmdline(void)
install_obp_ticker();
save_flags(flags); cli();
(*(romvec->pv_abort))();
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
install_linux_ticker();
#ifdef CONFIG_SUN_AUXIO
@@ -99,9 +92,7 @@ again:
save_flags(flags); cli();
(*(romvec->pv_halt))();
/* Never get here. */
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
goto again; /* PROM is out to get me -DaveM */
}
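
Each hunk above, and the matching ones in mp.c, segment.c and tree.c below, folds the repeated %g6 reload into a single restore_current() helper. A sketch of what that helper amounts to, reusing the body of the macro removed from tree.c further down; where the real function is actually defined is not shown in this patch, and the headers are approximate for this kernel era:

#include <linux/sched.h>	/* struct task_struct      */
#include <linux/tasks.h>	/* NR_CPUS                 */
#include <asm/smp.h>		/* hard_smp_processor_id() */

extern struct task_struct *current_set[NR_CPUS];

void restore_current(void)
{
	/* The PROM is free to trash our global registers; pull the per-cpu
	 * "current" pointer back into %g6 before any C code runs again. */
	__asm__ __volatile__("ld [%0], %%g6\n\t" : :
			     "r" (&current_set[hard_smp_processor_id()]) :
			     "memory");
}
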
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
index 8f07f9d40..2346e3564 100644
--- a/arch/sparc/prom/mp.c
+++ b/arch/sparc/prom/mp.c
@@ -1,4 +1,4 @@
-/* $Id: mp.c,v 1.9 1997/05/14 20:45:01 davem Exp $
+/* $Id: mp.c,v 1.10 1998/03/09 14:04:26 jj Exp $
* mp.c: OpenBoot Prom Multiprocessor support routines. Don't call
* these on a UP or else you will halt and catch fire. ;)
*
@@ -12,8 +12,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Start cpu with prom-tree node 'cpunode' using context described
* by 'ctable_reg' in context 'ctx' at program counter 'pc'.
@@ -38,9 +37,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return ret;
@@ -67,9 +64,7 @@ prom_stopcpu(int cpunode)
ret = (*(romvec->v3_cpustop))(cpunode);
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return ret;
@@ -96,9 +91,7 @@ prom_idlecpu(int cpunode)
ret = (*(romvec->v3_cpuidle))(cpunode);
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return ret;
@@ -125,9 +118,7 @@ prom_restartcpu(int cpunode)
ret = (*(romvec->v3_cpuresume))(cpunode);
break;
};
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return ret;
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index 7f7b1da54..b4fd3989e 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -1,4 +1,4 @@
-/* $Id: ranges.c,v 1.10 1997/12/19 12:37:18 jj Exp $
+/* $Id: ranges.c,v 1.11 1998/01/30 10:59:05 jj Exp $
* ranges.c: Handle ranges in newer proms for obio/sbus.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -68,7 +68,7 @@ prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
void prom_apply_sbus_ranges(struct linux_sbus *sbus, struct linux_prom_registers *regs,
int nregs, struct linux_sbus_device *sdev)
{
- if(sbus->num_sbus_ranges) {
+ if(sbus && sbus->num_sbus_ranges) {
if(sdev && (sdev->ranges_applied == 0)) {
sdev->ranges_applied = 1;
prom_adjust_regs(regs, nregs, sbus->sbus_ranges,
diff --git a/arch/sparc/prom/segment.c b/arch/sparc/prom/segment.c
index 96b543727..62b3f8542 100644
--- a/arch/sparc/prom/segment.c
+++ b/arch/sparc/prom/segment.c
@@ -1,4 +1,4 @@
-/* $Id: segment.c,v 1.5 1997/05/14 20:45:02 davem Exp $
+/* $Id: segment.c,v 1.6 1998/03/09 14:04:27 jj Exp $
* segment.c: Prom routine to map segments in other contexts before
* a standalone is completely mapped. This is for sun4 and
* sun4c architectures only.
@@ -12,8 +12,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
+extern void restore_current(void);
/* Set physical segment 'segment' at virtual address 'vaddr' in
* context 'ctx'.
@@ -24,9 +23,7 @@ prom_putsegment(int ctx, unsigned long vaddr, int segment)
unsigned long flags;
save_flags(flags); cli();
(*(romvec->pv_setctxt))(ctx, (char *) vaddr, segment);
- __asm__ __volatile__("ld [%0], %%g6\n\t" : :
- "r" (&current_set[hard_smp_processor_id()]) :
- "memory");
+ restore_current();
restore_flags(flags);
return;
}
diff --git a/arch/sparc/prom/sun4prom.c b/arch/sparc/prom/sun4prom.c
new file mode 100644
index 000000000..ce15ebb43
--- /dev/null
+++ b/arch/sparc/prom/sun4prom.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 1996 The Australian National University.
+ * Copyright (C) 1996 Fujitsu Laboratories Limited
+ * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
+ * Copyright (C) 1997 Sun Weenie (ko@ko.reno.nv.us)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ *
+ * fake a really simple Sun prom for the SUN4
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/sun4prom.h>
+#include <asm/asi.h>
+#include <asm/contregs.h>
+#include <linux/init.h>
+
+static struct linux_romvec sun4romvec;
+static struct idprom sun4_idprom;
+
+struct property {
+ char *name;
+ char *value;
+ int length;
+};
+
+struct node {
+ int level;
+ struct property *properties;
+};
+
+struct property null_properties = { NULL, NULL, -1 };
+
+struct property root_properties[] = {
+ {"device_type", "cpu", 4},
+ {"idprom", (char *)&sun4_idprom, sizeof(struct idprom)},
+ {NULL, NULL, -1}
+};
+
+struct node nodes[] = {
+ { 0, &null_properties },
+ { 0, root_properties },
+ { -1,&null_properties }
+};
+
+
+static int no_nextnode(int node)
+{
+ if (nodes[node].level == nodes[node+1].level)
+ return node+1;
+ return -1;
+}
+
+static int no_child(int node)
+{
+ if (nodes[node].level == nodes[node+1].level-1)
+ return node+1;
+ return -1;
+}
+
+static struct property *find_property(int node,char *name)
+{
+ struct property *prop = &nodes[node].properties[0];
+ while (prop && prop->name) {
+ if (strcmp(prop->name,name) == 0) return prop;
+ prop++;
+ }
+ return NULL;
+}
+
+static int no_proplen(int node,char *name)
+{
+ struct property *prop = find_property(node,name);
+ if (prop) return prop->length;
+ return -1;
+}
+
+static int no_getprop(int node,char *name,char *value)
+{
+ struct property *prop = find_property(node,name);
+ if (prop) {
+ memcpy(value,prop->value,prop->length);
+ return 1;
+ }
+ return -1;
+}
+
+static int no_setprop(int node,char *name,char *value,int len)
+{
+ return -1;
+}
+
+static char *no_nextprop(int node,char *name)
+{
+ struct property *prop = find_property(node,name);
+ if (prop) return prop[1].name;
+ return NULL;
+}
+
+static struct linux_nodeops sun4_nodeops = {
+ no_nextnode,
+ no_child,
+ no_proplen,
+ no_getprop,
+ no_setprop,
+ no_nextprop
+};
+
+static int synch_hook;
+
+__initfunc(struct linux_romvec *sun4_prom_init(void))
+{
+ int i;
+ unsigned char x;
+ char *p;
+
+ p = (char *)&sun4_idprom;
+ for (i = 0; i < sizeof(sun4_idprom); i++) {
+ __asm__ __volatile__ ("lduba [%1] %2, %0" : "=r" (x) :
+ "r" (AC_IDPROM + i), "i" (ASI_CONTROL));
+ *p++ = x;
+ }
+
+ memset(&sun4romvec,0,sizeof(sun4romvec));
+
+ sun4_romvec = (linux_sun4_romvec *) SUN4_PROM_VECTOR;
+
+ sun4romvec.pv_romvers = 40;
+ sun4romvec.pv_nodeops = &sun4_nodeops;
+ sun4romvec.pv_reboot = sun4_romvec->reboot;
+ sun4romvec.pv_abort = sun4_romvec->abortentry;
+ sun4romvec.pv_halt = sun4_romvec->exittomon;
+ sun4romvec.pv_synchook = (void (**)(void))&synch_hook;
+ sun4romvec.pv_setctxt = sun4_romvec->setcxsegmap;
+ sun4romvec.pv_v0bootargs = sun4_romvec->bootParam;
+ sun4romvec.pv_nbgetchar = sun4_romvec->mayget;
+ sun4romvec.pv_nbputchar = sun4_romvec->mayput;
+ sun4romvec.pv_stdin = sun4_romvec->insource;
+ sun4romvec.pv_stdout = sun4_romvec->outsink;
+
+ /*
+ * We turn on the LEDs to let folks without monitors or
+ * terminals know we booted. Nothing too fancy now. They
+ * are all on, except for LED 5, which blinks. When we
+ * have more time, we can teach the penguin to say "By your
+ * command" or "Activating turbo boost, Michael". :-)
+ */
+ sun4_romvec->setLEDs(0x0);
+
+ printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n",
+ sun4_romvec->monid,
+ sun4_romvec->romvecversion);
+
+ return &sun4romvec;
+}
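
The fake device tree above encodes structure purely by array order and the level field: no_nextnode() steps to the next entry at the same level, no_child() to the next entry one level deeper. A small illustration of how the root node's idprom property would be fetched through these handlers; the function name is invented, and since the nodeops are static it would have to live in the same file:

/* Node 1 is the root entry of nodes[], whose "idprom" property points
 * at the freshly copied sun4_idprom. */
static void sun4_prom_selftest(void)
{
	struct idprom buf;

	if (no_proplen(1, "idprom") == sizeof(struct idprom) &&
	    no_getprop(1, "idprom", (char *)&buf) == 1) {
		/* buf now holds a byte copy of sun4_idprom */
	}
}
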
diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c
index 616180e81..1256aacec 100644
--- a/arch/sparc/prom/tree.c
+++ b/arch/sparc/prom/tree.c
@@ -1,4 +1,4 @@
-/* $Id: tree.c,v 1.22 1997/09/25 02:19:22 davem Exp $
+/* $Id: tree.c,v 1.24 1998/03/09 14:04:29 jj Exp $
* tree.c: Basic device tree traversal/scanning for the Linux
* prom library.
*
@@ -15,13 +15,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* XXX Let's get rid of this thing if we can... */
-extern struct task_struct *current_set[NR_CPUS];
-
-/* Macro to restore "current" to the g6 register. */
-#define restore_current() __asm__ __volatile__("ld [%0], %%g6\n\t" : : \
- "r" (&current_set[hard_smp_processor_id()]) : \
- "memory")
+extern void restore_current(void);
static char promlib_buf[128];
@@ -95,12 +89,11 @@ int prom_getproplen(int node, char *prop)
int ret;
unsigned long flags;
- save_flags(flags); cli();
-
if((!node) || (!prop))
- ret = -1;
- else
- ret = prom_nodeops->no_proplen(node, prop);
+ return -1;
+
+ save_flags(flags); cli();
+ ret = prom_nodeops->no_proplen(node, prop);
restore_current();
restore_flags(flags);
return ret;
@@ -115,15 +108,12 @@ int prom_getproperty(int node, char *prop, char *buffer, int bufsize)
int plen, ret;
unsigned long flags;
- save_flags(flags); cli();
-
plen = prom_getproplen(node, prop);
if((plen > bufsize) || (plen == 0) || (plen == -1))
- ret = -1;
- else {
- /* Ok, things seem all right. */
- ret = prom_nodeops->no_getprop(node, prop, buffer);
- }
+ return -1;
+ /* Ok, things seem all right. */
+ save_flags(flags); cli();
+ ret = prom_nodeops->no_getprop(node, prop, buffer);
restore_current();
restore_flags(flags);
return ret;
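
With the reordering above, bad arguments are rejected before interrupts are disabled, so the cli()-protected window only covers the actual PROM call. A usage sketch of the two routines as declared in the hunk headers; the property name, helper name and node handle are illustrative, not taken from this patch:

#include <linux/kernel.h>	/* printk()                              */
#include <asm/oplib.h>		/* prom_getproplen(), prom_getproperty() */

/* A real caller would obtain the node from prom_getchild() or
 * prom_searchsiblings(); printing assumes the property is a
 * NUL-terminated string, as OBP string properties usually are. */
static void print_model(int node)
{
	char model[64];
	int len = prom_getproplen(node, "model");

	if (len > 0 && len <= (int) sizeof(model) &&
	    prom_getproperty(node, "model", model, sizeof(model)) != -1)
		printk("PROMLIB: model property: %s\n", model);
}
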
diff --git a/arch/sparc/vmlinux.lds b/arch/sparc/vmlinux.lds
index 8141f5755..cbfc9fb3c 100644
--- a/arch/sparc/vmlinux.lds
+++ b/arch/sparc/vmlinux.lds
@@ -32,6 +32,7 @@ SECTIONS
. = ALIGN(4096);
__init_begin = .;
.text.init : { *(.text.init) }
+ __init_text_end = .;
.data.init : { *(.data.init) }
. = ALIGN(4096);
__init_end = .;
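
The linker script now places __init_text_end between the init text and init data sections. A sketch, not taken from this patch, of how a linker-script symbol like this is typically consumed from C; whether this kernel actually references it is not shown here, and the helper name is invented:

extern char __init_begin, __init_text_end;

static unsigned long init_text_bytes(void)
{
	/* The region bounded by __init_begin and __init_text_end is the
	 * .text.init output section laid out by the script above. */
	return (unsigned long) &__init_text_end -
	       (unsigned long) &__init_begin;
}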