author     Ralf Baechle <ralf@linux-mips.org>    1995-11-14 08:00:00 +0000
committer  <ralf@linux-mips.org>                 1995-11-14 08:00:00 +0000
commit     e7c2a72e2680827d6a733931273a93461c0d8d1b
tree       c9abeda78ef7504062bb2e816bcf3e3c9d680112 /arch/sparc/mm
parent     ec6044459060a8c9ce7f64405c465d141898548c
Import of Linux/MIPS 1.3.0
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile    |  32
-rw-r--r--  arch/sparc/mm/fault.c     | 173
-rw-r--r--  arch/sparc/mm/init.c      | 364
-rw-r--r--  arch/sparc/mm/vac-flush.c |  94
4 files changed, 663 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
new file mode 100644
index 000000000..a4148d013
--- /dev/null
+++ b/arch/sparc/mm/Makefile
@@ -0,0 +1,32 @@
+#
+# Makefile for the linux Sparc-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (i.e. not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+.c.o:
+ $(CC) $(CFLAGS) -c $<
+.s.o:
+ $(AS) -o $*.o $<
+.c.s:
+ $(CC) $(CFLAGS) -S $<
+
+OBJS = fault.o vac-flush.o init.o
+
+mm.o: $(OBJS)
+ $(LD) -r -o mm.o $(OBJS)
+
+modules:
+
+dep:
+ $(CPP) -M *.c > .depend
+
+#
+# include a dependency file if one exists
+#
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
new file mode 100644
index 000000000..4c5fd0bc3
--- /dev/null
+++ b/arch/sparc/mm/fault.c
@@ -0,0 +1,173 @@
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/openprom.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+extern unsigned long pg0[1024]; /* page table for 0-4MB for everybody */
+extern struct sparc_phys_banks sp_banks[14];
+
+extern void die_if_kernel(char *,struct pt_regs *,long);
+
+struct linux_romvec *romvec;
+
+int tbase_needs_unmapping;
+
+/* At boot time we determine these two values necessary for setting
+ * up the segment maps and page table entries (pte's).
+ */
+
+int num_segmaps, num_contexts;
+int invalid_segment;
+
+/* various Virtual Address Cache parameters we find at boot time... */
+
+int vac_size, vac_linesize, vac_do_hw_vac_flushes;
+int vac_entries_per_context, vac_entries_per_segment;
+int vac_entries_per_page;
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef CONFIG_TEST_VERIFY_AREA
+
+/* Traverse the memory lists in the prom to see how much physical
+ * memory we have.
+ */
+
+unsigned long
+probe_memory(void)
+{
+ register struct linux_romvec *lprom;
+ register struct linux_mlist_v0 *mlist;
+ register unsigned long bytes, base_paddr, tally;
+ register int i;
+
+ bytes = tally = 0;
+ base_paddr = 0;
+ i=0;
+ lprom = romvec;
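+	/* v0 proms hand us a linked list of physical banks; v2 probing
+	 * is not implemented yet, and any other prom version falls
+	 * through with an empty tally
+	 */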
+ switch(lprom->pv_romvers)
+ {
+ case 0:
+ mlist=(*(lprom->pv_v0mem.v0_totphys));
+ bytes = tally = mlist->num_bytes;
+ base_paddr = (unsigned long) mlist->start_adr;
+
+ sp_banks[0].base_addr = base_paddr;
+ sp_banks[0].num_bytes = bytes;
+
+	/* walk any remaining entries in the prom's memory list */
+	while(mlist->theres_more != (void *)0) {
+ i++;
+ mlist=mlist->theres_more;
+ bytes=mlist->num_bytes;
+ tally += bytes;
+ sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
+ sp_banks[i].num_bytes = mlist->num_bytes;
+ }
+ break;
+ case 2:
+ printk("no v2 memory probe support yet.\n");
+ (*(lprom->pv_halt))();
+ break;
+ }
+
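+	/* terminate the sp_banks[] list with a sentinel entry */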
+ i++;
+ sp_banks[i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+
+ return tally;
+}
+
+/* Sparc routine to reserve the mapping of the open boot prom */
+
+/* uncomment this for FAME and FORTUNE! */
+/* #define DEBUG_MAP_PROM */
+
+int
+map_the_prom(int curr_num_segs)
+{
+ register unsigned long prom_va_begin;
+ register unsigned long prom_va_end;
+ register int segmap_entry, i;
+
+ prom_va_begin = LINUX_OPPROM_BEGVM;
+ prom_va_end = LINUX_OPPROM_ENDVM;
+
+#ifdef DEBUG_MAP_PROM
+ printk("\ncurr_num_segs = 0x%x\n", curr_num_segs);
+#endif
+
+ while( prom_va_begin < prom_va_end)
+ {
+ segmap_entry=get_segmap(prom_va_begin);
+
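+		/* remember the lowest segmap the prom occupies, so the
+		 * caller knows how many segmaps are left for the kernel
+		 */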
+ curr_num_segs = ((segmap_entry<curr_num_segs)
+ ? segmap_entry : curr_num_segs);
+
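+		/* context 0 is skipped; it apparently already holds this
+		 * mapping, since get_segmap() just read it from there
+		 */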
+ for(i = num_contexts; --i > 0;)
+ (*romvec->pv_setctxt)(i, (char *) prom_va_begin,
+ segmap_entry);
+
+ if(segmap_entry == invalid_segment)
+ {
+
+#ifdef DEBUG_MAP_PROM
+ printk("invalid_segments, virt_addr 0x%x\n", prom_va_begin);
+#endif
+
+ prom_va_begin += 0x40000; /* num bytes per segment entry */
+ continue;
+ }
+
+ /* DUH, prom maps itself so that users can access it. This is
+ * broken.
+ */
+
+#ifdef DEBUG_MAP_PROM
+ printk("making segmap for prom privileged, va = 0x%x\n",
+ prom_va_begin);
+#endif
+
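+		/* 64 pages cover one 256K segment; bit 0x20000000 in the
+		 * pte appears to be the privileged-access bit, making the
+		 * prom mapping supervisor-only
+		 */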
+ for(i = 0x40; --i >= 0; prom_va_begin+=4096)
+ {
+ put_pte(prom_va_begin, get_pte(prom_va_begin) | 0x20000000);
+ }
+
+ }
+
+ printk("Mapped the PROM in all contexts...\n");
+
+#ifdef DEBUG_MAP_PROM
+ printk("curr_num_segs = 0x%x\n", curr_num_segs);
+#endif
+
+ return curr_num_segs;
+
+}
+
+/*
+ * This routine handles page faults. Eventually it will determine
+ * the address and the problem and pass them off to the appropriate
+ * routine; for now any fault simply oopses and kills the offending
+ * process.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+ die_if_kernel("Oops", regs, error_code);
+ do_exit(SIGKILL);
+}
+
+
+
+
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
new file mode 100644
index 000000000..a65e9e094
--- /dev/null
+++ b/arch/sparc/mm/init.c
@@ -0,0 +1,364 @@
+/*
+ * linux/arch/sparc/mm/init.c
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/vac-ops.h>
+#include <asm/page.h>
+#include <asm/openprom.h>
+#include <asm/pgtable.h>
+
+extern void scsi_mem_init(unsigned long);
+extern void sound_mem_init(void);
+extern void die_if_kernel(char *,struct pt_regs *,long);
+extern void show_net_buffers(void);
+
+extern int map_the_prom(int);
+
+struct sparc_phys_banks sp_banks[14];
+unsigned long *sun4c_mmu_table;
+extern int invalid_segment, num_segmaps, num_contexts;
+
+/* defined in arch/sparc/mm/fault.c */
+extern struct linux_romvec *romvec;
+
+/* kernel image symbols (head.S / linker) used below */
+extern char trapbase, etext, end;
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * of a process dying in kernel mode, possibly leaving an inode
+ * unused etc.
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+pte_t *__bad_pagetable(void)
+{
+ memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
+ return (pte_t *) EMPTY_PGT;
+}
+
+pte_t __bad_page(void)
+{
+ memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
+ return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
+}
+
+unsigned long __zero_page(void)
+{
+ memset((void *) ZERO_PGE, 0, PAGE_SIZE);
+ return ZERO_PGE;
+}
+
+void show_mem(void)
+{
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = high_memory >> PAGE_SHIFT;
+ while (i-- > 0) {
+ total++;
+ if (mem_map[i] & MAP_PAGE_RESERVED)
+ reserved++;
+ else if (!mem_map[i])
+ free++;
+ else
+ shared += mem_map[i]-1;
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ show_buffers();
+#ifdef CONFIG_NET
+ show_net_buffers();
+#endif
+}
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+
+/*
+ * paging_init() sets up the kernel's page tables. On the sun4c we
+ * build a static software copy of the mmu segment maps and pte's,
+ * so that the low-level fault handler can reload hardware entries
+ * on demand; the mmu cannot hold mappings for all of memory at once.
+ */
+
+unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ unsigned long i, a, b, mask=0;
+ unsigned long curseg, curpte, num_inval;
+ unsigned long address;
+ pte_t *pg_table;
+
+ register int num_segs, num_ctx;
+ register char * c;
+
+ num_segs = num_segmaps;
+ num_ctx = num_contexts;
+
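+	/* reserve the highest segmap as the "invalid" segment; virtual
+	 * ranges with no backing memory will all point at it
+	 */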
+ num_segs -= 1;
+ invalid_segment = num_segs;
+
+ start_mem = free_area_init(start_mem, end_mem);
+
+/* On the sparc we first need to allocate the segmaps for the
+ * PROM's virtual space, and make those segmaps unusable. We
+ * map the PROM in ALL contexts, so the break key and the sync
+ * command work no matter what state the machine was taken out of.
+ */
+
+ printk("mapping the prom...\n");
+ num_segs = map_the_prom(num_segs);
+
+ start_mem = PAGE_ALIGN(start_mem);
+
+ /* Set up static page tables in kernel space, this will be used
+ * so that the low-level page fault handler can fill in missing
+ * TLB entries since all mmu entries cannot be loaded at once
+ * on the sun4c.
+ */
+
+#if 0
+ /* ugly debugging code */
+ for(i=0; i<40960; i+=PAGE_SIZE)
+ printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
+ (int) get_segmap(i), (unsigned int) get_pte(i));
+#endif
+
+ printk("Setting up kernel static mmu table... bounce bounce\n");
+
+ address = 0; /* ((unsigned long) &end) + 524288; */
+ sun4c_mmu_table = (unsigned long *) start_mem;
+ pg_table = (pte_t *) start_mem;
+ curseg = curpte = num_inval = 0;
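+	/* Walk every page up to end_mem: pages backed by a physical
+	 * bank get real pte's, holes get invalid ones. curpte counts
+	 * pte's in the current 64-entry segment and num_inval counts
+	 * how many of those are invalid, so an entirely-invalid
+	 * segment can be mapped to invalid_segment instead of using
+	 * up a real segmap.
+	 */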
+ while(address < end_mem) {
+ if(curpte == 0)
+ put_segmap((address&PGDIR_MASK), curseg);
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ if((address >= sp_banks[i].base_addr) &&
+	       (address < (sp_banks[i].base_addr + sp_banks[i].num_bytes)))
+ goto good_address;
+ /* No physical memory here, so set the virtual segment to
+ * the invalid one, and put an invalid pte in the static
+ * kernel table.
+ */
+ *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_INVALID);
+ pg_table++; curpte++; num_inval++;
+ if(curpte > 63) {
+ if(curpte == num_inval) {
+ put_segmap((address&PGDIR_MASK), invalid_segment);
+ } else {
+ put_segmap((address&PGDIR_MASK), curseg);
+ curseg++;
+ }
+ curpte = num_inval = 0;
+ }
+ address += PAGE_SIZE;
+ continue;
+
+ good_address:
+	  /* Create the pte: below the kernel image plus 512K the boot
+	   * mappings already exist in the mmu, so copy them; above
+	   * that, build a fresh kernel pte and load it as well.
+	   */
+ if(address < (((unsigned long) &end) + 524288)) {
+ pte_val(*pg_table) = get_pte(address);
+ } else {
+ *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_KERNEL);
+ put_pte(address, pte_val(*pg_table));
+ }
+
+ pg_table++; curpte++;
+ if(curpte > 63) {
+ put_segmap((address&PGDIR_MASK), curseg);
+ curpte = num_inval = 0;
+ curseg++;
+ }
+ address += PAGE_SIZE;
+ }
+
+ start_mem = (unsigned long) pg_table;
+ /* ok, allocate the kernel pages, map them in all contexts
+ * (with help from the prom), and lock them. Isn't the sparc
+	 * fun, kiddies? TODO
+ */
+
+#if 0
+ /* ugly debugging code */
+ for(i=0x1a3000; i<(0x1a3000+40960); i+=PAGE_SIZE)
+ printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
+ (int) get_segmap(i), (unsigned int) get_pte(i));
+ halt();
+#endif
+
+	/* number of 256K (1 << 18) segments spanned by the kernel */
+	b = PGDIR_ALIGN(start_mem) >> 18;
+	c = (char *) 0x0;
+
+ printk("mapping kernel in all contexts...\n");
+
+ for(a=0; a<b; a++)
+ {
+ for(i=0; i<num_contexts; i++)
+ {
+ /* map the kernel virt_addrs */
+ (*(romvec->pv_setctxt))(i, (char *) c, a);
+ }
+ c += 0x40000;
+ }
+
+	/* Ok, since the kernel is now mapped in all contexts, we can
+	 * free up context zero to be used amongst user processes.
+	 */
+
+ /* free context 0 here TODO */
+
+ /* invalidate all user pages and initialize the pte struct
+ * for userland. TODO
+ */
+
+	/* Make the kernel text unwritable and cacheable; the prom
+	 * loaded our text as writable, and only sneaky sunos kernels
+	 * need self-modifying code.
+	 */
+
+	a = (unsigned long) &etext;
+	mask = ~(PTE_NC|PTE_W); /* make cacheable + not writable */
+
+	/* This must be done in every context, since the kernel uses
+	 * them all; unlike some sun kernels I know of, we can't hard
+	 * wire context 0 just for the kernel, and that is unnecessary
+	 * anyway.
+	 */
+
+	for(i=0; i<num_contexts; i++)
+	{
+	  b = PAGE_ALIGN((unsigned long) &trapbase);
+
+ switch_to_context(i);
+
+ for(;b<a; b+=4096)
+ {
+ put_pte(b, (get_pte(b) & mask));
+ }
+ }
+
+ invalidate(); /* flush the virtual address cache */
+
+ printk("\nCurrently in context - ");
+ for(i=0; i<num_contexts; i++)
+ {
+ switch_to_context(i);
+ printk("%d ", (int) i);
+ }
+ printk("\n");
+
+ switch_to_context(0);
+
+ invalidate();
+ return start_mem;
+}
+
+void mem_init(unsigned long start_mem, unsigned long end_mem)
+{
+ unsigned long start_low_mem = PAGE_SIZE;
+ int codepages = 0;
+ int reservedpages = 0;
+ int datapages = 0;
+ int i = 0;
+ unsigned long tmp, limit, tmp2, addr;
+ extern char etext;
+
+ end_mem &= PAGE_MASK;
+ high_memory = end_mem;
+
+ start_low_mem = PAGE_ALIGN(start_low_mem);
+ start_mem = PAGE_ALIGN(start_mem);
+
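+	/* mark pages inside each physical bank as free, and pages in
+	 * the holes between consecutive banks as reserved
+	 */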
+ for(i = 0; sp_banks[i].num_bytes != 0; i++) {
+ tmp = sp_banks[i].base_addr;
+ limit = (sp_banks[i].base_addr + sp_banks[i].num_bytes);
+ if(tmp<start_mem) {
+ if(limit>start_mem)
+ tmp = start_mem;
+ else continue;
+ }
+
+ while(tmp<limit) {
+ mem_map[MAP_NR(tmp)] = 0;
+ tmp += PAGE_SIZE;
+ }
+ if(sp_banks[i+1].num_bytes != 0)
+ while(tmp < sp_banks[i+1].base_addr) {
+ mem_map[MAP_NR(tmp)] = MAP_PAGE_RESERVED;
+ tmp += PAGE_SIZE;
+ }
+ }
+
+#ifdef CONFIG_SCSI
+ scsi_mem_init(high_memory);
+#endif
+
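+	/* Pages still marked reserved are tallied as code, data or
+	 * reserved; every other page gets a reference count of one
+	 * and is then released to the allocator with free_page().
+	 */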
+ for (addr = 0; addr < high_memory; addr += PAGE_SIZE) {
+ if(mem_map[MAP_NR(addr)]) {
+ if (addr < (unsigned long) &etext)
+ codepages++;
+ else if(addr < start_mem)
+ datapages++;
+ else
+ reservedpages++;
+ continue;
+ }
+ mem_map[MAP_NR(addr)] = 1;
+ free_page(addr);
+ }
+
+ tmp2 = nr_free_pages << PAGE_SHIFT;
+
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
+ tmp2 >> 10,
+ high_memory >> 10,
+ codepages << (PAGE_SHIFT-10),
+ reservedpages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10));
+
+ invalidate();
+ return;
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+ int i;
+
+ i = high_memory >> PAGE_SHIFT;
+ val->totalram = 0;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages << PAGE_SHIFT;
+ val->bufferram = buffermem;
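+	/* count non-reserved pages; a use count above one means the
+	 * page is shared, contributing count-1 to sharedram
+	 */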
+ while (i-- > 0) {
+ if (mem_map[i] & MAP_PAGE_RESERVED)
+ continue;
+ val->totalram++;
+ if (!mem_map[i])
+ continue;
+ val->sharedram += mem_map[i]-1;
+ }
+ val->totalram <<= PAGE_SHIFT;
+ val->sharedram <<= PAGE_SHIFT;
+ return;
+}
diff --git a/arch/sparc/mm/vac-flush.c b/arch/sparc/mm/vac-flush.c
new file mode 100644
index 000000000..796366b53
--- /dev/null
+++ b/arch/sparc/mm/vac-flush.c
@@ -0,0 +1,94 @@
+/* vac-flush.c: Routines for flushing various amounts of the Sparc VAC
+ (virtual address cache).
+
+ Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
+*/
+
+#include <asm/vac-ops.h>
+#include <asm/page.h>
+
+extern int vac_do_hw_vac_flushes, vac_size, vac_linesize;
+extern int vac_entries_per_context, vac_entries_per_segment;
+extern int vac_entries_per_page;
+
+/* Flush all VAC entries for the current context */
+
+void
+flush_vac_context()
+{
+ register int entries_left, offset;
+ register char* address;
+
+ entries_left = vac_entries_per_context;
+ address = (char *) 0;
+
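+	/* hardware-assisted flushes operate on whole pages, while the
+	 * software fallback walks the cache one line at a time
+	 */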
+ if(vac_do_hw_vac_flushes)
+ {
+	    while(--entries_left >= 0)
+ {
+ hw_flush_vac_context_entry(address);
+ address += PAGE_SIZE;
+ }
+ }
+ else
+ {
+ offset = vac_linesize;
+	    while(--entries_left >= 0)
+ {
+ sw_flush_vac_context_entry(address);
+ address += offset;
+ }
+ }
+}
+
+void
+flush_vac_segment(register unsigned int segment)
+{
+ register int entries_left, offset;
+ register char* address = (char *) 0;
+
+ entries_left = vac_entries_per_segment;
+	/* derive the flush base address from the segment number */
+	__asm__ __volatile__("sll %1, 18, %0\n\t"
+			     "sra %0, 0x2, %0\n\t"
+			     : "=r" (address) : "r" (segment));
+
+ if(vac_do_hw_vac_flushes)
+ {
+	    while(--entries_left >= 0)
+ {
+ hw_flush_vac_segment_entry(address);
+ address += PAGE_SIZE;
+ }
+ }
+ else
+ {
+ offset = vac_linesize;
+	    while(--entries_left >= 0)
+ {
+ sw_flush_vac_segment_entry(address);
+ address += offset;
+ }
+ }
+}
+
+void
+flush_vac_page(register unsigned int addr)
+{
+ register int entries_left, offset;
+
+ if(vac_do_hw_vac_flushes)
+ {
+ hw_flush_vac_page_entry((unsigned long *) addr);
+ }
+ else
+ {
+ entries_left = vac_entries_per_page;
+ offset = vac_linesize;
+	    while(--entries_left >= 0)
+ {
+ sw_flush_vac_page_entry((unsigned long *) addr);
+ addr += offset;
+ }
+ }
+}
+