summary | refs | log | tree | commit | diff | stats
path: root/arch/i386/mm
diff options
context:
space:
mode:
author    Ralf Baechle <ralf@linux-mips.org>  2000-02-04 07:40:19 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-02-04 07:40:19 +0000
commit 33263fc5f9ac8e8cb2b22d06af3ce5ac1dd815e4 (patch)
tree   2d1b86a40bef0958a68cf1a2eafbeb0667a70543 /arch/i386/mm
parent 216f5f51aa02f8b113aa620ebc14a9631a217a00 (diff)
Merge with Linux 2.3.32.
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--  arch/i386/mm/fault.c   |   2
-rw-r--r--  arch/i386/mm/init.c    | 113
-rw-r--r--  arch/i386/mm/ioremap.c |  17
3 files changed, 90 insertions, 42 deletions
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index b2a98859b..618b36544 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -19,7 +19,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/hardirq.h>
extern void die(const char *,struct pt_regs *,long);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7a05654d8..b99daee84 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -30,6 +30,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
@@ -75,7 +76,7 @@ static pmd_t * get_bad_pmd_table(void)
pmd_t v;
int i;
- pmd_val(v) = _PAGE_TABLE + __pa(empty_bad_pte_table);
+ set_pmd(&v, __pmd(_PAGE_TABLE + __pa(empty_bad_pte_table)));
for (i = 0; i < PAGE_SIZE/sizeof(pmd_t); i++)
empty_bad_pmd_table[i] = v;
@@ -102,13 +103,13 @@ static pte_t * get_bad_pte_table(void)
void __handle_bad_pmd(pmd_t *pmd)
{
pmd_ERROR(*pmd);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
}
void __handle_bad_pmd_kernel(pmd_t *pmd)
{
pmd_ERROR(*pmd);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
}
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
@@ -119,10 +120,10 @@ pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
if (pmd_none(*pmd)) {
if (pte) {
clear_page(pte);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
return pte + offset;
}
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
return NULL;
}
free_page((unsigned long)pte);
@@ -141,10 +142,10 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
if (pmd_none(*pmd)) {
if (pte) {
clear_page((void *)pte);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
return (pte_t *)pte + offset;
}
- pmd_val(*pmd) = _PAGE_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
return NULL;
}
free_page(pte);
@@ -267,7 +268,7 @@ void set_fixmap (enum fixed_addresses idx, unsigned long phys)
printk("Invalid set_fixmap\n");
return;
}
- set_pte_phys (address,phys);
+ set_pte_phys(address,phys);
}
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
@@ -285,8 +286,7 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
#if CONFIG_X86_PAE
if (pgd_none(*pgd)) {
pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- memset((void*)pmd, 0, PAGE_SIZE);
- pgd_val(*pgd) = __pa(pmd) + 0x1;
+ set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
if (pmd != pmd_offset(pgd, start))
BUG();
}
@@ -297,8 +297,7 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
for (; (j < PTRS_PER_PMD) && start; pmd++, j++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- memset((void*)pte, 0, PAGE_SIZE);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
if (pte != pte_offset(pmd, 0))
BUG();
}
@@ -327,8 +326,7 @@ static void __init pagetable_init(void)
vaddr = i*PGDIR_SIZE;
#if CONFIG_X86_PAE
pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- memset((void*)pmd, 0, PAGE_SIZE);
- pgd_val(*pgd) = __pa(pmd) + 0x1;
+ set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
#else
pmd = (pmd_t *)pgd;
#endif
@@ -347,13 +345,12 @@ static void __init pagetable_init(void)
set_in_cr4(X86_CR4_PGE);
__pe += _PAGE_GLOBAL;
}
- pmd_val(*pmd) = __pe;
+ set_pmd(pmd, __pmd(__pe));
continue;
}
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- memset((void*)pte, 0, PAGE_SIZE);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
if (pte != pte_offset(pmd, 0))
BUG();
@@ -379,7 +376,7 @@ static void __init pagetable_init(void)
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + 4*1024*1024, pgd_base);
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pmd = pmd_offset(pgd, vaddr);
@@ -412,7 +409,11 @@ void __init zap_low_mappings (void)
* that case).
*/
for (i = 0; i < USER_PTRS_PER_PGD; i++)
- pgd_val(swapper_pg_dir[i]) = 0;
+#if CONFIG_X86_PAE
+ pgd_clear(swapper_pg_dir+i);
+#else
+ set_pgd(swapper_pg_dir+i, __pgd(0));
+#endif
flush_tlb_all();
}
@@ -448,13 +449,22 @@ void __init paging_init(void)
kmap_init();
#endif
{
- unsigned int zones_size[3];
-
- zones_size[0] = virt_to_phys((char *)MAX_DMA_ADDRESS)
- >> PAGE_SHIFT;
- zones_size[1] = max_low_pfn - zones_size[0];
- zones_size[2] = highend_pfn - zones_size[0] - zones_size[1];
-
+ unsigned int zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int max_dma, high, low;
+
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ if (low < max_dma)
+ zones_size[ZONE_DMA] = low;
+ else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = low - max_dma;
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+ }
free_area_init(zones_size);
}
return;
@@ -514,13 +524,18 @@ static inline int page_is_ram (unsigned long pagenr)
int i;
for (i = 0; i < e820.nr_map; i++) {
- unsigned long addr, size;
+ unsigned long addr, end;
if (e820.map[i].type != E820_RAM) /* not usable memory */
continue;
+ /*
+ * !!!FIXME!!! Some BIOSen report areas as RAM that
+ * are not. Notably the 640->1Mb area. We need a sanity
+ * check here.
+ */
addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
- size = e820.map[i].size >> PAGE_SHIFT;
- if ((pagenr >= addr) && (pagenr < addr+size))
+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+ if ((pagenr >= addr) && (pagenr < end))
return 1;
}
return 0;
@@ -528,15 +543,13 @@ static inline int page_is_ram (unsigned long pagenr)
void __init mem_init(void)
{
- int codepages = 0;
- int reservedpages = 0;
- int datapages = 0;
- int initpages = 0;
-#ifdef CONFIG_HIGHMEM
+ int codesize, reservedpages, datasize, initsize;
int tmp;
if (!mem_map)
BUG();
+
+#ifdef CONFIG_HIGHMEM
highmem_start_page = mem_map + highstart_pfn;
/* cache the highmem_mapnr */
highmem_mapnr = highstart_pfn;
@@ -552,6 +565,13 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
+ reservedpages++;
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = mem_map + tmp;
@@ -568,19 +588,23 @@ void __init mem_init(void)
}
totalram_pages += totalhigh_pages;
#endif
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
+ codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
);
#if CONFIG_X86_PAE
if (!cpu_has_pae)
- panic("cannot execute a PAE-enabled kernel on a PAE-incapable CPU!");
+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
if (boot_cpu_data.wp_works_ok < 0)
test_wp_bit();
@@ -611,6 +635,19 @@ void free_initmem(void)
printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(mem_map + MAP_NR(start));
+ set_page_count(mem_map+MAP_NR(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+}
+#endif
+
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index d69455310..cb4427672 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -10,6 +10,7 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
+#include <asm/pgalloc.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
@@ -118,8 +119,18 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- if (phys_addr < virt_to_phys(high_memory))
- return NULL;
+ if (phys_addr < virt_to_phys(high_memory)) {
+ char *t_addr, *t_end;
+ int i;
+
+ t_addr = __va(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(i = MAP_NR(t_addr); i < MAP_NR(t_end); i++) {
+ if(!PageReserved(mem_map + i))
+ return NULL;
+ }
+ }
/*
* Mappings have to be page-aligned
@@ -131,7 +142,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
/*
* Ok, go for it..
*/
- area = get_vm_area(size);
+ area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
addr = area->addr;