summary refs log tree commit diff stats
path: root/arch/arm
diff options
context:
space:
mode:
author    Ralf Baechle <ralf@linux-mips.org>  2000-11-23 02:00:47 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-11-23 02:00:47 +0000
commit    06615f62b17d7de6e12d2f5ec6b88cf30af08413 (patch)
tree      8766f208847d4876a6db619aebbf54d53b76eb44 /arch/arm
parent    fa9bdb574f4febb751848a685d9a9017e04e1d53 (diff)
Merge with Linux 2.4.0-test10.
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/config.in          3
-rw-r--r--  arch/arm/kernel/semaphore.c 6
-rw-r--r--  arch/arm/mm/init.c          18
-rw-r--r--  arch/arm/mm/mm-armv.c       2
4 files changed, 15 insertions, 14 deletions
diff --git a/arch/arm/config.in b/arch/arm/config.in
index 1a79b55b9..fff056940 100644
--- a/arch/arm/config.in
+++ b/arch/arm/config.in
@@ -213,7 +213,8 @@ if [ "$CONFIG_ARCH_NEXUSPCI" = "y" -o \
define_bool CONFIG_PCI y
else
if [ "$CONFIG_ARCH_INTEGRATOR" = "y" ]; then
- bool 'PCI support' CONFIG_PCI
+ bool 'PCI support' CONFIG_PCI_INTEGRATOR
+ define_bool CONFIG_PCI $CONFIG_PCI_INTEGRATOR
else
define_bool CONFIG_PCI n
fi
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
index e01a6417e..f89852641 100644
--- a/arch/arm/kernel/semaphore.c
+++ b/arch/arm/kernel/semaphore.c
@@ -19,8 +19,8 @@
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
- * that tries to aquire the semaphore, while the "sleeping"
- * variable is a count of such aquires.
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
@@ -257,7 +257,7 @@ struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
while (atomic_read(&sem->count) < 0) {
set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
if (atomic_read(&sem->count) >= 0)
- break; /* we must attempt to aquire or bias the lock */
+ break; /* we must attempt to acquire or bias the lock */
schedule();
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a235232d9..ea833c58d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -56,7 +56,7 @@ extern char _stext, _text, _etext, _end, __init_begin, __init_end;
* The sole use of this is to pass memory configuration
* data from paging_init to mem_init.
*/
-static struct meminfo __initdata meminfo;
+static struct meminfo meminfo __initdata = { 0, };
/*
* empty_bad_page is the page that is used for page faults when
@@ -383,27 +383,27 @@ static inline void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
* Register the kernel text and data with bootmem.
* Note that this can only be in node 0.
*/
- reserve_bootmem_node(0, __pa(&_stext), &_end - &_stext);
+ reserve_bootmem_node(NODE_DATA(0), __pa(&_stext), &_end - &_stext);
#ifdef CONFIG_CPU_32
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
- reserve_bootmem_node(0, __pa(swapper_pg_dir),
+ reserve_bootmem_node(NODE_DATA(0), __pa(swapper_pg_dir),
PTRS_PER_PGD * sizeof(void *));
#else
/*
* Stop this memory from being grabbed - its special DMA
* memory that is required for the screen.
*/
- reserve_bootmem_node(0, 0x02000000, 0x00080000);
+ reserve_bootmem_node(NODE_DATA(0), 0x02000000, 0x00080000);
#endif
/*
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
- reserve_bootmem_node(0, bootmap_pfn << PAGE_SHIFT,
+ reserve_bootmem_node(NODE_DATA(0), bootmap_pfn << PAGE_SHIFT,
bootmap_pages << PAGE_SHIFT);
}
@@ -416,7 +416,7 @@ static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
for (bank = 0; bank < mi->nr_banks; bank++)
if (mi->bank[bank].node == node)
- free_bootmem_node(node, mi->bank[bank].start,
+ free_bootmem_node(NODE_DATA(node), mi->bank[bank].start,
mi->bank[bank].size);
}
@@ -450,7 +450,7 @@ void __init bootmem_init(struct meminfo *mi)
/*
* Initialise the bootmem allocator.
*/
- init_bootmem_node(node, map_pg, np->start, np->end);
+ init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
free_bootmem_node_bank(node, mi);
map_pg += np->bootmap_pages;
@@ -465,7 +465,7 @@ void __init bootmem_init(struct meminfo *mi)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_node >= 0)
- reserve_bootmem_node(initrd_node, __pa(initrd_start),
+ reserve_bootmem_node(NODE_DATA(initrd_node), __pa(initrd_start),
initrd_end - initrd_start);
#endif
@@ -583,7 +583,7 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
for (node = 0; node < numnodes; node++)
- totalram_pages += free_all_bootmem_node(node);
+ totalram_pages += free_all_bootmem_node(NODE_DATA(node));
/*
* Since our memory may not be contiguous, calculate the
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 9ddcc84bc..2ace55d47 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -438,7 +438,7 @@ static inline void free_memmap(int node, unsigned long start, unsigned long end)
start = __virt_to_phys(pg);
end = __virt_to_phys(pgend);
- free_bootmem_node(node, start, end - start);
+ free_bootmem_node(NODE_DATA(node), start, end - start);
}
static inline void free_unused_memmap_node(int node, struct meminfo *mi)