path: root/arch/i386/kernel/smpboot.c
author    Ralf Baechle <ralf@linux-mips.org>    2000-10-05 01:18:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-10-05 01:18:40 +0000
commit  012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree    87efc733f9b164e8c85c0336f92c8fb7eff6d183 /arch/i386/kernel/smpboot.c
parent  625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation; I had a number of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I am committing this anyway, since I found that the last CVS versions had the same problem.
Diffstat (limited to 'arch/i386/kernel/smpboot.c')
-rw-r--r--  arch/i386/kernel/smpboot.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 7629bb3d2..6092aec3b 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -46,7 +46,7 @@
#include <asm/pgalloc.h>
/* Set if we find a B stepping CPU */
-static int smp_b_stepping = 0;
+static int smp_b_stepping;
/* Setup configured maximum number of CPUs to activate */
static int max_cpus = -1;
@@ -55,21 +55,21 @@ static int max_cpus = -1;
int smp_num_cpus = 1;
/* Bitmask of currently online CPUs */
-unsigned long cpu_online_map = 0;
+unsigned long cpu_online_map;
/* which CPU (physical APIC ID) maps to which logical CPU number */
volatile int x86_apicid_to_cpu[NR_CPUS];
/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int x86_cpu_to_apicid[NR_CPUS];
-static volatile unsigned long cpu_callin_map = 0;
-static volatile unsigned long cpu_callout_map = 0;
+static volatile unsigned long cpu_callin_map;
+static volatile unsigned long cpu_callout_map;
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS];
/* Set when the idlers are all forked */
-int smp_threads_ready = 0;
+int smp_threads_ready;
/*
* Setup routine for controlling SMP activation
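Note on the hunks above: dropping the explicit "= 0" initializers is a coding-style cleanup, not a behavioural change. C guarantees that objects with static storage duration start out as zero, and with the toolchains of that era an explicit zero initializer could push the variable into the on-disk .data section instead of .bss. A minimal sketch of the idiom, illustrative and not taken from this file:

/* Illustrative only: both variables start life as zero, but with the
 * compilers of this period only the uninitialized form reliably ends up
 * in .bss, so it costs no space in the on-disk image. */
static int explicitly_zeroed = 0;   /* may be emitted into .data */
static int implicitly_zeroed;       /* zero-filled at load time, in .bss */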
@@ -194,7 +194,7 @@ void __init smp_commence(void)
static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS] = { 0, };
+static unsigned long long tsc_values[NR_CPUS];
#define NR_LOOPS 5
@@ -438,7 +438,7 @@ void __init smp_callin(void)
synchronize_tsc_ap();
}
-int cpucount = 0;
+int cpucount;
extern int cpu_idle(void);
@@ -497,7 +497,7 @@ static int __init fork_by_hand(void)
* don't care about the eip and regs settings since
* we'll never reschedule the forked task.
*/
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs);
+ return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}
#if APIC_DEBUG
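The extra trailing 0 passed to do_fork() in the hunk above matches a parameter added to that function in the 2.4.0-test9 tree. Quoted from memory, and therefore an assumption to verify against kernel/fork.c of that tree, the prototype at this point reads roughly:

/* Assumed 2.4.0-test9 prototype (verify against kernel/fork.c).
 * The new trailing argument is the child's stack size; forking the
 * idle thread by hand passes 0 because no user stack is set up here. */
extern int do_fork(unsigned long clone_flags, unsigned long stack_start,
                   struct pt_regs *regs, unsigned long stack_size);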
@@ -774,7 +774,7 @@ static void __init do_boot_cpu (int apicid)
}
cycles_t cacheflush_time;
-extern unsigned long cpu_hz;
+extern unsigned long cpu_khz;
static void smp_tune_scheduling (void)
{
@@ -791,7 +791,7 @@ static void smp_tune_scheduling (void)
* the cache size)
*/
- if (!cpu_hz) {
+ if (!cpu_khz) {
/*
* this basically disables processor-affinity
* scheduling on SMP without a TSC.
@@ -805,12 +805,12 @@ static void smp_tune_scheduling (void)
bandwidth = 100;
}
- cacheflush_time = (cpu_hz>>20) * (cachesize<<10) / bandwidth;
+ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
}
printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
- (long)cacheflush_time/(cpu_hz/1000000),
- ((long)cacheflush_time*100/(cpu_hz/1000000)) % 100);
+ (long)cacheflush_time/(cpu_khz/1000),
+ ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
}
/*
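The last hunk is a unit conversion rather than a formula change: cpu_hz >> 20 divides by 2^20 and so approximates the clock in MHz, while cpu_khz >> 10 divides by 2^10 and gives the same approximation, so cacheflush_time keeps its meaning of roughly MHz * cache size in bytes / bandwidth. The same holds for the printk, where cpu_hz/1000000 and cpu_khz/1000 both yield MHz. A small stand-alone check, assuming a hypothetical 500 MHz part:

#include <stdio.h>

/* Illustrative userspace check, not kernel code: both scalings give a
 * binary approximation of the clock frequency in MHz. */
int main(void)
{
	unsigned long cpu_hz = 500000000UL;     /* assumed 500 MHz CPU */
	unsigned long cpu_khz = cpu_hz / 1000;  /* what the kernel now keeps */

	/* Prints "476 488": slightly different rounding, same order of
	 * magnitude, so the cacheflush_time heuristic is unaffected. */
	printf("%lu %lu\n", cpu_hz >> 20, cpu_khz >> 10);
	return 0;
}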