| author | Ralf Baechle <ralf@linux-mips.org> | 1999-10-09 00:00:47 +0000 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 1999-10-09 00:00:47 +0000 |
| commit | d6434e1042f3b0a6dfe1b1f615af369486f9b1fa | |
| tree | e2be02f33984c48ec019c654051d27964e42c441 | /arch/i386/kernel/process.c |
| parent | 609d1e803baf519487233b765eb487f9ec227a18 | |
Merge with 2.3.19.
Diffstat (limited to 'arch/i386/kernel/process.c')

-rw-r--r--  arch/i386/kernel/process.c  311

1 file changed, 126 insertions, 185 deletions
```diff
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 08dde1ed7..4937efec2 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -40,24 +40,18 @@
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
 
-#include "irq.h"
+#include <linux/irq.h>
 
 spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-#ifdef CONFIG_APM
-extern int apm_do_idle(void);
-extern void apm_do_busy(void);
-#endif
-
-static int hlt_counter=0;
-
-#define HARD_IDLE_TIMEOUT	(HZ / 3)
+int hlt_counter=0;
 
 void disable_hlt(void)
 {
@@ -69,103 +63,39 @@ void enable_hlt(void)
 	hlt_counter--;
 }
 
-#ifndef __SMP__
-
-static void hard_idle(void)
-{
-	while (!current->need_resched) {
-		if (boot_cpu_data.hlt_works_ok && !hlt_counter) {
-#ifdef CONFIG_APM
-			/* If the APM BIOS is not enabled, or there
-			   is an error calling the idle routine, we
-			   should hlt if possible.  We need to check
-			   need_resched again because an interrupt
-			   may have occurred in apm_do_idle(). */
-			start_bh_atomic();
-			if (!apm_do_idle() && !current->need_resched)
-				__asm__("hlt");
-			end_bh_atomic();
-#else
-			__asm__("hlt");
-#endif
-		}
-		if (current->need_resched)
-			break;
-		schedule();
-	}
-#ifdef CONFIG_APM
-	apm_do_busy();
-#endif
-}
-
 /*
- * The idle loop on a uniprocessor i386..
- */
-static int cpu_idle(void *unused)
-{
-	int work = 1;
-	unsigned long start_idle = 0;
-
-	/* endless idle loop with no priority at all */
-	current->priority = 0;
-	current->counter = -100;
-	init_idle();
-
-	for (;;) {
-		if (work)
-			start_idle = jiffies;
-
-		if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
-			hard_idle();
-		else {
-			if (boot_cpu_data.hlt_works_ok && !hlt_counter && !current->need_resched)
-				__asm__("hlt");
-		}
-
-		work = current->need_resched;
-		schedule();
-		check_pgt_cache();
-	}
-}
-
-#else
+ * Powermanagement idle function, if any..
+ */
+void (*acpi_idle)(void) = NULL;
 
 /*
- * This is being executed in task 0 'user space'.
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
  */
-
-int cpu_idle(void *unused)
+void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
+	init_idle();
 	current->priority = 0;
 	current->counter = -100;
-	init_idle();
 
-	while(1) {
-		if (current_cpu_data.hlt_works_ok && !hlt_counter &&
-				!current->need_resched)
-			__asm__("hlt");
-		/*
-		 * although we are an idle CPU, we do not want to
-		 * get into the scheduler unnecessarily.
-		 */
-		if (current->need_resched) {
-			schedule();
-			check_pgt_cache();
+	while (1) {
+		while (!current->need_resched) {
+			if (!current_cpu_data.hlt_works_ok)
+				continue;
+			if (hlt_counter)
+				continue;
+			asm volatile("sti ; hlt" : : : "memory");
 		}
+		schedule();
+		check_pgt_cache();
+		if (acpi_idle)
+			acpi_idle();
 	}
 }
 
-#endif
-
-asmlinkage int sys_idle(void)
-{
-	if (current->pid != 0)
-		return -EPERM;
-	cpu_idle(NULL);
-	return 0;
-}
-
 /*
  * This routine reboots the machine by asking the keyboard
  * controller to pulse the reset-line low. We try that for a while,
```
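The new idle loop halts with `asm volatile("sti ; hlt" : : : "memory")`, and two details of that one-liner carry the weight: on x86, `sti` only takes effect after the following instruction, so the pair enables interrupts and halts with no window in between, and the `"memory"` clobber forces the compiler to re-read `current->need_resched` on every pass. A minimal standalone sketch of the primitive (illustrative only, not part of the commit; the helper name is invented):

```c
/*
 * Illustrative model of the idle primitive above: halt until the next
 * interrupt, guaranteeing interrupts are enabled first so the CPU is
 * certain to wake up.  Hypothetical helper, not commit code.
 */
static inline void halt_until_interrupt(void)
{
	/*
	 * "sti" keeps interrupts masked for one more instruction, so
	 * nothing can be serviced between it and "hlt": the CPU
	 * atomically enables interrupts and sleeps.  The "memory"
	 * clobber prevents GCC from caching need_resched in a register
	 * across the halt.
	 */
	asm volatile("sti ; hlt" : : : "memory");
}
```

The diff continues with the reboot-parameter conversion and the machine_restart() and LDT changes: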
```diff
@@ -176,7 +106,7 @@
 static long no_idt[2] = {0, 0};
 static int reboot_mode = 0;
 static int reboot_thru_bios = 0;
 
-__initfunc(void reboot_setup(char *str, int *ints))
+static int __init reboot_setup(char *str)
 {
 	while(1) {
 		switch (*str) {
@@ -198,8 +128,10 @@
 		else
 			break;
 	}
+	return 1;
 }
+
+__setup("reboot=", reboot_setup);
 
 /* The following code and data reboots the machine by switching to real
    mode and jumping to the BIOS reset entry point, as if the CPU has
@@ -321,13 +253,9 @@ void machine_restart(char * __unused)
 	pg0[0] = _PAGE_RW | _PAGE_PRESENT;
 
 	/*
-	 * Use `swapper_pg_dir' as our page directory.  We bother with
-	 * `SET_PAGE_DIR' because although might be rebooting, but if we change
-	 * the way we set root page dir in the future, then we wont break a
-	 * seldom used feature ;)
+	 * Use `swapper_pg_dir' as our page directory.
 	 */
-
-	SET_PAGE_DIR(current,swapper_pg_dir);
+	asm volatile("movl %0,%%cr3": :"r" (__pa(swapper_pg_dir)));
 
 	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
 	   this on booting to tell it to "Bypass memory test (also warm
@@ -405,6 +333,7 @@ void show_regs(struct pt_regs * regs)
 		regs->esi, regs->edi, regs->ebp);
 	printk(" DS: %04x ES: %04x\n",
 		0xffff & regs->xds,0xffff & regs->xes);
+	__asm__("movl %%cr0, %0": "=r" (cr0));
 	__asm__("movl %%cr2, %0": "=r" (cr2));
 	__asm__("movl %%cr3, %0": "=r" (cr3));
@@ -475,11 +404,19 @@ void free_task_struct(struct task_struct *p)
 	free_pages((unsigned long) p, 1);
 }
 
+/*
+ * No need to lock the MM as we are the last user
+ */
 void release_segments(struct mm_struct *mm)
 {
-	if (mm->segments) {
-		void * ldt = mm->segments;
+	void * ldt = mm->segments;
+
+	/*
+	 * free the LDT
+	 */
+	if (ldt) {
 		mm->segments = NULL;
+		clear_LDT();
 		vfree(ldt);
 	}
 }
@@ -492,10 +429,9 @@ void forget_segments(void)
 		: "r" (0));
 
 	/*
-	 * Get the LDT entry from init_task.
+	 * Load the LDT entry of init_task.
 	 */
-	current->tss.ldt = _LDT(0);
-	load_ldt(0);
+	load_LDT(&init_mm);
 }
 
 /*
@@ -537,12 +473,9 @@ void exit_thread(void)
 
 void flush_thread(void)
 {
-	int i;
 	struct task_struct *tsk = current;
 
-	for (i=0 ; i<8 ; i++)
-		tsk->tss.debugreg[i] = 0;
-
+	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
 	/*
 	 * Forget coprocessor state..
 	 */
```
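The `reboot=` hunks above also show the conversion from the old `__initfunc(... char *str, int *ints)` prototype to the then-new `__setup()` registration: the handler now receives only the text after the `=` and returns 1 to mark the option consumed. As a sketch of the same pattern, a hypothetical boot option in this style would look like the following (the `myopt_*` names are invented for illustration):

```c
#include <linux/init.h>
#include <linux/string.h>

static int myopt_fast;			/* hypothetical flag */

/* Parse "myopt=..." from the kernel command line, 2.3.x style. */
static int __init myopt_setup(char *str)
{
	if (!strcmp(str, "fast"))
		myopt_fast = 1;
	return 1;			/* option consumed */
}

__setup("myopt=", myopt_setup);
```

The diff continues with the LDT and thread-state hunks: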
```diff
@@ -552,33 +485,45 @@ void flush_thread(void)
 
 void release_thread(struct task_struct *dead_task)
 {
+	if (dead_task->mm) {
+		void * ldt = dead_task->mm->segments;
+
+		// temporary debugging check
+		if (ldt) {
+			printk("WARNING: dead process %8s still has LDT? <%p>\n",
+					dead_task->comm, ldt);
+			BUG();
+		}
+	}
 }
 
 /*
- * If new_mm is NULL, we're being called to set up the LDT descriptor
- * for a clone task. Each clone must have a separate entry in the GDT.
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
  */
-void copy_segments(int nr, struct task_struct *p, struct mm_struct *new_mm)
+void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
 {
 	struct mm_struct * old_mm = current->mm;
 	void * old_ldt = old_mm->segments, * ldt = old_ldt;
 
-	/* default LDT - use the one from init_task */
-	p->tss.ldt = _LDT(0);
-	if (old_ldt) {
-		if (new_mm) {
-			ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
-			new_mm->segments = ldt;
-			if (!ldt) {
-				printk(KERN_WARNING "ldt allocation failed\n");
-				return;
-			}
-			memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
-		}
-		p->tss.ldt = _LDT(nr);
-		set_ldt_desc(nr, ldt, LDT_ENTRIES);
+	if (!old_mm->segments) {
+		/*
+		 * default LDT - use the one from init_task
+		 */
+		new_mm->segments = NULL;
 		return;
 	}
+
+	/*
+	 * Completely new LDT, we initialize it from the parent:
+	 */
+	ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
+	if (!ldt)
+		printk(KERN_WARNING "ldt allocation failed\n");
+	else
+		memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
+	new_mm->segments = ldt;
+	return;
 }
 
 /*
@@ -592,31 +537,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 {
 	struct pt_regs * childregs;
 
-	childregs = ((struct pt_regs *) (2*PAGE_SIZE + (unsigned long) p)) - 1;
+	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
 	*childregs = *regs;
 	childregs->eax = 0;
 	childregs->esp = esp;
 
-	p->tss.esp = (unsigned long) childregs;
-	p->tss.esp0 = (unsigned long) (childregs+1);
-	p->tss.ss0 = __KERNEL_DS;
+	p->thread.esp = (unsigned long) childregs;
+	p->thread.esp0 = (unsigned long) (childregs+1);
 
-	p->tss.tr = _TSS(nr);
-	set_tss_desc(nr,&(p->tss));
-	p->tss.eip = (unsigned long) ret_from_fork;
+	p->thread.eip = (unsigned long) ret_from_fork;
 
-	savesegment(fs,p->tss.fs);
-	savesegment(gs,p->tss.gs);
-
-	/*
-	 * a bitmap offset pointing outside of the TSS limit causes a nicely
-	 * controllable SIGSEGV. The first sys_ioperm() call sets up the
-	 * bitmap properly.
-	 */
-	p->tss.bitmap = sizeof(struct thread_struct);
+	savesegment(fs,p->thread.fs);
+	savesegment(gs,p->thread.gs);
 
 	unlazy_fpu(current);
-	p->tss.i387 = current->tss.i387;
+	p->thread.i387 = current->thread.i387;
 
 	return 0;
 }
@@ -632,7 +567,7 @@ int dump_fpu (struct pt_regs * regs, struct user_i387_struct* fpu)
 	fpvalid = tsk->used_math;
 	if (fpvalid) {
 		unlazy_fpu(tsk);
-		memcpy(fpu,&tsk->tss.i387.hard,sizeof(*fpu));
+		memcpy(fpu,&tsk->thread.i387.hard,sizeof(*fpu));
 	}
 
 	return fpvalid;
@@ -654,7 +589,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 	dump->u_dsize -= dump->u_tsize;
 	dump->u_ssize = 0;
 	for (i = 0; i < 8; i++)
-		dump->u_debugreg[i] = current->tss.debugreg[i];
+		dump->u_debugreg[i] = current->thread.debugreg[i];
 
 	if (dump->start_stack < TASK_SIZE)
 		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
@@ -683,11 +618,10 @@
 /*
  * This special macro can be used to load a debugging register
  */
-#define loaddebug(tsk,register) \
-__asm__("movl %0,%%db" #register \
-	: /* no output */ \
-	:"r" (tsk->tss.debugreg[register]))
-
+#define loaddebug(thread,register) \
+__asm__("movl %0,%%db" #register \
+	: /* no output */ \
+	:"r" (thread->debugreg[register]))
 
 /*
  * switch_to(x,yn) should switch tasks from x to y.
```
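The rewritten `loaddebug()` macro relies on the preprocessor's `#` stringizing operator: `#register` turns the macro argument into a string literal, and C concatenates adjacent literals, so `loaddebug(next, 7)` expands to `__asm__("movl %0,%%db7" : : "r" (next->debugreg[7]))`. The same trick in isolation, as a runnable userspace snippet (illustrative only, not commit code):

```c
#include <stdio.h>

/*
 * "#n" stringizes the macro argument; the adjacent string literals
 * are then concatenated, exactly how loaddebug() builds "movl %0,%%db7".
 */
#define REGNAME(n) "%db" #n

int main(void)
{
	printf("%s\n", REGNAME(7));	/* prints %db7 */
	return 0;
}
```

The final hunk rewrites the context switch itself: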
```diff
@@ -712,60 +646,67 @@
  * More important, however, is the fact that this allows us much
  * more flexibility.
  */
-void __switch_to(struct task_struct *prev, struct task_struct *next)
+extern int cpus_initialized;
+void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
-	/* Do the FPU save and set TS if it wasn't set before.. */
-	unlazy_fpu(prev);
+	struct thread_struct *prev = &prev_p->thread,
+				 *next = &next_p->thread;
+	struct tss_struct *tss = init_tss + smp_processor_id();
+
+	unlazy_fpu(prev_p);
 
 	/*
-	 * Reload TR, LDT and the page table pointers..
-	 *
-	 * We need TR for the IO permission bitmask (and
-	 * the vm86 bitmasks in case we ever use enhanced
-	 * v86 mode properly).
-	 *
-	 * We may want to get rid of the TR register some
-	 * day, and copy the bitmaps around by hand. Oh,
-	 * well. In the meantime we have to clear the busy
-	 * bit in the TSS entry, ugh.
+	 * Reload esp0, LDT and the page table pointer:
 	 */
-	gdt_table[next->tss.tr >> 3].b &= 0xfffffdff;
-	asm volatile("ltr %0": :"g" (*(unsigned short *)&next->tss.tr));
+	tss->esp0 = next->esp0;
 
 	/*
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs));
-
-	/* Re-load LDT if necessary */
-	if (next->mm->segments != prev->mm->segments)
-		asm volatile("lldt %0": :"g" (*(unsigned short *)&next->tss.ldt));
-
-	/* Re-load page tables */
-	{
-		unsigned long new_cr3 = next->tss.cr3;
-		if (new_cr3 != prev->tss.cr3)
-			asm volatile("movl %0,%%cr3": :"r" (new_cr3));
-	}
+	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
+	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
 
 	/*
 	 * Restore %fs and %gs.
 	 */
-	loadsegment(fs,next->tss.fs);
-	loadsegment(gs,next->tss.gs);
+	loadsegment(fs, next->fs);
+	loadsegment(gs, next->gs);
 
 	/*
 	 * Now maybe reload the debug registers
 	 */
-	if (next->tss.debugreg[7]){
-		loaddebug(next,0);
-		loaddebug(next,1);
-		loaddebug(next,2);
-		loaddebug(next,3);
-		loaddebug(next,6);
-		loaddebug(next,7);
+	if (next->debugreg[7]){
+		loaddebug(next, 0);
+		loaddebug(next, 1);
+		loaddebug(next, 2);
+		loaddebug(next, 3);
+		/* no 4 and 5 */
+		loaddebug(next, 6);
+		loaddebug(next, 7);
+	}
+
+	if (prev->ioperm || next->ioperm) {
+		if (next->ioperm) {
+			/*
+			 * 4 cachelines copy ... not good, but not that
+			 * bad either. Anyone got something better?
+			 * This only affects processes which use ioperm().
+			 * [Putting the TSSs into 4k-tlb mapped regions
+			 * and playing VM tricks to switch the IO bitmap
+			 * is not really acceptable.]
+			 */
+			memcpy(tss->io_bitmap, next->io_bitmap,
+				IO_BITMAP_SIZE*sizeof(unsigned long));
+			tss->bitmap = IO_BITMAP_OFFSET;
+		} else
+			/*
+			 * a bitmap offset pointing outside of the TSS limit
+			 * causes a nicely controllable SIGSEGV if a process
+			 * tries to use a port IO instruction. The first
+			 * sys_ioperm() call sets up the bitmap properly.
+			 */
+			tss->bitmap = INVALID_IO_BITMAP_OFFSET;
 	}
 }
```
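The big structural change in `__switch_to()` is that the per-task TSS, and with it the `ltr` reload and busy-bit clearing on every switch, is gone: there is now one `init_tss` entry per CPU, and the switch path just patches `esp0` and, lazily, the IO permission bitmap. Condensing that bitmap policy from the hunk above into a standalone helper gives the following sketch (an illustrative restatement, not commit code; the helper name is invented, and the types come from this tree's <asm/processor.h>):

```c
#include <linux/string.h>

/*
 * Lazy IO-bitmap switching, condensed from __switch_to() above
 * (illustrative helper, not commit code).  The per-CPU TSS carries
 * a valid bitmap only while a task that called ioperm() is running.
 */
static void switch_io_bitmap(struct thread_struct *prev,
			     struct thread_struct *next,
			     struct tss_struct *tss)
{
	if (!prev->ioperm && !next->ioperm)
		return;		/* common case: no port-IO users involved */

	if (next->ioperm) {
		/* incoming task uses ioperm(): install its bitmap */
		memcpy(tss->io_bitmap, next->io_bitmap,
		       IO_BITMAP_SIZE * sizeof(unsigned long));
		tss->bitmap = IO_BITMAP_OFFSET;
	} else {
		/*
		 * Park the bitmap offset outside the TSS limit: any
		 * port-IO instruction now raises a controllable SIGSEGV
		 * until a sys_ioperm() call installs a real bitmap.
		 */
		tss->bitmap = INVALID_IO_BITMAP_OFFSET;
	}
}
```

The trade-off, as the in-diff comment notes, is a four-cacheline copy on switches involving `ioperm()` users, in exchange for a single `esp0` store in the common case.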