/* $Id: mmu_context.h,v 1.26 1998/07/31 10:42:38 jj Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm/spinlock.h>

#define NO_CONTEXT      0

#ifndef __ASSEMBLY__

extern unsigned long tlb_context_cache;
extern spinlock_t scheduler_lock;
extern unsigned long mmu_context_bmap[];

#define CTX_VERSION_SHIFT       (PAGE_SHIFT - 3)
#define CTX_VERSION_MASK        ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION       ((1UL << CTX_VERSION_SHIFT) + 1UL)
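
/* An mm's context value packs two fields: the low CTX_VERSION_SHIFT bits
 * are the hardware context number (each number in use has its bit set in
 * mmu_context_bmap), and the bits above them are a "version" stamp that
 * must match tlb_context_cache for the context to be valid.  With the 8K
 * pages used here PAGE_SHIFT is 13, so the number field is 10 bits wide,
 * which is where the 0x3ff masks below come from.  When the numbers for
 * the current version run out, get_new_mmu_context() moves to a new
 * version, implicitly invalidating every context handed out before.
 */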
extern void get_new_mmu_context(struct mm_struct *mm);

/* Initialize/destroy the context-related info for a new mm_struct
 * instance.
 */
#define init_new_context(mm)    ((mm)->context = NO_CONTEXT)
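
/* Freeing a context is just a bitmap operation, and only needed if the
 * context still belongs to the live version; one from an older version
 * was already abandoned wholesale when the version last rolled over.
 */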
#define destroy_context(mm)     do {                                            \
        if ((mm)->context != NO_CONTEXT) {                                      \
                spin_lock(&scheduler_lock);                                     \
                if (!(((mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))  \
                        clear_bit((mm)->context & ~(CTX_VERSION_MASK),          \
                                  mmu_context_bmap);                            \
                spin_unlock(&scheduler_lock);                                   \
                (mm)->context = NO_CONTEXT;                                     \
        }                                                                       \
} while (0)
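
/* Pull the MMU state for tsk->mm onto this CPU (presumably at context
 * switch, under scheduler_lock, which also guards the context bitmap):
 * allocate a fresh context if the version is stale, program the
 * secondary-context register, and hand the pgd to the TLB-miss handlers.
 */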
extern __inline__ void get_mmu_context(struct task_struct *tsk)
{
        register unsigned long paddr asm("o5");
        register unsigned long pgd_cache asm("o4");
        struct mm_struct *mm = tsk->mm;

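        /* Spill the task's user register windows to its stack now, while
         * the old address space is still the one that is mapped.
         */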
        flushw_user();
        if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
           !(tsk->flags & PF_EXITING)) {
                unsigned long ctx = tlb_context_cache;

                /* A version mismatch means every context from the old
                 * generation is dead; grab a fresh one.
                 */
                if((mm->context ^ ctx) & CTX_VERSION_MASK)
                        get_new_mmu_context(mm);

                /* First run of this mm on this CPU: flush any stale TLB
                 * entries left behind under its context number.
                 */
                if(!(mm->cpu_vm_mask & (1UL << smp_processor_id()))) {
                        spitfire_set_secondary_context(mm->context & 0x3ff);
                        __asm__ __volatile__("flush %g6");
                        spitfire_flush_dtlb_secondary_context();
                        spitfire_flush_itlb_secondary_context();
                        __asm__ __volatile__("flush %g6");
                }
                /* Don't worry, set_fs() will restore it... */
                /* Sigh, damned include loops... just poke seg directly. */
                tsk->tss.ctx = (tsk->tss.current_ds.seg ?
                                (mm->context & 0x3ff) : 0);
        } else
                tsk->tss.ctx = 0;
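
        /* Make the chosen context live.  The kernel keeps the user's
         * context in the secondary-context register so that user-space
         * accesses from kernel mode resolve through the right mappings.
         */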
        spitfire_set_secondary_context(tsk->tss.ctx);
        __asm__ __volatile__("flush %g6");

        paddr = __pa(mm->pgd);
        if(tsk->tss.flags & SPARC_FLAG_32BIT)
                pgd_cache = (unsigned long) mm->pgd[0];
        else
                pgd_cache = 0;
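
        /* Publish the new pgd where the TLB-miss handlers look for it:
         * %g7 of the MMU-global register set gets the pgd's physical
         * address, and the DMMU TSB register is seeded with the
         * pgd_cache hint.  wrpr XORs its operands, so the first wrpr
         * switches onto the MG globals with interrupts toggled, and the
         * last restores the original %pstate; %o3 is scratch.
         */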
__asm__ __volatile__("
rdpr %%pstate, %%o3
wrpr %%o3, %2, %%pstate
mov %4, %%g4
mov %0, %%g7
stxa %1, [%%g4] %3
wrpr %%o3, 0x0, %%pstate
" : /* no outputs */
: "r" (paddr), "r" (pgd_cache), "i" (PSTATE_MG|PSTATE_IE),
"i" (ASI_DMMU), "i" (TSB_REG)
: "o3");
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_context(tsk)   get_mmu_context(tsk)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */