#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
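
/*
 * Illustration only (not part of this header): alloc_sid() and free_sid()
 * are defined elsewhere in arch/parisc.  A minimal bitmap-based space-id
 * allocator consistent with the comments in this file might look like the
 * sketch below; NR_SIDS, sid_lock and sid_map are hypothetical names.
 * Note that free_sid(0) must be a no-op, since destroy_context() below
 * passes mm->context unconditionally and kernel threads use spaceid 0.
 *
 *	static spinlock_t sid_lock = SPIN_LOCK_UNLOCKED;
 *	static unsigned long sid_map[NR_SIDS / BITS_PER_LONG] = { 1 };
 *					// bit 0 pre-set: sid 0 is the kernel's
 *
 *	unsigned long alloc_sid(void)
 *	{
 *		unsigned long sid;
 *
 *		spin_lock(&sid_lock);
 *		sid = find_first_zero_bit(sid_map, NR_SIDS);
 *		if (sid < NR_SIDS)
 *			set_bit(sid, sid_map);
 *		spin_unlock(&sid_lock);
 *		return sid < NR_SIDS ? sid : 0;	// 0 on exhaustion
 *	}
 *
 *	void free_sid(unsigned long sid)
 *	{
 *		spin_lock(&sid_lock);
 *		if (sid)
 *			clear_bit(sid, sid_map);
 *		spin_unlock(&sid_lock);
 *	}
 */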

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/*
	 * Init_new_context can be called for a cloned mm, so we
	 * only allocate a space id if one hasn't been allocated
	 * yet AND mm != &init_mm (cloned kernel thread which
	 * will run in the kernel space with spaceid 0).
	 */
	if ((mm != &init_mm) && (mm->context == 0)) {
		mm->context = alloc_sid();
	}

	return 0;
}

static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context);
	mm->context = 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
	if (prev != next) {
		/* Re-load page tables: %cr25 holds the physical address
		 * of the page directory, which the TLB miss handlers
		 * read to walk the new address space. */
		tsk->thread.pg_tables = __pa(next->pgd);
		mtctl(tsk->thread.pg_tables, 25);
		/* Load the new space id into %sr3, the space register
		 * used for user-space accesses. */
		mtsp(next->context, 3);
	}
}
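
/*
 * Caller-side sketch (simplified, for illustration): on a context switch
 * the scheduler either switches address spaces or, for a kernel thread,
 * keeps the old one lazily.  Roughly:
 *
 *	if (next->mm)
 *		switch_mm(prev->active_mm, next->mm, next, cpu);
 *	else	// kernel thread: borrow the previous address space
 *		enter_lazy_tlb(prev->active_mm, next, cpu);
 */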

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path.  There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */
	if (next == &init_mm) BUG();	/* Should never happen */

	if (next->context == 0)
		next->context = alloc_sid();

	switch_mm(prev, next, current, 0);
}
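
/*
 * Summary of the space-id lifecycle implemented above (illustrative):
 *
 *	init_new_context()  - allocates a sid for a fresh user mm; leaves
 *			      an already-assigned sid (cloned mm) alone
 *	activate_mm()       - allocates a sid in the exec path if the new
 *			      mm doesn't have one yet
 *	switch_mm()         - loads %cr25 (page directory) and %sr3 (sid)
 *	destroy_context()   - returns the sid via free_sid()
 */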
#endif