#include <linux/config.h>

#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

/*
 * The way contexts are handled on the PPC, they are VSIDs and don't
 * need any special treatment right now.  Perhaps I can defer flushing
 * the TLB by keeping a list of zombie VSIDs/contexts and handling
 * that through destroy_context() later.
 *	-- Cort
 *
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement an LRU usage.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected, just that the ASID comparison is not performed.
 *	-- Dan
 */

/* No lazy-TLB bookkeeping is needed on 32-bit PPC; this hook is a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk, unsigned cpu)
{
}
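
/*
 * NO_CONTEXT is the sentinel stored in mm->context by
 * init_new_context() to mean "no context allocated yet";
 * LAST_CONTEXT is the highest valid context number and is used to
 * detect when the allocator runs out and must call
 * mmu_context_overflow().
 */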
#ifdef CONFIG_8xx
#define NO_CONTEXT      	16
#define LAST_CONTEXT    	15
#define MUNGE_CONTEXT(n)        (n)

#else

/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT      	0
#define LAST_CONTEXT    	0xfffff

/*
 * Allocating context numbers this way tends to spread out
 * the entries in the hash table better than a simple linear
 * allocation.
 */
#define MUNGE_CONTEXT(n)        (((n) * 897) & LAST_CONTEXT)
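
/*
 * 897 is odd, so multiplication by it is invertible modulo 2^20:
 * every value in 0..LAST_CONTEXT is still produced exactly once,
 * and consecutive allocations land 897 apart, e.g.
 * MUNGE_CONTEXT(1) = 897, MUNGE_CONTEXT(2) = 1794,
 * MUNGE_CONTEXT(3) = 2691.
 */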
#endif /* CONFIG_8xx */

extern atomic_t next_mmu_context;
extern void mmu_context_overflow(void);

/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 */
extern void set_context(int context);
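/*
 * (The VSIDs loaded into the user segment registers for a given
 * context are presumably those produced by VSID_FROM_CONTEXT()
 * below; set_context() itself is defined elsewhere.)
 */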

#ifdef CONFIG_8xx
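/*
 * On the 8xx, "overflow" handling is trivial: resetting
 * next_mmu_context to -1 makes the next atomic_inc_return() hand out
 * context 0 again, so allocation simply wraps around.
 */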
extern inline void mmu_context_overflow(void)
{
	atomic_set(&next_mmu_context, -1);
}
#endif /* CONFIG_8xx */

/*
 * Get a new mmu context for the address space described by `mm' if
 * necessary.
 */
#define get_mmu_context(mm)						\
do {									\
	if ((mm)->context == NO_CONTEXT) {				\
		if (atomic_read(&next_mmu_context) == LAST_CONTEXT)	\
			mmu_context_overflow();				\
		(mm)->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context)); \
	}								\
} while (0)
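
/*
 * Illustrative example for the 6xx/7xx case: if next_mmu_context
 * currently holds 41, atomic_inc_return() yields 42 and the mm is
 * assigned MUNGE_CONTEXT(42) = (42 * 897) & 0xfffff = 37674.
 */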

/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk,mm)	((mm)->context = NO_CONTEXT)

/*
 * We're finished using the context for an address space.
 */
#define destroy_context(mm)     do { } while (0)

/*
 * Switch the MMU state when switching tasks: record the new mm's
 * page directory in the thread struct, make sure the mm has a
 * context allocated, and load that context into the hardware.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, int cpu)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context);
}

/*
 * Compute the VSID from the context and segment number.
 * Segments > 7 are kernel segments and their VSID is just the
 * segment number.
 *	-- Cort
 */
#define VSID_FROM_CONTEXT(segment,context) \
	(((segment) < 8) ? ((segment) | ((context) << 4)) : (segment))
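
/*
 * Worked example: user segment 3 in context 5 gets VSID
 * (3 | (5 << 4)) = 0x53, while kernel segment 12 always maps to
 * VSID 12, independent of the context.
 */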

#endif /* __PPC_MMU_CONTEXT_H */