/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

/*
 * Returns current instruction pointer ("program counter").
 */
#define current_text_addr() \
  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
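
/*
 * Illustrative use (sketch only, not part of the original header): the
 * "br %0,.+4" above writes the address of the following instruction
 * into __pc, so a caller can do e.g.
 *
 *	void *pc = current_text_addr();
 *	printk("executing near %p\n", pc);
 *
 * to report roughly where the kernel is currently executing.
 */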

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
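
/*
 * Worked example (added commentary, not from the original header): with
 * TASK_SIZE above, mmap() searches start at 0x40000000 (1GB) for tasks
 * running with the ADDR_LIMIT_32BIT personality, and at TASK_SIZE / 2 =
 * 0x20000000000UL (2TB) for normal 64-bit tasks.
 */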

/*
 * Bus types
 */
#define EISA_bus 1
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {
	/* the fields below are used by PALcode and must match struct pcb: */
	unsigned long ksp;
	unsigned long usp;
	unsigned long ptbr;
	unsigned int pcc;
	unsigned int asn;
	unsigned long unique;
	/*
	 * bit  0: floating point enable
	 * bit 62: performance monitor enable
	 */
	unsigned long pal_flags;
	unsigned long res1, res2;

	/*
	 * The fields below are Linux-specific:
	 *
	 * bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
	 * bit 6..8: UAC bits (see sysinfo.h)
	 * bit 17..21: IEEE_STATUS_MASK bits (see fpu.h)
	 * bit 63: die_if_kernel recursion lock
	 */
	unsigned long flags;

	/* Perform syscall argument validation (get/set_fs). */
	mm_segment_t fs;

	/* Breakpoint handling for ptrace.  */
	unsigned long bpt_addr[2];
	unsigned int bpt_insn[2];
	int bpt_nsaved;
};

#define INIT_MMAP { &init_mm, PAGE_OFFSET,  PAGE_OFFSET+0x10000000, \
	NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define INIT_THREAD  { \
	0, 0, 0, \
	0, 0, 0, \
	0, 0, 0, \
	0, \
	KERNEL_DS \
}

#define THREAD_SIZE (2*PAGE_SIZE)

#include <asm/ptrace.h>

/*
 * Return saved PC of a blocked thread.  This assumes the frame
 * pointer is the 6th saved long on the kernel stack and that the
 * saved return address is the first long in the frame.  This all
 * holds provided the thread blocked through a call to schedule() ($15
 * is the frame pointer in schedule() and $15 is saved at offset 48 by
 * entry.S:do_switch_stack).
 *
 * Under heavy swap load I've seen this lose in an ugly way.  So do
 * some extra sanity checking on the ranges we expect these pointers
 * to be in so that we can fail gracefully.  This is just for ps after
 * all.  -- r~
 */
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
	unsigned long fp, sp = t->ksp, base = (unsigned long)t;

	if (sp > base && sp+6*8 < base + 16*1024) {
		fp = ((unsigned long*)sp)[6];
		if (fp > sp && fp < base + 16*1024)
			return *(unsigned long *)fp;
	}

	return 0;
}
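
/*
 * Note on the bounds above (added commentary, not from the original):
 * 16*1024 is THREAD_SIZE for Alpha's 8KB pages (2*PAGE_SIZE), so the
 * checks only accept stack and frame pointers that still lie roughly
 * within the two-page task_struct + kernel stack allocation.  A
 * ps-style caller might use it as:
 *
 *	unsigned long pc = thread_saved_pc(&p->thread);
 *	if (pc)
 *		... report pc ...
 *
 * treating 0 as "could not determine".
 */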

/* Do necessary setup to start up a newly executed thread.  */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Create a kernel thread without removing it from tasklists.  */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)
#define forget_segments()		do { } while (0)

unsigned long get_wchan(struct task_struct *p);

/* See arch/alpha/kernel/ptrace.c for details.  */
#define PT_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)		\
			 + (long)&((struct pt_regs *)0)->reg)

#define SW_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)		\
			 - sizeof(struct switch_stack)			\
			 + (long)&((struct switch_stack *)0)->reg)

#define KSTK_EIP(tsk) \
    (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))

#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
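
/*
 * Illustrative expansion of KSTK_EIP above (commentary only): for a
 * task pointer tsk it reads
 *
 *	*(unsigned long *)((unsigned long)(tsk) + 2*PAGE_SIZE
 *			   - sizeof(struct pt_regs)
 *			   + offsetof(struct pt_regs, pc))
 *
 * i.e. the user pc saved in the struct pt_regs frame sitting at the
 * top of the task's two-page kernel stack.
 */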

/* NOTE: The task struct and the stack go together!  */
#define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p)     free_pages((unsigned long)(p),1)
#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

#endif /* __ASM_ALPHA_PROCESSOR_H */