1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
|
/* $Id: signal.c,v 1.24 1997/09/02 20:53:03 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/ptrace.h>
#include <asm/svr4.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/smp_lock.h>
#define _S(nr) (1<<((nr)-1))
#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
int options, unsigned long *ru);
asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
unsigned long orig_o0, int ret_from_syscall);
/* This turned off for production... */
/* #define DEBUG_SIGNALS 1 */
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
struct thread_struct *tp = ¤t->tss;
mc_gregset_t *grp;
unsigned long pc, npc, tstate;
unsigned long fp, i7;
unsigned char fenab;
__asm__ __volatile__("flushw");
if(tp->w_saved ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok((unsigned long)ucp, sizeof(*ucp))))
goto do_sigsegv;
grp = &ucp->uc_mcontext.mc_gregs;
__get_user(pc, &((*grp)[MC_PC]));
__get_user(npc, &((*grp)[MC_NPC]));
if((pc | npc) & 3)
goto do_sigsegv;
if(regs->u_regs[UREG_I1]) {
__get_user(current->blocked, &ucp->uc_sigmask);
current->blocked &= _BLOCKABLE;
}
regs->tpc = pc;
regs->tnpc = npc;
__get_user(regs->y, &((*grp)[MC_Y]));
__get_user(tstate, &((*grp)[MC_TSTATE]));
regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_XCC));
__get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
__get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
__get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
__get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
__get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
__get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
__get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
__get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
__get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
__get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
__get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
__get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
__get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
__get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
__get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
__get_user(fp, &(ucp->uc_mcontext.mc_fp));
__get_user(i7, &(ucp->uc_mcontext.mc_i7));
__put_user(fp, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
__put_user(i7, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
__get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
if(fenab) {
unsigned long *fpregs = (unsigned long *)(regs+1);
unsigned long fprs;
__get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
if (fprs & FPRS_DL)
copy_from_user(fpregs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
copy_from_user(fpregs+16, ((unsigned long *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
(sizeof(unsigned int) * 32));
__get_user(fpregs[32], &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
__get_user(fpregs[33], &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
regs->fprs = fprs;
regs->tstate |= TSTATE_PEF;
}
return;
do_sigsegv:
lock_kernel();
do_exit(SIGSEGV);
}
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
struct thread_struct *tp = ¤t->tss;
mc_gregset_t *grp;
mcontext_t *mcp;
unsigned long fp, i7;
unsigned char fenab = (current->tss.flags & SPARC_FLAG_USEDFPU);
synchronize_user_stack();
if(tp->w_saved || clear_user(ucp, sizeof(*ucp)))
goto do_sigsegv;
mcp = &ucp->uc_mcontext;
grp = &mcp->mc_gregs;
/* Skip over the trap instruction, first. */
regs->tpc = regs->tnpc;
regs->tnpc += 4;
__put_user(current->blocked, &ucp->uc_sigmask);
__put_user(regs->tstate, &((*grp)[MC_TSTATE]));
__put_user(regs->tpc, &((*grp)[MC_PC]));
__put_user(regs->tnpc, &((*grp)[MC_NPC]));
__put_user(regs->y, &((*grp)[MC_Y]));
__put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
__put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
__put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
__put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
__put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
__put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
__put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G7]));
__put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
__put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
__put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
__put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
__put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
__put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
__put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
__put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
__get_user(fp, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
__get_user(i7, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
__put_user(fp, &(mcp->mc_fp));
__put_user(i7, &(mcp->mc_i7));
__put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
if(fenab) {
unsigned long *fpregs = (unsigned long *)(regs+1);
unsigned long fprs;
fprs = (regs->fprs & FPRS_FEF) |
(current->tss.flags & (SPARC_FLAG_USEDFPUL | SPARC_FLAG_USEDFPUU));
if (fprs & FPRS_DL)
copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
(sizeof(unsigned int) * 32));
else
clear_user(&(mcp->mc_fpregs.mcfpu_fregs),
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
copy_to_user(((unsigned long *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
(sizeof(unsigned int) * 32));
else
clear_user(((unsigned long *)&(mcp->mc_fpregs.mcfpu_fregs))+16,
(sizeof(unsigned int) * 32));
__put_user(fpregs[32], &(mcp->mc_fpregs.mcfpu_fsr));
__put_user(fpregs[33], &(mcp->mc_fpregs.mcfpu_gsr));
__put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
}
return;
do_sigsegv:
lock_kernel();
do_exit(SIGSEGV);
}
/*
* The new signal frame, intended to be used for Linux applications only
* (we have enough in there to work with clone).
* All the interesting bits are in the info field.
*/
struct new_signal_frame {
	struct sparc_stackf	ss;		/* caller's register window + stack frame */
	__siginfo_t		info;		/* saved pt_regs and signal mask */
	__siginfo_fpu_t		*fpu_save;	/* user VA of fpu_state below, or 0 if no FPU state */
	unsigned int		insns [2];	/* sigreturn trampoline: mov __NR_sigreturn,%g1; t 0x11 */
	__siginfo_fpu_t		fpu_state;	/* present only when the task used the FPU */
};
/* Align macros */
#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
/*
* atomically swap in the new signal mask, and wait for a signal.
* This is really tricky on the Sparc, watch out...
*/
asmlinkage void _sigpause_common(unsigned int set, struct pt_regs *regs)
{
	unsigned long mask;

#ifdef CONFIG_SPARC32_COMPAT
	/* 32-bit compatibility tasks are handled by the sparc32 code. */
	if (current->tss.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage void _sigpause32_common(unsigned int,
							  struct pt_regs *);
		_sigpause32_common(set, regs);
		return;
	}
#endif
	/* Swap in the temporary mask under the sigmask lock, remembering
	 * the old one so pending-signal scanning in do_signal() still
	 * honors it.
	 */
	spin_lock_irq(&current->sigmask_lock);
	mask = current->blocked;
	current->blocked = set & _BLOCKABLE;
	spin_unlock_irq(&current->sigmask_lock);

	/* Step past the trap instruction. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(mask, regs, 0, 0))
			return;
	}
}
/* sigpause(2) entry point: the new mask arrives in %o0. */
asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}
/* sigsuspend(2) entry point: fetch the new mask from the user's %o0. */
asmlinkage void do_sigsuspend(struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}
/* Reload the FPU image saved in a signal frame back into the kernel's
 * per-task FPU save area and re-enable the FPU for the task.
 */
static inline void
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
	/* The FPU save area lives directly after pt_regs on the kernel
	 * stack: 32 unsigned longs of %f registers, then %fsr at [32]
	 * and %gsr at [33].
	 */
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;

	__get_user(fprs, &fpu->si_fprs);
	/* Lower half (%f0-%f31), only if the frame says it is valid. */
	if (fprs & FPRS_DL)
		copy_from_user(fpregs, &fpu->si_float_regs[0],
			       (sizeof(unsigned int) * 32));
	/* Upper half (%f32-%f62). */
	if (fprs & FPRS_DU)
		copy_from_user(fpregs+16, &fpu->si_float_regs[32],
			       (sizeof(unsigned int) * 32));
	__get_user(fpregs[32], &fpu->si_fsr);
	__get_user(fpregs[33], &fpu->si_gsr);
	regs->fprs = fprs;
	regs->tstate |= TSTATE_PEF;	/* FPU enabled when we return to user mode */
}
void do_sigreturn(struct pt_regs *regs)
{
struct new_signal_frame *sf;
unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t *fpu_save;
unsigned long mask;
#ifdef CONFIG_SPARC32_COMPAT
if (current->tss.flags & SPARC_FLAG_32BIT) {
extern asmlinkage void do_sigreturn32(struct pt_regs *);
return do_sigreturn32(regs);
}
#endif
synchronize_user_stack ();
sf = (struct new_signal_frame *)
(regs->u_regs [UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
if (verify_area (VERIFY_READ, sf, sizeof (*sf)))
goto segv;
if (((unsigned long) sf) & 3)
goto segv;
get_user(tpc, &sf->info.si_regs.tpc);
__get_user(tnpc, &sf->info.si_regs.tnpc);
if ((tpc | tnpc) & 3)
goto segv;
regs->tpc = tpc;
regs->tnpc = tnpc;
/* 2. Restore the state */
__get_user(regs->y, &sf->info.si_regs.y);
__get_user(tstate, &sf->info.si_regs.tstate);
copy_from_user(regs->u_regs, sf->info.si_regs.u_regs, sizeof(regs->u_regs));
/* User can only change condition codes in %tstate. */
regs->tstate &= ~(TSTATE_ICC);
regs->tstate |= (tstate & TSTATE_ICC);
__get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
restore_fpu_state(regs, &sf->fpu_state);
__get_user(mask, &sf->info.si_mask);
current->blocked = mask & _BLOCKABLE;
return;
segv:
lock_kernel();
do_exit(SIGSEGV);
}
/* Validate a candidate user stack frame pointer: it must be doubleword
 * aligned and leave room for fplen bytes below the top of the user
 * address space hole.  Returns non-zero when the pointer is unusable.
 */
static int invalid_frame_pointer(void *fp, int fplen)
{
	unsigned long addr = (unsigned long) fp;

	if (addr & 7)
		return 1;	/* not 8-byte aligned */
	if (addr > 0x80000000000ULL - fplen)
		return 1;	/* frame would run past the VA hole */
	return 0;
}
/* Dump the task's FPU image into a signal frame on the user stack and
 * disable the FPU so the handler starts with a clean unit.
 */
static inline void
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
	/* Kernel-side FPU save area directly after pt_regs: 32 unsigned
	 * longs of %f registers, %fsr at [32], %gsr at [33].
	 */
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;

	/* Advertise only the halves the task actually used. */
	fprs = (regs->fprs & FPRS_FEF) |
	       (current->tss.flags & (SPARC_FLAG_USEDFPUL | SPARC_FLAG_USEDFPUU));
	/* Lower half (%f0-%f31): copy if used, otherwise clear so we do
	 * not leak stale kernel-stack data to user space.
	 */
	if (fprs & FPRS_DL)
		copy_to_user(&fpu->si_float_regs[0], fpregs,
			     (sizeof(unsigned int) * 32));
	else
		clear_user(&fpu->si_float_regs[0],
			   (sizeof(unsigned int) * 32));
	/* Upper half (%f32-%f62), same policy. */
	if (fprs & FPRS_DU)
		copy_to_user(&fpu->si_float_regs[32], fpregs+16,
			     (sizeof(unsigned int) * 32));
	else
		clear_user(&fpu->si_float_regs[32],
			   (sizeof(unsigned int) * 32));
	__put_user(fpregs[32], &fpu->si_fsr);
	__put_user(fpregs[33], &fpu->si_gsr);
	__put_user(fprs, &fpu->si_fprs);
	regs->tstate &= ~TSTATE_PEF;	/* handler runs with FPU disabled */
}
/* Build a new-style signal frame on the user stack and redirect the
 * task to its handler.  On any failure the task is killed with SIGILL
 * (we have no stack left to deliver SIGSEGV on).
 */
static inline void
new_setup_frame(struct sigaction *sa, struct pt_regs *regs,
		int signo, unsigned long oldmask)
{
	struct new_signal_frame *sf;
	int sigframe_size;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	/* Frames for tasks that never touched the FPU omit fpu_state. */
	sigframe_size = NF_ALIGNEDSZ;
	if (!(current->tss.flags & SPARC_FLAG_USEDFPU))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	/* Carve the frame out below the current stack pointer. */
	sf = (struct new_signal_frame *)
		(regs->u_regs[UREG_FP] + STACK_BIAS - sigframe_size);
	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;
	if (current->tss.w_saved != 0) {
		/* Register windows still buffered in the kernel mean the
		 * user stack pointer was bad.
		 */
		printk ("%s[%d]: Invalid user stack frame for "
			"signal delivery.\n", current->comm, current->pid);
		goto sigill;
	}

	/* 2. Save the current process state */
	copy_to_user(&sf->info.si_regs, regs, sizeof (*regs));
	if (current->tss.flags & SPARC_FLAG_USEDFPU) {
		save_fpu_state(regs, &sf->fpu_state);
		__put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		__put_user(0, &sf->fpu_save);
	}
	__put_user(oldmask, &sf->info.si_mask);
	/* Duplicate the interrupted register window at the base of the
	 * new frame so the handler has a valid caller frame.
	 */
	copy_in_user((u64 *)sf,
		     (u64 *)(regs->u_regs[UREG_FP]+STACK_BIAS),
		     sizeof(struct reg_window));

	/* 3. return to kernel instructions */
	__put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
	__put_user(0x91d02011, &sf->insns[1]); /* t 0x11 */

	/* 4. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	/* %o7 points 8 bytes before the trampoline so the handler's
	 * "ret" (which adds 8) lands on insns[0].
	 */
	regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

	/* 5. signal handler */
	regs->tpc = (unsigned long) sa->sa_handler;
	regs->tnpc = (regs->tpc + 4);

	/* Flush instruction space. */
	{
		/* Walk the page tables by hand so we can flush the exact
		 * physical line holding the freshly written trampoline.
		 */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pmd_t *pmdp = pmd_offset(pgdp, address);
		pte_t *ptep = pte_offset(pmdp, address);

		if(pte_present(*ptep)) {
			unsigned long page = pte_page(*ptep);
			__asm__ __volatile__("
			membar #StoreStore
			flush %0 + %1"
			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
			: "memory");
		}
	}
	return;
sigill:
	lock_kernel();
	do_exit(SIGILL);
}
/* Deliver one signal: push the handler frame and update the blocked
 * mask according to the handler's flags.
 */
static inline void handle_signal(unsigned long signr, struct sigaction *sa,
				 unsigned long oldmask, struct pt_regs *regs)
{
	new_setup_frame(sa, regs, signr, oldmask);
	if (sa->sa_flags & SA_ONESHOT)
		sa->sa_handler = NULL;	/* one-shot handlers reset to default */
	if (!(sa->sa_flags & SA_NOMASK)) {
		/* Block the signal itself plus the handler's sa_mask
		 * while the handler runs; SIGKILL/SIGSTOP stay
		 * unblockable.
		 */
		spin_lock_irq(&current->sigmask_lock);
		current->blocked |= (sa->sa_mask | _S(signr)) & _BLOCKABLE;
		spin_unlock_irq(&current->sigmask_lock);
	}
}
/* Decide how an interrupted system call resumes now that a handler is
 * about to run.  ERESTARTNOHAND always fails with EINTR; ERESTARTSYS
 * fails with EINTR unless the handler was installed with SA_RESTART;
 * otherwise (ERESTARTSYS with SA_RESTART, or ERESTARTNOINTR) the call
 * is replayed by restoring %o0 and backing up the trap PC/NPC.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	unsigned long err = regs->u_regs[UREG_I0];

	if (err == ERESTARTNOHAND ||
	    (err == ERESTARTSYS && !(sa->sa_flags & SA_RESTART))) {
		/* Make the call return EINTR (carry set = error on sparc). */
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
	} else if (err == ERESTARTSYS || err == ERESTARTNOINTR) {
		/* Re-execute the trap instruction with the original %o0. */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
			 unsigned long orig_i0, int restart_syscall)
{
	unsigned long signr, mask = ~current->blocked;
	struct sigaction *sa;

#ifdef CONFIG_SPARC32_COMPAT
	/* 32-bit compatibility tasks get their signals delivered by the
	 * sparc32 code.
	 */
	if (current->tss.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage int do_signal32(unsigned long, struct pt_regs *,
						  unsigned long, int);
		return do_signal32(oldmask, regs, orig_i0, restart_syscall);
	}
#endif
	while ((signr = current->signal & mask) != 0) {
		signr = ffz(~signr);	/* bit index of lowest pending, unblocked signal */

		spin_lock_irq(&current->sigmask_lock);
		current->signal &= ~(1 << signr);
		spin_unlock_irq(&current->sigmask_lock);

		sa = current->sig->action + signr;
		signr++;	/* bit index -> signal number */

		if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
			/* Stop and let the tracer decide what to do. */
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			if (!(signr = current->exit_code))
				continue;	/* tracer cancelled the signal */
			current->exit_code = 0;
			if (signr == SIGSTOP)
				continue;
			if (_S(signr) & current->blocked) {
				/* Tracer substituted a blocked signal:
				 * re-queue it for later.
				 */
				spin_lock_irq(&current->sigmask_lock);
				current->signal |= _S(signr);
				spin_unlock_irq(&current->sigmask_lock);
				continue;
			}
			sa = current->sig->action + signr - 1;
		}
		if (sa->sa_handler == SIG_IGN) {
			if (signr != SIGCHLD)
				continue;
			/* sys_wait4() grabs the master kernel lock, so
			 * we need not do so, that sucker should be
			 * threaded and would not be that difficult to
			 * do anyways.
			 */
			while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				;
			continue;
		}
		if (sa->sa_handler == SIG_DFL) {
			if (current->pid == 1)
				continue;	/* init ignores default-fatal signals */
			switch (signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH:
				/* Default action is to ignore. */
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;
				/* fall through */
			case SIGSTOP:
				if (current->flags & PF_PTRACED)
					continue;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				if (!(current->p_pptr->sig->action[SIGCHLD-1].sa_flags &
				      SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
				if (current->binfmt && current->binfmt->core_dump) {
					lock_kernel();
					if (current->binfmt->core_dump(signr, regs))
						signr |= 0x80;	/* flag "core dumped" in exit code */
					unlock_kernel();
				}
#ifdef DEBUG_SIGNALS
				/* Very useful to debug dynamic linker problems */
				printk ("Sig ILL going...\n");
				show_regs (regs);
#endif
				/* fall through */
			default:
				spin_lock_irq(&current->sigmask_lock);
				current->signal |= _S(signr & 0x7f);
				spin_unlock_irq(&current->sigmask_lock);
				current->flags |= PF_SIGNALED;
				lock_kernel();
				do_exit(signr);	/* does not return */
				unlock_kernel();
			}
		}
		/* Found a signal with a real handler: deliver it. */
		if (restart_syscall)
			syscall_restart(orig_i0, regs, sa);
		handle_signal(signr, sa, oldmask, regs);
		return 1;
	}
	/* No handler ran; restart an interrupted system call if asked to. */
	if (restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
asmlinkage int
sys_sigstack(struct sigstack *ssptr, struct sigstack *ossptr)
{
int ret = -EFAULT;
lock_kernel();
/* First see if old state is wanted. */
if(ossptr) {
if (put_user ((u64)current->tss.sstk_info.the_stack, &ossptr->the_stack) ||
__put_user (current->tss.sstk_info.cur_status, &ossptr->cur_status))
goto out;
}
/* Now see if we want to update the new state. */
if(ssptr) {
if (get_user ((u64)current->tss.sstk_info.the_stack, &ssptr->the_stack) ||
__put_user (current->tss.sstk_info.cur_status, &ssptr->cur_status))
goto out;
}
ret = 0;
out:
unlock_kernel();
return ret;
}
|