/*
* include/asm-s390/smplock.h
*
* S390 version
*
* Derived from "include/asm-i386/smplock.h"
*/

#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;
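
/*
 * kernel_locked() tests whether the big kernel lock is currently
 * held (by any task, on any CPU).
 */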
#define kernel_locked() spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
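
/*
 * The two macros above are scheduler hooks: the kernel_flag spinlock
 * is dropped while a task that holds the BKL sleeps (its lock_depth
 * survives as a record of ownership), and is taken again when the
 * task is scheduled back in.
 */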

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
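/*
 * A task's lock_depth starts at -1 (set in INIT_TASK). The first
 * lock_kernel() call raises it to 0 and takes the spinlock; nested
 * calls only bump the counter, making the BKL recursive per task.
 */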
extern __inline__ void lock_kernel(void)
{
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}
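
/*
 * Usage sketch (illustrative; the function name below is hypothetical,
 * not part of this header). Nesting is safe because lock_depth counts
 * recursion per task:
 *
 *	void some_driver_ioctl(void)
 *	{
 *		lock_kernel();		// depth -1 -> 0: spinlock taken
 *		lock_kernel();		// depth 0 -> 1: counter only
 *		unlock_kernel();	// depth 1 -> 0: lock still held
 *		unlock_kernel();	// depth 0 -> -1: spinlock released
 *	}
 */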