author	Arnd Bergmann <arnd@arndb.de>	2005-12-05 22:52:25 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-09 14:52:55 +1100
commit	5110459f181ef1f11200bb3dec61953f08cc49e7 (patch)
tree	73356ce50b3fb5055b4a6f39f237f046615f797d /include/asm-powerpc
parent	3b3d22cb84a0bb12f6bbb2b1158972894bec3f21 (diff)
[PATCH] spufs: Improved SPU preemptability.
This patch makes it easier to preempt an SPU context by having the
scheduler hold ctx->state_sema for much shorter periods of time.

As part of this restructuring, the control logic for the "run" operation
is moved from arch/ppc64/kernel/spu_base.c to fs/spufs/file.c. Of course
the base retains "bottom half" handlers for class{0,1} irqs.

The new run loop will re-acquire an SPU if preempted.

From: Mark Nutter <mnutter@us.ibm.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
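The run loop itself lives in fs/spufs/file.c and is not part of this diff.
Purely as a hedged illustration of the idea described above, a
preemption-aware run loop could be structured roughly as follows; every
name here (sketch_ctx, sketch_acquire_runnable(), sketch_wait_for_stop(),
sketch_was_preempted(), sketch_release()) is a hypothetical placeholder,
not the real spufs API:

/*
 * Hypothetical sketch only: none of these names come from the actual
 * spufs code. It illustrates holding the context lock (ctx->state_sema
 * in the real code) for short periods and re-acquiring an SPU after
 * the scheduler has preempted the context.
 */
#include <linux/types.h>

struct sketch_ctx;

int sketch_acquire_runnable(struct sketch_ctx *ctx);	/* bind ctx to some SPU */
int sketch_wait_for_stop(struct sketch_ctx *ctx, u32 *status); /* sleep until stop irq */
int sketch_was_preempted(struct sketch_ctx *ctx);	/* scheduler took the SPU away */
void sketch_release(struct sketch_ctx *ctx);		/* drop the context lock */

static int sketch_run(struct sketch_ctx *ctx, u32 *status)
{
	int ret;

	ret = sketch_acquire_runnable(ctx);	/* short hold of the lock */
	if (ret)
		return ret;

	for (;;) {
		ret = sketch_wait_for_stop(ctx, status);
		if (ret)
			break;

		if (!sketch_was_preempted(ctx))
			break;	/* the SPU really stopped: we are done */

		/* preempted: drop the lock, grab another SPU, resume */
		sketch_release(ctx);
		ret = sketch_acquire_runnable(ctx);
		if (ret)
			return ret;
	}

	sketch_release(ctx);
	return ret;
}

The point of the restructuring is that the lock is only held across the
short acquire and release steps, so the scheduler can take the SPU away
while the caller sleeps waiting for a stop.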
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--	include/asm-powerpc/spu.h	| 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 092ec97be326..dd91ed8563d2 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -135,9 +135,9 @@ struct spu {
 	spinlock_t register_lock;
 
 	u32 stop_code;
-	wait_queue_head_t stop_wq;
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu);
 
 	char irq_c0[8];
 	char irq_c1[8];
@@ -146,7 +146,8 @@ struct spu {
 
 struct spu *spu_alloc(void);
 void spu_free(struct spu *spu);
-int spu_run(struct spu *spu);
+int spu_irq_class_0_bottom(struct spu *spu);
+int spu_irq_class_1_bottom(struct spu *spu);
 
 extern struct spufs_calls {
 	asmlinkage long (*create_thread)(const char __user *name,
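As a usage note on the header change above (again a hedged sketch, not the
actual spufs code): only struct spu's new stop_callback member and the
spu_irq_class_{0,1}_bottom() prototypes come from this diff; the wait
queue and the point at which the callback is installed are illustrative
assumptions. The base keeps the class 0/1 bottom-half handlers and invokes
stop_callback from its interrupt path, which lets the owner wake whatever
is sleeping in its run loop.

/*
 * Hedged sketch of wiring up the new stop_callback hook. Only the
 * stop_callback member and the spu_irq_class_{0,1}_bottom() prototypes
 * come from the header change above; the wait queue and installation
 * point are assumptions for illustration.
 */
#include <linux/wait.h>
#include <asm/spu.h>

static DECLARE_WAIT_QUEUE_HEAD(sketch_stop_wq);

static void sketch_stop_callback(struct spu *spu)
{
	/* runs from the base interrupt path once the SPU has stopped */
	wake_up_all(&sketch_stop_wq);
}

static void sketch_install_callback(struct spu *spu)
{
	/* the owner (e.g. spufs) hooks in after spu_alloc() */
	spu->stop_callback = sketch_stop_callback;
}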