Diffstat (limited to 'include/linux/tqueue.h')
-rw-r--r--	include/linux/tqueue.h	65
1 file changed, 33 insertions(+), 32 deletions(-)
diff --git a/include/linux/tqueue.h b/include/linux/tqueue.h
index 3eb8f14ae..a0363ffa6 100644
--- a/include/linux/tqueue.h
+++ b/include/linux/tqueue.h
@@ -14,6 +14,7 @@
#define _LINUX_TQUEUE_H
#include <linux/spinlock.h>
+#include <linux/list.h>
#include <asm/bitops.h>
#include <asm/system.h>
@@ -29,37 +30,33 @@
* used as a bottom half handler. This is for example useful for bottom
* halves, which want to be delayed until the next clock tick.
*
- * Problems:
- * - The queue_task_irq() inline function is only atomic with respect to itself.
- * Problems can occur, when queue_task_irq() is called from a normal system
- * call, and an interrupt comes in. No problems occur, when queue_task_irq()
- * is called from an interrupt or bottom half, and interrupted, as run_task_queue()
- * will not be executed/continued before the last interrupt returns. If in
- * doubt, use queue_task(), not queue_task_irq().
+ * Notes:
* - Bottom halves are called in the reverse order in which they were linked into
* the list.
*/
struct tq_struct {
-	struct tq_struct *next;		/* linked list of active bh's */
+	struct list_head list;		/* linked list of active bh's */
	unsigned long sync;		/* must be initialized to zero */
	void (*routine)(void *);	/* function to call */
	void *data;			/* argument to function */
};
-typedef struct tq_struct * task_queue;
+typedef struct list_head task_queue;
-#define DECLARE_TASK_QUEUE(q) task_queue q = NULL
+#define DECLARE_TASK_QUEUE(q) LIST_HEAD(q)
+#define TQ_ACTIVE(q) (!list_empty(&q))
-extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
+extern task_queue tq_timer, tq_immediate, tq_disk;
/*
* To implement your own list of active bottom halves, use the following
* two definitions:
*
- * struct tq_struct *my_bh = NULL;
+ * DECLARE_TASK_QUEUE(my_bh);
* struct tq_struct run_my_bh = {
- * 0, 0, (void (*)(void *)) run_task_queue, &my_bh
+ * routine: (void (*)(void *)) run_task_queue,
+ * data: &my_bh
* };
*
* To activate a bottom half on your list, use:
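Taken together, the pattern described in the comment above might look like the following under the new list-based API. This is a minimal sketch, not part of the patch: my_handler, my_task and kick_my_bh are hypothetical names, and the routine:/data: initializers use the same gcc extension as the comment.

#include <linux/tqueue.h>
#include <linux/interrupt.h>	/* mark_bh(), IMMEDIATE_BH */

/* Private list of pending work (replaces "struct tq_struct *my_bh = NULL"). */
static DECLARE_TASK_QUEUE(my_bh);

/* Runs the private list; chained onto tq_immediate below. */
static struct tq_struct run_my_bh = {
	routine: (void (*)(void *)) run_task_queue,
	data:	 &my_bh
};

static void my_handler(void *data)	/* hypothetical bottom-half body */
{
	/* ... work deferred until the next run of tq_immediate ... */
}

static struct tq_struct my_task = {
	routine: my_handler,
	data:	 NULL			/* sync is implicitly zero */
};

/* Hypothetical helper: activate a bottom half on the private list. */
static void kick_my_bh(void)
{
	queue_task(&my_task, &my_bh);		/* add work to the private queue */
	queue_task(&run_my_bh, &tq_immediate);	/* chain the queue to tq_immediate */
	mark_bh(IMMEDIATE_BH);			/* make the immediate bh run soon */
}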
@@ -78,18 +75,21 @@ extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
extern spinlock_t tqueue_lock;
/*
- * queue_task
+ * Queue a task on a tq. Return non-zero if it was successfully
+ * added.
*/
-static inline void queue_task(struct tq_struct *bh_pointer,
+static inline int queue_task(struct tq_struct *bh_pointer,
			      task_queue *bh_list)
{
+	int ret = 0;
	if (!test_and_set_bit(0,&bh_pointer->sync)) {
		unsigned long flags;
		spin_lock_irqsave(&tqueue_lock, flags);
-		bh_pointer->next = *bh_list;
-		*bh_list = bh_pointer;
+		list_add_tail(&bh_pointer->list, bh_list);
		spin_unlock_irqrestore(&tqueue_lock, flags);
+		ret = 1;
	}
+	return ret;
}
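With the int return type, callers can now tell whether the task was freshly queued or was already pending (its sync bit already set). A minimal usage sketch, reusing the hypothetical my_task and my_bh from above; TQ_ACTIVE() is the new emptiness test added in this patch:

static void example_queue(void)		/* hypothetical caller */
{
	if (queue_task(&my_task, &my_bh))
		printk(KERN_DEBUG "my_task queued\n");		/* sync bit was clear */
	else
		printk(KERN_DEBUG "my_task already pending\n");

	/* TQ_ACTIVE() takes the queue by name (it applies &q itself). */
	if (TQ_ACTIVE(my_bh))
		mark_bh(IMMEDIATE_BH);
}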
/*
@@ -97,28 +97,29 @@ static inline void queue_task(struct tq_struct *bh_pointer,
*/
static inline void run_task_queue(task_queue *list)
{
-	if (*list) {
+	while (!list_empty(list)) {
		unsigned long flags;
-		struct tq_struct *p;
+		struct list_head *next;

		spin_lock_irqsave(&tqueue_lock, flags);
-		p = *list;
-		*list = NULL;
-		spin_unlock_irqrestore(&tqueue_lock, flags);
-
-		while (p) {
+		next = list->next;
+		if (next != list) {
			void *arg;
			void (*f) (void *);
-			struct tq_struct *save_p;
-			arg    = p -> data;
-			f      = p -> routine;
-			save_p = p;
-			p      = p -> next;
-			smp_mb();
-			save_p -> sync = 0;
+			struct tq_struct *p;
+
+			list_del(next);
+			p = list_entry(next, struct tq_struct, list);
+			arg = p->data;
+			f = p->routine;
+			p->sync = 0;
+			spin_unlock_irqrestore(&tqueue_lock, flags);
+
			if (f)
-				(*f)(arg);
+				f(arg);
+			continue;
		}
+		spin_unlock_irqrestore(&tqueue_lock, flags);
	}
}
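For completeness, draining the private queue from the earlier sketch now reduces to a single call; flush_my_bh is again a hypothetical name:

/* Hypothetical helper: run everything queued on my_bh. */
static void flush_my_bh(void)
{
	run_task_queue(&my_bh);
}

Note the ordering in the rewrite above: p->sync is cleared and tqueue_lock released before f(arg) is called, so a handler may re-queue its own tq_struct, and the while loop will pick the new entry up before run_task_queue() returns.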