author | Ralf Baechle <ralf@linux-mips.org> | 1999-06-17 13:25:08 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 1999-06-17 13:25:08 +0000
commit | 59223edaa18759982db0a8aced0e77457d10c68e (patch)
tree | 89354903b01fa0a447bffeefe00df3044495db2e /net/core/dst.c
parent | db7d4daea91e105e3859cf461d7e53b9b77454b2 (diff)
Merge with Linux 2.3.6. Sorry, this isn't tested on silicon, I don't
have a MIPS box at hand.
Diffstat (limited to 'net/core/dst.c')
-rw-r--r-- | net/core/dst.c | 67
1 file changed, 59 insertions, 8 deletions
diff --git a/net/core/dst.c b/net/core/dst.c
index 9007dde66..f1695ca84 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -16,11 +16,22 @@
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
+#include <linux/init.h>

 #include <net/dst.h>

-struct dst_entry * dst_garbage_list;
-atomic_t dst_total = ATOMIC_INIT(0);
+/* Locking strategy:
+ * 1) Garbage collection state of dead destination cache
+ *    entries is protected by dst_lock.
+ * 2) GC is run only from BH context, and is the only remover
+ *    of entries.
+ * 3) Entries are added to the garbage list from both BH
+ *    and non-BH context, so local BH disabling is needed.
+ * 4) All operations modify state, so a spinlock is used.
+ */
+static struct dst_entry *dst_garbage_list;
+static atomic_t dst_total = ATOMIC_INIT(0);
+static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;

 static unsigned long dst_gc_timer_expires;
 static unsigned long dst_gc_timer_inc = DST_GC_MAX;
@@ -29,15 +40,17 @@ static void dst_run_gc(unsigned long);
 static struct timer_list dst_gc_timer =
        { NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };

-#if RT_CACHE_DEBUG >= 2
-atomic_t hh_count;
-#endif

 static void dst_run_gc(unsigned long dummy)
 {
        int delayed = 0;
        struct dst_entry * dst, **dstp;

+       if (!spin_trylock(&dst_lock)) {
+               mod_timer(&dst_gc_timer, jiffies + HZ/10);
+               return;
+       }
+
        del_timer(&dst_gc_timer);
        dstp = &dst_garbage_list;
        while ((dst = *dstp) != NULL) {
@@ -51,7 +64,7 @@ static void dst_run_gc(unsigned long dummy)
        }
        if (!dst_garbage_list) {
                dst_gc_timer_inc = DST_GC_MAX;
-               return;
+               goto out;
        }
        if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
                dst_gc_timer_expires = DST_GC_MAX;
@@ -62,6 +75,9 @@ static void dst_run_gc(unsigned long dummy)
                atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
        add_timer(&dst_gc_timer);
+
+out:
+       spin_unlock(&dst_lock);
 }

 static int dst_discard(struct sk_buff *skb)
@@ -100,7 +116,8 @@ void * dst_alloc(int size, struct dst_ops * ops)

 void __dst_free(struct dst_entry * dst)
 {
-       start_bh_atomic();
+       spin_lock_bh(&dst_lock);
+
        /* The first case (dev==NULL) is required, when
           protocol module is unloaded.
         */
@@ -119,7 +136,8 @@ void __dst_free(struct dst_entry * dst)
                dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
                add_timer(&dst_gc_timer);
        }
-       end_bh_atomic();
+
+       spin_unlock_bh(&dst_lock);
 }

 void dst_destroy(struct dst_entry * dst)
@@ -143,3 +161,36 @@ void dst_destroy(struct dst_entry * dst)
        atomic_dec(&dst_total);
        kfree(dst);
 }
+
+static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+       struct device *dev = ptr;
+       struct dst_entry *dst;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+       case NETDEV_DOWN:
+               spin_lock_bh(&dst_lock);
+               for (dst = dst_garbage_list; dst; dst = dst->next) {
+                       if (dst->dev == dev) {
+                               dst->input = dst_discard;
+                               dst->output = dst_blackhole;
+                               dst->dev = &loopback_dev;
+                       }
+               }
+               spin_unlock_bh(&dst_lock);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+struct notifier_block dst_dev_notifier = {
+       dst_dev_event,
+       NULL,
+       0
+};
+
+__initfunc(void dst_init(void))
+{
+       register_netdevice_notifier(&dst_dev_notifier);
+}
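
The core of the net/core/dst.c changes is the locking rework: the old `start_bh_atomic()`/`end_bh_atomic()` pair is replaced by an explicit `dst_lock` spinlock, taken with `spin_lock_bh()` from process context in `__dst_free()` and with `spin_trylock()` from the timer-driven garbage collector. If the GC cannot take the lock it does not spin; it re-arms its timer for HZ/10 later via `mod_timer()` and gives up. Below is a minimal userspace sketch of that trylock-and-retry garbage collection pattern using pthreads; all names (`gc_tick`, `free_entry`, `garbage_list`) are illustrative stand-ins, not kernel identifiers.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's dst_garbage_list/dst_lock. */
struct entry {
    struct entry *next;
    int dead;                    /* analogous to a dropped refcount */
};

static struct entry *garbage_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producers add entries under the lock, as __dst_free() does
 * with spin_lock_bh(&dst_lock). */
static void free_entry(struct entry *e)
{
    pthread_mutex_lock(&list_lock);
    e->next = garbage_list;
    garbage_list = e;
    pthread_mutex_unlock(&list_lock);
}

/* Mimics dst_run_gc(): if the lock is busy, don't block in the
 * timer handler -- come back on a later tick, like
 * mod_timer(&dst_gc_timer, jiffies + HZ/10). */
static void gc_tick(void)
{
    struct entry **ep, *e;

    if (pthread_mutex_trylock(&list_lock) != 0)
        return;                  /* retry on the next tick */

    ep = &garbage_list;
    while ((e = *ep) != NULL) {
        if (e->dead) {
            *ep = e->next;       /* unlink and reclaim */
            free(e);
        } else {
            ep = &e->next;       /* keep delayed entries for later */
        }
    }
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    struct entry *e = calloc(1, sizeof(*e));
    e->dead = 1;
    free_entry(e);
    gc_tick();                   /* reclaims the dead entry */
    printf("garbage list empty: %s\n", garbage_list == NULL ? "yes" : "no");
    return 0;
}
```

The trylock is the point of the pattern: a timer handler must not sleep, and busy-waiting on a lock held by another CPU would stall BH processing, so deferring the whole pass to a later tick is both safe and cheap.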
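The other addition is `dst_dev_event()`: a netdevice notifier that, on `NETDEV_DOWN` or `NETDEV_UNREGISTER`, walks the garbage list and rewires any entry still referencing the dying device so its input is discarded, its output blackholed, and its `dev` pointer moved to `loopback_dev`, ensuring nothing dereferences a vanished device. The notifier chain itself is just a singly linked list of callback blocks. The sketch below models that mechanism in plain C; the event codes and function names are simplified stand-ins for the kernel's notifier API, not copies of it.

```c
#include <stdio.h>

#define NETDEV_DOWN       1   /* illustrative event codes, not the kernel's values */
#define NETDEV_UNREGISTER 2
#define NOTIFY_DONE       0

/* A minimal model of struct notifier_block: a linked list of
 * callbacks invoked in order for each event. */
struct notifier_block {
    int (*notifier_call)(struct notifier_block *self,
                         unsigned long event, void *data);
    struct notifier_block *next;
};

static struct notifier_block *netdev_chain;

/* Prepend to the chain; the real kernel API also honours a priority field. */
static void register_notifier(struct notifier_block *nb)
{
    nb->next = netdev_chain;
    netdev_chain = nb;
}

/* Walk the chain and invoke every callback, as the kernel does
 * whenever a device changes state. */
static void call_chain(unsigned long event, void *data)
{
    struct notifier_block *nb;

    for (nb = netdev_chain; nb != NULL; nb = nb->next)
        nb->notifier_call(nb, event, data);
}

/* Plays the role of dst_dev_event(): react to a device going away. */
static int demo_event(struct notifier_block *self, unsigned long event, void *data)
{
    const char *dev = data;

    if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER)
        printf("rewiring dead dst entries away from %s\n", dev);
    return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = { demo_event, NULL };

int main(void)
{
    register_notifier(&demo_notifier);
    call_chain(NETDEV_DOWN, "eth0");
    return 0;
}
```

Registering at init time (here via `__initfunc(dst_init)`) means the dst layer hears about every subsequent device transition without the device code needing to know the dst layer exists.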