/*
 *	net/dst.c	Protocol independent destination cache.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#include <net/dst.h>

/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
static atomic_t dst_total = ATOMIC_INIT(0);
static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);

static struct timer_list dst_gc_timer =
	{ NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };

static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	struct dst_entry * dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		/* Someone else holds the lock; retry shortly. */
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->use)) {
			/* Still referenced; leave it on the list for now. */
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;
		dst_destroy(dst);
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	/* Entries remain: back off, lengthening the GC interval
	 * progressively up to DST_GC_MAX.
	 */
	if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
		dst_gc_timer_expires = DST_GC_MAX;
	dst_gc_timer_inc += DST_GC_INC;
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);

out:
	spin_unlock(&dst_lock);
}

static int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int dst_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

void * dst_alloc(int size, struct dst_ops * ops)
{
	struct dst_entry * dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		/* Over the protocol's threshold: ask it to shrink its
		 * cache first, and fail the allocation if it cannot.
		 */
		if (ops->gc())
			return NULL;
	}
	dst = kmalloc(size, GFP_ATOMIC);
	if (!dst)
		return NULL;
	memset(dst, 0, size);
	dst->ops = ops;
	atomic_set(&dst->refcnt, 0);
	dst->lastuse = jiffies;
	dst->input = dst_discard;
	dst->output = dst_blackhole;
	atomic_inc(&dst_total);
	atomic_inc(&ops->entries);
	return dst;
}
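/* Example: how a protocol is expected to use dst_alloc().  This is an
 * illustrative sketch, not code from this file; struct my_route,
 * my_dst_ops, my_input and my_output are hypothetical names.  The
 * caller embeds struct dst_entry as the FIRST member of its own route
 * type, so pointers to the two can be converted freely, and sizes the
 * allocation to the outer structure:
 *
 *	struct my_route {
 *		struct dst_entry	dst;		// must be first
 *		u32			my_daddr;	// protocol-private
 *	};
 *
 *	static struct dst_ops my_dst_ops = { ... };
 *
 *	struct my_route *rt;
 *
 *	rt = dst_alloc(sizeof(struct my_route), &my_dst_ops);
 *	if (rt == NULL)
 *		return -ENOBUFS;
 *	atomic_set(&rt->dst.use, 1);		// caller holds a reference
 *	rt->dst.input = my_input;		// replace the safe defaults
 *	rt->dst.output = my_output;
 */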
void __dst_free(struct dst_entry * dst)
{
	spin_lock_bh(&dst_lock);

	/* The first case (dev==NULL) is required, when protocol
	 * module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_blackhole;
		dst->dev = &loopback_dev;
	}
	dst->obsolete = 2;
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		/* The GC timer had backed off; rearm it at the
		 * minimum interval now that new garbage exists.
		 */
		del_timer(&dst_gc_timer);
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
		add_timer(&dst_gc_timer);
	}

	spin_unlock_bh(&dst_lock);
}

void dst_destroy(struct dst_entry * dst)
{
	struct neighbour *neigh = dst->neighbour;
	struct hh_cache *hh = dst->hh;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	atomic_dec(&dst_total);
	kfree(dst);
}

static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		/* Point every garbage-list entry that still references
		 * the dying device at the loopback device, so queued
		 * packets are discarded rather than sent through a
		 * stale device.
		 */
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			if (dst->dev == dev) {
				dst->input = dst_discard;
				dst->output = dst_blackhole;
				dst->dev = &loopback_dev;
			}
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}

struct notifier_block dst_dev_notifier = {
	dst_dev_event,
	NULL,
	0
};

__initfunc(void dst_init(void))
{
	register_netdevice_notifier(&dst_dev_notifier);
}
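/* Usage note: callers normally release an entry through the dst_free()
 * inline in <net/dst.h> rather than calling __dst_free() directly.
 * Roughly (a sketch of the wrapper's logic, not a verbatim copy; see
 * the header for the authoritative version), it frees immediately when
 * no user holds a reference, and otherwise defers to the GC above:
 *
 *	static __inline__ void dst_free(struct dst_entry * dst)
 *	{
 *		if (dst->obsolete > 1)
 *			return;			// already on the garbage list
 *		if (!atomic_read(&dst->use)) {
 *			dst_destroy(dst);	// no users left: free now
 *			return;
 *		}
 *		__dst_free(dst);		// defer to the GC timer
 *	}
 */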