/*
* net/dst.c Protocol independent destination cache.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
*/
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>
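
/*
 * Global GC state: destination entries that have been freed but are
 * still referenced are parked on dst_garbage_list; dst_gc_timer
 * periodically sweeps the list and destroys entries whose use count
 * has dropped to zero.
 */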
struct dst_entry * dst_garbage_list;
atomic_t dst_total = ATOMIC_INIT(0);

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);

static struct timer_list dst_gc_timer =
        { NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };

#if RT_CACHE_DEBUG >= 2
atomic_t hh_count;
#endif
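
/*
 * Timer handler: walk the garbage list, destroy entries that are no
 * longer in use, and re-arm the timer with a progressively longer
 * delay (capped at DST_GC_MAX) while busy entries remain.
 */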
static void dst_run_gc(unsigned long dummy)
{
        int delayed = 0;
        struct dst_entry * dst, **dstp;

        del_timer(&dst_gc_timer);
        dstp = &dst_garbage_list;
        while ((dst = *dstp) != NULL) {
                if (atomic_read(&dst->use)) {
                        dstp = &dst->next;
                        delayed++;
                        continue;
                }
                *dstp = dst->next;
                dst_destroy(dst);
        }
        if (!dst_garbage_list) {
                dst_gc_timer_inc = DST_GC_MAX;
                return;
        }
        if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
                dst_gc_timer_expires = DST_GC_MAX;
        dst_gc_timer_inc += DST_GC_INC;
        dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
        printk("dst_total: %d/%d %ld\n",
               atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
        add_timer(&dst_gc_timer);
}
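
/*
 * Default input/output handlers installed by dst_alloc(): both simply
 * drop the packet until the owning protocol fills in real handlers.
 */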
static int dst_discard(struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

static int dst_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}
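
/*
 * Allocate and zero a new destination entry of the requested size.
 * If the protocol's entry count exceeds gc_thresh, its garbage
 * collector is invoked first; returns NULL if the collector cannot
 * make room or if the allocation itself fails.
 */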
void * dst_alloc(int size, struct dst_ops * ops)
{
        struct dst_entry * dst;

        if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
                if (ops->gc())
                        return NULL;
        }
        dst = kmalloc(size, GFP_ATOMIC);
        if (!dst)
                return NULL;
        memset(dst, 0, size);
        dst->ops = ops;
        atomic_set(&dst->refcnt, 0);
        dst->lastuse = jiffies;
        dst->input = dst_discard;
        dst->output = dst_blackhole;
        atomic_inc(&dst_total);
        atomic_inc(&ops->entries);
        return dst;
}
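
/*
 * Mark an entry obsolete and queue it on the garbage list for
 * deferred destruction; if the GC timer is currently running slowly,
 * restart it at the minimum interval so the entry is reaped promptly.
 */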
void __dst_free(struct dst_entry * dst)
{
        start_bh_atomic();
        dst->obsolete = 2;
        dst->next = dst_garbage_list;
        dst_garbage_list = dst;
        if (dst_gc_timer_inc > DST_GC_INC) {
                del_timer(&dst_gc_timer);
                dst_gc_timer_inc = DST_GC_INC;
                dst_gc_timer_expires = DST_GC_MIN;
                dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
                add_timer(&dst_gc_timer);
        }
        end_bh_atomic();
}
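
/*
 * Final teardown: release the cached hardware header and neighbour
 * reference, let the owning protocol clean up its private state, then
 * free the entry itself.
 */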
void dst_destroy(struct dst_entry * dst)
{
        struct neighbour *neigh = dst->neighbour;
        struct hh_cache *hh = dst->hh;

        dst->hh = NULL;
        if (hh && atomic_dec_and_test(&hh->hh_refcnt))
                kfree(hh);

        if (neigh) {
                dst->neighbour = NULL;
                neigh_release(neigh);
        }

        atomic_dec(&dst->ops->entries);
        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        atomic_dec(&dst_total);
        kfree(dst);
}