#ifndef NFS_CLUSTER_H
#define NFS_CLUSTER_H
/* Kernel-only definitions: nothing below is exported to userspace. */
#ifdef __KERNEL__
#include <asm/atomic.h>
#include <linux/nfs_fs_sb.h>
/*
 * Counters of total number and pending number of requests.
 * When the total number of requests exceeds the soft limit, we start
 * flushing out requests. If it exceeds the hard limit, we stall until
 * it drops again.
 */
#define MAX_REQUEST_SOFT 192
#define MAX_REQUEST_HARD 256
/*
 * Maximum number of requests per write cluster.
 * 32 requests per cluster account for 128K of data on an intel box.
 * Note: it's a good idea to make this number smaller than MAX_REQUEST_SOFT.
 *
 * For 100Mbps Ethernet, 128 pages (i.e. 256K) per cluster gives much
 * better performance.
 *
 * NOTE(review): the cluster-size constant this comment describes is not
 * defined in this header -- confirm it lives elsewhere or the comment is
 * stale.
 */
/* Number of buckets in the request hash table; must stay a power of two
 * for the mask in REQUEST_HASH() below to work. */
#define REQUEST_HASH_SIZE 16
/* Page index of byte offset 'off' within a file. */
#define REQUEST_NR(off) ((off) >> PAGE_CACHE_SHIFT)
/* Hash bucket for a (inode number, file offset) pair; relies on
 * REQUEST_HASH_SIZE being a power of two. */
#define REQUEST_HASH(ino, off) (((ino) ^ REQUEST_NR(off)) & (REQUEST_HASH_SIZE - 1))
/*
 * Functions
 */
/* Allocate/free the per-mount writeback state (struct nfs_reqlist below)
 * hung off the nfs_server. */
extern int nfs_reqlist_alloc(struct nfs_server *);
extern void nfs_reqlist_free(struct nfs_server *);
/* Set up / tear down the request-list machinery for a mount; init can
 * fail, returning an errno-style int. */
extern int nfs_reqlist_init(struct nfs_server *);
extern void nfs_reqlist_exit(struct nfs_server *);
/* Queue an inode for a writeback scan; the unsigned long is presumably a
 * jiffies timestamp for when the scan should run -- TODO confirm against
 * the flushd implementation. */
extern void inode_schedule_scan(struct inode *, unsigned long);
/* Detach an inode from the flush daemon's inode list. */
extern void inode_remove_flushd(struct inode *);
/* Wake the flush daemon so it rescans pending requests. */
extern void nfs_wake_flushd(void);
/*
 * This is the per-mount writeback cache.
 * One instance is attached to each nfs_server (see nfs_reqlist_alloc()).
 */
struct nfs_reqlist {
	/* Total number of outstanding requests on this mount; checked
	 * against MAX_REQUEST_SOFT/MAX_REQUEST_HARD (see comment above). */
	atomic_t		nr_requests;
	/* Presumably the (jiffies) time the flush task should next run --
	 * TODO confirm against the flushd implementation. */
	unsigned long		runat;
	/* Wait queue for processes stalled when nr_requests exceeds the
	 * hard limit. */
	wait_queue_head_t	request_wait;

	/* The async RPC task that is responsible for scanning the
	 * requests.
	 */
	struct rpc_task		*task;		/* request flush task */

	/* Authentication flavor handle for this NFS client */
	struct rpc_auth		*auth;

	/* The list of all inodes with pending writebacks. */
	struct inode		*inodes;
};
#endif /* __KERNEL__ */
#endif /* NFS_CLUSTER_H */