author     Ralf Baechle <ralf@linux-mips.org>   2000-03-27 23:54:12 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-03-27 23:54:12 +0000
commit     d3e71cb08747743fce908122bab08b479eb403a5 (patch)
tree       cbec6948fdbdee9af81cf3ecfb504070d2745d7b /net/sunrpc/clnt.c
parent     fe7ff1706e323d0e5ed83972960a1ecc1ee538b3 (diff)
Merge with Linux 2.3.99-pre3.
Diffstat (limited to 'net/sunrpc/clnt.c')
-rw-r--r--   net/sunrpc/clnt.c   359
1 files changed, 186 insertions, 173 deletions
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b4d6bd81a..806e14bce 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -42,14 +42,13 @@
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
-static void call_bind(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
static void call_encode(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
+static void call_bind(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
-static void call_receive(struct rpc_task *task);
static void call_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
@@ -98,7 +97,7 @@ rpc_create_client(struct rpc_xprt *xprt, char *servname,
clnt->cl_port = xprt->addr.sin_port;
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
- clnt->cl_prot = IPPROTO_UDP;
+ clnt->cl_prot = xprt->prot;
clnt->cl_stats = program->stats;
clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");
@@ -117,10 +116,10 @@ out:
return clnt;
out_no_clnt:
- printk("RPC: out of memory in rpc_create_client\n");
+ printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
goto out;
out_no_auth:
- printk("RPC: Couldn't create auth handle (flavor %d)\n",
+ printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
flavor);
rpc_free(clnt);
clnt = NULL;
@@ -140,7 +139,7 @@ rpc_shutdown_client(struct rpc_clnt *clnt)
clnt->cl_protname, clnt->cl_server);
while (clnt->cl_users) {
#ifdef RPC_DEBUG
- printk("rpc_shutdown_client: client %s, tasks=%d\n",
+ dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
clnt->cl_protname, clnt->cl_users);
#endif
/* Don't let rpc_release_client destroy us */
@@ -240,42 +239,72 @@ void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
/*
* New rpc_call implementation
*/
-int
-rpc_do_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp,
- int flags, rpc_action func, void *data)
+int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
struct rpc_task my_task, *task = &my_task;
sigset_t oldset;
- int async, status;
+ int status;
/* If this client is slain all further I/O fails */
if (clnt->cl_dead)
return -EIO;
+ if (flags & RPC_TASK_ASYNC) {
+ printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
+ flags &= ~RPC_TASK_ASYNC;
+ }
+
rpc_clnt_sigmask(clnt, &oldset);
/* Create/initialize a new RPC task */
- if ((async = (flags & RPC_TASK_ASYNC)) != 0) {
- if (!func)
- func = rpc_default_callback;
- status = -ENOMEM;
- if (!(task = rpc_new_task(clnt, func, flags)))
- goto out;
- task->tk_calldata = data;
- } else {
- rpc_init_task(task, clnt, NULL, flags);
- }
+ rpc_init_task(task, clnt, NULL, flags);
+ rpc_call_setup(task, msg, 0);
- /* Bind the user cred, set up the call info struct and
- * execute the task */
- if (rpcauth_lookupcred(task) != NULL) {
- rpc_call_setup(task, proc, argp, resp, 0);
- rpc_execute(task);
- } else
- async = 0;
+ /* Set up the call info struct and execute the task */
+ if (task->tk_status == 0)
+ status = rpc_execute(task);
+ else
+ status = task->tk_status;
+ rpc_release_task(task);
+
+ rpc_clnt_sigunmask(clnt, &oldset);
+
+ return status;
+}
- status = 0;
- if (!async) {
+/*
+ * New rpc_call implementation
+ */
+int
+rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
+ rpc_action callback, void *data)
+{
+ struct rpc_task *task;
+ sigset_t oldset;
+ int status;
+
+ /* If this client is slain all further I/O fails */
+ if (clnt->cl_dead)
+ return -EIO;
+
+ flags |= RPC_TASK_ASYNC;
+
+ rpc_clnt_sigmask(clnt, &oldset);
+
+ /* Create/initialize a new RPC task */
+ if (!callback)
+ callback = rpc_default_callback;
+ status = -ENOMEM;
+ if (!(task = rpc_new_task(clnt, callback, flags)))
+ goto out;
+ task->tk_calldata = data;
+
+ rpc_call_setup(task, msg, 0);
+
+ /* Set up the call info struct and execute the task */
+ if (task->tk_status == 0)
+ status = rpc_execute(task);
+ else {
status = task->tk_status;
rpc_release_task(task);
}
@@ -288,18 +317,24 @@ out:
void
-rpc_call_setup(struct rpc_task *task, u32 proc,
- void *argp, void *resp, int flags)
+rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
- task->tk_action = call_bind;
- task->tk_proc = proc;
- task->tk_argp = argp;
- task->tk_resp = resp;
+ task->tk_msg = *msg;
task->tk_flags |= flags;
+ /* Bind the user cred */
+ if (task->tk_msg.rpc_cred != NULL) {
+ rpcauth_holdcred(task);
+ } else
+ rpcauth_bindcred(task);
+
+ if (task->tk_status == 0)
+ task->tk_action = call_reserve;
+ else
+ task->tk_action = NULL;
/* Increment call count */
- if (task->tk_proc < task->tk_client->cl_maxproc)
- rpcproc_count(task->tk_client, proc)++;
+ if (task->tk_msg.rpc_proc < task->tk_client->cl_maxproc)
+ rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
/*
@@ -313,27 +348,8 @@ rpc_restart_call(struct rpc_task *task)
rpc_release_task(task);
return;
}
- task->tk_action = call_bind;
- rpcproc_count(task->tk_client, task->tk_proc)++;
-}
-
-/*
- * 0. Get the server port number if not yet set
- */
-static void
-call_bind(struct rpc_task *task)
-{
- struct rpc_clnt *clnt = task->tk_client;
- struct rpc_xprt *xprt = clnt->cl_xprt;
-
- if (xprt->stream && !xprt->connected)
- task->tk_action = call_reconnect;
- else
- task->tk_action = call_reserve;
- task->tk_status = 0;
-
- if (!clnt->cl_port)
- rpc_getport(task, clnt);
+ task->tk_action = call_reserve;
+ rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
/*
@@ -344,26 +360,22 @@ call_reserve(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- dprintk("RPC: %4d call_reserve\n", task->tk_pid);
- if (!clnt->cl_port) {
- printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
- clnt->cl_protname, clnt->cl_server,
- clnt->cl_softrtry? "giving up" : "retrying");
- if (!clnt->cl_softrtry) {
- task->tk_action = call_bind;
- rpc_delay(task, 5*HZ);
- return;
- }
+ if (task->tk_msg.rpc_proc > clnt->cl_maxproc) {
+ printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
+ clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc);
rpc_exit(task, -EIO);
return;
}
+
+ dprintk("RPC: %4d call_reserve\n", task->tk_pid);
if (!rpcauth_uptodatecred(task)) {
task->tk_action = call_refresh;
return;
}
+
+ task->tk_status = 0;
task->tk_action = call_reserveresult;
task->tk_timeout = clnt->cl_timeout.to_resrvval;
- task->tk_status = 0;
clnt->cl_stats->rpccnt++;
xprt_reserve(task);
}
@@ -374,6 +386,8 @@ call_reserve(struct rpc_task *task)
static void
call_reserveresult(struct rpc_task *task)
{
+ int status = task->tk_status;
+
dprintk("RPC: %4d call_reserveresult (status %d)\n",
task->tk_pid, task->tk_status);
/*
@@ -382,7 +396,7 @@ call_reserveresult(struct rpc_task *task)
*/
if ((task->tk_status >= 0 && !task->tk_rqstp) ||
(task->tk_status < 0 && task->tk_rqstp))
- printk("call_reserveresult: status=%d, request=%p??\n",
+ printk(KERN_ERR "call_reserveresult: status=%d, request=%p??\n",
task->tk_status, task->tk_rqstp);
if (task->tk_status >= 0) {
@@ -390,11 +404,11 @@ call_reserveresult(struct rpc_task *task)
return;
}
- switch (task->tk_status) {
+ task->tk_status = 0;
+ switch (status) {
case -EAGAIN:
case -ENOBUFS:
task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
- task->tk_status = 0;
task->tk_action = call_reserve;
break;
case -ETIMEDOUT:
@@ -402,11 +416,11 @@ call_reserveresult(struct rpc_task *task)
task->tk_action = call_timeout;
break;
default:
- task->tk_action = NULL;
if (!task->tk_rqstp) {
- printk("RPC: task has no request, exit EIO\n");
+ printk(KERN_INFO "RPC: task has no request, exit EIO\n");
rpc_exit(task, -EIO);
- }
+ } else
+ rpc_exit(task, status);
}
}
@@ -428,13 +442,13 @@ call_allocate(struct rpc_task *task)
/* FIXME: compute buffer requirements more exactly using
* auth->au_wslack */
- bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;
+ bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;
if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
return;
- printk("RPC: buffer allocation failed for task %p\n", task);
+ printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
- if (!signalled()) {
+ if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
xprt_release(task);
task->tk_action = call_reserve;
rpc_delay(task, HZ>>4);
@@ -460,10 +474,10 @@ call_encode(struct rpc_task *task)
dprintk("RPC: %4d call_encode (status %d)\n",
task->tk_pid, task->tk_status);
- task->tk_action = call_transmit;
+ task->tk_action = call_bind;
/* Default buffer setup */
- bufsiz = rpcproc_bufsiz(clnt, task->tk_proc)+RPC_SLACK_SPACE;
+ bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
req->rq_svec[0].iov_base = task->tk_buffer;
req->rq_svec[0].iov_len = bufsiz;
req->rq_slen = 0;
@@ -474,23 +488,16 @@ call_encode(struct rpc_task *task)
req->rq_rnr = 1;
req->rq_damaged = 0;
- if (task->tk_proc > clnt->cl_maxproc) {
- printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
- clnt->cl_protname, clnt->cl_vers, task->tk_proc);
- rpc_exit(task, -EIO);
- return;
- }
-
/* Zero buffer so we have automatic zero-padding of opaque & string */
memset(task->tk_buffer, 0, bufsiz);
/* Encode header and provided arguments */
- encode = rpcproc_encode(clnt, task->tk_proc);
+ encode = rpcproc_encode(clnt, task->tk_msg.rpc_proc);
if (!(p = call_header(task))) {
- printk("RPC: call_header failed, exit EIO\n");
+ printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
rpc_exit(task, -EIO);
} else
- if (encode && (status = encode(req, p, task->tk_argp)) < 0) {
+ if (encode && (status = encode(req, p, task->tk_msg.rpc_argp)) < 0) {
printk(KERN_WARNING "%s: can't encode arguments: %d\n",
clnt->cl_protname, -status);
rpc_exit(task, status);
@@ -498,38 +505,59 @@ call_encode(struct rpc_task *task)
}
/*
- * 4. Transmit the RPC request
+ * 4. Get the server port number if not yet set
*/
static void
-call_transmit(struct rpc_task *task)
+call_bind(struct rpc_task *task)
{
- dprintk("RPC: %4d call_transmit (status %d)\n",
- task->tk_pid, task->tk_status);
+ struct rpc_clnt *clnt = task->tk_client;
+ struct rpc_xprt *xprt = clnt->cl_xprt;
- task->tk_action = call_receive;
- task->tk_status = 0;
- xprt_transmit(task);
+ task->tk_action = (xprt->connected) ? call_transmit : call_reconnect;
+
+ if (!clnt->cl_port) {
+ task->tk_action = call_reconnect;
+ task->tk_timeout = clnt->cl_timeout.to_maxval;
+ rpc_getport(task, clnt);
+ }
}
/*
- * 5. Wait for the RPC reply
+ * 4a. Reconnect to the RPC server (TCP case)
*/
static void
-call_receive(struct rpc_task *task)
+call_reconnect(struct rpc_task *task)
{
- dprintk("RPC: %4d call_receive (status %d)\n",
- task->tk_pid, task->tk_status);
+ struct rpc_clnt *clnt = task->tk_client;
- task->tk_action = call_status;
+ dprintk("RPC: %4d call_reconnect status %d\n",
+ task->tk_pid, task->tk_status);
- /* Need to ensure cleanups are performed by xprt_receive_status() */
- xprt_receive(task);
+ task->tk_action = call_transmit;
+ if (task->tk_status < 0 || !clnt->cl_xprt->stream)
+ return;
+ clnt->cl_stats->netreconn++;
+ xprt_reconnect(task);
+}
- /* If we have no decode function, this means we're performing
- * a void call (a la lockd message passing). */
- if (!rpcproc_decode(task->tk_client, task->tk_proc)) {
- task->tk_action = NULL;
+/*
+ * 5. Transmit the RPC request, and wait for reply
+ */
+static void
+call_transmit(struct rpc_task *task)
+{
+ struct rpc_clnt *clnt = task->tk_client;
+
+ dprintk("RPC: %4d call_transmit (status %d)\n",
+ task->tk_pid, task->tk_status);
+
+ task->tk_action = call_status;
+ if (task->tk_status < 0)
return;
+ xprt_transmit(task);
+ if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc)) {
+ task->tk_action = NULL;
+ rpc_wake_up_task(task);
}
}
@@ -558,34 +586,30 @@ call_status(struct rpc_task *task)
case -ETIMEDOUT:
task->tk_action = call_timeout;
break;
- case -EAGAIN:
- if (!req)
- task->tk_action = call_reserve;
- else if (!task->tk_buffer)
- task->tk_action = call_allocate;
- else if (req->rq_damaged) {
- task->tk_action = call_encode;
- clnt->cl_stats->rpcretrans++;
- } else {
- task->tk_action = call_transmit;
- clnt->cl_stats->rpcretrans++;
- }
- break;
case -ECONNREFUSED:
case -ENOTCONN:
+ req->rq_bytes_sent = 0;
if (clnt->cl_autobind || !clnt->cl_port) {
clnt->cl_port = 0;
task->tk_action = call_bind;
- } else if (xprt->stream)
+ break;
+ }
+ if (xprt->stream) {
task->tk_action = call_reconnect;
- else {
- rpc_delay(task, 5*HZ); /* Hope it all wears off */
- if (req->rq_damaged)
- task->tk_action = call_encode;
- else
- task->tk_action = call_transmit;
- clnt->cl_stats->rpcretrans++;
+ break;
}
+ /*
+ * Sleep and dream of an open connection
+ */
+ task->tk_timeout = 5 * HZ;
+ rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ case -ENOMEM:
+ case -EAGAIN:
+ if (req->rq_damaged)
+ task->tk_action = call_encode;
+ else
+ task->tk_action = call_transmit;
+ clnt->cl_stats->rpcretrans++;
break;
default:
if (clnt->cl_chatty)
@@ -614,15 +638,13 @@ call_timeout(struct rpc_task *task)
task->tk_pid);
goto minor_timeout;
}
- to->to_initval <<= 1;
- if (to->to_initval > to->to_maxval)
- to->to_initval = to->to_maxval;
+ to->to_retries = clnt->cl_timeout.to_retries;
}
dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
if (clnt->cl_softrtry) {
if (clnt->cl_chatty && !task->tk_exit)
- printk("%s: server %s not responding, timed out\n",
+ printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname, clnt->cl_server);
rpc_exit(task, -EIO);
return;
@@ -630,24 +652,26 @@ call_timeout(struct rpc_task *task)
if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
task->tk_flags |= RPC_CALL_MAJORSEEN;
if (req)
- printk("%s: server %s not responding, still trying\n",
+ printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
clnt->cl_protname, clnt->cl_server);
else
- printk("%s: task %d can't get a request slot\n",
+ printk(KERN_NOTICE "%s: task %d can't get a request slot\n",
clnt->cl_protname, task->tk_pid);
}
if (clnt->cl_autobind)
clnt->cl_port = 0;
minor_timeout:
- if (!clnt->cl_port) {
+ if (!req)
+ task->tk_action = call_reserve;
+ else if (req->rq_damaged) {
+ task->tk_action = call_encode;
+ clnt->cl_stats->rpcretrans++;
+ } else if (!clnt->cl_port) {
task->tk_action = call_bind;
+ clnt->cl_stats->rpcretrans++;
} else if (clnt->cl_xprt->stream && !clnt->cl_xprt->connected) {
task->tk_action = call_reconnect;
- } else if (!req) {
- task->tk_action = call_reserve;
- } else if (req->rq_damaged) {
- task->tk_action = call_encode;
clnt->cl_stats->rpcretrans++;
} else {
task->tk_action = call_transmit;
@@ -657,21 +681,6 @@ minor_timeout:
}
/*
- * 6b. Reconnect to the RPC server (TCP case)
- */
-static void
-call_reconnect(struct rpc_task *task)
-{
- struct rpc_clnt *clnt = task->tk_client;
- dprintk("RPC: %4d call_reconnect status %d\n",
- task->tk_pid, task->tk_status);
- task->tk_action = call_reserve;
- task->tk_status = 0;
- clnt->cl_stats->netreconn++;
- xprt_reconnect(task);
-}
-
-/*
* 7. Decode the RPC reply
*/
static void
@@ -679,20 +688,20 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t decode = rpcproc_decode(clnt, task->tk_proc);
+ kxdrproc_t decode = rpcproc_decode(clnt, task->tk_msg.rpc_proc);
u32 *p;
dprintk("RPC: %4d call_decode (status %d)\n",
task->tk_pid, task->tk_status);
if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
- printk("%s: server %s OK\n",
+ printk(KERN_NOTICE "%s: server %s OK\n",
clnt->cl_protname, clnt->cl_server);
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
}
if (task->tk_status < 12) {
- printk("%s: too small RPC reply size (%d bytes)\n",
+ printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
clnt->cl_protname, task->tk_status);
rpc_exit(task, -EIO);
return;
@@ -708,10 +717,11 @@ call_decode(struct rpc_task *task)
*/
if (task->tk_client->cl_prog == 100003 &&
(ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
- if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
+ if (RPC_IS_SETUID(task) && task->tk_suid_retry) {
dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
task->tk_flags ^= RPC_CALL_REALUID;
task->tk_action = call_encode;
+ task->tk_suid_retry--;
return;
}
}
@@ -719,7 +729,7 @@ call_decode(struct rpc_task *task)
task->tk_action = NULL;
if (decode)
- task->tk_status = decode(req, p, task->tk_resp);
+ task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
task->tk_status);
}
@@ -751,7 +761,7 @@ call_refreshresult(struct rpc_task *task)
if (task->tk_status < 0)
rpc_exit(task, -EACCES);
else
- task->tk_action = call_bind;
+ task->tk_action = call_reserve;
}
/*
@@ -772,7 +782,7 @@ call_header(struct rpc_task *task)
*p++ = htonl(RPC_VERSION); /* RPC version */
*p++ = htonl(clnt->cl_prog); /* program number */
*p++ = htonl(clnt->cl_vers); /* program version */
- *p++ = htonl(task->tk_proc); /* procedure */
+ *p++ = htonl(task->tk_msg.rpc_proc); /* procedure */
return rpcauth_marshcred(task, p);
}
@@ -787,20 +797,21 @@ call_verify(struct rpc_task *task)
p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
- printk("call_verify: not an RPC reply: %x\n", n);
+ printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
goto garbage;
}
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
int error = -EACCES;
if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
- printk("call_verify: RPC call rejected: %x\n", n);
+ printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n);
} else
switch ((n = ntohl(*p++))) {
case RPC_AUTH_REJECTEDCRED:
case RPC_AUTH_REJECTEDVERF:
- if (!task->tk_cred_retry--)
+ if (!task->tk_cred_retry)
break;
+ task->tk_cred_retry--;
dprintk("RPC: %4d call_verify: retry stale creds\n",
task->tk_pid);
rpcauth_invalcred(task);
@@ -809,17 +820,18 @@ call_verify(struct rpc_task *task)
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
/* possibly garbled cred/verf? */
- if (!task->tk_garb_retry--)
+ if (!task->tk_garb_retry)
break;
+ task->tk_garb_retry--;
dprintk("RPC: %4d call_verify: retry garbled creds\n",
task->tk_pid);
task->tk_action = call_encode;
return NULL;
case RPC_AUTH_TOOWEAK:
- printk("call_verify: server requires stronger "
+ printk(KERN_NOTICE "call_verify: server requires stronger "
"authentication.\n");
default:
- printk("call_verify: unknown auth error: %x\n", n);
+ printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
error = -EIO;
}
dprintk("RPC: %4d call_verify: call rejected %d\n",
@@ -828,7 +840,7 @@ call_verify(struct rpc_task *task)
return NULL;
}
if (!(p = rpcauth_checkverf(task, p))) {
- printk("call_verify: auth check failed\n");
+ printk(KERN_WARNING "call_verify: auth check failed\n");
goto garbage; /* bad verifier, retry */
}
switch ((n = ntohl(*p++))) {
@@ -837,19 +849,20 @@ call_verify(struct rpc_task *task)
case RPC_GARBAGE_ARGS:
break; /* retry */
default:
- printk("call_verify: server accept status: %x\n", n);
+ printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
/* Also retry */
}
garbage:
dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
task->tk_client->cl_stats->rpcgarbage++;
- if (task->tk_garb_retry--) {
- printk("RPC: garbage, retrying %4d\n", task->tk_pid);
+ if (task->tk_garb_retry) {
+ task->tk_garb_retry--;
+ printk(KERN_WARNING "RPC: garbage, retrying %4d\n", task->tk_pid);
task->tk_action = call_encode;
return NULL;
}
- printk("RPC: garbage, exit EIO\n");
+ printk(KERN_WARNING "RPC: garbage, exit EIO\n");
rpc_exit(task, -EIO);
return NULL;
}
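
As a hedged illustration of the interface change above (rpc_do_call() replaced by rpc_call_sync()/rpc_call_async(), with per-call parameters gathered into a struct rpc_message), the sketch below shows how a caller might issue a call through the new entry points. Only rpc_call_sync(), rpc_call_async(), struct rpc_message and its rpc_proc/rpc_argp/rpc_resp/rpc_cred fields are taken from the diff; MYPROC_GETATTR, struct my_args, struct my_res, my_done() and the calldata pointer are hypothetical placeholders, not part of this commit.

/* Hypothetical caller-side sketch -- not part of this commit. */
static int
my_getattr_sync(struct rpc_clnt *clnt, struct my_args *argp, struct my_res *resp)
{
	struct rpc_message msg;

	msg.rpc_proc = MYPROC_GETATTR;	/* placeholder procedure number */
	msg.rpc_argp = argp;		/* consumed by the procedure's encode fn */
	msg.rpc_resp = resp;		/* filled in by the procedure's decode fn */
	msg.rpc_cred = NULL;		/* NULL: rpc_call_setup() binds a cred */

	/* Blocks until the reply is decoded (or the call fails). */
	return rpc_call_sync(clnt, &msg, 0);
}

static int
my_getattr_async(struct rpc_clnt *clnt, struct my_args *argp, struct my_res *resp,
		 rpc_action my_done, void *calldata)
{
	struct rpc_message msg;

	msg.rpc_proc = MYPROC_GETATTR;
	msg.rpc_argp = argp;
	msg.rpc_resp = resp;
	msg.rpc_cred = NULL;

	/* RPC_TASK_ASYNC is set by rpc_call_async() itself; my_done() runs
	 * when the asynchronous task completes. */
	return rpc_call_async(clnt, &msg, 0, my_done, calldata);
}

Either way, the rpc_message replaces the proc/argp/resp triple that rpc_do_call() used to take, and rpc_call_setup() now copies the whole message into task->tk_msg and binds the credential before the state machine starts at call_reserve.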