
/net/sunrpc/clnt.c

https://bitbucket.org/abioy/linux
   1/*
   2 *  linux/net/sunrpc/clnt.c
   3 *
   4 *  This file contains the high-level RPC interface.
   5 *  It is modeled as a finite state machine to support both synchronous
   6 *  and asynchronous requests.
   7 *
   8 *  -	RPC header generation and argument serialization.
   9 *  -	Credential refresh.
  10 *  -	TCP connect handling.
  11 *  -	Retry of operation when it is suspected the operation failed because
  12 *	of uid squashing on the server, or when the credentials were stale
  13 *	and need to be refreshed, or when a packet was damaged in transit.
   14 *	This may have to be moved to the VFS layer.
  15 *
  16 *  NB: BSD uses a more intelligent approach to guessing when a request
  17 *  or reply has been lost by keeping the RTO estimate for each procedure.
  18 *  We currently make do with a constant timeout value.
  19 *
  20 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
  21 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
  22 */
  23
  24#include <asm/system.h>
  25
  26#include <linux/module.h>
  27#include <linux/types.h>
  28#include <linux/kallsyms.h>
  29#include <linux/mm.h>
  30#include <linux/namei.h>
  31#include <linux/mount.h>
  32#include <linux/slab.h>
  33#include <linux/utsname.h>
  34#include <linux/workqueue.h>
  35#include <linux/in6.h>
  36
  37#include <linux/sunrpc/clnt.h>
  38#include <linux/sunrpc/rpc_pipe_fs.h>
  39#include <linux/sunrpc/metrics.h>
  40#include <linux/sunrpc/bc_xprt.h>
  41
  42#include "sunrpc.h"
  43
  44#ifdef RPC_DEBUG
  45# define RPCDBG_FACILITY	RPCDBG_CALL
  46#endif
  47
  48#define dprint_status(t)					\
  49	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
  50			__func__, t->tk_status)
  51
  52/*
  53 * All RPC clients are linked into this list
  54 */
  55static LIST_HEAD(all_clients);
  56static DEFINE_SPINLOCK(rpc_client_lock);
  57
  58static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
  59
  60
  61static void	call_start(struct rpc_task *task);
  62static void	call_reserve(struct rpc_task *task);
  63static void	call_reserveresult(struct rpc_task *task);
  64static void	call_allocate(struct rpc_task *task);
  65static void	call_decode(struct rpc_task *task);
  66static void	call_bind(struct rpc_task *task);
  67static void	call_bind_status(struct rpc_task *task);
  68static void	call_transmit(struct rpc_task *task);
  69#if defined(CONFIG_NFS_V4_1)
  70static void	call_bc_transmit(struct rpc_task *task);
  71#endif /* CONFIG_NFS_V4_1 */
  72static void	call_status(struct rpc_task *task);
  73static void	call_transmit_status(struct rpc_task *task);
  74static void	call_refresh(struct rpc_task *task);
  75static void	call_refreshresult(struct rpc_task *task);
  76static void	call_timeout(struct rpc_task *task);
  77static void	call_connect(struct rpc_task *task);
  78static void	call_connect_status(struct rpc_task *task);
  79
  80static __be32	*rpc_encode_header(struct rpc_task *task);
  81static __be32	*rpc_verify_header(struct rpc_task *task);
  82static int	rpc_ping(struct rpc_clnt *clnt);
  83
  84static void rpc_register_client(struct rpc_clnt *clnt)
  85{
  86	spin_lock(&rpc_client_lock);
  87	list_add(&clnt->cl_clients, &all_clients);
  88	spin_unlock(&rpc_client_lock);
  89}
  90
  91static void rpc_unregister_client(struct rpc_clnt *clnt)
  92{
  93	spin_lock(&rpc_client_lock);
  94	list_del(&clnt->cl_clients);
  95	spin_unlock(&rpc_client_lock);
  96}
  97
  98static int
  99rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
 100{
 101	static uint32_t clntid;
 102	struct nameidata nd;
 103	struct path path;
 104	char name[15];
 105	struct qstr q = {
 106		.name = name,
 107	};
 108	int error;
 109
 110	clnt->cl_path.mnt = ERR_PTR(-ENOENT);
 111	clnt->cl_path.dentry = ERR_PTR(-ENOENT);
 112	if (dir_name == NULL)
 113		return 0;
 114
 115	path.mnt = rpc_get_mount();
 116	if (IS_ERR(path.mnt))
 117		return PTR_ERR(path.mnt);
 118	error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
 119	if (error)
 120		goto err;
 121
 122	for (;;) {
 123		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
 124		name[sizeof(name) - 1] = '\0';
 125		q.hash = full_name_hash(q.name, q.len);
 126		path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
 127		if (!IS_ERR(path.dentry))
 128			break;
 129		error = PTR_ERR(path.dentry);
 130		if (error != -EEXIST) {
 131			printk(KERN_INFO "RPC: Couldn't create pipefs entry"
 132					" %s/%s, error %d\n",
 133					dir_name, name, error);
 134			goto err_path_put;
 135		}
 136	}
 137	path_put(&nd.path);
 138	clnt->cl_path = path;
 139	return 0;
 140err_path_put:
 141	path_put(&nd.path);
 142err:
 143	rpc_put_mount();
 144	return error;
 145}
 146
 147static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
 148{
 149	struct rpc_program	*program = args->program;
 150	struct rpc_version	*version;
 151	struct rpc_clnt		*clnt = NULL;
 152	struct rpc_auth		*auth;
 153	int err;
 154	size_t len;
 155
 156	/* sanity check the name before trying to print it */
 157	err = -EINVAL;
 158	len = strlen(args->servername);
 159	if (len > RPC_MAXNETNAMELEN)
 160		goto out_no_rpciod;
 161	len++;
 162
 163	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
 164			program->name, args->servername, xprt);
 165
 166	err = rpciod_up();
 167	if (err)
 168		goto out_no_rpciod;
 169	err = -EINVAL;
 170	if (!xprt)
 171		goto out_no_xprt;
 172
 173	if (args->version >= program->nrvers)
 174		goto out_err;
 175	version = program->version[args->version];
 176	if (version == NULL)
 177		goto out_err;
 178
 179	err = -ENOMEM;
 180	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
 181	if (!clnt)
 182		goto out_err;
 183	clnt->cl_parent = clnt;
 184
 185	clnt->cl_server = clnt->cl_inline_name;
 186	if (len > sizeof(clnt->cl_inline_name)) {
 187		char *buf = kmalloc(len, GFP_KERNEL);
 188		if (buf != NULL)
 189			clnt->cl_server = buf;
 190		else
 191			len = sizeof(clnt->cl_inline_name);
 192	}
 193	strlcpy(clnt->cl_server, args->servername, len);
 194
 195	clnt->cl_xprt     = xprt;
 196	clnt->cl_procinfo = version->procs;
 197	clnt->cl_maxproc  = version->nrprocs;
 198	clnt->cl_protname = program->name;
 199	clnt->cl_prog     = args->prognumber ? : program->number;
 200	clnt->cl_vers     = version->number;
 201	clnt->cl_stats    = program->stats;
 202	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
 203	err = -ENOMEM;
 204	if (clnt->cl_metrics == NULL)
 205		goto out_no_stats;
 206	clnt->cl_program  = program;
 207	INIT_LIST_HEAD(&clnt->cl_tasks);
 208	spin_lock_init(&clnt->cl_lock);
 209
 210	if (!xprt_bound(clnt->cl_xprt))
 211		clnt->cl_autobind = 1;
 212
 213	clnt->cl_timeout = xprt->timeout;
 214	if (args->timeout != NULL) {
 215		memcpy(&clnt->cl_timeout_default, args->timeout,
 216				sizeof(clnt->cl_timeout_default));
 217		clnt->cl_timeout = &clnt->cl_timeout_default;
 218	}
 219
 220	clnt->cl_rtt = &clnt->cl_rtt_default;
 221	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
 222	clnt->cl_principal = NULL;
 223	if (args->client_name) {
 224		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
 225		if (!clnt->cl_principal)
 226			goto out_no_principal;
 227	}
 228
 229	kref_init(&clnt->cl_kref);
 230
 231	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
 232	if (err < 0)
 233		goto out_no_path;
 234
 235	auth = rpcauth_create(args->authflavor, clnt);
 236	if (IS_ERR(auth)) {
 237		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
 238				args->authflavor);
 239		err = PTR_ERR(auth);
 240		goto out_no_auth;
 241	}
 242
 243	/* save the nodename */
 244	clnt->cl_nodelen = strlen(init_utsname()->nodename);
 245	if (clnt->cl_nodelen > UNX_MAXNODENAME)
 246		clnt->cl_nodelen = UNX_MAXNODENAME;
 247	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
 248	rpc_register_client(clnt);
 249	return clnt;
 250
 251out_no_auth:
 252	if (!IS_ERR(clnt->cl_path.dentry)) {
 253		rpc_remove_client_dir(clnt->cl_path.dentry);
 254		rpc_put_mount();
 255	}
 256out_no_path:
 257	kfree(clnt->cl_principal);
 258out_no_principal:
 259	rpc_free_iostats(clnt->cl_metrics);
 260out_no_stats:
 261	if (clnt->cl_server != clnt->cl_inline_name)
 262		kfree(clnt->cl_server);
 263	kfree(clnt);
 264out_err:
 265	xprt_put(xprt);
 266out_no_xprt:
 267	rpciod_down();
 268out_no_rpciod:
 269	return ERR_PTR(err);
 270}
 271
 272/*
 273 * rpc_create - create an RPC client and transport with one call
 274 * @args: rpc_clnt create argument structure
 275 *
 276 * Creates and initializes an RPC transport and an RPC client.
 277 *
 278 * It can ping the server in order to determine if it is up, and to see if
 279 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 280 * this behavior so asynchronous tasks can also use rpc_create.
 281 */
 282struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 283{
 284	struct rpc_xprt *xprt;
 285	struct rpc_clnt *clnt;
 286	struct xprt_create xprtargs = {
 287		.ident = args->protocol,
 288		.srcaddr = args->saddress,
 289		.dstaddr = args->address,
 290		.addrlen = args->addrsize,
 291		.bc_xprt = args->bc_xprt,
 292	};
 293	char servername[48];
 294
 295	/*
 296	 * If the caller chooses not to specify a hostname, whip
 297	 * up a string representation of the passed-in address.
 298	 */
 299	if (args->servername == NULL) {
 300		servername[0] = '\0';
 301		switch (args->address->sa_family) {
 302		case AF_INET: {
 303			struct sockaddr_in *sin =
 304					(struct sockaddr_in *)args->address;
 305			snprintf(servername, sizeof(servername), "%pI4",
 306				 &sin->sin_addr.s_addr);
 307			break;
 308		}
 309		case AF_INET6: {
 310			struct sockaddr_in6 *sin =
 311					(struct sockaddr_in6 *)args->address;
 312			snprintf(servername, sizeof(servername), "%pI6",
 313				 &sin->sin6_addr);
 314			break;
 315		}
 316		default:
 317			/* caller wants default server name, but
 318			 * address family isn't recognized. */
 319			return ERR_PTR(-EINVAL);
 320		}
 321		args->servername = servername;
 322	}
 323
 324	xprt = xprt_create_transport(&xprtargs);
 325	if (IS_ERR(xprt))
 326		return (struct rpc_clnt *)xprt;
 327
 328	/*
 329	 * By default, kernel RPC client connects from a reserved port.
 330	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
 331	 * but it is always enabled for rpciod, which handles the connect
 332	 * operation.
 333	 */
 334	xprt->resvport = 1;
 335	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
 336		xprt->resvport = 0;
 337
 338	clnt = rpc_new_client(args, xprt);
 339	if (IS_ERR(clnt))
 340		return clnt;
 341
 342	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
 343		int err = rpc_ping(clnt);
 344		if (err != 0) {
 345			rpc_shutdown_client(clnt);
 346			return ERR_PTR(err);
 347		}
 348	}
 349
 350	clnt->cl_softrtry = 1;
 351	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
 352		clnt->cl_softrtry = 0;
 353
 354	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
 355		clnt->cl_autobind = 1;
 356	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
 357		clnt->cl_discrtry = 1;
 358	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 359		clnt->cl_chatty = 1;
 360
 361	return clnt;
 362}
 363EXPORT_SYMBOL_GPL(rpc_create);
 364
 365/*
 366 * This function clones the RPC client structure. It allows us to share the
 367 * same transport while varying parameters such as the authentication
 368 * flavour.
 369 */
 370struct rpc_clnt *
 371rpc_clone_client(struct rpc_clnt *clnt)
 372{
 373	struct rpc_clnt *new;
 374	int err = -ENOMEM;
 375
 376	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
 377	if (!new)
 378		goto out_no_clnt;
 379	new->cl_parent = clnt;
 380	/* Turn off autobind on clones */
 381	new->cl_autobind = 0;
 382	INIT_LIST_HEAD(&new->cl_tasks);
 383	spin_lock_init(&new->cl_lock);
 384	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
 385	new->cl_metrics = rpc_alloc_iostats(clnt);
 386	if (new->cl_metrics == NULL)
 387		goto out_no_stats;
 388	if (clnt->cl_principal) {
 389		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
 390		if (new->cl_principal == NULL)
 391			goto out_no_principal;
 392	}
 393	kref_init(&new->cl_kref);
 394	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
 395	if (err != 0)
 396		goto out_no_path;
 397	if (new->cl_auth)
 398		atomic_inc(&new->cl_auth->au_count);
 399	xprt_get(clnt->cl_xprt);
 400	kref_get(&clnt->cl_kref);
 401	rpc_register_client(new);
 402	rpciod_up();
 403	return new;
 404out_no_path:
 405	kfree(new->cl_principal);
 406out_no_principal:
 407	rpc_free_iostats(new->cl_metrics);
 408out_no_stats:
 409	kfree(new);
 410out_no_clnt:
 411	dprintk("RPC:       %s: returned error %d\n", __func__, err);
 412	return ERR_PTR(err);
 413}
 414EXPORT_SYMBOL_GPL(rpc_clone_client);
 415
 416/*
 417 * Properly shut down an RPC client, terminating all outstanding
 418 * requests.
 419 */
 420void rpc_shutdown_client(struct rpc_clnt *clnt)
 421{
 422	dprintk("RPC:       shutting down %s client for %s\n",
 423			clnt->cl_protname, clnt->cl_server);
 424
 425	while (!list_empty(&clnt->cl_tasks)) {
 426		rpc_killall_tasks(clnt);
 427		wait_event_timeout(destroy_wait,
 428			list_empty(&clnt->cl_tasks), 1*HZ);
 429	}
 430
 431	rpc_release_client(clnt);
 432}
 433EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 434
 435/*
 436 * Free an RPC client
 437 */
 438static void
 439rpc_free_client(struct kref *kref)
 440{
 441	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
 442
 443	dprintk("RPC:       destroying %s client for %s\n",
 444			clnt->cl_protname, clnt->cl_server);
 445	if (!IS_ERR(clnt->cl_path.dentry)) {
 446		rpc_remove_client_dir(clnt->cl_path.dentry);
 447		rpc_put_mount();
 448	}
 449	if (clnt->cl_parent != clnt) {
 450		rpc_release_client(clnt->cl_parent);
 451		goto out_free;
 452	}
 453	if (clnt->cl_server != clnt->cl_inline_name)
 454		kfree(clnt->cl_server);
 455out_free:
 456	rpc_unregister_client(clnt);
 457	rpc_free_iostats(clnt->cl_metrics);
 458	kfree(clnt->cl_principal);
 459	clnt->cl_metrics = NULL;
 460	xprt_put(clnt->cl_xprt);
 461	rpciod_down();
 462	kfree(clnt);
 463}
 464
  465/*
  466 * Release an RPC client's auth handle before freeing the client itself
  467 */
 468static void
 469rpc_free_auth(struct kref *kref)
 470{
 471	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
 472
 473	if (clnt->cl_auth == NULL) {
 474		rpc_free_client(kref);
 475		return;
 476	}
 477
 478	/*
 479	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
 480	 *       release remaining GSS contexts. This mechanism ensures
 481	 *       that it can do so safely.
 482	 */
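	/*
	 * Re-initializing the kref to 1 keeps the client alive while
	 * rpcauth_release() runs; the kref_put() below then drops this
	 * temporary reference, freeing the client once no other
	 * references remain.
	 */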
 483	kref_init(kref);
 484	rpcauth_release(clnt->cl_auth);
 485	clnt->cl_auth = NULL;
 486	kref_put(kref, rpc_free_client);
 487}
 488
 489/*
 490 * Release reference to the RPC client
 491 */
 492void
 493rpc_release_client(struct rpc_clnt *clnt)
 494{
 495	dprintk("RPC:       rpc_release_client(%p)\n", clnt);
 496
 497	if (list_empty(&clnt->cl_tasks))
 498		wake_up(&destroy_wait);
 499	kref_put(&clnt->cl_kref, rpc_free_auth);
 500}
 501
 502/**
 503 * rpc_bind_new_program - bind a new RPC program to an existing client
 504 * @old: old rpc_client
 505 * @program: rpc program to set
 506 * @vers: rpc program version
 507 *
 508 * Clones the rpc client and sets up a new RPC program. This is mainly
 509 * of use for enabling different RPC programs to share the same transport.
 510 * The Sun NFSv2/v3 ACL protocol can do this.
 511 */
 512struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 513				      struct rpc_program *program,
 514				      u32 vers)
 515{
 516	struct rpc_clnt *clnt;
 517	struct rpc_version *version;
 518	int err;
 519
 520	BUG_ON(vers >= program->nrvers || !program->version[vers]);
 521	version = program->version[vers];
 522	clnt = rpc_clone_client(old);
 523	if (IS_ERR(clnt))
 524		goto out;
 525	clnt->cl_procinfo = version->procs;
 526	clnt->cl_maxproc  = version->nrprocs;
 527	clnt->cl_protname = program->name;
 528	clnt->cl_prog     = program->number;
 529	clnt->cl_vers     = version->number;
 530	clnt->cl_stats    = program->stats;
 531	err = rpc_ping(clnt);
 532	if (err != 0) {
 533		rpc_shutdown_client(clnt);
 534		clnt = ERR_PTR(err);
 535	}
 536out:
 537	return clnt;
 538}
 539EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 540
 541/*
 542 * Default callback for async RPC calls
 543 */
 544static void
 545rpc_default_callback(struct rpc_task *task, void *data)
 546{
 547}
 548
 549static const struct rpc_call_ops rpc_default_ops = {
 550	.rpc_call_done = rpc_default_callback,
 551};
 552
 553/**
 554 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 555 * @task_setup_data: pointer to task initialisation data
 556 */
 557struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
 558{
 559	struct rpc_task *task, *ret;
 560
 561	task = rpc_new_task(task_setup_data);
 562	if (task == NULL) {
 563		rpc_release_calldata(task_setup_data->callback_ops,
 564				task_setup_data->callback_data);
 565		ret = ERR_PTR(-ENOMEM);
 566		goto out;
 567	}
 568
 569	if (task->tk_status != 0) {
 570		ret = ERR_PTR(task->tk_status);
 571		rpc_put_task(task);
 572		goto out;
 573	}
 574	atomic_inc(&task->tk_count);
 575	rpc_execute(task);
 576	ret = task;
 577out:
 578	return ret;
 579}
 580EXPORT_SYMBOL_GPL(rpc_run_task);
 581
 582/**
 583 * rpc_call_sync - Perform a synchronous RPC call
 584 * @clnt: pointer to RPC client
 585 * @msg: RPC call parameters
 586 * @flags: RPC call flags
 587 */
 588int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
 589{
 590	struct rpc_task	*task;
 591	struct rpc_task_setup task_setup_data = {
 592		.rpc_client = clnt,
 593		.rpc_message = msg,
 594		.callback_ops = &rpc_default_ops,
 595		.flags = flags,
 596	};
 597	int status;
 598
 599	BUG_ON(flags & RPC_TASK_ASYNC);
 600
 601	task = rpc_run_task(&task_setup_data);
 602	if (IS_ERR(task))
 603		return PTR_ERR(task);
 604	status = task->tk_status;
 605	rpc_put_task(task);
 606	return status;
 607}
 608EXPORT_SYMBOL_GPL(rpc_call_sync);
 609
 610/**
 611 * rpc_call_async - Perform an asynchronous RPC call
 612 * @clnt: pointer to RPC client
 613 * @msg: RPC call parameters
 614 * @flags: RPC call flags
 615 * @tk_ops: RPC call ops
 616 * @data: user call data
 617 */
 618int
 619rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
 620	       const struct rpc_call_ops *tk_ops, void *data)
 621{
 622	struct rpc_task	*task;
 623	struct rpc_task_setup task_setup_data = {
 624		.rpc_client = clnt,
 625		.rpc_message = msg,
 626		.callback_ops = tk_ops,
 627		.callback_data = data,
 628		.flags = flags|RPC_TASK_ASYNC,
 629	};
 630
 631	task = rpc_run_task(&task_setup_data);
 632	if (IS_ERR(task))
 633		return PTR_ERR(task);
 634	rpc_put_task(task);
 635	return 0;
 636}
 637EXPORT_SYMBOL_GPL(rpc_call_async);
 638
 639#if defined(CONFIG_NFS_V4_1)
 640/**
 641 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 642 * rpc_execute against it
 643 * @req: RPC request
 644 * @tk_ops: RPC call ops
 645 */
 646struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
 647				const struct rpc_call_ops *tk_ops)
 648{
 649	struct rpc_task *task;
 650	struct xdr_buf *xbufp = &req->rq_snd_buf;
 651	struct rpc_task_setup task_setup_data = {
 652		.callback_ops = tk_ops,
 653	};
 654
 655	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
 656	/*
 657	 * Create an rpc_task to send the data
 658	 */
 659	task = rpc_new_task(&task_setup_data);
 660	if (!task) {
 661		xprt_free_bc_request(req);
 662		task = ERR_PTR(-ENOMEM);
 663		goto out;
 664	}
 665	task->tk_rqstp = req;
 666
 667	/*
 668	 * Set up the xdr_buf length.
 669	 * This also indicates that the buffer is XDR encoded already.
 670	 */
 671	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
 672			xbufp->tail[0].iov_len;
 673
 674	task->tk_action = call_bc_transmit;
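	/* Take an extra reference so the caller retains a valid task
	 * pointer after rpc_execute() has run. */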
 675	atomic_inc(&task->tk_count);
 676	BUG_ON(atomic_read(&task->tk_count) != 2);
 677	rpc_execute(task);
 678
 679out:
 680	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
 681	return task;
 682}
 683#endif /* CONFIG_NFS_V4_1 */
 684
 685void
 686rpc_call_start(struct rpc_task *task)
 687{
 688	task->tk_action = call_start;
 689}
 690EXPORT_SYMBOL_GPL(rpc_call_start);
 691
 692/**
 693 * rpc_peeraddr - extract remote peer address from clnt's xprt
 694 * @clnt: RPC client structure
 695 * @buf: target buffer
 696 * @bufsize: length of target buffer
 697 *
 698 * Returns the number of bytes that are actually in the stored address.
 699 */
 700size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
 701{
 702	size_t bytes;
 703	struct rpc_xprt *xprt = clnt->cl_xprt;
 704
 705	bytes = sizeof(xprt->addr);
 706	if (bytes > bufsize)
 707		bytes = bufsize;
 708	memcpy(buf, &clnt->cl_xprt->addr, bytes);
 709	return xprt->addrlen;
 710}
 711EXPORT_SYMBOL_GPL(rpc_peeraddr);
 712
 713/**
 714 * rpc_peeraddr2str - return remote peer address in printable format
 715 * @clnt: RPC client structure
 716 * @format: address format
 717 *
 718 */
 719const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
 720			     enum rpc_display_format_t format)
 721{
 722	struct rpc_xprt *xprt = clnt->cl_xprt;
 723
 724	if (xprt->address_strings[format] != NULL)
 725		return xprt->address_strings[format];
 726	else
 727		return "unprintable";
 728}
 729EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
 730
 731void
 732rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
 733{
 734	struct rpc_xprt *xprt = clnt->cl_xprt;
 735	if (xprt->ops->set_buffer_size)
 736		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
 737}
 738EXPORT_SYMBOL_GPL(rpc_setbufsize);
 739
 740/*
 741 * Return size of largest payload RPC client can support, in bytes
 742 *
 743 * For stream transports, this is one RPC record fragment (see RFC
 744 * 1831), as we don't support multi-record requests yet.  For datagram
 745 * transports, this is the size of an IP packet minus the IP, UDP, and
 746 * RPC header sizes.
 747 */
 748size_t rpc_max_payload(struct rpc_clnt *clnt)
 749{
 750	return clnt->cl_xprt->max_payload;
 751}
 752EXPORT_SYMBOL_GPL(rpc_max_payload);
 753
 754/**
 755 * rpc_force_rebind - force transport to check that remote port is unchanged
 756 * @clnt: client to rebind
 757 *
 758 */
 759void rpc_force_rebind(struct rpc_clnt *clnt)
 760{
 761	if (clnt->cl_autobind)
 762		xprt_clear_bound(clnt->cl_xprt);
 763}
 764EXPORT_SYMBOL_GPL(rpc_force_rebind);
 765
 766/*
 767 * Restart an (async) RPC call from the call_prepare state.
 768 * Usually called from within the exit handler.
 769 */
 770void
 771rpc_restart_call_prepare(struct rpc_task *task)
 772{
 773	if (RPC_ASSASSINATED(task))
 774		return;
 775	task->tk_action = rpc_prepare_task;
 776}
 777EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
 778
 779/*
 780 * Restart an (async) RPC call. Usually called from within the
 781 * exit handler.
 782 */
 783void
 784rpc_restart_call(struct rpc_task *task)
 785{
 786	if (RPC_ASSASSINATED(task))
 787		return;
 788
 789	task->tk_action = call_start;
 790}
 791EXPORT_SYMBOL_GPL(rpc_restart_call);
 792
 793#ifdef RPC_DEBUG
 794static const char *rpc_proc_name(const struct rpc_task *task)
 795{
 796	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
 797
 798	if (proc) {
 799		if (proc->p_name)
 800			return proc->p_name;
 801		else
 802			return "NULL";
 803	} else
 804		return "no proc";
 805}
 806#endif
 807
 808/*
 809 * 0.  Initial state
 810 *
 811 *     Other FSM states can be visited zero or more times, but
 812 *     this state is visited exactly once for each RPC.
 813 */
 814static void
 815call_start(struct rpc_task *task)
 816{
 817	struct rpc_clnt	*clnt = task->tk_client;
 818
 819	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
 820			clnt->cl_protname, clnt->cl_vers,
 821			rpc_proc_name(task),
 822			(RPC_IS_ASYNC(task) ? "async" : "sync"));
 823
 824	/* Increment call count */
 825	task->tk_msg.rpc_proc->p_count++;
 826	clnt->cl_stats->rpccnt++;
 827	task->tk_action = call_reserve;
 828}
 829
 830/*
 831 * 1.	Reserve an RPC call slot
 832 */
 833static void
 834call_reserve(struct rpc_task *task)
 835{
 836	dprint_status(task);
 837
 838	if (!rpcauth_uptodatecred(task)) {
 839		task->tk_action = call_refresh;
 840		return;
 841	}
 842
 843	task->tk_status  = 0;
 844	task->tk_action  = call_reserveresult;
 845	xprt_reserve(task);
 846}
 847
 848/*
 849 * 1b.	Grok the result of xprt_reserve()
 850 */
 851static void
 852call_reserveresult(struct rpc_task *task)
 853{
 854	int status = task->tk_status;
 855
 856	dprint_status(task);
 857
 858	/*
 859	 * After a call to xprt_reserve(), we must have either
 860	 * a request slot or else an error status.
 861	 */
 862	task->tk_status = 0;
 863	if (status >= 0) {
 864		if (task->tk_rqstp) {
 865			task->tk_action = call_allocate;
 866			return;
 867		}
 868
 869		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
 870				__func__, status);
 871		rpc_exit(task, -EIO);
 872		return;
 873	}
 874
 875	/*
 876	 * Even though there was an error, we may have acquired
 877	 * a request slot somehow.  Make sure not to leak it.
 878	 */
 879	if (task->tk_rqstp) {
 880		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
 881				__func__, status);
 882		xprt_release(task);
 883	}
 884
 885	switch (status) {
 886	case -EAGAIN:	/* woken up; retry */
 887		task->tk_action = call_reserve;
 888		return;
 889	case -EIO:	/* probably a shutdown */
 890		break;
 891	default:
 892		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
 893				__func__, status);
 894		break;
 895	}
 896	rpc_exit(task, status);
 897}
 898
 899/*
 900 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 901 *	(Note: buffer memory is freed in xprt_release).
 902 */
 903static void
 904call_allocate(struct rpc_task *task)
 905{
 906	unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
 907	struct rpc_rqst *req = task->tk_rqstp;
 908	struct rpc_xprt *xprt = task->tk_xprt;
 909	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
 910
 911	dprint_status(task);
 912
 913	task->tk_status = 0;
 914	task->tk_action = call_bind;
 915
 916	if (req->rq_buffer)
 917		return;
 918
 919	if (proc->p_proc != 0) {
 920		BUG_ON(proc->p_arglen == 0);
 921		if (proc->p_decode != NULL)
 922			BUG_ON(proc->p_replen == 0);
 923	}
 924
 925	/*
 926	 * Calculate the size (in quads) of the RPC call
 927	 * and reply headers, and convert both values
 928	 * to byte sizes.
 929	 */
 930	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
 931	req->rq_callsize <<= 2;
 932	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
 933	req->rq_rcvsize <<= 2;
 934
 935	req->rq_buffer = xprt->ops->buf_alloc(task,
 936					req->rq_callsize + req->rq_rcvsize);
 937	if (req->rq_buffer != NULL)
 938		return;
 939
 940	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 941
 942	if (RPC_IS_ASYNC(task) || !signalled()) {
 943		task->tk_action = call_allocate;
 944		rpc_delay(task, HZ>>4);
 945		return;
 946	}
 947
 948	rpc_exit(task, -ERESTARTSYS);
 949}
 950
 951static inline int
 952rpc_task_need_encode(struct rpc_task *task)
 953{
 954	return task->tk_rqstp->rq_snd_buf.len == 0;
 955}
 956
 957static inline void
 958rpc_task_force_reencode(struct rpc_task *task)
 959{
 960	task->tk_rqstp->rq_snd_buf.len = 0;
 961	task->tk_rqstp->rq_bytes_sent = 0;
 962}
 963
 964static inline void
 965rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
 966{
 967	buf->head[0].iov_base = start;
 968	buf->head[0].iov_len = len;
 969	buf->tail[0].iov_len = 0;
 970	buf->page_len = 0;
 971	buf->flags = 0;
 972	buf->len = 0;
 973	buf->buflen = len;
 974}
 975
 976/*
 977 * 3.	Encode arguments of an RPC call
 978 */
 979static void
 980rpc_xdr_encode(struct rpc_task *task)
 981{
 982	struct rpc_rqst	*req = task->tk_rqstp;
 983	kxdrproc_t	encode;
 984	__be32		*p;
 985
 986	dprint_status(task);
 987
 988	rpc_xdr_buf_init(&req->rq_snd_buf,
 989			 req->rq_buffer,
 990			 req->rq_callsize);
 991	rpc_xdr_buf_init(&req->rq_rcv_buf,
 992			 (char *)req->rq_buffer + req->rq_callsize,
 993			 req->rq_rcvsize);
 994
 995	p = rpc_encode_header(task);
 996	if (p == NULL) {
 997		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
 998		rpc_exit(task, -EIO);
 999		return;
1000	}
1001
1002	encode = task->tk_msg.rpc_proc->p_encode;
1003	if (encode == NULL)
1004		return;
1005
1006	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1007			task->tk_msg.rpc_argp);
1008}
1009
1010/*
1011 * 4.	Get the server port number if not yet set
1012 */
1013static void
1014call_bind(struct rpc_task *task)
1015{
1016	struct rpc_xprt *xprt = task->tk_xprt;
1017
1018	dprint_status(task);
1019
1020	task->tk_action = call_connect;
1021	if (!xprt_bound(xprt)) {
1022		task->tk_action = call_bind_status;
1023		task->tk_timeout = xprt->bind_timeout;
1024		xprt->ops->rpcbind(task);
1025	}
1026}
1027
1028/*
1029 * 4a.	Sort out bind result
1030 */
1031static void
1032call_bind_status(struct rpc_task *task)
1033{
1034	int status = -EIO;
1035
1036	if (task->tk_status >= 0) {
1037		dprint_status(task);
1038		task->tk_status = 0;
1039		task->tk_action = call_connect;
1040		return;
1041	}
1042
1043	switch (task->tk_status) {
1044	case -ENOMEM:
1045		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1046		rpc_delay(task, HZ >> 2);
1047		goto retry_timeout;
1048	case -EACCES:
1049		dprintk("RPC: %5u remote rpcbind: RPC program/version "
1050				"unavailable\n", task->tk_pid);
1051		/* fail immediately if this is an RPC ping */
1052		if (task->tk_msg.rpc_proc->p_proc == 0) {
1053			status = -EOPNOTSUPP;
1054			break;
1055		}
1056		rpc_delay(task, 3*HZ);
1057		goto retry_timeout;
1058	case -ETIMEDOUT:
1059		dprintk("RPC: %5u rpcbind request timed out\n",
1060				task->tk_pid);
1061		goto retry_timeout;
1062	case -EPFNOSUPPORT:
1063		/* server doesn't support any rpcbind version we know of */
1064		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1065				task->tk_pid);
1066		break;
1067	case -EPROTONOSUPPORT:
1068		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1069				task->tk_pid);
1070		task->tk_status = 0;
1071		task->tk_action = call_bind;
1072		return;
1073	case -ECONNREFUSED:		/* connection problems */
1074	case -ECONNRESET:
1075	case -ENOTCONN:
1076	case -EHOSTDOWN:
1077	case -EHOSTUNREACH:
1078	case -ENETUNREACH:
1079	case -EPIPE:
1080		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1081				task->tk_pid, task->tk_status);
1082		if (!RPC_IS_SOFTCONN(task)) {
1083			rpc_delay(task, 5*HZ);
1084			goto retry_timeout;
1085		}
1086		status = task->tk_status;
1087		break;
1088	default:
1089		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1090				task->tk_pid, -task->tk_status);
1091	}
1092
1093	rpc_exit(task, status);
1094	return;
1095
1096retry_timeout:
1097	task->tk_action = call_timeout;
1098}
1099
1100/*
1101 * 4b.	Connect to the RPC server
1102 */
1103static void
1104call_connect(struct rpc_task *task)
1105{
1106	struct rpc_xprt *xprt = task->tk_xprt;
1107
1108	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1109			task->tk_pid, xprt,
1110			(xprt_connected(xprt) ? "is" : "is not"));
1111
1112	task->tk_action = call_transmit;
1113	if (!xprt_connected(xprt)) {
1114		task->tk_action = call_connect_status;
1115		if (task->tk_status < 0)
1116			return;
1117		xprt_connect(task);
1118	}
1119}
1120
1121/*
1122 * 4c.	Sort out connect result
1123 */
1124static void
1125call_connect_status(struct rpc_task *task)
1126{
1127	struct rpc_clnt *clnt = task->tk_client;
1128	int status = task->tk_status;
1129
1130	dprint_status(task);
1131
1132	task->tk_status = 0;
1133	if (status >= 0 || status == -EAGAIN) {
1134		clnt->cl_stats->netreconn++;
1135		task->tk_action = call_transmit;
1136		return;
1137	}
1138
1139	switch (status) {
1140		/* if soft mounted, test if we've timed out */
1141	case -ETIMEDOUT:
1142		task->tk_action = call_timeout;
1143		break;
1144	default:
1145		rpc_exit(task, -EIO);
1146	}
1147}
1148
1149/*
1150 * 5.	Transmit the RPC request, and wait for reply
1151 */
1152static void
1153call_transmit(struct rpc_task *task)
1154{
1155	dprint_status(task);
1156
1157	task->tk_action = call_status;
1158	if (task->tk_status < 0)
1159		return;
1160	task->tk_status = xprt_prepare_transmit(task);
1161	if (task->tk_status != 0)
1162		return;
1163	task->tk_action = call_transmit_status;
1164	/* Encode here so that rpcsec_gss can use correct sequence number. */
1165	if (rpc_task_need_encode(task)) {
1166		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1167		rpc_xdr_encode(task);
1168		/* Did the encode result in an error condition? */
1169		if (task->tk_status != 0) {
1170			/* Was the error nonfatal? */
1171			if (task->tk_status == -EAGAIN)
1172				rpc_delay(task, HZ >> 4);
1173			else
1174				rpc_exit(task, task->tk_status);
1175			return;
1176		}
1177	}
1178	xprt_transmit(task);
1179	if (task->tk_status < 0)
1180		return;
1181	/*
1182	 * On success, ensure that we call xprt_end_transmit() before sleeping
1183	 * in order to allow access to the socket to other RPC requests.
1184	 */
1185	call_transmit_status(task);
1186	if (rpc_reply_expected(task))
1187		return;
1188	task->tk_action = rpc_exit_task;
1189	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
1190}
1191
1192/*
1193 * 5a.	Handle cleanup after a transmission
1194 */
1195static void
1196call_transmit_status(struct rpc_task *task)
1197{
1198	task->tk_action = call_status;
1199
1200	/*
1201	 * Common case: success.  Force the compiler to put this
1202	 * test first.
1203	 */
1204	if (task->tk_status == 0) {
1205		xprt_end_transmit(task);
1206		rpc_task_force_reencode(task);
1207		return;
1208	}
1209
1210	switch (task->tk_status) {
1211	case -EAGAIN:
1212		break;
1213	default:
1214		dprint_status(task);
1215		xprt_end_transmit(task);
1216		rpc_task_force_reencode(task);
1217		break;
1218		/*
1219		 * Special cases: if we've been waiting on the
1220		 * socket's write_space() callback, or if the
1221		 * socket just returned a connection error,
1222		 * then hold onto the transport lock.
1223		 */
1224	case -ECONNREFUSED:
1225	case -EHOSTDOWN:
1226	case -EHOSTUNREACH:
1227	case -ENETUNREACH:
1228		if (RPC_IS_SOFTCONN(task)) {
1229			xprt_end_transmit(task);
1230			rpc_exit(task, task->tk_status);
1231			break;
1232		}
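		/* fall through: hard tasks keep the transport lock
		 * (no xprt_end_transmit) and force a re-encode */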
1233	case -ECONNRESET:
1234	case -ENOTCONN:
1235	case -EPIPE:
1236		rpc_task_force_reencode(task);
1237	}
1238}
1239
1240#if defined(CONFIG_NFS_V4_1)
1241/*
1242 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
1243 * addition, disconnect on connectivity errors.
1244 */
1245static void
1246call_bc_transmit(struct rpc_task *task)
1247{
1248	struct rpc_rqst *req = task->tk_rqstp;
1249
1250	BUG_ON(task->tk_status != 0);
1251	task->tk_status = xprt_prepare_transmit(task);
1252	if (task->tk_status == -EAGAIN) {
1253		/*
1254		 * Could not reserve the transport. Try again after the
1255		 * transport is released.
1256		 */
1257		task->tk_status = 0;
1258		task->tk_action = call_bc_transmit;
1259		return;
1260	}
1261
1262	task->tk_action = rpc_exit_task;
1263	if (task->tk_status < 0) {
1264		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1265			"error: %d\n", task->tk_status);
1266		return;
1267	}
1268
1269	xprt_transmit(task);
1270	xprt_end_transmit(task);
1271	dprint_status(task);
1272	switch (task->tk_status) {
1273	case 0:
1274		/* Success */
1275		break;
1276	case -EHOSTDOWN:
1277	case -EHOSTUNREACH:
1278	case -ENETUNREACH:
1279	case -ETIMEDOUT:
1280		/*
1281		 * Problem reaching the server.  Disconnect and let the
1282		 * forechannel reestablish the connection.  The server will
1283		 * have to retransmit the backchannel request and we'll
1284		 * reprocess it.  Since these ops are idempotent, there's no
1285		 * need to cache our reply at this time.
1286		 */
1287		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1288			"error: %d\n", task->tk_status);
1289		xprt_conditional_disconnect(task->tk_xprt,
1290			req->rq_connect_cookie);
1291		break;
1292	default:
1293		/*
1294		 * We were unable to reply and will have to drop the
1295		 * request.  The server should reconnect and retransmit.
1296		 */
1297		BUG_ON(task->tk_status == -EAGAIN);
1298		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1299			"error: %d\n", task->tk_status);
1300		break;
1301	}
1302	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1303}
1304#endif /* CONFIG_NFS_V4_1 */
1305
1306/*
1307 * 6.	Sort out the RPC call status
1308 */
1309static void
1310call_status(struct rpc_task *task)
1311{
1312	struct rpc_clnt	*clnt = task->tk_client;
1313	struct rpc_rqst	*req = task->tk_rqstp;
1314	int		status;
1315
1316	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1317		task->tk_status = req->rq_reply_bytes_recvd;
1318
1319	dprint_status(task);
1320
1321	status = task->tk_status;
1322	if (status >= 0) {
1323		task->tk_action = call_decode;
1324		return;
1325	}
1326
1327	task->tk_status = 0;
1328	switch(status) {
1329	case -EHOSTDOWN:
1330	case -EHOSTUNREACH:
1331	case -ENETUNREACH:
1332		/*
1333		 * Delay any retries for 3 seconds, then handle as if it
1334		 * were a timeout.
1335		 */
1336		rpc_delay(task, 3*HZ);
1337	case -ETIMEDOUT:
1338		task->tk_action = call_timeout;
1339		if (task->tk_client->cl_discrtry)
1340			xprt_conditional_disconnect(task->tk_xprt,
1341					req->rq_connect_cookie);
1342		break;
1343	case -ECONNRESET:
1344	case -ECONNREFUSED:
1345		rpc_force_rebind(clnt);
1346		rpc_delay(task, 3*HZ);
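		/* fall through: retry via call_bind after the delay */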
1347	case -EPIPE:
1348	case -ENOTCONN:
1349		task->tk_action = call_bind;
1350		break;
1351	case -EAGAIN:
1352		task->tk_action = call_transmit;
1353		break;
1354	case -EIO:
1355		/* shutdown or soft timeout */
1356		rpc_exit(task, status);
1357		break;
1358	default:
1359		if (clnt->cl_chatty)
1360			printk("%s: RPC call returned error %d\n",
1361			       clnt->cl_protname, -status);
1362		rpc_exit(task, status);
1363	}
1364}
1365
1366/*
1367 * 6a.	Handle RPC timeout
1368 * 	We do not release the request slot, so we keep using the
1369 *	same XID for all retransmits.
1370 */
1371static void
1372call_timeout(struct rpc_task *task)
1373{
1374	struct rpc_clnt	*clnt = task->tk_client;
1375
1376	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1377		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1378		goto retry;
1379	}
1380
1381	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1382	task->tk_timeouts++;
1383
1384	if (RPC_IS_SOFTCONN(task)) {
1385		rpc_exit(task, -ETIMEDOUT);
1386		return;
1387	}
1388	if (RPC_IS_SOFT(task)) {
1389		if (clnt->cl_chatty)
1390			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1391				clnt->cl_protname, clnt->cl_server);
1392		rpc_exit(task, -EIO);
1393		return;
1394	}
1395
1396	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1397		task->tk_flags |= RPC_CALL_MAJORSEEN;
1398		if (clnt->cl_chatty)
1399			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1400			clnt->cl_protname, clnt->cl_server);
1401	}
1402	rpc_force_rebind(clnt);
1403	/*
1404	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
1405	 * event? RFC2203 requires the server to drop all such requests.
1406	 */
1407	rpcauth_invalcred(task);
1408
1409retry:
1410	clnt->cl_stats->rpcretrans++;
1411	task->tk_action = call_bind;
1412	task->tk_status = 0;
1413}
1414
1415/*
1416 * 7.	Decode the RPC reply
1417 */
1418static void
1419call_decode(struct rpc_task *task)
1420{
1421	struct rpc_clnt	*clnt = task->tk_client;
1422	struct rpc_rqst	*req = task->tk_rqstp;
1423	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
1424	__be32		*p;
1425
1426	dprintk("RPC: %5u call_decode (status %d)\n",
1427			task->tk_pid, task->tk_status);
1428
1429	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1430		if (clnt->cl_chatty)
1431			printk(KERN_NOTICE "%s: server %s OK\n",
1432				clnt->cl_protname, clnt->cl_server);
1433		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1434	}
1435
1436	/*
1437	 * Ensure that we see all writes made by xprt_complete_rqst()
1438	 * before it changed req->rq_reply_bytes_recvd.
1439	 */
1440	smp_rmb();
1441	req->rq_rcv_buf.len = req->rq_private_buf.len;
1442
1443	/* Check that the softirq receive buffer is valid */
1444	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1445				sizeof(req->rq_rcv_buf)) != 0);
1446
1447	if (req->rq_rcv_buf.len < 12) {
1448		if (!RPC_IS_SOFT(task)) {
1449			task->tk_action = call_bind;
1450			clnt->cl_stats->rpcretrans++;
1451			goto out_retry;
1452		}
1453		dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
1454				clnt->cl_protname, task->tk_status);
1455		task->tk_action = call_timeout;
1456		goto out_retry;
1457	}
1458
1459	p = rpc_verify_header(task);
1460	if (IS_ERR(p)) {
1461		if (p == ERR_PTR(-EAGAIN))
1462			goto out_retry;
1463		return;
1464	}
1465
1466	task->tk_action = rpc_exit_task;
1467
1468	if (decode) {
1469		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1470						      task->tk_msg.rpc_resp);
1471	}
1472	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
1473			task->tk_status);
1474	return;
1475out_retry:
1476	task->tk_status = 0;
1477	/* Note: rpc_verify_header() may have freed the RPC slot */
1478	if (task->tk_rqstp == req) {
1479		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1480		if (task->tk_client->cl_discrtry)
1481			xprt_conditional_disconnect(task->tk_xprt,
1482					req->rq_connect_cookie);
1483	}
1484}
1485
1486/*
1487 * 8.	Refresh the credentials if rejected by the server
1488 */
1489static void
1490call_refresh(struct rpc_task *task)
1491{
1492	dprint_status(task);
1493
1494	task->tk_action = call_refreshresult;
1495	task->tk_status = 0;
1496	task->tk_client->cl_stats->rpcauthrefresh++;
1497	rpcauth_refreshcred(task);
1498}
1499
1500/*
1501 * 8a.	Process the results of a credential refresh
1502 */
1503static void
1504call_refreshresult(struct rpc_task *task)
1505{
1506	int status = task->tk_status;
1507
1508	dprint_status(task);
1509
1510	task->tk_status = 0;
1511	task->tk_action = call_reserve;
1512	if (status >= 0 && rpcauth_uptodatecred(task))
1513		return;
1514	if (status == -EACCES) {
1515		rpc_exit(task, -EACCES);
1516		return;
1517	}
1518	task->tk_action = call_refresh;
1519	if (status != -ETIMEDOUT)
1520		rpc_delay(task, 3*HZ);
1521	return;
1522}
1523
1524static __be32 *
1525rpc_encode_header(struct rpc_task *task)
1526{
1527	struct rpc_clnt *clnt = task->tk_client;
1528	struct rpc_rqst	*req = task->tk_rqstp;
1529	__be32		*p = req->rq_svec[0].iov_base;
1530
1531	/* FIXME: check buffer size? */
1532
1533	p = xprt_skip_transport_header(task->tk_xprt, p);
1534	*p++ = req->rq_xid;		/* XID */
1535	*p++ = htonl(RPC_CALL);		/* CALL */
1536	*p++ = htonl(RPC_VERSION);	/* RPC version */
1537	*p++ = htonl(clnt->cl_prog);	/* program number */
1538	*p++ = htonl(clnt->cl_vers);	/* program version */
1539	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
1540	p = rpcauth_marshcred(task, p);
1541	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
1542	return p;
1543}
1544
1545static __be32 *
1546rpc_verify_header(struct rpc_task *task)
1547{
1548	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1549	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
1550	__be32	*p = iov->iov_base;
1551	u32 n;
1552	int error = -EACCES;
1553
1554	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1555		/* RFC-1014 says that the representation of XDR data must be a
1556		 * multiple of four bytes
1557		 * - if it isn't pointer subtraction in the NFS client may give
1558		 *   undefined results
1559		 */
1560		dprintk("RPC: %5u %s: XDR representation not a multiple of"
1561		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
1562		       task->tk_rqstp->rq_rcv_buf.len);
1563		goto out_eio;
1564	}
1565	if ((len -= 3) < 0)
1566		goto out_overflow;
1567
1568	p += 1; /* skip XID */
1569	if ((n = ntohl(*p++)) != RPC_REPLY) {
1570		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
1571			task->tk_pid, __func__, n);
1572		goto out_garbage;
1573	}
1574
1575	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
1576		if (--len < 0)
1577			goto out_overflow;
1578		switch ((n = ntohl(*p++))) {
1579			case RPC_AUTH_ERROR:
1580				break;
1581			case RPC_MISMATCH:
1582				dprintk("RPC: %5u %s: RPC call version "
1583						"mismatch!\n",
1584						task->tk_pid, __func__);
1585				error = -EPROTONOSUPPORT;
1586				goto out_err;
1587			default:
1588				dprintk("RPC: %5u %s: RPC call rejected, "
1589						"unknown error: %x\n",
1590						task->tk_pid, __func__, n);
1591				goto out_eio;
1592		}
1593		if (--len < 0)
1594			goto out_overflow;
1595		switch ((n = ntohl(*p++))) {
1596		case RPC_AUTH_REJECTEDCRED:
1597		case RPC_AUTH_REJECTEDVERF:
1598		case RPCSEC_GSS_CREDPROBLEM:
1599		case RPCSEC_GSS_CTXPROBLEM:
1600			if (!task->tk_cred_retry)
1601				break;
1602			task->tk_cred_retry--;
1603			dprintk("RPC: %5u %s: retry stale creds\n",
1604					task->tk_pid, __func__);
1605			rpcauth_invalcred(task);
1606			/* Ensure we obtain a new XID! */
1607			xprt_release(task);
1608			task->tk_action = call_refresh;
1609			goto out_retry;
1610		case RPC_AUTH_BADCRED:
1611		case RPC_AUTH_BADVERF:
1612			/* possibly garbled cred/verf? */
1613			if (!task->tk_garb_retry)
1614				break;
1615			task->tk_garb_retry--;
1616			dprintk("RPC: %5u %s: retry garbled creds\n",
1617					task->tk_pid, __func__);
1618			task->tk_action = call_bind;
1619			goto out_retry;
1620		case RPC_AUTH_TOOWEAK:
1621			printk(KERN_NOTICE "RPC: server %s requires stronger "
1622			       "authentication.\n", task->tk_client->cl_server);
1623			break;
1624		default:
1625			dprintk("RPC: %5u %s: unknown auth error: %x\n",
1626					task->tk_pid, __func__, n);
1627			error = -EIO;
1628		}
1629		dprintk("RPC: %5u %s: call rejected %d\n",
1630				task->tk_pid, __func__, n);
1631		goto out_err;
1632	}
1633	if (!(p = rpcauth_checkverf(task, p))) {
1634		dprintk("RPC: %5u %s: auth check failed\n",
1635				task->tk_pid, __func__);
1636		goto out_garbage;		/* bad verifier, retry */
1637	}
1638	len = p - (__be32 *)iov->iov_base - 1;
1639	if (len < 0)
1640		goto out_overflow;
1641	switch ((n = ntohl(*p++))) {
1642	case RPC_SUCCESS:
1643		return p;
1644	case RPC_PROG_UNAVAIL:
1645		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
1646				task->tk_pid, __func__,
1647				(unsigned int)task->tk_client->cl_prog,
1648				task->tk_client->cl_server);
1649		error = -EPFNOSUPPORT;
1650		goto out_err;
1651	case RPC_PROG_MISMATCH:
1652		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
1653				"server %s\n", task->tk_pid, __func__,
1654				(unsigned int)task->tk_client->cl_prog,
1655				(unsigned int)task->tk_client->cl_vers,
1656				task->tk_client->cl_server);
1657		error = -EPROTONOSUPPORT;
1658		goto out_err;
1659	case RPC_PROC_UNAVAIL:
1660		dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
1661				"version %u on server %s\n",
1662				task->tk_pid, __func__,
1663				rpc_proc_name(task),
1664				task->tk_client->cl_prog,
1665				task->tk_client->cl_vers,
1666				task->tk_client->cl_server);
1667		error = -EOPNOTSUPP;
1668		goto out_err;
1669	case RPC_GARBAGE_ARGS:
1670		dprintk("RPC: %5u %s: server saw garbage\n",
1671				task->tk_pid, __func__);
1672		break;			/* retry */
1673	default:
1674		dprintk("RPC: %5u %s: server accept status: %x\n",
1675				task->tk_pid, __func__, n);
1676		/* Also retry */
1677	}
1678
1679out_garbage:
1680	task->tk_client->cl_stats->rpcgarbage++;
1681	if (task->tk_garb_retry) {
1682		task->tk_garb_retry--;
1683		dprintk("RPC: %5u %s: retrying\n",
1684				task->tk_pid, __func__);
1685		task->tk_action = call_bind;
1686out_retry:
1687		return ERR_PTR(-EAGAIN);
1688	}
1689out_eio:
1690	error = -EIO;
1691out_err:
1692	rpc_exit(task, error);
1693	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
1694			__func__, error);
1695	return ERR_PTR(error);
1696out_overflow:
1697	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
1698			__func__);
1699	goto out_garbage;
1700}
1701
1702static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
1703{
1704	return 0;
1705}
1706
1707static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
1708{
1709	return 0;
1710}
1711
1712static struct rpc_procinfo rpcproc_null = {
1713	.p_encode = rpcproc_encode_null,
1714	.p_decode = rpcproc_decode_null,
1715};
1716
1717static int rpc_ping(struct rpc_clnt *clnt)
1718{
1719	struct rpc_message msg = {
1720		.rpc_proc = &rpcproc_null,
1721	};
1722	int err;
1723	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
1724	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
1725	put_rpccred(msg.rpc_cred);
1726	return err;
1727}
1728
1729struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
1730{
1731	struct rpc_message msg = {
1732		.rpc_proc = &rpcproc_null,
1733		.rpc_cred = cred,
1734	};
1735	struct rpc_task_setup task_setup_data = {
1736		.rpc_client = clnt,
1737		.rpc_message = &msg,
1738		.callback_ops = &rpc_default_ops,
1739		.flags = flags,
1740	};
1741	return rpc_run_task(&task_setup_data);
1742}
1743EXPORT_SYMBOL_GPL(rpc_call_null);
1744
1745#ifdef RPC_DEBUG
1746static void rpc_show_header(void)
1747{
1748	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
1749		"-timeout ---ops--\n");
1750}
1751
1752static void rpc_show_task(const struct rpc_clnt *clnt,
1753			  const struct rpc_task *task)
1754{
1755	const char *rpc_waitq = "none";
1756	char *p, action[KSYM_SYMBOL_LEN];
1757
1758	if (RPC_IS_QUEUED(task))
1759		rpc_waitq = rpc_qname(task->tk_waitqueue);
1760
1761	/* map tk_action pointer to a function name; then trim off
1762	 * the "+0x0 [sunrpc]" */
1763	sprint_symbol(action, (unsigned long)task->tk_action);
1764	p = strchr(action, '+');
1765	if (p)
1766		*p = '\0';
1767
1768	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
1769		task->tk_pid, task->tk_flags, task->tk_status,
1770		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1771		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
1772		action, rpc_waitq);
1773}
1774
1775void rpc_show_tasks(void)
1776{
1777	struct rpc_clnt *clnt;
1778	struct rpc_task *task;
1779	int header = 0;
1780
1781	spin_lock(&rpc_client_lock);
1782	list_for_each_entry(clnt, &all_clients, cl_clients) {
1783		spin_lock(&clnt->cl_lock);
1784		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
1785			if (!header) {
1786				rpc_show_header();
1787				header++;
1788			}
1789			rpc_show_task(clnt, task);
1790		}
1791		spin_unlock(&clnt->cl_lock);
1792	}
1793	spin_unlock(&rpc_client_lock);
1794}
1795#endif