// SPDX-License-Identifier: GPL-2.0
/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
		struct socket *sock);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
	{
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_max_slot_table_entries",
		.data		= &xprt_max_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit
	},
	{
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },
};

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:       %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

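
/*
 * Set up the RPC_DISPLAY_ADDR and RPC_DISPLAY_HEX_ADDR strings for the
 * peer.  For AF_LOCAL the socket path serves as both forms.
 */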
static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		sun = xs_addr_un(xprt);
		strlcpy(buf, sun->sun_path, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

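/*
 * Set up the RPC_DISPLAY_PORT and RPC_DISPLAY_HEX_PORT strings from the
 * peer's current port number.
 */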
static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

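/*
 * Regenerate the displayed port strings after the peer's port has been
 * rebound.
 */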
static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

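/*
 * Free the formatted address strings, skipping the protocol and netid
 * entries, which point at constant strings owned by the caller.
 */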
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

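/*
 * Allocate any pages still missing from an XDRBUF_SPARSE_PAGES receive
 * buffer.  Returns how many bytes of @want are backed by pages; on
 * allocation failure this may be less than @want, and the caller should
 * only try to receive that much for now.
 */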
static size_t
xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
	size_t i, n;

	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
		return want;
	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < n; i++) {
		if (buf->pages[i])
			continue;
		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
		if (!buf->pages[i]) {
			i *= PAGE_SIZE;
			return i > buf->page_base ? i - buf->page_base : 0;
		}
	}
	return want;
}

static ssize_t
xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
{
	ssize_t ret;
	if (seek != 0)
		iov_iter_advance(&msg->msg_iter, seek);
	ret = sock_recvmsg(sock, msg, flags);
	return ret > 0 ? ret + seek : ret;
}

static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
		struct kvec *kvec, size_t count, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
		struct bio_vec *bvec, unsigned long nr, size_t count,
		size_t seek)
{
	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
		size_t count)
{
	iov_iter_discard(&msg->msg_iter, READ, count);
	return sock_recvmsg(sock, msg, flags);
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size = count,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
#else
static inline void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
}
#endif

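/*
 * Receive data from the socket into an xdr_buf: first the head kvec,
 * then the page vector, then the tail kvec.  @seek counts the bytes
 * already received on earlier passes; *read returns the number of new
 * bytes consumed by this call.
 */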
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
{
	size_t want, seek_init = seek, offset = 0;
	ssize_t ret;

	if (seek < buf->head[0].iov_len) {
		want = min_t(size_t, count, buf->head[0].iov_len);
		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= buf->head[0].iov_len;
		offset += buf->head[0].iov_len;
	}

	want = xs_alloc_sparse_pages(buf,
			min_t(size_t, count - offset, buf->page_len),
			GFP_NOWAIT);
	if (seek < want) {
		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
				xdr_buf_pagecount(buf),
				want + buf->page_base,
				seek + buf->page_base);
		if (ret <= 0)
			goto sock_err;
		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
		offset += ret - buf->page_base;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= want;
		offset += want;
	}

	if (seek < buf->tail[0].iov_len) {
		want = min_t(size_t, count - offset, buf->tail[0].iov_len);
		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
	} else
		offset += buf->tail[0].iov_len;
	ret = -EMSGSIZE;
out:
	*read = offset - seek_init;
	return ret;
sock_err:
	offset += seek;
	goto out;
}

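/*
 * Copy the header words that were read along with the record marker
 * (starting with the XID) into the head of the request's receive buffer,
 * so the buffer looks as if it had been filled by one contiguous read.
 */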
static void
xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
{
	if (!transport->recv.copied) {
		if (buf->head[0].iov_len >= transport->recv.offset)
			memcpy(buf->head[0].iov_base,
					&transport->recv.xid,
					transport->recv.offset);
		transport->recv.copied = transport->recv.offset;
	}
}

static bool
xs_read_stream_request_done(struct sock_xprt *transport)
{
	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
}

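/*
 * Receive as much of the current record fragment as fits in @req's
 * private buffer, advancing recv.offset and recv.copied.  MSG_EOR is
 * set once the whole message has arrived, MSG_TRUNC if it cannot fit.
 */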
static ssize_t
xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
		int flags, struct rpc_rqst *req)
{
	struct xdr_buf *buf = &req->rq_private_buf;
	size_t want, read;
	ssize_t ret;

	xs_read_header(transport, buf);

	want = transport->recv.len - transport->recv.offset;
	ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
			transport->recv.copied + want, transport->recv.copied,
			&read);
	transport->recv.offset += read;
	transport->recv.copied += read;
	if (transport->recv.offset == transport->recv.len) {
		if (xs_read_stream_request_done(transport))
			msg->msg_flags |= MSG_EOR;
		return read;
	}

	switch (ret) {
	default:
		break;
	case -EFAULT:
	case -EMSGSIZE:
		msg->msg_flags |= MSG_TRUNC;
		return read;
	case 0:
		return -ESHUTDOWN;
	}
	return ret < 0 ? ret : read;
}

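/*
 * A continuation fragment carries only the 4-byte record marker; the
 * first fragment of a message is read together with the XID and call
 * direction words that follow it.
 */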
static size_t
xs_read_stream_headersize(bool isfrag)
{
	if (isfrag)
		return sizeof(__be32);
	return 3 * sizeof(__be32);
}

static ssize_t
xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
		int flags, size_t want, size_t seek)
{
	struct kvec kvec = {
		.iov_base = &transport->recv.fraghdr,
		.iov_len = want,
	};
	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret;

	/* Look up and lock the request corresponding to the given XID */
	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
	if (!req) {
		printk(KERN_WARNING "Callback slot table overflowed\n");
		return -ESHUTDOWN;
	}

	ret = xs_read_stream_request(transport, msg, flags, req);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_bc_request(req, transport->recv.copied);

	return ret;
}
#else /* CONFIG_SUNRPC_BACKCHANNEL */
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	return -ESHUTDOWN;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

static ssize_t
xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret = 0;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, transport->recv.xid);
	if (!req) {
		msg->msg_flags |= MSG_TRUNC;
		goto out;
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	ret = xs_read_stream_request(transport, msg, flags, req);

	spin_lock(&xprt->queue_lock);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_rqst(req->rq_task, transport->recv.copied);
	xprt_unpin_rqst(req);
out:
	spin_unlock(&xprt->queue_lock);
	return ret;
}

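/*
 * Read the next record fragment off a stream socket: parse the record
 * marker, dispatch the payload to the call or reply handler based on
 * the call direction, and discard whatever remains of a truncated
 * message.
 */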
static ssize_t
xs_read_stream(struct sock_xprt *transport, int flags)
{
	struct msghdr msg = { 0 };
	size_t want, read = 0;
	ssize_t ret = 0;

	if (transport->recv.len == 0) {
		want = xs_read_stream_headersize(transport->recv.copied != 0);
		ret = xs_read_stream_header(transport, &msg, flags, want,
				transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset = ret;
		if (transport->recv.offset != want)
			return transport->recv.offset;
		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
			RPC_FRAGMENT_SIZE_MASK;
		transport->recv.offset -= sizeof(transport->recv.fraghdr);
		read = ret;
	}

	switch (be32_to_cpu(transport->recv.calldir)) {
	default:
		msg.msg_flags |= MSG_TRUNC;
		break;
	case RPC_CALL:
		ret = xs_read_stream_call(transport, &msg, flags);
		break;
	case RPC_REPLY:
		ret = xs_read_stream_reply(transport, &msg, flags);
	}
	if (msg.msg_flags & MSG_TRUNC) {
		transport->recv.calldir = cpu_to_be32(-1);
		transport->recv.copied = -1;
	}
	if (ret < 0)
		goto out_err;
	read += ret;
	if (transport->recv.offset < transport->recv.len) {
		if (!(msg.msg_flags & MSG_TRUNC))
			return read;
		msg.msg_flags = 0;
		ret = xs_read_discard(transport->sock, &msg, flags,
				transport->recv.len - transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset += ret;
		read += ret;
		if (transport->recv.offset != transport->recv.len)
			return read;
	}
	if (xs_read_stream_request_done(transport)) {
		trace_xs_stream_read_request(transport);
		transport->recv.copied = 0;
	}
	transport->recv.offset = 0;
	transport->recv.len = 0;
	return read;
out_err:
	return ret != 0 ? ret : -ESHUTDOWN;
}

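/*
 * Pull records off the stream socket until it runs dry.  Runs in work
 * queue context, serialized by recv_mutex.
 */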
static void xs_stream_data_receive(struct sock_xprt *transport)
{
	size_t read = 0;
	ssize_t ret = 0;

	mutex_lock(&transport->recv_mutex);
	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
	if (transport->sock == NULL)
		goto out;
	for (;;) {
		ret = xs_read_stream(transport, MSG_DONTWAIT);
		if (ret < 0)
			break;
		read += ret;
		cond_resched();
	}
out:
	mutex_unlock(&transport->recv_mutex);
	trace_xs_stream_read_data(&transport->xprt, ret, read);
}

static void xs_stream_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	xs_stream_data_receive(transport);
}

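/*
 * Reset the receive and transmit state before a stream transport
 * (re-)connects, and account for the new connection attempt.
 */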
static void
xs_stream_reset_connect(struct sock_xprt *transport)
{
	transport->recv.offset = 0;
	transport->recv.len = 0;
	transport->recv.copied = 0;
	transport->xmit.offset = 0;
	transport->xprt.stat.connect_count++;
	transport->xprt.stat.connect_start = jiffies;
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

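/*
 * Send a single kvec starting @base bytes in.  @addr is only supplied
 * for unconnected (UDP) sends; @more sets MSG_MORE to tell the socket
 * layer that further data follows.
 */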
static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

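/*
 * Send the page section of an xdr_buf, using sendpage() when zero-copy
 * is safe and sock_no_sendpage() otherwise.  Bytes queued so far are
 * accumulated in *sent_p even when the send ends early.
 */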
static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
{
	ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
			int offset, size_t size, int flags);
	struct page **ppage;
	unsigned int remainder;
	int err;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	do_sendpage = sock->ops->sendpage;
	if (!zerocopy)
		do_sendpage = sock_no_sendpage;
	for (;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (more)
			flags |= MSG_MORE;
		if (remainder != 0)
			flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
		err = do_sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		*sent_p += err;
		ppage++;
		base = 0;
	}
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 * @zerocopy: true if it is safe to use sendpage()
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
{
	unsigned int remainder = xdr->len - base;
	int err = 0;
	int sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
		*sent_p += sent;
		if (remainder == 0 || sent != len)
			goto out;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return 0;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}

/**
 * xs_nospace - handle transmit was incomplete
 * @req: pointer to RPC request
 *
 */
static int xs_nospace(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			req->rq_task->tk_pid,
			req->rq_slen - transport->xmit.offset,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		/* wait for more buffer space */
		sk->sk_write_pending++;
		xprt_wait_for_buffer_space(xprt);
	} else
		ret = -ENOTCONN;

	spin_unlock_bh(&xprt->transport_lock);

	/* Race breaker in case memory is freed before above code is called */
	if (ret == -EAGAIN) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
		rcu_read_unlock();

		sk->sk_write_space(sk);
	}
	return ret;
}

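/*
 * Allocate the bio_vec array that the stream receive path will use to
 * read directly into this request's receive buffer.
 */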
static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
}

/*
 * Determine if the previous message in the stream was aborted before it
 * could complete transmission.
 */
static bool
xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
{
	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
}

/*
 * Construct a stream transport record marker in @buf.
 */
static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	int sent = 0;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		xs_close(xprt);
		return -ENOTCONN;
	}

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	req->rq_xtime = ktime_get();
	status = xs_sendpages(transport->sock, NULL, 0, xdr,
			      transport->xmit.offset,
			      true, &sent);
	dprintk("RPC:       %s(%u) = %d\n",
			__func__, xdr->len - transport->xmit.offset, status);

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (likely(sent > 0) || status == 0) {
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			req->rq_bytes_sent = 0;
			transport->xmit.offset = 0;
			return 0;
		}
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOBUFS:
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
		/* fall through */
	case -EPIPE:
		xs_close(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int sent = 0;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, req))
		return -EBADSLT;

	req->rq_xtime = ktime_get();
	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
			      xdr, 0, true, &sent);

	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
			xdr->len, status);

	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
	if (status == -EPERM)
		goto process_status;

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (sent > 0 || status == 0) {
		req->rq_xmit_bytes_sent += sent;
		if (sent >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

process_status:
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	case -ENETUNREACH:
	case -ENOBUFS:
	case -EPIPE:
	case -ECONNREFUSED:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	bool zerocopy = true;
	bool vm_wait = false;
	int status;
	int sent;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		if (transport->sock != NULL)
			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
		return -ENOTCONN;
	}

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);
	/* Don't use zero copy if this is a resend. If the RPC call
	 * completes while the socket holds a reference to the pages,
	 * then we may end up resending corrupted data.
	 */
	if (req->rq_task->tk_flags & RPC_TASK_SENT)
		zerocopy = false;

	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
		xs_tcp_set_socket_timeouts(xprt, transport->sock);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	req->rq_xtime = ktime_get();
	while (1) {
		sent = 0;
		status = xs_sendpages(transport->sock, NULL, 0, xdr,
				      transport->xmit.offset,
				      zerocopy, &sent);

		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
				xdr->len - transport->xmit.offset, status);

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			req->rq_bytes_sent = 0;
			transport->xmit.offset = 0;
			return 0;
		}

		WARN_ON_ONCE(sent == 0 && status == 0);

		if (status == -EAGAIN) {
			/*
			 * Return EAGAIN if we're sure we're hitting the
			 * socket send buffer limits.
			 */
			if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
				break;
			/*
			 * Did we hit a memory allocation failure?
			 */
			if (sent == 0) {
				status = -ENOBUFS;
				if (vm_wait)
					break;
				/* Retry, knowing now that we're below the
				 * socket send buffer limit
				 */
				vm_wait = true;
			}
			continue;
		}
		if (status < 0)
			break;
		vm_wait = false;
	}

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EADDRINUSE:
	case -ENOBUFS:
	case -EPIPE:
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

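/*
 * Save the socket callbacks we are about to override so that
 * xs_restore_old_callbacks() can reinstate them when the transport
 * releases the socket.
 */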
static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	xs_sock_reset_state_flags(xprt);
	smp_mb__after_atomic();
}

/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be a rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
	struct rpc_xprt *xprt;
	int err;

	read_lock_bh(&sk->sk_callback_lock);
	xprt = xprt_from_sock(sk);
	if (!xprt)
		goto out;

	err = -sk->sk_err;
	if (err == 0)
		goto out;
	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
			xprt, -err);
	trace_rpc_socket_error(xprt, sk->sk_socket, err);
	xprt_wake_pending_tasks(xprt, err);
 out:
	read_unlock_bh(&sk->sk_callback_lock);
}

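/*
 * Shut down the socket, detach it from the transport under the socket
 * callback lock, and release it.  recv_mutex is held across the teardown
 * to keep the receive worker from touching a half-torn-down socket.
 */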
static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;
	struct rpc_xprt *xprt = &transport->xprt;

	if (sk == NULL)
		return;

	if (atomic_read(&transport->xprt.swapper))
		sk_clear_memalloc(sk);

	kernel_sock_shutdown(sock, SHUT_RDWR);

	mutex_lock(&transport->recv_mutex);
	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	xprt_clear_connected(xprt);
	write_unlock_bh(&sk->sk_callback_lock);
	xs_sock_reset_connection_flags(xprt);
	mutex_unlock(&transport->recv_mutex);

	trace_rpc_socket_close(xprt, sock);
	sock_release(sock);

	xprt_disconnect_done(xprt);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC:       xs_close xprt %p\n", xprt);

	xs_reset_transport(transport);
	xprt->reestablish_timeout = 0;
}

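/*
 * Fault injection hook: force the transport into the disconnected state
 * as if the connection had been lost.
 */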
static void xs_inject_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
		xprt);
	xprt_disconnect_done(xprt);
}

static void xs_xprt_free(struct rpc_xprt *xprt)
{
	xs_free_peer_addresses(xprt);
	xprt_free(xprt);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt,
			struct sock_xprt, xprt);
	dprintk("RPC:       xs_destroy xprt %p\n", xprt);

	cancel_delayed_work_sync(&transport->connect_worker);
	xs_close(xprt);
	cancel_work_sync(&transport->recv_worker);
	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

/**
 * xs_udp_data_read_skb - receive callback for UDP sockets
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 */
static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
		struct sock *sk,
		struct sk_buff *skb)
{
	struct rpc_task *task;
	struct rpc_rqst *rovr;
	int repsize, copied;
	u32 _xid;
	__be32 *xp;

	repsize = skb->len;
	if (repsize < 4) {
		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
		return;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
	if (xp == NULL)
		return;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	xprt_pin_rqst(rovr);
	xprt_update_rtt(rovr->rq_task);