/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
21

22 23
/* Prefix every pr_* message with the module name; must be defined before
 * any header that pulls in the printk machinery.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

Bruce Allan's avatar
Bruce Allan committed
49
/* "-k" marks this as the in-kernel (as opposed to out-of-tree) build */
#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

/* Default netif message level; module parameter debug=-1 keeps this default */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

60 61 62 63
/* Map board_* identifiers (selected via the PCI device table elsewhere in
 * this driver) to the per-family feature/configuration info structures.
 */
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
};

75 76 77 78 79 80 81 82 83 84 85 86 87 88
/* Register offset/name pair; the table below is consumed by
 * e1000_regdump() when dumping adapter state for debugging.
 */
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

126
/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 *
 * Prints one entry of e1000_reg_info_tbl.  Registers that exist per-queue
 * (RXDCTL/TXDCTL/TARC) are read and printed for both queues 0 and 1;
 * everything else is printed as a single value.
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		/* single-instance register: print and return */
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	/* per-queue register: append "[0-1]" to the name and print both */
	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177
/* Hex-dump the contents of every allocated packet-split page attached to
 * the given Rx buffer (used by e1000e_dump() when pktdata logging is on).
 */
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int idx;

	for (idx = 0; idx < adapter->rx_ps_pages; idx++) {
		struct e1000_ps_page *pspg = &bi->ps_pages[idx];

		if (!pspg->page)
			continue;

		pr_info("packet dump for ps_page %d:\n", idx);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
			       16, 1, page_address(pspg->page),
			       PAGE_SIZE, true);
	}
}

178
/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 *
 * Debug helper: dumps netdev state, the register table, and both
 * descriptor rings.  Output is gated on the adapter's netif message
 * level (hw, tx_done, rx_status, pktdata bits respectively).
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	/* overlay used to print the two 64-bit words of a Tx descriptor */
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	/* overlay used to print the four 64-bit words of an Rx descriptor */
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		/* annotate the next-to-use / next-to-clean positions */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		/* bit 29 = DEXT (legacy vs extended), bit 20 = DTYP
		 * (context vs data) per the descriptor layouts above
		 */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |               |  Type   |
		 *   | Checksum | Ident  |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

455 456 457 458 459 460 461 462 463 464 465
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: Tx or Rx descriptor ring
 *
 * One slot is always left empty so that next_to_use can never catch up
 * with next_to_clean on a completely full ring.
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	unsigned int unused;

	if (ring->next_to_clean > ring->next_to_use)
		unused = ring->next_to_clean - ring->next_to_use - 1;
	else
		unused = ring->count + ring->next_to_clean -
			 ring->next_to_use - 1;

	return unused;
}

466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529
/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value. This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers. The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	/* timecounter_cyc2time() turns the raw cycle count into nanoseconds */
	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	/* clear the whole struct first so stale fields are not reported */
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	/* bail unless hw timestamping is supported, the descriptor flags
	 * this packet as time stamped, and the latched register value is
	 * valid
	 */
	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * compared.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	/* reading RXSTMPH above unlatched the registers, so the periodic
	 * "stuck timestamp" check no longer needs to run
	 */
	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

530
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: net device the packet was received on
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	/* attach hw Rx timestamp (if present/valid) before handing up */
	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	/* VP bit set means hardware stripped a VLAN tag into @vlan */
	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 *
 * Mark @skb CHECKSUM_UNNECESSARY when the hardware verified the TCP/UDP
 * checksum; otherwise leave it CHECKSUM_NONE so the stack verifies it.
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	/* status lives in the low 16 bits, error bits in the top byte */
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

592
/* Write the Rx tail register with a workaround for an ME (Management
 * Engine) arbiter erratum: if the write did not stick (readback differs),
 * disable receives and schedule a full reset of the adapter.
 */
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	/* __ew32_prepare() presumably acquires/checks the h/w semaphore
	 * needed for a safe tail write - see its definition for details
	 */
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	/* only trust the readback when prepare succeeded (ret_val == 0) */
	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

608
/* Write the Tx tail register with the same ME arbiter workaround as
 * e1000e_update_rdt_wa(): if the write did not stick, disable transmits
 * and schedule a full reset of the adapter.
 */
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	/* only trust the readback when prepare succeeded (ret_val == 0) */
	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

624
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of descriptors to (re)populate
 * @gfp: allocation flags for the skb allocations
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse an existing (recycled) skb when one is attached */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		/* bump the tail register every E1000_RX_BUFFER_WRITE descs */
		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of descriptors to (re)populate
 * @gfp: allocation flags for skb/page allocations
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		/* buffer_addr[0] is the header buffer; page buffers start
		 * at index 1, hence the "j + 1" below
		 */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			/* NOTE(review): tail is written as "i << 1" in
			 * packet-split mode only - presumably because the
			 * larger PS descriptors change the tail units;
			 * confirm against the hardware datasheet
			 */
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

794 795
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: allocation flags for skb/page allocations
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	/* small skb only holds the header; payload DMAs into the page */
	unsigned int bufsz = 256 - 16;	/* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse an existing (recycled) skb when one is attached */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* the page may still be mapped from a previous pass */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* hardware tail points at the last valid descriptor, one
		 * behind next_to_use (wrapping at the ring boundary)
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

877 878 879 880
/* Record the hardware-supplied RSS hash on @skb, but only when the
 * netdev has NETIF_F_RXHASH enabled.
 */
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}

884
/**
885 886
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
887 888 889 890
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
891 892
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
893
{
894
	struct e1000_adapter *adapter = rx_ring->adapter;
895 896
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
897
	struct e1000_hw *hw = &adapter->hw;
898
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
899
	struct e1000_buffer *buffer_info, *next_buffer;
900
	u32 length, staterr;
901 902
	unsigned int i;
	int cleaned_count = 0;
903
	bool cleaned = false;
904 905 906
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
907 908
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
909 910
	buffer_info = &rx_ring->buffer_info[i];

911
	while (staterr & E1000_RXD_STAT_DD) {
912 913 914 915 916
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
917
		rmb();	/* read descriptor and rx_buffer_info after status DD */
918 919 920 921 922 923 924 925 926

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
927
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
928 929 930 931
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

932
		cleaned = true;
933
		cleaned_count++;
934 935
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
936 937
		buffer_info->dma = 0;

938
		length = le16_to_cpu(rx_desc->wb.upper.length);
939