// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include "e1000.h"
#define DRV_EXTRAVERSION "-k"
#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
[board_82573] = &e1000_82573_info,
[board_82574] = &e1000_82574_info,
[board_82583] = &e1000_82583_info,
[board_80003es2lan] = &e1000_es2_info,
[board_ich8lan] = &e1000_ich8_info,
[board_ich9lan] = &e1000_ich9_info,
[board_ich10lan] = &e1000_ich10_info,
[board_pchlan] = &e1000_pch_info,
[board_pch2lan] = &e1000_pch2_info,
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
};
struct e1000_reg_info {
u32 ofs;
char *name;
};
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
/* General Registers */
{E1000_CTRL, "CTRL"},
{E1000_STATUS, "STATUS"},
{E1000_CTRL_EXT, "CTRL_EXT"},
/* Interrupt Registers */
{E1000_ICR, "ICR"},
/* Rx Registers */
{E1000_RCTL, "RCTL"},
{E1000_RDLEN(0), "RDLEN"},
{E1000_RDH(0), "RDH"},
{E1000_RDT(0), "RDT"},
{E1000_RDTR, "RDTR"},
{E1000_RXDCTL(0), "RXDCTL"},
{E1000_ERT, "ERT"},
{E1000_RDBAL(0), "RDBAL"},
{E1000_RDBAH(0), "RDBAH"},
{E1000_RDFH, "RDFH"},
{E1000_RDFT, "RDFT"},
{E1000_RDFHS, "RDFHS"},
{E1000_RDFTS, "RDFTS"},
{E1000_RDFPC, "RDFPC"},
/* Tx Registers */
{E1000_TCTL, "TCTL"},
{E1000_TDBAL(0), "TDBAL"},
{E1000_TDBAH(0), "TDBAH"},
{E1000_TDLEN(0), "TDLEN"},
{E1000_TDH(0), "TDH"},
{E1000_TDT(0), "TDT"},
{E1000_TIDV, "TIDV"},
{E1000_TXDCTL(0), "TXDCTL"},
{E1000_TADV, "TADV"},
{E1000_TARC(0), "TARC"},
{E1000_TDFH, "TDFH"},
{E1000_TDFT, "TDFT"},
{E1000_TDFHS, "TDFHS"},
{E1000_TDFTS, "TDFTS"},
{E1000_TDFPC, "TDFPC"},
/* List Terminator */
{0, NULL}
};
/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time. Normally, this is handled in
 * h/w by an arbiter, but on some parts there is a bug that acknowledges Host
 * accesses later than it should, which could result in the register having
 * an incorrect value. Work around this by checking the FWSM register, which
 * has bit 24 set while the ME is accessing the MAC CSR registers; if it is
 * set, wait and retry a number of times.
 **/
s32 __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
return i;
}
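/* A brief note on the return value (a reading of the loop above, not new
 * driver behavior): __ew32_prepare() counts down from
 * E1000_ICH_FWSM_PCIM2PCI_COUNT while polling FWSM, so a return value of 0
 * means the ME never released the MAC CSR registers within the poll budget.
 * Callers such as e1000e_update_rdt_wa() below treat 0 as a failed
 * arbitration and re-verify their tail write afterwards.
 */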
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
__ew32_prepare(hw);
writel(val, hw->hw_addr + reg);
}
/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
* @reginfo: pointer to the register info table
**/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
int n = 0;
char rname[16];
u32 regs[8];
switch (reginfo->ofs) {
case E1000_RXDCTL(0):
for (n = 0; n < 2; n++)
regs[n] = __er32(hw, E1000_RXDCTL(n));
break;
case E1000_TXDCTL(0):
for (n = 0; n < 2; n++)
regs[n] = __er32(hw, E1000_TXDCTL(n));
break;
case E1000_TARC(0):
for (n = 0; n < 2; n++)
regs[n] = __er32(hw, E1000_TARC(n));
break;
default:
pr_info("%-15s %08x\n",
reginfo->name, __er32(hw, reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
struct e1000_buffer *bi)
{
int i;
struct e1000_ps_page *ps_page;
for (i = 0; i < adapter->rx_ps_pages; i++) {
ps_page = &bi->ps_pages[i];
if (ps_page->page) {
pr_info("packet dump for ps_page %d:\n", i);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
16, 1, page_address(ps_page->page),
PAGE_SIZE, true);
}
}
}
/**
* e1000e_dump - Print registers, Tx-ring and Rx-ring
* @adapter: board private structure
**/
static void e1000e_dump(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct e1000_reg_info *reginfo;
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc;
struct my_u0 {
__le64 a;
__le64 b;
} *u0;
struct e1000_buffer *buffer_info;
struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc_ps;
union e1000_rx_desc_extended *rx_desc;
struct my_u1 {
__le64 a;
__le64 b;
__le64 c;
__le64 d;
} *u1;
u32 staterr;
int i = 0;
if (!netif_msg_hw(adapter))
return;
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start\n");
pr_info("%-15s %016lX %016lX\n", netdev->name,
netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
pr_info(" Register Name Value\n");
for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
reginfo->name; reginfo++) {
e1000_regdump(hw, reginfo);
}
/* Print Tx Ring Summary */
if (!netdev || !netif_running(netdev))
return;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
0, tx_ring->next_to_use, tx_ring->next_to_clean,
(unsigned long long)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
(unsigned long long)buffer_info->time_stamp);
/* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
*
* Legacy Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] (Reserved on Write Back) |
* +--------------------------------------------------------------+
* 8 | Special | CSS | Status | CMD | CSO | Length |
* +--------------------------------------------------------------+
* 63 48 47 36 35 32 31 24 23 16 15 0
*
* Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
* 63 48 47 40 39 32 31 16 15 8 7 0
* +----------------------------------------------------------------+
* 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
* +----------------------------------------------------------------+
* 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*
* Extended Data Descriptor (DTYP=0x1)
* +----------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +----------------------------------------------------------------+
* 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
(!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
(unsigned long long)buffer_info->dma,
buffer_info->length, buffer_info->next_to_watch,
(unsigned long long)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
buffer_info->skb->len, true);
}
/* Print Rx Ring Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
pr_info("Queue [NTU] [NTC]\n");
pr_info(" %5d %5X %5X\n",
0, rx_ring->next_to_use, rx_ring->next_to_clean);
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
return;
dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
switch (adapter->rx_ps_pages) {
case 1:
case 2:
case 3:
/* [Extended] Packet Split Receive Descriptor Format
*
* +-----------------------------------------------------+
* 0 | Buffer Address 0 [63:0] |
* +-----------------------------------------------------+
* 8 | Buffer Address 1 [63:0] |
* +-----------------------------------------------------+
* 16 | Buffer Address 2 [63:0] |
* +-----------------------------------------------------+
* 24 | Buffer Address 3 [63:0] |
* +-----------------------------------------------------+
*/
pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 13 12 8 7 4 3 0
* +------------------------------------------------------+
* 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
* | Checksum | Ident | | Queue | | Type |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
le32_to_cpu(rx_desc_ps->wb.middle.status_error);
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
"RWB", i,
(unsigned long long)le64_to_cpu(u1->a),
(unsigned long long)le64_to_cpu(u1->b),
(unsigned long long)le64_to_cpu(u1->c),
(unsigned long long)le64_to_cpu(u1->d),
buffer_info->skb, next_desc);
} else {
pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
"R ", i,
(unsigned long long)le64_to_cpu(u1->a),
(unsigned long long)le64_to_cpu(u1->b),
(unsigned long long)le64_to_cpu(u1->c),
(unsigned long long)le64_to_cpu(u1->d),
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
e1000e_dump_ps_pages(adapter,
buffer_info);
}
}
break;
default:
case 0:
/* Extended Receive Descriptor (Read) Format
*
* +-----------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +-----------------------------------------------------+
* 8 | Reserved |
* +-----------------------------------------------------+
*/
pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
/* Extended Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 24 23 4 3 0
* +------------------------------------------------------+
* | RSS Hash | | | |
* 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
* | Packet | IP | | | Type |
* | Checksum | Ident | | | |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
"RWB", i,
(unsigned long long)le64_to_cpu(u1->a),
(unsigned long long)le64_to_cpu(u1->b),
buffer_info->skb, next_desc);
} else {
pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
"R ", i,
(unsigned long long)le64_to_cpu(u1->a),
(unsigned long long)le64_to_cpu(u1->b),
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16,
1,
buffer_info->skb->data,
adapter->rx_buffer_len,
true);
}
}
}
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: Tx or Rx descriptor ring to check
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
if (ring->next_to_clean > ring->next_to_use)
return ring->next_to_clean - ring->next_to_use - 1;
return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
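/* Worked example with hypothetical values: for a ring with count = 256,
 * next_to_clean = 10 and next_to_use = 250, the indices have wrapped, so
 * unused = 256 + 10 - 250 - 1 = 15. The "- 1" keeps one slot permanently
 * empty so that a completely full ring can be told apart from an empty one.
 */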
/**
* e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
* @adapter: board private structure
* @hwtstamps: time stamp structure to update
 * @systim: unsigned 64-bit system time value.
*
* Convert the system time value stored in the RX/TXSTMP registers into a
* hwtstamp which can be used by the upper level time stamping functions.
*
* The 'systim_lock' spinlock is used to protect the consistency of the
* system time value. This is needed because reading the 64 bit time
* value involves reading two 32 bit registers. The first read latches the
* value.
**/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
struct skb_shared_hwtstamps *hwtstamps,
u64 systim)
{
u64 ns;
unsigned long flags;
spin_lock_irqsave(&adapter->systim_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, systim);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(ns);
}
/**
* e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
* @adapter: board private structure
* @status: descriptor extended error and status field
* @skb: particular skb to include time stamp
*
* If the time stamp is valid, convert it into the timecounter ns value
* and store that result into the shhwtstamps structure which is passed
* up the network stack.
**/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
u64 rxstmp;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
!(status & E1000_RXDEXT_STATERR_TST) ||
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
return;
/* The Rx time stamp registers contain the time stamp. No other
* received packet will be time stamped until the Rx time stamp
* registers are read. Because only one packet can be time stamped
* at a time, the register values must belong to this packet and
* therefore none of the other additional attributes need to be
* compared.
*/
rxstmp = (u64)er32(RXSTMPL);
rxstmp |= (u64)er32(RXSTMPH) << 32;
e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
struct net_device *netdev, struct sk_buff *skb,
u32 staterr, __le16 vlan)
{
u16 tag = le16_to_cpu(vlan);
e1000e_rx_hwtstamp(adapter, staterr, skb);
skb->protocol = eth_type_trans(skb, netdev);
if (staterr & E1000_RXD_STAT_VP)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct sk_buff *skb)
{
	u16 status = (u16)status_err;		/* descriptor status: bits 15:0 */
	u8 errors = (u8)(status_err >> 24);	/* descriptor errors: bits 31:24 */
skb_checksum_none_assert(skb);
/* Rx checksum disabled */
if (!(adapter->netdev->features & NETIF_F_RXCSUM))
return;
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM)
return;
/* TCP/UDP checksum error bit or IP checksum error bit is set */
if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
}
/* TCP/UDP Checksum has not been calculated */
if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
return;
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
s32 ret_val = __ew32_prepare(hw);
writel(i, rx_ring->tail);
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
}
}
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
s32 ret_val = __ew32_prepare(hw);
writel(i, tx_ring->tail);
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
}
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_extended *rx_desc;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
unsigned int bufsz = adapter->rx_buffer_len;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
skb = buffer_info->skb;
if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
if (!skb) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
buffer_info->skb = skb;
map_skb:
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
break;
}
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i);
else
writel(i, rx_ring->tail);
}
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_packet_split *rx_desc;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct sk_buff *skb;
unsigned int i, j;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
ps_page = &buffer_info->ps_pages[j];
if (j >= adapter->rx_ps_pages) {
/* all unused desc entries get hw null ptr */
rx_desc->read.buffer_addr[j + 1] =
~cpu_to_le64(0);
continue;
}
if (!ps_page->page) {
ps_page->page = alloc_page(gfp);
if (!ps_page->page) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
ps_page->dma = dma_map_page(&pdev->dev,
ps_page->page,
0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"Rx DMA page map failed\n");
adapter->rx_dma_failed++;
goto no_buffers;
}
}
/* Refresh the desc even if buffer_addrs
* didn't change because each write-back
* erases this info.
*/
rx_desc->read.buffer_addr[j + 1] =
cpu_to_le64(ps_page->dma);
}
skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
gfp);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
}
buffer_info->skb = skb;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
dev_kfree_skb_any(skb);
buffer_info->skb = NULL;
break;
}
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i << 1);
else
writel(i << 1, rx_ring->tail);
}
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
no_buffers:
rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_extended *rx_desc;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
unsigned int bufsz = 256 - 16; /* for skb_reserve */
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
skb = buffer_info->skb;
if (skb) {
skb_trim(skb, 0);
goto check_page;
}
skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
buffer_info->skb = skb;
check_page:
/* allocate a new page if necessary */
if (!buffer_info->page) {
buffer_info->page = alloc_page(gfp);
if (unlikely(!buffer_info->page)) {
adapter->alloc_rx_buff_failed++;
break;
}
}
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
break;
}
}
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
if (unlikely(++i == rx_ring->count))
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
if (likely(rx_ring->next_to_use != i)) {
rx_ring->next_to_use = i;
if (unlikely(i-- == 0))
i = (rx_ring->count - 1);
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i);
else
writel(i, rx_ring->tail);
}
}
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
struct sk_buff *skb)
{
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter, amount of work done
 * @work_to_do: how many packets we can clean
 *
 * The return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned.
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
int work_to_do)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
union e1000_rx_desc_extended *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
if (*work_done >= work_to_do)
break;
(*work_done)++;
dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
prefetch(skb->data - NET_IP_ALIGN);
i++;
if (i == rx_ring->count)
i = 0;
next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->wb.upper.length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment.
		 */
if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL))) {
/* recycle */
buffer_info->skb = skb;
goto next_desc;
}
/* adjust length to remove Ethernet CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
/* If configured to store CRC, don't subtract FCS,
* but keep the FCS bytes out of the total_rx_bytes
* counter
*/
if (netdev->features & NETIF_F_RXFCS)
total_rx_bytes -= 4;
else
length -= 4;
}
total_rx_bytes += length;
total_rx_packets++;
		/* Code added for copybreak; this should improve performance
		 * for small packets with large amounts of reassembly being
		 * done in the stack.
		 */
if (length < copybreak) {
struct sk_buff *new_skb =
napi_alloc_skb(&adapter->napi, length);
if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
(skb->data -
NET_IP_ALIGN),
(length +
NET_IP_ALIGN));
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
}
/* else just continue with the old one */
}
/* end copybreak code */
skb_put(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
e1000_receive_skb(adapter, netdev, skb, staterr,
rx_desc->wb.upper.vlan);
next_desc:
rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
rx_ring->next_to_clean = i;
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
return cleaned;
}
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
struct e1000_buffer *buffer_info,
bool drop)
{
struct e1000_adapter *adapter = tx_ring->adapter;
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length, DMA_TO_DEVICE);
else
dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
if (drop)
dev_kfree_skb_any(buffer_info->skb);
else
dev_consume_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
struct e1000_hw *hw = &adapter->hw;
u16 phy_status, phy_1000t_status, phy_ext_status;
u16 pci_status;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* We may be blocked on descriptor write-back; flush pending
		 * descriptor writebacks to memory and check for the hang
		 * again.
		 */
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
/* execute the writes immediately */
e1e_flush();
/* Due to rare timing issues, write to TIDV again to ensure
* the write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
/* execute the writes immediately */
e1e_flush();
adapter->tx_hang_recheck = true;
return;
}
adapter->tx_hang_recheck = false;
if (er32(TDH(0)) == er32(TDT(0))) {
e_dbg("false hang detected, ignoring\n");
return;
}
/* Real hang detected */
netif_stop_queue(netdev);
e1e_rphy(hw, MII_BMSR, &phy_status);
e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
/* detected Hardware unit hang */
e_err("Detected Hardware Unit Hang:\n"
" TDH <%x>\n"
" TDT <%x>\n"
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
"buffer_info[next_to_clean]:\n"
" time_stamp <%lx>\n"
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n"
"MAC Status <%x>\n"
"PHY Status <%x>\n"
"PHY 1000BASE-T Status <%x>\n"
"PHY Extended Status <%x>\n"
"PCI Status <%x>\n",
readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
phy_status, phy_1000t_status, phy_ext_status, pci_status);
e1000e_dump(adapter);
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
e_err("Try turning off Tx pause (flow control) via ethtool\n");
}
/**
* e1000e_tx_hwtstamp_work - check for Tx time stamp
* @work: pointer to work struct
*
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb. The timestamp must
* be for this skb because only one such packet is allowed in the queue.
*/
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
tx_hwtstamp_work);
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
txstmp = er32(TXSTMPL);
txstmp |= (u64)er32(TXSTMPH) << 32;
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
/* Clear the global tx_hwtstamp_skb pointer and force writes
* prior to notifying the stack of a Tx timestamp.
*/
adapter->tx_hwtstamp_skb = NULL;
wmb(); /* force write prior to skb_tstamp_tx */
skb_tstamp_tx(skb, &shhwtstamps);
dev_consume_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
adapter->tx_hwtstamp_timeouts++;
e_warn("clearing Tx timestamp hang\n");
} else {
/* reschedule to check later */
schedule_work(&adapter->tx_hwtstamp_work);
}
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * The return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned.
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
dma_rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
if (buffer_info->skb) {
bytes_compl += buffer_info->skb->len;
pkts_compl++;
}
}
e1000_put_txbuf(tx_ring, buffer_info, false);
tx_desc->upper.data = 0;
i++;
if (i == tx_ring->count)
i = 0;
}
if (i == tx_ring->next_to_use)
break;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
tx_ring->next_to_clean = i;
netdev_completed_queue(netdev, pkts_compl, bytes_compl);
#define TX_WAKE_THRESHOLD 32
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
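		/* This barrier is assumed to pair with the one taken in the
		 * queue-stop path (__e1000_maybe_stop_tx(), later in this
		 * file), so a CPU stopping the queue cannot miss the updated
		 * next_to_clean.
		 */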
if (netif_queue_stopped(netdev) &&
!(test_bit(__E1000_DOWN, &adapter->state))) {
netif_wake_queue(netdev);
++adapter->restart_queue;
}
}
if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i.
		 */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task);
else
adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
return count < tx_ring->count;
}
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter, amount of work done
 * @work_to_do: how many packets we can clean
 *
 * The return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned.
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
int work_to_do)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_buffer *buffer_info, *next_buffer;
struct e1000_ps_page *ps_page;
struct sk_buff *skb;
unsigned int i, j;
u32 length, staterr;
int cleaned_count = 0;
bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
if (*work_done >= work_to_do)
break;
(*work_done)++;
skb = buffer_info->skb;
dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
/* in the packet split case this is header only */
prefetch(skb->data - NET_IP_ALIGN);
i++;
if (i == rx_ring->count)
i = 0;
next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other Rx routine */
if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
/* Good Receive */
skb_put(skb, length);
{
/* this looks ugly, but it seems compiler issues make
* it more efficient than reusing j
*/
int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
			/* page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put - only valid to call kmap_* in
			 * softirq (napi) context
			 */
if (l1 && (l1 <= copybreak) &&
((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
ps_page = &buffer_info->ps_pages[0];
/* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long
*/
dma_sync_single_for_cpu(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr);
dma_sync_single_for_device(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
if (!(netdev->features & NETIF_F_RXFCS))
l1 -= 4;
}
skb_put(skb, l1);
goto copydone;
} /* if */
}
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
length = le16_to_cpu(rx_desc->wb.upper.length[j]);
if (!length)
break;
ps_page = &buffer_info->ps_pages[j];
dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
skb->len += length;
skb->data_len += length;
skb->truesize += PAGE_SIZE;
}
		/* strip the Ethernet CRC; the problem is we're using pages
		 * now, so this whole operation can get a little CPU intensive
		 */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
if (!(netdev->features & NETIF_F_RXFCS))
pskb_trim(skb, skb->len - 4);
}
copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
if (rx_desc->wb.upper.header_status &
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
e1000_receive_skb(adapter, netdev, skb, staterr,
rx_desc->wb.middle.vlan);
next_desc:
rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
buffer_info->skb = NULL;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
}
rx_ring->next_to_clean = i;
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
return cleaned;
}
/**
 * e1000_consume_page - helper function for jumbo Rx buffers
 * @bi: buffer whose page is being consumed
 * @skb: skb that takes ownership of the page
 * @length: number of bytes of packet data in the page
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
u16 length)
{
bi->page = NULL;
skb->len += length;
skb->data_len += length;
skb->truesize += PAGE_SIZE;
}
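/* Note: truesize is charged a full PAGE_SIZE regardless of @length because
 * the entire page is handed over to the skb; a partially filled page still
 * pins PAGE_SIZE bytes of memory.
 */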
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter, amount of work done
 * @work_to_do: how many packets we can clean
 *
 * The return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned.
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
int work_to_do)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_extended *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct skb_shared_info *shinfo;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
if (*work_done >= work_to_do)
break;
(*work_done)++;
dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
++i;
if (i == rx_ring->count)
i = 0;
next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
cleaned = true;
cleaned_count++;
dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->wb.upper.length);
		/* the error bits are only valid for DD + EOP descriptors */
if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL)))) {
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window too */
if (rx_ring->rx_skb_top)
dev_kfree_skb_irq(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
#define rxtop (rx_ring->rx_skb_top)
if (!(staterr & E1000_RXD_STAT_EOP)) {
/* this descriptor is only the beginning (or middle) */
if (!rxtop) {
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
0, length);
} else {
/* this is the middle of a chain */
shinfo = skb_shinfo(rxtop);
skb_fill_page_desc(rxtop, shinfo->nr_frags,
buffer_info->page, 0,
length);
/* re-use the skb, only consumed the page */
buffer_info->skb = skb;
}
e1000_consume_page(buffer_info, rxtop, length);
goto next_desc;
} else {
if (rxtop) {
/* end of the chain */
shinfo = skb_shinfo(rxtop);
skb_fill_page_desc(rxtop, shinfo->nr_frags,
buffer_info->page, 0,
length);
/* re-use the current skb, we only consumed the
* page
*/
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
				/* no chain and EOP set: this buffer holds
				 * the whole packet; use copybreak to save
				 * the put_page/alloc_page
				 */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
memcpy(skb_tail_pointer(skb), vaddr,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
* buffer_info->page
*/
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
buffer_info->page, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
}
}
}
/* Receive Checksum Offload */
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
e_err("pskb_may_pull failed.\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
e1000_receive_skb(adapter, netdev, skb, staterr,
rx_desc->wb.upper.vlan);
next_desc:
rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
rx_ring->next_to_clean = i;
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
return cleaned;
}
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: Rx descriptor ring
**/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct pci_dev *pdev = adapter->pdev;
unsigned int i, j;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->page) {
put_page(buffer_info->page);
buffer_info->page = NULL;
}
if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
}
}
/* there also may be some cached data from a chained receive */
if (rx_ring->rx_skb_top) {
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
}
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
downshift_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
* e1000_intr_msi - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
	/* reading ICR disables interrupts using IAM */
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
/* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
/* 80003ES2LAN workaround-- For packet buffer work-around on
* link down event; disable receives here in the ISR and reset
* adapter in watchdog
*/
if (netif_carrier_ok(netdev) &&
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return IRQ_HANDLED;
}
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
/**
* e1000_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 rctl, icr = er32(ICR);
if (!icr || test_bit(__E1000_DOWN, &adapter->state))
return IRQ_NONE; /* Not our interrupt */
	/* IMS will not auto-mask if INT_ASSERTED is not set; if INT_ASSERTED
	 * is not set, then the adapter didn't send an interrupt
	 */
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write
*/
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
/* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
/* 80003ES2LAN workaround--
* For packet buffer work-around on link down event;
* disable receives here in the ISR and
* reset adapter in watchdog
*/
if (netif_carrier_ok(netdev) &&
(adapter->flags & FLAG_RX_NEEDS_RESTART)) {
/* disable receives */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return IRQ_HANDLED;
}
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
if (icr & adapter->eiac_mask)
ew32(ICS, (icr & adapter->eiac_mask));
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
if (!e1000_clean_tx_irq(tx_ring))
/* Ring was not completely cleaned, so fire another interrupt */
ew32(ICS, tx_ring->ims_val);
if (!test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, adapter->tx_ring->ims_val);
return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *rx_ring = adapter->rx_ring;
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
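	/* The ITR register is assumed to take an interval in 256ns units
	 * while itr_val is in interrupts/sec, hence the conversion
	 * 10^9 / (itr_val * 256) below. As a hypothetical example, an
	 * itr_val of 20000 ints/s writes 195, roughly one interrupt
	 * every 50us.
	 */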
if (rx_ring->set_itr) {
u32 itr = rx_ring->itr_val ?
1000000000 / (rx_ring->itr_val * 256) : 0;
writel(itr, rx_ring->itr_register);
rx_ring->set_itr = 0;
}
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *rx_ring = adapter->rx_ring;
struct e1000_ring *tx_ring = adapter->tx_ring;
int vector = 0;
u32 ctrl_ext, ivar = 0;
adapter->eiac_mask = 0;
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
/* Configure Rx vector */
rx_ring->ims_val = E1000_IMS_RXQ0;
adapter->eiac_mask |= rx_ring->ims_val;
if (rx_ring->itr_val)
writel(1000000000 / (rx_ring->itr_val * 256),
rx_ring->itr_register);
else
writel(1, rx_ring->itr_register);
ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
/* Configure Tx vector */
tx_ring->ims_val = E1000_IMS_TXQ0;
vector++;
if (tx_ring->itr_val)
writel(1000000000 / (tx_ring->itr_val * 256),
tx_ring->itr_register);
else
writel(1, tx_ring->itr_register);
adapter->eiac_mask |= tx_ring->ims_val;
ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
/* set vector for Other Causes, e.g. link changes */
vector++;
ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
if (rx_ring->itr_val)
writel(1000000000 / (rx_ring->itr_val * 256),
hw->hw_addr + E1000_EITR_82574(vector));
else
writel(1, hw->hw_addr + E1000_EITR_82574(vector));
/* Cause Tx interrupts on every write back */
ivar |= BIT(31);
ew32(IVAR, ivar);
/* enable MSI-X PBA support */
ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME;
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME;
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
}
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
if (adapter->msix_entries) {
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else if (adapter->flags & FLAG_MSI_ENABLED) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~FLAG_MSI_ENABLED;
}
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
int err;
int i;
switch (adapter->int_mode) {
case E1000E_INT_MODE_MSIX:
if (adapter->flags & FLAG_HAS_MSIX) {
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
adapter->msix_entries = kcalloc(adapter->num_vectors,
sizeof(struct
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
struct e1000_adapter *a = adapter;
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
err = pci_enable_msix_range(a->pdev,
a->msix_entries,
a->num_vectors,
a->num_vectors);
				/* a positive return value is the number of
				 * MSI-X vectors actually granted
				 */
				if (err > 0)
					return;
}
/* MSI-X failed, so fall through and try MSI */
e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
/* Fall through */
case E1000E_INT_MODE_MSI:
if (!pci_enable_msi(adapter->pdev)) {
adapter->flags |= FLAG_MSI_ENABLED;
} else {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
/* Fall through */
case E1000E_INT_MODE_LEGACY:
/* Don't do anything; this is the system default */
break;
}
/* store the number of vectors being used */
adapter->num_vectors = 1;
}
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err = 0, vector = 0;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->rx_ring->name,
sizeof(adapter->rx_ring->name) - 1,
"%.14s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
e1000_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
return err;
adapter->rx_ring->itr_register = adapter->hw.hw_addr +
E1000_EITR_82574(vector);
adapter->rx_ring->itr_val = adapter->itr;
vector++;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->tx_ring->name,
sizeof(adapter->tx_ring->name) - 1,
"%.14s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
e1000_intr_msix_tx, 0, adapter->tx_ring->name,
netdev);
if (err)
return err;
adapter->tx_ring->itr_register = adapter->hw.hw_addr +
E1000_EITR_82574(vector);
adapter->tx_ring->itr_val = adapter->itr;
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
e1000_msix_other, 0, netdev->name, netdev);
if (err)
return err;
e1000_configure_msix(adapter);
return 0;
}
/**
 * e1000_request_irq - initialize interrupts
 * @adapter: board private structure
 *
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
if (adapter->msix_entries) {
err = e1000_request_msix(adapter);
if (!err)
return err;
/* fall back to MSI */
e1000e_reset_interrupt_capability(adapter);
adapter->int_mode = E1000E_INT_MODE_MSI;
e1000e_set_interrupt_capability(adapter);
}
if (adapter->flags & FLAG_MSI_ENABLED) {
err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
netdev->name, netdev);
if (!err)
return err;
/* fall back to legacy interrupt */
e1000e_reset_interrupt_capability(adapter);
adapter->int_mode = E1000E_INT_MODE_LEGACY;
}
err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
netdev->name, netdev);
if (err)
e_err("Unable to allocate interrupt, Error: %d\n", err);
return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (adapter->msix_entries) {
int vector = 0;
free_irq(adapter->msix_entries[vector].vector, netdev);
vector++;
free_irq(adapter->msix_entries[vector].vector, netdev);
vector++;
/* Other Causes interrupt vector */
free_irq(adapter->msix_entries[vector].vector, netdev);
return;
}
free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
**/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
ew32(IMC, ~0);
if (adapter->msix_entries)
ew32(EIAC_82574, 0);
e1e_flush();
if (adapter->msix_entries) {
int i;
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
**/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries) {
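		/* EIAC lets the RxQ0/TxQ0 causes auto-clear, then IMS unmasks
		 * the queue vectors plus the link/other causes
		 */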
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
IMS_OTHER_MASK);
} else if (hw->mac.type >= e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
e1e_flush();
}
/**
* e1000e_get_hw_control - get control of the h/w from f/w
* @adapter: address of board private structure
*
* e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is open.
**/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
u32 swsm;
/* Let firmware know the driver has taken over */
if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
swsm = er32(SWSM);
ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
ctrl_ext = er32(CTRL_EXT);
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
}
/**
* e1000e_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
* e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is closed.
*
**/
void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
u32 swsm;
	/* Let firmware take over control of h/w */
if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
swsm = er32(SWSM);
ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
ctrl_ext = er32(CTRL_EXT);
ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
}
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 * @adapter: board private structure
 * @ring: ring struct for which to allocate dma
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
{
struct pci_dev *pdev = adapter->pdev;
ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
return 0;
}
/**
* e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: Tx descriptor ring
*
 * Returns 0 on success, negative on failure
**/
int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
{
struct e1000_adapter *adapter = tx_ring->adapter;
int err = -ENOMEM, size;
size = sizeof(struct e1000_buffer) * tx_ring->count;
tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
err = e1000_alloc_ring_dma(adapter, tx_ring);
if (err)
goto err;
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
return 0;
err:
vfree(tx_ring->buffer_info);
e_err("Unable to allocate memory for the transmit descriptor ring\n");
return err;
}
/**
* e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
* @rx_ring: Rx descriptor ring
*
* Returns 0 on success, negative on failure
**/
int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_buffer *buffer_info;
int i, size, desc_len, err = -ENOMEM;
size = sizeof(struct e1000_buffer) * rx_ring->count;
rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
sizeof(struct e1000_ps_page),
GFP_KERNEL);
if (!buffer_info->ps_pages)
goto err_pages;
}
desc_len = sizeof(union e1000_rx_desc_packet_split);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
err = e1000_alloc_ring_dma(adapter, rx_ring);
if (err)
goto err_pages;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->rx_skb_top = NULL;
return 0;
err_pages:
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
kfree(buffer_info->ps_pages);
}
err:
vfree(rx_ring->buffer_info);
e_err("Unable to allocate memory for the receive descriptor ring\n");
return err;
}
/**
* e1000_clean_tx_ring - Free Tx Buffers
* @tx_ring: Tx descriptor ring
**/
static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_buffer *buffer_info;
unsigned long size;
unsigned int i;
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(tx_ring, buffer_info, false);
}
netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
/**
* e1000e_free_tx_resources - Free Tx Resources per Queue
* @tx_ring: Tx descriptor ring
*
* Free all transmit software resources
**/
void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
e1000_clean_tx_ring(tx_ring);
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
tx_ring->dma);
tx_ring->desc = NULL;
}
/**
* e1000e_free_rx_resources - Free Rx Resources
* @rx_ring: Rx descriptor ring
*
* Free all receive software resources
**/
void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
int i;
e1000_clean_rx_ring(rx_ring);
for (i = 0; i < rx_ring->count; i++)
kfree(rx_ring->buffer_info[i].ps_pages);
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
}
/**
* e1000_update_itr - update the dynamic ITR value based on statistics
 * @itr_setting: current adapter->itr
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
*
 * Returns a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
* computation is faster updates and more accurate ITR for the current
* traffic pattern. Constants in this function were computed
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput. This functionality is controlled
* by the InterruptThrottleRate module parameter.
**/
static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
{
unsigned int retval = itr_setting;
if (packets == 0)
return itr_setting;
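	/* itr_setting walks a three-state machine (lowest_latency,
	 * low_latency, bulk_latency) which e1000_set_itr() later maps to
	 * 70000, 20000 and 4000 ints/s respectively; transitions depend on
	 * packet count and average packet size in the last interval
	 */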
switch (itr_setting) {
case lowest_latency:
/* handle TSO and jumbo frames */
if (bytes / packets > 8000)
retval = bulk_latency;
else if ((packets < 5) && (bytes > 512))
retval = low_latency;
break;
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
if (bytes / packets > 8000)
retval = bulk_latency;
else if ((packets < 10) || ((bytes / packets) > 1200))
retval = bulk_latency;
			else if (packets > 35)
retval = lowest_latency;
} else if (bytes / packets > 2000) {
retval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
retval = lowest_latency;
}
break;
case bulk_latency: /* 250 usec aka 4000 ints/s */
if (bytes > 25000) {
if (packets > 35)
retval = low_latency;
} else if (bytes < 6000) {
retval = low_latency;
}
break;
}
return retval;
}
static void e1000_set_itr(struct e1000_adapter *adapter)
{
u16 current_itr;
u32 new_itr = adapter->itr;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
if (adapter->flags2 & FLAG2_DISABLE_AIM) {
new_itr = 0;
goto set_itr_now;
}
adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
adapter->total_tx_packets,
adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
adapter->total_rx_packets,
adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
current_itr = max(adapter->rx_itr, adapter->tx_itr);
/* counts and packets in update_itr are dependent on these numbers */
switch (current_itr) {
case lowest_latency:
new_itr = 70000;
break;
case low_latency:
new_itr = 20000; /* aka hwitr = ~200 */
break;
case bulk_latency:
new_itr = 4000;
break;
default:
break;
}
set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing
*/
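		/* e.g. stepping up from itr == 4000 towards 20000 first
		 * moves to 4000 + (20000 >> 2) = 9000 on this pass
		 */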
new_itr = new_itr > adapter->itr ?
min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
if (adapter->msix_entries)
adapter->rx_ring->set_itr = 1;
else
e1000e_write_itr(adapter, new_itr);
}
}
/**
* e1000e_write_itr - write the ITR value to the appropriate registers
* @adapter: address of board private structure
* @itr: new ITR value to program
*
* e1000e_write_itr determines if the adapter is in MSI-X mode
* and, if so, writes the EITR registers with the ITR value.
* Otherwise, it writes the ITR value into the ITR register.
**/
void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
{
struct e1000_hw *hw = &adapter->hw;
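	/* convert an ints/sec target into 256ns register units; itr == 0
	 * disables interrupt moderation entirely
	 */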
u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
if (adapter->msix_entries) {
int vector;
for (vector = 0; vector < adapter->num_vectors; vector++)
writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
} else {
ew32(ITR, new_itr);
}
}
/**
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
int size = sizeof(struct e1000_ring);
adapter->tx_ring = kzalloc(size, GFP_KERNEL);
if (!adapter->tx_ring)
goto err;
adapter->tx_ring->count = adapter->tx_ring_count;
adapter->tx_ring->adapter = adapter;
adapter->rx_ring = kzalloc(size, GFP_KERNEL);
if (!adapter->rx_ring)
goto err;
adapter->rx_ring->count = adapter->rx_ring_count;
adapter->rx_ring->adapter = adapter;
return 0;
err:
e_err("Unable to allocate memory for queues\n");
kfree(adapter->rx_ring);
kfree(adapter->tx_ring);
return -ENOMEM;
}
/**
* e1000e_poll - NAPI Rx polling callback
* @napi: struct associated with this polling callback
* @budget: number of packets driver is allowed to process this poll
**/
static int e1000e_poll(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
napi);
struct e1000_hw *hw = &adapter->hw;
struct net_device *poll_dev = adapter->netdev;
int tx_cleaned = 1, work_done = 0;
adapter = netdev_priv(poll_dev);
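	/* in MSI-X mode Tx has its own vector and is cleaned from its own
	 * handler, so only clean Tx here when not using MSI-X or when Rx and
	 * Tx share an interrupt cause
	 */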
if (!adapter->msix_entries ||
(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
adapter->clean_rx(adapter->rx_ring, &work_done, budget);
if (!tx_cleaned || work_done == budget)
return budget;
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
if (likely(napi_complete_done(napi, work_done))) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
else
e1000_irq_enable(adapter);
}
}
return work_done;
}
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 vfta, index;
/* don't update vlan cookie if already programmed */
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
return 0;
/* add VID to filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
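		/* the VFTA is an array of 32-bit registers; vid bits 11:5
		 * select the register and bits 4:0 the bit within it
		 */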
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
vfta |= BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
set_bit(vid, adapter->active_vlans);
return 0;
}
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 vfta, index;
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000e_release_hw_control(adapter);
return 0;
}
/* remove VID from filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
vfta &= ~BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
clear_bit(vid, adapter->active_vlans);
return 0;
}
/**
* e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
/* disable VLAN receive filtering */
rctl = er32(RCTL);
rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
ew32(RCTL, rctl);
if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
}
}
/**
* e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
/* enable VLAN receive filtering */
rctl = er32(RCTL);
rctl |= E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_CFIEN;
ew32(RCTL, rctl);
}
}
/**
* e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl;
/* disable VLAN tag insert/strip */
ctrl = er32(CTRL);
ctrl &= ~E1000_CTRL_VME;
ew32(CTRL, ctrl);
}
/**
* e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl;
/* enable VLAN tag insert/strip */
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_VME;
ew32(CTRL, ctrl);
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid;
}
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
u16 vid;
e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 manc, manc2h, mdef, i, j;
if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
return;
manc = er32(MANC);
	/* enable receiving management packets to the host. This will probably
* generate destination unreachable messages from the host OS, but
* the packets will be handled on SMBUS
*/
manc |= E1000_MANC_EN_MNG2HOST;
manc2h = er32(MANC2H);
switch (hw->mac.type) {
default:
manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
break;
case e1000_82574:
case e1000_82583:
/* Check if IPMI pass-through decision filter already exists;
* if so, enable it.
*/
for (i = 0, j = 0; i < 8; i++) {
mdef = er32(MDEF(i));
/* Ignore filters with anything other than IPMI ports */
if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
continue;
/* Enable this decision filter in MANC2H */
if (mdef)
manc2h |= BIT(i);
j |= mdef;
}
if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
break;
/* Create new decision filter in an empty filter */
for (i = 0, j = 0; i < 8; i++)
if (er32(MDEF(i)) == 0) {
ew32(MDEF(i), (E1000_MDEF_PORT_623 |
E1000_MDEF_PORT_664));
				manc2h |= BIT(i); /* enable the new filter */
j++;
break;
}
if (!j)
e_warn("Unable to create IPMI pass-through filter\n");
break;
}
ew32(MANC2H, manc2h);
ew32(MANC, manc);
}
/**
* e1000_configure_tx - Configure Transmit Unit after Reset
* @adapter: board private structure
*
* Configure the Tx unit of the MAC after a reset.
**/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
u64 tdba;
u32 tdlen, tctl, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
tdba = tx_ring->dma;
tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
ew32(TDBAH(0), (tdba >> 32));
ew32(TDLEN(0), tdlen);
ew32(TDH(0), 0);
ew32(TDT(0), 0);
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
writel(0, tx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
else
writel(0, tx_ring->tail);
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
/* Tx irq moderation */
ew32(TADV, adapter->tx_abs_int_delay);
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
		/* set up some performance related parameters to encourage the
		 * hardware to use the bus more efficiently in bursts; depends
		 * on tx_int_delay being enabled:
* wthresh = 1 ==> burst write is disabled to avoid Tx stalls
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if
* there are Tx hangs or other Tx related bugs
*/
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
ew32(TXDCTL(0), txdctl);
}
	/* erratum workaround: set txdctl the same for both queues */
ew32(TXDCTL(1), er32(TXDCTL(0)));
/* Program the Transmit Control Register */
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
#define SPEED_MODE_BIT BIT(21)
tarc |= SPEED_MODE_BIT;
ew32(TARC(0), tarc);
}
/* errata: program both queues to unweighted RR */
if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
tarc = er32(TARC(0));
tarc |= 1;
ew32(TARC(0), tarc);
tarc = er32(TARC(1));
tarc |= 1;
ew32(TARC(1), tarc);
}
/* Setup Transmit Descriptor Settings for eop descriptor */
adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
/* only set IDE if we are delaying interrupts using the timers */
if (adapter->tx_int_delay)
adapter->txd_cmd |= E1000_TXD_CMD_IDE;
/* enable Report Status bit */
adapter->txd_cmd |= E1000_TXD_CMD_RS;
ew32(TCTL, tctl);
hw->mac.ops.config_collision_dist(hw);
/* SPT and KBL Si errata workaround to avoid data corruption */
if (hw->mac.type == e1000_pch_spt) {
u32 reg_val;
reg_val = er32(IOSFPC);
reg_val |= E1000_RCTL_RDMTS_HEX;
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
/* SPT and KBL Si errata workaround to avoid Tx hang.
* Dropping the number of outstanding requests from
* 3 to 2 in order to avoid a buffer overrun.
*/
reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
ew32(TARC(0), reg_val);
}
}
/**
* e1000_setup_rctl - configure the receive control registers
 * @adapter: board private structure
**/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
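/* e.g. with 4K pages, a 9000-byte jumbo MTU gives PAGE_USE_COUNT(9000) = 3 */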
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
u32 pages = 0;
/* Workaround Si errata on PCHx - configure jumbo frame flow.
* If jumbo frames not set, program related MAC/PHY registers
* to h/w defaults
*/
if (hw->mac.type >= e1000_pch2lan) {
s32 ret_val;
if (adapter->netdev->mtu > ETH_DATA_LEN)
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
else
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
if (ret_val)
e_dbg("failed to enable|disable jumbo frame workaround mode\n");
}
/* Program MC offset vector base */
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not Store bad packets */
rctl &= ~E1000_RCTL_SBP;
/* Enable Long Packet receive */
if (adapter->netdev->mtu <= ETH_DATA_LEN)
rctl &= ~E1000_RCTL_LPE;
else
rctl |= E1000_RCTL_LPE;
/* Some systems expect that the CRC is included in SMBUS traffic. The
* hardware strips the CRC before sending to both SMBUS (BMC) and to
* host memory when this is enabled
*/
if (adapter->flags2 & FLAG2_CRC_STRIPPING)
rctl |= E1000_RCTL_SECRC;
/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
u16 phy_data;
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
phy_data &= 0xfff8;
phy_data |= BIT(2);
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
e1e_rphy(hw, 22, &phy_data);
phy_data &= 0x0fff;
phy_data |= BIT(14);
e1e_wphy(hw, 0x10, 0x2823);
e1e_wphy(hw, 0x11, 0x0003);
e1e_wphy(hw, 22, phy_data);
}
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
rctl &= ~E1000_RCTL_BSEX;
break;
case 4096:
rctl |= E1000_RCTL_SZ_4096;
break;
case 8192:
rctl |= E1000_RCTL_SZ_8192;
break;
case 16384:
rctl |= E1000_RCTL_SZ_16384;
break;
}
/* Enable Extended Status in all Receive Descriptors */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
ew32(RFCTL, rfctl);
/* 82571 and greater support packet-split where the protocol
* header is placed in skb->data and the packet data is
* placed in pages hanging off of skb_shinfo(skb)->nr_frags.
* In the case of a non-split, skb->data is linearly filled,
* followed by the page buffers. Therefore, skb->data is
* sized to hold the largest protocol header.
*
* allocations using alloc_page take too long for regular MTU
* so only enable packet split for jumbo frames
*
* Using pages when the page size is greater than 16k wastes
* a lot of memory, since we allocate 3 pages at all times
* per packet.
*/
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
u32 psrctl = 0;
/* Enable Packet split descriptors */
rctl |= E1000_RCTL_DTYP_PS;
psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
switch (adapter->rx_ps_pages) {
case 3:
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
/* fall-through */
case 2:
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
/* fall-through */
case 1:
psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
break;
}
ew32(PSRCTL, psrctl);
}
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
* in e1000e_set_rx_mode
*/
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
* and that breaks VLANs.
*/
}
ew32(RCTL, rctl);
/* just started the receive unit, no need to restart */
adapter->flags &= ~FLAG_RESTART_NOW;
}
/**
* e1000_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
* Configure the Rx unit of the MAC after a reset.
**/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *rx_ring = adapter->rx_ring;
u64 rdba;
u32 rdlen, rctl, rxcsum, ctrl_ext;
if (adapter->rx_ps_pages) {
/* this is a 32 byte descriptor */
rdlen = rx_ring->count *
sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
adapter->clean_rx = e1000_clean_jumbo_rx_irq;
adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
} else {
rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
adapter->clean_rx = e1000_clean_rx_irq;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
}
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(10000, 20000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
/* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
* enable prefetching of 0x20 Rx descriptors
* granularity = 01
* wthresh = 04,
* hthresh = 04,
* pthresh = 0x20
*/
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
}
/* set the Receive Delay Timer Register */
ew32(RDTR, adapter->rx_int_delay);
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0))
e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT);
/* Auto-Mask interrupts upon ICR access */
ctrl_ext |= E1000_CTRL_EXT_IAME;
ew32(IAM, 0xffffffff);
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
rdba = rx_ring->dma;
ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
ew32(RDBAH(0), (rdba >> 32));
ew32(RDLEN(0), rdlen);
ew32(RDH(0), 0);
ew32(RDT(0), 0);
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
writel(0, rx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
else
writel(0, rx_ring->tail);
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
else
rxcsum &= ~E1000_RXCSUM_TUOFL;
ew32(RXCSUM, rxcsum);
/* With jumbo frames, excessive C-state transition latencies result
* in dropped transactions.
*/
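	/* the Rx packet buffer size (RXA, in KB) minus one max frame, times
	 * 8 bits at 1ns/bit (1Gb/s), divided by 1000 gives the tolerable
	 * latency in usec for the PM QoS request below
	 */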
if (adapter->netdev->mtu > ETH_DATA_LEN) {
u32 lat =
((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
adapter->max_frame_size) * 8 / 1000;
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
}
dev_info(&adapter->pdev->dev,
"Some CPU C-states have been disabled in order to enable jumbo frames\n");
pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
pm_qos_update_request(&adapter->pm_qos_req,
PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
ew32(RCTL, rctl);
}
/**
* e1000e_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
*
* Writes multicast address list to the MTA hash table.
* Returns: -ENOMEM on failure
* 0 on no addresses written
* X on writing X addresses to MTA
*/
static int e1000e_write_mc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
u8 *mta_list;
int i;
if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
return 0;
}
mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
/* update_mc_addr_list expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
return netdev_mc_count(netdev);
}
/**
* e1000e_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int rar_entries;
int count = 0;
rar_entries = hw->mac.ops.rar_get_count(hw);
/* save a rar entry for our hardware address */
rar_entries--;
/* save a rar entry for the LAA workaround */
if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
rar_entries--;
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
/* write the addresses in reverse order to avoid write
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
int ret_val;
if (!rar_entries)
break;
ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
if (ret_val < 0)
return -ENOMEM;
count++;
}
}
/* zero out the remaining RAR entries not used above */
for (; rar_entries > 0; rar_entries--) {
ew32(RAH(rar_entries), 0);
ew32(RAL(rar_entries), 0);
}
e1e_flush();
return count;
}
/**
* e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
* The ndo_set_rx_mode entry point is called whenever the unicast or multicast
* address list or the network interface flags are updated. This routine is
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
static void e1000e_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
if (pm_runtime_suspended(netdev->dev.parent))
return;
/* Check for Promiscuous and All Multicast modes */
rctl = er32(RCTL);
/* clear the affected bits */
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
/* Do not hardware filter VLANs in promisc mode */
e1000e_vlan_filter_disable(adapter);
} else {
int count;
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
} else {
/* Write addresses to the MTA, if the attempt fails
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
count = e1000e_write_mc_addr_list(netdev);
if (count < 0)
rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
/* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
count = e1000e_write_uc_addr_list(netdev);
if (count < 0)
rctl |= E1000_RCTL_UPE;
}
ew32(RCTL, rctl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
}
static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 rss_key[10];
int i;
netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < 10; i++)
ew32(RSSRK(i), rss_key[i]);
/* Direct all traffic to queue 0 */
for (i = 0; i < 32; i++)
ew32(RETA(i), 0);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback.
*/
rxcsum = er32(RXCSUM);
rxcsum |= E1000_RXCSUM_PCSD;
ew32(RXCSUM, rxcsum);
mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
ew32(MRQC, mrqc);
}
/**
* e1000e_get_base_timinca - get default SYSTIM time increment attributes
* @adapter: board private structure
* @timinca: pointer to returned time increment attributes
*
* Get attributes for incrementing the System Time Register SYSTIML/H at
* the default base frequency, and set the cyclecounter shift value.
**/
s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
{
struct e1000_hw *hw = &adapter->hw;
u32 incvalue, incperiod, shift;
/* Make sure clock is enabled on I217/I218/I219 before checking
* the frequency
*/
if ((hw->mac.type >= e1000_pch_lpt) &&
!(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
if (!(fextnvm7 & BIT(0))) {
ew32(FEXTNVM7, fextnvm7 | BIT(0));
e1e_flush();
}
}
switch (hw->mac.type) {
case e1000_pch2lan:
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHZ;
incvalue = INCVALUE_96MHZ;
shift = INCVALUE_SHIFT_96MHZ;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
break;
case e1000_pch_lpt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHZ;
incvalue = INCVALUE_96MHZ;
shift = INCVALUE_SHIFT_96MHZ;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
} else {
/* Stable 25MHz frequency */
incperiod = INCPERIOD_25MHZ;
incvalue = INCVALUE_25MHZ;
shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
}
break;
case e1000_pch_spt:
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
incvalue = INCVALUE_24MHZ;
shift = INCVALUE_SHIFT_24MHZ;
adapter->cc.shift = shift;
break;
case e1000_pch_cnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
incvalue = INCVALUE_24MHZ;
shift = INCVALUE_SHIFT_24MHZ;
adapter->cc.shift = shift;
} else {
/* Stable 38400KHz frequency */
incperiod = INCPERIOD_38400KHZ;
incvalue = INCVALUE_38400KHZ;
shift = INCVALUE_SHIFT_38400KHZ;
adapter->cc.shift = shift;
}
break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
incperiod = INCPERIOD_25MHZ;
incvalue = INCVALUE_25MHZ;
shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
break;
default:
return -EINVAL;
}
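	/* pack the increment period into the top byte of TIMINCA and the
	 * scaled increment value into the remaining low bits
	 */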
*timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
return 0;
}
/**
 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
 * @adapter: board private structure
 * @config: pointer to the timestamp configuration to apply
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
*
* Incoming time stamping has to be configured via the hardware filters.
* Not all combinations are supported, in particular event type has to be
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
struct hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 rxmtrl = 0;
u16 rxudp = 0;
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
/* flags reserved for future extensions - must be zero */
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
break;
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
/* Also time stamps V2 L2 Path Delay Request/Response */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
is_l2 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
/* Also time stamps V2 L2 Path Delay Request/Response. */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
is_l2 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* Hardware cannot filter just V2 L4 Sync messages;
* fall-through to V2 (both L2 and L4) Sync.
*/
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* Also time stamps V2 Path Delay Request/Response. */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* Hardware cannot filter just V2 L4 Delay Request messages;
* fall-through to V2 (both L2 and L4) Delay Request.
*/
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Also time stamps V2 Path Delay Request/Response. */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
/* Hardware cannot filter just V2 L4 or L2 Event messages;
* fall-through to all V2 (both L2 and L4) Events.
*/
case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* For V1, the hardware can only filter Sync messages or
* Delay Request messages but not both so fall-through to
* time stamp all packets.
*/
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
is_l2 = true;
is_l4 = true;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
adapter->hwtstamp_config = *config;
/* enable/disable Tx h/w time stamping */
regval = er32(TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
regval |= tsync_tx_ctl;
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
e_err("Timesync Tx Control register not set as expected\n");
return -EAGAIN;
}
/* enable/disable Rx h/w time stamping */
regval = er32(TSYNCRXCTL);
regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
regval |= tsync_rx_ctl;
ew32(TSYNCRXCTL, regval);
if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
e_err("Timesync Rx Control register not set as expected\n");
return -EAGAIN;
}
/* L2: define ethertype filter for time stamped packets */
if (is_l2)
rxmtrl |= ETH_P_1588;
/* define which PTP packets get time stamped */
ew32(RXMTRL, rxmtrl);
/* Filter by destination port */
if (is_l4) {
rxudp = PTP_EV_PORT;
cpu_to_be16s(&rxudp);
}
ew32(RXUDP, rxudp);
e1e_flush();
/* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
er32(RXSTMPH);
er32(TXSTMPH);
return 0;
}
/**
* e1000_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
struct e1000_ring *rx_ring = adapter->rx_ring;
e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
if (adapter->netdev->features & NETIF_F_RXHASH)
e1000e_setup_rss_hash(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
}
/**
* e1000e_power_up_phy - restore link in case the phy was powered down
* @adapter: address of board private structure
*
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000e_reset ***
**/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
if (adapter->hw.phy.ops.power_up)
adapter->hw.phy.ops.power_up(&adapter->hw);
adapter->hw.mac.ops.setup_link(&adapter->hw);
}
/**
 * e1000_power_down_phy - Power down the PHY
 * @adapter: board private structure
 *
* Power down the PHY so no link is implied when interface is down.
* The PHY cannot be powered down if management or WoL is active.
*/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
if (adapter->hw.phy.ops.power_down)
adapter->hw.phy.ops.power_down(&adapter->hw);
}
/**
 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
 * @adapter: board private structure
 *
 * We want to clear all pending descriptors from the TX ring.
 * Zeroing happens when the HW reads the regs. We assign the ring itself as
 * the data of the next descriptor. We don't care about the data; we are about
 * to reset the HW.
*/
static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc = NULL;
u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
u16 size = 512;
tctl = er32(TCTL);
ew32(TCTL, tctl | E1000_TCTL_EN);
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
tx_desc->buffer_addr = tx_ring->dma;
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
/* flush descriptors to memory before notifying the HW */
wmb();
tx_ring->next_to_use++;
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0;
ew32(TDT(0), tx_ring->next_to_use);
usleep_range(200, 250);
}
/**
 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
 * @adapter: board private structure
 *
* Mark all descriptors in the RX ring as consumed and disable the rx ring
*/
static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
{
u32 rctl, rxdctl;
struct e1000_hw *hw = &adapter->hw;
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(100, 150);
rxdctl = er32(RXDCTL(0));
/* zero the lower 14 bits (prefetch and host thresholds) */
rxdctl &= 0xffffc000;
/* update thresholds: prefetch threshold to 31, host threshold to 1
* and make sure the granularity is "descriptors" and not "cache lines"
*/
rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
ew32(RXDCTL(0), rxdctl);
/* momentarily enable the RX ring for the changes to take effect */
ew32(RCTL, rctl | E1000_RCTL_EN);
e1e_flush();
usleep_range(100, 150);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
}
/**
 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
 * @adapter: board private structure
 *
 * In i219, the descriptor rings must be emptied before resetting the HW
 * or before changing the device state to D3 during runtime (runtime PM).
 *
 * Failure to do this will cause the HW to enter a unit hang state which can
 * only be released by a PCI reset on the device.
*/
static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
{
u16 hang_state;
u32 fext_nvm11, tdlen;
struct e1000_hw *hw = &adapter->hw;
/* First, disable MULR fix in FEXTNVM11 */
	fext_nvm11 = er32(FEXTNVM11);