Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-08 07:21:27 +00:00
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6
commit 290f5e3b59
11 changed files with 174 additions and 80 deletions
@@ -770,4 +770,11 @@
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
                                                       on DMA coal */
 
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA           0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK      0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT     14
+#define E1000_RTTBCNRC_RF_INT_MASK      \
+        (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
 #endif
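These four new fields describe a fixed-point rate factor: the low 14 bits (E1000_RTTBCNRC_RF_DEC_MASK) carry the fractional part, the next 14 bits (E1000_RTTBCNRC_RF_INT_MASK, the same mask shifted by E1000_RTTBCNRC_RF_INT_SHIFT) carry the integer part, and bit 31 (E1000_RTTBCNRC_RS_ENA) enables the per-queue rate scheduler. The igb_main.c hunk later in this merge programs them with link_speed / tx_rate for each VF queue.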
@@ -106,6 +106,10 @@
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
 
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
 /* Split and Replication RX Control - RW */
 #define E1000_RXPBS    0x02404 /* Rx Packet Buffer Size - RW */
 /*

@@ -77,6 +77,7 @@ struct vf_data_storage {
         unsigned long last_nack;
         u16 pf_vlan; /* When set, guest VLAN config not allowed. */
         u16 pf_qos;
+        u16 tx_rate;
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */

@@ -323,6 +324,7 @@ struct igb_adapter {
         u16 rx_ring_count;
         unsigned int vfs_allocated_count;
         struct vf_data_storage *vf_data;
+        int vf_rate_link_speed;
         u32 rss_queues;
         u32 wvbr;
 };

@@ -50,12 +50,12 @@
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "2.1.0-k2"
+#define DRV_VERSION "2.4.13-k2"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                 "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
         [board_82575] = &e1000_82575_info,

@@ -150,6 +150,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                  struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);

@@ -3511,6 +3512,7 @@ static void igb_watchdog_task(struct work_struct *work)
                         netif_carrier_on(netdev);
 
                         igb_ping_all_vfs(adapter);
+                        igb_check_vf_rate_limit(adapter);
 
                         /* link state has changed, schedule phy info update */
                         if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -6599,9 +6601,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
         return igb_set_vf_mac(adapter, vf, mac);
 }
 
+static int igb_link_mbps(int internal_link_speed)
+{
+        switch (internal_link_speed) {
+        case SPEED_100:
+                return 100;
+        case SPEED_1000:
+                return 1000;
+        default:
+                return 0;
+        }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+                                  int link_speed)
+{
+        int rf_dec, rf_int;
+        u32 bcnrc_val;
+
+        if (tx_rate != 0) {
+                /* Calculate the rate factor values to set */
+                rf_int = link_speed / tx_rate;
+                rf_dec = (link_speed - (rf_int * tx_rate));
+                rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+                bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+                              E1000_RTTBCNRC_RF_INT_MASK);
+                bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+        } else {
+                bcnrc_val = 0;
+        }
+
+        wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+        wr32(E1000_RTTBCNRC, bcnrc_val);
+}
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+        int actual_link_speed, i;
+        bool reset_rate = false;
+
+        /* VF TX rate limit was not set or not supported */
+        if ((adapter->vf_rate_link_speed == 0) ||
+            (adapter->hw.mac.type != e1000_82576))
+                return;
+
+        actual_link_speed = igb_link_mbps(adapter->link_speed);
+        if (actual_link_speed != adapter->vf_rate_link_speed) {
+                reset_rate = true;
+                adapter->vf_rate_link_speed = 0;
+                dev_info(&adapter->pdev->dev,
+                         "Link speed has been changed. VF Transmit "
+                         "rate is disabled\n");
+        }
+
+        for (i = 0; i < adapter->vfs_allocated_count; i++) {
+                if (reset_rate)
+                        adapter->vf_data[i].tx_rate = 0;
+
+                igb_set_vf_rate_limit(&adapter->hw, i,
+                                      adapter->vf_data[i].tx_rate,
+                                      actual_link_speed);
+        }
+}
+
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
-        return -EOPNOTSUPP;
+        struct igb_adapter *adapter = netdev_priv(netdev);
+        struct e1000_hw *hw = &adapter->hw;
+        int actual_link_speed;
+
+        if (hw->mac.type != e1000_82576)
+                return -EOPNOTSUPP;
+
+        actual_link_speed = igb_link_mbps(adapter->link_speed);
+        if ((vf >= adapter->vfs_allocated_count) ||
+            (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+            (tx_rate < 0) || (tx_rate > actual_link_speed))
+                return -EINVAL;
+
+        adapter->vf_rate_link_speed = actual_link_speed;
+        adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+        igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+        return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
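igb_set_vf_rate_limit() above encodes link_speed / tx_rate as a 14-bit-integer / 14-bit-fraction factor in RTTBCNRC. Below is a minimal user-space sketch of just that encoding: the register defines are copied from the e1000_defines.h hunk earlier in this merge, while the sample speeds and the printf are illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

#define E1000_RTTBCNRC_RS_ENA           0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK      0x00003FFF
#define E1000_RTTBCNRC_RF_INT_SHIFT     14
#define E1000_RTTBCNRC_RF_INT_MASK \
        (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)

static uint32_t rate_factor(int link_speed, int tx_rate)
{
        uint32_t bcnrc_val;
        int rf_int, rf_dec;

        if (tx_rate == 0)
                return 0;       /* rate scheduler left disabled */

        /* factor = link_speed / tx_rate as 14-bit integer, 14-bit fraction */
        rf_int = link_speed / tx_rate;
        rf_dec = link_speed - rf_int * tx_rate;
        rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

        bcnrc_val = E1000_RTTBCNRC_RS_ENA;
        bcnrc_val |= (rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
                     E1000_RTTBCNRC_RF_INT_MASK;
        bcnrc_val |= rf_dec & E1000_RTTBCNRC_RF_DEC_MASK;
        return bcnrc_val;
}

int main(void)
{
        /* 1000 Mb/s link capped at 300 Mb/s */
        printf("RTTBCNRC = 0x%08x\n", (unsigned int)rate_factor(1000, 300));
        return 0;
}

For a 1000 Mb/s link capped at 300 Mb/s this gives rf_int = 3 and rf_dec = (100 << 14) / 300 = 5461, i.e. RTTBCNRC = 0x8000d555. In this kernel generation the per-VF rate is typically set from user space with iproute2, roughly "ip link set <dev> vf <n> rate <Mb/s>", which lands in igb_ndo_set_vf_bw().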
@@ -6612,7 +6696,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
                 return -EINVAL;
         ivi->vf = vf;
         memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-        ivi->tx_rate = 0;
+        ivi->tx_rate = adapter->vf_data[vf].tx_rate;
         ivi->vlan = adapter->vf_data[vf].pf_vlan;
         ivi->qos = adapter->vf_data[vf].pf_qos;
         return 0;

@@ -201,9 +201,6 @@ struct igbvf_adapter {
         unsigned int restart_queue;
         u32 txd_cmd;
 
-        bool detect_tx_hung;
-        u8 tx_timeout_factor;
-
         u32 tx_int_delay;
         u32 tx_abs_int_delay;
 

@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
         buffer_info->time_stamp = 0;
 }
 
-static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
-{
-        struct igbvf_ring *tx_ring = adapter->tx_ring;
-        unsigned int i = tx_ring->next_to_clean;
-        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
-        union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
-        /* detected Tx unit hang */
-        dev_err(&adapter->pdev->dev,
-                "Detected Tx Unit Hang:\n"
-                "  TDH                  <%x>\n"
-                "  TDT                  <%x>\n"
-                "  next_to_use          <%x>\n"
-                "  next_to_clean        <%x>\n"
-                "buffer_info[next_to_clean]:\n"
-                "  time_stamp           <%lx>\n"
-                "  next_to_watch        <%x>\n"
-                "  jiffies              <%lx>\n"
-                "  next_to_watch.status <%x>\n",
-                readl(adapter->hw.hw_addr + tx_ring->head),
-                readl(adapter->hw.hw_addr + tx_ring->tail),
-                tx_ring->next_to_use,
-                tx_ring->next_to_clean,
-                tx_ring->buffer_info[eop].time_stamp,
-                eop,
-                jiffies,
-                eop_desc->wb.status);
-}
-
 /**
  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure

@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 {
         struct igbvf_adapter *adapter = tx_ring->adapter;
-        struct e1000_hw *hw = &adapter->hw;
         struct net_device *netdev = adapter->netdev;
         struct igbvf_buffer *buffer_info;
         struct sk_buff *skb;

@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                 }
         }
 
-        if (adapter->detect_tx_hung) {
-                /* Detect a transmit hang in hardware, this serializes the
-                 * check with the clearing of time_stamp and movement of i */
-                adapter->detect_tx_hung = false;
-                if (tx_ring->buffer_info[i].time_stamp &&
-                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-                               (adapter->tx_timeout_factor * HZ)) &&
-                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-
-                        tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-                        /* detected Tx unit hang */
-                        igbvf_print_tx_hang(adapter);
-
-                        netif_stop_queue(netdev);
-                }
-        }
         adapter->net_stats.tx_bytes += total_bytes;
         adapter->net_stats.tx_packets += total_packets;
         return count < tx_ring->count;

@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
                                                   &adapter->link_duplex);
                         igbvf_print_link_info(adapter);
 
-                        /* adjust timeout factor according to speed/duplex */
-                        adapter->tx_timeout_factor = 1;
-                        switch (adapter->link_speed) {
-                        case SPEED_10:
-                                adapter->tx_timeout_factor = 16;
-                                break;
-                        case SPEED_100:
-                                /* maybe add some timeout factor ? */
-                                break;
-                        }
-
                         netif_carrier_on(netdev);
                         netif_wake_queue(netdev);
                 }

@@ -1907,9 +1850,6 @@
         /* Cause software interrupt to ensure Rx ring is cleaned */
         ew32(EICS, adapter->rx_ring->eims_value);
 
-        /* Force detection of hung controller every watchdog period */
-        adapter->detect_tx_hung = 1;
-
         /* Reset the timer */
         if (!test_bit(__IGBVF_DOWN, &adapter->state))
                 mod_timer(&adapter->watchdog_timer,
@@ -3077,6 +3077,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
         ixgbe_configure_srrctl(adapter, ring);
         ixgbe_configure_rscctl(adapter, ring);
 
+        /* If operating in IOV mode set RLPML for X540 */
+        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+            hw->mac.type == ixgbe_mac_X540) {
+                rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+                rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+                            ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+        }
+
         if (hw->mac.type == ixgbe_mac_82598EB) {
                 /*
                  * enable cache line friendly hardware writes:
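The RLPML write above appears to cap the largest packet an Rx ring will accept when SR-IOV is enabled on an X540: the low 14 bits of RXDCTL (IXGBE_RXDCTL_RLPMLMASK, added by a defines hunk later in this merge) hold the length and IXGBE_RXDCTL_RLPML_EN turns the check on. The programmed value is the netdev MTU plus ETH_HLEN (14), ETH_FCS_LEN (4) and VLAN_HLEN (4), so a 9000-byte MTU yields 9022 bytes.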
@@ -5441,8 +5449,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
         /* MTU < 68 is an error and causes problems on some kernels */
-        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
-                return -EINVAL;
+        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+            hw->mac.type != ixgbe_mac_X540) {
+                if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+                        return -EINVAL;
+        } else {
+                if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+                        return -EINVAL;
+        }
 
         e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
         /* must set new MTU before calling down or up */

@@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
         return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        int new_mtu = msgbuf[1];
+        u32 max_frs;
+        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+        /* Only X540 supports jumbo frames in IOV mode */
+        if (adapter->hw.mac.type != ixgbe_mac_X540)
+                return;
+
+        /* MTU < 68 is an error and causes problems on some kernels */
+        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+                e_err(drv, "VF mtu %d out of range\n", new_mtu);
+                return;
+        }
+
+        max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+                   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+        if (max_frs < new_mtu) {
+                max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+        }
+
+        e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
+
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
         u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

@@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                                             hash_list, vf);
                 break;
         case IXGBE_VF_SET_LPE:
-                WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+                ixgbe_set_vf_lpe(adapter, msgbuf);
                 break;
         case IXGBE_VF_SET_VLAN:
                 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)

@@ -1680,6 +1680,8 @@
 #define IXGBE_RXCTRL_DMBYPS     0x00000002 /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000 /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
 #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/

@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXCTRL_DMBYPS     0x00000002 /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000 /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 /* DCA Control */
 #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */

@@ -51,7 +51,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
         "Intel(R) 82599 Virtual Function";
 
-#define DRV_VERSION "1.0.19-k0"
+#define DRV_VERSION "1.1.0-k0"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
         "Copyright (c) 2009 - 2010 Intel Corporation.";

@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 }
 
 /*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to

@@ -1017,7 +1017,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 }
 
 /**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
 **/

@@ -1665,6 +1665,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                 j = adapter->rx_ring[i].reg_idx;
                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                 rxdctl |= IXGBE_RXDCTL_ENABLE;
+                if (hw->mac.type == ixgbe_mac_X540_vf) {
+                        rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+                        rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+                                   IXGBE_RXDCTL_RLPML_EN);
+                }
                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                 ixgbevf_rx_desc_queue_enable(adapter, i);
         }

@@ -1967,7 +1972,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 }
 
 /*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very

@@ -3217,10 +3222,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+        struct ixgbe_hw *hw = &adapter->hw;
         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+        u32 msg[2];
+
+        if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
 
         /* MTU < 68 is an error and causes problems on some kernels */
-        if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+        if ((new_mtu < 68) || (max_frame > max_possible_frame))
                 return -EINVAL;
 
         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3239,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
         /* must set new MTU before calling down or up */
         netdev->mtu = new_mtu;
 
+        msg[0] = IXGBE_VF_SET_LPE;
+        msg[1] = max_frame;
+        hw->mbx.ops.write_posted(hw, msg, 2);
+
         if (netif_running(netdev))
                 ixgbevf_reinit_locked(adapter);
 
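Both ends of this negotiation are visible in the merge: ixgbevf_change_mtu() above posts a two-word IXGBE_VF_SET_LPE mailbox message to the PF, and ixgbe_set_vf_lpe() (added in an earlier hunk) consumes it. A small standalone sketch of just the message layout follows; the numeric opcode value is an assumption made for illustration, the real constant lives elsewhere in the driver's mailbox header.

#include <stdint.h>
#include <stdio.h>

#define IXGBE_VF_SET_LPE 0x5    /* assumed value; the driver defines the real one */
#define ETH_HLEN         14
#define ETH_FCS_LEN      4

int main(void)
{
        uint32_t msg[2];
        int new_mtu = 9000;

        /* VF side (ixgbevf_change_mtu): word 0 is the opcode, word 1 the max frame size */
        msg[0] = IXGBE_VF_SET_LPE;
        msg[1] = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        /* PF side (ixgbe_set_vf_lpe): reads word 1 and raises MAXFRS if needed */
        printf("opcode 0x%x, requested max frame %u bytes\n",
               (unsigned int)msg[0], (unsigned int)msg[1]);
        return 0;
}

Note that the PF handler only ever raises the global MAXFRS value (the "if (max_frs < new_mtu)" check), never shrinks it, and it rejects requests below 68 or above IXGBE_MAX_JUMBO_FRAME_SIZE.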
@@ -3519,9 +3534,9 @@ static struct pci_driver ixgbevf_driver = {
 };
 
 /**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
  *
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
 **/
 static int __init ixgbevf_init_module(void)

@@ -3539,9 +3554,9 @@ static int __init ixgbevf_init_module(void)
 module_init(ixgbevf_init_module);
 
 /**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
  *
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
  * from memory.
 **/
 static void __exit ixgbevf_exit_module(void)

@@ -3551,7 +3566,7 @@ static void __exit ixgbevf_exit_module(void)
 
 #ifdef DEBUG
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
  * used by hardware layer to print debugging information
 **/
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)