diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_80003es2lan.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_80003es2lan.c --- linux-2.6.28/ubuntu/e1000e-next/e1000_80003es2lan.c 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_80003es2lan.c 2009-10-01 00:53:10.000000000 +0200 @@ -278,6 +278,9 @@ /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan; + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + out: return ret_val; } @@ -295,7 +298,6 @@ hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan; hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan; hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan; - e1000e_get_bus_info_pcie(hw); } /** @@ -502,28 +504,35 @@ goto out; } - /* - * The "ready" bit in the MDIC register may be incorrectly set - * before the device has completed the "Page Select" MDI - * transaction. So we wait 200us after each MDI command... - */ - udelay(200); + if (hw->dev_spec._80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); - /* ...and verify the command was successful. */ - ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); - if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; - e1000_release_phy_80003es2lan(hw); - goto out; - } + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + goto out; + } - udelay(200); + udelay(200); - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); - udelay(200); e1000_release_phy_80003es2lan(hw); out: @@ -567,29 +576,35 @@ goto out; } + if (hw->dev_spec._80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); - /* - * The "ready" bit in the MDIC register may be incorrectly set - * before the device has completed the "Page Select" MDI - * transaction. So we wait 200us after each MDI command... - */ - udelay(200); - - /* ...and verify the command was successful. */ - ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + /* ...and verify the command was successful. 
*/ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); - if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; - e1000_release_phy_80003es2lan(hw); - goto out; - } + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + goto out; + } - udelay(200); + udelay(200); - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); - udelay(200); e1000_release_phy_80003es2lan(hw); out: @@ -762,13 +777,13 @@ index = phy_data & GG82563_DSPD_CABLE_LENGTH; - if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE + 5) { + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { ret_val = E1000_ERR_PHY; goto out; } phy->min_cable_length = e1000_gg82563_cable_length_table[index]; - phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -923,6 +938,19 @@ reg_data &= ~0x00100000; E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + /* default to true to enable the MDIC W/A */ + hw->dev_spec._80003es2lan.mdic_wa_enable = true; + + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, + &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec._80003es2lan.mdic_wa_enable = false; + } + /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_80003es2lan.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_80003es2lan.h --- linux-2.6.28/ubuntu/e1000e-next/e1000_80003es2lan.h 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_80003es2lan.h 2009-10-01 00:53:10.000000000 +0200 @@ -42,6 +42,9 @@ #define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 #define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_defines.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_defines.h --- linux-2.6.28/ubuntu/e1000e-next/e1000_defines.h 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_defines.h 2009-10-01 00:53:10.000000000 +0200 @@ -131,12 +131,12 @@ #define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */ #define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA #define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */ -#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ /* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ #define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ #define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ #define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ -#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of 
SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ #define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ #define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ @@ -671,6 +671,7 @@ /* Extended Configuration Control and Size */ #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000.h --- linux-2.6.28/ubuntu/e1000e-next/e1000.h 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000.h 2009-10-01 00:53:10.000000000 +0200 @@ -332,6 +332,7 @@ struct work_struct downshift_task; struct work_struct update_phy_task; struct work_struct led_blink_task; + struct work_struct print_hang_task; u32 *config_space; }; diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_ich8lan.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_ich8lan.c --- linux-2.6.28/ubuntu/e1000e-next/e1000_ich8lan.c 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_ich8lan.c 2009-10-01 00:53:10.000000000 +0200 @@ -62,10 +62,13 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw); static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, @@ -89,6 +92,7 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); static s32 e1000_led_on_pchlan(struct e1000_hw *hw); @@ -115,6 +119,8 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ @@ -179,11 +185,13 @@ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; phy->ops.get_info = e1000_get_phy_info_ich8lan; phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; phy->ops.release = e1000_release_swflag_ich8lan; phy->ops.reset = e1000_phy_hw_reset_ich8lan; - phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; - 
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; @@ -264,6 +272,8 @@ case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: @@ -298,7 +308,6 @@ { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - union ich8_hws_flash_status hsfsts; u32 gfpreg, sector_base_addr, sector_end_addr; s32 ret_val = E1000_SUCCESS; u16 i; @@ -335,20 +344,6 @@ /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); - /* - * Make sure the flash bank size does not overwrite the 4k - * sector ranges. We may have 64k allotted to us but we only care - * about the first 2 4k sectors. Therefore, if we have anything less - * than 64k set in the HSFSTS register, we will reduce the bank size - * down to 4k and let the rest remain unused. If berasesz == 3, then - * we are working in 64k mode. Otherwise we are not. - */ - if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) { - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.berasesz != 3) - nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS; - } - nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; /* Clear shadow ram */ @@ -358,9 +353,9 @@ } /* Function Pointers */ - nvm->ops.acquire = e1000_acquire_swflag_ich8lan; + nvm->ops.acquire = e1000_acquire_nvm_ich8lan; + nvm->ops.release = e1000_release_nvm_ich8lan; nvm->ops.read = e1000_read_nvm_ich8lan; - nvm->ops.release = e1000_release_swflag_ich8lan; nvm->ops.update = e1000_update_nvm_checksum_ich8lan; nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; @@ -487,14 +482,6 @@ goto out; } - if (hw->mac.type == e1000_pchlan) { - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - E1000_KMRNCTRLSTA_K1_ENABLE); - if (ret_val) - goto out; - } - /* * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -504,6 +491,12 @@ if (ret_val) goto out; + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + goto out; + } + if (!link) goto out; /* No link detected */ @@ -580,47 +573,89 @@ static DEFINE_MUTEX(nvm_mutex); /** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_lock(&nvm_mutex); + + return E1000_SUCCESS; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. 
+ **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_unlock(&nvm_mutex); + + return; +} + +static DEFINE_MUTEX(swflag_mutex); + +/** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure * - * Acquires the software control flag for performing NVM and PHY - * operations. This is a function pointer entry point only called by - * read/write routines for the PHY and NVM parts. + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. **/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; s32 ret_val = E1000_SUCCESS; - might_sleep(); - - mutex_lock(&nvm_mutex); + mutex_lock(&swflag_mutex); while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; - if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) { - extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; - ew32(EXTCNF_CTRL, extcnf_ctrl); - - extcnf_ctrl = er32(EXTCNF_CTRL); - if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) - break; - } mdelay(1); timeout--; } if (!timeout) { e_dbg("SW/FW/HW has locked the resource for too long.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* In some cases, hardware will take up to 400ms to set the SW flag. */ + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore.\n"); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); - mutex_unlock(&nvm_mutex); ret_val = -E1000_ERR_CONFIG; goto out; } out: + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; } @@ -628,9 +663,8 @@ * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * - * Releases the software control flag for performing NVM and PHY operations. - * This is a function pointer entry point only called by read/write - * routines for the PHY and NVM parts. + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. **/ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) { @@ -640,7 +674,9 @@ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); - mutex_unlock(&nvm_mutex); + mutex_unlock(&swflag_mutex); + + return; } /** @@ -677,6 +713,339 @@ } /** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* + * Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
+ */ + if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) || + (hw->mac.type == e1000_pchlan)) { + /* Check if SW needs to configure the PHY */ + if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) || + (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) || + (hw->mac.type == e1000_pchlan)) + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + else + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto out; + + /* Wait for basic configuration completes before proceeding */ + e1000_lan_init_done_ich8lan(hw); + + /* + * Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + goto out; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto out; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && + hw->mac.type == e1000_pchlan) { + /* + * HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + data = er32(STRAP); + data &= E1000_STRAP_SMBUS_ADDRESS_MASK; + reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; + reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, + reg_data); + if (ret_val) + goto out; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, + HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto out; + + dev_spec->nvm_lcd_config_enabled = true; + } + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, + ®_data); + if (ret_val) + goto out; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto out; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + /* + * Bit 5 in the LCD config word contains the phy + * address for PCH + */ + if (hw->mac.type == e1000_pchlan) { + phy->addr = 1; + if (reg_addr & LCD_CFG_PHY_ADDR_BIT) { + phy->addr = 2; + reg_addr &= PHY_REG_MASK; + } + } + + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto out; + } + + if (hw->mac.type == e1000_pchlan) + dev_spec->nvm_lcd_config_enabled = false; + } + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = E1000_SUCCESS; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + goto out; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK; + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto out; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + goto out; + + udelay(20); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + E1000_WRITE_REG(hw, + E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + udelay(20); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + udelay(20); + +out: + return ret_val; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
+ **/ +s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type != e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto out; + + mac_reg = er32(PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } + /* Restart auto-neg to activate the bits */ + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +out: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. **/ @@ -685,7 +1054,7 @@ s32 ret_val = E1000_SUCCESS; if (hw->mac.type != e1000_pchlan) - return ret_val; + goto out; if (((hw->phy.type == e1000_phy_82577) && ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || @@ -693,12 +1062,12 @@ /* Disable generation of early preamble */ ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); if (ret_val) - return ret_val; + goto out; /* Preamble tuning for SSC */ ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); if (ret_val) - return ret_val; + goto out; } if (hw->phy.type == e1000_phy_82578) { @@ -716,11 +1085,21 @@ /* Select page 0 */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; + hw->phy.addr = 1; - e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + if (ret_val) + goto out; hw->phy.ops.release(hw); + /* + * Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + +out: return ret_val; } @@ -766,10 +1145,8 @@ **/ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { - struct e1000_phy_info *phy = &hw->phy; - u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; - s32 ret_val; - u16 word_addr, reg_data, reg_addr, phy_page = 0; + s32 ret_val = E1000_SUCCESS; + u16 reg; ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) @@ -784,74 +1161,18 @@ goto out; } - /* - * Initialize the PHY from the NVM on ICH platforms. This - * is needed due to an issue where the NVM configuration is - * not properly autoloaded after power transitions. - * Therefore, after each PHY reset, we will load the - * configuration data out of the NVM manually. 
- */ - if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) { - /* Check if SW needs configure the PHY */ - if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) || - (hw->device_id == E1000_DEV_ID_ICH8_IGP_M)) - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; - else - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; - - data = er32(FEXTNVM); - if (!(data & sw_cfg_mask)) - goto out; - - /* Wait for basic configuration completes before proceeding */ - e1000_lan_init_done_ich8lan(hw); - - /* - * Make sure HW does not configure LCD from PHY - * extended configuration before SW configuration - */ - data = er32(EXTCNF_CTRL); - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto out; - - cnf_size = er32(EXTCNF_SIZE); - cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; - cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; - if (!cnf_size) - goto out; - - cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; - cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - - /* Configure LCD from extended configuration region. */ - - /* cnf_base_addr is in DWORD */ - word_addr = (u16)(cnf_base_addr << 1); - - for (i = 0; i < cnf_size; i++) { - ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, - ®_data); - if (ret_val) - goto out; - - ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), - 1, ®_addr); - if (ret_val) - goto out; - - /* Save off the PHY page for future writes. */ - if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { - phy_page = reg_data; - continue; - } + /* Dummy read to clear the phy wakeup bit after lcd reset */ + if (hw->mac.type == e1000_pchlan) + e1e_rphy(hw, BM_WUC, ®); - reg_addr |= phy_page; + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; - ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data); - if (ret_val) - goto out; - } - } + /* Configure the LCD with the OEM bits in NVM */ + if (hw->mac.type == e1000_pchlan) + ret_val = e1000_oem_bits_config_ich8lan(hw, true); out: return ret_val; @@ -942,6 +1263,38 @@ } /** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val = E1000_SUCCESS; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + +out: + return ret_val; +} + +/** * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable @@ -1174,7 +1527,7 @@ if (ret_val) goto out; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == - E1000_ICH_NVM_SIG_VALUE) { + E1000_ICH_NVM_SIG_VALUE) { *bank = 0; goto out; } @@ -1182,11 +1535,11 @@ /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + bank1_offset, - &sig_byte); + &sig_byte); if (ret_val) goto out; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == - E1000_ICH_NVM_SIG_VALUE) { + E1000_ICH_NVM_SIG_VALUE) { *bank = 1; goto out; } @@ -1225,17 +1578,18 @@ goto out; } - ret_val = nvm->ops.acquire(hw); - if (ret_val) - goto out; + nvm->ops.acquire(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); - if (ret_val != E1000_SUCCESS) - goto release; + if (ret_val != E1000_SUCCESS) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } act_offset = (bank) ? nvm->flash_bank_size : 0; act_offset += offset; + ret_val = E1000_SUCCESS; for (i = 0; i < words; i++) { if ((dev_spec->shadow_ram) && (dev_spec->shadow_ram[offset+i].modified)) { @@ -1250,7 +1604,6 @@ } } -release: nvm->ops.release(hw); out: @@ -1525,9 +1878,7 @@ goto out; } - ret_val = nvm->ops.acquire(hw); - if (ret_val) - goto out; + nvm->ops.acquire(hw); for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset+i].modified = true; @@ -1566,9 +1917,7 @@ if (nvm->type != e1000_nvm_flash_sw) goto out; - ret_val = nvm->ops.acquire(hw); - if (ret_val) - goto out; + nvm->ops.acquire(hw); /* * We're writing to the opposite bank so if we're on bank 1, @@ -1577,8 +1926,8 @@ */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val != E1000_SUCCESS) { - nvm->ops.release(hw); - goto out; + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; } if (bank == 0) { @@ -1667,6 +2016,7 @@ nvm->ops.release(hw); goto out; } + data &= 0xBFFF; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, @@ -1925,13 +2275,8 @@ iteration = 1; break; case 2: - if (hw->mac.type == e1000_ich9lan) { - sector_size = ICH_FLASH_SEG_SIZE_8K; - iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; - } else { - ret_val = -E1000_ERR_NVM; - goto out; - } + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; @@ -1944,7 +2289,7 @@ /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; - flash_linear_addr += (bank) ? (sector_size * iteration) : 0; + flash_linear_addr += (bank) ? 
flash_bank_size : 0; for (j = 0; j < iteration ; j++) { do { @@ -2131,6 +2476,8 @@ **/ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 reg; u32 ctrl, icr, kab; s32 ret_val; @@ -2164,6 +2511,18 @@ ew32(PBS, E1000_PBS_16K); } + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + if (ret_val) + return ret_val; + + if (reg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + ctrl = er32(CTRL); if (!e1000_check_reset_block(hw) && !hw->phy.reset_disable) { @@ -2195,14 +2554,7 @@ if (hw->mac.type >= e1000_ich10lan) { e1000_lan_init_done_ich8lan(hw); } else { - if (!ret_val) { - /* release the swflag because it is not reset by - * hardware reset - */ - e1000_release_swflag_ich8lan(hw); - } - - ret_val = e1000e_get_auto_rd_done(hw); + ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) { /* * When auto config read does not complete, do not @@ -2212,7 +2564,19 @@ e_dbg("Auto Read Done did not complete\n"); } } + /* Dummy read to clear the phy wakeup bit after lcd reset */ + if (hw->mac.type == e1000_pchlan) + e1e_rphy(hw, BM_WUC, ®); + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + if (ret_val) + goto out; + } /* * For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up @@ -2231,6 +2595,7 @@ if (hw->mac.type == e1000_pchlan) ret_val = e1000_hv_phy_workarounds_ich8lan(hw); +out: return ret_val; } @@ -2543,14 +2908,6 @@ if (ret_val) goto out; - if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) { - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - E1000_KMRNCTRLSTA_K1_DISABLE); - if (ret_val) - goto out; - } - if ((hw->mac.type == e1000_ich8lan) && (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { @@ -2777,9 +3134,8 @@ E1000_PHY_CTRL_GBE_DISABLE; ew32(PHY_CTRL, phy_ctrl); - /* Workaround SWFLAG unexpectedly set during S0->Sx */ if (hw->mac.type == e1000_pchlan) - udelay(500); + e1000_phy_hw_reset_ich8lan(hw); default: break; } diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_ich8lan.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_ich8lan.h --- linux-2.6.28/ubuntu/e1000e-next/e1000_ich8lan.h 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_ich8lan.h 2009-10-01 00:53:10.000000000 +0200 @@ -134,6 +134,31 @@ #define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */ #define HV_TNCRS_LOWER PHY_REG(778, 30) +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +#define LCD_CFG_PHY_ADDR_BIT 0x0020 /* Phy address bit 
from LCD Config word */ + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ + /* * Additional interrupts need to be handled for ICH family: * DSW = The FW changed the status of the DISSW bit in FWSM @@ -163,5 +188,6 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); - +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_config); #endif diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_mac.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_mac.c --- linux-2.6.28/ubuntu/e1000e-next/e1000_mac.c 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_mac.c 2009-10-01 00:53:10.000000000 +0200 @@ -1848,7 +1848,7 @@ * Verify that when not using auto-negotiation that MDI/MDIx is correctly * set, which is forced to MDI mode only. **/ -s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_phy.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_phy.c --- linux-2.6.28/ubuntu/e1000e-next/e1000_phy.c 2010-05-06 11:29:15.000000000 +0200 +++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_phy.c 2009-10-01 00:53:10.000000000 +0200 @@ -110,20 +110,30 @@ goto out; /* - * If the PHY ID is still unknown, we may have an 82577 without link. - * We will try again after setting Slow MDIC mode. No harm in trying - * again in this case since the PHY ID is unknown at this point anyway + * If the PHY ID is still unknown, we may have an 82577 + * without link. We will try again after setting Slow MDIC + * mode. No harm in trying again in this case since the PHY + * ID is unknown at this point anyway. */ + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; ret_val = e1000_set_mdio_slow_mode_hv(hw, true); if (ret_val) goto out; + phy->ops.release(hw); retry_count++; } out: /* Revert to MDIO fast mode, if applicable */ - if (retry_count) + if (retry_count) { + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + phy->ops.release(hw); + } return ret_val; } @@ -316,105 +326,172 @@ } /** - * e1000e_read_phy_reg_igp - Read igp PHY register + * __e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired + * and stores the retrieved information in data. Release any acquired * semaphores before exiting. 
**/ -s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { s32 ret_val = E1000_SUCCESS; - if (!(hw->phy.ops.acquire)) - goto out; + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } if (offset > MAX_PHY_MULTI_PAGE_REG) { ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } + if (ret_val) + goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); - +release: + if (!locked) + hw->phy.ops.release(hw); out: return ret_val; } +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} /** * e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ -s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { s32 ret_val = E1000_SUCCESS; - if (!(hw->phy.ops.acquire)) - goto out; + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } if (offset > MAX_PHY_MULTI_PAGE_REG) { ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } + if (ret_val) + goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); +release: + if (!locked) + hw->phy.ops.release(hw); out: return ret_val; } /** - * e1000e_read_kmrn_reg - Read kumeran register + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then reads the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release any acquired semaphores before exiting. **/ -s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { u32 kmrnctrlsta; s32 ret_val = E1000_SUCCESS; - if (!(hw->phy.ops.acquire)) - goto out; + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; @@ -425,46 +502,111 @@ kmrnctrlsta = er32(KMRNCTRLSTA); *data = (u16)kmrnctrlsta; - hw->phy.ops.release(hw); + if (!locked) + hw->phy.ops.release(hw); out: return ret_val; } /** - * e1000e_write_kmrn_reg - Write kumeran register + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then write the data to PHY register * at the offset using the kumeran interface. Release any acquired semaphores * before exiting. 
**/ -s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { u32 kmrnctrlsta; s32 ret_val = E1000_SUCCESS; - if (!(hw->phy.ops.acquire)) - goto out; + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); udelay(2); - hw->phy.ops.release(hw); + + if (!locked) + hw->phy.ops.release(hw); out: return ret_val; } /** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * @@ -1495,7 +1637,6 @@ case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: - case e1000_phy_82577: offset = M88E1000_PHY_SPEC_STATUS; mask = M88E1000_PSSR_DOWNSHIFT; break; @@ -1685,15 +1826,15 @@ * it across the board. */ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); - if (ret_val) + if (ret_val) { /* * If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
*/ udelay(usec_interval); - ret_val = e1e_rphy(hw, PHY_STATUS, - &phy_status); + } + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_LINK_STATUS) @@ -1736,13 +1877,13 @@ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE + 1) { + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { ret_val = E1000_ERR_PHY; goto out; } phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index+1]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -2259,6 +2400,10 @@ u32 page = offset >> IGP_PAGE_SHIFT; u32 page_shift = 0; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, @@ -2266,10 +2411,6 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2289,18 +2430,15 @@ /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); - if (ret_val) { - hw->phy.ops.release(hw); + if (ret_val) goto out; - } } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); - out: + hw->phy.ops.release(hw); return ret_val; } @@ -2321,6 +2459,10 @@ u32 page = offset >> IGP_PAGE_SHIFT; u32 page_shift = 0; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, @@ -2328,10 +2470,6 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2351,17 +2489,14 @@ /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); - if (ret_val) { - hw->phy.ops.release(hw); + if (ret_val) goto out; - } } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); - out: + hw->phy.ops.release(hw); return ret_val; } @@ -2380,6 +2515,10 @@ s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, @@ -2387,10 +2526,6 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2399,17 +2534,14 @@ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); - if (ret_val) { - hw->phy.ops.release(hw); + if (ret_val) goto out; - } } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); - out: + hw->phy.ops.release(hw); return ret_val; } @@ -2427,6 +2559,10 @@ s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = 
e1000_access_phy_wakeup_reg_bm(hw, offset, &data, @@ -2434,10 +2570,6 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2445,18 +2577,15 @@ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); - if (ret_val) { - hw->phy.ops.release(hw); + if (ret_val) goto out; - } } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release(hw); - out: + hw->phy.ops.release(hw); return ret_val; } @@ -2476,6 +2605,8 @@ * 3) Write the address using the address opcode (0x11) * 4) Read or write the data using the data opcode (0x12) * 5) Restore 769_17.2 to its original value + * + * Assumes semaphore already acquired. **/ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read) @@ -2483,20 +2614,12 @@ s32 ret_val; u16 reg = BM_PHY_REG_NUM(offset); u16 phy_reg = 0; - u8 phy_acquired = 1; /* Gig must be disabled for MDIO accesses to page 800 */ if ((hw->mac.type == e1000_pchlan) && (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) e_dbg("Attempting to access page 800 while gig enabled.\n"); - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) { - e_dbg("Could not acquire PHY\n"); - phy_acquired = 0; - goto out; - } - /* All operations in this function are phy address 1 */ hw->phy.addr = 1; @@ -2568,8 +2691,6 @@ } out: - if (phy_acquired == 1) - hw->phy.ops.release(hw); return ret_val; } @@ -2610,52 +2731,63 @@ msleep(1); } +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + * @slow: true for slow mode, false for normal mode + * + * Assumes semaphore already acquired. + **/ s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) { s32 ret_val = E1000_SUCCESS; u16 data = 0; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ hw->phy.addr = 1; ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release(hw); - return ret_val; - } + if (ret_val) + goto out; + ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, (0x2180 | (slow << 10))); + if (ret_val) + goto out; /* dummy read when reverting to fast mode - throw away result */ if (!slow) - e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); - - hw->phy.ops.release(hw); + ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); +out: return ret_val; } /** - * e1000_read_phy_reg_hv - Read HV PHY register + * __e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired + * and stores the retrieved information in data. Release any acquired * semaphore before exiting. 
**/ -s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); bool in_slow_mode = false; + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Workaround failure in MDIO access while cable is disconnected */ if ((hw->phy.type == e1000_phy_82577) && !(er32(STATUS) & E1000_STATUS_LU)) { @@ -2679,62 +2811,89 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (page == HV_INTC_FC_PAGE_START) page = 0; if (reg > MAX_PHY_MULTI_PAGE_REG) { - if ((hw->phy.type != e1000_phy_82578) || - ((reg != I82578_ADDR_REG) && - (reg != I82578_ADDR_REG + 1))) { - u32 phy_addr = hw->phy.addr; - - hw->phy.addr = 1; - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (page << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - hw->phy.addr = phy_addr; - } + u32 phy_addr = hw->phy.addr; + + hw->phy.addr = 1; + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (page << IGP_PAGE_SHIFT)); + hw->phy.addr = phy_addr; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); - hw->phy.ops.release(hw); - out: /* Revert to MDIO fast mode, if applicable */ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + if (!locked) + hw->phy.ops.release(hw); + return ret_val; } /** - * e1000_write_phy_reg_hv - Write HV PHY register + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/ -s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); bool in_slow_mode = false; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } /* Workaround failure in MDIO access while cable is disconnected */ if ((hw->phy.type == e1000_phy_82577) && @@ -2759,11 +2918,9 @@ goto out; } - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + /* The LCD Config workaround provides the phy address to use */ + if (dev_spec->nvm_lcd_config_enabled == false) + hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (page == HV_INTC_FC_PAGE_START) page = 0; @@ -2778,50 +2935,67 @@ ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { u16 data2 = 0x7EFF; - hw->phy.ops.release(hw); ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, &data2, false); if (ret_val) goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; } if (reg > MAX_PHY_MULTI_PAGE_REG) { - if ((hw->phy.type != e1000_phy_82578) || - ((reg != I82578_ADDR_REG) && - (reg != I82578_ADDR_REG + 1))) { - u32 phy_addr = hw->phy.addr; - - hw->phy.addr = 1; - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (page << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - hw->phy.addr = phy_addr; - } + u32 phy_addr = hw->phy.addr; + + hw->phy.addr = 1; + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (page << IGP_PAGE_SHIFT)); + hw->phy.addr = phy_addr; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); - hw->phy.ops.release(hw); out: /* Revert to MDIO fast mode, if applicable */ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + if (!locked) + hw->phy.ops.release(hw); + return ret_val; } /** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true); +} + +/** * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page * @page: page to be accessed **/ @@ -2842,10 +3016,9 @@ * @data: pointer to the data to be read or written * @read: determines if operation is read or written * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retreived information in data. Release any acquired - * semaphores before exiting. 
@@ -2842,10 +3016,9 @@
  * @data: pointer to the data to be read or written
  * @read: determines if operation is read or written
  *
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retreived information in data. Release any acquired
- * semaphores before exiting. Note that the procedure to read these regs
- * uses the address port and data port to read/write.
+ * Reads the PHY register at offset and stores the retreived information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to read these regs uses the address port and data port to read/write.
 **/
 static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
 					  u16 *data, bool read)
@@ -2853,20 +3026,12 @@
 	s32 ret_val;
 	u32 addr_reg = 0;
 	u32 data_reg = 0;
-	u8 phy_acquired = 1;
 
 	/* This takes care of the difference with desktop vs mobile phy */
 	addr_reg = (hw->phy.type == e1000_phy_82578) ?
 		   I82578_ADDR_REG : I82577_ADDR_REG;
 	data_reg = addr_reg + 1;
 
-	ret_val = hw->phy.ops.acquire(hw);
-	if (ret_val) {
-		e_dbg("Could not acquire PHY\n");
-		phy_acquired = 0;
-		goto out;
-	}
-
 	/* All operations in this function are phy address 2 */
 	hw->phy.addr = 2;
 
@@ -2889,8 +3054,6 @@
 	}
 
 out:
-	if (phy_acquired == 1)
-		hw->phy.ops.release(hw);
 	return ret_val;
 }
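One detail worth noting in the e1000_phy.c hunks: error paths keep funnelling through the shared out: label so the 82577 slow-MDIO workaround is always reverted, while the semaphore release has moved out to the wrapper. A minimal standalone illustration of that set-then-always-restore bracket (illustrative names only, not driver code):

#include <stdio.h>

static int mdio_slow;                 /* stand-in for the slow-MDIO mode state */

static int set_slow_mode(int on)      /* stands in for e1000_set_mdio_slow_mode_hv() */
{
	mdio_slow = on;
	return 0;
}

static int do_access(int fail)        /* pretend hardware access */
{
	return fail ? -1 : 0;
}

/* every exit funnels through "out", so the mode is restored on all paths */
static int read_with_workaround(int inject_fault)
{
	int ret, in_slow_mode = 0;

	ret = set_slow_mode(1);
	if (ret)
		goto out;
	in_slow_mode = 1;

	ret = do_access(inject_fault);
	if (ret)
		goto out;

out:
	if (in_slow_mode)
		set_slow_mode(0);     /* revert to fast mode, success or failure */
	return ret;
}

int main(void)
{
	printf("%d %d\n", read_with_workaround(0), read_with_workaround(1));
	return 0;
}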
diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_phy.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_phy.h
--- linux-2.6.28/ubuntu/e1000e-next/e1000_phy.h	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_phy.h	2009-10-01 00:53:10.000000000 +0200
@@ -51,13 +51,17 @@
 s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
 s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 s32 e1000e_setup_copper_link(struct e1000_hw *hw);
 s32 e1000_wait_autoneg(struct e1000_hw *hw);
 s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
 s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
@@ -74,7 +78,9 @@
 s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
@@ -163,6 +169,13 @@
 #define BM_CS_STATUS_SPEED_MASK          0xC000
 #define BM_CS_STATUS_SPEED_1000          0x8000
 
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                      26
+#define HV_M_STATUS_AUTONEG_COMPLETE     0x1000
+#define HV_M_STATUS_SPEED_MASK           0x0300
+#define HV_M_STATUS_SPEED_1000           0x0200
+#define HV_M_STATUS_LINK_UP              0x0040
+
 #define IGP01E1000_PHY_PCS_INIT_REG      0x00B4
 #define IGP01E1000_PHY_POLARITY_MASK     0x0078
 
@@ -209,8 +222,7 @@
 #define E1000_KMRNCTRLSTA_INBAND_PARAM   0x9 /* Kumeran InBand Parameters */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK    0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG      0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE      0x140E
-#define E1000_KMRNCTRLSTA_K1_DISABLE     0x1400
+#define E1000_KMRNCTRLSTA_K1_ENABLE      0x0002
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL  0x10
 #define IFE_PHY_SPECIAL_CONTROL          0x11 /* 100BaseTx PHY Special Control */
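The new HV_M_STATUS bits added to e1000_phy.h describe the 82577 mobile PHY's link state. A small standalone decode using just those masks; the register value here is a made-up example, and the driver's real consumer of these bits lives elsewhere:

#include <stdint.h>
#include <stdio.h>

#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_MASK       0x0300
#define HV_M_STATUS_SPEED_1000       0x0200
#define HV_M_STATUS_LINK_UP          0x0040

int main(void)
{
	uint16_t status = 0x1240;   /* example raw value, not read from hardware */

	printf("link %s, autoneg %s, %s\n",
	       (status & HV_M_STATUS_LINK_UP) ? "up" : "down",
	       (status & HV_M_STATUS_AUTONEG_COMPLETE) ? "done" : "pending",
	       ((status & HV_M_STATUS_SPEED_MASK) == HV_M_STATUS_SPEED_1000) ?
	       "1000 Mb/s" : "10/100 Mb/s");
	return 0;
}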
diff -ur linux-2.6.28/ubuntu/e1000e-next/e1000_regs.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_regs.h
--- linux-2.6.28/ubuntu/e1000e-next/e1000_regs.h	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/e1000_regs.h	2009-10-01 00:53:10.000000000 +0200
@@ -237,10 +237,6 @@
 #define E1000_ICTXQMTC  0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
 #define E1000_ICRXDMTC  0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
 #define E1000_ICRXOC    0x04124 /* Interrupt Cause Receiver Overrun Count */
-/*
- * The CRC offset register is undocumented because it is for future use and
- * may change in the future.
- */
 #define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
 
 #define E1000_PCS_CFG0  0x04200 /* PCS Configuration 0 - RW */
diff -ur linux-2.6.28/ubuntu/e1000e-next/ethtool.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/ethtool.c
--- linux-2.6.28/ubuntu/e1000e-next/ethtool.c	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/ethtool.c	2009-10-01 00:53:10.000000000 +0200
@@ -257,8 +257,7 @@
 	ecmd->advertising = hw->phy.autoneg_advertised;
 	if (adapter->fc_autoneg) {
 		if (hw->mac.type == e1000_pchlan) {
-			/* Workaround h/w hang when Tx flow control
-			 * enabled */
+			/* Workaround h/w hang when Tx fc enabled */
 			hw->fc.requested_mode = e1000_fc_rx_pause;
 		} else {
 			hw->fc.requested_mode = e1000_fc_default;
@@ -340,10 +339,18 @@
 		hw->fc.current_mode = hw->fc.requested_mode;
 
-		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
-			  hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
+		if (hw->phy.media_type == e1000_media_type_fiber) {
+			retval = hw->mac.ops.setup_link(hw);
+			/* implicit goto out */
+		} else {
+			retval = e1000e_force_mac_fc(hw);
+			if (retval)
+				goto out;
+			e1000e_set_fc_watermarks(hw);
+		}
 	}
 
+out:
 	clear_bit(__E1000_RESETTING, &adapter->state);
 	return retval;
 }
diff -ur linux-2.6.28/ubuntu/e1000e-next/hw.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/hw.h
--- linux-2.6.28/ubuntu/e1000e-next/hw.h	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/hw.h	2009-10-01 00:53:10.000000000 +0200
@@ -518,11 +518,13 @@
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_info)(struct e1000_hw *);
 	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
 	void (*release)(struct e1000_hw *);
 	s32  (*reset)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
 	void (*power_up)(struct e1000_hw *);
 	void (*power_down)(struct e1000_hw *);
 };
@@ -653,6 +655,10 @@
 	u32 smb_counter;
 };
 
+struct e1000_dev_spec_80003es2lan {
+	bool mdic_wa_enable;
+};
+
 struct e1000_shadow_ram {
 	u16 value;
 	bool modified;
@@ -663,6 +669,8 @@
 struct e1000_dev_spec_ich8lan {
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
+	bool nvm_lcd_config_enabled;
 };
 
 struct e1000_hw {
@@ -680,6 +688,7 @@
 	union {
 		struct e1000_dev_spec_82571	_82571;
+		struct e1000_dev_spec_80003es2lan _80003es2lan;
 		struct e1000_dev_spec_ich8lan	ich8lan;
 	} dev_spec;
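hw.h grows both the locked read/write function pointers and a new per-family member of the dev_spec union, so the 80003es2lan state (the MDIC workaround flag) sits alongside the ich8lan state without enlarging every adapter. A standalone sketch of that union-per-family idea, with invented type names:

#include <stdbool.h>
#include <stdio.h>

enum mac_type { mac_82571, mac_80003es2lan, mac_ich8lan };

struct dev_spec_80003es2lan { bool mdic_wa_enable; };
struct dev_spec_ich8lan     { bool nvm_k1_enabled; bool nvm_lcd_config_enabled; };

struct fake_hw {
	enum mac_type type;
	union {                     /* only one family's state is live at a time */
		struct dev_spec_80003es2lan _80003es2lan;
		struct dev_spec_ich8lan     ich8lan;
	} dev_spec;
};

int main(void)
{
	struct fake_hw hw = { .type = mac_80003es2lan };

	hw.dev_spec._80003es2lan.mdic_wa_enable = true;   /* family-specific flag */
	printf("mdic workaround: %d\n", hw.dev_spec._80003es2lan.mdic_wa_enable);
	return 0;
}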
diff -ur linux-2.6.28/ubuntu/e1000e-next/kcompat.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/kcompat.c
--- linux-2.6.28/ubuntu/e1000e-next/kcompat.c	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/kcompat.c	2009-10-01 00:53:10.000000000 +0200
@@ -375,14 +375,6 @@
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
 #ifdef NAPI
-/* this function returns the true netdev of the napi struct */
-struct net_device * napi_to_netdev(struct napi_struct *napi)
-{
-	struct adapter_struct *adapter = container_of(napi,
-						      struct adapter_struct,
-						      napi);
-	return adapter->netdev;
-}
 
 int __kc_adapter_clean(struct net_device *netdev, int *budget)
 {
diff -ur linux-2.6.28/ubuntu/e1000e-next/kcompat.h linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/kcompat.h
--- linux-2.6.28/ubuntu/e1000e-next/kcompat.h	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/kcompat.h	2009-10-01 00:53:10.000000000 +0200
@@ -78,9 +78,13 @@
 	u16 entry; /* driver uses to specify entry, OS writes */
 };
 #endif
+#undef pci_enable_msi
 #define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
 #define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
 #define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
 #define pci_disable_msix(a) do {} while (0)
 #define msi_remove_pci_irq_vectors(a) do {} while (0)
 #endif /* CONFIG_PCI_MSI */
@@ -105,6 +109,8 @@
 #if ( GCC_VERSION < 3000 )
 #define _Bool char
 #endif
+#else
+#define _Bool char
 #endif
 #ifndef bool
 #define bool _Bool
 #endif
@@ -165,6 +171,12 @@
 #define NETDEV_TX_LOCKED -1
 #endif
 
+#ifdef CONFIG_PCI_IOV
+#define VMDQ_P(p)   ((p) + adapter->num_vfs)
+#else
+#define VMDQ_P(p)   (p)
+#endif
+
 #ifndef SKB_DATAREF_SHIFT
 /* if we do not have the infrastructure to detect if skb_header is cloned
    just return false in all cases */
@@ -182,6 +194,10 @@
 #define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
 #endif
 
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
 #ifndef CHECKSUM_PARTIAL
 #define CHECKSUM_PARTIAL CHECKSUM_HW
 #define CHECKSUM_COMPLETE CHECKSUM_HW
@@ -266,7 +282,6 @@
 #define dca3_get_tag(a,b) dca_get_tag(b)
 #endif
 
-
 /*****************************************************************************/
 /* Installations with ethtool version without eeprom, adapter id, or statistics
  * support */
@@ -782,6 +797,13 @@
 #define cpu_relax()	rep_nop()
 #endif
 
+struct vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	unsigned short h_vlan_proto;
+	unsigned short h_vlan_TCI;
+	unsigned short h_vlan_encapsulated_proto;
+};
 #endif /* 2.4.13 => 2.4.10 */
 
 /*****************************************************************************/
@@ -800,6 +822,7 @@
 
 /* we won't support NAPI on less than 2.4.20 */
 #ifdef NAPI
+#undef NAPI
 #undef CONFIG_E1000E_NAPI
 #endif
 
@@ -835,7 +858,9 @@
 	}
 }
 #endif
-
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
 #ifndef netif_poll_enable
 #define netif_poll_enable(x) _kc_netif_poll_enable(x)
 static inline void _kc_netif_poll_enable(struct net_device *netdev)
@@ -866,9 +891,7 @@
 /*****************************************************************************/
 /* 2.5.71 => 2.4.x */
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
-#include 
 #define sk_protocol protocol
-
 #define pci_get_device pci_find_device
 #endif /* 2.5.70 => 2.4.x */
 
@@ -1076,6 +1099,13 @@
 		memset(dst, 0, len);
 	}
 }
+#define random_ether_addr _kc_random_ether_addr
+static inline void _kc_random_ether_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
 #endif /* < 2.6.6 */
 
 /*****************************************************************************/
@@ -1159,6 +1189,9 @@
 #ifndef __le64
 #define __le64 u64
 #endif
+#ifndef __be16
+#define __be16 u16
+#endif
 
 #ifdef pci_dma_mapping_error
 #undef pci_dma_mapping_error
@@ -1169,7 +1202,6 @@
 {
 	return dma_addr == 0;
 }
-
 #endif /* < 2.6.9 */
 
 /*****************************************************************************/
@@ -1280,6 +1312,7 @@
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
 #define DEFINE_MUTEX(x)	DECLARE_MUTEX(x)
 #define mutex_lock(x)	down_interruptible(x)
 #define mutex_unlock(x)	up(x)
@@ -1485,6 +1518,8 @@
 	return (struct udphdr *)skb_transport_header(skb);
 }
 #endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
 #endif /* < 2.6.22 */
 
 /*****************************************************************************/
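Most of the kcompat.h additions follow the same trick as NETIF_F_SCTP_CSUM above: when an older kernel lacks a feature flag, define it to 0 so the feature-setting code still compiles and the OR becomes a no-op. A minimal illustration; the macro name is reused from the hunk, everything else is made up:

#include <stdio.h>

/* fallback for kernels that predate the flag */
#ifndef NETIF_F_SCTP_CSUM
#define NETIF_F_SCTP_CSUM 0
#endif

int main(void)
{
	unsigned long features = 0;

	features |= NETIF_F_SCTP_CSUM;    /* harmless when the flag is 0 */
	printf("features=%lu\n", features);
	return 0;
}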
@@ -1501,19 +1536,22 @@
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
 /* NAPI API changes in 2.6.24 break everything */
 struct napi_struct {
 	/* used to look up the real NAPI polling routine */
 	int (*poll)(struct napi_struct *, int);
-	struct net_device poll_dev;
+	struct net_device *dev;
 	int weight;
 };
+#endif
+
 #ifdef NAPI
 extern int __kc_adapter_clean(struct net_device *, int *);
-extern struct net_device * napi_to_netdev(struct napi_struct *);
-#define napi_to_poll_dev(napi) napi_to_netdev(napi)
-#define napi_enable(napi) netif_poll_enable(adapter->netdev)
-#define napi_disable(napi) netif_poll_disable(adapter->netdev)
+#define napi_to_poll_dev(_napi) (_napi)->dev
+#define napi_enable(_napi) netif_poll_enable((_napi)->dev)
+#define napi_disable(_napi) netif_poll_disable((_napi)->dev)
 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
 	do { \
 		struct napi_struct *__napi = (_napi); \
@@ -1521,13 +1559,22 @@
 		_netdev->weight = (_weight); \
 		__napi->poll = &(_poll); \
 		__napi->weight = (_weight); \
+		__napi->dev = (_netdev); \
 		netif_poll_disable(_netdev); \
 	} while (0)
 #define netif_napi_del(_a) do {} while (0)
-#define napi_schedule_prep(napi) netif_rx_schedule_prep(napi_to_netdev(napi))
-#define napi_schedule(napi) netif_rx_schedule(napi_to_poll_dev(napi))
-#define __napi_schedule(napi) __netif_rx_schedule(napi_to_poll_dev(napi))
-#define napi_complete(napi) netif_rx_complete(napi_to_poll_dev(napi))
+#define napi_schedule_prep(_napi) netif_rx_schedule_prep((_napi)->dev)
+#define napi_schedule(_napi) netif_rx_schedule(napi_to_poll_dev(_napi))
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+#ifndef NETIF_F_GRO
+#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
+#else
+#define napi_complete(_napi) \
+	do { \
+		napi_gro_flush(_napi); \
+		netif_rx_complete(napi_to_poll_dev(_napi)); \
+	} while (0)
+#endif /* NETIF_F_GRO */
 #else /* NAPI */
 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
 	do { \
@@ -1536,6 +1583,7 @@
 		_netdev->weight = (_weight); \
 		__napi->poll = &(_poll); \
 		__napi->weight = (_weight); \
+		__napi->dev = (_netdev); \
 	} while (0)
 #define netif_napi_del(_a) do {} while (0)
 #endif /* NAPI */
@@ -1690,15 +1738,20 @@
 #endif /* IXGBE_FCOE */
 extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
 #define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
+#define skb_record_rx_queue(a, b) do {} while (0)
 #else
 #define HAVE_ASPM_QUIRKS
 #endif /* < 2.6.30 */
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
 #else
 #ifndef HAVE_NETDEV_STORAGE_ADDRESS
 #define HAVE_NETDEV_STORAGE_ADDRESS
 #endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
 #endif /* < 2.6.31 */
 #endif /* _KCOMPAT_H_ */
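The back-ported napi_struct now stores the owning net_device, so the napi_enable/disable and schedule macros can resolve the device per NAPI instance instead of relying on the single-adapter napi_to_netdev() helper that kcompat.c drops. A toy, self-contained version of that idea with stand-in types; nothing here is the kernel API:

#include <stdio.h>

struct fake_netdev { const char *name; };

/* stand-in for the back-ported napi_struct: carrying the dev pointer lets
 * the enable/disable helpers find the right device for each instance */
struct fake_napi {
	int (*poll)(struct fake_napi *, int);
	struct fake_netdev *dev;
	int weight;
};

#define fake_napi_add(nd, napi, p, w) \
	do { (napi)->poll = (p); (napi)->weight = (w); (napi)->dev = (nd); } while (0)
#define fake_napi_enable(napi) printf("enable polling on %s\n", (napi)->dev->name)

static int dummy_poll(struct fake_napi *n, int budget) { (void)n; return budget; }

int main(void)
{
	struct fake_netdev nd = { "eth0" };
	struct fake_napi napi;

	fake_napi_add(&nd, &napi, dummy_poll, 64);
	fake_napi_enable(&napi);
	return 0;
}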
diff -ur linux-2.6.28/ubuntu/e1000e-next/netdev.c linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/netdev.c
--- linux-2.6.28/ubuntu/e1000e-next/netdev.c	2010-05-06 11:29:15.000000000 +0200
+++ linux-2.6.28-e1000e-next-full-speed/ubuntu/e1000e-next/netdev.c	2010-05-05 19:38:08.000000000 +0200
@@ -44,7 +44,9 @@
 #endif
 #include 
 #include 
+#ifdef NETIF_F_HW_VLAN_TX
 #include 
+#endif
 
 #include "e1000.h"
 
@@ -54,7 +56,7 @@
 #define DRV_EXTRAVERSION
 #endif
 
-#define DRV_VERSION "1.0.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "1.0.15" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e-next";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -295,6 +297,7 @@
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_FLASH
 				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
 				  | FLAG_APME_IN_WUC,
 	.pba			= 26,
 	.max_hw_frame_size	= 4096,
@@ -345,19 +348,23 @@
 	skb->protocol = eth_type_trans(skb, netdev);
 
 #ifdef CONFIG_E1000E_NAPI
+#ifdef NETIF_F_HW_VLAN_TX
 	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
 		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 					 le16_to_cpu(vlan));
 	else
+#endif
 #ifdef NETIF_F_GRO
 		napi_gro_receive(&adapter->napi, skb);
 #else
 		netif_receive_skb(skb);
 #endif /* NETIF_F_GRO */
 #else
+#ifdef NETIF_F_HW_VLAN_TX
 	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
 		ret = vlan_hwaccel_rx(skb, adapter->vlgrp,
 				      le16_to_cpu(vlan));
 	else
+#endif
 		ret = netif_rx(skb);
 	if (unlikely(ret == NET_RX_DROP))
 		adapter->rx_dropped_backlog++;
@@ -847,15 +854,27 @@
 	}
 }
 
-static void e1000_print_tx_hang(struct e1000_adapter *adapter)
+static void e1000_print_hw_hang(struct work_struct *work)
 {
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     print_hang_task);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int i = tx_ring->next_to_clean;
 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_status, phy_1000t_status, phy_ext_status;
+	u16 pci_status;
+
+	e1e_rphy(hw, PHY_STATUS, &phy_status);
+	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
 
-	/* detected Tx unit hang */
-	e_err("Detected Tx Unit Hang:\n"
+	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+	/* detected Hardware unit hang */
+	e_err("Detected Hardware Unit Hang:\n"
 	      " TDH <%x>\n"
 	      " TDT <%x>\n"
 	      " next_to_use <%x>\n"
@@ -864,7 +883,12 @@
 	      " time_stamp <%lx>\n"
 	      " next_to_watch <%x>\n"
 	      " jiffies <%lx>\n"
-	      " next_to_watch.status <%x>\n",
+	      " next_to_watch.status <%x>\n"
+	      "MAC Status <%x>\n"
+	      "PHY Status <%x>\n"
+	      "PHY 1000BASE-T Status <%x>\n"
+	      "PHY Extended Status <%x>\n"
+	      "PCI Status <%x>\n",
 	      readl(adapter->hw.hw_addr + tx_ring->head),
 	      readl(adapter->hw.hw_addr + tx_ring->tail),
 	      tx_ring->next_to_use,
@@ -872,7 +896,16 @@
 	      tx_ring->buffer_info[eop].time_stamp,
 	      eop,
 	      jiffies,
-	      eop_desc->upper.fields.status);
+	      eop_desc->upper.fields.status,
+	      er32(STATUS),
+	      phy_status,
+	      phy_1000t_status,
+	      phy_ext_status,
+	      pci_status);
+
+	/* Suggest workaround for known h/w issue */
+	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
+		e_err("Try turning off Tx pause (flow control) via ethtool\n");
 }
 
 /**
@@ -966,7 +999,7 @@
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
 			       + (adapter->tx_timeout_factor * HZ)) &&
 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-			e1000_print_tx_hang(adapter);
+			schedule_work(&adapter->print_hang_task);
 			netif_stop_queue(netdev);
 		}
 	}
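e1000_print_tx_hang() becomes e1000_print_hw_hang(), a work item: the Tx-clean path only queues it, and the handler recovers its adapter with container_of() before doing the extra PHY and PCI config-space reads, presumably because those accesses are too heavy (and may sleep) for the context that detects the hang. The container_of() retrieval pattern in isolation, as a standalone sketch with made-up types:

#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*func)(struct work *); };

struct adapter {
	int tx_hangs;
	struct work print_hang_task;   /* embedded, like adapter->print_hang_task */
};

static void print_hang(struct work *w)
{
	/* recover the owning adapter from the embedded work item */
	struct adapter *a = my_container_of(w, struct adapter, print_hang_task);

	printf("hang #%d: slow diagnostics would run here\n", ++a->tx_hangs);
}

int main(void)
{
	struct adapter a = { .print_hang_task = { .func = print_hang } };

	/* the fast path would only queue this; here we invoke it directly */
	a.print_hang_task.func(&a.print_hang_task);
	return 0;
}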
@@ -2452,6 +2485,7 @@
 		work_done = budget;
 
 #ifndef HAVE_NETDEV_NAPI_LIST
+	/* if netdev is disabled we need to stop polling */
 	if (!netif_running(adapter->netdev))
 		work_done = 0;
 
@@ -2475,6 +2509,7 @@
 }
 #endif /* CONFIG_E1000E_NAPI */
 
+#ifdef NETIF_F_HW_VLAN_TX
 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2616,6 +2651,7 @@
 	}
 }
 
+#endif /* NETIF_F_HW_VLAN_TX */
 static void e1000_init_manageability(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -3073,7 +3109,9 @@
 {
 	e1000_set_multi(adapter->netdev);
 
+#ifdef NETIF_F_HW_VLAN_TX
 	e1000_restore_vlan(adapter);
+#endif
 	e1000_init_manageability(adapter);
 
 	e1000_configure_tx(adapter);
@@ -3196,17 +3234,31 @@
 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 	 * - the full Rx FIFO size minus two full frames
 	 */
-	if ((adapter->flags & FLAG_HAS_ERT) &&
-	    (adapter->netdev->mtu > ETH_DATA_LEN))
-		hwm = min(((pba << 10) * 9 / 10),
-			  ((pba << 10) - (E1000_ERT_2048 << 3)));
-	else
-		hwm = min(((pba << 10) * 9 / 10),
-			  ((pba << 10) - (2 * adapter->max_frame_size)));
+	if (hw->mac.type == e1000_pchlan) {
+		/*
+		 * Workaround PCH LOM adapter hangs with certain network
+		 * loads. If hangs persist, try disabling Tx flow control.
+		 */
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			fc->high_water = 0x3500;
+			fc->low_water = 0x1500;
+		} else {
+			fc->high_water = 0x5000;
+			fc->low_water = 0x3000;
+		}
+	} else {
+		if ((adapter->flags & FLAG_HAS_ERT) &&
+		    (adapter->netdev->mtu > ETH_DATA_LEN))
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - (E1000_ERT_2048 << 3)));
+		else
+			hwm = min(((pba << 10) * 9 / 10),
				  ((pba << 10) - (2*adapter->max_frame_size)));
 
-	fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-	fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
-	fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
+		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+		fc->low_water = (fc->high_water - (2*adapter->max_frame_size));
+		fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
+	}
 
 	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
 		fc->pause_time = 0xFFFF;
@@ -3232,11 +3284,17 @@
 	if (mac->ops.init_hw(hw))
 		e_err("Hardware Error\n");
 
+	/* additional part of the flow-control workaround above */
+	if (hw->mac.type == e1000_pchlan)
+		ew32(FCRTV_PCH, 0x1000);
+
+#ifdef NETIF_F_HW_VLAN_TX
 	e1000_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	ew32(VET, ETH_P_8021Q);
+#endif
 
 	e1000e_reset_adaptive(hw);
 	e1000_get_phy_info(hw);
@@ -3570,11 +3628,13 @@
 	e1000e_power_up_phy(adapter);
 
+#ifdef NETIF_F_HW_VLAN_TX
 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		e1000_update_mng_vlan(adapter);
 
+#endif
 	/*
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now open
 	 */
@@ -3662,6 +3722,7 @@
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
 
+#ifdef NETIF_F_HW_VLAN_TX
 	/*
 	 * kill manageability vlan ID if supported, but not if a vlan with
 	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 
+#endif
 	/*
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
@@ -3781,24 +3843,24 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
-		adapter->stats.scc += phy_data;
+		if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+			adapter->stats.scc += phy_data;
 
 		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
-		adapter->stats.ecol += phy_data;
+		if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+			adapter->stats.ecol += phy_data;
 
 		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
-		adapter->stats.mcc += phy_data;
+		if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+			adapter->stats.mcc += phy_data;
 
 		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
-		adapter->stats.latecol += phy_data;
+		if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+			adapter->stats.latecol += phy_data;
 
 		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
-		adapter->stats.dc += phy_data;
+		if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+			adapter->stats.dc += phy_data;
 	} else {
 		adapter->stats.scc += er32(SCC);
 		adapter->stats.ecol += er32(ECOL);
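For the PCH flow-control workaround above, fixed high/low watermarks replace the usual computation from the packet buffer size. The sketch below just evaluates the legacy formula for comparison; the 8-byte-granularity mask is approximated with a literal and the inputs are example numbers, not values read from hardware:

#include <stdio.h>

int main(void)
{
	unsigned pba = 26;                 /* packet buffer size in KB, example */
	unsigned max_frame = 1522;         /* bytes, example */
	unsigned fifo = pba << 10;
	unsigned hwm = fifo * 9 / 10;
	unsigned alt = fifo - 2 * max_frame;
	unsigned high = (hwm < alt ? hwm : alt) & ~7u;   /* 8-byte granularity */
	unsigned low = (high - 2 * max_frame) & ~7u;

	printf("legacy formula: high=0x%x low=0x%x\n", high, low);
	printf("PCH fixed values from the hunk: high=0x5000 low=0x3000 (std MTU)\n");
	return 0;
}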
@@ -3826,8 +3888,8 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
-		hw->mac.collision_delta = phy_data;
+		if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+			hw->mac.collision_delta = phy_data;
 	} else {
 		hw->mac.collision_delta = er32(COLC);
 	}
@@ -3838,8 +3900,8 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
-		adapter->stats.tncrs += phy_data;
+		if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+			adapter->stats.tncrs += phy_data;
 	} else {
 		if ((hw->mac.type != e1000_82574) &&
 		    (hw->mac.type != e1000_82583))
@@ -4032,10 +4094,12 @@
 		goto link_up;
 	}
 
+#ifdef NETIF_F_HW_VLAN_TX
 	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
 	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
 		e1000_update_mng_vlan(adapter);
 
+#endif
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			bool txb2b = 1;
@@ -4083,7 +4147,7 @@
 			case SPEED_100:
 				txb2b = 0;
 				netdev->tx_queue_len = 100;
-				/* maybe add some timeout factor ? */
+				adapter->tx_timeout_factor = 10;
 				break;
 			}
 
@@ -4523,6 +4587,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u16 length, offset;
 
+#ifdef NETIF_F_HW_VLAN_TX
 	if (vlan_tx_tag_present(skb)) {
 		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		      (adapter->hw.mng_cookie.status &
@@ -4530,6 +4595,7 @@
 			return 0;
 	}
 
+#endif
 	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
 		return 0;
 
@@ -4681,11 +4747,13 @@
 		return NETDEV_TX_BUSY;
 	}
 
+#ifdef NETIF_F_HW_VLAN_TX
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
 	}
 
+#endif
 	first = tx_ring->next_to_use;
 
 	tso = e1000_tso(adapter, skb);
@@ -4788,8 +4856,10 @@
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		msleep(1);
-	/* e1000e_down has a dependency on max_frame_size */
+	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
 	adapter->max_frame_size = max_frame;
+	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
 	if (netif_running(netdev))
 		e1000e_down(adapter);
@@ -4828,9 +4898,6 @@
 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
 
-	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
-	netdev->mtu = new_mtu;
-
 	if (netif_running(netdev))
 		e1000e_up(adapter);
 	else
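The statistics hunks change the UPPER/LOWER PHY counter reads so a value is only accumulated when the LOWER read actually succeeds; the UPPER half is still read first (apparently as part of the required access sequence) but its result is never added. The guard-the-accumulate pattern on its own, with a fake register reader:

#include <stdint.h>
#include <stdio.h>

/* stand-in for e1e_rphy(): non-zero return means the read failed */
static int read_phy(int fail, uint16_t value, uint16_t *out)
{
	if (fail)
		return -1;
	*out = value;
	return 0;
}

int main(void)
{
	uint32_t collisions = 100;
	uint16_t val;

	read_phy(0, 0, &val);            /* UPPER read first, value not accumulated */
	if (!read_phy(0, 7, &val))       /* LOWER read: add only on success */
		collisions += val;

	if (!read_phy(1, 7, &val))       /* failed read: no bogus increment */
		collisions += val;

	printf("collisions=%u\n", collisions);
	return 0;
}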
@@ -4990,7 +5057,7 @@
 	return retval;
 }
 
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5044,7 +5111,7 @@
 	    e1000_media_type_internal_serdes) {
 		/* keep the laser running in D3 */
 		ctrl_ext = er32(CTRL_EXT);
-		ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
 		ew32(CTRL_EXT, ctrl_ext);
 	}
 
@@ -5054,8 +5121,7 @@
 	/* Allow time for pending master requests to run */
 	e1000e_disable_pcie_master(&adapter->hw);
 
-	if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
-	    !(hw->mac.ops.check_mng_mode(hw))) {
+	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 		/* enable wakeup by the PHY */
 		retval = e1000_init_phy_wakeup(adapter, wufc);
 		if (retval)
@@ -5065,20 +5131,17 @@
 			ew32(WUFC, wufc);
 			ew32(WUC, E1000_WUC_PME_EN);
 		}
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
 	} else {
 		ew32(WUC, 0);
 		ew32(WUFC, 0);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
+	*enable_wake = !!wufc;
+
 	/* make sure adapter isn't asleep if manageability is enabled */
-	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-	}
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
+		*enable_wake = true;
 
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
@@ -5091,6 +5154,26 @@
 	pci_disable_device(pdev);
 
+	return 0;
+}
+
+static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
+{
+	if (sleep && wake) {
+		pci_prepare_to_sleep(pdev);
+		return;
+	}
+
+	pci_wake_from_d3(pdev, wake);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
+				    bool wake)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	/*
 	 * The pci-e switch on some quad port adapters will report a
 	 * correctable error when the MAC transitions from D0 to D3. To
@@ -5106,14 +5189,12 @@
 		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));
 
-		pci_set_power_state(pdev, pci_choose_state(pdev, state));
+		e1000_power_off(pdev, sleep, wake);
 
 		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
 	} else {
-		pci_set_power_state(pdev, pci_choose_state(pdev, state));
+		e1000_power_off(pdev, sleep, wake);
 	}
-
-	return 0;
 }
 
 static void e1000e_disable_l1aspm(struct pci_dev *pdev)
@@ -5142,6 +5223,18 @@
 }
 
 #ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake);
+	if (!retval)
+		e1000_complete_shutdown(pdev, true, wake);
+
+	return retval;
+}
+
 static int e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
@@ -5237,7 +5330,12 @@
 #ifndef USE_REBOOT_NOTIFIER
 static void e1000_shutdown(struct pci_dev *pdev)
 {
-	e1000_suspend(pdev, PMSG_SUSPEND);
+	bool wake;
+
+	__e1000_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		e1000_complete_shutdown(pdev, false, wake);
 }
 #else
 static struct pci_driver e1000_driver;
@@ -5304,6 +5402,9 @@
 	netif_device_detach(netdev);
 
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
 	if (netif_running(netdev))
 		e1000e_down(adapter);
 	pci_disable_device(pdev);
@@ -5460,10 +5561,11 @@
 	.ndo_do_ioctl		= e1000_ioctl,
 	.ndo_tx_timeout		= e1000_tx_timeout,
 	.ndo_validate_addr	= eth_validate_addr,
-
+#ifdef NETIF_F_HW_VLAN_TX
 	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= e1000_netpoll,
 #endif
@@ -5597,9 +5699,11 @@
 	netdev->change_mtu		= &e1000_change_mtu;
 	netdev->do_ioctl		= &e1000_ioctl;
 	netdev->tx_timeout		= &e1000_tx_timeout;
+#ifdef NETIF_F_HW_VLAN_TX
 	netdev->vlan_rx_register	= e1000_vlan_rx_register;
 	netdev->vlan_rx_add_vid		= e1000_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid;
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller		= e1000_netpoll;
 #endif
@@ -5625,6 +5729,7 @@
 	if (e1000_check_reset_block(&adapter->hw))
 		e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
+#ifdef NETIF_F_HW_VLAN_TX
 	netdev->features = NETIF_F_SG |
 			   NETIF_F_HW_CSUM |
 			   NETIF_F_HW_VLAN_TX |
@@ -5632,6 +5737,9 @@
 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
 		netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#else
+	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
+#endif
 
 #ifdef NETIF_F_TSO
 	netdev->features |= NETIF_F_TSO;
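The suspend path is split into __e1000_shutdown(), which quiesces the device and reports whether it should be armed as a wake source, plus callers that finish the job differently: e1000_suspend() always completes the power-off, while e1000_shutdown() only does so when the system is really powering off. A compact standalone model of that split; the function names here are stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for __e1000_shutdown(): quiesce and report the wake decision */
static int prepare_shutdown(bool *enable_wake)
{
	unsigned wufc = 0x1;              /* pretend some wake filters are set */

	*enable_wake = wufc != 0;
	return 0;
}

static void complete_shutdown(bool sleep, bool wake)
{
	printf("power off: sleep=%d wake=%d\n", sleep, wake);
}

static int fake_suspend(void)             /* e1000_suspend-like path */
{
	bool wake;
	int ret = prepare_shutdown(&wake);

	if (!ret)
		complete_shutdown(true, wake);
	return ret;
}

static void fake_shutdown(bool powering_off)   /* e1000_shutdown-like path */
{
	bool wake;

	prepare_shutdown(&wake);
	if (powering_off)                 /* only finish when the system halts */
		complete_shutdown(false, wake);
}

int main(void)
{
	fake_suspend();
	fake_shutdown(true);
	return 0;
}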
@@ -5701,6 +5809,7 @@
 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
 	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
 	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
 
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
@@ -5832,6 +5941,11 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+	cancel_work_sync(&adapter->downshift_task);
+	cancel_work_sync(&adapter->update_phy_task);
+	cancel_work_sync(&adapter->print_hang_task);
 	flush_scheduled_work();
 
 	/*