PageRenderTime 325ms CodeModel.GetById 15ms app.highlight 276ms RepoModel.GetById 1ms app.codeStats 2ms

/drivers/net/ethernet/intel/e1000e/ich8lan.c

http://github.com/mirrors/linux
C | 5952 lines | 3689 code | 779 blank | 1484 comment | 789 complexity | 03598df2fc875c966397072596100a82 MD5 | raw file
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4/* 82562G 10/100 Network Connection
   5 * 82562G-2 10/100 Network Connection
   6 * 82562GT 10/100 Network Connection
   7 * 82562GT-2 10/100 Network Connection
   8 * 82562V 10/100 Network Connection
   9 * 82562V-2 10/100 Network Connection
  10 * 82566DC-2 Gigabit Network Connection
  11 * 82566DC Gigabit Network Connection
  12 * 82566DM-2 Gigabit Network Connection
  13 * 82566DM Gigabit Network Connection
  14 * 82566MC Gigabit Network Connection
  15 * 82566MM Gigabit Network Connection
  16 * 82567LM Gigabit Network Connection
  17 * 82567LF Gigabit Network Connection
  18 * 82567V Gigabit Network Connection
  19 * 82567LM-2 Gigabit Network Connection
  20 * 82567LF-2 Gigabit Network Connection
  21 * 82567V-2 Gigabit Network Connection
  22 * 82567LF-3 Gigabit Network Connection
  23 * 82567LM-3 Gigabit Network Connection
  24 * 82567LM-4 Gigabit Network Connection
  25 * 82577LM Gigabit Network Connection
  26 * 82577LC Gigabit Network Connection
  27 * 82578DM Gigabit Network Connection
  28 * 82578DC Gigabit Network Connection
  29 * 82579LM Gigabit Network Connection
  30 * 82579V Gigabit Network Connection
  31 * Ethernet Connection I217-LM
  32 * Ethernet Connection I217-V
  33 * Ethernet Connection I218-V
  34 * Ethernet Connection I218-LM
  35 * Ethernet Connection (2) I218-LM
  36 * Ethernet Connection (2) I218-V
  37 * Ethernet Connection (3) I218-LM
  38 * Ethernet Connection (3) I218-V
  39 */
  40
  41#include "e1000.h"
  42
  43/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
  44/* Offset 04h HSFSTS */
/* Overlay of the 16-bit HSFSTS register; regval gives raw access */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;	/* bit 0 Flash Cycle Done */
		u16 flcerr:1;	/* bit 1 Flash Cycle Error */
		u16 dael:1;	/* bit 2 Direct Access error Log */
		u16 berasesz:2;	/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 7:6 Reserved */
		u16 reserved2:6;	/* bit 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;	/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;	/* raw 16-bit register value */
};
  59
  60/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
  61/* Offset 06h FLCTL */
/* Overlay of the 16-bit HSFCTL register; regval gives raw access */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;	/* 0 Flash Cycle Go */
		u16 flcycle:2;	/* 2:1 Flash Cycle */
		u16 reserved:5;	/* 7:3 Reserved  */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		/* NOTE(review): field is named flockdn but the comment says
		 * Reserved — confirm bits 15:10 against the PCH datasheet.
		 */
		u16 flockdn:6;	/* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;	/* raw 16-bit register value */
};
  72
  73/* ICH Flash Region Access Permissions */
/* Overlay of the Flash Region Access Permissions (FRACC) register */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8;	/* 0:7 GbE region Read Access */
		u32 grwa:8;	/* 8:15 GbE region Write Access */
		u32 gmrag:8;	/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;	/* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): the bitfields above span 32 bits, but regval is
	 * declared u16 — only the low half is reachable through regval.
	 * Confirm intent against the register spec before changing.
	 */
	u16 regval;
};
  83
  84/* ICH Flash Protected Region */
/* Overlay of a 32-bit flash Protected Range (PRx) register */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;	/* 0:12 Protected Range Base */
		u32 reserved1:2;	/* 13:14 Reserved */
		u32 rpe:1;	/* 15 Read Protection Enable */
		u32 limit:13;	/* 16:28 Protected Range Limit */
		u32 reserved2:2;	/* 29:30 Reserved */
		u32 wpe:1;	/* 31 Write Protection Enable */
	} range;
	u32 regval;	/* raw 32-bit register value */
};
  96
  97static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
  98static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
  99static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 100static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 101						u32 offset, u8 byte);
 102static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 103					 u8 *data);
 104static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
 105					 u16 *data);
 106static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 107					 u8 size, u16 *data);
 108static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
 109					   u32 *data);
 110static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
 111					  u32 offset, u32 *data);
 112static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
 113					    u32 offset, u32 data);
 114static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
 115						 u32 offset, u32 dword);
 116static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 117static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 118static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
 119static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
 120static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
 121static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 122static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 123static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 124static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
 125static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 126static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 127static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 128static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 129static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 130static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 131static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 132static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 133static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 134static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 135static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 136static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 137static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 138static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 139static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 140
/* Read a 16-bit flash register at byte offset @reg in the mapped flash BAR */
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}
 145
/* Read a 32-bit flash register at byte offset @reg in the mapped flash BAR */
static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}
 150
/* Write @val to the 16-bit flash register at byte offset @reg */
static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}
 155
/* Write @val to the 32-bit flash register at byte offset @reg */
static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}
 160
/* Convenience wrappers; they implicitly use a local variable named 'hw' */
#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))
 165
 166/**
 167 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 168 *  @hw: pointer to the HW structure
 169 *
 170 *  Test access to the PHY registers by reading the PHY ID registers.  If
 171 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 172 *  otherwise assume the read PHY ID is correct if it is valid.
 173 *
 174 *  Assumes the sw/fw/hw semaphore is already acquired.
 175 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	/* Try (at most twice) to assemble a 32-bit PHY ID from the two ID
	 * registers.  0xFFFF means the PHY did not respond on the bus.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;	/* discard the partial ID */
			continue;
		}
		/* low word with the revision bits masked off */
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	/* ID already cached (e.g. resume path): just compare.
	 * Otherwise cache the freshly read ID/revision if valid.
	 */
	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * The semaphore is dropped around the retry because
	 * e1000_set_mdio_slow_mode_hv/e1000e_get_phy_id acquire it.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000e_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);
		}
	}

	return true;
}
 239
 240/**
 241 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 242 *  @hw: pointer to the HW structure
 243 *
 244 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 245 *  used to reset the PHY to a quiescent state when necessary.
 246 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	/* Set Phy Config Counter to 50msec */
	mac_reg = er32(FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	ew32(FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: drive the pin low under software
	 * override, hold briefly, then release the override.
	 */
	mac_reg = er32(CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, mac_reg);
	e1e_flush();		/* post the write before the delay */
	usleep_range(10, 20);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, mac_reg);
	e1e_flush();

	if (hw->mac.type < e1000_pch_lpt) {
		/* older parts: fixed wait for the PHY to power-cycle */
		msleep(50);
	} else {
		u16 count = 20;

		/* poll (up to ~20 * 5ms) for LPCD to signal completion */
		do {
			usleep_range(5000, 6000);
		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);

		/* settle time after the pin toggle completes */
		msleep(30);
	}
}
 280
 281/**
 282 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 283 *  @hw: pointer to the HW structure
 284 *
 285 *  Workarounds/flow necessary for PHY initialization during driver load
 286 *  and resume paths.
 287 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	/* NOTE(review): 'adapter' appears unused in this function body, but
	 * the e_err()/e_dbg() logging macros may expand to references to a
	 * local named 'adapter' — confirm against e1000.h before removing.
	 */
	struct e1000_adapter *adapter = hw->adapter;
	u32 mac_reg, fwsm = er32(FWSM);
	s32 ret_val;

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, true);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 *
	 * The cases below intentionally fall through: each newer MAC type
	 * runs its extra recovery step and then continues with the logic
	 * shared with the older types.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msleep(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* on pchlan, an active ME makes the toggle unnecessary */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			e_dbg("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* all recovery attempts failed */
			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			e_err("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000e_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			e_err("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 11000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
 416
 417/**
 418 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 419 *  @hw: pointer to the HW structure
 420 *
 421 *  Initialize family-specific PHY parameters and function pointers.
 422 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	/* Install the HV (PCH-family) register access routines */
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	/* may fill in phy->id as a side effect */
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* Unusual switch layout: 'default' comes first and tries the
	 * generic ID read; on an invalid result it falls through into
	 * the pch2lan+ path, which retries in mdio slow mode.
	 */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_pch_tgp:
		case e1000_pch_adp:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	/* Bind the type-specific ops now that the PHY type is known */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		/* unrecognized PHY type */
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}
 502
 503/**
 504 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 505 *  @hw: pointer to the HW structure
 506 *
 507 *  Initialize family-specific PHY parameters and function pointers.
 508 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000e_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		ret_val = e1000e_determine_phy_address(hw);
		if (ret_val) {
			e_dbg("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	/* Poll for a recognizable PHY ID, retrying up to 100 times with a
	 * ~1ms delay between attempts.
	 */
	phy->id = 0;
	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		usleep_range(1000, 1100);
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id and set type-specific parameters/ops */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000e_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		/* IFE PHYs are 10/100 only */
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.commit = e1000e_phy_sw_reset;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}
 580
 581/**
 582 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 583 *  @hw: pointer to the HW structure
 584 *
 585 *  Initialize family-specific NVM parameters and function
 586 *  pointers.
 587 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		/* STRAP bits 5:1 encode (size/multiplier - 1) */
		nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		/* total size covers two NVM banks */
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			e_dbg("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = er32flash(ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
		    << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every word unmodified/erased (0xFFFF) */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	return 0;
}
 653
 654/**
 655 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 656 *  @hw: pointer to the HW structure
 657 *
 658 *  Initialize family-specific MAC parameters and function
 659 *  pointers.
 660 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;	/* ICH8 has one fewer RAR entry */
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		/* pch2lan overrides RAR settings, then shares the rest of
		 * the PCH setup via the fall-through below.
		 */
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* LPT and newer override some of the ops set in the switch above */
	if (mac->type >= e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
		    e1000_setup_copper_link_pch_lpt;
		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return 0;
}
 740
 741/**
 742 *  __e1000_access_emi_reg_locked - Read/write EMI register
 743 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 745 *  @data: pointer to value to read/write from/to the EMI address
 746 *  @read: boolean flag to indicate read or write
 747 *
 748 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 749 **/
 750static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
 751					 u16 *data, bool read)
 752{
 753	s32 ret_val;
 754
 755	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
 756	if (ret_val)
 757		return ret_val;
 758
 759	if (read)
 760		ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
 761	else
 762		ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
 763
 764	return ret_val;
 765}
 766
 767/**
 768 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 769 *  @hw: pointer to the HW structure
 770 *  @addr: EMI address to program
 771 *  @data: value to be read from the EMI address
 772 *
 773 *  Assumes the SW/FW/HW Semaphore is already acquired.
 774 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	/* read == true selects the EMI read direction */
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}
 779
 780/**
 781 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 782 *  @hw: pointer to the HW structure
 783 *  @addr: EMI address to program
 784 *  @data: value to be written to the EMI address
 785 *
 786 *  Assumes the SW/FW/HW Semaphore is already acquired.
 787 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	/* read == false selects the EMI write direction */
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
 792
 793/**
 794 *  e1000_set_eee_pchlan - Enable/disable EEE support
 795 *  @hw: pointer to the HW structure
 796 *
 797 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 798 *  the link and the EEE capabilities of the link partner.  The LPI Control
 799 *  register bits will remain set only if/when link is up.
 800 *
 801 *  EEE LPI must not be asserted earlier than one second after link is up.
 802 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 803 *  can be link issues with some switches.  Other devices can have EEE LPI
 804 *  enabled immediately upon link up since they have a timer in hardware which
 805 *  prevents LPI from being asserted too early.
 806 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	/* Select the PHY-specific EMI addresses; bail out (success) on
	 * PHYs without EEE support.
	 */
	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return 0;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			e1e_rphy_locked(hw, MII_LPA, &data);
			if (data & LPA_100FULL)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		/* keep the 100Mbps PLL running during LPI on 82579 */
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
 894
 895/**
 896 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 897 *  @hw:   pointer to the HW structure
 898 *  @link: link up bool flag
 899 *
 900 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 901 *  preventing further DMA write requests.  Workaround the issue by disabling
 902 *  the de-assertion of the clock request when in 1Gpbs mode.
 903 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 904 *  speeds in order to avoid Tx hangs.
 905 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	u32 status = er32(STATUS);
	s32 ret_val = 0;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* 1Gbps link up: temporarily disable K1, set the PLL clock
		 * request bit, then restore the original K1 configuration.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		/* brief settle time with K1 disabled */
		usleep_range(10, 20);

		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* restore saved K1 configuration */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* Tx timeout tuning is only needed for PHY revision <= 5
		 * with link up at 10Mbps or 100Half; otherwise just write
		 * the updated FEXTNVM6 value.
		 */
		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			    I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		ew32(FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
 983
 984/**
 985 *  e1000_platform_pm_pch_lpt - Set platform power management values
 986 *  @hw: pointer to the HW structure
 987 *  @link: bool indicating link status
 988 *
 989 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 990 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 991 *  when link is up (which must not exceed the maximum latency supported
 992 *  by the platform), otherwise specify there is no LTR requirement.
 993 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 994 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 995 *  Capability register set, on this device LTR is set by writing the
 996 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 997 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 998 *  message to the PMC.
 999 **/
1000static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1001{
1002	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1003	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1004	u16 lat_enc = 0;	/* latency encoded */
1005
1006	if (link) {
1007		u16 speed, duplex, scale = 0;
1008		u16 max_snoop, max_nosnoop;
1009		u16 max_ltr_enc;	/* max LTR latency encoded */
1010		u64 value;
1011		u32 rxa;
1012
1013		if (!hw->adapter->max_frame_size) {
1014			e_dbg("max_frame_size not set.\n");
1015			return -E1000_ERR_CONFIG;
1016		}
1017
1018		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1019		if (!speed) {
1020			e_dbg("Speed not set.\n");
1021			return -E1000_ERR_CONFIG;
1022		}
1023
1024		/* Rx Packet Buffer Allocation size (KB) */
1025		rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1026
1027		/* Determine the maximum latency tolerated by the device.
1028		 *
1029		 * Per the PCIe spec, the tolerated latencies are encoded as
1030		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1031		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1032		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1033		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1034		 */
1035		rxa *= 512;
1036		value = (rxa > hw->adapter->max_frame_size) ?
1037			(rxa - hw->adapter->max_frame_size) * (16000 / speed) :
1038			0;
1039
1040		while (value > PCI_LTR_VALUE_MASK) {
1041			scale++;
1042			value = DIV_ROUND_UP(value, BIT(5));
1043		}
1044		if (scale > E1000_LTRV_SCALE_MAX) {
1045			e_dbg("Invalid LTR latency scale %d\n", scale);
1046			return -E1000_ERR_CONFIG;
1047		}
1048		lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
1049
1050		/* Determine the maximum latency tolerated by the platform */
1051		pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1052				     &max_snoop);
1053		pci_read_config_word(hw->adapter->pdev,
1054				     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1055		max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1056
1057		if (lat_enc > max_ltr_enc)
1058			lat_enc = max_ltr_enc;
1059	}
1060
1061	/* Set Snoop and No-Snoop latencies the same */
1062	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1063	ew32(LTRV, reg);
1064
1065	return 0;
1066}
1067
1068/**
1069 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1070 *  @hw: pointer to the HW structure
1071 *  @to_sx: boolean indicating a system power state transition to Sx
1072 *
1073 *  When link is down, configure ULP mode to significantly reduce the power
1074 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1075 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1076 *  system, configure the ULP mode by software.
1077 */
1078s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1079{
1080	u32 mac_reg;
1081	s32 ret_val = 0;
1082	u16 phy_reg;
1083	u16 oem_reg = 0;
1084
1085	if ((hw->mac.type < e1000_pch_lpt) ||
1086	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1087	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1088	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1089	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1090	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1091		return 0;
1092
1093	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1094		/* Request ME configure ULP mode in the PHY */
1095		mac_reg = er32(H2ME);
1096		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1097		ew32(H2ME, mac_reg);
1098
1099		goto out;
1100	}
1101
1102	if (!to_sx) {
1103		int i = 0;
1104
1105		/* Poll up to 5 seconds for Cable Disconnected indication */
1106		while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1107			/* Bail if link is re-acquired */
1108			if (er32(STATUS) & E1000_STATUS_LU)
1109				return -E1000_ERR_PHY;
1110
1111			if (i++ == 100)
1112				break;
1113
1114			msleep(50);
1115		}
1116		e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1117		      (er32(FEXT) &
1118		       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1119	}
1120
1121	ret_val = hw->phy.ops.acquire(hw);
1122	if (ret_val)
1123		goto out;
1124
1125	/* Force SMBus mode in PHY */
1126	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1127	if (ret_val)
1128		goto release;
1129	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1130	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1131
1132	/* Force SMBus mode in MAC */
1133	mac_reg = er32(CTRL_EXT);
1134	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1135	ew32(CTRL_EXT, mac_reg);
1136
1137	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1138	 * LPLU and disable Gig speed when entering ULP
1139	 */
1140	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1141		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1142						       &oem_reg);
1143		if (ret_val)
1144			goto release;
1145
1146		phy_reg = oem_reg;
1147		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1148
1149		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1150							phy_reg);
1151
1152		if (ret_val)
1153			goto release;
1154	}
1155
1156	/* Set Inband ULP Exit, Reset to SMBus mode and
1157	 * Disable SMBus Release on PERST# in PHY
1158	 */
1159	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1160	if (ret_val)
1161		goto release;
1162	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1163		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1164	if (to_sx) {
1165		if (er32(WUFC) & E1000_WUFC_LNKC)
1166			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1167		else
1168			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1169
1170		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1171		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1172	} else {
1173		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1174		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1175		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1176	}
1177	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1178
1179	/* Set Disable SMBus Release on PERST# in MAC */
1180	mac_reg = er32(FEXTNVM7);
1181	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1182	ew32(FEXTNVM7, mac_reg);
1183
1184	/* Commit ULP changes in PHY by starting auto ULP configuration */
1185	phy_reg |= I218_ULP_CONFIG1_START;
1186	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1187
1188	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1189	    to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
1190		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1191							oem_reg);
1192		if (ret_val)
1193			goto release;
1194	}
1195
1196release:
1197	hw->phy.ops.release(hw);
1198out:
1199	if (ret_val)
1200		e_dbg("Error in ULP enable flow: %d\n", ret_val);
1201	else
1202		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1203
1204	return ret_val;
1205}
1206
1207/**
1208 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1209 *  @hw: pointer to the HW structure
1210 *  @force: boolean indicating whether or not to force disabling ULP
1211 *
1212 *  Un-configure ULP mode when link is up, the system is transitioned from
1213 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1214 *  system, poll for an indication from ME that ULP has been un-configured.
1215 *  If not on an ME enabled system, un-configure the ULP mode by software.
1216 *
1217 *  During nominal operation, this function is called when link is acquired
1218 *  to disable ULP mode (force=false); otherwise, for example when unloading
1219 *  the driver or during Sx->S0 transitions, this is called with force=true
1220 *  to forcibly disable ULP.
1221 */
1222static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1223{
1224	s32 ret_val = 0;
1225	u32 mac_reg;
1226	u16 phy_reg;
1227	int i = 0;
1228
1229	if ((hw->mac.type < e1000_pch_lpt) ||
1230	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1231	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1232	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1233	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1234	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1235		return 0;
1236
1237	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1238		if (force) {
1239			/* Request ME un-configure ULP mode in the PHY */
1240			mac_reg = er32(H2ME);
1241			mac_reg &= ~E1000_H2ME_ULP;
1242			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1243			ew32(H2ME, mac_reg);
1244		}
1245
1246		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1247		while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1248			if (i++ == 30) {
1249				ret_val = -E1000_ERR_PHY;
1250				goto out;
1251			}
1252
1253			usleep_range(10000, 11000);
1254		}
1255		e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1256
1257		if (force) {
1258			mac_reg = er32(H2ME);
1259			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1260			ew32(H2ME, mac_reg);
1261		} else {
1262			/* Clear H2ME.ULP after ME ULP configuration */
1263			mac_reg = er32(H2ME);
1264			mac_reg &= ~E1000_H2ME_ULP;
1265			ew32(H2ME, mac_reg);
1266		}
1267
1268		goto out;
1269	}
1270
1271	ret_val = hw->phy.ops.acquire(hw);
1272	if (ret_val)
1273		goto out;
1274
1275	if (force)
1276		/* Toggle LANPHYPC Value bit */
1277		e1000_toggle_lanphypc_pch_lpt(hw);
1278
1279	/* Unforce SMBus mode in PHY */
1280	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1281	if (ret_val) {
1282		/* The MAC might be in PCIe mode, so temporarily force to
1283		 * SMBus mode in order to access the PHY.
1284		 */
1285		mac_reg = er32(CTRL_EXT);
1286		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1287		ew32(CTRL_EXT, mac_reg);
1288
1289		msleep(50);
1290
1291		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1292						       &phy_reg);
1293		if (ret_val)
1294			goto release;
1295	}
1296	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1297	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1298
1299	/* Unforce SMBus mode in MAC */
1300	mac_reg = er32(CTRL_EXT);
1301	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1302	ew32(CTRL_EXT, mac_reg);
1303
1304	/* When ULP mode was previously entered, K1 was disabled by the
1305	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1306	 */
1307	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1308	if (ret_val)
1309		goto release;
1310	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1311	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1312
1313	/* Clear ULP enabled configuration */
1314	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1315	if (ret_val)
1316		goto release;
1317	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1318		     I218_ULP_CONFIG1_STICKY_ULP |
1319		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1320		     I218_ULP_CONFIG1_WOL_HOST |
1321		     I218_ULP_CONFIG1_INBAND_EXIT |
1322		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1323		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1324		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1325	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1326
1327	/* Commit ULP changes by starting auto ULP configuration */
1328	phy_reg |= I218_ULP_CONFIG1_START;
1329	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1330
1331	/* Clear Disable SMBus Release on PERST# in MAC */
1332	mac_reg = er32(FEXTNVM7);
1333	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1334	ew32(FEXTNVM7, mac_reg);
1335
1336release:
1337	hw->phy.ops.release(hw);
1338	if (force) {
1339		e1000_phy_hw_reset(hw);
1340		msleep(50);
1341	}
1342out:
1343	if (ret_val)
1344		e_dbg("Error in ULP disable flow: %d\n", ret_val);
1345	else
1346		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1347
1348	return ret_val;
1349}
1350
1351/**
1352 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1353 *  @hw: pointer to the HW structure
1354 *
1355 *  Checks to see of the link status of the hardware has changed.  If a
1356 *  change in link status has been detected, then we read the PHY registers
1357 *  to get the current speed/duplex if link exists.
1358 **/
1359static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1360{
1361	struct e1000_mac_info *mac = &hw->mac;
1362	s32 ret_val, tipg_reg = 0;
1363	u16 emi_addr, emi_val = 0;
1364	bool link;
1365	u16 phy_reg;
1366
1367	/* We only want to go out to the PHY registers to see if Auto-Neg
1368	 * has completed and/or if our link status has changed.  The
1369	 * get_link_status flag is set upon receiving a Link Status
1370	 * Change or Rx Sequence Error interrupt.
1371	 */
1372	if (!mac->get_link_status)
1373		return 0;
1374	mac->get_link_status = false;
1375
1376	/* First we want to see if the MII Status Register reports
1377	 * link.  If so, then we want to get the current speed/duplex
1378	 * of the PHY.
1379	 */
1380	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1381	if (ret_val)
1382		goto out;
1383
1384	if (hw->mac.type == e1000_pchlan) {
1385		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1386		if (ret_val)
1387			goto out;
1388	}
1389
1390	/* When connected at 10Mbps half-duplex, some parts are excessively
1391	 * aggressive resulting in many collisions. To avoid this, increase
1392	 * the IPG and reduce Rx latency in the PHY.
1393	 */
1394	if ((hw->mac.type >= e1000_pch2lan) && link) {
1395		u16 speed, duplex;
1396
1397		e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
1398		tipg_reg = er32(TIPG);
1399		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1400
1401		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1402			tipg_reg |= 0xFF;
1403			/* Reduce Rx latency in analog PHY */
1404			emi_val = 0;
1405		} else if (hw->mac.type >= e1000_pch_spt &&
1406			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1407			tipg_reg |= 0xC;
1408			emi_val = 1;
1409		} else {
1410
1411			/* Roll back the default values */
1412			tipg_reg |= 0x08;
1413			emi_val = 1;
1414		}
1415
1416		ew32(TIPG, tipg_reg);
1417
1418		ret_val = hw->phy.ops.acquire(hw);
1419		if (ret_val)
1420			goto out;
1421
1422		if (hw->mac.type == e1000_pch2lan)
1423			emi_addr = I82579_RX_CONFIG;
1424		else
1425			emi_addr = I217_RX_CONFIG;
1426		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1427
1428		if (hw->mac.type >= e1000_pch_lpt) {
1429			u16 phy_reg;
1430
1431			e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
1432			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1433			if (speed == SPEED_100 || speed == SPEED_10)
1434				phy_reg |= 0x3E8;
1435			else
1436				phy_reg |= 0xFA;
1437			e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
1438
1439			if (speed == SPEED_1000) {
1440				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1441							    &phy_reg);
1442
1443				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1444
1445				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1446							     phy_reg);
1447			}
1448		}
1449		hw->phy.ops.release(hw);
1450
1451		if (ret_val)
1452			goto out;
1453
1454		if (hw->mac.type >= e1000_pch_spt) {
1455			u16 data;
1456			u16 ptr_gap;
1457
1458			if (speed == SPEED_1000) {
1459				ret_val = hw->phy.ops.acquire(hw);
1460				if (ret_val)
1461					goto out;
1462
1463				ret_val = e1e_rphy_locked(hw,
1464							  PHY_REG(776, 20),
1465							  &data);
1466				if (ret_val) {
1467					hw->phy.ops.release(hw);
1468					goto out;
1469				}
1470
1471				ptr_gap = (data & (0x3FF << 2)) >> 2;
1472				if (ptr_gap < 0x18) {
1473					data &= ~(0x3FF << 2);
1474					data |= (0x18 << 2);
1475					ret_val =
1476					    e1e_wphy_locked(hw,
1477							    PHY_REG(776, 20),
1478							    data);
1479				}
1480				hw->phy.ops.release(hw);
1481				if (ret_val)
1482					goto out;
1483			} else {
1484				ret_val = hw->phy.ops.acquire(hw);
1485				if (ret_val)
1486					goto out;
1487
1488				ret_val = e1e_wphy_locked(hw,
1489							  PHY_REG(776, 20),
1490							  0xC023);
1491				hw->phy.ops.release(hw);
1492				if (ret_val)
1493					goto out;
1494
1495			}
1496		}
1497	}
1498
1499	/* I217 Packet Loss issue:
1500	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1501	 * on power up.
1502	 * Set the Beacon Duration for I217 to 8 usec
1503	 */
1504	if (hw->mac.type >= e1000_pch_lpt) {
1505		u32 mac_reg;
1506
1507		mac_reg = er32(FEXTNVM4);
1508		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1509		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1510		ew32(FEXTNVM4, mac_reg);
1511	}
1512
1513	/* Work-around I218 hang issue */
1514	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1515	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1516	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1517	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1518		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1519		if (ret_val)
1520			goto out;
1521	}
1522	if (hw->mac.type >= e1000_pch_lpt) {
1523		/* Set platform power management values for
1524		 * Latency Tolerance Reporting (LTR)
1525		 */
1526		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1527		if (ret_val)
1528			goto out;
1529	}
1530
1531	/* Clear link partner's EEE ability */
1532	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1533
1534	if (hw->mac.type >= e1000_pch_lpt) {
1535		u32 fextnvm6 = er32(FEXTNVM6);
1536
1537		if (hw->mac.type == e1000_pch_spt) {
1538			/* FEXTNVM6 K1-off workaround - for SPT only */
1539			u32 pcieanacfg = er32(PCIEANACFG);
1540
1541			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1542				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1543			else
1544				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1545		}
1546
1547		ew32(FEXTNVM6, fextnvm6);
1548	}
1549
1550	if (!link)
1551		goto out;
1552
1553	switch (hw->mac.type) {
1554	case e1000_pch2lan:
1555		ret_val = e1000_k1_workaround_lv(hw);
1556		if (ret_val)
1557			return ret_val;
1558		/* fall-thru */
1559	case e1000_pchlan:
1560		if (hw->phy.type == e1000_phy_82578) {
1561			ret_val = e1000_link_stall_workaround_hv(hw);
1562			if (ret_val)
1563				return ret_val;
1564		}
1565
1566		/* Workaround for PCHx parts in half-duplex:
1567		 * Set the number of preambles removed from the packet
1568		 * when it is passed from the PHY to the MAC to prevent
1569		 * the MAC from misinterpreting the packet type.
1570		 */
1571		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1572		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1573
1574		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1575			phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1576
1577		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1578		break;
1579	default:
1580		break;
1581	}
1582
1583	/* Check if there was DownShift, must be checked
1584	 * immediately after link-up
1585	 */
1586	e1000e_check_downshift(hw);
1587
1588	/* Enable/Disable EEE after link up */
1589	if (hw->phy.type > e1000_phy_82579) {
1590		ret_val = e1000_set_eee_pchlan(hw);
1591		if (ret_val)
1592			return ret_val;
1593	}
1594
1595	/* If we are forcing speed/duplex, then we simply return since
1596	 * we have already determined whether we have link or not.
1597	 */
1598	if (!mac->autoneg)
1599		return -E1000_ERR_CONFIG;
1600
1601	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1602	 * of MAC speed/duplex configuration.  So we only need to
1603	 * configure Collision Distance in the MAC.
1604	 */
1605	mac->ops.config_collision_dist(hw);
1606
1607	/* Configure Flow Control now that Auto-Neg has completed.
1608	 * First, we need to restore the desired flow control
1609	 * settings because we may have had to re-autoneg with a
1610	 * different link partner.
1611	 */
1612	ret_val = e1000e_config_fc_after_link_up(hw);
1613	if (ret_val)
1614		e_dbg("Error configuring flow control\n");
1615
1616	return ret_val;
1617
1618out:
1619	mac->get_link_status = true;
1620	return ret_val;
1621}
1622
/**
 *  e1000_get_variants_ich8lan - Initialize variant-specific parameters/flags
 *  @adapter: board private structure
 *
 *  Initializes MAC, NVM and PHY parameters for the detected MAC type and
 *  adjusts adapter feature flags (jumbo frames, LED blink, LSC workarounds)
 *  based on the part/PHY combination.
 *
 *  Returns 0 on success, negative error code on initialization failure.
 **/
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	/* PHY parameter init differs between ICH (8/9/10) and PCH parts */
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		rc = e1000_init_phy_params_ich8lan(hw);
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
		rc = e1000_init_phy_params_pchlan(hw);
		break;
	default:
		break;
	}
	if (rc)
		return rc;

	/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
	 */
	if ((adapter->hw.phy.type == e1000_phy_ife) ||
	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
		adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		hw->mac.ops.blink_led = NULL;
	}

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type != e1000_phy_ife))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	/* Enable workaround for 82579 w/ ME enabled */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;

	return 0;
}
1680
/* Serializes NVM access across all e1000e devices in the system */
static DEFINE_MUTEX(nvm_mutex);

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.
 *
 *  Always returns 0 (mutex_lock cannot fail).
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}
1695
1696/**
1697 *  e1000_release_nvm_ich8lan - Release NVM mutex
1698 *  @hw: pointer to the HW structure
1699 *
1700 *  Releases the mutex used while performing NVM operations.
1701 **/
1702static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1703{
1704	mutex_unlock(&nvm_mutex);
1705}
1706
1707/**
1708 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1709 *  @hw: pointer to the HW structure
1710 *
1711 *  Acquires the software control flag for performing PHY and select
1712 *  MAC CSR accesses.
1713 **/
1714static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1715{
1716	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1717	s32 ret_val = 0;
1718
1719	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1720			     &hw->adapter->state)) {
1721		e_dbg("contention for Phy access\n");
1722		return -E1000_ERR_PHY;
1723	}
1724
1725	while (timeout) {
1726		extcnf_ctrl = er32(EXTCNF_CTRL);
1727		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1728			break;
1729
1730		mdelay(1);
1731		timeout--;
1732	}
1733
1734	if (!timeout) {
1735		e_dbg("SW has already locked the resource.\n");
1736		ret_val = -E1000_ERR_CONFIG;
1737		goto out;
1738	}
1739
1740	timeout = SW_FLAG_TIMEOUT;
1741
1742	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1743	ew32(EXTCNF_CTRL, extcnf_ctrl);
1744
1745	while (timeout) {
1746		extcnf_ctrl = er32(EXTCNF_CTRL);
1747		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1748			break;
1749
1750		mdelay(1);
1751		timeout--;
1752	}
1753
1754	if (!timeout) {
1755		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1756		      er32(FWSM), extcnf_ctrl);
1757		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1758		ew32(EXTCNF_CTRL, extcnf_ctrl);
1759		ret_val = -E1000_ERR_CONFIG;
1760		goto out;
1761	}
1762
1763out:
1764	if (ret_val)
1765		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1766
1767	return ret_val;
1768}
1769
1770/**
1771 *  e1000_release_swflag_ich8lan - Release software control flag
1772 *  @hw: pointer to the HW structure
1773 *
1774 *  Releases the software control flag for performing PHY and select
1775 *  MAC CSR accesses.
1776 **/
1777static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1778{
1779	u32 extcnf_ctrl;
1780
1781	extcnf_ctrl = er32(EXTCNF_CTRL);
1782
1783	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1784		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1785		ew32(EXTCNF_CTRL, extcnf_ctrl);
1786	} else {
1787		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1788	}
1789
1790	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1791}
1792
1793/**
1794 *  e1000_check_mng_mode_ich8lan - Checks management mode
1795 *  @hw: pointer to the HW structure
1796 *
1797 *  This checks if the adapter has any manageability enabled.
1798 *  This is a function pointer entry point only called by read/write
1799 *  routines for the PHY and NVM parts.
1800 **/
1801static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1802{
1803	u32 fwsm;
1804
1805	fwsm = er32(FWSM);
1806	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1807		((fwsm & E1000_FWSM_MODE_MASK) ==
1808		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1809}
1810
1811/**
1812 *  e1000_check_mng_mode_pchlan - Checks management mode
1813 *  @hw: pointer to the HW structure
1814 *
1815 *  This checks if the adapter has iAMT enabled.
1816 *  This is a function pointer entry point only called by read/write
1817 *  routines for the PHY and NVM parts.
1818 **/
1819static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1820{
1821	u32 fwsm;
1822
1823	fwsm = er32(FWSM);
1824	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1825	    (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1826}
1827
1828/**
1829 *  e1000_rar_set_pch2lan - Set receive address register
1830 *  @hw: pointer to the HW structure
1831 *  @addr: pointer to the receive address
1832 *  @index: receive address array register
1833 *
1834 *  Sets the receive address array register at index to the address passed
1835 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1836 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1837 *  Use SHRA[0-3] in place of those reserved for ME.
1838 **/
1839static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1840{
1841	u32 rar_low, rar_high;
1842
1843	/* HW expects these in little endian so we reverse the byte order
1844	 * from network order (big endian) to little endian
1845	 */
1846	rar_low = ((u32)addr[0] |
1847		   ((u32)addr[1] << 8) |
1848		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1849
1850	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1851
1852	/* If MAC address zero, no need to set the AV bit */
1853	if (rar_low || rar_high)
1854		rar_high |= E1000_RAH_AV;
1855
1856	if (index == 0) {
1857		ew32(RAL(index), rar_low);
1858		e1e_flush();
1859		ew32(RAH(index), rar_high);
1860		e1e_flush();
1861		return 0;
1862	}
1863
1864	/* RAR[1-6] are owned by manageability.  Skip those and program the
1865	 * next address into the SHRA register array.
1866	 */
1867	if (index < (u32)(hw->mac.rar_entry_count)) {
1868		s32 ret_val;
1869
1870		ret_val = e1000_acquire_swflag_ich8lan(hw);
1871		if (ret_val)
1872			goto out;
1873
1874		ew32(SHRAL(index - 1), rar_low);
1875		e1e_flush();
1876		ew32(SHRAH(index - 1), rar_high);
1877		e1e_flush();
1878
1879		e1000_release_swflag_ich8lan(hw);
1880
1881		/* verify the register updates */
1882		if ((er32(SHRAL(index - 1)) == rar_low) &&
1883		    (er32(SHRAH(index - 1)) == rar_high))
1884			return 0;
1885
1886		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1887		      (index - 1), er32(FWSM));
1888	}
1889
1890out:
1891	e_dbg("Failed to write receive address at index %d\n", index);
1892	return -E1000_ERR_CONFIG;
1893}
1894
1895/**
1896 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1897 *  @hw: pointer to the HW structure
1898 *
1899 *  Get the number of available receive registers that the Host can
1900 *  program. SHRA[0-10] are the shared receive address registers
1901 *  that are shared between the Host and manageability engine (ME).
1902 *  ME can reserve any number of addresses and the host needs to be
1903 *  able to tell how many available registers it has access to.
1904 **/
1905static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1906{
1907	u32 wlock_mac;
1908	u32 num_entries;
1909
1910	wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1911	wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1912
1913	switch (wlock_mac) {
1914	case 0:
1915		/* All SHRA[0..10] and RAR[0] available */
1916		num_entries = hw->mac.rar_entry_count;
1917		break;
1918	case 1:
1919		/* Only RAR[0] available */
1920		num_entries = 1;
1921		break;
1922	default:
1923		/* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
1924		num_entries = wlock_mac + 1;
1925		break;
1926	}
1927
1928	return num_entries;
1929}
1930
1931/**
1932 *  e1000_rar_set_pch_lpt - Set receive address registers
1933 *  @hw: pointer to the HW structure
1934 *  @addr: pointer to the receive address
1935 *  @index: receive address array register
1936 *
1937 *  Sets the receive address register array at index to the address passed
1938 *  in by addr. For LPT, RAR[0] is the base address register that is to
1939 *  contain the MAC address. SHRA[0-10] are the shared receive address
1940 *  registers that are shared between the Host and manageability engine (ME).
1941 **/
1942static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1943{
1944	u32 rar_low, rar_high;
1945	u32 wlock_mac;
1946
1947	/* HW expects these in little endian so we reverse the byte order
1948	 * from network order (big endian) to little endian
1949	 */
1950	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1951		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1952
1953	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1954
1955	/* If MAC address zero, no need to set the AV bit */
1956	if (rar_low || rar_high)
1957		rar_high |= E1000_RAH_AV;
1958
1959	if (index == 0) {
1960		ew32(RAL(index), rar_low);
1961		e1e_flush();
1962		ew32(RAH(index), rar_high);
1963		e1e_flush();
1964		return 0;
1965	}
1966
1967	/* The manageability engine (ME) can lock certain SHRAR registers that
1968	 * it is using - those registers are unavailable for use.
1969	 */
1970	if (index < hw->mac.rar_entry_count) {
1971		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1972		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1973
1974		/* Check if all SHRAR registers are locked */
1975		if (wlock_mac == 1)
1976			goto out;
1977
1978		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1979			s32 ret_val;
1980
1981			ret_val = e1000_acquire_swflag_ich8lan(hw);
1982
1983			if (ret_val)
1984				goto out;
1985
1986			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1987			e1e_flush();
1988			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1989			e1e_flush();
1990
1991			e1000_release_swflag_ich8lan(hw);
1992
1993			/* verify the register updates */
1994			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1995			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1996				return 0;
1997		}
1998	}
1999
2000out:
2001	e_dbg("Failed to write receive address at index %d\n", index);
2002	return -E1000_ERR_CONFIG;
2003}
2004
2005/**
2006 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2007 *  @hw: pointer to the HW structure
2008 *
2009 *  Checks if firmware is blocking the reset of the PHY.
2010 *  This is a function pointer entry point only called by
2011 *  reset routines.
2012 **/
2013static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2014{
2015	bool blocked = false;
2016	int i = 0;
2017
2018	while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
2019	       (i++ < 30))
2020		usleep_range(10000, 11000);
2021	return blocked ? E1000_BLK_PHY_RESET : 0;
2022}
2023
2024/**
2025 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2026 *  @hw: pointer to the HW structure
2027 *
2028 *  Assumes semaphore already acquired.
2029 *
2030 **/
2031static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2032{
2033	u16 phy_data;
2034	u32 strap = er32(STRAP);
2035	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2036	    E1000_STRAP_SMT_FREQ_SHIFT;
2037	s32 ret_val;
2038
2039	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2040
2041	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2042	if (ret_val)
2043		return ret_val;
2044
2045	phy_data &= ~HV_SMB_ADDR_MASK;
2046	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2047	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2048
2049	if (hw->phy.type == e1000_phy_i217) {
2050		/* Restore SMBus frequency */
2051		if (freq--) {
2052			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2053			phy_data |= (freq & BIT(0)) <<
2054			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
2055			phy_data |= (freq & BIT(1)) <<
2056			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2057		} else {
2058			e_dbg("Unsupported SMB frequency in PHY\n");
2059		}
2060	}
2061
2062	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2063}
2064
2065/**
2066 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2067 *  @hw:   pointer to the HW structure
2068 *
2069 *  SW should configure the LCD from the NVM extended configuration region
2070 *  as a workaround for certain parts.
2071 **/
2072static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2073{
2074	struct e1000_phy_info *phy = &hw->phy;
2075	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2076	s32 ret_val = 0;
2077	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2078
2079	/* Initialize the PHY from the NVM on ICH platforms.  This
2080	 * is needed due to an issue where the NVM configuration is
2081	 * not properly autoloaded after power transitions.
2082	 * Therefore, after each PHY reset, we will load the
2083	 * configuration data out of the NVM manually.
2084	 */
2085	switch (hw->mac.type) {
2086	case e1000_ich8lan:
2087		if (phy->type != e1000_phy_igp_3)
2088			return ret_val;
2089
2090		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
2091		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
2092			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2093			break;
2094		}
2095		/* Fall-thru */
2096	case e1000_pchlan:
2097	case e1000_pch2lan:
2098	case e1000_pch_lpt:
2099	case e1000_pch_spt:
2100	case e1000_pch_cnp:
2101	case e1000_pch_tgp:
2102	case e1000_pch_adp:
2103		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2104		break;
2105	default:
2106		return ret_val;
2107	}
2108
2109	ret_val = hw->phy.ops.acquire(hw);
2110	if (ret_val)
2111		return ret_val;
2112
2113	data = er32(FEXTNVM);
2114	if (!(data & sw_cfg_mask))
2115		goto release;
2116
2117	/* Make sure HW does not configure LCD from PHY
2118	 * extended configuration before SW configuration
2119	 */
2120	data = er32(EXTCNF_CTRL);
2121	if ((hw->mac.type < e1000_pch2lan) &&
2122	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2123		goto release;
2124
2125	cnf_size = er32(EXTCNF_SIZE);
2126	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2127	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2128	if (!cnf_size)
2129		goto release;
2130
2131	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2132	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2133
2134	if (((hw->mac.type == e1000_pchlan) &&
2135	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2136	    (hw->mac.type > e1000_pchlan)) {
2137		/* HW configures the SMBus address and LEDs when the
2138		 * OEM and LCD Write Enable bits are set in the NVM.
2139		 * When both NVM bits are cleared, SW will configure
2140		 * them instead.
2141		 */
2142		ret_val = e1000_write_smbus_addr(hw);
2143		if (ret_val)
2144			goto release;
2145
2146		data = er32(LEDCTL);
2147		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2148							(u16)data);
2149		if (ret_val)
2150			goto release;
2151	}
2152
2153	/* Configure LCD from extended configuration region. */
2154
2155	/* cnf_base_addr is in DWORD */
2156	word_addr = (u16)(cnf_base_addr << 1);
2157
2158	for (i = 0; i < cnf_size; i++) {
2159		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
2160		if (ret_val)
2161			goto release;
2162
2163		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
2164					 1, &reg_addr);
2165		if (ret_val)
2166			goto release;
2167
2168		/* Save off the PHY page for future writes. */
2169		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2170			phy_page = reg_data;
2171			continue;
2172		}
2173
2174		reg_addr &= PHY_REG_MASK;
2175		reg_addr |= phy_page;
2176
2177		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
2178		if (ret_val)
2179			goto release;
2180	}
2181
2182release:
2183	hw->phy.ops.release(hw);
2184	return ret_val;
2185}
2186
2187/**
2188 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2189 *  @hw:   pointer to the HW structure
2190 *  @link: link up bool flag
2191 *
2192 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2193 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2194 *  If link is down, the function will restore the default K1 setting located
2195 *  in the NVM.
2196 **/
2197static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2198{
2199	s32 ret_val = 0;
2200	u16 status_reg = 0;
2201	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2202
2203	if (hw->mac.type != e1000_pchlan)
2204		return 0;
2205
2206	/* Wrap the whole flow with the sw flag */
2207	ret_val = hw->phy.ops.acquire(hw);
2208	if (ret_val)
2209		return ret_val;
2210
2211	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2212	if (link) {
2213		if (hw->phy.type == e1000_phy_82578) {
2214			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
2215						  &status_reg);
2216			if (ret_val)
2217				goto release;
2218
2219			status_reg &= (BM_CS_STATUS_LINK_UP |
2220				       BM_CS_STATUS_RESOLVED |
2221				       BM_CS_STATUS_SPEED_MASK);
2222
2223			if (status_reg == (BM_CS_STATUS_LINK_UP |
2224					   BM_CS_STATUS_RESOLVED |
2225					   BM_CS_STATUS_SPEED_1000))
2226				k1_enable = false;
2227		}
2228
2229		if (hw->phy.type == e1000_phy_82577) {
2230			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
2231			if (ret_val)
2232				goto release;
2233
2234			status_reg &= (HV_M_STATUS_LINK_UP |
2235				       HV_M_STATUS_AUTONEG_COMPLETE |
2236				       HV_M_STATUS_SPEED_MASK);
2237
2238			if (status_reg == (HV_M_STATUS_LINK_UP |
2239					   HV_M_STATUS_AUTONEG_COMPLETE |
2240					   HV_M_STATUS_SPEED_1000))
2241				k1_enable = false;
2242		}
2243
2244		/* Link stall fix for link up */
2245		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
2246		if (ret_val)
2247			goto release;
2248
2249	} else {
2250		/* Link stall fix for link down */
2251		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
2252		if (ret_val)
2253			goto release;
2254	}
2255
2256	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2257
2258release:
2259	hw->phy.ops.release(hw);
2260
2261	return ret_val;
2262}
2263
2264/**
2265 *  e1000_configure_k1_ich8lan - Configure K1 power state
2266 *  @hw: pointer to the HW structure
2267 *  @enable: K1 state to configure
2268 *
2269 *  Configure the K1 power state based on the provided parameter.
2270 *  Assumes semaphore already acquired.
2271 *
2272 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2273 **/
2274s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2275{
2276	s32 ret_val;
2277	u32 ctrl_reg = 0;
2278	u32 ctrl_ext = 0;
2279	u32 reg = 0;
2280	u16 kmrn_reg = 0;
2281
2282	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2283					      &kmrn_reg);
2284	if (ret_val)
2285		return ret_val;
2286
2287	if (k1_enable)
2288		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2289	else
2290		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2291
2292	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2293					       kmrn_reg);
2294	if (ret_val)
2295		return ret_val;
2296
2297	usleep_range(20, 40);
2298	ctrl_ext = er32(CTRL_EXT);
2299	ctrl_reg = er32(CTRL);
2300
2301	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2302	reg |= E1000_CTRL_FRCSPD;
2303	ew32(CTRL, reg);
2304
2305	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2306	e1e_flush();
2307	usleep_range(20, 40);
2308	ew32(CTRL, ctrl_reg);
2309	ew32(CTRL_EXT, ctrl_ext);
2310	e1e_flush();
2311	usleep_range(20, 40);
2312
2313	return 0;
2314}
2315
2316/**
2317 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2318 *  @hw:       pointer to the HW structure
2319 *  @d0_state: boolean if entering d0 or d3 device state
2320 *
2321 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2322 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2323 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2324 **/
2325static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2326{
2327	s32 ret_val = 0;
2328	u32 mac_reg;
2329	u16 oem_reg;
2330
2331	if (hw->mac.type < e1000_pchlan)
2332		return ret_val;
2333
2334	ret_val = hw->phy.ops.acquire(hw);
2335	if (ret_val)
2336		return ret_val;
2337
2338	if (hw->mac.type == e1000_pchlan) {
2339		mac_reg = er32(EXTCNF_CTRL);
2340		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2341			goto release;
2342	}
2343
2344	mac_reg = er32(FEXTNVM);
2345	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2346		goto release;
2347
2348	mac_reg = er32(PHY_CTRL);
2349
2350	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
2351	if (ret_val)
2352		goto release;
2353
2354	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2355
2356	if (d0_state) {
2357		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2358			oem_reg |= HV_OEM_BITS_GBE_DIS;
2359
2360		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2361			oem_reg |= HV_OEM_BITS_LPLU;
2362	} else {
2363		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2364			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2365			oem_reg |= HV_OEM_BITS_GBE_DIS;
2366
2367		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2368			       E1000_PHY_CTRL_NOND0A_LPLU))
2369			oem_reg |= HV_OEM_BITS_LPLU;
2370	}
2371
2372	/* Set Restart auto-neg to activate the bits */
2373	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2374	    !hw->phy.ops.check_reset_block(hw))
2375		oem_reg |= HV_OEM_BITS_RESTART_AN;
2376
2377	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
2378
2379release:
2380	hw->phy.ops.release(hw);
2381
2382	return ret_val;
2383}
2384
2385/**
2386 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2387 *  @hw:   pointer to the HW structure
2388 **/
2389static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2390{
2391	s32 ret_val;
2392	u16 data;
2393
2394	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2395	if (ret_val)
2396		return ret_val;
2397
2398	data |= HV_KMRN_MDIO_SLOW;
2399
2400	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2401
2402	return ret_val;
2403}
2404
2405/**
2406 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2407 *  done after every PHY reset.
2408 **/
2409static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2410{
2411	s32 ret_val = 0;
2412	u16 phy_data;
2413
2414	if (hw->mac.type != e1000_pchlan)
2415		return 0;
2416
2417	/* Set MDIO slow mode before any other MDIO access */
2418	if (hw->phy.type == e1000_phy_82577) {
2419		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2420		if (ret_val)
2421			return ret_val;
2422	}
2423
2424	if (((hw->phy.type == e1000_phy_82577) &&
2425	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2426	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2427		/* Disable generation of early preamble */
2428		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2429		if (ret_val)
2430			return ret_val;
2431
2432		/* Preamble tuning for SSC */
2433		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2434		if (ret_val)
2435			return ret_val;
2436	}
2437
2438	if (hw->phy.type == e1000_phy_82578) {
2439		/* Return registers to default by doing a soft reset then
2440		 * writing 0x3140 to the control register.
2441		 */
2442		if (hw->phy.revision < 2) {
2443			e1000e_phy_sw_reset(hw);
2444			ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2445			if (ret_val)
2446				return ret_val;
2447		}
2448	}
2449
2450	/* Select page 0 */
2451	ret_val = hw->phy.ops.acquire(hw);
2452	if (ret_val)
2453		return ret_val;
2454
2455	hw->phy.addr = 1;
2456	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2457	hw->phy.ops.release(hw);
2458	if (ret_val)
2459		return ret_val;
2460
2461	/* Configure the K1 Si workaround during phy reset assuming there is
2462	 * link so that it disables K1 if link is in 1Gbps.
2463	 */
2464	ret_val = e1000_k1_gig_workaround_hv(hw, true);
2465	if (ret_val)
2466		return ret_val;
2467
2468	/* Workaround for link disconnects on a busy hub in half duplex */
2469	ret_val = hw->phy.ops.acquire(hw);
2470	if (ret_val)
2471		return ret_val;
2472	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2473	if (ret_val)
2474		goto release;
2475	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2476	if (ret_val)
2477		goto release;
2478
2479	/* set MSE higher to enable link to stay up when noise is high */
2480	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2481release:
2482	hw->phy.ops.release(hw);
2483
2484	return ret_val;
2485}
2486
2487/**
2488 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2489 *  @hw:   pointer to the HW structure
2490 **/
2491void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2492{
2493	u32 mac_reg;
2494	u16 i, phy_reg = 0;
2495	s32 ret_val;
2496
2497	ret_val = hw->phy.ops.acquire(hw);
2498	if (ret_val)
2499		return;
2500	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2501	if (ret_val)
2502		goto release;
2503
2504	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2505	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2506		mac_reg = er32(RAL(i));
2507		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2508					   (u16)(mac_reg & 0xFFFF));
2509		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2510					   (u16)((mac_reg >> 16) & 0xFFFF));
2511
2512		mac_reg = er32(RAH(i));
2513		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2514					   (u16)(mac_reg & 0xFFFF));
2515		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2516					   (u16)((mac_reg & E1000_RAH_AV)
2517						 >> 16));
2518	}
2519
2520	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2521
2522release:
2523	hw->phy.ops.release(hw);
2524}
2525
2526/**
2527 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2528 *  with 82579 PHY
2529 *  @hw: pointer to the HW structure
2530 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2531 **/
2532s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2533{
2534	s32 ret_val = 0;
2535	u16 phy_reg, data;
2536	u32 mac_reg;
2537	u16 i;
2538
2539	if (hw->mac.type < e1000_pch2lan)
2540		return 0;
2541
2542	/* disable Rx path while enabling/disabling workaround */
2543	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
2544	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
2545	if (ret_val)
2546		return ret_val;
2547
2548	if (enable) {
2549		/* Write Rx addresses (rar_entry_count for RAL/H, and
2550		 * SHRAL/H) and initial CRC values to the MAC
2551		 */
2552		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2553			u8 mac_addr[ETH_ALEN] = { 0 };
2554			u32 addr_high, addr_low;
2555
2556			addr_high = er32(RAH(i));
2557			if (!(addr_high & E1000_RAH_AV))
2558				continue;
2559			addr_low = er32(RAL(i));
2560			mac_addr[0] = (addr_low & 0xFF);
2561			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2562			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2563			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2564			mac_addr[4] = (addr_high & 0xFF);
2565			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2566
2567			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
2568		}
2569
2570		/* Write Rx addresses to the PHY */
2571		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2572
2573		/* Enable jumbo frame workaround in the MAC */
2574		mac_reg = er32(FFLT_DBG);
2575		mac_reg &= ~BIT(14);
2576		mac_reg |= (7 << 15);
2577		ew32(FFLT_DBG, mac_reg);
2578
2579		mac_reg = er32(RCTL);
2580		mac_reg |= E1000_RCTL_SECRC;
2581		ew32(RCTL, mac_reg);
2582
2583		ret_val = e1000e_read_kmrn_reg(hw,
2584					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
2585					       &data);
2586		if (ret_val)
2587			return ret_val;
2588		ret_val = e1000e_write_kmrn_reg(hw,
2589						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2590						data | BIT(0));
2591		if (ret_val)
2592			return ret_val;
2593		ret_val = e1000e_read_kmrn_reg(hw,
2594					       E1000_KMRNCTRLSTA_HD_CTRL,
2595					       &data);
2596		if (ret_val)
2597			return ret_val;
2598		data &= ~(0xF << 8);
2599		data |= (0xB << 8);
2600		ret_val = e1000e_write_kmrn_reg(hw,
2601						E1000_KMRNCTRLSTA_HD_CTRL,
2602						data);
2603		if (ret_val)
2604			return ret_val;
2605
2606		/* Enable jumbo frame workaround in the PHY */
2607		e1e_rphy(hw, PHY_REG(769, 23), &data);
2608		data &= ~(0x7F << 5);
2609		data |= (0x37 << 5);
2610		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2611		if (ret_val)
2612			return ret_val;
2613		e1e_rphy(hw, PHY_REG(769, 16), &data);
2614		data &= ~BIT(13);
2615		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2616		if (ret_val)
2617			return ret_val;
2618		e1e_rphy(hw, PHY_REG(776, 20), &data);
2619		data &= ~(0x3FF << 2);
2620		data |= (E1000_TX_PTR_GAP << 2);
2621		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2622		if (ret_val)
2623			return ret_val;
2624		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
2625		if (ret_val)
2626			return ret_val;
2627		e1e_rphy(hw, HV_PM_CTRL, &data);
2628		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
2629		if (ret_val)
2630			return ret_val;
2631	} else {
2632		/* Write MAC register values back to h/w defaults */
2633		mac_reg = er32(FFLT_DBG);
2634		mac_reg &= ~(0xF << 14);
2635		ew32(FFLT_DBG, mac_reg);
2636
2637		mac_reg = er32(RCTL);
2638		mac_reg &= ~E1000_RCTL_SECRC;
2639		ew32(RCTL, mac_reg);
2640
2641		ret_val = e1000e_read_kmrn_reg(hw,
2642					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
2643					       &data);
2644		if (ret_val)
2645			return ret_val;
2646		ret_val = e1000e_write_kmrn_reg(hw,
2647						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2648						data & ~BIT(0));
2649		if (ret_val)
2650			return ret_val;
2651		ret_val = e1000e_read_kmrn_reg(hw,
2652					       E1000_KMRNCTRLSTA_HD_CTRL,
2653					       &data);
2654		if (ret_val)
2655			return ret_val;
2656		data &= ~(0xF << 8);
2657		data |= (0xB << 8);
2658		ret_val = e1000e_write_kmrn_reg(hw,
2659						E1000_KMRNCTRLSTA_HD_CTRL,
2660						data);
2661		if (ret_val)
2662			return ret_val;
2663
2664		/* Write PHY register values back to h/w defaults */
2665		e1e_rphy(hw, PHY_REG(769, 23), &data);
2666		data &= ~(0x7F << 5);
2667		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2668		if (ret_val)
2669			return ret_val;
2670		e1e_rphy(hw, PHY_REG(769, 16), &data);
2671		data |= BIT(13);
2672		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2673		if (ret_val)
2674			return ret_val;
2675		e1e_rphy(hw, PHY_REG(776, 20), &data);
2676		data &= ~(0x3FF << 2);
2677		data |= (0x8 << 2);
2678		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2679		if (ret_val)
2680			return ret_val;
2681		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
2682		if (ret_val)
2683			return ret_val;
2684		e1e_rphy(hw, HV_PM_CTRL, &data);
2685		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
2686		if (ret_val)
2687			return ret_val;
2688	}
2689
2690	/* re-enable Rx path after enabling/disabling workaround */
2691	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
2692}
2693
2694/**
2695 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2696 *  done after every PHY reset.
2697 **/
2698static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2699{
2700	s32 ret_val = 0;
2701
2702	if (hw->mac.type != e1000_pch2lan)
2703		return 0;
2704
2705	/* Set MDIO slow mode before any other MDIO access */
2706	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2707	if (ret_val)
2708		return ret_val;
2709
2710	ret_val = hw->phy.ops.acquire(hw);
2711	if (ret_val)
2712		return ret_val;
2713	/* set MSE higher to enable link to stay up when noise is high */
2714	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2715	if (ret_val)
2716		goto release;
2717	/* drop link after 5 times MSE threshold was reached */
2718	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2719release:
2720	hw->phy.ops.release(hw);
2721
2722	return ret_val;
2723}
2724
2725/**
2726 *  e1000_k1_gig_workaround_lv - K1 Si workaround
2727 *  @hw:   pointer to the HW structure
2728 *
2729 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2730 *  Disable K1 in 1000Mbps and 100Mbps
2731 **/
2732static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2733{
2734	s32 ret_val = 0;
2735	u16 status_reg = 0;
2736
2737	if (hw->mac.type != e1000_pch2lan)
2738		return 0;
2739
2740	/* Set K1 beacon duration based on 10Mbs speed */
2741	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2742	if (ret_val)
2743		return ret_val;
2744
2745	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2746	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2747		if (status_reg &
2748		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2749			u16 pm_phy_reg;
2750
2751			/* LV 1G/100 Packet drop issue wa  */
2752			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2753			if (ret_val)
2754				return ret_val;
2755			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2756			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2757			if (ret_val)
2758				return ret_val;
2759		} else {
2760			u32 mac_reg;
2761
2762			mac_reg = er32(FEXTNVM4);
2763			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2764			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2765			ew32(FEXTNVM4, mac_reg);
2766		}
2767	}
2768
2769	return ret_val;
2770}
2771
2772/**
2773 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2774 *  @hw:   pointer to the HW structure
2775 *  @gate: boolean set to true to gate, false to ungate
2776 *
2777 *  Gate/ungate the automatic PHY configuration via hardware; perform
2778 *  the configuration via software instead.
2779 **/
2780static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2781{
2782	u32 extcnf_ctrl;
2783
2784	if (hw->mac.type < e1000_pch2lan)
2785		return;
2786
2787	extcnf_ctrl = er32(EXTCNF_CTRL);
2788
2789	if (gate)
2790		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2791	else
2792		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2793
2794	ew32(EXTCNF_CTRL, extcnf_ctrl);
2795}
2796
2797/**
2798 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2799 *  @hw: pointer to the HW structure
2800 *
2801 *  Check the appropriate indication the MAC has finished configuring the
2802 *  PHY after a software reset.
2803 **/
2804static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2805{
2806	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2807
2808	/* Wait for basic configuration completes before proceeding */
2809	do {
2810		data = er32(STATUS);
2811		data &= E1000_STATUS_LAN_INIT_DONE;
2812		usleep_range(100, 200);
2813	} while ((!data) && --loop);
2814
2815	/* If basic configuration is incomplete before the above loop
2816	 * count reaches 0, loading the configuration from NVM will
2817	 * leave the PHY in a bad state possibly resulting in no link.
2818	 */
2819	if (loop == 0)
2820		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2821
2822	/* Clear the Init Done bit for the next init event */
2823	data = er32(STATUS);
2824	data &= ~E1000_STATUS_LAN_INIT_DONE;
2825	ew32(STATUS, data);
2826}
2827
2828/**
2829 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2830 *  @hw: pointer to the HW structure
2831 **/
2832static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2833{
2834	s32 ret_val = 0;
2835	u16 reg;
2836
2837	if (hw->phy.ops.check_reset_block(hw))
2838		return 0;
2839
2840	/* Allow time for h/w to get to quiescent state after reset */
2841	usleep_range(10000, 11000);
2842
2843	/* Perform any necessary post-reset workarounds */
2844	switch (hw->mac.type) {
2845	case e1000_pchlan:
2846		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2847		if (ret_val)
2848			return ret_val;
2849		break;
2850	case e1000_pch2lan:
2851		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2852		if (ret_val)
2853			return ret_val;
2854		break;
2855	default:
2856		break;
2857	}
2858
2859	/* Clear the host wakeup bit after lcd reset */
2860	if (hw->mac.type >= e1000_pchlan) {
2861		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2862		reg &= ~BM_WUC_HOST_WU_BIT;
2863		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2864	}
2865
2866	/* Configure the LCD with the extended configuration region in NVM */
2867	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2868	if (ret_val)
2869		return ret_val;
2870
2871	/* Configure the LCD with the OEM bits in NVM */
2872	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2873
2874	if (hw->mac.type == e1000_pch2lan) {
2875		/* Ungate automatic PHY configuration on non-managed 82579 */
2876		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2877			usleep_range(10000, 11000);
2878			e1000_gate_hw_phy_config_ich8lan(hw, false);
2879		}
2880
2881		/* Set EEE LPI Update Timer to 200usec */
2882		ret_val = hw->phy.ops.acquire(hw);
2883		if (ret_val)
2884			return ret_val;
2885		ret_val = e1000_write_emi_reg_locked(hw,
2886						     I82579_LPI_UPDATE_TIMER,
2887						     0x1387);
2888		hw->phy.ops.release(hw);
2889	}
2890
2891	return ret_val;
2892}
2893
2894/**
2895 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2896 *  @hw: pointer to the HW structure
2897 *
2898 *  Resets the PHY
2899 *  This is a function pointer entry point called by drivers
2900 *  or other shared routines.
2901 **/
2902static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2903{
2904	s32 ret_val = 0;
2905
2906	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2907	if ((hw->mac.type == e1000_pch2lan) &&
2908	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2909		e1000_gate_hw_phy_config_ich8lan(hw, true);
2910
2911	ret_val = e1000e_phy_hw_reset_generic(hw);
2912	if (ret_val)
2913		return ret_val;
2914
2915	return e1000_post_phy_reset_ich8lan(hw);
2916}
2917
2918/**
2919 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2920 *  @hw: pointer to the HW structure
2921 *  @active: true to enable LPLU, false to disable
2922 *
2923 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2924 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2925 *  the phy speed. This function will manually set the LPLU bit and restart
2926 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2927 *  since it configures the same bit.
2928 **/
2929static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2930{
2931	s32 ret_val;
2932	u16 oem_reg;
2933
2934	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2935	if (ret_val)
2936		return ret_val;
2937
2938	if (active)
2939		oem_reg |= HV_OEM_BITS_LPLU;
2940	else
2941		oem_reg &= ~HV_OEM_BITS_LPLU;
2942
2943	if (!hw->phy.ops.check_reset_block(hw))
2944		oem_reg |= HV_OEM_BITS_RESTART_AN;
2945
2946	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2947}
2948
2949/**
2950 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2951 *  @hw: pointer to the HW structure
2952 *  @active: true to enable LPLU, false to disable
2953 *
2954 *  Sets the LPLU D0 state according to the active flag.  When
2955 *  activating LPLU this function also disables smart speed
2956 *  and vice versa.  LPLU will not be activated unless the
2957 *  device autonegotiation advertisement meets standards of
2958 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2959 *  This is a function pointer entry point only called by
2960 *  PHY setup routines.
2961 **/
2962static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2963{
2964	struct e1000_phy_info *phy = &hw->phy;
2965	u32 phy_ctrl;
2966	s32 ret_val = 0;
2967	u16 data;
2968
2969	if (phy->type == e1000_phy_ife)
2970		return 0;
2971
2972	phy_ctrl = er32(PHY_CTRL);
2973
2974	if (active) {
2975		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2976		ew32(PHY_CTRL, phy_ctrl);
2977
2978		if (phy->type != e1000_phy_igp_3)
2979			return 0;
2980
2981		/* Call gig speed drop workaround on LPLU before accessing
2982		 * any PHY registers
2983		 */
2984		if (hw->mac.type == e1000_ich8lan)
2985			e1000e_gig_downshift_workaround_ich8lan(hw);
2986
2987		/* When LPLU is enabled, we should disable SmartSpeed */
2988		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2989		if (ret_val)
2990			return ret_val;
2991		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2992		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2993		if (ret_val)
2994			return ret_val;
2995	} else {
2996		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2997		ew32(PHY_CTRL, phy_ctrl);
2998
2999		if (phy->type != e1000_phy_igp_3)
3000			return 0;
3001
3002		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3003		 * during Dx states where the power conservation is most
3004		 * important.  During driver activity we should enable
3005		 * SmartSpeed, so performance is maintained.
3006		 */
3007		if (phy->smart_speed == e1000_smart_speed_on) {
3008			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3009					   &data);
3010			if (ret_val)
3011				return ret_val;
3012
3013			data |= IGP01E1000_PSCFR_SMART_SPEED;
3014			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3015					   data);
3016			if (ret_val)
3017				return ret_val;
3018		} else if (phy->smart_speed == e1000_smart_speed_off) {
3019			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3020					   &data);
3021			if (ret_val)
3022				return ret_val;
3023
3024			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3025			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3026					   data);
3027			if (ret_val)
3028				return ret_val;
3029		}
3030	}
3031
3032	return 0;
3033}
3034
3035/**
3036 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3037 *  @hw: pointer to the HW structure
3038 *  @active: true to enable LPLU, false to disable
3039 *
3040 *  Sets the LPLU D3 state according to the active flag.  When
3041 *  activating LPLU this function also disables smart speed
3042 *  and vice versa.  LPLU will not be activated unless the
3043 *  device autonegotiation advertisement meets standards of
3044 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3045 *  This is a function pointer entry point only called by
3046 *  PHY setup routines.
3047 **/
3048static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3049{
3050	struct e1000_phy_info *phy = &hw->phy;
3051	u32 phy_ctrl;
3052	s32 ret_val = 0;
3053	u16 data;
3054
3055	phy_ctrl = er32(PHY_CTRL);
3056
3057	if (!active) {
3058		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3059		ew32(PHY_CTRL, phy_ctrl);
3060
3061		if (phy->type != e1000_phy_igp_3)
3062			return 0;
3063
3064		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3065		 * during Dx states where the power conservation is most
3066		 * important.  During driver activity we should enable
3067		 * SmartSpeed, so performance is maintained.
3068		 */
3069		if (phy->smart_speed == e1000_smart_speed_on) {
3070			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3071					   &data);
3072			if (ret_val)
3073				return ret_val;
3074
3075			data |= IGP01E1000_PSCFR_SMART_SPEED;
3076			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3077					   data);
3078			if (ret_val)
3079				return ret_val;
3080		} else if (phy->smart_speed == e1000_smart_speed_off) {
3081			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3082					   &data);
3083			if (ret_val)
3084				return ret_val;
3085
3086			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3087			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3088					   data);
3089			if (ret_val)
3090				return ret_val;
3091		}
3092	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3093		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3094		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3095		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3096		ew32(PHY_CTRL, phy_ctrl);
3097
3098		if (phy->type != e1000_phy_igp_3)
3099			return 0;
3100
3101		/* Call gig speed drop workaround on LPLU before accessing
3102		 * any PHY registers
3103		 */
3104		if (hw->mac.type == e1000_ich8lan)
3105			e1000e_gig_downshift_workaround_ich8lan(hw);
3106
3107		/* When LPLU is enabled, we should disable SmartSpeed */
3108		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3109		if (ret_val)
3110			return ret_val;
3111
3112		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3113		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3114	}
3115
3116	return ret_val;
3117}
3118
/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank:  pointer to the variable that returns the active bank
 *
 *  Reads signature byte from the NVM using the flash access registers.
 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 *  Returns 0 with @bank set on success, -E1000_ERR_NVM when no bank has
 *  a valid signature or the flash read fails.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Defaults for the byte-addressed (pre-SPT) flash layout: bank 1's
	 * signature byte is one bank size (in bytes) past bank 0's.
	 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u32 nvm_dword = 0;
	u8 sig_byte = 0;
	s32 ret_val;

	switch (hw->mac.type) {
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
		/* SPT+ only supports 32-bit flash accesses, so offsets are
		 * in words and the signature byte comes out of a dword.
		 */
		bank1_offset = nvm->flash_bank_size;
		act_offset = E1000_ICH_NVM_SIG_WORD;

		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		/* The signature lives in bits 15:8 of the signature word */
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
							 bank1_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* ICH8/9 can report the valid bank directly through EECD */
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3220
3221/**
3222 *  e1000_read_nvm_spt - NVM access for SPT
3223 *  @hw: pointer to the HW structure
3224 *  @offset: The offset (in bytes) of the word(s) to read.
3225 *  @words: Size of data to read in words.
3226 *  @data: pointer to the word(s) to read at offset.
3227 *
3228 *  Reads a word(s) from the NVM
3229 **/
3230static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3231			      u16 *data)
3232{
3233	struct e1000_nvm_info *nvm = &hw->nvm;
3234	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3235	u32 act_offset;
3236	s32 ret_val = 0;
3237	u32 bank = 0;
3238	u32 dword = 0;
3239	u16 offset_to_read;
3240	u16 i;
3241
3242	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3243	    (words == 0)) {
3244		e_dbg("nvm parameter(s) out of bounds\n");
3245		ret_val = -E1000_ERR_NVM;
3246		goto out;
3247	}
3248
3249	nvm->ops.acquire(hw);
3250
3251	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3252	if (ret_val) {
3253		e_dbg("Could not detect valid bank, assuming bank 0\n");
3254		bank = 0;
3255	}
3256
3257	act_offset = (bank) ? nvm->flash_bank_size : 0;
3258	act_offset += offset;
3259
3260	ret_val = 0;
3261
3262	for (i = 0; i < words; i += 2) {
3263		if (words - i == 1) {
3264			if (dev_spec->shadow_ram[offset + i].modified) {
3265				data[i] =
3266				    dev_spec->shadow_ram[offset + i].value;
3267			} else {
3268				offset_to_read = act_offset + i -
3269				    ((act_offset + i) % 2);
3270				ret_val =
3271				  e1000_read_flash_dword_ich8lan(hw,
3272								 offset_to_read,
3273								 &dword);
3274				if (ret_val)
3275					break;
3276				if ((act_offset + i) % 2 == 0)
3277					data[i] = (u16)(dword & 0xFFFF);
3278				else
3279					data[i] = (u16)((dword >> 16) & 0xFFFF);
3280			}
3281		} else {
3282			offset_to_read = act_offset + i;
3283			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3284			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3285				ret_val =
3286				  e1000_read_flash_dword_ich8lan(hw,
3287								 offset_to_read,
3288								 &dword);
3289				if (ret_val)
3290					break;
3291			}
3292			if (dev_spec->shadow_ram[offset + i].modified)
3293				data[i] =
3294				    dev_spec->shadow_ram[offset + i].value;
3295			else
3296				data[i] = (u16)(dword & 0xFFFF);
3297			if (dev_spec->shadow_ram[offset + i].modified)
3298				data[i + 1] =
3299				    dev_spec->shadow_ram[offset + i + 1].value;
3300			else
3301				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3302		}
3303	}
3304
3305	nvm->ops.release(hw);
3306
3307out:
3308	if (ret_val)
3309		e_dbg("NVM read error: %d\n", ret_val);
3310
3311	return ret_val;
3312}
3313
3314/**
3315 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3316 *  @hw: pointer to the HW structure
3317 *  @offset: The offset (in bytes) of the word(s) to read.
3318 *  @words: Size of data to read in words
3319 *  @data: Pointer to the word(s) to read at offset.
3320 *
3321 *  Reads a word(s) from the NVM using the flash access registers.
3322 **/
3323static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3324				  u16 *data)
3325{
3326	struct e1000_nvm_info *nvm = &hw->nvm;
3327	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3328	u32 act_offset;
3329	s32 ret_val = 0;
3330	u32 bank = 0;
3331	u16 i, word;
3332
3333	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3334	    (words == 0)) {
3335		e_dbg("nvm parameter(s) out of bounds\n");
3336		ret_val = -E1000_ERR_NVM;
3337		goto out;
3338	}
3339
3340	nvm->ops.acquire(hw);
3341
3342	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3343	if (ret_val) {
3344		e_dbg("Could not detect valid bank, assuming bank 0\n");
3345		bank = 0;
3346	}
3347
3348	act_offset = (bank) ? nvm->flash_bank_size : 0;
3349	act_offset += offset;
3350
3351	ret_val = 0;
3352	for (i = 0; i < words; i++) {
3353		if (dev_spec->shadow_ram[offset + i].modified) {
3354			data[i] = dev_spec->shadow_ram[offset + i].value;
3355		} else {
3356			ret_val = e1000_read_flash_word_ich8lan(hw,
3357								act_offset + i,
3358								&word);
3359			if (ret_val)
3360				break;
3361			data[i] = word;
3362		}
3363	}
3364
3365	nvm->ops.release(hw);
3366
3367out:
3368	if (ret_val)
3369		e_dbg("NVM read error: %d\n", ret_val);
3370
3371	return ret_val;
3372}
3373
/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started: it validates the flash descriptor, clears stale error
 *  bits, and waits for any cycle already in progress to finish.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	/* On SPT and newer the flash registers are in LAN memory space and
	 * only 32-bit access is supported; older parts take 16-bit writes.
	 */
	if (hw->mac.type >= e1000_pch_spt)
		ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
	else
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		if (hw->mac.type >= e1000_pch_spt)
			ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
		else
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (!ret_val) {
			/* The previous cycle finished within the timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			if (hw->mac.type >= e1000_pch_spt)
				ew32flash(ICH_FLASH_HSFSTS,
					  hsfsts.regval & 0xFFFF);
			else
				ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3452
/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time to wait for completion (polled in 1us steps)
 *
 *  This function starts a flash cycle and waits for its completion.
 *  Returns 0 when the cycle completed without a flash cycle error,
 *  -E1000_ERR_NVM on timeout or error.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	/* On SPT+, HSFCTL is the upper 16 bits of the 32-bit HSFSTS
	 * register, so it is accessed with 32-bit reads/writes there.
	 */
	if (hw->mac.type >= e1000_pch_spt)
		hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
	else
		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;

	if (hw->mac.type >= e1000_pch_spt)
		ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
	else
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		udelay(1);
	} while (i++ < timeout);

	/* Only a completed cycle without a flash cycle error counts */
	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return 0;

	return -E1000_ERR_NVM;
}
3491
3492/**
3493 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3494 *  @hw: pointer to the HW structure
3495 *  @offset: offset to data location
3496 *  @data: pointer to the location for storing the data
3497 *
3498 *  Reads the flash dword at offset into data.  Offset is converted
3499 *  to bytes before read.
3500 **/
3501static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3502					  u32 *data)
3503{
3504	/* Must convert word offset into bytes. */
3505	offset <<= 1;
3506	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3507}
3508
3509/**
3510 *  e1000_read_flash_word_ich8lan - Read word from flash
3511 *  @hw: pointer to the HW structure
3512 *  @offset: offset to data location
3513 *  @data: pointer to the location for storing the data
3514 *
3515 *  Reads the flash word at offset into data.  Offset is converted
3516 *  to bytes before read.
3517 **/
3518static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3519					 u16 *data)
3520{
3521	/* Must convert offset into bytes. */
3522	offset <<= 1;
3523
3524	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3525}
3526
3527/**
3528 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3529 *  @hw: pointer to the HW structure
3530 *  @offset: The offset of the byte to read.
3531 *  @data: Pointer to a byte to store the value read.
3532 *
3533 *  Reads a single byte from the NVM using the flash access registers.
3534 **/
3535static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3536					 u8 *data)
3537{
3538	s32 ret_val;
3539	u16 word = 0;
3540
3541	/* In SPT, only 32 bits access is supported,
3542	 * so this function should not be called.
3543	 */
3544	if (hw->mac.type >= e1000_pch_spt)
3545		return -E1000_ERR_NVM;
3546	else
3547		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3548
3549	if (ret_val)
3550		return ret_val;
3551
3552	*data = (u8)word;
3553
3554	return 0;
3555}
3556
/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 *  On a flash cycle error the whole sequence is retried up to
 *  ICH_FLASH_CYCLE_REPEAT_COUNT times.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	/* Translate the NVM byte offset into a linear flash address */
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		/* Program cycle type and transfer size, then the address */
		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (!ret_val) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3632
/**
 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the dword to read.
 *  @data: Pointer to the dword to store the value read.
 *
 *  Reads a dword from the NVM using the flash access registers.  Only
 *  valid on SPT and newer parts, which support 32-bit flash access.
 *  On a flash cycle error the whole sequence is retried up to
 *  ICH_FLASH_CYCLE_REPEAT_COUNT times.
 **/

static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					   u32 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	/* Dword access is only available on SPT and newer parts */
	if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;
		/* In SPT, This register is in Lan memory space, not flash.
		 * Therefore, only 32 bit access is supported
		 */
		hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		/* In SPT, This register is in Lan memory space, not flash.
		 * Therefore, only 32 bit access is supported
		 */
		ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		   e1000_flash_cycle_ich8lan(hw,
					     ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (!ret_val) {
			*data = er32flash(ICH_FLASH_FDATA0);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3707
3708/**
3709 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3710 *  @hw: pointer to the HW structure
3711 *  @offset: The offset (in bytes) of the word(s) to write.
3712 *  @words: Size of data to write in words
3713 *  @data: Pointer to the word(s) to write at offset.
3714 *
3715 *  Writes a byte or word to the NVM using the flash access registers.
3716 **/
3717static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3718				   u16 *data)
3719{
3720	struct e1000_nvm_info *nvm = &hw->nvm;
3721	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3722	u16 i;
3723
3724	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3725	    (words == 0)) {
3726		e_dbg("nvm parameter(s) out of bounds\n");
3727		return -E1000_ERR_NVM;
3728	}
3729
3730	nvm->ops.acquire(hw);
3731
3732	for (i = 0; i < words; i++) {
3733		dev_spec->shadow_ram[offset + i].modified = true;
3734		dev_spec->shadow_ram[offset + i].value = data[i];
3735	}
3736
3737	nvm->ops.release(hw);
3738
3739	return 0;
3740}
3741
3742/**
3743 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3744 *  @hw: pointer to the HW structure
3745 *
3746 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3747 *  which writes the checksum to the shadow ram.  The changes in the shadow
3748 *  ram are then committed to the EEPROM by processing each bank at a time
3749 *  checking for the modified bit and writing only the pending changes.
3750 *  After a successful commit, the shadow ram is cleared and is ready for
3751 *  future writes.
3752 **/
3753static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3754{
3755	struct e1000_nvm_info *nvm = &hw->nvm;
3756	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3757	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3758	s32 ret_val;
3759	u32 dword = 0;
3760
3761	ret_val = e1000e_update_nvm_checksum_generic(hw);
3762	if (ret_val)
3763		goto out;
3764
3765	if (nvm->type != e1000_nvm_flash_sw)
3766		goto out;
3767
3768	nvm->ops.acquire(hw);
3769
3770	/* We're writing to the opposite bank so if we're on bank 1,
3771	 * write to bank 0 etc.  We also need to erase the segment that
3772	 * is going to be written
3773	 */
3774	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3775	if (ret_val) {
3776		e_dbg("Could not detect valid bank, assuming bank 0\n");
3777		bank = 0;
3778	}
3779
3780	if (bank == 0) {
3781		new_bank_offset = nvm->flash_bank_size;
3782		old_bank_offset = 0;
3783		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3784		if (ret_val)
3785			goto release;
3786	} else {
3787		old_bank_offset = nvm->flash_bank_size;
3788		new_bank_offset = 0;
3789		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3790		if (ret_val)
3791			goto release;
3792	}
3793	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3794		/* Determine whether to write the value stored
3795		 * in the other NVM bank or a modified value stored
3796		 * in the shadow RAM
3797		 */
3798		ret_val = e1000_read_flash_dword_ich8lan(hw,
3799							 i + old_bank_offset,
3800							 &dword);
3801
3802		if (dev_spec->shadow_ram[i].modified) {
3803			dword &= 0xffff0000;
3804			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3805		}
3806		if (dev_spec->shadow_ram[i + 1].modified) {
3807			dword &= 0x0000ffff;
3808			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3809				  << 16);
3810		}
3811		if (ret_val)
3812			break;
3813
3814		/* If the word is 0x13, then make sure the signature bits
3815		 * (15:14) are 11b until the commit has completed.
3816		 * This will allow us to write 10b which indicates the
3817		 * signature is valid.  We want to do this after the write
3818		 * has completed so that we don't mark the segment valid
3819		 * while the write is still in progress
3820		 */
3821		if (i == E1000_ICH_NVM_SIG_WORD - 1)
3822			dword |= E1000_ICH_NVM_SIG_MASK << 16;
3823
3824		/* Convert offset to bytes. */
3825		act_offset = (i + new_bank_offset) << 1;
3826
3827		usleep_range(100, 200);
3828
3829		/* Write the data to the new bank. Offset in words */
3830		act_offset = i + new_bank_offset;
3831		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3832								dword);
3833		if (ret_val)
3834			break;
3835	}
3836
3837	/* Don't bother writing the segment valid bits if sector
3838	 * programming failed.
3839	 */
3840	if (ret_val) {
3841		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3842		e_dbg("Flash commit failed.\n");
3843		goto release;
3844	}
3845
3846	/* Finally validate the new segment by setting bit 15:14
3847	 * to 10b in word 0x13 , this can be done without an
3848	 * erase as well since these bits are 11 to start with
3849	 * and we need to change bit 14 to 0b
3850	 */
3851	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3852
3853	/*offset in words but we read dword */
3854	--act_offset;
3855	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3856
3857	if (ret_val)
3858		goto release;
3859
3860	dword &= 0xBFFFFFFF;
3861	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3862
3863	if (ret_val)
3864		goto release;
3865
3866	/* And invalidate the previously valid segment by setting
3867	 * its signature word (0x13) high_byte to 0b. This can be
3868	 * done without an erase because flash erase sets all bits
3869	 * to 1's. We can write 1's to 0's without an erase
3870	 */
3871	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3872
3873	/* offset in words but we read dword */
3874	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3875	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3876
3877	if (ret_val)
3878		goto release;
3879
3880	dword &= 0x00FFFFFF;
3881	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3882
3883	if (ret_val)
3884		goto release;
3885
3886	/* Great!  Everything worked, we can now clear the cached entries. */
3887	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3888		dev_spec->shadow_ram[i].modified = false;
3889		dev_spec->shadow_ram[i].value = 0xFFFF;
3890	}
3891
3892release:
3893	nvm->ops.release(hw);
3894
3895	/* Reload the EEPROM, or else modifications will not appear
3896	 * until after the next adapter reset.
3897	 */
3898	if (!ret_val) {
3899		nvm->ops.reload(hw);
3900		usleep_range(10000, 11000);
3901	}
3902
3903out:
3904	if (ret_val)
3905		e_dbg("NVM update error: %d\n", ret_val);
3906
3907	return ret_val;
3908}
3909
/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data = 0;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* Committing to flash only applies to flash-backed NVM */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		/* Use the pending shadow-RAM value when present, otherwise
		 * carry over the word from the old bank.
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usleep_range(100, 200);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usleep_range(100, 200);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset + 1,
							       (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
		e_dbg("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	/* Writes are byte-addressed; + 1 targets the signature high byte */
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		usleep_range(10000, 11000);
	}

out:
	if (ret_val)
		e_dbg("NVM update error: %d\n", ret_val);

	return ret_val;
}
4060
4061/**
4062 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4063 *  @hw: pointer to the HW structure
4064 *
4065 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4066 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4067 *  calculated, in which case we need to calculate the checksum and set bit 6.
4068 **/
4069static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4070{
4071	s32 ret_val;
4072	u16 data;
4073	u16 word;
4074	u16 valid_csum_mask;
4075
4076	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4077	 * the checksum needs to be fixed.  This bit is an indication that
4078	 * the NVM was prepared by OEM software and did not calculate
4079	 * the checksum...a likely scenario.
4080	 */
4081	switch (hw->mac.type) {
4082	case e1000_pch_lpt:
4083	case e1000_pch_spt:
4084	case e1000_pch_cnp:
4085	case e1000_pch_tgp:
4086	case e1000_pch_adp:
4087		word = NVM_COMPAT;
4088		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4089		break;
4090	default:
4091		word = NVM_FUTURE_INIT_WORD1;
4092		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4093		break;
4094	}
4095
4096	ret_val = e1000_read_nvm(hw, word, 1, &data);
4097	if (ret_val)
4098		return ret_val;
4099
4100	if (!(data & valid_csum_mask)) {
4101		data |= valid_csum_mask;
4102		ret_val = e1000_write_nvm(hw, word, 1, &data);
4103		if (ret_val)
4104			return ret_val;
4105		ret_val = e1000e_update_nvm_checksum(hw);
4106		if (ret_val)
4107			return ret_val;
4108	}
4109
4110	return e1000e_validate_nvm_checksum_generic(hw);
4111}
4112
4113/**
4114 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
4115 *  @hw: pointer to the HW structure
4116 *
4117 *  To prevent malicious write/erase of the NVM, set it to be read-only
4118 *  so that the hardware ignores all write/erase cycles of the NVM via
4119 *  the flash control registers.  The shadow-ram copy of the NVM will
4120 *  still be updated, however any updates to this copy will not stick
4121 *  across driver reloads.
4122 **/
4123void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4124{
4125	struct e1000_nvm_info *nvm = &hw->nvm;
4126	union ich8_flash_protected_range pr0;
4127	union ich8_hws_flash_status hsfsts;
4128	u32 gfpreg;
4129
4130	nvm->ops.acquire(hw);
4131
4132	gfpreg = er32flash(ICH_FLASH_GFPREG);
4133
4134	/* Write-protect GbE Sector of NVM */
4135	pr0.regval = er32flash(ICH_FLASH_PR0);
4136	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4137	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4138	pr0.range.wpe = true;
4139	ew32flash(ICH_FLASH_PR0, pr0.regval);
4140
4141	/* Lock down a subset of GbE Flash Control Registers, e.g.
4142	 * PR0 to prevent the write-protection from being lifted.
4143	 * Once FLOCKDN is set, the registers protected by it cannot
4144	 * be written until FLOCKDN is cleared by a hardware reset.
4145	 */
4146	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4147	hsfsts.hsf_status.flockdn = true;
4148	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4149
4150	nvm->ops.release(hw);
4151}
4152
4153/**
4154 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4155 *  @hw: pointer to the HW structure
4156 *  @offset: The offset (in bytes) of the byte/word to read.
4157 *  @size: Size of data to read, 1=byte 2=word
4158 *  @data: The byte(s) to write to the NVM.
4159 *
4160 *  Writes one/two bytes to the NVM using the flash access registers.
4161 **/
4162static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4163					  u8 size, u16 data)
4164{
4165	union ich8_hws_flash_status hsfsts;
4166	union ich8_hws_flash_ctrl hsflctl;
4167	u32 flash_linear_addr;
4168	u32 flash_data = 0;
4169	s32 ret_val;
4170	u8 count = 0;
4171
4172	if (hw->mac.type >= e1000_pch_spt) {
4173		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4174			return -E1000_ERR_NVM;
4175	} else {
4176		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4177			return -E1000_ERR_NVM;
4178	}
4179
4180	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4181			     hw->nvm.flash_base_addr);
4182
4183	do {
4184		udelay(1);
4185		/* Steps */
4186		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4187		if (ret_val)
4188			break;
4189		/* In SPT, This register is in Lan memory space, not
4190		 * flash.  Therefore, only 32 bit access is supported
4191		 */
4192		if (hw->mac.type >= e1000_pch_spt)
4193			hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4194		else
4195			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4196
4197		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4198		hsflctl.hsf_ctrl.fldbcount = size - 1;
4199		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4200		/* In SPT, This register is in Lan memory space,
4201		 * not flash.  Therefore, only 32 bit access is
4202		 * supported
4203		 */
4204		if (hw->mac.type >= e1000_pch_spt)
4205			ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4206		else
4207			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4208
4209		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4210
4211		if (size == 1)
4212			flash_data = (u32)data & 0x00FF;
4213		else
4214			flash_data = (u32)data;
4215
4216		ew32flash(ICH_FLASH_FDATA0, flash_data);
4217
4218		/* check if FCERR is set to 1 , if set to 1, clear it
4219		 * and try the whole sequence a few more times else done
4220		 */
4221		ret_val =
4222		    e1000_flash_cycle_ich8lan(hw,
4223					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4224		if (!ret_val)
4225			break;
4226
4227		/* If we're here, then things are most likely
4228		 * completely hosed, but if the error condition
4229		 * is detected, it won't hurt to give it another
4230		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4231		 */
4232		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4233		if (hsfsts.hsf_status.flcerr)
4234			/* Repeat for some time before giving up. */
4235			continue;
4236		if (!hsfsts.hsf_status.flcdone) {
4237			e_dbg("Timeout error - flash cycle did not complete.\n");
4238			break;
4239		}
4240	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4241
4242	return ret_val;
4243}
4244
4245/**
4246*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4247*  @hw: pointer to the HW structure
4248*  @offset: The offset (in bytes) of the dwords to read.
4249*  @data: The 4 bytes to write to the NVM.
4250*
4251*  Writes one/two/four bytes to the NVM using the flash access registers.
4252**/
4253static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4254					    u32 data)
4255{
4256	union ich8_hws_flash_status hsfsts;
4257	union ich8_hws_flash_ctrl hsflctl;
4258	u32 flash_linear_addr;
4259	s32 ret_val;
4260	u8 count = 0;
4261
4262	if (hw->mac.type >= e1000_pch_spt) {
4263		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4264			return -E1000_ERR_NVM;
4265	}
4266	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4267			     hw->nvm.flash_base_addr);
4268	do {
4269		udelay(1);
4270		/* Steps */
4271		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4272		if (ret_val)
4273			break;
4274
4275		/* In SPT, This register is in Lan memory space, not
4276		 * flash.  Therefore, only 32 bit access is supported
4277		 */
4278		if (hw->mac.type >= e1000_pch_spt)
4279			hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4280			    >> 16;
4281		else
4282			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4283
4284		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4285		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4286
4287		/* In SPT, This register is in Lan memory space,
4288		 * not flash.  Therefore, only 32 bit access is
4289		 * supported
4290		 */
4291		if (hw->mac.type >= e1000_pch_spt)
4292			ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4293		else
4294			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4295
4296		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4297
4298		ew32flash(ICH_FLASH_FDATA0, data);
4299
4300		/* check if FCERR is set to 1 , if set to 1, clear it
4301		 * and try the whole sequence a few more times else done
4302		 */
4303		ret_val =
4304		   e1000_flash_cycle_ich8lan(hw,
4305					     ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4306
4307		if (!ret_val)
4308			break;
4309
4310		/* If we're here, then things are most likely
4311		 * completely hosed, but if the error condition
4312		 * is detected, it won't hurt to give it another
4313		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4314		 */
4315		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4316
4317		if (hsfsts.hsf_status.flcerr)
4318			/* Repeat for some time before giving up. */
4319			continue;
4320		if (!hsfsts.hsf_status.flcdone) {
4321			e_dbg("Timeout error - flash cycle did not complete.\n");
4322			break;
4323		}
4324	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4325
4326	return ret_val;
4327}
4328
4329/**
4330 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4331 *  @hw: pointer to the HW structure
4332 *  @offset: The index of the byte to read.
4333 *  @data: The byte to write to the NVM.
4334 *
4335 *  Writes a single byte to the NVM using the flash access registers.
4336 **/
4337static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4338					  u8 data)
4339{
4340	u16 word = (u16)data;
4341
4342	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4343}
4344
4345/**
4346*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4347*  @hw: pointer to the HW structure
4348*  @offset: The offset of the word to write.
4349*  @dword: The dword to write to the NVM.
4350*
4351*  Writes a single dword to the NVM using the flash access registers.
4352*  Goes through a retry algorithm before giving up.
4353**/
4354static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4355						 u32 offset, u32 dword)
4356{
4357	s32 ret_val;
4358	u16 program_retries;
4359
4360	/* Must convert word offset into bytes. */
4361	offset <<= 1;
4362	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4363
4364	if (!ret_val)
4365		return ret_val;
4366	for (program_retries = 0; program_retries < 100; program_retries++) {
4367		e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
4368		usleep_range(100, 200);
4369		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4370		if (!ret_val)
4371			break;
4372	}
4373	if (program_retries == 100)
4374		return -E1000_ERR_NVM;
4375
4376	return 0;
4377}
4378
4379/**
4380 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4381 *  @hw: pointer to the HW structure
4382 *  @offset: The offset of the byte to write.
4383 *  @byte: The byte to write to the NVM.
4384 *
4385 *  Writes a single byte to the NVM using the flash access registers.
4386 *  Goes through a retry algorithm before giving up.
4387 **/
4388static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4389						u32 offset, u8 byte)
4390{
4391	s32 ret_val;
4392	u16 program_retries;
4393
4394	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4395	if (!ret_val)
4396		return ret_val;
4397
4398	for (program_retries = 0; program_retries < 100; program_retries++) {
4399		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4400		usleep_range(100, 200);
4401		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4402		if (!ret_val)
4403			break;
4404	}
4405	if (program_retries == 100)
4406		return -E1000_ERR_NVM;
4407
4408	return 0;
4409}
4410
4411/**
4412 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4413 *  @hw: pointer to the HW structure
4414 *  @bank: 0 for first bank, 1 for second bank, etc.
4415 *
4416 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4417 *  bank N is 4096 * N + flash_reg_addr.
4418 **/
4419static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4420{
4421	struct e1000_nvm_info *nvm = &hw->nvm;
4422	union ich8_hws_flash_status hsfsts;
4423	union ich8_hws_flash_ctrl hsflctl;
4424	u32 flash_linear_addr;
4425	/* bank size is in 16bit words - adjust to bytes */
4426	u32 flash_bank_size = nvm->flash_bank_size * 2;
4427	s32 ret_val;
4428	s32 count = 0;
4429	s32 j, iteration, sector_size;
4430
4431	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4432
4433	/* Determine HW Sector size: Read BERASE bits of hw flash status
4434	 * register
4435	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4436	 *     consecutive sectors.  The start index for the nth Hw sector
4437	 *     can be calculated as = bank * 4096 + n * 256
4438	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4439	 *     The start index for the nth Hw sector can be calculated
4440	 *     as = bank * 4096
4441	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4442	 *     (ich9 only, otherwise error condition)
4443	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4444	 */
4445	switch (hsfsts.hsf_status.berasesz) {
4446	case 0:
4447		/* Hw sector size 256 */
4448		sector_size = ICH_FLASH_SEG_SIZE_256;
4449		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4450		break;
4451	case 1:
4452		sector_size = ICH_FLASH_SEG_SIZE_4K;
4453		iteration = 1;
4454		break;
4455	case 2:
4456		sector_size = ICH_FLASH_SEG_SIZE_8K;
4457		iteration = 1;
4458		break;
4459	case 3:
4460		sector_size = ICH_FLASH_SEG_SIZE_64K;
4461		iteration = 1;
4462		break;
4463	default:
4464		return -E1000_ERR_NVM;
4465	}
4466
4467	/* Start with the base address, then add the sector offset. */
4468	flash_linear_addr = hw->nvm.flash_base_addr;
4469	flash_linear_addr += (bank) ? flash_bank_size : 0;
4470
4471	for (j = 0; j < iteration; j++) {
4472		do {
4473			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4474
4475			/* Steps */
4476			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4477			if (ret_val)
4478				return ret_val;
4479
4480			/* Write a value 11 (block Erase) in Flash
4481			 * Cycle field in hw flash control
4482			 */
4483			if (hw->mac.type >= e1000_pch_spt)
4484				hsflctl.regval =
4485				    er32flash(ICH_FLASH_HSFSTS) >> 16;
4486			else
4487				hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4488
4489			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4490			if (hw->mac.type >= e1000_pch_spt)
4491				ew32flash(ICH_FLASH_HSFSTS,
4492					  hsflctl.regval << 16);
4493			else
4494				ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4495
4496			/* Write the last 24 bits of an index within the
4497			 * block into Flash Linear address field in Flash
4498			 * Address.
4499			 */
4500			flash_linear_addr += (j * sector_size);
4501			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4502
4503			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4504			if (!ret_val)
4505				break;
4506
4507			/* Check if FCERR is set to 1.  If 1,
4508			 * clear it and try the whole sequence
4509			 * a few more times else Done
4510			 */
4511			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4512			if (hsfsts.hsf_status.flcerr)
4513				/* repeat for some time before giving up */
4514				continue;
4515			else if (!hsfsts.hsf_status.flcdone)
4516				return ret_val;
4517		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4518	}
4519
4520	return 0;
4521}
4522
4523/**
4524 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4525 *  @hw: pointer to the HW structure
4526 *  @data: Pointer to the LED settings
4527 *
4528 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4529 *  settings is all 0's or F's, set the LED default to a valid LED default
4530 *  setting.
4531 **/
4532static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4533{
4534	s32 ret_val;
4535
4536	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4537	if (ret_val) {
4538		e_dbg("NVM Read Error\n");
4539		return ret_val;
4540	}
4541
4542	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4543		*data = ID_LED_DEFAULT_ICH8LAN;
4544
4545	return 0;
4546}
4547
4548/**
4549 *  e1000_id_led_init_pchlan - store LED configurations
4550 *  @hw: pointer to the HW structure
4551 *
4552 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4553 *  the PHY LED configuration register.
4554 *
4555 *  PCH also does not have an "always on" or "always off" mode which
4556 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4557 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4558 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4559 *  link based on logic in e1000_led_[on|off]_pchlan().
4560 **/
4561static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4562{
4563	struct e1000_mac_info *mac = &hw->mac;
4564	s32 ret_val;
4565	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4566	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4567	u16 data, i, temp, shift;
4568
4569	/* Get default ID LED modes */
4570	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4571	if (ret_val)
4572		return ret_val;
4573
4574	mac->ledctl_default = er32(LEDCTL);
4575	mac->ledctl_mode1 = mac->ledctl_default;
4576	mac->ledctl_mode2 = mac->ledctl_default;
4577
4578	for (i = 0; i < 4; i++) {
4579		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4580		shift = (i * 5);
4581		switch (temp) {
4582		case ID_LED_ON1_DEF2:
4583		case ID_LED_ON1_ON2:
4584		case ID_LED_ON1_OFF2:
4585			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4586			mac->ledctl_mode1 |= (ledctl_on << shift);
4587			break;
4588		case ID_LED_OFF1_DEF2:
4589		case ID_LED_OFF1_ON2:
4590		case ID_LED_OFF1_OFF2:
4591			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4592			mac->ledctl_mode1 |= (ledctl_off << shift);
4593			break;
4594		default:
4595			/* Do nothing */
4596			break;
4597		}
4598		switch (temp) {
4599		case ID_LED_DEF1_ON2:
4600		case ID_LED_ON1_ON2:
4601		case ID_LED_OFF1_ON2:
4602			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4603			mac->ledctl_mode2 |= (ledctl_on << shift);
4604			break;
4605		case ID_LED_DEF1_OFF2:
4606		case ID_LED_ON1_OFF2:
4607		case ID_LED_OFF1_OFF2:
4608			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4609			mac->ledctl_mode2 |= (ledctl_off << shift);
4610			break;
4611		default:
4612			/* Do nothing */
4613			break;
4614		}
4615	}
4616
4617	return 0;
4618}
4619
4620/**
4621 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4622 *  @hw: pointer to the HW structure
4623 *
4624 *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4625 *  register, so the the bus width is hard coded.
4626 **/
4627static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4628{
4629	struct e1000_bus_info *bus = &hw->bus;
4630	s32 ret_val;
4631
4632	ret_val = e1000e_get_bus_info_pcie(hw);
4633
4634	/* ICH devices are "PCI Express"-ish.  They have
4635	 * a configuration space, but do not contain
4636	 * PCI Express Capability registers, so bus width
4637	 * must be hardcoded.
4638	 */
4639	if (bus->width == e1000_bus_width_unknown)
4640		bus->width = e1000_bus_width_pcie_x1;
4641
4642	return ret_val;
4643}
4644
4645/**
4646 *  e1000_reset_hw_ich8lan - Reset the hardware
4647 *  @hw: pointer to the HW structure
4648 *
4649 *  Does a full reset of the hardware which includes a reset of the PHY and
4650 *  MAC.
4651 **/
4652static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4653{
4654	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4655	u16 kum_cfg;
4656	u32 ctrl, reg;
4657	s32 ret_val;
4658
4659	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4660	 * on the last TLP read/write transaction when MAC is reset.
4661	 */
4662	ret_val = e1000e_disable_pcie_master(hw);
4663	if (ret_val)
4664		e_dbg("PCI-E Master disable polling has failed.\n");
4665
4666	e_dbg("Masking off all interrupts\n");
4667	ew32(IMC, 0xffffffff);
4668
4669	/* Disable the Transmit and Receive units.  Then delay to allow
4670	 * any pending transactions to complete before we hit the MAC
4671	 * with the global reset.
4672	 */
4673	ew32(RCTL, 0);
4674	ew32(TCTL, E1000_TCTL_PSP);
4675	e1e_flush();
4676
4677	usleep_range(10000, 11000);
4678
4679	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4680	if (hw->mac.type == e1000_ich8lan) {
4681		/* Set Tx and Rx buffer allocation to 8k apiece. */
4682		ew32(PBA, E1000_PBA_8K);
4683		/* Set Packet Buffer Size to 16k. */
4684		ew32(PBS, E1000_PBS_16K);
4685	}
4686
4687	if (hw->mac.type == e1000_pchlan) {
4688		/* Save the NVM K1 bit setting */
4689		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4690		if (ret_val)
4691			return ret_val;
4692
4693		if (kum_cfg & E1000_NVM_K1_ENABLE)
4694			dev_spec->nvm_k1_enabled = true;
4695		else
4696			dev_spec->nvm_k1_enabled = false;
4697	}
4698
4699	ctrl = er32(CTRL);
4700
4701	if (!hw->phy.ops.check_reset_block(hw)) {
4702		/* Full-chip reset requires MAC and PHY reset at the same
4703		 * time to make sure the interface between MAC and the
4704		 * external PHY is reset.
4705		 */
4706		ctrl |= E1000_CTRL_PHY_RST;
4707
4708		/* Gate automatic PHY configuration by hardware on
4709		 * non-managed 82579
4710		 */
4711		if ((hw->mac.type == e1000_pch2lan) &&
4712		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4713			e1000_gate_hw_phy_config_ich8lan(hw, true);
4714	}
4715	ret_val = e1000_acquire_swflag_ich8lan(hw);
4716	e_dbg("Issuing a global reset to ich8lan\n");
4717	ew32(CTRL, (ctrl | E1000_CTRL_RST));
4718	/* cannot issue a flush here because it hangs the hardware */
4719	msleep(20);
4720
4721	/* Set Phy Config Counter to 50msec */
4722	if (hw->mac.type == e1000_pch2lan) {
4723		reg = er32(FEXTNVM3);
4724		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4725		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4726		ew32(FEXTNVM3, reg);
4727	}
4728
4729	if (!ret_val)
4730		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4731
4732	if (ctrl & E1000_CTRL_PHY_RST) {
4733		ret_val = hw->phy.ops.get_cfg_done(hw);
4734		if (ret_val)
4735			return ret_val;
4736
4737		ret_val = e1000_post_phy_reset_ich8lan(hw);
4738		if (ret_val)
4739			return ret_val;
4740	}
4741
4742	/* For PCH, this write will make sure that any noise
4743	 * will be detected as a CRC error and be dropped rather than show up
4744	 * as a bad packet to the DMA engine.
4745	 */
4746	if (hw->mac.type == e1000_pchlan)
4747		ew32(CRC_OFFSET, 0x65656565);
4748
4749	ew32(IMC, 0xffffffff);
4750	er32(ICR);
4751
4752	reg = er32(KABGTXD);
4753	reg |= E1000_KABGTXD_BGSQLBIAS;
4754	ew32(KABGTXD, reg);
4755
4756	return 0;
4757}
4758
4759/**
4760 *  e1000_init_hw_ich8lan - Initialize the hardware
4761 *  @hw: pointer to the HW structure
4762 *
4763 *  Prepares the hardware for transmit and receive by doing the following:
4764 *   - initialize hardware bits
4765 *   - initialize LED identification
4766 *   - setup receive address registers
4767 *   - setup flow control
4768 *   - setup transmit descriptors
4769 *   - clear statistics
4770 **/
4771static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4772{
4773	struct e1000_mac_info *mac = &hw->mac;
4774	u32 ctrl_ext, txdctl, snoop;
4775	s32 ret_val;
4776	u16 i;
4777
4778	e1000_initialize_hw_bits_ich8lan(hw);
4779
4780	/* Initialize identification LED */
4781	ret_val = mac->ops.id_led_init(hw);
4782	/* An error is not fatal and we should not stop init due to this */
4783	if (ret_val)
4784		e_dbg("Error initializing identification LED\n");
4785
4786	/* Setup the receive address. */
4787	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4788
4789	/* Zero out the Multicast HASH table */
4790	e_dbg("Zeroing the MTA\n");
4791	for (i = 0; i < mac->mta_reg_count; i++)
4792		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4793
4794	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4795	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4796	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4797	 */
4798	if (hw->phy.type == e1000_phy_82578) {
4799		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4800		i &= ~BM_WUC_HOST_WU_BIT;
4801		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4802		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4803		if (ret_val)
4804			return ret_val;
4805	}
4806
4807	/* Setup link and flow control */
4808	ret_val = mac->ops.setup_link(hw);
4809
4810	/* Set the transmit descriptor write-back policy for both queues */
4811	txdctl = er32(TXDCTL(0));
4812	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4813		  E1000_TXDCTL_FULL_TX_DESC_WB);
4814	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4815		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4816	ew32(TXDCTL(0), txdctl);
4817	txdctl = er32(TXDCTL(1));
4818	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4819		  E1000_TXDCTL_FULL_TX_DESC_WB);
4820	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4821		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4822	ew32(TXDCTL(1), txdctl);
4823
4824	/* ICH8 has opposite polarity of no_snoop bits.
4825	 * By default, we should use snoop behavior.
4826	 */
4827	if (mac->type == e1000_ich8lan)
4828		snoop = PCIE_ICH8_SNOOP_ALL;
4829	else
4830		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4831	e1000e_set_pcie_no_snoop(hw, snoop);
4832
4833	ctrl_ext = er32(CTRL_EXT);
4834	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4835	ew32(CTRL_EXT, ctrl_ext);
4836
4837	/* Clear all of the statistics registers (clear on read).  It is
4838	 * important that we do this after we have tried to establish link
4839	 * because the symbol error count will increment wildly if there
4840	 * is no link.
4841	 */
4842	e1000_clear_hw_cntrs_ich8lan(hw);
4843
4844	return ret_val;
4845}
4846
4847/**
4848 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4849 *  @hw: pointer to the HW structure
4850 *
4851 *  Sets/Clears required hardware bits necessary for correctly setting up the
4852 *  hardware for transmit and receive.
4853 **/
4854static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4855{
4856	u32 reg;
4857
4858	/* Extended Device Control */
4859	reg = er32(CTRL_EXT);
4860	reg |= BIT(22);
4861	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4862	if (hw->mac.type >= e1000_pchlan)
4863		reg |= E1000_CTRL_EXT_PHYPDEN;
4864	ew32(CTRL_EXT, reg);
4865
4866	/* Transmit Descriptor Control 0 */
4867	reg = er32(TXDCTL(0));
4868	reg |= BIT(22);
4869	ew32(TXDCTL(0), reg);
4870
4871	/* Transmit Descriptor Control 1 */
4872	reg = er32(TXDCTL(1));
4873	reg |= BIT(22);
4874	ew32(TXDCTL(1), reg);
4875
4876	/* Transmit Arbitration Control 0 */
4877	reg = er32(TARC(0));
4878	if (hw->mac.type == e1000_ich8lan)
4879		reg |= BIT(28) | BIT(29);
4880	reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
4881	ew32(TARC(0), reg);
4882
4883	/* Transmit Arbitration Control 1 */
4884	reg = er32(TARC(1));
4885	if (er32(TCTL) & E1000_TCTL_MULR)
4886		reg &= ~BIT(28);
4887	else
4888		reg |= BIT(28);
4889	reg |= BIT(24) | BIT(26) | BIT(30);
4890	ew32(TARC(1), reg);
4891
4892	/* Device Status */
4893	if (hw->mac.type == e1000_ich8lan) {
4894		reg = er32(STATUS);
4895		reg &= ~BIT(31);
4896		ew32(STATUS, reg);
4897	}
4898
4899	/* work-around descriptor data corruption issue during nfs v2 udp
4900	 * traffic, just disable the nfs filtering capability
4901	 */
4902	reg = er32(RFCTL);
4903	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4904
4905	/* Disable IPv6 extension header parsing because some malformed
4906	 * IPv6 headers can hang the Rx.
4907	 */
4908	if (hw->mac.type == e1000_ich8lan)
4909		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4910	ew32(RFCTL, reg);
4911
4912	/* Enable ECC on Lynxpoint */
4913	if (hw->mac.type >= e1000_pch_lpt) {
4914		reg = er32(PBECCSTS);
4915		reg |= E1000_PBECCSTS_ECC_ENABLE;
4916		ew32(PBECCSTS, reg);
4917
4918		reg = er32(CTRL);
4919		reg |= E1000_CTRL_MEHE;
4920		ew32(CTRL, reg);
4921	}
4922}
4923
4924/**
4925 *  e1000_setup_link_ich8lan - Setup flow control and link settings
4926 *  @hw: pointer to the HW structure
4927 *
4928 *  Determines which flow control settings to use, then configures flow
4929 *  control.  Calls the appropriate media-specific link configuration
4930 *  function.  Assuming the adapter has a valid link partner, a valid link
4931 *  should be established.  Assumes the hardware has previously been reset
4932 *  and the transmitter and receiver are not enabled.
4933 **/
4934static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4935{
4936	s32 ret_val;
4937
4938	if (hw->phy.ops.check_reset_block(hw))
4939		return 0;
4940
4941	/* ICH parts do not have a word in the NVM to determine
4942	 * the default flow control setting, so we explicitly
4943	 * set it to full.
4944	 */
4945	if (hw->fc.requested_mode == e1000_fc_default) {
4946		/* Workaround h/w hang when Tx flow control enabled */
4947		if (hw->mac.type == e1000_pchlan)
4948			hw->fc.requested_mode = e1000_fc_rx_pause;
4949		else
4950			hw->fc.requested_mode = e1000_fc_full;
4951	}
4952
4953	/* Save off the requested flow control mode for use later.  Depending
4954	 * on the link partner's capabilities, we may or may not use this mode.
4955	 */
4956	hw->fc.current_mode = hw->fc.requested_mode;
4957
4958	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
4959
4960	/* Continue to configure the copper link. */
4961	ret_val = hw->mac.ops.setup_physical_interface(hw);
4962	if (ret_val)
4963		return ret_val;
4964
4965	ew32(FCTTV, hw->fc.pause_time);
4966	if ((hw->phy.type == e1000_phy_82578) ||
4967	    (hw->phy.type == e1000_phy_82579) ||
4968	    (hw->phy.type == e1000_phy_i217) ||
4969	    (hw->phy.type == e1000_phy_82577)) {
4970		ew32(FCRTV_PCH, hw->fc.refresh_time);
4971
4972		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4973				   hw->fc.pause_time);
4974		if (ret_val)
4975			return ret_val;
4976	}
4977
4978	return e1000e_set_fc_watermarks(hw);
4979}
4980
4981/**
4982 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4983 *  @hw: pointer to the HW structure
4984 *
4985 *  Configures the kumeran interface to the PHY to wait the appropriate time
4986 *  when polling the PHY, then call the generic setup_copper_link to finish
4987 *  configuring the copper link.
4988 **/
4989static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4990{
4991	u32 ctrl;
4992	s32 ret_val;
4993	u16 reg_data;
4994
4995	ctrl = er32(CTRL);
4996	ctrl |= E1000_CTRL_SLU;
4997	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4998	ew32(CTRL, ctrl);
4999
5000	/* Set the mac to wait the maximum time between each iteration
5001	 * and increase the max iterations when polling the phy;
5002	 * this fixes erroneous timeouts at 10Mbps.
5003	 */
5004	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
5005	if (ret_val)
5006		return ret_val;
5007	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5008				       &reg_data);
5009	if (ret_val)
5010		return ret_val;
5011	reg_data |= 0x3F;
5012	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5013					reg_data);
5014	if (ret_val)
5015		return ret_val;
5016
5017	switch (hw->phy.type) {
5018	case e1000_phy_igp_3:
5019		ret_val = e1000e_copper_link_setup_igp(hw);
5020		if (ret_val)
5021			return ret_val;
5022		break;
5023	case e1000_phy_bm:
5024	case e1000_phy_82578:
5025		ret_val = e1000e_copper_link_setup_m88(hw);
5026		if (ret_val)
5027			return ret_val;
5028		break;
5029	case e1000_phy_82577:
5030	case e1000_phy_82579:
5031		ret_val = e1000_copper_link_setup_82577(hw);
5032		if (ret_val)
5033			return ret_val;
5034		break;
5035	case e1000_phy_ife:
5036		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5037		if (ret_val)
5038			return ret_val;
5039
5040		reg_data &= ~IFE_PMC_AUTO_MDIX;
5041
5042		switch (hw->phy.mdix) {
5043		case 1:
5044			reg_data &= ~IFE_PMC_FORCE_MDIX;
5045			break;
5046		case 2:
5047			reg_data |= IFE_PMC_FORCE_MDIX;
5048			break;
5049		case 0:
5050		default:
5051			reg_data |= IFE_PMC_AUTO_MDIX;
5052			break;
5053		}
5054		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5055		if (ret_val)
5056			return ret_val;
5057		break;
5058	default:
5059		break;
5060	}
5061
5062	return e1000e_setup_copper_link(hw);
5063}
5064
5065/**
5066 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5067 *  @hw: pointer to the HW structure
5068 *
5069 *  Calls the PHY specific link setup function and then calls the
5070 *  generic setup_copper_link to finish configuring the link for
5071 *  Lynxpoint PCH devices
5072 **/
5073static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5074{
5075	u32 ctrl;
5076	s32 ret_val;
5077
5078	ctrl = er32(CTRL);
5079	ctrl |= E1000_CTRL_SLU;
5080	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5081	ew32(CTRL, ctrl);
5082
5083	ret_val = e1000_copper_link_setup_82577(hw);
5084	if (ret_val)
5085		return ret_val;
5086
5087	return e1000e_setup_copper_link(hw);
5088}
5089
5090/**
5091 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5092 *  @hw: pointer to the HW structure
5093 *  @speed: pointer to store current link speed
5094 *  @duplex: pointer to store the current link duplex
5095 *
5096 *  Calls the generic get_speed_and_duplex to retrieve the current link
5097 *  information and then calls the Kumeran lock loss workaround for links at
5098 *  gigabit speeds.
5099 **/
5100static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5101					  u16 *duplex)
5102{
5103	s32 ret_val;
5104
5105	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
5106	if (ret_val)
5107		return ret_val;
5108
5109	if ((hw->mac.type == e1000_ich8lan) &&
5110	    (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
5111		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5112	}
5113
5114	return ret_val;
5115}
5116
5117/**
5118 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5119 *  @hw: pointer to the HW structure
5120 *
5121 *  Work-around for 82566 Kumeran PCS lock loss:
5122 *  On link status change (i.e. PCI reset, speed change) and link is up and
5123 *  speed is gigabit-
5124 *    0) if workaround is optionally disabled do nothing
5125 *    1) wait 1ms for Kumeran link to come up
5126 *    2) check Kumeran Diagnostic register PCS lock loss bit
5127 *    3) if not set the link is locked (all is good), otherwise...
5128 *    4) reset the PHY
5129 *    5) repeat up to 10 times
5130 *  Note: this is only called for IGP3 copper when speed is 1gb.
5131 **/
5132static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5133{
5134	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5135	u32 phy_ctrl;
5136	s32 ret_val;
5137	u16 i, data;
5138	bool link;
5139
5140	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5141		return 0;
5142
5143	/* Make sure link is up before proceeding.  If not just return.
5144	 * Attempting this while link is negotiating fouled up link
5145	 * stability
5146	 */
5147	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5148	if (!link)
5149		return 0;
5150
5151	for (i = 0; i < 10; i++) {
5152		/* read once to clear */
5153		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5154		if (ret_val)
5155			return ret_val;
5156		/* and again to get new status */
5157		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5158		if (ret_val)
5159			return ret_val;
5160
5161		/* check for PCS lock */
5162		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5163			return 0;
5164
5165		/* Issue PHY reset */
5166		e1000_phy_hw_reset(hw);
5167		mdelay(5);
5168	}
5169	/* Disable GigE link negotiation */
5170	phy_ctrl = er32(PHY_CTRL);
5171	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5172		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5173	ew32(PHY_CTRL, phy_ctrl);
5174
5175	/* Call gig speed drop workaround on Gig disable before accessing
5176	 * any PHY registers
5177	 */
5178	e1000e_gig_downshift_workaround_ich8lan(hw);
5179
5180	/* unable to acquire PCS lock */
5181	return -E1000_ERR_PHY;
5182}
5183
5184/**
5185 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5186 *  @hw: pointer to the HW structure
5187 *  @state: boolean value used to set the current Kumeran workaround state
5188 *
5189 *  If ICH8, set the current Kumeran workaround state (enabled - true
5190 *  /disabled - false).
5191 **/
5192void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5193						  bool state)
5194{
5195	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5196
5197	if (hw->mac.type != e1000_ich8lan) {
5198		e_dbg("Workaround applies to ICH8 only.\n");
5199		return;
5200	}
5201
5202	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5203}
5204
5205/**
5206 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5207 *  @hw: pointer to the HW structure
5208 *
5209 *  Workaround for 82566 power-down on D3 entry:
5210 *    1) disable gigabit link
5211 *    2) write VR power-down enable
5212 *    3) read it back
5213 *  Continue if successful, else issue LCD reset and repeat
5214 **/
5215void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5216{
5217	u32 reg;
5218	u16 data;
5219	u8 retry = 0;
5220
5221	if (hw->phy.type != e1000_phy_igp_3)
5222		return;
5223
5224	/* Try the workaround twice (if needed) */
5225	do {
5226		/* Disable link */
5227		reg = er32(PHY_CTRL);
5228		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5229			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5230		ew32(PHY_CTRL, reg);
5231
5232		/* Call gig speed drop workaround on Gig disable before
5233		 * accessing any PHY registers
5234		 */
5235		if (hw->mac.type == e1000_ich8lan)
5236			e1000e_gig_downshift_workaround_ich8lan(hw);
5237
5238		/* Write VR power-down enable */
5239		e1e_rphy(hw, IGP3_VR_CTRL, &data);
5240		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5241		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5242
5243		/* Read it back and test */
5244		e1e_rphy(hw, IGP3_VR_CTRL, &data);
5245		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5246		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5247			break;
5248
5249		/* Issue PHY reset and repeat at most one more time */
5250		reg = er32(CTRL);
5251		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5252		retry++;
5253	} while (retry);
5254}
5255
5256/**
5257 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5258 *  @hw: pointer to the HW structure
5259 *
5260 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5261 *  LPLU, Gig disable, MDIC PHY reset):
5262 *    1) Set Kumeran Near-end loopback
5263 *    2) Clear Kumeran Near-end loopback
5264 *  Should only be called for ICH8[m] devices with any 1G Phy.
5265 **/
5266void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5267{
5268	s32 ret_val;
5269	u16 reg_data;
5270
5271	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5272		return;
5273
5274	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5275				       &reg_data);
5276	if (ret_val)
5277		return;
5278	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5279	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5280					reg_data);
5281	if (ret_val)
5282		return;
5283	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5284	e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5285}
5286
5287/**
5288 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5289 *  @hw: pointer to the HW structure
5290 *
5291 *  During S0 to Sx transition, it is possible the link remains at gig
5292 *  instead of negotiating to a lower speed.  Before going to Sx, set
5293 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5294 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5295 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5296 *  needs to be written.
5297 *  Parts that support (and are linked to a partner which support) EEE in
5298 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5299 *  than 10Mbps w/o EEE.
5300 **/
5301void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5302{
5303	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5304	u32 phy_ctrl;
5305	s32 ret_val;
5306
5307	phy_ctrl = er32(PHY_CTRL);
5308	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5309
5310	if (hw->phy.type == e1000_phy_i217) {
5311		u16 phy_reg, device_id = hw->adapter->pdev->device;
5312
5313		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5314		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5315		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5316		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5317		    (hw->mac.type >= e1000_pch_spt)) {
5318			u32 fextnvm6 = er32(FEXTNVM6);
5319
5320			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5321		}
5322
5323		ret_val = hw->phy.ops.acquire(hw);
5324		if (ret_val)
5325			goto out;
5326
5327		if (!dev_spec->eee_disable) {
5328			u16 eee_advert;
5329
5330			ret_val =
5331			    e1000_read_emi_reg_locked(hw,
5332						      I217_EEE_ADVERTISEMENT,
5333						      &eee_advert);
5334			if (ret_val)
5335				goto release;
5336
5337			/* Disable LPLU if both link partners support 100BaseT
5338			 * EEE and 100Full is advertised on both ends of the
5339			 * link, and enable Auto Enable LPI since there will
5340			 * be no driver to enable LPI while in Sx.
5341			 */
5342			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5343			    (dev_spec->eee_lp_ability &
5344			     I82579_EEE_100_SUPPORTED) &&
5345			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5346				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5347					      E1000_PHY_CTRL_NOND0A_LPLU);
5348
5349				/* Set Auto Enable LPI after link up */
5350				e1e_rphy_locked(hw,
5351						I217_LPI_GPIO_CTRL, &phy_reg);
5352				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5353				e1e_wphy_locked(hw,
5354						I217_LPI_GPIO_CTRL, phy_reg);
5355			}
5356		}
5357
5358		/* For i217 Intel Rapid Start Technology support,
5359		 * when the system is going into Sx and no manageability engine
5360		 * is present, the driver must configure proxy to reset only on
5361		 * power good.  LPI (Low Power Idle) state must also reset only
5362		 * on power good, as well as the MTA (Multicast table array).
5363		 * The SMBus release must also be disabled on LCD reset.
5364		 */
5365		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5366			/* Enable proxy to reset only on power good. */
5367			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5368			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5369			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5370
5371			/* Set bit enable LPI (EEE) to reset only on
5372			 * power good.
5373			 */
5374			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5375			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5376			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5377
5378			/* Disable the SMB release on LCD reset. */
5379			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5380			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5381			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5382		}
5383
5384		/* Enable MTA to reset for Intel Rapid Start Technology
5385		 * Support
5386		 */
5387		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5388		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5389		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5390
5391release:
5392		hw->phy.ops.release(hw);
5393	}
5394out:
5395	ew32(PHY_CTRL, phy_ctrl);
5396
5397	if (hw->mac.type == e1000_ich8lan)
5398		e1000e_gig_downshift_workaround_ich8lan(hw);
5399
5400	if (hw->mac.type >= e1000_pchlan) {
5401		e1000_oem_bits_config_ich8lan(hw, false);
5402
5403		/* Reset PHY to activate OEM bits on 82577/8 */
5404		if (hw->mac.type == e1000_pchlan)
5405			e1000e_phy_hw_reset_generic(hw);
5406
5407		ret_val = hw->phy.ops.acquire(hw);
5408		if (ret_val)
5409			return;
5410		e1000_write_smbus_addr(hw);
5411		hw->phy.ops.release(hw);
5412	}
5413}
5414
5415/**
5416 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5417 *  @hw: pointer to the HW structure
5418 *
5419 *  During Sx to S0 transitions on non-managed devices or managed devices
5420 *  on which PHY resets are not blocked, if the PHY registers cannot be
5421 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5422 *  the PHY.
5423 *  On i217, setup Intel Rapid Start Technology.
5424 **/
5425void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5426{
5427	s32 ret_val;
5428
5429	if (hw->mac.type < e1000_pch2lan)
5430		return;
5431
5432	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5433	if (ret_val) {
5434		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5435		return;
5436	}
5437
5438	/* For i217 Intel Rapid Start Technology support when the system
5439	 * is transitioning from Sx and no manageability engine is present
5440	 * configure SMBus to restore on reset, disable proxy, and enable
5441	 * the reset on MTA (Multicast table array).
5442	 */
5443	if (hw->phy.type == e1000_phy_i217) {
5444		u16 phy_reg;
5445
5446		ret_val = hw->phy.ops.acquire(hw);
5447		if (ret_val) {
5448			e_dbg("Failed to setup iRST\n");
5449			return;
5450		}
5451
5452		/* Clear Auto Enable LPI after link up */
5453		e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5454		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5455		e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5456
5457		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5458			/* Restore clear on SMB if no manageability engine
5459			 * is present
5460			 */
5461			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5462			if (ret_val)
5463				goto release;
5464			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5465			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5466
5467			/* Disable Proxy */
5468			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5469		}
5470		/* Enable reset on MTA */
5471		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5472		if (ret_val)
5473			goto release;
5474		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5475		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5476release:
5477		if (ret_val)
5478			e_dbg("Error %d in resume workarounds\n", ret_val);
5479		hw->phy.ops.release(hw);
5480	}
5481}
5482
5483/**
5484 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5485 *  @hw: pointer to the HW structure
5486 *
5487 *  Return the LED back to the default configuration.
5488 **/
5489static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5490{
5491	if (hw->phy.type == e1000_phy_ife)
5492		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5493
5494	ew32(LEDCTL, hw->mac.ledctl_default);
5495	return 0;
5496}
5497
5498/**
5499 *  e1000_led_on_ich8lan - Turn LEDs on
5500 *  @hw: pointer to the HW structure
5501 *
5502 *  Turn on the LEDs.
5503 **/
5504static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5505{
5506	if (hw->phy.type == e1000_phy_ife)
5507		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5508				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5509
5510	ew32(LEDCTL, hw->mac.ledctl_mode2);
5511	return 0;
5512}
5513
5514/**
5515 *  e1000_led_off_ich8lan - Turn LEDs off
5516 *  @hw: pointer to the HW structure
5517 *
5518 *  Turn off the LEDs.
5519 **/
5520static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5521{
5522	if (hw->phy.type == e1000_phy_ife)
5523		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5524				(IFE_PSCL_PROBE_MODE |
5525				 IFE_PSCL_PROBE_LEDS_OFF));
5526
5527	ew32(LEDCTL, hw->mac.ledctl_mode1);
5528	return 0;
5529}
5530
5531/**
5532 *  e1000_setup_led_pchlan - Configures SW controllable LED
5533 *  @hw: pointer to the HW structure
5534 *
5535 *  This prepares the SW controllable LED for use.
5536 **/
5537static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5538{
5539	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5540}
5541
5542/**
5543 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5544 *  @hw: pointer to the HW structure
5545 *
5546 *  Return the LED back to the default configuration.
5547 **/
5548static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5549{
5550	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5551}
5552
5553/**
5554 *  e1000_led_on_pchlan - Turn LEDs on
5555 *  @hw: pointer to the HW structure
5556 *
5557 *  Turn on the LEDs.
5558 **/
5559static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5560{
5561	u16 data = (u16)hw->mac.ledctl_mode2;
5562	u32 i, led;
5563
5564	/* If no link, then turn LED on by setting the invert bit
5565	 * for each LED that's mode is "link_up" in ledctl_mode2.
5566	 */
5567	if (!(er32(STATUS) & E1000_STATUS_LU)) {
5568		for (i = 0; i < 3; i++) {
5569			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5570			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5571			    E1000_LEDCTL_MODE_LINK_UP)
5572				continue;
5573			if (led & E1000_PHY_LED0_IVRT)
5574				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5575			else
5576				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5577		}
5578	}
5579
5580	return e1e_wphy(hw, HV_LED_CONFIG, data);
5581}
5582
5583/**
5584 *  e1000_led_off_pchlan - Turn LEDs off
5585 *  @hw: pointer to the HW structure
5586 *
5587 *  Turn off the LEDs.
5588 **/
5589static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5590{
5591	u16 data = (u16)hw->mac.ledctl_mode1;
5592	u32 i, led;
5593
5594	/* If no link, then turn LED off by clearing the invert bit
5595	 * for each LED that's mode is "link_up" in ledctl_mode1.
5596	 */
5597	if (!(er32(STATUS) & E1000_STATUS_LU)) {
5598		for (i = 0; i < 3; i++) {
5599			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5600			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5601			    E1000_LEDCTL_MODE_LINK_UP)
5602				continue;
5603			if (led & E1000_PHY_LED0_IVRT)
5604				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5605			else
5606				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5607		}
5608	}
5609
5610	return e1e_wphy(hw, HV_LED_CONFIG, data);
5611}
5612
5613/**
5614 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5615 *  @hw: pointer to the HW structure
5616 *
5617 *  Read appropriate register for the config done bit for completion status
5618 *  and configure the PHY through s/w for EEPROM-less parts.
5619 *
5620 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5621 *  config done bit, so only an error is logged and continues.  If we were
5622 *  to return with error, EEPROM-less silicon would not be able to be reset
5623 *  or change link.
5624 **/
5625static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5626{
5627	s32 ret_val = 0;
5628	u32 bank = 0;
5629	u32 status;
5630
5631	e1000e_get_cfg_done_generic(hw);
5632
5633	/* Wait for indication from h/w that it has completed basic config */
5634	if (hw->mac.type >= e1000_ich10lan) {
5635		e1000_lan_init_done_ich8lan(hw);
5636	} else {
5637		ret_val = e1000e_get_auto_rd_done(hw);
5638		if (ret_val) {
5639			/* When auto config read does not complete, do not
5640			 * return with an error. This can happen in situations
5641			 * where there is no eeprom and prevents getting link.
5642			 */
5643			e_dbg("Auto Read Done did not complete\n");
5644			ret_val = 0;
5645		}
5646	}
5647
5648	/* Clear PHY Reset Asserted bit */
5649	status = er32(STATUS);
5650	if (status & E1000_STATUS_PHYRA)
5651		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5652	else
5653		e_dbg("PHY Reset Asserted not set - needs delay\n");
5654
5655	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5656	if (hw->mac.type <= e1000_ich9lan) {
5657		if (!(er32(EECD) & E1000_EECD_PRES) &&
5658		    (hw->phy.type == e1000_phy_igp_3)) {
5659			e1000e_phy_init_script_igp3(hw);
5660		}
5661	} else {
5662		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5663			/* Maybe we should do a basic PHY config */
5664			e_dbg("EEPROM not present\n");
5665			ret_val = -E1000_ERR_CONFIG;
5666		}
5667	}
5668
5669	return ret_val;
5670}
5671
5672/**
5673 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5674 * @hw: pointer to the HW structure
5675 *
5676 * In the case of a PHY power down to save power, or to turn off link during a
5677 * driver unload, or wake on lan is not enabled, remove the link.
5678 **/
5679static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5680{
5681	/* If the management interface is not enabled, then power down */
5682	if (!(hw->mac.ops.check_mng_mode(hw) ||
5683	      hw->phy.ops.check_reset_block(hw)))
5684		e1000_power_down_phy_copper(hw);
5685}
5686
5687/**
5688 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5689 *  @hw: pointer to the HW structure
5690 *
5691 *  Clears hardware counters specific to the silicon family and calls
5692 *  clear_hw_cntrs_generic to clear all general purpose counters.
5693 **/
5694static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5695{
5696	u16 phy_data;
5697	s32 ret_val;
5698
5699	e1000e_clear_hw_cntrs_base(hw);
5700
5701	er32(ALGNERRC);
5702	er32(RXERRC);
5703	er32(TNCRS);
5704	er32(CEXTERR);
5705	er32(TSCTC);
5706	er32(TSCTFC);
5707
5708	er32(MGTPRC);
5709	er32(MGTPDC);
5710	er32(MGTPTC);
5711
5712	er32(IAC);
5713	er32(ICRXOC);
5714
5715	/* Clear PHY statistics registers */
5716	if ((hw->phy.type == e1000_phy_82578) ||
5717	    (hw->phy.type == e1000_phy_82579) ||
5718	    (hw->phy.type == e1000_phy_i217) ||
5719	    (hw->phy.type == e1000_phy_82577)) {
5720		ret_val = hw->phy.ops.acquire(hw);
5721		if (ret_val)
5722			return;
5723		ret_val = hw->phy.ops.set_page(hw,
5724					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
5725		if (ret_val)
5726			goto release;
5727		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5728		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5729		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5730		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5731		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5732		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5733		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5734		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5735		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5736		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5737		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5738		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5739		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5740		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5741release:
5742		hw->phy.ops.release(hw);
5743	}
5744}
5745
/* MAC operations shared by all ICH/PCH variants; entries noted below as
 * "dependent on mac type" are filled in elsewhere per specific MAC.
 */
static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist	= e1000e_config_collision_dist_generic,
	.rar_set		= e1000e_rar_set_generic,
	.rar_get_count		= e1000e_rar_get_count_generic,
};
5766
/* PHY operations shared by all ICH/PCH variants (IGP-style register
 * access by default; overridden elsewhere for other PHY types).
 */
static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};
5780
/* NVM (flash) operations for ICH8 through LPT parts */
static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read			= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};
5791
/* NVM operations for SPT and newer parts - SPT-specific read/update */
static const struct e1000_nvm_operations spt_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.read			= e1000_read_nvm_spt,
	.update			= e1000_update_nvm_checksum_spt,
	.reload			= e1000e_reload_nvm_generic,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};
5802
/* Device configuration for the e1000_ich8lan MAC family */
const struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5818
/* Device configuration for the e1000_ich9lan MAC family (adds jumbo) */
const struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5835
/* Device configuration for the e1000_ich10lan MAC family */
const struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5852
/* Device configuration for the e1000_pchlan MAC family */
const struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5871
/* Device configuration for the e1000_pch2lan MAC family (adds EEE and
 * hardware timestamping)
 */
const struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE
				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5892
/* Device configuration for the e1000_pch_lpt MAC family */
const struct e1000_info e1000_pch_lpt_info = {
	.mac			= e1000_pch_lpt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE
				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5913
/* Device configuration for the e1000_pch_spt MAC family (SPT NVM ops) */
const struct e1000_info e1000_pch_spt_info = {
	.mac			= e1000_pch_spt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &spt_nvm_ops,
};
5933
/* Device configuration for the e1000_pch_cnp MAC family (SPT NVM ops) */
const struct e1000_info e1000_pch_cnp_info = {
	.mac			= e1000_pch_cnp,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &spt_nvm_ops,
};