/drivers/net/ethernet/intel/e1000e/ich8lan.c

Source: http://github.com/mirrors/linux · C · 5952 lines (listing truncated by the viewer)


  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 1999 - 2018 Intel Corporation. */
  3. /* 82562G 10/100 Network Connection
  4. * 82562G-2 10/100 Network Connection
  5. * 82562GT 10/100 Network Connection
  6. * 82562GT-2 10/100 Network Connection
  7. * 82562V 10/100 Network Connection
  8. * 82562V-2 10/100 Network Connection
  9. * 82566DC-2 Gigabit Network Connection
  10. * 82566DC Gigabit Network Connection
  11. * 82566DM-2 Gigabit Network Connection
  12. * 82566DM Gigabit Network Connection
  13. * 82566MC Gigabit Network Connection
  14. * 82566MM Gigabit Network Connection
  15. * 82567LM Gigabit Network Connection
  16. * 82567LF Gigabit Network Connection
  17. * 82567V Gigabit Network Connection
  18. * 82567LM-2 Gigabit Network Connection
  19. * 82567LF-2 Gigabit Network Connection
  20. * 82567V-2 Gigabit Network Connection
  21. * 82567LF-3 Gigabit Network Connection
  22. * 82567LM-3 Gigabit Network Connection
  23. * 82567LM-4 Gigabit Network Connection
  24. * 82577LM Gigabit Network Connection
  25. * 82577LC Gigabit Network Connection
  26. * 82578DM Gigabit Network Connection
  27. * 82578DC Gigabit Network Connection
  28. * 82579LM Gigabit Network Connection
  29. * 82579V Gigabit Network Connection
  30. * Ethernet Connection I217-LM
  31. * Ethernet Connection I217-V
  32. * Ethernet Connection I218-V
  33. * Ethernet Connection I218-LM
  34. * Ethernet Connection (2) I218-LM
  35. * Ethernet Connection (2) I218-V
  36. * Ethernet Connection (3) I218-LM
  37. * Ethernet Connection (3) I218-V
  38. */
  39. #include "e1000.h"
  40. /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
  41. /* Offset 04h HSFSTS */
  42. union ich8_hws_flash_status {
  43. struct ich8_hsfsts {
  44. u16 flcdone:1; /* bit 0 Flash Cycle Done */
  45. u16 flcerr:1; /* bit 1 Flash Cycle Error */
  46. u16 dael:1; /* bit 2 Direct Access error Log */
  47. u16 berasesz:2; /* bit 4:3 Sector Erase Size */
  48. u16 flcinprog:1; /* bit 5 flash cycle in Progress */
  49. u16 reserved1:2; /* bit 7:6 Reserved */
  50. u16 reserved2:6; /* bit 13:8 Reserved */
  51. u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
  52. u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
  53. } hsf_status;
  54. u16 regval;
  55. };
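/* Editor's note (illustrative sketch, not upstream code): the union lets the
 * driver read HSFSTS once and then test individual fields, e.g.
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 *		;	// previous flash cycle finished cleanly
 *
 * er16flash() is the flash MMIO accessor defined further down in this file.
 */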
  56. /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
  57. /* Offset 06h FLCTL */
  58. union ich8_hws_flash_ctrl {
  59. struct ich8_hsflctl {
  60. u16 flcgo:1; /* 0 Flash Cycle Go */
  61. u16 flcycle:2; /* 2:1 Flash Cycle */
  62. u16 reserved:5; /* 7:3 Reserved */
  63. u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
  64. u16 flockdn:6; /* 15:10 Reserved */
  65. } hsf_ctrl;
  66. u16 regval;
  67. };
  68. /* ICH Flash Region Access Permissions */
  69. union ich8_hws_flash_regacc {
  70. struct ich8_flracc {
  71. u32 grra:8; /* 0:7 GbE region Read Access */
  72. u32 grwa:8; /* 8:15 GbE region Write Access */
  73. u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
  74. u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
  75. } hsf_flregacc;
  76. u16 regval;
  77. };
  78. /* ICH Flash Protected Region */
  79. union ich8_flash_protected_range {
  80. struct ich8_pr {
  81. u32 base:13; /* 0:12 Protected Range Base */
  82. u32 reserved1:2; /* 13:14 Reserved */
  83. u32 rpe:1; /* 15 Read Protection Enable */
  84. u32 limit:13; /* 16:28 Protected Range Limit */
  85. u32 reserved2:2; /* 29:30 Reserved */
  86. u32 wpe:1; /* 31 Write Protection Enable */
  87. } range;
  88. u32 regval;
  89. };
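/* Editor's note (sketch; the 4 KiB granularity is inferred from the
 * FLASH_SECTOR_ADDR_SHIFT usage later in this file, so treat it as an
 * assumption): base/limit select a protected window in sector units, roughly
 *
 *	start = pr.range.base  << FLASH_SECTOR_ADDR_SHIFT;
 *	end   = pr.range.limit << FLASH_SECTOR_ADDR_SHIFT;
 *
 * with rpe/wpe gating read and write access to that window.
 */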
  90. static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
  91. static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
  92. static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
  93. static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
  94. u32 offset, u8 byte);
  95. static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
  96. u8 *data);
  97. static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
  98. u16 *data);
  99. static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
  100. u8 size, u16 *data);
  101. static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
  102. u32 *data);
  103. static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
  104. u32 offset, u32 *data);
  105. static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
  106. u32 offset, u32 data);
  107. static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
  108. u32 offset, u32 dword);
  109. static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
  110. static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
  111. static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
  112. static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
  113. static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
  114. static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
  115. static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
  116. static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
  117. static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
  118. static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
  119. static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
  120. static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
  121. static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
  122. static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
  123. static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
  124. static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
  125. static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
  126. static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
  127. static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
  128. static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
  129. static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
  130. static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
  131. static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
  132. static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
  133. static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
  134. {
  135. return readw(hw->flash_address + reg);
  136. }
  137. static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
  138. {
  139. return readl(hw->flash_address + reg);
  140. }
  141. static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
  142. {
  143. writew(val, hw->flash_address + reg);
  144. }
  145. static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
  146. {
  147. writel(val, hw->flash_address + reg);
  148. }
  149. #define er16flash(reg) __er16flash(hw, (reg))
  150. #define er32flash(reg) __er32flash(hw, (reg))
  151. #define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
  152. #define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
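/* Editor's note (illustrative only): the er16flash()/ew16flash() and
 * er32flash()/ew32flash() wrappers mirror the MAC-register er32()/ew32()
 * style but target the mapped flash BAR, e.g.
 *
 *	union ich8_hws_flash_ctrl hsflctl;
 *
 *	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 *	hsflctl.hsf_ctrl.flcgo = 1;
 *	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 *
 * which is the pattern the flash read/write helpers in this file follow.
 */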
  153. /**
  154. * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
  155. * @hw: pointer to the HW structure
  156. *
  157. * Test access to the PHY registers by reading the PHY ID registers. If
  158. * the PHY ID is already known (e.g. resume path) compare it with known ID,
  159. * otherwise assume the read PHY ID is correct if it is valid.
  160. *
  161. * Assumes the sw/fw/hw semaphore is already acquired.
  162. **/
  163. static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
  164. {
  165. u16 phy_reg = 0;
  166. u32 phy_id = 0;
  167. s32 ret_val = 0;
  168. u16 retry_count;
  169. u32 mac_reg = 0;
  170. for (retry_count = 0; retry_count < 2; retry_count++) {
  171. ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
  172. if (ret_val || (phy_reg == 0xFFFF))
  173. continue;
  174. phy_id = (u32)(phy_reg << 16);
  175. ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
  176. if (ret_val || (phy_reg == 0xFFFF)) {
  177. phy_id = 0;
  178. continue;
  179. }
  180. phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
  181. break;
  182. }
  183. if (hw->phy.id) {
  184. if (hw->phy.id == phy_id)
  185. goto out;
  186. } else if (phy_id) {
  187. hw->phy.id = phy_id;
  188. hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
  189. goto out;
  190. }
  191. /* In case the PHY needs to be in mdio slow mode,
  192. * set slow mode and try to get the PHY id again.
  193. */
  194. if (hw->mac.type < e1000_pch_lpt) {
  195. hw->phy.ops.release(hw);
  196. ret_val = e1000_set_mdio_slow_mode_hv(hw);
  197. if (!ret_val)
  198. ret_val = e1000e_get_phy_id(hw);
  199. hw->phy.ops.acquire(hw);
  200. }
  201. if (ret_val)
  202. return false;
  203. out:
  204. if (hw->mac.type >= e1000_pch_lpt) {
  205. /* Only unforce SMBus if ME is not active */
  206. if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
  207. /* Unforce SMBus mode in PHY */
  208. e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
  209. phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
  210. e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
  211. /* Unforce SMBus mode in MAC */
  212. mac_reg = er32(CTRL_EXT);
  213. mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
  214. ew32(CTRL_EXT, mac_reg);
  215. }
  216. }
  217. return true;
  218. }
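/* Editor's note, worked example (register values purely illustrative): with
 * MII_PHYSID1 = 0x0154 and MII_PHYSID2 = 0x2A83 the loop above produces
 * phy_id = (0x0154 << 16) | (0x2A83 & PHY_REVISION_MASK) = 0x01542A80 and a
 * stored revision of 0x2A83 & ~PHY_REVISION_MASK = 3, i.e. the low revision
 * nibble is masked out of the ID that gets compared on resume.
 */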
  219. /**
  220. * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
  221. * @hw: pointer to the HW structure
  222. *
  223. * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
  224. * used to reset the PHY to a quiescent state when necessary.
  225. **/
  226. static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
  227. {
  228. u32 mac_reg;
  229. /* Set Phy Config Counter to 50msec */
  230. mac_reg = er32(FEXTNVM3);
  231. mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
  232. mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
  233. ew32(FEXTNVM3, mac_reg);
  234. /* Toggle LANPHYPC Value bit */
  235. mac_reg = er32(CTRL);
  236. mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
  237. mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
  238. ew32(CTRL, mac_reg);
  239. e1e_flush();
  240. usleep_range(10, 20);
  241. mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
  242. ew32(CTRL, mac_reg);
  243. e1e_flush();
  244. if (hw->mac.type < e1000_pch_lpt) {
  245. msleep(50);
  246. } else {
  247. u16 count = 20;
  248. do {
  249. usleep_range(5000, 6000);
  250. } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
  251. msleep(30);
  252. }
  253. }
  254. /**
  255. * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
  256. * @hw: pointer to the HW structure
  257. *
  258. * Workarounds/flow necessary for PHY initialization during driver load
  259. * and resume paths.
  260. **/
  261. static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
  262. {
  263. struct e1000_adapter *adapter = hw->adapter;
  264. u32 mac_reg, fwsm = er32(FWSM);
  265. s32 ret_val;
  266. /* Gate automatic PHY configuration by hardware on managed and
  267. * non-managed 82579 and newer adapters.
  268. */
  269. e1000_gate_hw_phy_config_ich8lan(hw, true);
  270. /* It is not possible to be certain of the current state of ULP
  271. * so forcibly disable it.
  272. */
  273. hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
  274. e1000_disable_ulp_lpt_lp(hw, true);
  275. ret_val = hw->phy.ops.acquire(hw);
  276. if (ret_val) {
  277. e_dbg("Failed to initialize PHY flow\n");
  278. goto out;
  279. }
  280. /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
  281. * inaccessible and resetting the PHY is not blocked, toggle the
  282. * LANPHYPC Value bit to force the interconnect to PCIe mode.
  283. */
  284. switch (hw->mac.type) {
  285. case e1000_pch_lpt:
  286. case e1000_pch_spt:
  287. case e1000_pch_cnp:
  288. case e1000_pch_tgp:
  289. case e1000_pch_adp:
  290. if (e1000_phy_is_accessible_pchlan(hw))
  291. break;
  292. /* Before toggling LANPHYPC, see if PHY is accessible by
  293. * forcing MAC to SMBus mode first.
  294. */
  295. mac_reg = er32(CTRL_EXT);
  296. mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
  297. ew32(CTRL_EXT, mac_reg);
  298. /* Wait 50 milliseconds for MAC to finish any retries
  299. * that it might be trying to perform from previous
  300. * attempts to acknowledge any phy read requests.
  301. */
  302. msleep(50);
  303. /* fall-through */
  304. case e1000_pch2lan:
  305. if (e1000_phy_is_accessible_pchlan(hw))
  306. break;
  307. /* fall-through */
  308. case e1000_pchlan:
  309. if ((hw->mac.type == e1000_pchlan) &&
  310. (fwsm & E1000_ICH_FWSM_FW_VALID))
  311. break;
  312. if (hw->phy.ops.check_reset_block(hw)) {
  313. e_dbg("Required LANPHYPC toggle blocked by ME\n");
  314. ret_val = -E1000_ERR_PHY;
  315. break;
  316. }
  317. /* Toggle LANPHYPC Value bit */
  318. e1000_toggle_lanphypc_pch_lpt(hw);
  319. if (hw->mac.type >= e1000_pch_lpt) {
  320. if (e1000_phy_is_accessible_pchlan(hw))
  321. break;
  322. /* Toggling LANPHYPC brings the PHY out of SMBus mode
  323. * so ensure that the MAC is also out of SMBus mode
  324. */
  325. mac_reg = er32(CTRL_EXT);
  326. mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
  327. ew32(CTRL_EXT, mac_reg);
  328. if (e1000_phy_is_accessible_pchlan(hw))
  329. break;
  330. ret_val = -E1000_ERR_PHY;
  331. }
  332. break;
  333. default:
  334. break;
  335. }
  336. hw->phy.ops.release(hw);
  337. if (!ret_val) {
  338. /* Check to see if able to reset PHY. Print error if not */
  339. if (hw->phy.ops.check_reset_block(hw)) {
  340. e_err("Reset blocked by ME\n");
  341. goto out;
  342. }
  343. /* Reset the PHY before any access to it. Doing so ensures
  344. * that the PHY is in a known good state before we read/write
  345. * PHY registers. The generic reset is sufficient here,
  346. * because we haven't determined the PHY type yet.
  347. */
  348. ret_val = e1000e_phy_hw_reset_generic(hw);
  349. if (ret_val)
  350. goto out;
  351. /* On a successful reset, possibly need to wait for the PHY
  352. * to quiesce to an accessible state before returning control
  353. * to the calling function. If the PHY does not quiesce, then
  354. * return E1000E_BLK_PHY_RESET, as this is the condition that
  355. * the PHY is in.
  356. */
  357. ret_val = hw->phy.ops.check_reset_block(hw);
  358. if (ret_val)
  359. e_err("ME blocked access to PHY after reset\n");
  360. }
  361. out:
  362. /* Ungate automatic PHY configuration on non-managed 82579 */
  363. if ((hw->mac.type == e1000_pch2lan) &&
  364. !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
  365. usleep_range(10000, 11000);
  366. e1000_gate_hw_phy_config_ich8lan(hw, false);
  367. }
  368. return ret_val;
  369. }
  370. /**
  371. * e1000_init_phy_params_pchlan - Initialize PHY function pointers
  372. * @hw: pointer to the HW structure
  373. *
  374. * Initialize family-specific PHY parameters and function pointers.
  375. **/
  376. static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
  377. {
  378. struct e1000_phy_info *phy = &hw->phy;
  379. s32 ret_val;
  380. phy->addr = 1;
  381. phy->reset_delay_us = 100;
  382. phy->ops.set_page = e1000_set_page_igp;
  383. phy->ops.read_reg = e1000_read_phy_reg_hv;
  384. phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
  385. phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
  386. phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
  387. phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
  388. phy->ops.write_reg = e1000_write_phy_reg_hv;
  389. phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
  390. phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
  391. phy->ops.power_up = e1000_power_up_phy_copper;
  392. phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
  393. phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
  394. phy->id = e1000_phy_unknown;
  395. ret_val = e1000_init_phy_workarounds_pchlan(hw);
  396. if (ret_val)
  397. return ret_val;
  398. if (phy->id == e1000_phy_unknown)
  399. switch (hw->mac.type) {
  400. default:
  401. ret_val = e1000e_get_phy_id(hw);
  402. if (ret_val)
  403. return ret_val;
  404. if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
  405. break;
  406. /* fall-through */
  407. case e1000_pch2lan:
  408. case e1000_pch_lpt:
  409. case e1000_pch_spt:
  410. case e1000_pch_cnp:
  411. case e1000_pch_tgp:
  412. case e1000_pch_adp:
  413. /* In case the PHY needs to be in mdio slow mode,
  414. * set slow mode and try to get the PHY id again.
  415. */
  416. ret_val = e1000_set_mdio_slow_mode_hv(hw);
  417. if (ret_val)
  418. return ret_val;
  419. ret_val = e1000e_get_phy_id(hw);
  420. if (ret_val)
  421. return ret_val;
  422. break;
  423. }
  424. phy->type = e1000e_get_phy_type_from_id(phy->id);
  425. switch (phy->type) {
  426. case e1000_phy_82577:
  427. case e1000_phy_82579:
  428. case e1000_phy_i217:
  429. phy->ops.check_polarity = e1000_check_polarity_82577;
  430. phy->ops.force_speed_duplex =
  431. e1000_phy_force_speed_duplex_82577;
  432. phy->ops.get_cable_length = e1000_get_cable_length_82577;
  433. phy->ops.get_info = e1000_get_phy_info_82577;
  434. phy->ops.commit = e1000e_phy_sw_reset;
  435. break;
  436. case e1000_phy_82578:
  437. phy->ops.check_polarity = e1000_check_polarity_m88;
  438. phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
  439. phy->ops.get_cable_length = e1000e_get_cable_length_m88;
  440. phy->ops.get_info = e1000e_get_phy_info_m88;
  441. break;
  442. default:
  443. ret_val = -E1000_ERR_PHY;
  444. break;
  445. }
  446. return ret_val;
  447. }
  448. /**
  449. * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
  450. * @hw: pointer to the HW structure
  451. *
  452. * Initialize family-specific PHY parameters and function pointers.
  453. **/
  454. static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
  455. {
  456. struct e1000_phy_info *phy = &hw->phy;
  457. s32 ret_val;
  458. u16 i = 0;
  459. phy->addr = 1;
  460. phy->reset_delay_us = 100;
  461. phy->ops.power_up = e1000_power_up_phy_copper;
  462. phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
  463. /* We may need to do this twice - once for IGP and if that fails,
  464. * we'll set BM func pointers and try again
  465. */
  466. ret_val = e1000e_determine_phy_address(hw);
  467. if (ret_val) {
  468. phy->ops.write_reg = e1000e_write_phy_reg_bm;
  469. phy->ops.read_reg = e1000e_read_phy_reg_bm;
  470. ret_val = e1000e_determine_phy_address(hw);
  471. if (ret_val) {
  472. e_dbg("Cannot determine PHY addr. Erroring out\n");
  473. return ret_val;
  474. }
  475. }
  476. phy->id = 0;
  477. while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
  478. (i++ < 100)) {
  479. usleep_range(1000, 1100);
  480. ret_val = e1000e_get_phy_id(hw);
  481. if (ret_val)
  482. return ret_val;
  483. }
  484. /* Verify phy id */
  485. switch (phy->id) {
  486. case IGP03E1000_E_PHY_ID:
  487. phy->type = e1000_phy_igp_3;
  488. phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
  489. phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
  490. phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
  491. phy->ops.get_info = e1000e_get_phy_info_igp;
  492. phy->ops.check_polarity = e1000_check_polarity_igp;
  493. phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
  494. break;
  495. case IFE_E_PHY_ID:
  496. case IFE_PLUS_E_PHY_ID:
  497. case IFE_C_E_PHY_ID:
  498. phy->type = e1000_phy_ife;
  499. phy->autoneg_mask = E1000_ALL_NOT_GIG;
  500. phy->ops.get_info = e1000_get_phy_info_ife;
  501. phy->ops.check_polarity = e1000_check_polarity_ife;
  502. phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
  503. break;
  504. case BME1000_E_PHY_ID:
  505. phy->type = e1000_phy_bm;
  506. phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
  507. phy->ops.read_reg = e1000e_read_phy_reg_bm;
  508. phy->ops.write_reg = e1000e_write_phy_reg_bm;
  509. phy->ops.commit = e1000e_phy_sw_reset;
  510. phy->ops.get_info = e1000e_get_phy_info_m88;
  511. phy->ops.check_polarity = e1000_check_polarity_m88;
  512. phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
  513. break;
  514. default:
  515. return -E1000_ERR_PHY;
  516. }
  517. return 0;
  518. }
  519. /**
  520. * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
  521. * @hw: pointer to the HW structure
  522. *
  523. * Initialize family-specific NVM parameters and function
  524. * pointers.
  525. **/
  526. static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
  527. {
  528. struct e1000_nvm_info *nvm = &hw->nvm;
  529. struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
  530. u32 gfpreg, sector_base_addr, sector_end_addr;
  531. u16 i;
  532. u32 nvm_size;
  533. nvm->type = e1000_nvm_flash_sw;
  534. if (hw->mac.type >= e1000_pch_spt) {
  535. /* in SPT, gfpreg doesn't exist. NVM size is taken from the
  536. * STRAP register. This is because in SPT the GbE Flash region
  537. * is no longer accessed through the flash registers. Instead,
  538. * the mechanism has changed, and the Flash region access
  539. * registers are now implemented in GbE memory space.
  540. */
  541. nvm->flash_base_addr = 0;
  542. nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
  543. * NVM_SIZE_MULTIPLIER;
  544. nvm->flash_bank_size = nvm_size / 2;
  545. /* Adjust to word count */
  546. nvm->flash_bank_size /= sizeof(u16);
  547. /* Set the base address for flash register access */
  548. hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
  549. } else {
  550. /* Can't read flash registers if register set isn't mapped. */
  551. if (!hw->flash_address) {
  552. e_dbg("ERROR: Flash registers not mapped\n");
  553. return -E1000_ERR_CONFIG;
  554. }
  555. gfpreg = er32flash(ICH_FLASH_GFPREG);
  556. /* sector_X_addr is a "sector"-aligned address (4096 bytes)
  557. * Add 1 to sector_end_addr since this sector is included in
  558. * the overall size.
  559. */
  560. sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
  561. sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
  562. /* flash_base_addr is byte-aligned */
  563. nvm->flash_base_addr = sector_base_addr
  564. << FLASH_SECTOR_ADDR_SHIFT;
  565. /* find total size of the NVM, then cut in half since the total
  566. * size represents two separate NVM banks.
  567. */
  568. nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
  569. << FLASH_SECTOR_ADDR_SHIFT);
  570. nvm->flash_bank_size /= 2;
  571. /* Adjust to word count */
  572. nvm->flash_bank_size /= sizeof(u16);
  573. }
  574. nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
  575. /* Clear shadow ram */
  576. for (i = 0; i < nvm->word_size; i++) {
  577. dev_spec->shadow_ram[i].modified = false;
  578. dev_spec->shadow_ram[i].value = 0xFFFF;
  579. }
  580. return 0;
  581. }
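/* Editor's note, worked example for the SPT+ sizing branch above (numbers
 * illustrative; NVM_SIZE_MULTIPLIER is defined in ich8lan.h): if bits 5:1 of
 * STRAP read back as 0x3, nvm_size = (0x3 + 1) * NVM_SIZE_MULTIPLIER bytes,
 * each of the two banks is nvm_size / 2 bytes, and the final division by
 * sizeof(u16) converts that bank size into 16-bit NVM words.
 */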
  582. /**
  583. * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
  584. * @hw: pointer to the HW structure
  585. *
  586. * Initialize family-specific MAC parameters and function
  587. * pointers.
  588. **/
  589. static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
  590. {
  591. struct e1000_mac_info *mac = &hw->mac;
  592. /* Set media type function pointer */
  593. hw->phy.media_type = e1000_media_type_copper;
  594. /* Set mta register count */
  595. mac->mta_reg_count = 32;
  596. /* Set rar entry count */
  597. mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
  598. if (mac->type == e1000_ich8lan)
  599. mac->rar_entry_count--;
  600. /* FWSM register */
  601. mac->has_fwsm = true;
  602. /* ARC subsystem not supported */
  603. mac->arc_subsystem_valid = false;
  604. /* Adaptive IFS supported */
  605. mac->adaptive_ifs = true;
  606. /* LED and other operations */
  607. switch (mac->type) {
  608. case e1000_ich8lan:
  609. case e1000_ich9lan:
  610. case e1000_ich10lan:
  611. /* check management mode */
  612. mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
  613. /* ID LED init */
  614. mac->ops.id_led_init = e1000e_id_led_init_generic;
  615. /* blink LED */
  616. mac->ops.blink_led = e1000e_blink_led_generic;
  617. /* setup LED */
  618. mac->ops.setup_led = e1000e_setup_led_generic;
  619. /* cleanup LED */
  620. mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
  621. /* turn on/off LED */
  622. mac->ops.led_on = e1000_led_on_ich8lan;
  623. mac->ops.led_off = e1000_led_off_ich8lan;
  624. break;
  625. case e1000_pch2lan:
  626. mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
  627. mac->ops.rar_set = e1000_rar_set_pch2lan;
  628. /* fall-through */
  629. case e1000_pch_lpt:
  630. case e1000_pch_spt:
  631. case e1000_pch_cnp:
  632. case e1000_pch_tgp:
  633. case e1000_pch_adp:
  634. case e1000_pchlan:
  635. /* check management mode */
  636. mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
  637. /* ID LED init */
  638. mac->ops.id_led_init = e1000_id_led_init_pchlan;
  639. /* setup LED */
  640. mac->ops.setup_led = e1000_setup_led_pchlan;
  641. /* cleanup LED */
  642. mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
  643. /* turn on/off LED */
  644. mac->ops.led_on = e1000_led_on_pchlan;
  645. mac->ops.led_off = e1000_led_off_pchlan;
  646. break;
  647. default:
  648. break;
  649. }
  650. if (mac->type >= e1000_pch_lpt) {
  651. mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
  652. mac->ops.rar_set = e1000_rar_set_pch_lpt;
  653. mac->ops.setup_physical_interface =
  654. e1000_setup_copper_link_pch_lpt;
  655. mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
  656. }
  657. /* Enable PCS Lock-loss workaround for ICH8 */
  658. if (mac->type == e1000_ich8lan)
  659. e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
  660. return 0;
  661. }
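/* Editor's note (observation on the code above, no new behaviour): for
 * e1000_pch_lpt and newer parts the trailing if-block replaces the default
 * E1000_ICH_RAR_ENTRIES count and installs the pch_lpt-specific rar_set,
 * rar_get_count and setup_physical_interface callbacks, while the switch
 * statement only supplies the shared management-mode and LED callbacks.
 */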
  662. /**
  663. * __e1000_access_emi_reg_locked - Read/write EMI register
  664. * @hw: pointer to the HW structure
  665. * @addr: EMI address to program
  666. * @data: pointer to value to read/write from/to the EMI address
  667. * @read: boolean flag to indicate read or write
  668. *
  669. * This helper function assumes the SW/FW/HW Semaphore is already acquired.
  670. **/
  671. static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
  672. u16 *data, bool read)
  673. {
  674. s32 ret_val;
  675. ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
  676. if (ret_val)
  677. return ret_val;
  678. if (read)
  679. ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
  680. else
  681. ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
  682. return ret_val;
  683. }
  684. /**
  685. * e1000_read_emi_reg_locked - Read Extended Management Interface register
  686. * @hw: pointer to the HW structure
  687. * @addr: EMI address to program
  688. * @data: value to be read from the EMI address
  689. *
  690. * Assumes the SW/FW/HW Semaphore is already acquired.
  691. **/
  692. s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
  693. {
  694. return __e1000_access_emi_reg_locked(hw, addr, data, true);
  695. }
  696. /**
  697. * e1000_write_emi_reg_locked - Write Extended Management Interface register
  698. * @hw: pointer to the HW structure
  699. * @addr: EMI address to program
  700. * @data: value to be written to the EMI address
  701. *
  702. * Assumes the SW/FW/HW Semaphore is already acquired.
  703. **/
  704. s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
  705. {
  706. return __e1000_access_emi_reg_locked(hw, addr, &data, false);
  707. }
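/* Editor's note (illustrative sketch): EMI access is an indirect two-step
 * window - the helper above writes the EMI address register and then moves
 * data through the EMI data register. A caller that already holds the
 * SW/FW/HW semaphore would use it as, e.g.,
 *
 *	u16 val;
 *
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &val);
 *
 * which is exactly how e1000_set_eee_pchlan() below reads the EEE registers.
 */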
  708. /**
  709. * e1000_set_eee_pchlan - Enable/disable EEE support
  710. * @hw: pointer to the HW structure
  711. *
  712. * Enable/disable EEE based on setting in dev_spec structure, the duplex of
  713. * the link and the EEE capabilities of the link partner. The LPI Control
  714. * register bits will remain set only if/when link is up.
  715. *
  716. * EEE LPI must not be asserted earlier than one second after link is up.
  717. * On 82579, EEE LPI should not be enabled until such time otherwise there
  718. * can be link issues with some switches. Other devices can have EEE LPI
  719. * enabled immediately upon link up since they have a timer in hardware which
  720. * prevents LPI from being asserted too early.
  721. **/
  722. s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
  723. {
  724. struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
  725. s32 ret_val;
  726. u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
  727. switch (hw->phy.type) {
  728. case e1000_phy_82579:
  729. lpa = I82579_EEE_LP_ABILITY;
  730. pcs_status = I82579_EEE_PCS_STATUS;
  731. adv_addr = I82579_EEE_ADVERTISEMENT;
  732. break;
  733. case e1000_phy_i217:
  734. lpa = I217_EEE_LP_ABILITY;
  735. pcs_status = I217_EEE_PCS_STATUS;
  736. adv_addr = I217_EEE_ADVERTISEMENT;
  737. break;
  738. default:
  739. return 0;
  740. }
  741. ret_val = hw->phy.ops.acquire(hw);
  742. if (ret_val)
  743. return ret_val;
  744. ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
  745. if (ret_val)
  746. goto release;
  747. /* Clear bits that enable EEE in various speeds */
  748. lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
  749. /* Enable EEE if not disabled by user */
  750. if (!dev_spec->eee_disable) {
  751. /* Save off link partner's EEE ability */
  752. ret_val = e1000_read_emi_reg_locked(hw, lpa,
  753. &dev_spec->eee_lp_ability);
  754. if (ret_val)
  755. goto release;
  756. /* Read EEE advertisement */
  757. ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
  758. if (ret_val)
  759. goto release;
  760. /* Enable EEE only for speeds in which the link partner is
  761. * EEE capable and for which we advertise EEE.
  762. */
  763. if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
  764. lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
  765. if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
  766. e1e_rphy_locked(hw, MII_LPA, &data);
  767. if (data & LPA_100FULL)
  768. lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
  769. else
  770. /* EEE is not supported in 100Half, so ignore
  771. * partner's EEE in 100 ability if full-duplex
  772. * is not advertised.
  773. */
  774. dev_spec->eee_lp_ability &=
  775. ~I82579_EEE_100_SUPPORTED;
  776. }
  777. }
  778. if (hw->phy.type == e1000_phy_82579) {
  779. ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
  780. &data);
  781. if (ret_val)
  782. goto release;
  783. data &= ~I82579_LPI_100_PLL_SHUT;
  784. ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
  785. data);
  786. }
  787. /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
  788. ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
  789. if (ret_val)
  790. goto release;
  791. ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
  792. release:
  793. hw->phy.ops.release(hw);
  794. return ret_val;
  795. }
  796. /**
  797. * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
  798. * @hw: pointer to the HW structure
  799. * @link: link up bool flag
  800. *
  801. * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
  802. * preventing further DMA write requests. Workaround the issue by disabling
  803. * the de-assertion of the clock request when in 1Gbps mode.
  804. * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
  805. * speeds in order to avoid Tx hangs.
  806. **/
  807. static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
  808. {
  809. u32 fextnvm6 = er32(FEXTNVM6);
  810. u32 status = er32(STATUS);
  811. s32 ret_val = 0;
  812. u16 reg;
  813. if (link && (status & E1000_STATUS_SPEED_1000)) {
  814. ret_val = hw->phy.ops.acquire(hw);
  815. if (ret_val)
  816. return ret_val;
  817. ret_val =
  818. e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
  819. &reg);
  820. if (ret_val)
  821. goto release;
  822. ret_val =
  823. e1000e_write_kmrn_reg_locked(hw,
  824. E1000_KMRNCTRLSTA_K1_CONFIG,
  825. reg &
  826. ~E1000_KMRNCTRLSTA_K1_ENABLE);
  827. if (ret_val)
  828. goto release;
  829. usleep_range(10, 20);
  830. ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
  831. ret_val =
  832. e1000e_write_kmrn_reg_locked(hw,
  833. E1000_KMRNCTRLSTA_K1_CONFIG,
  834. reg);
  835. release:
  836. hw->phy.ops.release(hw);
  837. } else {
  838. /* clear FEXTNVM6 bit 8 on link down or 10/100 */
  839. fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
  840. if ((hw->phy.revision > 5) || !link ||
  841. ((status & E1000_STATUS_SPEED_100) &&
  842. (status & E1000_STATUS_FD)))
  843. goto update_fextnvm6;
  844. ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
  845. if (ret_val)
  846. return ret_val;
  847. /* Clear link status transmit timeout */
  848. reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
  849. if (status & E1000_STATUS_SPEED_100) {
  850. /* Set inband Tx timeout to 5x10us for 100Half */
  851. reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
  852. /* Do not extend the K1 entry latency for 100Half */
  853. fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
  854. } else {
  855. /* Set inband Tx timeout to 50x10us for 10Full/Half */
  856. reg |= 50 <<
  857. I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
  858. /* Extend the K1 entry latency for 10 Mbps */
  859. fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
  860. }
  861. ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
  862. if (ret_val)
  863. return ret_val;
  864. update_fextnvm6:
  865. ew32(FEXTNVM6, fextnvm6);
  866. }
  867. return ret_val;
  868. }
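/* Editor's note: per the in-code comments above, the inband link-status Tx
 * timeout field is in 10 usec units, so the programmed values correspond to
 * roughly 5 * 10 usec = 50 usec for 100Half and 50 * 10 usec = 500 usec for
 * 10 Mbps links.
 */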
  869. /**
  870. * e1000_platform_pm_pch_lpt - Set platform power management values
  871. * @hw: pointer to the HW structure
  872. * @link: bool indicating link status
  873. *
  874. * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
  875. * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
  876. * when link is up (which must not exceed the maximum latency supported
  877. * by the platform), otherwise specify there is no LTR requirement.
  878. * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
  879. * latencies in the LTR Extended Capability Structure in the PCIe Extended
  880. * Capability register set, on this device LTR is set by writing the
  881. * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
  882. * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
  883. * message to the PMC.
  884. **/
  885. static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
  886. {
  887. u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
  888. link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
  889. u16 lat_enc = 0; /* latency encoded */
  890. if (link) {
  891. u16 speed, duplex, scale = 0;
  892. u16 max_snoop, max_nosnoop;
  893. u16 max_ltr_enc; /* max LTR latency encoded */
  894. u64 value;
  895. u32 rxa;
  896. if (!hw->adapter->max_frame_size) {
  897. e_dbg("max_frame_size not set.\n");
  898. return -E1000_ERR_CONFIG;
  899. }
  900. hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
  901. if (!speed) {
  902. e_dbg("Speed not set.\n");
  903. return -E1000_ERR_CONFIG;
  904. }
  905. /* Rx Packet Buffer Allocation size (KB) */
  906. rxa = er32(PBA) & E1000_PBA_RXA_MASK;
  907. /* Determine the maximum latency tolerated by the device.
  908. *
  909. * Per the PCIe spec, the tolerated latencies are encoded as
  910. * a 3-bit encoded scale (only 0-5 are valid) multiplied by
  911. * a 10-bit value (0-1023) to provide a range from 1 ns to
  912. * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
  913. * 1=2^5ns, 2=2^10ns,...5=2^25ns.
  914. */
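/* Editor's note, worked example of the scale loop below (illustrative,
 * assuming the usual 10-bit LTR value field, PCI_LTR_VALUE_MASK = 0x3FF):
 * for value = 100000 ns the loop divides by 32 twice
 * (100000 -> 3125 -> 98), leaving scale = 2, so lat_enc encodes
 * 98 * 2^10 ns (~100 usec) before being capped by the platform maximum
 * read from PCI config space.
 */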
  915. rxa *= 512;
  916. value = (rxa > hw->adapter->max_frame_size) ?
  917. (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
  918. 0;
  919. while (value > PCI_LTR_VALUE_MASK) {
  920. scale++;
  921. value = DIV_ROUND_UP(value, BIT(5));
  922. }
  923. if (scale > E1000_LTRV_SCALE_MAX) {
  924. e_dbg("Invalid LTR latency scale %d\n", scale);
  925. return -E1000_ERR_CONFIG;
  926. }
  927. lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
  928. /* Determine the maximum latency tolerated by the platform */
  929. pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
  930. &max_snoop);
  931. pci_read_config_word(hw->adapter->pdev,
  932. E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
  933. max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
  934. if (lat_enc > max_ltr_enc)
  935. lat_enc = max_ltr_enc;
  936. }
  937. /* Set Snoop and No-Snoop latencies the same */
  938. reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
  939. ew32(LTRV, reg);
  940. return 0;
  941. }
  942. /**
  943. * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
  944. * @hw: pointer to the HW structure
  945. * @to_sx: boolean indicating a system power state transition to Sx
  946. *
  947. * When link is down, configure ULP mode to significantly reduce the power
  948. * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
  949. * ME firmware to start the ULP configuration. If not on an ME enabled
  950. * system, configure the ULP mode by software.
  951. */
  952. s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
  953. {
  954. u32 mac_reg;
  955. s32 ret_val = 0;
  956. u16 phy_reg;
  957. u16 oem_reg = 0;
  958. if ((hw->mac.type < e1000_pch_lpt) ||
  959. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
  960. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
  961. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
  962. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
  963. (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
  964. return 0;
  965. if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
  966. /* Request ME configure ULP mode in the PHY */
  967. mac_reg = er32(H2ME);
  968. mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
  969. ew32(H2ME, mac_reg);
  970. goto out;
  971. }
  972. if (!to_sx) {
  973. int i = 0;
  974. /* Poll up to 5 seconds for Cable Disconnected indication */
  975. while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
  976. /* Bail if link is re-acquired */
  977. if (er32(STATUS) & E1000_STATUS_LU)
  978. return -E1000_ERR_PHY;
  979. if (i++ == 100)
  980. break;
  981. msleep(50);
  982. }
  983. e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
  984. (er32(FEXT) &
  985. E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
  986. }
  987. ret_val = hw->phy.ops.acquire(hw);
  988. if (ret_val)
  989. goto out;
  990. /* Force SMBus mode in PHY */
  991. ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
  992. if (ret_val)
  993. goto release;
  994. phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
  995. e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
  996. /* Force SMBus mode in MAC */
  997. mac_reg = er32(CTRL_EXT);
  998. mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
  999. ew32(CTRL_EXT, mac_reg);
  1000. /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
  1001. * LPLU and disable Gig speed when entering ULP
  1002. */
  1003. if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
  1004. ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
  1005. &oem_reg);
  1006. if (ret_val)
  1007. goto release;
  1008. phy_reg = oem_reg;
  1009. phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
  1010. ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
  1011. phy_reg);
  1012. if (ret_val)
  1013. goto release;
  1014. }
  1015. /* Set Inband ULP Exit, Reset to SMBus mode and
  1016. * Disable SMBus Release on PERST# in PHY
  1017. */
  1018. ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
  1019. if (ret_val)
  1020. goto release;
  1021. phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
  1022. I218_ULP_CONFIG1_DISABLE_SMB_PERST);
  1023. if (to_sx) {
  1024. if (er32(WUFC) & E1000_WUFC_LNKC)
  1025. phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
  1026. else
  1027. phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
  1028. phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
  1029. phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
  1030. } else {
  1031. phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
  1032. phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
  1033. phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
  1034. }
  1035. e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
  1036. /* Set Disable SMBus Release on PERST# in MAC */
  1037. mac_reg = er32(FEXTNVM7);
  1038. mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
  1039. ew32(FEXTNVM7, mac_reg);
  1040. /* Commit ULP changes in PHY by starting auto ULP configuration */
  1041. phy_reg |= I218_ULP_CONFIG1_START;
  1042. e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
  1043. if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
  1044. to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
  1045. ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
  1046. oem_reg);
  1047. if (ret_val)
  1048. goto release;
  1049. }
  1050. release:
  1051. hw->phy.ops.release(hw);
  1052. out:
  1053. if (ret_val)
  1054. e_dbg("Error in ULP enable flow: %d\n", ret_val);
  1055. else
  1056. hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
  1057. return ret_val;
  1058. }
  1059. /**
  1060. * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
  1061. * @hw: pointer to the HW structure
  1062. * @force: boolean indicating whether or not to force disabling ULP
  1063. *
  1064. * Un-configure ULP mode when link is up, the system is transitioned from
  1065. * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
  1066. * system, poll for an indication from ME that ULP has been un-configured.
  1067. * If not on an ME enabled system, un-configure the ULP mode by software.
  1068. *
  1069. * During nominal operation, this function is called when link is acquired
  1070. * to disable ULP mode (force=false); otherwise, for example when unloading
  1071. * the driver or during Sx->S0 transitions, this is called with force=true
  1072. * to forcibly disable ULP.
  1073. */
  1074. static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
  1075. {
  1076. s32 ret_val = 0;
  1077. u32 mac_reg;
  1078. u16 phy_reg;
  1079. int i = 0;
  1080. if ((hw->mac.type < e1000_pch_lpt) ||
  1081. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
  1082. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
  1083. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
  1084. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
  1085. (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
  1086. return 0;
  1087. if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
  1088. if (force) {
  1089. /* Request ME un-configure ULP mode in the PHY */
  1090. mac_reg = er32(H2ME);
  1091. mac_reg &= ~E1000_H2ME_ULP;
  1092. mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
  1093. ew32(H2ME, mac_reg);
  1094. }
  1095. /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
  1096. while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
  1097. if (i++ == 30) {
  1098. ret_val = -E1000_ERR_PHY;
  1099. goto out;
  1100. }
  1101. usleep_range(10000, 11000);
  1102. }
  1103. e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
  1104. if (force) {
  1105. mac_reg = er32(H2ME);
  1106. mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
  1107. ew32(H2ME, mac_reg);
  1108. } else {
  1109. /* Clear H2ME.ULP after ME ULP configuration */
  1110. mac_reg = er32(H2ME);
  1111. mac_reg &= ~E1000_H2ME_ULP;
  1112. ew32(H2ME, mac_reg);
  1113. }
  1114. goto out;
  1115. }
  1116. ret_val = hw->phy.ops.acquire(hw);
  1117. if (ret_val)
  1118. goto out;
  1119. if (force)
  1120. /* Toggle LANPHYPC Value bit */
  1121. e1000_toggle_lanphypc_pch_lpt(hw);
  1122. /* Unforce SMBus mode in PHY */
  1123. ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
  1124. if (ret_val) {
  1125. /* The MAC might be in PCIe mode, so temporarily force to
  1126. * SMBus mode in order to access the PHY.
  1127. */
  1128. mac_reg = er32(CTRL_EXT);
  1129. mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
  1130. ew32(CTRL_EXT, mac_reg);
  1131. msleep(50);
  1132. ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
  1133. &phy_reg);
  1134. if (ret_val)
  1135. goto release;
  1136. }
  1137. phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
  1138. e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
  1139. /* Unforce SMBus mode in MAC */
  1140. mac_reg = er32(CTRL_EXT);
  1141. mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
  1142. ew32(CTRL_EXT, mac_reg);
  1143. /* When ULP mode was previously entered, K1 was disabled by the
  1144. * hardware. Re-Enable K1 in the PHY when exiting ULP.
  1145. */
  1146. ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
  1147. if (ret_val)
  1148. goto release;
  1149. phy_reg |= HV_PM_CTRL_K1_ENABLE;
  1150. e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
  1151. /* Clear ULP enabled configuration */
  1152. ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
  1153. if (ret_val)
  1154. goto release;
  1155. phy_reg &= ~(I218_ULP_CONFIG1_IND |
  1156. I218_ULP_CONFIG1_STICKY_ULP |
  1157. I218_ULP_CONFIG1_RESET_TO_SMBUS |
  1158. I218_ULP_CONFIG1_WOL_HOST |
  1159. I218_ULP_CONFIG1_INBAND_EXIT |
  1160. I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
  1161. I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
  1162. I218_ULP_CONFIG1_DISABLE_SMB_PERST);
  1163. e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
  1164. /* Commit ULP changes by starting auto ULP configuration */
  1165. phy_reg |= I218_ULP_CONFIG1_START;
  1166. e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
  1167. /* Clear Disable SMBus Release on PERST# in MAC */
  1168. mac_reg = er32(FEXTNVM7);
  1169. mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
  1170. ew32(FEXTNVM7, mac_reg);
  1171. release:
  1172. hw->phy.ops.release(hw);
  1173. if (force) {
  1174. e1000_phy_hw_reset(hw);
  1175. msleep(50);
  1176. }
  1177. out:
  1178. if (ret_val)
  1179. e_dbg("Error in ULP disable flow: %d\n", ret_val);
  1180. else
  1181. hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
  1182. return ret_val;
  1183. }
  1184. /**
  1185. * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  1186. * @hw: pointer to the HW structure
  1187. *
  1188. * Checks to see if the link status of the hardware has changed. If a
  1189. * change in link status has been detected, then we read the PHY registers
  1190. * to get the current speed/duplex if link exists.
  1191. **/
  1192. static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
  1193. {
  1194. struct e1000_mac_info *mac = &hw->mac;
  1195. s32 ret_val, tipg_reg = 0;
  1196. u16 emi_addr, emi_val = 0;
  1197. bool link;
  1198. u16 phy_reg;
  1199. /* We only want to go out to the PHY registers to see if Auto-Neg
  1200. * has completed and/or if our link status has changed. The
  1201. * get_link_status flag is set upon receiving a Link Status
  1202. * Change or Rx Sequence Error interrupt.
  1203. */
  1204. if (!mac->get_link_status)
  1205. return 0;
  1206. mac->get_link_status = false;
  1207. /* First we want to see if the MII Status Register reports
  1208. * link. If so, then we want to get the current speed/duplex
  1209. * of the PHY.
  1210. */
  1211. ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
  1212. if (ret_val)
  1213. goto out;
  1214. if (hw->mac.type == e1000_pchlan) {
  1215. ret_val = e1000_k1_gig_workaround_hv(hw, link);
  1216. if (ret_val)
  1217. goto out;
  1218. }
  1219. /* When connected at 10Mbps half-duplex, some parts are excessively
  1220. * aggressive resulting in many collisions. To avoid this, increase
  1221. * the IPG and reduce Rx latency in the PHY.
  1222. */
  1223. if ((hw->mac.type >= e1000_pch2lan) && link) {
  1224. u16 speed, duplex;
  1225. e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
  1226. tipg_reg = er32(TIPG);
  1227. tipg_reg &= ~E1000_TIPG_IPGT_MASK;
  1228. if (duplex == HALF_DUPLEX && speed == SPEED_10) {
  1229. tipg_reg |= 0xFF;
  1230. /* Reduce Rx latency in analog PHY */
  1231. emi_val = 0;
  1232. } else if (hw->mac.type >= e1000_pch_spt &&
  1233. duplex == FULL_DUPLEX && speed != SPEED_1000) {
  1234. tipg_reg |= 0xC;
  1235. emi_val = 1;
  1236. } else {
  1237. /* Roll back the default values */
  1238. tipg_reg |= 0x08;
  1239. emi_val = 1;
  1240. }
  1241. ew32(TIPG, tipg_reg);
  1242. ret_val = hw->phy.ops.acquire(hw);
  1243. if (ret_val)
  1244. goto out;
  1245. if (hw->mac.type == e1000_pch2lan)
  1246. emi_addr = I82579_RX_CONFIG;
  1247. else
  1248. emi_addr = I217_RX_CONFIG;
  1249. ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
  1250. if (hw->mac.type >= e1000_pch_lpt) {
  1251. u16 phy_reg;
  1252. e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
  1253. phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
  1254. if (speed == SPEED_100 || speed == SPEED_10)
  1255. phy_reg |= 0x3E8;
  1256. else
  1257. phy_reg |= 0xFA;
  1258. e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
  1259. if (speed == SPEED_1000) {
  1260. hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
  1261. &phy_reg);
  1262. phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
  1263. hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
  1264. phy_reg);
  1265. }
  1266. }
  1267. hw->phy.ops.release(hw);
  1268. if (ret_val)
  1269. goto out;
  1270. if (hw->mac.type >= e1000_pch_spt) {
  1271. u16 data;
  1272. u16 ptr_gap;
  1273. if (speed == SPEED_1000) {
  1274. ret_val = hw->phy.ops.acquire(hw);
  1275. if (ret_val)
  1276. goto out;
  1277. ret_val = e1e_rphy_locked(hw,
  1278. PHY_REG(776, 20),
  1279. &data);
  1280. if (ret_val) {
  1281. hw->phy.ops.release(hw);
  1282. goto out;
  1283. }
  1284. ptr_gap = (data & (0x3FF << 2)) >> 2;
  1285. if (ptr_gap < 0x18) {
  1286. data &= ~(0x3FF << 2);
  1287. data |= (0x18 << 2);
  1288. ret_val =
  1289. e1e_wphy_locked(hw,
  1290. PHY_REG(776, 20),
  1291. data);
  1292. }
  1293. hw->phy.ops.release(hw);
  1294. if (ret_val)
  1295. goto out;
  1296. } else {
  1297. ret_val = hw->phy.ops.acquire(hw);
  1298. if (ret_val)
  1299. goto out;
  1300. ret_val = e1e_wphy_locked(hw,
  1301. PHY_REG(776, 20),
  1302. 0xC023);
  1303. hw->phy.ops.release(hw);
  1304. if (ret_val)
  1305. goto out;
  1306. }
  1307. }
  1308. }
  1309. /* I217 Packet Loss issue:
  1310. * ensure that FEXTNVM4 Beacon Duration is set correctly
  1311. * on power up.
  1312. * Set the Beacon Duration for I217 to 8 usec
  1313. */
  1314. if (hw->mac.type >= e1000_pch_lpt) {
  1315. u32 mac_reg;
  1316. mac_reg = er32(FEXTNVM4);
  1317. mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
  1318. mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
  1319. ew32(FEXTNVM4, mac_reg);
  1320. }
  1321. /* Work-around I218 hang issue */
  1322. if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
  1323. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
  1324. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
  1325. (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
  1326. ret_val = e1000_k1_workaround_lpt_lp(hw, link);
  1327. if (ret_val)
  1328. goto out;
  1329. }
  1330. if (hw->mac.type >= e1000_pch_lpt) {
  1331. /* Set platform power management values for
  1332. * Latency Tolerance Reporting (LTR)
  1333. */
  1334. ret_val = e1000_platform_pm_pch_lpt(hw, link);
  1335. if (ret_val)
  1336. goto out;
  1337. }
  1338. /* Clear link partner's EEE ability */
  1339. hw->dev_spec.ich8lan.eee_lp_ability = 0;
  1340. if (hw->mac.type >= e1000_pch_lpt) {
  1341. u32 fextnvm6 = er32(FEXTNVM6);
  1342. if (hw->mac.type == e1000_pch_spt) {
  1343. /* FEXTNVM6 K1-off workaround - for SPT only */
  1344. u32 pcieanacfg = er32(PCIEANACFG);
  1345. if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
  1346. fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
  1347. else
  1348. fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
  1349. }
  1350. ew32(FEXTNVM6, fextnvm6);
  1351. }
  1352. if (!link)
  1353. goto out;
  1354. switch (hw->mac.type) {
  1355. case e1000_pch2lan:
  1356. ret_val = e1000_k1_workaround_lv(hw);
  1357. if (ret_val)
  1358. return ret_val;
  1359. /* fall-thru */
  1360. case e1000_pchlan:
  1361. if (hw->phy.type == e1000_phy_82578) {
  1362. ret_val = e1000_link_stall_workaround_hv(hw);
  1363. if (ret_val)
  1364. return ret_val;
  1365. }
  1366. /* Workaround for PCHx parts in half-duplex:
  1367. * Set the number of preambles removed from the packet
  1368. * when it is passed from the PHY to the MAC to prevent
  1369. * the MAC from misinterpreting the packet type.
  1370. */
  1371. e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
  1372. phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
  1373. if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
  1374. phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
  1375. e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
  1376. break;
  1377. default:
  1378. break;
  1379. }
  1380. /* Check if there was DownShift, must be checked
  1381. * immediately after link-up
  1382. */
  1383. e1000e_check_downshift(hw);
  1384. /* Enable/Disable EEE after link up */
  1385. if (hw->phy.type > e1000_phy_82579) {
  1386. ret_val = e1000_set_eee_pchlan(hw);
  1387. if (ret_val)
  1388. return ret_val;
  1389. }
  1390. /* If we are forcing speed/duplex, then we simply return since
  1391. * we have already determined whether we have link or not.
  1392. */
  1393. if (!mac->autoneg)
  1394. return -E1000_ERR_CONFIG;
  1395. /* Auto-Neg is enabled. Auto Speed Detection takes care
  1396. * of MAC speed/duplex configuration. So we only need to
  1397. * configure Collision Distance in the MAC.
  1398. */
  1399. mac->ops.config_collision_dist(hw);
  1400. /* Configure Flow Control now that Auto-Neg has completed.
  1401. * First, we need to restore the desired flow control
  1402. * settings because we may have had to re-autoneg with a
  1403. * different link partner.
  1404. */
  1405. ret_val = e1000e_config_fc_after_link_up(hw);
  1406. if (ret_val)
  1407. e_dbg("Error configuring flow control\n");
  1408. return ret_val;
  1409. out:
  1410. mac->get_link_status = true;
  1411. return ret_val;
  1412. }
  1413. static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
  1414. {
  1415. struct e1000_hw *hw = &adapter->hw;
  1416. s32 rc;
  1417. rc = e1000_init_mac_params_ich8lan(hw);
  1418. if (rc)
  1419. return rc;
  1420. rc = e1000_init_nvm_params_ich8lan(hw);
  1421. if (rc)
  1422. return rc;
  1423. switch (hw->mac.type) {
  1424. case e1000_ich8lan:
  1425. case e1000_ich9lan:
  1426. case e1000_ich10lan:
  1427. rc = e1000_init_phy_params_ich8lan(hw);
  1428. break;
  1429. case e1000_pchlan:
  1430. case e1000_pch2lan:
  1431. case e1000_pch_lpt:
  1432. case e1000_pch_spt:
  1433. case e1000_pch_cnp:
  1434. case e1000_pch_tgp:
  1435. case e1000_pch_adp:
  1436. rc = e1000_init_phy_params_pchlan(hw);
  1437. break;
  1438. default:
  1439. break;
  1440. }
  1441. if (rc)
  1442. return rc;
  1443. /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
  1444. * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
  1445. */
  1446. if ((adapter->hw.phy.type == e1000_phy_ife) ||
  1447. ((adapter->hw.mac.type >= e1000_pch2lan) &&
  1448. (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
  1449. adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
  1450. adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
  1451. hw->mac.ops.blink_led = NULL;
  1452. }
  1453. if ((adapter->hw.mac.type == e1000_ich8lan) &&
  1454. (adapter->hw.phy.type != e1000_phy_ife))
  1455. adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
  1456. /* Enable workaround for 82579 w/ ME enabled */
  1457. if ((adapter->hw.mac.type == e1000_pch2lan) &&
  1458. (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
  1459. adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
  1460. return 0;
  1461. }
  1462. static DEFINE_MUTEX(nvm_mutex);
  1463. /**
  1464. * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
  1465. * @hw: pointer to the HW structure
  1466. *
  1467. * Acquires the mutex for performing NVM operations.
  1468. **/
  1469. static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
  1470. {
  1471. mutex_lock(&nvm_mutex);
  1472. return 0;
  1473. }
  1474. /**
  1475. * e1000_release_nvm_ich8lan - Release NVM mutex
  1476. * @hw: pointer to the HW structure
  1477. *
  1478. * Releases the mutex used while performing NVM operations.
  1479. **/
  1480. static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
  1481. {
  1482. mutex_unlock(&nvm_mutex);
  1483. }
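/* Editor's note (sketch of the expected calling pattern, not a verbatim
 * excerpt from this file): NVM users are expected to bracket their work with
 * these ops so that concurrent callers serialize on nvm_mutex, e.g.
 *
 *	nvm->ops.acquire(hw);
 *	... read or update the flash / shadow RAM ...
 *	nvm->ops.release(hw);
 */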
  1484. /**
  1485. * e1000_acquire_swflag_ich8lan - Acquire software control flag
  1486. * @hw: pointer to the HW structure
  1487. *
  1488. * Acquires the software control flag for performing PHY and select
  1489. * MAC CSR accesses.
  1490. **/
  1491. static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
  1492. {
  1493. u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
  1494. s32 ret_val = 0;
  1495. if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
  1496. &hw->adapter->state)) {
  1497. e_dbg("contention for Phy access\n");
  1498. return -E1000_ERR_PHY;
  1499. }
  1500. while (timeout) {
  1501. extcnf_ctrl = er32(EXTCNF_CTRL);
  1502. if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
  1503. break;
  1504. mdelay(1);
  1505. timeout--;
  1506. }
  1507. if (!timeout) {
  1508. e_dbg("SW has alre…