PageRenderTime 923ms CodeModel.GetById 10ms app.highlight 807ms RepoModel.GetById 1ms app.codeStats 4ms

/drivers/net/ethernet/broadcom/tg3.c

http://github.com/mirrors/linux
C | 18321 lines | 13829 code | 3121 blank | 1371 comment | 3395 complexity | f576d86aef186712610d2eaffa15f544 MD5 | raw file
    1/*
    2 * tg3.c: Broadcom Tigon3 ethernet driver.
    3 *
    4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
    5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
    6 * Copyright (C) 2004 Sun Microsystems Inc.
    7 * Copyright (C) 2005-2016 Broadcom Corporation.
    8 * Copyright (C) 2016-2017 Broadcom Limited.
    9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
   10 * refers to Broadcom Inc. and/or its subsidiaries.
   11 *
   12 * Firmware is:
   13 *	Derived from proprietary unpublished source code,
   14 *	Copyright (C) 2000-2016 Broadcom Corporation.
   15 *	Copyright (C) 2016-2017 Broadcom Ltd.
   16 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
   17 *	refers to Broadcom Inc. and/or its subsidiaries.
   18 *
   19 *	Permission is hereby granted for the distribution of this firmware
   20 *	data in hexadecimal or equivalent format, provided this copyright
   21 *	notice is accompanying it.
   22 */
   23
   24
   25#include <linux/module.h>
   26#include <linux/moduleparam.h>
   27#include <linux/stringify.h>
   28#include <linux/kernel.h>
   29#include <linux/sched/signal.h>
   30#include <linux/types.h>
   31#include <linux/compiler.h>
   32#include <linux/slab.h>
   33#include <linux/delay.h>
   34#include <linux/in.h>
   35#include <linux/interrupt.h>
   36#include <linux/ioport.h>
   37#include <linux/pci.h>
   38#include <linux/netdevice.h>
   39#include <linux/etherdevice.h>
   40#include <linux/skbuff.h>
   41#include <linux/ethtool.h>
   42#include <linux/mdio.h>
   43#include <linux/mii.h>
   44#include <linux/phy.h>
   45#include <linux/brcmphy.h>
   46#include <linux/if.h>
   47#include <linux/if_vlan.h>
   48#include <linux/ip.h>
   49#include <linux/tcp.h>
   50#include <linux/workqueue.h>
   51#include <linux/prefetch.h>
   52#include <linux/dma-mapping.h>
   53#include <linux/firmware.h>
   54#include <linux/ssb/ssb_driver_gige.h>
   55#include <linux/hwmon.h>
   56#include <linux/hwmon-sysfs.h>
   57#include <linux/crc32poly.h>
   58
   59#include <net/checksum.h>
   60#include <net/ip.h>
   61
   62#include <linux/io.h>
   63#include <asm/byteorder.h>
   64#include <linux/uaccess.h>
   65
   66#include <uapi/linux/net_tstamp.h>
   67#include <linux/ptp_clock_kernel.h>
   68
   69#define BAR_0	0
   70#define BAR_2	2
   71
   72#include "tg3.h"
   73
   74/* Functions & macros to verify TG3_FLAGS types */
   75
   76static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
   77{
   78	return test_bit(flag, bits);
   79}
   80
/* Atomically set @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
   85
/* Atomically clear @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
   90
   91#define tg3_flag(tp, flag)				\
   92	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
   93#define tg3_flag_set(tp, flag)				\
   94	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
   95#define tg3_flag_clear(tp, flag)			\
   96	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
   97
   98#define DRV_MODULE_NAME		"tg3"
   99/* DO NOT UPDATE TG3_*_NUM defines */
  100#define TG3_MAJ_NUM			3
  101#define TG3_MIN_NUM			137
  102
  103#define RESET_KIND_SHUTDOWN	0
  104#define RESET_KIND_INIT		1
  105#define RESET_KIND_SUSPEND	2
  106
  107#define TG3_DEF_RX_MODE		0
  108#define TG3_DEF_TX_MODE		0
  109#define TG3_DEF_MSG_ENABLE	  \
  110	(NETIF_MSG_DRV		| \
  111	 NETIF_MSG_PROBE	| \
  112	 NETIF_MSG_LINK		| \
  113	 NETIF_MSG_TIMER	| \
  114	 NETIF_MSG_IFDOWN	| \
  115	 NETIF_MSG_IFUP		| \
  116	 NETIF_MSG_RX_ERR	| \
  117	 NETIF_MSG_TX_ERR)
  118
  119#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
  120
  121/* length of time before we decide the hardware is borked,
  122 * and dev->tx_timeout() should be called to fix the problem
  123 */
  124
  125#define TG3_TX_TIMEOUT			(5 * HZ)
  126
  127/* hardware minimum and maximum for a single frame's data payload */
  128#define TG3_MIN_MTU			ETH_ZLEN
  129#define TG3_MAX_MTU(tp)	\
  130	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
  131
  132/* These numbers seem to be hard coded in the NIC firmware somehow.
  133 * You can't change the ring sizes, but you can change where you place
  134 * them in the NIC onboard memory.
  135 */
  136#define TG3_RX_STD_RING_SIZE(tp) \
  137	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
  138	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
  139#define TG3_DEF_RX_RING_PENDING		200
  140#define TG3_RX_JMB_RING_SIZE(tp) \
  141	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
  142	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
  143#define TG3_DEF_RX_JUMBO_RING_PENDING	100
  144
  145/* Do not place this n-ring entries value into the tp struct itself,
  146 * we really want to expose these constants to GCC so that modulo et
  147 * al.  operations are done with shifts and masks instead of with
  148 * hw multiply/modulo instructions.  Another solution would be to
  149 * replace things like '% foo' with '& (foo - 1)'.
  150 */
  151
  152#define TG3_TX_RING_SIZE		512
  153#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
  154
  155#define TG3_RX_STD_RING_BYTES(tp) \
  156	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
  157#define TG3_RX_JMB_RING_BYTES(tp) \
  158	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
  159#define TG3_RX_RCB_RING_BYTES(tp) \
  160	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
  161#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
  162				 TG3_TX_RING_SIZE)
  163#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
  164
  165#define TG3_DMA_BYTE_ENAB		64
  166
  167#define TG3_RX_STD_DMA_SZ		1536
  168#define TG3_RX_JMB_DMA_SZ		9046
  169
  170#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
  171
  172#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
  173#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
  174
  175#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
  176	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
  177
  178#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
  179	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
  180
  181/* Due to a hardware bug, the 5701 can only DMA to memory addresses
  182 * that are at least dword aligned when used in PCIX mode.  The driver
  183 * works around this bug by double copying the packet.  This workaround
  184 * is built into the normal double copy length check for efficiency.
  185 *
  186 * However, the double copy is only necessary on those architectures
  187 * where unaligned memory accesses are inefficient.  For those architectures
  188 * where unaligned memory accesses incur little penalty, we can reintegrate
  189 * the 5701 in the normal rx path.  Doing so saves a device structure
  190 * dereference by hardcoding the double copy threshold in place.
  191 */
  192#define TG3_RX_COPY_THRESHOLD		256
  193#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  194	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
  195#else
  196	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
  197#endif
  198
  199#if (NET_IP_ALIGN != 0)
  200#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
  201#else
  202#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
  203#endif
  204
  205/* minimum number of free TX descriptors required to wake up TX process */
  206#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
  207#define TG3_TX_BD_DMA_MAX_2K		2048
  208#define TG3_TX_BD_DMA_MAX_4K		4096
  209
  210#define TG3_RAW_IP_ALIGN 2
  211
  212#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
  213#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
  214
  215#define TG3_FW_UPDATE_TIMEOUT_SEC	5
  216#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
  217
  218#define FIRMWARE_TG3		"tigon/tg3.bin"
  219#define FIRMWARE_TG357766	"tigon/tg357766.bin"
  220#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
  221#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
  222
  223MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
  224MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
  225MODULE_LICENSE("GPL");
  226MODULE_FIRMWARE(FIRMWARE_TG3);
  227MODULE_FIRMWARE(FIRMWARE_TG3TSO);
  228MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
  229
/* Bitmapped NETIF_MSG_* debug enable; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
  233
  234#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
  235#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
  236
/* PCI IDs claimed by this driver.  .driver_data carries TG3_DRV_DATA_FLAG_*
 * quirk bits for the 10/100-only (and 5705-class 10/100) variants.
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* terminating entry */
};
  355
  356MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
  357
/* Names reported for "ethtool -S".  The ordering is significant: each
 * entry's index selects the corresponding counter in the statistics
 * dump, so keep this table in sync with the stats-gathering code —
 * do not reorder or insert entries in the middle.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
  440
  441#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
  442#define TG3_NVRAM_TEST		0
  443#define TG3_LINK_TEST		1
  444#define TG3_REGISTER_TEST	2
  445#define TG3_MEMORY_TEST		3
  446#define TG3_MAC_LOOPB_TEST	4
  447#define TG3_PHY_LOOPB_TEST	5
  448#define TG3_EXT_LOOPB_TEST	6
  449#define TG3_INTERRUPT_TEST	7
  450
  451
/* Names reported for "ethtool -t".  Indexed by the TG3_*_TEST defines
 * above, so the array order is fixed by those values.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
  464
  465#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
  466
  467
  468static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
  469{
  470	writel(val, tp->regs + off);
  471}
  472
  473static u32 tg3_read32(struct tg3 *tp, u32 off)
  474{
  475	return readl(tp->regs + off);
  476}
  477
  478static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
  479{
  480	writel(val, tp->aperegs + off);
  481}
  482
  483static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
  484{
  485	return readl(tp->aperegs + off);
  486}
  487
/* Write register @off through the PCI config-space indirect window.
 * indirect_lock serializes the base-address/data register pair, which
 * is shared state; the two config writes must stay in this order.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
  497
/* MMIO write followed by a read-back of the same register to flush
 * the posted write to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
  503
/* Read register @off through the PCI config-space indirect window.
 * Counterpart of tg3_write_indirect_reg32(); same locking rules.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
  515
/* Write mailbox register @off via PCI config space (indirect mode).
 * Two hot mailboxes have dedicated config-space aliases and bypass
 * indirect_lock; all others go through the shared indirect window,
 * offset by 0x5600 into the mailbox region.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has its own config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index likewise. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
  545
/* Read mailbox register @off via the PCI config-space indirect window
 * (0x5600 mailbox region offset).  Counterpart of
 * tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
  557
  558/* usec_wait specifies the wait time in usec when writing to certain registers
  559 * where it is unsafe to read back the register without some delay.
  560 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
  561 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
  562 */
/* Write @val to register @off and make sure it reached the chip,
 * optionally waiting @usec_wait both before and after the flushing
 * read.  See the block comment above for which registers need the
 * delay.  On PCIX_TARGET_HWBUG/ICH_WORKAROUND parts the configured
 * non-posted write path is used instead of a read-back flush.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
  581
/* Mailbox write with conditional read-back flush.  The flush is done
 * when FLUSH_POSTED_WRITES is set, or on parts where neither the
 * MBOX_WRITE_REORDER nor the ICH workaround applies.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
  590
/* Write a TX mailbox register.  On TXD_MBOX_HWBUG parts the value is
 * written twice (hardware workaround); a read-back flush is added when
 * write reordering or posted-write flushing is required.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* hw bug workaround: write twice */
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
  601
  602static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
  603{
  604	return readl(tp->regs + off + GRCMBOX_BASE);
  605}
  606
  607static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
  608{
  609	writel(val, tp->regs + off + GRCMBOX_BASE);
  610}
  611
  612#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
  613#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
  614#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
  615#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
  616#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
  617
  618#define tw32(reg, val)			tp->write32(tp, reg, val)
  619#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
  620#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
  621#define tr32(reg)			tp->read32(tp, reg)
  622
/* Write @val into NIC on-chip SRAM at @off through the memory window.
 * The window base/data pair is shared, so the whole sequence runs
 * under indirect_lock, and the base is always restored to zero.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* Stats-block SRAM range is skipped on 5906 (writes would be
	 * invalid there — see the matching check in tg3_read_mem()).
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Access the window via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Access the window via flushed MMIO writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
  647
/* Read NIC on-chip SRAM at @off into *@val through the memory window.
 * Counterpart of tg3_write_mem(); same locking and window handling.
 * The excluded 5906 stats-block range reads back as 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
  674
/* Release any APE locks this driver instance might still hold from a
 * previous run.  5761 uses the legacy per-chip grant registers; newer
 * parts use per-function grant registers where the bit depends on the
 * PCI function number (except PHY locks, which always use the driver
 * grant bit).
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			/* Function 0 uses the driver grant bit; other
			 * functions use their own per-function bit.
			 */
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
  704
/* Acquire APE lock @locknum.  Writes our request bit, then polls the
 * grant register for up to ~1 ms.  Returns 0 on success, -EBUSY if the
 * lock could not be obtained (the request is revoked), -EINVAL for an
 * unknown lock number.  A no-op returning 0 when the APE is disabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Per-function request bit (driver bit for function 0). */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy lock registers; newer parts use the
	 * per-function variants.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		/* Give up early if the device fell off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
  767
/* Release APE lock @locknum previously acquired with tg3_ape_lock().
 * Bit and register selection mirror tg3_ape_lock(); unknown lock
 * numbers and disabled-APE configurations are silently ignored.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit to the grant register releases the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
  804
/* Wait up to @timeout_us for the APE to finish servicing the previous
 * event, then return with TG3_APE_LOCK_MEM HELD and 0 on success.
 * Returns -EBUSY (lock NOT held) if the lock cannot be taken or the
 * event stays pending past the timeout.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;	/* success: MEM lock stays held */

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
  825
  826#ifdef CONFIG_TIGON3_HWMON
/* Poll until the APE clears the event-pending bit, checking every
 * 10 us for up to @timeout_us.  Returns 0 on success and nonzero
 * (truthy) on timeout — callers treat the result as a boolean error.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
  842
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, in chunks bounded by the shared message-buffer size.  Each
 * chunk is requested via a driver->APE scratchpad-read event and then
 * copied out of the shared message buffer.
 *
 * Returns 0 on success (and, note, also when APE_HAS_NCSI is not set —
 * the read is simply skipped), -ENODEV if the APE segment signature is
 * wrong, -EAGAIN if APE firmware is not ready or times out, or the
 * error from tg3_ape_event_lock().  @len is assumed to be a multiple
 * of 4 — the drain loop below copies whole u32 words.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer; the first two u32s hold the
	 * request (offset, length), the payload follows.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post the scratchpad-read request... */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* ...then release the MEM lock taken by
		 * tg3_ape_event_lock() and ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Drain the chunk from the message buffer, one word at
		 * a time (memcpy keeps @data free of aliasing casts).
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
  906#endif
  907
/* Post a driver event to the APE: verify the firmware is alive and ready,
 * wait for the previous event to be serviced (taking the MEM lock), then
 * write the event with the pending bit set and ring the APE doorbell.
 * Returns 0 on success, negative errno otherwise.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Release the MEM lock taken by tg3_ape_event_lock() before
	 * signalling the APE.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
  934
/* Inform the APE firmware of a driver state transition (init or shutdown).
 * On init, publishes the host segment signature, driver id and behavior
 * flags; on shutdown, records either WOL or UNLOAD state depending on
 * wake-up configuration.  Finishes by sending the matching state-change
 * event to the APE.  Other 'kind' values are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Count driver initializations across resets. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
  982
  983static void tg3_send_ape_heartbeat(struct tg3 *tp,
  984				   unsigned long interval)
  985{
  986	/* Check if hb interval has exceeded */
  987	if (!tg3_flag(tp, ENABLE_APE) ||
  988	    time_before(jiffies, tp->ape_hb_jiffies + interval))
  989		return;
  990
  991	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
  992	tp->ape_hb_jiffies = jiffies;
  993}
  994
/* Mask chip interrupts: set the PCI interrupt mask bit in the misc host
 * control register, then write 1 to every vector's interrupt mailbox to
 * deassert any interrupt currently pending.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
 1004
/* Unmask chip interrupts and prime each vector's interrupt mailbox with
 * its last processed status tag.  May force an initial interrupt so that
 * a status update racing with the enable is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Make the irq_sync clear visible before interrupts are unmasked. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Leave coal_now holding only the auxiliary vectors' bits. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
 1035
 1036static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 1037{
 1038	struct tg3 *tp = tnapi->tp;
 1039	struct tg3_hw_status *sblk = tnapi->hw_status;
 1040	unsigned int work_exists = 0;
 1041
 1042	/* check for phy events */
 1043	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 1044		if (sblk->status & SD_STATUS_LINK_CHG)
 1045			work_exists = 1;
 1046	}
 1047
 1048	/* check for TX work to do */
 1049	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
 1050		work_exists = 1;
 1051
 1052	/* check for RX work to do */
 1053	if (tnapi->rx_rcb_prod_idx &&
 1054	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 1055		work_exists = 1;
 1056
 1057	return work_exists;
 1058}
 1059
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack work up to last_tag and re-arm this vector's interrupt. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
 1079
/* Switch the core clock source via TG3PCI_CLOCK_CTRL, stepping through
 * the alternate clock where the pre-5705 sequence requires it.  No-op on
 * CPMU-equipped or 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition through ALTCLK; the intermediate
		 * write keeps 44MHZ_CORE asserted while ALTCLK comes up.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
 1112
 1113#define PHY_BUSY_LOOPS	5000
 1114
/* Read PHY register 'reg' of the PHY at 'phy_addr' through the MAC's MI
 * (MDIO) interface.  Temporarily disables MI auto-polling and holds the
 * PHY APE lock around the transaction.  Returns 0 with *val filled in,
 * or -EBUSY if the MI interface never goes idle.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build and launch the MI read command frame. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
 1168
/* Read a PHY register at the device's default PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
 1173
/* Write 'val' to PHY register 'reg' of the PHY at 'phy_addr' through the
 * MAC's MI (MDIO) interface, mirroring __tg3_readphy().  Returns 0 on
 * success, -EBUSY if the MI interface never goes idle.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Writes to these registers are skipped on FET PHYs and treated
	 * as success.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build and launch the MI write command frame. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
 1227
/* Write a PHY register at the device's default PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
 1232
 1233static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 1234{
 1235	int err;
 1236
 1237	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 1238	if (err)
 1239		goto done;
 1240
 1241	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 1242	if (err)
 1243		goto done;
 1244
 1245	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 1246			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 1247	if (err)
 1248		goto done;
 1249
 1250	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
 1251
 1252done:
 1253	return err;
 1254}
 1255
 1256static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
 1257{
 1258	int err;
 1259
 1260	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 1261	if (err)
 1262		goto done;
 1263
 1264	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 1265	if (err)
 1266		goto done;
 1267
 1268	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 1269			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 1270	if (err)
 1271		goto done;
 1272
 1273	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
 1274
 1275done:
 1276	return err;
 1277}
 1278
 1279static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
 1280{
 1281	int err;
 1282
 1283	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 1284	if (!err)
 1285		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
 1286
 1287	return err;
 1288}
 1289
 1290static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 1291{
 1292	int err;
 1293
 1294	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 1295	if (!err)
 1296		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 1297
 1298	return err;
 1299}
 1300
 1301static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
 1302{
 1303	int err;
 1304
 1305	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
 1306			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
 1307			   MII_TG3_AUXCTL_SHDWSEL_MISC);
 1308	if (!err)
 1309		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
 1310
 1311	return err;
 1312}
 1313
 1314static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
 1315{
 1316	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
 1317		set |= MII_TG3_AUXCTL_MISC_WREN;
 1318
 1319	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 1320}
 1321
 1322static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
 1323{
 1324	u32 val;
 1325	int err;
 1326
 1327	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 1328
 1329	if (err)
 1330		return err;
 1331
 1332	if (enable)
 1333		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
 1334	else
 1335		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
 1336
 1337	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 1338				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
 1339
 1340	return err;
 1341}
 1342
/* Write 'val' to misc shadow register 'reg'; the write-enable bit must
 * accompany the data on this register.
 */
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
 1348
/* Reset the PHY through BMCR_RESET and poll (up to 5000 x 10 usec) for
 * the self-clearing reset bit to drop.  Returns 0 on success, -EBUSY on
 * MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 here only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
 1379
/* mii_bus .read callback: read 'reg' of the PHY at 'mii_id' while
 * holding tp->lock.  Returns the register value, or -EIO on failure
 * (staged through the u32 local and converted back on return).
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
 1394
 1395static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 1396{
 1397	struct tg3 *tp = bp->priv;
 1398	u32 ret = 0;
 1399
 1400	spin_lock_bh(&tp->lock);
 1401
 1402	if (__tg3_writephy(tp, mii_id, reg, val))
 1403		ret = -EIO;
 1404
 1405	spin_unlock_bh(&tp->lock);
 1406
 1407	return ret;
 1408}
 1409
/* Configure the 5785 MAC's PHY glue (LED modes, RGMII in-band signaling,
 * clock timeouts) according to the attached PHY model and interface
 * mode.  PHYs not in the switch below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces get the simple configuration. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the mask/enable bits. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band settings into the RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
 1490
/* Disable MI auto-polling so the driver owns the MDIO interface, and
 * reapply the 5785 PHY glue configuration if the mdio bus has already
 * been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
 1501
/* Determine the device's PHY address, start the MDIO interface, and (when
 * phylib is in use) allocate and register an mii_bus, then apply per-PHY
 * quirks.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHYs by PCI function; serdes PHYs sit
		 * at an offset of 7.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		/* SSB/roboswitch devices get their address from the
		 * switch driver.
		 */
		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	/* Only probe the single PHY address we determined above. */
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
 1608
 1609static void tg3_mdio_fini(struct tg3 *tp)
 1610{
 1611	if (tg3_flag(tp, MDIOBUS_INITED)) {
 1612		tg3_flag_clear(tp, MDIOBUS_INITED);
 1613		mdiobus_unregister(tp->mdio_bus);
 1614		mdiobus_free(tp->mdio_bus);
 1615	}
 1616}
 1617
/* tp->lock is held. */
/* Raise the driver-event bit in the RX CPU event register to signal the
 * firmware, and record when we did so for tg3_wait_for_event_ack().
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
 1629
 1630#define TG3_FW_EVENT_TIMEOUT_USEC 2500
 1631
/* tp->lock is held. */
/* Wait for the firmware to acknowledge (clear) the previously generated
 * driver event, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from when
 * the event was raised.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Give up early if the PCI channel has failed. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
 1661
/* tp->lock is held. */
/* Snapshot four words of PHY state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into data[0..3] for the UMP link report.
 * Registers that fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* 1000T registers are not applicable on MII serdes PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
 1696
/* tp->lock is held. */
/* Send a link-update firmware command carrying the gathered PHY state to
 * the management firmware.  Only applicable on 5780-class devices with
 * ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
 1718
/* tp->lock is held. */
/* Ask the ASF firmware to pause (only when ASF is handled by the RX CPU,
 * i.e. no APE), waiting for it to acknowledge both the previous and this
 * pause event.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
 1734
 1735/* tp->lock is held. */
 1736static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 1737{
 1738	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
 1739		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 1740
 1741	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1742		switch (kind) {
 1743		case RESET_KIND_INIT:
 1744			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1745				      DRV_STATE_START);
 1746			break;
 1747
 1748		case RESET_KIND_SHUTDOWN:
 1749			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1750				      DRV_STATE_UNLOAD);
 1751			break;
 1752
 1753		case RESET_KIND_SUSPEND:
 1754			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1755				      DRV_STATE_SUSPEND);
 1756			break;
 1757
 1758		default:
 1759			break;
 1760		}
 1761	}
 1762}
 1763
 1764/* tp->lock is held. */
 1765static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
 1766{
 1767	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1768		switch (kind) {
 1769		case RESET_KIND_INIT:
 1770			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1771				      DRV_STATE_START_DONE);
 1772			break;
 1773
 1774		case RESET_KIND_SHUTDOWN:
 1775			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1776				      DRV_STATE_UNLOAD_DONE);
 1777			break;
 1778
 1779		default:
 1780			break;
 1781		}
 1782	}
 1783}
 1784
 1785/* tp->lock is held. */
 1786static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
 1787{
 1788	if (tg3_flag(tp, ENABLE_ASF)) {
 1789		switch (kind) {
 1790		case RESET_KIND_INIT:
 1791			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1792				      DRV_STATE_START);
 1793			break;
 1794
 1795		case RESET_KIND_SHUTDOWN:
 1796			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1797				      DRV_STATE_UNLOAD);
 1798			break;
 1799
 1800		case RESET_KIND_SUSPEND:
 1801			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1802				      DRV_STATE_SUSPEND);
 1803			break;
 1804
 1805		default:
 1806			break;
 1807		}
 1808	}
 1809}
 1810
/* Wait for on-chip firmware to finish initializing after a reset.
 * Returns 0 on success or when no firmware is expected; -ENODEV on a
 * 5906 VCPU timeout or a dead PCI channel.  A missing-firmware timeout
 * on other chips is deliberately not treated as an error (see below).
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
 1874
/* Log the current link state (speed, duplex, flow control, EEE), notify
 * the management firmware via the UMP link report, and cache the carrier
 * state in tp->link_up.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
 1904
 1905static u32 tg3_decode_flowctrl_1000T(u32 adv)
 1906{
 1907	u32 flowctrl = 0;
 1908
 1909	if (adv & ADVERTISE_PAUSE_CAP) {
 1910		flowctrl |= FLOW_CTRL_RX;
 1911		if (!(adv & ADVERTISE_PAUSE_ASYM))
 1912			flowctrl |= FLOW_CTRL_TX;
 1913	} else if (adv & ADVERTISE_PAUSE_ASYM)
 1914		flowctrl |= FLOW_CTRL_TX;
 1915
 1916	return flowctrl;
 1917}
 1918
 1919static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 1920{
 1921	u16 miireg;
 1922
 1923	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
 1924		miireg = ADVERTISE_1000XPAUSE;
 1925	else if (flow_ctrl & FLOW_CTRL_TX)
 1926		miireg = ADVERTISE_1000XPSE_ASYM;
 1927	else if (flow_ctrl & FLOW_CTRL_RX)
 1928		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
 1929	else
 1930		miireg = 0;
 1931
 1932	return miireg;
 1933}
 1934
 1935static u32 tg3_decode_flowctrl_1000X(u32 adv)
 1936{
 1937	u32 flowctrl = 0;
 1938
 1939	if (adv & ADVERTISE_1000XPAUSE) {
 1940		flowctrl |= FLOW_CTRL_RX;
 1941		if (!(adv & ADVERTISE_1000XPSE_ASYM))
 1942			flowctrl |= FLOW_CTRL_TX;
 1943	} else if (adv & ADVERTISE_1000XPSE_ASYM)
 1944		flowctrl |= FLOW_CTRL_TX;
 1945
 1946	return flowctrl;
 1947}
 1948
/* Resolve the negotiated 1000BASE-X pause configuration from the local
 * and remote advertisements.  Returns the FLOW_CTRL_* capabilities this
 * end should enable.
 */
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		/* Both sides advertise asym pause but not both PAUSE (the
		 * branch above would have matched), so at most one of the
		 * two assignments below can apply.
		 */
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
 1964
/* Compute the active flow-control configuration from autoneg results (or
 * the forced setting) and program the MAC RX/TX mode registers, touching
 * each register only if its value actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links resolve 1000X pause bits; copper links use
		 * the generic MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
 2003
/* phylib link-change callback.  Under tp->lock it syncs MAC_MODE, flow
 * control and TX gap/slot timings with the PHY's negotiated state, then
 * reports the link (outside the lock) if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the negotiated
		 * speed; 5785 is special-cased for non-gigabit rates.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Pause is only resolved on full-duplex links. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000T half-duplex uses a different slot time than all other
	 * link modes.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
 2087
/* Attach the net device to its PHY through phylib and restrict the
 * advertised features to what the MAC/PHY combination supports.
 * Idempotent: returns 0 immediately if already connected.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		/* 10/100-only devices fall back to the MII limits. */
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		/* Unsupported interface type: undo the connect. */
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
 2133
/* (Re)start the PHY state machine.  When resuming from low-power mode,
 * first reload the saved link configuration into the phydev so that
 * autonegotiation restarts with the user's settings.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
 2156
 2157static void tg3_phy_stop(struct tg3 *tp)
 2158{
 2159	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 2160		return;
 2161
 2162	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
 2163}
 2164
 2165static void tg3_phy_fini(struct tg3 *tp)
 2166{
 2167	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 2168		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
 2169		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 2170	}
 2171}
 2172
 2173static int tg3_phy_set_extloopbk(struct tg3 *tp)
 2174{
 2175	int err;
 2176	u32 val;
 2177
 2178	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 2179		return 0;
 2180
 2181	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 2182		/* Cannot do read-modify-write on 5401 */
 2183		err = tg3_phy_auxctl_write(tp,
 2184					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 2185					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
 2186					   0x4c20);
 2187		goto done;
 2188	}
 2189
 2190	err = tg3_phy_auxctl_read(tp,
 2191				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 2192	if (err)
 2193		return err;
 2194
 2195	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
 2196	err = tg3_phy_auxctl_write(tp,
 2197				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
 2198
 2199done:
 2200	return err;
 2201}
 2202
 2203static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 2204{
 2205	u32 phytest;
 2206
 2207	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 2208		u32 phy;
 2209
 2210		tg3_writephy(tp, MII_TG3_FET_TEST,
 2211			     phytest | MII_TG3_FET_SHADOW_EN);
 2212		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
 2213			if (enable)
 2214				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
 2215			else
 2216				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
 2217			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
 2218		}
 2219		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 2220	}
 2221}
 2222
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * MISC shadow registers.  No-op on chips without the capability, and
 * delegated to the FET-specific helper for FET-style PHYs.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled omits the DLL auto power-down bit. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
 2253
/* Enable or disable automatic MDI/MDI-X crossover detection.  FET-style
 * PHYs use a shadow MISCCTRL register (bracketed by FET_TEST shadow
 * enable); other PHYs use the AUXCTL MISC shadow register.  SerDes
 * links have no copper pairs, so this is a no-op for them.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow window, flip the MDIX bit,
			 * then restore FET_TEST to close it again.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
 2294
 2295static void tg3_phy_set_wirespeed(struct tg3 *tp)
 2296{
 2297	int ret;
 2298	u32 val;
 2299
 2300	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 2301		return;
 2302
 2303	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 2304	if (!ret)
 2305		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
 2306				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 2307}
 2308
/* Program PHY DSP tuning coefficients from the chip's one-time-
 * programmable (OTP) word cached in tp->phy_otp.  Each field of the
 * OTP word is shifted into place and written to its DSP register,
 * with SMDSP access enabled around the whole sequence.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP access window opened above. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
 2345
/* Read the chip's current Energy-Efficient-Ethernet state into @eee,
 * or into the cached tp->eee when @eee is NULL.  Any failing clause-45
 * read aborts the remainder, leaving later fields untouched.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
 2385
/* Re-evaluate EEE after a link change.  On an autonegotiated
 * full-duplex 100/1000 link, program the LPI exit timer and, if the
 * partner negotiated EEE, arm setlpicnt so LPI gets enabled later.
 * Otherwise make sure LPI is disabled.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear the DSP TAP26 setting and turn
		 * LPI off in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
 2425
/* Turn on EEE low-power-idle in the CPMU, applying the DSP TAP26
 * workaround bits first on the chip families that need them at
 * gigabit speed.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
 2444
 2445static int tg3_wait_macro_done(struct tg3 *tp)
 2446{
 2447	int limit = 100;
 2448
 2449	while (limit--) {
 2450		u32 tmp32;
 2451
 2452		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
 2453			if ((tmp32 & 0x1000) == 0)
 2454				break;
 2455		}
 2456	}
 2457	if (limit < 0)
 2458		return -EBUSY;
 2459
 2460	return 0;
 2461}
 2462
/* Write a known DSP test pattern into each of the 4 PHY channels and
 * read it back for verification.  On a macro timeout *resetp is set so
 * the caller retries after a fresh PHY reset; on a data mismatch a
 * recovery sequence is written instead.  Returns 0 when all channels
 * verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address the channel's test block and start a write. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and read the pattern back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the recovery sequence
				 * rather than requesting a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
 2528
/* Clear the DSP test pattern on all 4 PHY channels by writing zeros
 * through the macro interface.  Returns -EBUSY if the macro does not
 * complete on any channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
 2548
 2549static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 2550{
 2551	u32 reg32, phy9_orig;
 2552	int retries, do_phy_reset, err;
 2553
 2554	retries = 10;
 2555	do_phy_reset = 1;
 2556	do {
 2557		if (do_phy_reset) {
 2558			err = tg3_bmcr_reset(tp);
 2559			if (err)
 2560				return err;
 2561			do_phy_reset = 0;
 2562		}
 2563
 2564		/* Disable transmitter and interrupt.  */
 2565		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
 2566			continue;
 2567
 2568		reg32 |= 0x3000;
 2569		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2570
 2571		/* Set full-duplex, 1000 mbps.  */
 2572		tg3_writephy(tp, MII_BMCR,
 2573			     BMCR_FULLDPLX | BMCR_SPEED1000);
 2574
 2575		/* Set to master mode.  */
 2576		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
 2577			continue;
 2578
 2579		tg3_writephy(tp, MII_CTRL1000,
 2580			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 2581
 2582		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
 2583		if (err)
 2584			return err;
 2585
 2586		/* Block the PHY control access.  */
 2587		tg3_phydsp_write(tp, 0x8005, 0x0800);
 2588
 2589		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
 2590		if (!err)
 2591			break;
 2592	} while (--retries);
 2593
 2594	err = tg3_phy_reset_chanpat(tp);
 2595	if (err)
 2596		return err;
 2597
 2598	tg3_phydsp_write(tp, 0x8005, 0x0000);
 2599
 2600	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 2601	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 2602
 2603	tg3_phy_toggle_auxctl_smdsp(tp, false);
 2604
 2605	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 2606
 2607	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
 2608	if (err)
 2609		return err;
 2610
 2611	reg32 &= ~0x3000;
 2612	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2613
 2614	return 0;
 2615}
 2616
/* Mark the link down, both toward the network stack and in the
 * driver's cached link state.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
 2622
 2623static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
 2624{
 2625	if (tg3_flag(tp, ENABLE_ASF))
 2626		netdev_warn(tp->dev,
 2627			    "Management side-band traffic will be interrupted during phy settings change\n");
 2628}
 2629
 2630/* This will reset the tigon3 PHY if there is no valid
 2631 * link unless the FORCE argument is non-zero.
 2632 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the ethernet PHY out of IDDQ before touching it. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Double BMSR read: the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP test-pattern reset workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* Temporarily lift the 5784 10Mb RX-only restriction around
	 * the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: drop the 12.5MHz MAC clock selection. */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY errata workarounds, keyed off the phy_flags bits. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
 2773
 2774#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
 2775#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
 2776#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
 2777					  TG3_GPIO_MSG_NEED_VAUX)
 2778#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
 2779	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
 2780	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
 2781	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
 2782	 (TG3_GPIO_MSG_DRVR_PRES << 12))
 2783
 2784#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
 2785	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
 2786	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
 2787	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
 2788	 (TG3_GPIO_MSG_NEED_VAUX << 12))
 2789
/* Replace this PCI function's 4-bit field in the shared GPIO status
 * word (held in the APE GPIO_MSG register on 5717/5719, in the CPMU
 * driver-status register otherwise) with @newstat.  Returns the whole
 * updated word, shifted down so each function occupies 4 bits.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Each PCI function owns a 4-bit slice of the status word. */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
 2812
/* Switch the NIC's power source to Vmain.  On 5717/5719/5720 the GPIO
 * lines are shared between PCI functions, so the driver-present status
 * is updated under the APE GPIO lock first.  Returns 0 on success or
 * -EIO if the lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
 2837
/* Power the device down while staying on Vmain, pulsing GPIO1 through
 * the documented output1 high/low/high sequence.  Not applicable to
 * non-NIC configs or the 5700/5701.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
 2861
/* Switch the NIC's power source to auxiliary power (Vaux) by stepping
 * the GRC local-control GPIOs through the chip-specific sequence.
 * Three variants: 5700/5701, the 5761 non-e devices (GPIO 0/2 swapped),
 * and everything else (with a 5714 overcurrent workaround and an
 * optional no-GPIO2 NVRAM config).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
 2938
/* 5717-class aux-power arbitration: publish this function's Vaux need
 * (ASF/APE/WoL) in the shared status word and, if no other function's
 * driver is present, actually switch the power source.  The whole
 * transition is serialized with the APE GPIO lock.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is present: leave power alone. */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
 2963
/* Decide whether this NIC (and, on two-port boards, its peer function)
 * needs auxiliary power for ASF or wake-on-LAN, and switch the power
 * source accordingly.  @include_wol controls whether WoL counts toward
 * the Vaux requirement.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* If the peer driver is up, it owns the GPIOs. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
 3007
 3008static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
 3009{
 3010	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
 3011		return 1;
 3012	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
 3013		if (speed != SPEED_10)
 3014			return 1;
 3015	} else if (speed == SPEED_10)
 3016		return 1;
 3017
 3018	return 0;
 3019}
 3020
 3021static bool tg3_phy_power_bug(struct tg3 *tp)
 3022{
 3023	switch (tg3_asic_rev(tp)) {
 3024	case ASIC_REV_5700:
 3025	case ASIC_REV_5704:
 3026		return true;
 3027	case ASIC_REV_5780:
 3028		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 3029			return true;
 3030		return false;
 3031	case ASIC_REV_5717:
 3032		if (!tp->pci_fn)
 3033			return true;
 3034		return false;
 3035	case ASIC_REV_5719:
 3036	case ASIC_REV_5720:
 3037		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 3038		    !tp->pci_fn)
 3039			return true;
 3040		return false;
 3041	}
 3042
 3043	return false;
 3044}
 3045
 3046static bool tg3_phy_led_bug(struct tg3 *tp)
 3047{
 3048	switch (tg3_asic_rev(tp)) {
 3049	case ASIC_REV_5719:
 3050	case ASIC_REV_5720:
 3051		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 3052		    !tp->pci_fn)
 3053			return true;
 3054		return false;
 3055	}
 3056
 3057	return false;
 3058}
 3059
/* Quiesce the PHY ahead of a low-power device state.
 *
 * Serdes PHYs cannot be powered off; on 5704 only the hardware
 * autoneg/soft-reset bits are set.  5906 parts enter EPHY IDDQ, FET
 * PHYs enter super-isolate, and all remaining copper PHYs are powered
 * down via BMCR_PDOWN unless a chip erratum forbids it (see
 * tg3_phy_power_bug()).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	/* Caller needs the link to stay up; leave the PHY alone. */
	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the embedded PHY in IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enter shadow-register mode to set the AUXMODE4
			 * SBPD bit, then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Kill the LEDs and drop the PHY into isolate/low-power
		 * (skipped on parts where forcing LEDs off misbehaves).
		 */
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	/* 5784/5761 AX steppings: slow the 1000Mb MAC clock to 12.5MHz
	 * before the power-down.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
 3133
 3134/* tp->lock is held. */
 3135static int tg3_nvram_lock(struct tg3 *tp)
 3136{
 3137	if (tg3_flag(tp, NVRAM)) {
 3138		int i;
 3139
 3140		if (tp->nvram_lock_cnt == 0) {
 3141			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
 3142			for (i = 0; i < 8000; i++) {
 3143				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
 3144					break;
 3145				udelay(20);
 3146			}
 3147			if (i == 8000) {
 3148				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
 3149				return -ENODEV;
 3150			}
 3151		}
 3152		tp->nvram_lock_cnt++;
 3153	}
 3154	return 0;
 3155}
 3156
 3157/* tp->lock is held. */
 3158static void tg3_nvram_unlock(struct tg3 *tp)
 3159{
 3160	if (tg3_flag(tp, NVRAM)) {
 3161		if (tp->nvram_lock_cnt > 0)
 3162			tp->nvram_lock_cnt--;
 3163		if (tp->nvram_lock_cnt == 0)
 3164			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
 3165	}
 3166}
 3167
 3168/* tp->lock is held. */
 3169static void tg3_enable_nvram_access(struct tg3 *tp)
 3170{
 3171	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 3172		u32 nvaccess = tr32(NVRAM_ACCESS);
 3173
 3174		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
 3175	}
 3176}
 3177
 3178/* tp->lock is held. */
 3179static void tg3_disable_nvram_access(struct tg3 *tp)
 3180{
 3181	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 3182		u32 nvaccess = tr32(NVRAM_ACCESS);
 3183
 3184		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 3185	}
 3186}
 3187
/* Read one 32-bit word from the legacy SEEPROM interface.
 *
 * @offset: byte offset into the EEPROM; must be dword aligned and
 *          within EEPROM_ADDR_ADDR_MASK.
 * @val:    output word, byteswapped to match the NVRAM register
 *          interface convention (see the NOTE above tg3_nvram_read).
 *
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY if the
 * controller never signals completion within ~1s of polling.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Program device id 0 and the target address, then kick off
	 * the read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, 1ms at a time. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
 3227
/* Max poll iterations; each waits 10-40us, so ~100-400ms worst case. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for the DONE bit.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Small settle delay after DONE is observed. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
 3248
 3249static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
 3250{
 3251	if (tg3_flag(tp, NVRAM) &&
 3252	    tg3_flag(tp, NVRAM_BUFFERED) &&
 3253	    tg3_flag(tp, FLASH) &&
 3254	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 3255	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 3256
 3257		addr = ((addr / tp->nvram_pagesize) <<
 3258			ATMEL_AT45DB0X1B_PAGE_POS) +
 3259		       (addr % tp->nvram_pagesize);
 3260
 3261	return addr;
 3262}
 3263
 3264static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
 3265{
 3266	if (tg3_flag(tp, NVRAM) &&
 3267	    tg3_flag(tp, NVRAM_BUFFERED) &&
 3268	    tg3_flag(tp, FLASH) &&
 3269	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 3270	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 3271
 3272		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
 3273			tp->nvram_pagesize) +
 3274		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
 3275
 3276	return addr;
 3277}
 3278
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word at byte @offset into *@val.  Falls back to the
 * SEEPROM interface on parts without the NVRAM block.  The access is
 * framed by the arbitration lock and access enable.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the device's physical addressing scheme. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
 3316
 3317/* Ensures NVRAM data is in bytestream format. */
 3318static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
 3319{
 3320	u32 v;
 3321	int res = tg3_nvram_read(tp, offset, &v);
 3322	if (!res)
 3323		*val = cpu_to_be32(v);
 3324	return res;
 3325}
 3326
/* Write @len bytes (a multiple of 4) from @buf to the legacy SEEPROM
 * one 32-bit word at a time, polling each write to completion.
 * Returns 0 on success, -EBUSY if a word never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first (presumably write-1-to-clear
		 * of any stale status — TODO confirm), then program the
		 * address and start the write.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, 1ms at a time. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
 3375
 3376/* offset and length are dword aligned */
 3377static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 3378		u8 *buf)
 3379{
 3380	int ret = 0;
 3381	u32 pagesize = tp->nvram_pagesize;
 3382	u32 pagemask = pagesize - 1;
 3383	u32 nvram_cmd;
 3384	u8 *tmp;
 3385
 3386	tmp = kmalloc(pagesize, GFP_KERNEL);
 3387	if (tmp == NULL)
 3388		return -ENOMEM;
 3389
 3390	while (len) {
 3391		int j;
 3392		u32 phy_addr, page_off, size;
 3393
 3394		phy_addr = offset & ~pagemask;
 3395
 3396		for (j = 0; j < pagesize; j += 4) {
 3397			ret = tg3_nvram_read_be32(tp, phy_addr + j,
 3398						  (__be32 *) (tmp + j));
 3399			if (ret)
 3400				break;
 3401		}
 3402		if (ret)
 3403			break;
 3404
 3405		page_off = offset & pagemask;
 3406		size = pagesize;
 3407		if (len < size)
 3408			size = len;
 3409
 3410		len -= size;
 3411
 3412		memcpy(tmp + page_off, buf, size);
 3413
 3414		offset = offset + (pagesize - page_off);
 3415
 3416		tg3_enable_nvram_access(tp);
 3417
 3418		/*
 3419		 * Before we can erase the flash page, we need
 3420		 * to issue a special "write enable" command.
 3421		 */
 3422		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3423
 3424		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3425			break;
 3426
 3427		/* Erase the target page */
 3428		tw32(NVRAM_ADDR, phy_addr);
 3429
 3430		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
 3431			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
 3432
 3433		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3434			break;
 3435
 3436		/* Issue another write enable to start the write. */
 3437		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3438
 3439		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3440			break;
 3441
 3442		for (j = 0; j < pagesize; j += 4) {
 3443			__be32 data;
 3444
 3445			data = *((__be32 *) (tmp + j));
 3446
 3447			tw32(NVRAM_WRDATA, be32_to_cpu(data));
 3448
 3449			tw32(NVRAM_ADDR, phy_addr + j);
 3450
 3451			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
 3452				NVRAM_CMD_WR;
 3453
 3454			if (j == 0)
 3455				nvram_cmd |= NVRAM_CMD_FIRST;
 3456			else if (j == (pagesize - 4))
 3457				nvram_cmd |= NVRAM_CMD_LAST;
 3458
 3459			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
 3460			if (ret)
 3461				break;
 3462		}
 3463		if (ret)
 3464			break;
 3465	}
 3466
 3467	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3468	tg3_nvram_exec_cmd(tp, nvram_cmd);
 3469
 3470	kfree(tmp);
 3471
 3472	return ret;
 3473}
 3474
/* offset and length are dword aligned */
/* Write @len bytes from @buf at @offset to buffered flash or EEPROM.
 * Buffered parts accept word-at-a-time writes; page boundaries are
 * signalled to the controller via the FIRST/LAST command flags.
 * Returns 0 on success or a negative errno from the command engine.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the device's physical addressing scheme. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on the first word of a page or of the transfer;
		 * LAST on the last word of a page or of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts latch the address at FIRST only. */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts need an explicit write-enable before
		 * the first word of each page.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
 3529
 3530/* offset and length are dword aligned */
 3531static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 3532{
 3533	int ret;
 3534
 3535	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3536		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 3537		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
 3538		udelay(40);
 3539	}
 3540
 3541	if (!tg3_flag(tp, NVRAM)) {
 3542		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
 3543	} else {
 3544		u32 grc_mode;
 3545
 3546		ret = tg3_nvram_lock(tp);
 3547		if (ret)
 3548			return ret;
 3549
 3550		tg3_enable_nvram_access(tp);
 3551		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
 3552			tw32(NVRAM_WRITE1, 0x406);
 3553
 3554		grc_mode = tr32(GRC_MODE);
 3555		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
 3556
 3557		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
 3558			ret = tg3_nvram_write_block_buffered(tp, offset, len,
 3559				buf);
 3560		} else {
 3561			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
 3562				buf);
 3563		}
 3564
 3565		grc_mode = tr32(GRC_MODE);
 3566		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
 3567
 3568		tg3_disable_nvram_access(tp);
 3569		tg3_nvram_unlock(tp);
 3570	}
 3571
 3572	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3573		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 3574		udelay(40);
 3575	}
 3576
 3577	return ret;
 3578}
 3579
/* Scratch memory windows of the on-chip RX and TX processors. */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
/* Repeatedly request a halt of the on-chip CPU at @cpu_base until the
 * halt bit reads back set.  Returns 0 on success, -EBUSY on timeout
 * or if the PCI channel goes offline mid-poll.
 */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		/* Don't keep hammering a dead device. */
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
 3602
/* tp->lock is held. */
/* Halt the RX CPU.  A final forced halt is applied unconditionally,
 * even when the pause loop reported success, followed by a short
 * settle delay.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
 3614
/* tp->lock is held. */
/* Halt the TX CPU; thin wrapper around tg3_pause_cpu(). */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
 3620
/* tp->lock is held. */
/* Clear the CPU state bits and drop the halt so the CPU at @cpu_base
 * resumes execution.
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
 3627
/* tp->lock is held. */
/* Resume the RX CPU; thin wrapper around tg3_resume_cpu(). */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
 3633
/* tp->lock is held. */
/* Stop the RX or TX on-chip processor, e.g. before loading firmware.
 *
 * 5906 parts use the VCPU halt bit instead of the CPU_MODE register,
 * and SSB-based cores (BCM4785) have no TX CPU to halt.  On success
 * the firmware's NVRAM arbitration request is also cleared.  Returns
 * 0 on success, -ENODEV if the CPU refuses to halt.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ parts have no separate TX CPU. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
 3671
 3672static int tg3_fw_data_len(struct tg3 *tp,
 3673			   const struct tg3_firmware_hdr *fw_hdr)
 3674{
 3675	int fw_len;
 3676
 3677	/* Non fragmented firmware have one firmware header followed by a
 3678	 * contiguous chunk of data to be written. The length field in that
 3679	 * header is not the length of data to be written but the complete
 3680	 * length of the bss. The data length is determined based on
 3681	 * tp->fw->size minus headers.
 3682	 *
 3683	 * Fragmented firmware have a main header followed by multiple
 3684	 * fragments. Each fragment is identical to non fragmented firmware
 3685	 * with a firmware header followed by a contiguous chunk of data. In
 3686	 * the main header, the length field is unused and set to 0xffffffff.
 3687	 * In each fragment header the length is the entire size of that
 3688	 * fragment i.e. fragment data + header length. Data length is
 3689	 * therefore length field in the header minus TG3_FW_HDR_LEN.
 3690	 */
 3691	if (tp->fw_len == 0xffffffff)
 3692		fw_len = be32_to_cpu(fw_hdr->len);
 3693	else
 3694		fw_len = tp->fw->size;
 3695
 3696	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
 3697}
 3698
/* tp->lock is held. */
/* Copy the firmware described by @fw_hdr into the scratch memory of
 * the CPU at @cpu_base.
 *
 * For non-57766 parts the CPU is halted first (after taking the NVRAM
 * lock, in case boot code is still running) and its scratch area is
 * zeroed.  57766 firmware is fragmented: the main header is skipped
 * and each fragment is written at the offset from its own header.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts (except 57766) take direct memory writes; the rest
	 * go through the indirect register interface.
	 */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Write each fragment's payload at the scratch offset encoded in
	 * its header, then step to the next fragment header.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
 3764
/* tp->lock is held. */
/* Set the program counter of the CPU at @cpu_base to @pc, re-halting
 * the CPU and retrying (up to 5 times) until the PC reads back as
 * written.  Returns 0 on success, -EBUSY otherwise.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
 3785
/* tp->lock is held. */
/* Download the 5701 A0 workaround firmware into both the RX and TX
 * CPU scratch areas, then restart only the RX CPU at the firmware
 * entry point.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
 3827
 3828static int tg3_validate_rxcpu_state(struct tg3 *tp)
 3829{
 3830	const int iters = 1000;
 3831	int i;
 3832	u32 val;
 3833
 3834	/* Wait for boot code to complete initialization and enter service
 3835	 * loop. It is then safe to download service patches
 3836	 */
 3837	for (i = 0; i < iters; i++) {
 3838		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
 3839			break;
 3840
 3841		udelay(10);
 3842	}
 3843
 3844	if (i == iters) {
 3845		netdev_err(tp->dev, "Boot code not ready for service patches\n");
 3846		return -EBUSY;
 3847	}
 3848
 3849	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
 3850	if (val & 0xff) {
 3851		netdev_warn(tp->dev,
 3852			    "Other patches exist. Not downloading EEE patch\n");
 3853		return -EEXIST;
 3854	}
 3855
 3856	return 0;
 3857}
 3858
/* tp->lock is held. */
/* Download the EEE service patch to the 57766 RX CPU.  Only attempted
 * on NVRAM-less configurations, when the boot code is idle in its
 * service loop, no other patch is present, and a firmware blob was
 * actually loaded.  Best-effort: failures simply leave the patch out.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
 3899
/* tp->lock is held. */
/* Load the TSO firmware into the appropriate on-chip CPU — the RX
 * CPU's MBUF pool area on 5705, otherwise the TX CPU scratch space —
 * and restart it at the firmware entry point.  No-op unless the
 * FW_TSO flag is set.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
 3949
 3950/* tp->lock is held. */
 3951static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
 3952{
 3953	u32 addr_high, addr_low;
 3954
 3955	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
 3956	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
 3957		    (mac_addr[4] <<  8) | mac_addr[5]);
 3958
 3959	if (index < 4) {
 3960		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
 3961		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
 3962	} else {
 3963		index -= 4;
 3964		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
 3965		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
 3966	}
 3967}
 3968
 3969/* tp->lock is held. */
 3970static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
 3971{
 3972	u32 addr_high;
 3973	int i;
 3974
 3975	for (i = 0; i < 4; i++) {
 3976		if (i == 1 && skip_mac_1)
 3977			continue;
 3978		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 3979	}
 3980
 3981	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
 3982	    tg3_asic_rev(tp) == ASIC_REV_5704) {
 3983		for (i = 4; i < 16; i++)
 3984			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 3985	}
 3986
 3987	addr_high = (tp->dev->dev_addr[0] +
 3988		     tp->dev->dev_addr[1] +
 3989		     tp->dev->dev_addr[2] +
 3990		     tp->dev->dev_addr[3] +
 3991		     tp->dev->dev_addr[4] +
 3992		     tp->dev->dev_addr[5]) &
 3993		TX_BACKOFF_SEED_MASK;
 3994	tw32(MAC_TX_BACKOFF_SEED, addr_high);
 3995}
 3996
/* Rewrite the cached MISC_HOST_CTRL value into PCI config space.
 * Called around power-state transitions, which presumably may reset
 * PCI config space — TODO confirm against the PM callers.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
 4006
 4007static int tg3_power_up(struct tg3 *tp)
 4008{
 4009	int err;
 4010
 4011	tg3_enable_register_access(tp);
 4012
 4013	err = pci_set_power_state(tp->pdev, PCI_D0);
 4014	if (!err) {
 4015		/* Switch out of Vaux if it is a NIC */
 4016		tg3_pwrsrc_switch_to_vmain(tp);
 4017	} else {
 4018		netdev_err(tp->dev, "Transition to D0 failed\n");
 4019	}
 4020
 4021	return err;
 4022}
 4023
 4024static int tg3_setup_phy(struct tg3 *, bool);
 4025
 4026static int tg3_power_down_prepare(struct tg3 *tp)
 4027{
 4028	u32 misc_host_ctrl;
 4029	bool device_should_wake, do_low_power;
 4030
 4031	tg3_enable_register_access(tp);
 4032
 4033	/* Restore the CLKREQ setting. */
 4034	if (tg3_flag(tp, CLKREQ_BUG))
 4035		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
 4036					 PCI_EXP_LNKCTL_CLKREQ_EN);
 4037
 4038	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 4039	tw32(TG3PCI_MISC_HOST_CTRL,
 4040	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
 4041
 4042	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
 4043			     tg3_flag(tp, WOL_ENABLE);
 4044
 4045	if (tg3_flag(tp, USE_PHYLIB)) {
 4046		do_low_power = false;
 4047		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
 4048		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 4049			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
 4050			struct phy_device *phydev;
 4051			u32 phyid;
 4052
 4053			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 4054
 4055			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 4056
 4057			tp->link_config.speed = phydev->speed;
 4058			tp->link_config.duplex = phydev->duplex;
 4059			tp->link_config.autoneg = phydev->autoneg;
 4060			ethtool_convert_link_mode_to_legacy_u32(
 4061				&tp->link_config.advertising,
 4062				phydev->advertising);
 4063
 4064			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
 4065			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
 4066					 advertising);
 4067			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
 4068					 advertising);
 4069			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
 4070					 advertising);
 4071
 4072			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
 4073				if (tg3_flag(tp, WOL_SPEED_100MB)) {
 4074					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
 4075							 advertising);
 4076					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 4077							 advertising);
 4078					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 4079							 advertising);
 4080				} else {
 4081					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 4082							 advertising);
 4083				}
 4084			}
 4085
 4086			linkmode_copy(phydev->advertising, advertising);
 4087			phy_start_aneg(phydev);
 4088
 4089			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
 4090			if (phyid != PHY_ID_BCMAC131) {
 4091				phyid &= PHY_BCM_OUI_MASK;
 4092				if (phyid == PHY_BCM_OUI_1 ||
 4093				    phyid == PHY_BCM_OUI_2 ||
 4094				    phyid == PHY_BCM_OUI_3)
 4095					do_low_power = true;
 4096			}
 4097		}
 4098	} else {
 4099		do_low_power = true;
 4100
 4101		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
 4102			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 4103
 4104		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 4105			tg3_setup_phy(tp, false);
 4106	}
 4107
 4108	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 4109		u32 val;
 4110
 4111		val = tr32(GRC_VCPU_EXT_CTRL);
 4112		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
 4113	} else if (!tg3_flag(tp, ENABLE_ASF)) {
 4114		int i;
 4115		u32 val;
 4116
 4117		for (i = 0; i < 200; i++) {
 4118			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 4119			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 4120				break;
 4121			msleep(1);
 4122		}
 4123	}
 4124	if (tg3_flag(tp, WOL_CAP))
 4125		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
 4126						     WOL_DRV_STATE_SHUTDOWN |
 4127						     WOL_DRV_WOL |
 4128						     WOL_SET_MAGIC_PKT);
 4129
 4130	if (device_should_wake) {
 4131		u32 mac_mode;
 4132
 4133		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 4134			if (do_low_power &&
 4135			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 4136				tg3_phy_auxctl_write(tp,
 4137					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
 4138					       MII_TG3_AUXCTL_PCTL_WOL_EN |
 4139					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 4140					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
 4141				udelay(40);
 4142			}
 4143
 4144			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 4145				mac_mode = MAC_MODE_PORT_MODE_GMII;
 4146			else if (tp->phy_flags &
 4147				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
 4148				if (tp->link_config.active_speed == SPEED_1000)
 4149					mac_mode = MAC_MODE_PORT_MODE_GMII;
 4150				else
 4151					mac_mode = MAC_MODE_PORT_MODE_MII;
 4152			} else
 4153				mac_mode = MAC_MODE_PORT_MODE_MII;
 4154
 4155			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
 4156			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
 4157				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
 4158					     SPEED_100 : SPEED_10;
 4159				if (tg3_5700_link_polarity(tp, speed))
 4160					mac_mode |= MAC_MODE_LINK_POLARITY;
 4161				else
 4162					mac_mode &= ~MAC_MODE_LINK_POLARITY;
 4163			}
 4164		} else {
 4165			mac_mode = MAC_MODE_PORT_MODE_TBI;
 4166		}
 4167
 4168		if (!tg3_flag(tp, 5750_PLUS))
 4169			tw32(MAC_LED_CTRL, tp->led_ctrl);
 4170
 4171		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
 4172		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
 4173		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
 4174			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 4175
 4176		if (tg3_flag(tp, ENABLE_APE))
 4177			mac_mode |= MAC_MODE_APE_TX_EN |
 4178				    MAC_MODE_APE_RX_EN |
 4179				    MAC_MODE_TDE_ENABLE;
 4180
 4181		tw32_f(MAC_MODE, mac_mode);
 4182		udelay(100);
 4183
 4184		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
 4185		udelay(10);
 4186	}
 4187
 4188	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
 4189	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4190	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
 4191		u32 base_val;
 4192
 4193		base_val = tp->pci_clock_ctrl;
 4194		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
 4195			     CLOCK_CTRL_TXCLK_DISABLE);
 4196
 4197		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
 4198			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
 4199	} else if (tg3_flag(tp, 5780_CLASS) ||
 4200		   tg3_flag(tp, CPMU_PRESENT) ||
 4201		   tg3_asic_rev(tp) == ASIC_REV_5906) {
 4202		/* do nothing */
 4203	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
 4204		u32 newbits1, newbits2;
 4205
 4206		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4207		    tg3_asic_rev(tp) == ASIC_REV_5701) {
 4208			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
 4209				    CLOCK_CTRL_TXCLK_DISABLE |
 4210				    CLOCK_CTRL_ALTCLK);
 4211			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 4212		} else if (tg3_flag(tp, 5705_PLUS)) {
 4213			newbits1 = CLOCK_CTRL_625_CORE;
 4214			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
 4215		} else {
 4216			newbits1 = CLOCK_CTRL_ALTCLK;
 4217			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 4218		}
 4219
 4220		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
 4221			    40);
 4222
 4223		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
 4224			    40);
 4225
 4226		if (!tg3_flag(tp, 5705_PLUS)) {
 4227			u32 newbits3;
 4228
 4229			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4230			    tg3_asic_rev(tp) == ASIC_REV_5701) {
 4231				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
 4232					    CLOCK_CTRL_TXCLK_DISABLE |
 4233					    CLOCK_CTRL_44MHZ_CORE);
 4234			} else {
 4235				newbits3 = CLOCK_CTRL_44MHZ_CORE;
 4236			}
 4237
 4238			tw32_wait_f(TG3PCI_CLOCK_CTRL,
 4239				    tp->pci_clock_ctrl | newbits3, 40);
 4240		}
 4241	}
 4242
 4243	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
 4244		tg3_power_down_phy(tp, do_low_power);
 4245
 4246	tg3_frob_aux_power(tp, true);
 4247
 4248	/* Workaround for unstable PLL clock */
 4249	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
 4250	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
 4251	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
 4252		u32 val = tr32(0x7d00);
 4253
 4254		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
 4255		tw32(0x7d00, val);
 4256		if (!tg3_flag(tp, ENABLE_ASF)) {
 4257			int err;
 4258
 4259			err = tg3_nvram_lock(tp);
 4260			tg3_halt_cpu(tp, RX_CPU_BASE);
 4261			if (!err)
 4262				tg3_nvram_unlock(tp);
 4263		}
 4264	}
 4265
 4266	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 4267
 4268	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
 4269
 4270	return 0;
 4271}
 4272
 4273static void tg3_power_down(struct tg3 *tp)
 4274{
 4275	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 4276	pci_set_power_state(tp->pdev, PCI_D3hot);
 4277}
 4278
 4279static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
 4280{
 4281	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
 4282	case MII_TG3_AUX_STAT_10HALF:
 4283		*speed = SPEED_10;
 4284		*duplex = DUPLEX_HALF;
 4285		break;
 4286
 4287	case MII_TG3_AUX_STAT_10FULL:
 4288		*speed = SPEED_10;
 4289		*duplex = DUPLEX_FULL;
 4290		break;
 4291
 4292	case MII_TG3_AUX_STAT_100HALF:
 4293		*speed = SPEED_100;
 4294		*duplex = DUPLEX_HALF;
 4295		break;
 4296
 4297	case MII_TG3_AUX_STAT_100FULL:
 4298		*speed = SPEED_100;
 4299		*duplex = DUPLEX_FULL;
 4300		break;
 4301
 4302	case MII_TG3_AUX_STAT_1000HALF:
 4303		*speed = SPEED_1000;
 4304		*duplex = DUPLEX_HALF;
 4305		break;
 4306
 4307	case MII_TG3_AUX_STAT_1000FULL:
 4308		*speed = SPEED_1000;
 4309		*duplex = DUPLEX_FULL;
 4310		break;
 4311
 4312	default:
 4313		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 4314			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 4315				 SPEED_10;
 4316			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
 4317				  DUPLEX_HALF;
 4318			break;
 4319		}
 4320		*speed = SPEED_UNKNOWN;
 4321		*duplex = DUPLEX_UNKNOWN;
 4322		break;
 4323	}
 4324}
 4325
/* Program the PHY autoneg advertisement registers from an ethtool-style
 * advertise mask and flow-control setting, then (for EEE-capable PHYs)
 * update the EEE advertisement and associated DSP workarounds.
 * Returns 0 on success or the first failing PHY access error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Base 10/100 advertisement word plus pause capability bits. */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 force manual master mode (chip-rev workaround). */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Suppress LPI requests while the EEE advertisement is rewritten. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			/* EEE globally off: advertise nothing. */
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always undo the smdsp toggle; keep the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
 4409
/* Begin link bring-up on a copper PHY.  With autoneg enabled (or when the
 * PHY is in low-power mode) this programs the advertisement and restarts
 * autonegotiation; otherwise it forces the configured speed/duplex.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down: advertise only the low speeds needed
			 * for WOL, plus gigabit when allowed on aux power.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to ~15 ms)
			 * for it to go down before applying the forced mode.
			 * BMSR is read twice because link status is latched.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
 4506
/* Reconstruct tp->link_config from the PHY's current register state, so
 * the driver's notion of the link matches what firmware/boot code left
 * configured (used for link-flap avoidance).  Returns 0 on success, a
 * PHY read error, or -EIO for an undecodable forced-mode configuration.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Autoneg disabled: decode the forced speed/duplex. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg enabled: rebuild the advertised mode mask. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: the 1000X advertisement word also
			 * carries the pause configuration.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
 4603
 4604static int tg3_init_5401phy_dsp(struct tg3 *tp)
 4605{
 4606	int err;
 4607
 4608	/* Turn off tap power management. */
 4609	/* Set Extended packet length bit */
 4610	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 4611
 4612	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
 4613	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
 4614	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
 4615	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
 4616	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 4617
 4618	udelay(40);
 4619
 4620	return err;
 4621}
 4622
 4623static bool tg3_phy_eee_config_ok(struct tg3 *tp)
 4624{
 4625	struct ethtool_eee eee;
 4626
 4627	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 4628		return true;
 4629
 4630	tg3_eee_pull_config(tp, &eee);
 4631
 4632	if (tp->eee.eee_enabled) {
 4633		if (tp->eee.advertised != eee.advertised ||
 4634		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
 4635		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
 4636			return false;
 4637	} else {
 4638		/* EEE is disabled but we're advertising */
 4639		if (eee.advertised)
 4640			return false;
 4641	}
 4642
 4643	return true;
 4644}
 4645
 4646static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 4647{
 4648	u32 advmsk, tgtadv, advertising;
 4649
 4650	advertising = tp->link_config.advertising;
 4651	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 4652
 4653	advmsk = ADVERTISE_ALL;
 4654	if (tp->link_config.active_duplex == DUPLEX_FULL) {
 4655		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
 4656		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 4657	}
 4658
 4659	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
 4660		return false;
 4661
 4662	if ((*lcladv & advmsk) != tgtadv)
 4663		return false;
 4664
 4665	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4666		u32 tg3_ctrl;
 4667
 4668		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 4669
 4670		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 4671			return false;
 4672
 4673		if (tgtadv &&
 4674		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
 4675		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
 4676			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 4677			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
 4678				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 4679		} else {
 4680			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 4681		}
 4682
 4683		if (tg3_ctrl != tgtadv)
 4684			return false;
 4685	}
 4686
 4687	return true;
 4688}
 4689
 4690static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 4691{
 4692	u32 lpeth = 0;
 4693
 4694	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4695		u32 val;
 4696
 4697		if (tg3_readphy(tp, MII_STAT1000, &val))
 4698			return false;
 4699
 4700		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
 4701	}
 4702
 4703	if (tg3_readphy(tp, MII_LPA, rmtadv))
 4704		return false;
 4705
 4706	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
 4707	tp->link_config.rmt_adv = lpeth;
 4708
 4709	return true;
 4710}
 4711
 4712static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
 4713{
 4714	if (curr_link_up != tp->link_up) {
 4715		if (curr_link_up) {
 4716			netif_carrier_on(tp->dev);
 4717		} else {
 4718			netif_carrier_off(tp->dev);
 4719			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 4720				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 4721		}
 4722
 4723		tg3_link_report(tp);
 4724		return true;
 4725	}
 4726
 4727	return false;
 4728}
 4729
 4730static void tg3_clear_mac_status(struct tg3 *tp)
 4731{
 4732	tw32(MAC_EVENT, 0);
 4733
 4734	tw32_f(MAC_STATUS,
 4735	       MAC_STATUS_SYNC_CHANGED |
 4736	       MAC_STATUS_CFG_CHANGED |
 4737	       MAC_STATUS_MI_COMPLETION |
 4738	       MAC_STATUS_LNKSTATE_CHANGED);
 4739	udelay(40);
 4740}
 4741
/* Program the CPMU Energy Efficient Ethernet registers from tp->eee.
 * NOTE(review): callers follow this with a PHY reset for the settings
 * to take effect (see tg3_setup_copper_phy()).
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Build the EEE mode word; LPI-in-TX honors tx_lpi_enabled. */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write 0 (fully disabled) when EEE is not enabled. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	/* Debounce timers; low 16 bits of DBTMR1 carry the LPI timer. */
	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
 4777
/* (Re)establish the link on a copper PHY: apply per-chip workarounds,
 * poll link/autoneg state, validate the negotiated configuration, and
 * program the MAC mode/LED/flow-control to match.  @force_reset forces
 * a PHY reset before evaluation.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u32 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	/* Stop hardware MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status is latched-low; read twice for current. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401 needs its DSP workarounds reloaded whenever
			 * the link is down.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link-up (latched BMSR read twice each iteration). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode speed/duplex. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to stabilize to a plausible value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			/* Link is only "up" when the negotiated config
			 * matches what the driver asked for.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status; FET PHYs report it in a
			 * different register.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	/* Program the MAC port mode to match the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
 5090
/* Software state for the 1000BASE-X fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine), patterned after IEEE 802.3 clause 37.
 */
struct tg3_fiber_aneginfo {
	/* Current ANEG_STATE_* state. */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* MR_* control/status flags (clause 37 "management register"
	 * naming); MR_LP_ADV_* reflect the link partner's ability word.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters (incremented per state-machine invocation). */
	unsigned long link_time, cur_time;

	/* Last received config word and how often it repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match predicates derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	/* Transmitted/received clause-37 config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks to let the link settle before advancing the state machine. */
#define ANEG_STATE_SETTLE_TIME	10000
 5154
/* Software implementation of the IEEE 802.3z Clause 37 (1000BASE-X)
 * auto-negotiation state machine, for MACs that cannot run autoneg in
 * hardware.  fiber_autoneg() steps this roughly once per microsecond.
 *
 * @tp: device instance (MAC register access via tr32()/tw32())
 * @ap: state carried between ticks: current state, tick counters,
 *	last rx/tx config words and link-partner ability flags
 *
 * Return: ANEG_OK to keep stepping immediately, ANEG_TIMER_ENAB while
 * a settle interval is running, ANEG_DONE when negotiation finished,
 * ANEG_FAILED on an invalid config word or unknown state.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the most recent received /C/ (config) ordered set.  An
	 * "ability match" requires the same non-zero word in consecutive
	 * samples, mirroring the 802.3z ability_match variable.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving - the partner sends idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fall through */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word for the settle time
		 * before advertising our abilities.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fall through */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start advertising: full duplex plus whichever pause
		 * bits the configured flow control maps to.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's word by echoing ours with
		 * the ACK bit set.
		 */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fall through */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner changed its word mid-ack:
				 * restart from scratch.
				 */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability word into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008 is presumably the partner's toggle bit - mapped
		 * straight to MR_TOGGLE_RX; no named constant exists.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see the states
					 * below) - give up.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
 5406
/* Run the software 1000BASE-X autoneg state machine to completion.
 *
 * @tp: device instance
 * @txflags: out - the ANEG_CFG_* config word we advertised
 * @rxflags: out - resulting MR_* flags describing the link partner
 *
 * Return: 1 when autoneg finished with a usable link (AN complete,
 * link OK or full-duplex-capable partner), 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode, then start sending config words. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Step the state machine once per microsecond, bounded at
	 * ~195 ms so an unresponsive partner cannot stall us here.
	 */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
 5451
/* Bring-up sequence for the external BCM8002 SERDES PHY.  Runs on
 * first init, or again whenever PCS sync is present; skipped when the
 * driver is already initialized and there is no sync.  The raw
 * register/value pairs are opaque vendor magic.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
 5501
/* Fiber link setup using the SG_DIG hardware autoneg engine.
 * Programs SG_DIG_CTRL for the requested autoneg/pause settings,
 * applies the chip-revision-specific MAC_SERDES_CFG workaround, and
 * falls back to parallel detection when the link partner never sends
 * config words.
 *
 * @tp: device instance
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Return: true if the link is up after this pass.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* The SERDES config workaround applies to everything except
	 * 5704 A0/A1; the value written differs per MAC port.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on,
		 * and report link up on PCS sync alone.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			/* Keep a parallel-detected link alive while the
			 * grace counter runs down.
			 */
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Soft-reset the SG_DIG block with the new config. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg done: derive pause settings from what
			 * we advertised and what the partner reported.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable it and try
				 * parallel detection instead.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
 5646
/* Fiber link setup for MACs without the SG_DIG autoneg engine: run the
 * software autoneg state machine when autoneg is enabled, otherwise
 * force a 1000FD link.  PCS sync is required to report link at all.
 *
 * @tp: device instance
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Return: true if the link is up after this pass.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Map the negotiated config words onto MII-style
			 * pause advertisement bits for flow control.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Ack the sync/config changed bits until they stop
		 * flapping (bounded retry).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but sync with no config words still
		 * counts as a (parallel-detected) link.
		 */
		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
 5711
/* Link bring-up for TBI (1000BASE-X fiber) devices.  Short-circuits
 * when software autoneg is in use and the link status looks clean,
 * otherwise re-runs autoneg (hardware or by hand), waits for the MAC
 * status bits to settle, drives the link LEDs and reports changes.
 *
 * @tp: device instance
 * @force_reset: unused on this path; kept for signature parity with
 *	the other tg3_setup_*_phy() variants
 *
 * Return: always 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u32 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Remember the pre-setup state so we only log a report when
	 * something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		/* Link already up with clean status: just ack the
		 * change bits and leave everything alone.
		 */
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated while clearing any stale
	 * link-change indication.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the changed bits until they stop flapping (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS once to prod the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
 5814
 5815static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 5816{
 5817	int err = 0;
 5818	u32 bmsr, bmcr;
 5819	u32 current_speed = SPEED_UNKNOWN;
 5820	u8 current_duplex = DUPLEX_UNKNOWN;
 5821	bool current_link_up = false;
 5822	u32 local_adv, remote_adv, sgsr;
 5823
 5824	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
 5825	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
 5826	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
 5827	     (sgsr & SERDES_TG3_SGMII_MODE)) {
 5828
 5829		if (force_reset)
 5830			tg3_phy_reset(tp);
 5831
 5832		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
 5833
 5834		if (!(sgsr & SERDES_TG3_LINK_UP)) {
 5835			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5836		} else {
 5837			current_link_up = true;
 5838			if (sgsr & SERDES_TG3_SPEED_1000) {
 5839				current_speed = SPEED_1000;
 5840				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5841			} else if (sgsr & SERDES_TG3_SPEED_100) {
 5842				current_speed = SPEED_100;
 5843				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 5844			} else {
 5845				current_speed = SPEED_10;
 5846				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 5847			}
 5848
 5849			if (sgsr & SERDES_TG3_FULL_DUPLEX)
 5850				current_duplex = DUPLEX_FULL;
 5851			else
 5852				current_duplex = DUPLEX_HALF;
 5853		}
 5854
 5855		tw32_f(MAC_MODE, tp->mac_mode);
 5856		udelay(40);
 5857
 5858		tg3_clear_mac_status(tp);
 5859
 5860		goto fiber_setup_done;
 5861	}
 5862
 5863	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5864	tw32_f(MAC_MODE, tp->mac_mode);
 5865	udelay(40);
 5866
 5867	tg3_clear_mac_status(tp);
 5868
 5869	if (force_reset)
 5870		tg3_phy_reset(tp);
 5871
 5872	tp->link_config.rmt_adv = 0;
 5873
 5874	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5875	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5876	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 5877		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5878			bmsr |= BMSR_LSTATUS;
 5879		else
 5880			bmsr &= ~BMSR_LSTATUS;
 5881	}
 5882
 5883	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 5884
 5885	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
 5886	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 5887		/* do nothing, just check for link up at the end */
 5888	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 5889		u32 adv, newadv;
 5890
 5891		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5892		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
 5893				 ADVERTISE_1000XPAUSE |
 5894				 ADVERTISE_1000XPSE_ASYM |
 5895				 ADVERTISE_SLCT);
 5896
 5897		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 5898		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 5899
 5900		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
 5901			tg3_writephy(tp, MII_ADVERTISE, newadv);
 5902			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 5903			tg3_writephy(tp, MII_BMCR, bmcr);
 5904
 5905			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5906			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
 5907			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5908
 5909			return err;
 5910		}
 5911	} else {
 5912		u32 new_bmcr;
 5913
 5914		bmcr &= ~BMCR_SPEED1000;
 5915		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
 5916
 5917		if (tp->link_config.duplex == DUPLEX_FULL)
 5918			new_bmcr |= BMCR_FULLDPLX;
 5919
 5920		if (new_bmcr != bmcr) {
 5921			/* BMCR_SPEED1000 is a reserved bit that needs
 5922			 * to be set on write.
 5923			 */
 5924			new_bmcr |= BMCR_SPEED1000;
 5925
 5926			/* Force a linkdown */
 5927			if (tp->link_up) {
 5928				u32 adv;
 5929
 5930				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5931				adv &= ~(ADVERTISE_1000XFULL |
 5932					 ADVERTISE_1000XHALF |
 5933					 ADVERTISE_SLCT);
 5934				tg3_writephy(tp, MII_ADVERTISE, adv);
 5935				tg3_writephy(tp, MII_BMCR, bmcr |
 5936							   BMCR_ANRESTART |
 5937							   BMCR_ANENABLE);
 5938				udelay(10);
 5939				tg3_carrier_off(tp);
 5940			}
 5941			tg3_writephy(tp, MII_BMCR, new_bmcr);
 5942			bmcr = new_bmcr;
 5943			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5944			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5945			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 5946				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5947					bmsr |= BMSR_LSTATUS;
 5948				else
 5949					bmsr &= ~BMSR_LSTATUS;
 5950			}
 5951			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5952		}
 5953	}
 5954
 5955	if (bmsr & BMSR_LSTATUS) {
 5956		current_speed = SPEED_1000;
 5957		current_link_up = true;
 5958		if (bmcr & BMCR_FULLDPLX)
 5959			current_duplex = DUPLEX_FULL;
 5960		else
 5961			current_duplex = DUPLEX_HALF;
 5962
 5963		local_adv = 0;
 5964		remote_adv = 0;
 5965
 5966		if (bmcr & BMCR_ANENABLE) {
 5967			u32 common;
 5968
 5969			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
 5970			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
 5971			common = local_adv & remote_adv;
 5972			if (common & (ADVERTISE_1000XHALF |
 5973				      ADVERTISE_1000XFULL)) {
 5974				if (common & ADVERTISE_1000XFULL)
 5975					current_duplex = DUPLEX_FULL;
 5976				else
 5977					current_duplex = DUPLEX_HALF;
 5978
 5979				tp->link_config.rmt_adv =
 5980					   mii_adv_to_ethtool_adv_x(remote_adv);
 5981			} else if (!tg3_flag(tp, 5780_CLASS)) {
 5982				/* Link is up via parallel detect */
 5983			} else {
 5984				current_link_up = false;
 5985			}
 5986		}
 5987	}
 5988
 5989fiber_setup_done:
 5990	if (current_link_up && current_duplex == DUPLEX_FULL)
 5991		tg3_setup_flow_control(tp, local_adv, remote_adv);
 5992
 5993	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 5994	if (tp->link_config.active_duplex == DUPLEX_HALF)
 5995		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 5996
 5997	tw32_f(MAC_MODE, tp->mac_mode);
 5998	udelay(40);
 5999
 6000	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 6001
 6002	tp->link_config.active_speed = current_speed;
 6003	tp->link_config.active_duplex = current_duplex;
 6004
 6005	tg3_test_and_report_link_chg(tp, current_link_up);
 6006	return err;
 6007}
 6008
/* Periodic worker for MII-SERDES parts: after the autoneg grace
 * counter expires with no link, force the link up via parallel
 * detection when the PHY reports signal but no config words; and when
 * config words later appear on a parallel-detected link, hand control
 * back to auto-negotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; presumably the first read clears
			 * latched state - TODO confirm against PHY doc.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
 6068
/* (Re)configure the link for the current media type, then update MAC
 * parameters that depend on the resulting link state: the 5784_AX GRC
 * clock prescaler, TX IPG/slot-time lengths, statistics coalescing
 * ticks and the ASPM L1 threshold workaround.
 *
 * @tp: device instance
 * @force_reset: passed through to the media-specific setup routine
 *
 * Return: error status from the media-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	/* Dispatch on media type: TBI fiber, MII SERDES, or copper. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Derive the GRC prescaler from the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Use the larger 0xff slot time for half-duplex gigabit. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Statistics coalescing only ticks while link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Link down: program tp->pwrmgmt_thresh as the L1
		 * threshold; link up: max the threshold field out.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
 6134
/* tp->lock must be held */
/* Read the 64-bit EAV reference clock.  The LSB read is bracketed
 * with PTP system timestamps so callers can correlate device time
 * with system time (see tg3_ptp_gettimex()).
 */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
	u64 stamp;

	ptp_read_system_prets(sts);
	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	ptp_read_system_postts(sts);
	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

	return stamp;
}
 6147
/* tp->lock must be held */
/* Load a new 64-bit value into the EAV reference clock: stop the
 * clock, write LSB then MSB, then resume.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
 6158
 6159static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
 6160static inline void tg3_full_unlock(struct tg3 *tp);
 6161static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
 6162{
 6163	struct tg3 *tp = netdev_priv(dev);
 6164
 6165	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
 6166				SOF_TIMESTAMPING_RX_SOFTWARE |
 6167				SOF_TIMESTAMPING_SOFTWARE;
 6168
 6169	if (tg3_flag(tp, PTP_CAPABLE)) {
 6170		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
 6171					SOF_TIMESTAMPING_RX_HARDWARE |
 6172					SOF_TIMESTAMPING_RAW_HARDWARE;
 6173	}
 6174
 6175	if (tp->ptp_clock)
 6176		info->phc_index = ptp_clock_index(tp->ptp_clock);
 6177	else
 6178		info->phc_index = -1;
 6179
 6180	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 6181
 6182	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 6183			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
 6184			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
 6185			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 6186	return 0;
 6187}
 6188
/* PTP: adjust the hardware clock frequency by @ppb parts per billion
 * (negative @ppb slows the clock).  A zero correction disables the
 * hardware correction register entirely.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Work with the magnitude; the sign becomes the NEG flag bit. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
 6224
/* PTP: shift the device time by @delta ns.  The shift is applied in
 * software only: it accumulates in tp->ptp_adjust, which the read
 * path (tg3_ptp_gettimex()) adds to the hardware counter.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
 6235
 6236static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
 6237			    struct ptp_system_timestamp *sts)
 6238{
 6239	u64 ns;
 6240	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6241
 6242	tg3_full_lock(tp, 0);
 6243	ns = tg3_refclk_read(tp, sts);
 6244	ns += tp->ptp_adjust;
 6245	tg3_full_unlock(tp);
 6246
 6247	*ts = ns_to_timespec64(ns);
 6248
 6249	return 0;
 6250}
 6251
 6252static int tg3_ptp_settime(struct ptp_clock_info *ptp,
 6253			   const struct timespec64 *ts)
 6254{
 6255	u64 ns;
 6256	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6257
 6258	ns = timespec64_to_ns(ts);
 6259
 6260	tg3_full_lock(tp, 0);
 6261	tg3_refclk_write(tp, ns);
 6262	tp->ptp_adjust = 0;
 6263	tg3_full_unlock(tp);
 6264
 6265	return 0;
 6266}
 6267
 6268static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 6269			  struct ptp_clock_request *rq, int on)
 6270{
 6271	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6272	u32 clock_ctl;
 6273	int rval = 0;
 6274
 6275	switch (rq->type) {
 6276	case PTP_CLK_REQ_PEROUT:
 6277		/* Reject requests with unsupported flags */
 6278		if (rq->perout.flags)
 6279			return -EOPNOTSUPP;
 6280
 6281		if (rq->per