
/arch/ppc/8260_io/enet.c

https://bitbucket.org/evzijst/gittest
/*
 * Ethernet driver for Motorola MPC8260.
 * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 2000 MontaVista Software Inc. (source@mvista.com)
 *	2.3.99 Updates
 *
 * I copied this from the 8xx CPM Ethernet driver, so follow the
 * credits back through that.
 *
 * This version of the driver is somewhat selectable for the different
 * processor/board combinations.  It works for the boards I know about
 * now, and should be easily modified to include others.  Some of the
 * configuration information is contained in <asm/commproc.h> and the
 * remainder is here.
 *
 * Buffer descriptors are kept in the CPM dual port RAM, and the frame
 * buffers are in the host memory.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/immap_cpm2.h>
#include <asm/pgtable.h>
#include <asm/mpc8260.h>
#include <asm/uaccess.h>
#include <asm/cpm2.h>
#include <asm/irq.h>

/*
 *				Theory of Operation
 *
 * The MPC8260 CPM performs the Ethernet processing on an SCC.  It can use
 * an arbitrary number of buffers on byte boundaries, but must have at
 * least two receive buffers to prevent constant overrun conditions.
 *
 * The buffer descriptors are allocated from the CPM dual port memory
 * with the data buffers allocated from host memory, just like all other
 * serial communication protocols.  The host memory buffers are allocated
 * from the free page pool, and then divided into smaller receive and
 * transmit buffers.  The size of the buffers should be a power of two,
 * since that nicely divides the page.  This creates a ring buffer
 * structure similar to the LANCE and other controllers.
 *
 * Like the LANCE driver:
 * The driver runs as two independent, single-threaded flows of control.  One
 * is the send-packet routine, which enforces single-threaded use by the
 * cep->tx_full flag.  The other thread is the interrupt handler, which is
 * single threaded by the hardware and other software.
 */

/* The transmitter timeout.
 */
#define TX_TIMEOUT	(2*HZ)

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are powers of two, so it is best
 * to keep them that way.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuff directly.
 */
#define CPM_ENET_RX_PAGES	4
#define CPM_ENET_RX_FRSIZE	2048
#define CPM_ENET_RX_FRPPG	(PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE		8	/* Must be a power of two */
#define TX_RING_MOD_MASK	7	/*   for this to work */
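
/* Ring indices wrap by masking rather than by a modulo, which is why the
 * ring sizes must stay powers of two.  A minimal sketch of the index
 * arithmetic used below for skb_cur and skb_dirty:
 *
 *	next = (cur + 1) & TX_RING_MOD_MASK;	with cur == 7 this yields 0
 */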

/* The CPM stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
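
/* For reference: 1518 bytes is the classic maximum Ethernet frame
 * (6+6+2 header bytes, 1500 bytes of payload, and a 4-byte FCS), and the
 * buffer limit is rounded up to 1520 because the CPM requires the
 * maximum receive buffer length (MRBLR) to be a multiple of four.
 */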

/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
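/* Concretely: when cur_tx == dirty_tx, the ring is completely full if
 * the descriptor they both point at still has BD_ENET_TX_READY set, and
 * completely empty if that bit is clear.  Both the transmit and cleanup
 * paths below depend on this test.
 */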
struct scc_enet_private {
	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	*/
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	scc_t	*sccp;
	struct	net_device_stats stats;
	uint	tx_full;
	spinlock_t lock;
};

static int scc_enet_open(struct net_device *dev);
static int scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int scc_enet_rx(struct net_device *dev);
static irqreturn_t scc_enet_interrupt(int irq, void *dev_id, struct pt_regs *);
static int scc_enet_close(struct net_device *dev);
static struct net_device_stats *scc_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);

/* These will be configurable for the SCC choice.
*/
#define CPM_ENET_BLOCK	CPM_CR_SCC1_SBLOCK
#define CPM_ENET_PAGE	CPM_CR_SCC1_PAGE
#define PROFF_ENET	PROFF_SCC1
#define SCC_ENET	0
#define SIU_INT_ENET	SIU_INT_SCC1

/* These are both board and SCC dependent....
*/
#define PD_ENET_RXD	((uint)0x00000001)
#define PD_ENET_TXD	((uint)0x00000002)
#define PD_ENET_TENA	((uint)0x00000004)
#define PC_ENET_RENA	((uint)0x00020000)
#define PC_ENET_CLSN	((uint)0x00000004)
#define PC_ENET_TXCLK	((uint)0x00000800)
#define PC_ENET_RXCLK	((uint)0x00000400)
#define CMX_CLK_ROUTE	((uint)0x25000000)
#define CMX_CLK_MASK	((uint)0xff000000)

/* Specific to a board.
*/
#define PC_EST8260_ENET_LOOPBACK	((uint)0x80000000)
#define PC_EST8260_ENET_SQE		((uint)0x40000000)
#define PC_EST8260_ENET_NOTFD		((uint)0x20000000)

static int
scc_enet_open(struct net_device *dev)
{
	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	netif_start_queue(dev);
	return 0;					/* Always succeed */
}

static int
scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
	volatile cbd_t	*bdp;

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_full should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);
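	/* The buffer address is physical (__pa) because the CPM's DMA
	 * engine does not go through the MMU; the receive path applies
	 * __va() to translate back before copying.
	 */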

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;

	spin_lock_irq(&cep->lock);

	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		netif_stop_queue(dev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&cep->lock);

	return 0;
}

static void
scc_enet_timeout(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	printk("%s: transmit timed out.\n", dev->name);
	cep->stats.tx_errors++;
#ifndef final_version
	{
		int	i;
		cbd_t	*bdp;
		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
		       cep->cur_tx, cep->tx_full ? " (full)" : "",
		       cep->cur_rx);
		bdp = cep->tx_bd_base;
		printk(" Tx @base %p :\n", bdp);
		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc,
			       bdp->cbd_datlen,
			       bdp->cbd_bufaddr);
		bdp = cep->rx_bd_base;
		printk(" Rx @base %p :\n", bdp);
		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc,
			       bdp->cbd_datlen,
			       bdp->cbd_bufaddr);
	}
#endif
	if (!cep->tx_full)
		netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static irqreturn_t
scc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	struct	net_device *dev = dev_id;
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;

	cep = (struct scc_enet_private *)dev->priv;

	/* Get the interrupt events that caused us to be here.
	*/
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
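	/* The SCCE event register is write-one-to-clear, so writing back
	 * the bits we just read acknowledges exactly those events without
	 * losing any new ones the SCC posts in the meantime.
	 */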
	must_restart = 0;

	/* Handle receive event in its own function.
	*/
	if (int_events & SCCE_ENET_RXF)
		scc_enet_rx(dev);

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so keep the debug code until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	*/
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
	    spin_lock(&cep->lock);
	    bdp = cep->dirty_tx;
	    while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
		if ((bdp == cep->cur_tx) && (cep->tx_full == 0))
		    break;

		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
			cep->stats.tx_heartbeat_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
			cep->stats.tx_window_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
			cep->stats.tx_aborted_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
			cep->stats.tx_fifo_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
			cep->stats.tx_carrier_errors++;

		/* No heartbeat or lost carrier are not really bad errors.
		 * The others require a restart transmit command.
		 */
		if (bdp->cbd_sc &
		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
			must_restart = 1;
			cep->stats.tx_errors++;
		}

		cep->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			cep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		*/
		dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		*/
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = cep->tx_bd_base;
		else
			bdp++;

		/* I don't know if we can be held off from processing these
		 * interrupts for more than one frame time.  I really hope
		 * not.  In such a case, we would now want to check the
		 * currently available BD (cur_tx) and determine if any
		 * buffers between the dirty_tx and cur_tx have also been
		 * sent.  We would want to process anything in between that
		 * does not have BD_ENET_TX_READY set.
		 */

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (cep->tx_full) {
			cep->tx_full = 0;
			if (netif_queue_stopped(dev)) {
				netif_wake_queue(dev);
			}
		}

		cep->dirty_tx = (cbd_t *)bdp;
	    }

	    if (must_restart) {
		volatile cpm_cpm2_t *cp;

		/* Some transmit errors cause the transmitter to shut
		 * down.  We now issue a restart transmit.  Since the
		 * errors close the BD and update the pointers, the restart
		 * _should_ pick up without having to reset any of our
		 * pointers either.
		 */
		cp = cpmp;
		cp->cp_cpcr =
		    mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
				CPM_CR_RESTART_TX) | CPM_CR_FLG;
		while (cp->cp_cpcr & CPM_CR_FLG);
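		/* CPM_CR_FLG is set when the command is issued and cleared
		 * by the CP once it has processed it, so the spin above is
		 * the usual way to wait for a CP command to complete.
		 */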
	    }
	    spin_unlock(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		printk("SCC ENET: BSY can't happen.\n");
	}

	return IRQ_HANDLED;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	struct	sk_buff *skb;
	ushort	pkt_len;

	cep = (struct scc_enet_private *)dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

	for (;;) {
		if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
			break;

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * both the first and last indicators should be set.
		 */
		if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
			(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
				printk("CPM ENET: rcv is not first+last\n");
#endif

		/* Frame too long or too short.
		*/
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
			cep->stats.rx_length_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			cep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			cep->stats.rx_fifo_errors++;

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			cep->stats.rx_frame_errors++;
		}
		else {
			/* Process the incoming frame.
			*/
			cep->stats.rx_packets++;
			pkt_len = bdp->cbd_datlen;
			cep->stats.rx_bytes += pkt_len;

			/* This does 16 byte alignment, much more than we need.
			 * The packet length includes FCS, but we don't want to
			 * include that when passing upstream as it messes up
			 * bridging applications.
			 */
			skb = dev_alloc_skb(pkt_len-4);

			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				cep->stats.rx_dropped++;
			}
			else {
				skb->dev = dev;
				skb_put(skb,pkt_len-4);	/* Make room */
				eth_copy_and_sum(skb,
					(unsigned char *)__va(bdp->cbd_bufaddr),
					pkt_len-4, 0);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
		}

		/* Clear the status flags for this buffer.
		*/
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		*/
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		*/
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = cep->rx_bd_base;
		else
			bdp++;
	}
	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}

static int
scc_enet_close(struct net_device *dev)
{
	/* Don't know what to do yet.
	*/
	netif_stop_queue(dev);

	return 0;
}

static struct net_device_stats *scc_enet_get_stats(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	return &cep->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

static void set_multicast_list(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	struct	dev_mc_list *dmi;
	u_char	*mcptr, *tdptr;
	volatile scc_enet_t *ep;
	int	i, j;

	cep = (struct scc_enet_private *)dev->priv;

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)dev->base_addr;

	if (dev->flags & IFF_PROMISC) {
		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		cep->sccp->scc_psmr |= SCC_PSMR_PRO;
	} else {

		cep->sccp->scc_psmr &= ~SCC_PSMR_PRO;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->sen_gaddr1 = 0xffff;
			ep->sen_gaddr2 = 0xffff;
			ep->sen_gaddr3 = 0xffff;
			ep->sen_gaddr4 = 0xffff;
		}
		else {
			/* Clear filter and add the addresses in the list.
			*/
			ep->sen_gaddr1 = 0;
			ep->sen_gaddr2 = 0;
			ep->sen_gaddr3 = 0;
			ep->sen_gaddr4 = 0;

			dmi = dev->mc_list;

			for (i=0; i<dev->mc_count; i++, dmi = dmi->next) {

				/* Only support group multicast for now.
				*/
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* The address in dmi_addr is LSB first,
				 * and taddr is MSB first.  We have to
				 * copy bytes MSB first from dmi_addr.
				 */
				mcptr = (u_char *)dmi->dmi_addr + 5;
				tdptr = (u_char *)&ep->sen_taddrh;
				for (j=0; j<6; j++)
					*tdptr++ = *mcptr--;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE,
						CPM_ENET_BLOCK, 0,
						CPM_CR_SET_GADDR) | CPM_CR_FLG;
				/* this delay is necessary here -- Cort */
				udelay(10);
				while (cpmp->cp_cpcr & CPM_CR_FLG);
			}
		}
	}
}

/* Initialize the CPM Ethernet on SCC.
 */
static int __init scc_enet_init(void)
{
	struct net_device *dev;
	struct scc_enet_private *cep;
	int i, j, err;
	uint dp_offset;
	unsigned char	*eap;
	unsigned long	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm_cpm2_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	cpm2_map_t	*immap;
	volatile	iop_cpm2_t	*io;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (cpm2_map_t *)CPM_MAP_ADDR;	/* and to internal registers */
	io = &immap->im_ioport;

	bd = (bd_t *)__res;

	/* Create an Ethernet device instance.
	*/
	dev = alloc_etherdev(sizeof(*cep));
	if (!dev)
		return -ENOMEM;

	cep = dev->priv;
	spin_lock_init(&cep->lock);

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);

	/* And another to the SCC register area.
	*/
	sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case someone left it running.
	*/
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Configure port C and D pins for SCC Ethernet.  This
	 * won't work for all SCC possibilities....it will be
	 * board/port specific.
	 */
	io->iop_pparc |=
		(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_pdirc &=
		~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc &=
		~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc |= PC_ENET_CLSN;

	io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird &= ~PD_ENET_RXD;
	io->iop_psord |= PD_ENET_TXD;
	io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA);
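
	/* Roughly: for each pin, PPAR assigns it to its peripheral
	 * function instead of general-purpose I/O, PDIR sets the
	 * direction, and PSOR selects which of the pin's two peripheral
	 * options is in effect.
	 */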

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK;
	immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE;

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
	ep->sen_genscc.scc_rbase = dp_offset;
	cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);

	dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
	ep->sen_genscc.scc_tbase = dp_offset;
	cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
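
	/* cpm_dpalloc() hands back an offset into the dual port RAM,
	 * which is the form rbase/tbase want, while cpm_dpram_addr()
	 * turns the same offset into a virtual address we can use to
	 * walk the rings.  The alignment of 8 matches sizeof(cbd_t).
	 */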

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB;
	ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	*/
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;
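
	/* These are the standard Ethernet CRC-32 constants: an all-ones
	 * preset, and 0xdebb20e3 as the residue expected of a frame whose
	 * FCS checks out.
	 */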

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;	/* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;	/* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	*/
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 *
	 * This is supplied in the board information structure, so we
	 * copy that into the controller.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];
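
	/* The parameter RAM expects the station address byte-reversed
	 * (sen_paddrh starts with the final octet), which is why the loop
	 * above walks dev_addr[] backwards; the temp address used by
	 * set_multicast_list follows the same convention.
	 */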

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		*/
		mem_addr = __get_free_page(GFP_KERNEL);
		if (mem_addr == 0) {
			/* Pages and DP RAM grabbed on earlier iterations
			 * are not reclaimed here; a full unwind would
			 * free those as well.
			 */
			printk("CPM ENET: no memory for rx buffers.\n");
			free_netdev(dev);
			return -ENOMEM;
		}

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
			CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	*/
	err = request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);
	if (err) {
		printk("CPM ENET: could not request interrupt.\n");
		free_netdev(dev);
		return err;
	}

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	*/
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_psmr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
	io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE);
	io->iop_pdatc |= PC_EST8260_ENET_NOTFD;

	dev->base_addr = (unsigned long)ep;

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = scc_enet_open;
	dev->hard_start_xmit = scc_enet_start_xmit;
	dev->tx_timeout = scc_enet_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = scc_enet_close;
	dev->get_stats = scc_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* And last, enable the transmit and receive processing.
	*/
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	err = register_netdev(dev);
	if (err) {
		free_irq(SIU_INT_ENET, dev);
		free_netdev(dev);
		return err;
	}

	printk("%s: SCC ENET Version 0.1, ", dev->name);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}

module_init(scc_enet_init);
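
/* Note there is no matching module_exit(): close() only stops the queue,
 * and nothing tears the SCC or the rings back down, so the driver is
 * intended to be built in (or, as a module, to stay loaded).
 */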