PageRenderTime 53ms CodeModel.GetById 24ms app.highlight 24ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/alpha/kernel/sys_titan.c

https://bitbucket.org/evzijst/gittest
C | 420 lines | 281 code | 61 blank | 78 comment | 31 complexity | 871b4fa02da3d80d49e7057e581ab55c MD5 | raw file
  1/*
  2 *	linux/arch/alpha/kernel/sys_titan.c
  3 *
  4 *	Copyright (C) 1995 David A Rusling
  5 *	Copyright (C) 1996, 1999 Jay A Estabrook
  6 *	Copyright (C) 1998, 1999 Richard Henderson
  7 *      Copyright (C) 1999, 2000 Jeff Wiedemeier
  8 *
  9 * Code supporting TITAN systems (EV6+TITAN), currently:
 10 *      Privateer
 11 *	Falcon
 12 *	Granite
 13 */
 14
 15#include <linux/config.h>
 16#include <linux/kernel.h>
 17#include <linux/types.h>
 18#include <linux/mm.h>
 19#include <linux/sched.h>
 20#include <linux/pci.h>
 21#include <linux/init.h>
 22#include <linux/bitops.h>
 23
 24#include <asm/ptrace.h>
 25#include <asm/system.h>
 26#include <asm/dma.h>
 27#include <asm/irq.h>
 28#include <asm/mmu_context.h>
 29#include <asm/io.h>
 30#include <asm/pgtable.h>
 31#include <asm/core_titan.h>
 32#include <asm/hwrpb.h>
 33#include <asm/tlbflush.h>
 34
 35#include "proto.h"
 36#include "irq_impl.h"
 37#include "pci_impl.h"
 38#include "machvec_impl.h"
 39#include "err_impl.h"
 40
 41
/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs.  Bit i of entry c is set when Titan
 * irq i (hardware numbering, i.e. already offset down by 16) may be
 * delivered to CPU c.  Default: every irq may go to every CPU.
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

/*
 * Software shadow of the interrupt enable state pushed to the DIM
 * CSRs by titan_update_irq_hw().  Mask is set (1) if enabled.
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs
 */
DEFINE_SPINLOCK(titan_irq_lock);
 60
/*
 * Push an interrupt-enable mask out to the Titan per-CPU DIM CSRs,
 * restricting each CPU's view by its recorded irq affinity.  The ISA
 * summary interrupt (bit 55) is routed to the boot CPU only.
 * Caller must hold titan_irq_lock (or be single-threaded init).
 */
static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;	/* ISA enable bit in a DIM */
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	cpumask_t cpm = cpu_present_mask;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	/* Build the four per-CPU DIM values from the global mask and
	   each CPU's affinity mask.  */
	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	/* Only the boot CPU sees the ISA interrupts.  */
	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	/* Redirect stores for CPUs that are not present to a dummy
	   stack slot so the unconditional writes below stay safe.  */
	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_isset(0, cpm)) dim0 = &dummy;
	if (!cpu_isset(1, cpm)) dim1 = &dummy;
	if (!cpu_isset(2, cpm)) dim2 = &dummy;
	if (!cpu_isset(3, cpm)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read the CSRs back so the writes have reached the hardware
	   before we return.  */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	/* Uniprocessor: only the boot CPU's DIM matters; it gets the
	   whole mask plus the ISA enable bit.  */
	volatile unsigned long *dimB;
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}
114
115static inline void
116titan_enable_irq(unsigned int irq)
117{
118	spin_lock(&titan_irq_lock);
119	titan_cached_irq_mask |= 1UL << (irq - 16);
120	titan_update_irq_hw(titan_cached_irq_mask);
121	spin_unlock(&titan_irq_lock);
122}
123
124static inline void
125titan_disable_irq(unsigned int irq)
126{
127	spin_lock(&titan_irq_lock);
128	titan_cached_irq_mask &= ~(1UL << (irq - 16));
129	titan_update_irq_hw(titan_cached_irq_mask);
130	spin_unlock(&titan_irq_lock);
131}
132
/* hw_interrupt_type startup hook: just enable the line.  */
static unsigned int
titan_startup_irq(unsigned int irq)
{
	titan_enable_irq(irq);

	/* Never anything pending at startup.  */
	return 0;
}
139
140static void
141titan_end_irq(unsigned int irq)
142{
143	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
144		titan_enable_irq(irq);
145}
146
147static void
148titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
149{
150	int cpu;
151
152	for (cpu = 0; cpu < 4; cpu++) {
153		if (cpu_isset(cpu, affinity))
154			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
155		else
156			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
157	}
158
159}
160
161static void
162titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
163{ 
164	spin_lock(&titan_irq_lock);
165	titan_cpu_set_irq_affinity(irq - 16, affinity);
166	titan_update_irq_hw(titan_cached_irq_mask);
167	spin_unlock(&titan_irq_lock);
168}
169
170static void
171titan_device_interrupt(unsigned long vector, struct pt_regs * regs)
172{
173	printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n");
174}
175
/* SRM-console interrupt dispatch: SRM device vectors start at 0x800
   and are spaced 0x10 apart; map the vector to an irq number and
   hand it to the generic layer.  */
static void
titan_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq = (vector - 0x800) >> 4;

	handle_irq(irq, regs);
}
184
185
186static void __init
187init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
188{
189	long i;
190	for (i = imin; i <= imax; ++i) {
191		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
192		irq_desc[i].handler = ops;
193	}
194}
195
/*
 * Controller ops for the 64 Titan irqs (16..79).  Note that disable
 * doubles as the shutdown and ack hooks: masking the line is all the
 * hardware needs in each case.
 */
static struct hw_interrupt_type titan_irq_type = {
       .typename       = "TITAN",
       .startup        = titan_startup_irq,
       .shutdown       = titan_disable_irq,
       .enable         = titan_enable_irq,
       .disable        = titan_disable_irq,
       .ack            = titan_disable_irq,
       .end            = titan_end_irq,
       .set_affinity   = titan_set_irq_affinity,
};
206
207static irqreturn_t
208titan_intr_nop(int irq, void *dev_id, struct pt_regs *regs)                    
209{
210      /*
211       * This is a NOP interrupt handler for the purposes of
212       * event counting -- just return.
213       */                                                                     
214       return IRQ_HANDLED;
215}
216
/*
 * Install the machine-vector interrupt dispatcher, mask every Titan
 * interrupt, and register the Titan controller for irqs 16..79.
 */
static void __init
titan_init_irq(void)
{
	/* Prefer the SRM dispatcher when running under the SRM console;
	   otherwise fall back to the direct handler (not implemented).
	   A dispatcher already set by the platform is left alone.  */
	if (alpha_using_srm && !alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_srm_device_interrupt;
	if (!alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_device_interrupt;

	/* Start with every Titan interrupt masked off.  */
	titan_update_irq_hw(0);

	init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}
229  
/*
 * Full interrupt init for systems with legacy (ISA) support: reset
 * the i8237 DMA controllers, set up the cascaded i8259 PICs, then do
 * the common Titan irq setup.
 */
static void __init
titan_legacy_init_irq(void)
{
	/* init the legacy dma controller */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* init the legacy irq controller */
	init_i8259a_irqs();

	/* init the titan irqs */
	titan_init_irq();
}
245
/*
 * Dispatch every pending Titan interrupt in 'mask' on the current
 * processor, highest bit (= highest priority) first, by converting
 * each to its SRM vector and calling the machine-vector dispatcher.
 */
void
titan_dispatch_irqs(u64 mask, struct pt_regs *regs)
{
	unsigned long vector;

	/*
	 * Mask down to those interrupts which are enabled on this processor
	 */
	mask &= titan_cpu_irq_affinity[smp_processor_id()];

	/*
	 * Dispatch all requested interrupts
	 */
	while (mask) {
		/* convert to SRM vector... priority is <63> -> <0> */
		__asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask));
		vector = 63 - vector;
		mask &= ~(1UL << vector);	/* clear it out */
		vector = 0x900 + (vector << 4);	/* convert to SRM vector */

		/* dispatch it */
		alpha_mv.device_interrupt(vector, regs);
	}
}
270  
271
272/*
273 * Titan Family
274 */
275static void __init
276titan_late_init(void)
277{
278	/*
279	 * Enable the system error interrupts. These interrupts are 
280	 * all reported to the kernel as machine checks, so the handler
281	 * is a nop so it can be called to count the individual events.
282	 */
283	request_irq(63+16, titan_intr_nop, SA_INTERRUPT, 
284		    "CChip Error", NULL);
285	request_irq(62+16, titan_intr_nop, SA_INTERRUPT, 
286		    "PChip 0 H_Error", NULL);
287	request_irq(61+16, titan_intr_nop, SA_INTERRUPT, 
288		    "PChip 1 H_Error", NULL);
289	request_irq(60+16, titan_intr_nop, SA_INTERRUPT, 
290		    "PChip 0 C_Error", NULL);
291	request_irq(59+16, titan_intr_nop, SA_INTERRUPT, 
292		    "PChip 1 C_Error", NULL);
293
294	/* 
295	 * Register our error handlers.
296	 */
297	titan_register_error_handlers();
298
299	/*
300	 * Check if the console left us any error logs.
301	 */
302	cdl_check_console_data_log();
303
304}
305
306static int __devinit
307titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
308{
309	u8 intline;
310	int irq;
311
312 	/* Get the current intline.  */
313	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
314	irq = intline;
315
316 	/* Is it explicitly routed through ISA?  */
317 	if ((irq & 0xF0) == 0xE0)
318 		return irq;
319 
320 	/* Offset by 16 to make room for ISA interrupts 0 - 15.  */
321 	return irq + 16;
322}
323
324static void __init
325titan_init_pci(void)
326{
327 	/*
328 	 * This isn't really the right place, but there's some init
329 	 * that needs to be done after everything is basically up.
330 	 */
331 	titan_late_init();
332 
333	pci_probe_only = 1;
334	common_init_pci();
335	SMC669_Init(0);
336#ifdef CONFIG_VGA_HOSE
337	locate_and_init_vga(NULL);
338#endif
339}
340
341
342/*
343 * Privateer
344 */
345static void __init
346privateer_init_pci(void)
347{
348	/*
349	 * Hook a couple of extra err interrupts that the
350	 * common titan code won't.
351	 */
352	request_irq(53+16, titan_intr_nop, SA_INTERRUPT, 
353		    "NMI", NULL);
354	request_irq(50+16, titan_intr_nop, SA_INTERRUPT, 
355		    "Temperature Warning", NULL);
356
357	/*
358	 * Finish with the common version.
359	 */
360	return titan_init_pci();
361}
362
363
364/*
365 * The System Vectors.
366 */
367struct alpha_machine_vector titan_mv __initmv = {
368	.vector_name		= "TITAN",
369	DO_EV6_MMU,
370	DO_DEFAULT_RTC,
371	DO_TITAN_IO,
372	.machine_check		= titan_machine_check,
373	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
374	.min_io_address		= DEFAULT_IO_BASE,
375	.min_mem_address	= DEFAULT_MEM_BASE,
376	.pci_dac_offset		= TITAN_DAC_OFFSET,
377
378	.nr_irqs		= 80,	/* 64 + 16 */
379	/* device_interrupt will be filled in by titan_init_irq */
380
381	.agp_info		= titan_agp_info,
382
383	.init_arch		= titan_init_arch,
384	.init_irq		= titan_legacy_init_irq,
385	.init_rtc		= common_init_rtc,
386	.init_pci		= titan_init_pci,
387
388	.kill_arch		= titan_kill_arch,
389	.pci_map_irq		= titan_map_irq,
390	.pci_swizzle		= common_swizzle,
391};
392ALIAS_MV(titan)
393
/* Privateer machine vector -- identical to titan_mv except for the
   name, the machine-check handler, and the PCI init hook.  */
struct alpha_machine_vector privateer_mv __initmv = {
	.vector_name		= "PRIVATEER",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check		= privateer_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TITAN_DAC_OFFSET,

	.nr_irqs		= 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info		= titan_agp_info,

	.init_arch		= titan_init_arch,
	.init_irq		= titan_legacy_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= privateer_init_pci,

	.kill_arch		= titan_kill_arch,
	.pci_map_irq		= titan_map_irq,
	.pci_swizzle		= common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope. */