/arch/mips/kernel/genex.S
Assembly | 548 lines | 500 code | 48 blank | 0 comment | 21 complexity | d83a36605ba8b1f646e81b27672159df MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
- /*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
- * Copyright (C) 2002, 2007 Maciej W. Rozycki
- */
- #include <linux/init.h>
- #include <asm/asm.h>
- #include <asm/asmmacro.h>
- #include <asm/cacheops.h>
- #include <asm/irqflags.h>
- #include <asm/regdef.h>
- #include <asm/fpregdef.h>
- #include <asm/mipsregs.h>
- #include <asm/stackframe.h>
- #include <asm/war.h>
- #include <asm/page.h>
- #include <asm/thread_info.h>
- /*
-  * PANIC_PIC(msg): invoke panic() from position-independent code.
-  * Both the message address and the address of panic() are loaded
-  * with PTR_LA (PIC-safe), and control transfers through AT with a
-  * plain jr, so no absolute jump encoding is emitted.  The "9: b 9b"
-  * after the jump spins forever should control ever come back here.
-  * TEXT(msg) presumably emits the string at local label 8 referenced
-  * by "PTR_LA a0,8f" -- confirm against the TEXT macro in <asm/asm.h>.
-  */
- #define PANIC_PIC(msg) \
- .set push; \
- .set reorder; \
- PTR_LA a0,8f; \
- .set noat; \
- PTR_LA AT, panic; \
- jr AT; \
- 9: b 9b; \
- .set pop; \
- TEXT(msg)
- __INIT
- /*
-  * Stub handlers for exception vectors 0 and 1.  These vectors are
-  * expected to be replaced with real handlers during boot, so reaching
-  * either of them indicates a setup bug: panic with a message naming
-  * the vector that fired.
-  */
- NESTED(except_vec0_generic, 0, sp)
- PANIC_PIC("Exception vector 0 called")
- END(except_vec0_generic)
- NESTED(except_vec1_generic, 0, sp)
- PANIC_PIC("Exception vector 1 called")
- END(except_vec1_generic)
- /*
- * General exception vector for all other CPUs.
- *
- * Be careful when changing this, it has to be at most 128 bytes
- * to fit into space reserved for the exception handler.
- */
- NESTED(except_vec3_generic, 0, sp)
- .set push
- .set noat
- #if R5432_CP0_INTERRUPT_WAR
- mfc0 k0, CP0_INDEX
- #endif
- mfc0 k1, CP0_CAUSE
- andi k1, k1, 0x7c
- #ifdef CONFIG_64BIT
- dsll k1, k1, 1
- #endif
- PTR_L k0, exception_handlers(k1)
- jr k0
- .set pop
- END(except_vec3_generic)
- /*
- * General exception handler for CPUs with virtual coherency exception.
- *
- * Be careful when changing this, it has to be at most 256 (as a special
- * exception) bytes to fit into space reserved for the exception handler.
- */
- NESTED(except_vec3_r4000, 0, sp)
- .set push
- .set mips3
- .set noat
- mfc0 k1, CP0_CAUSE
- li k0, 31<<2
- andi k1, k1, 0x7c
- .set push
- .set noreorder
- .set nomacro
- beq k1, k0, handle_vced
- li k0, 14<<2
- beq k1, k0, handle_vcei
- #ifdef CONFIG_64BIT
- dsll k1, k1, 1
- #endif
- .set pop
- PTR_L k0, exception_handlers(k1)
- jr k0
- /*
- * Big shit, we now may have two dirty primary cache lines for the same
- * physical address. We can safely invalidate the line pointed to by
- * c0_badvaddr because after return from this exception handler the
- * load / store will be re-executed.
- */
- handle_vced:
- MFC0 k0, CP0_BADVADDR
- li k1, -4 # Is this ...
- and k0, k1 # ... really needed?
- mtc0 zero, CP0_TAGLO
- cache Index_Store_Tag_D, (k0)
- cache Hit_Writeback_Inv_SD, (k0)
- #ifdef CONFIG_PROC_FS
- PTR_LA k0, vced_count
- lw k1, (k0)
- addiu k1, 1
- sw k1, (k0)
- #endif
- eret
- handle_vcei:
- MFC0 k0, CP0_BADVADDR
- cache Hit_Writeback_Inv_SD, (k0) # also cleans pi
- #ifdef CONFIG_PROC_FS
- PTR_LA k0, vcei_count
- lw k1, (k0)
- addiu k1, 1
- sw k1, (k0)
- #endif
- eret
- .set pop
- END(except_vec3_r4000)
- __FINIT
- /*
-  * Idle loop body: execute the WAIT instruction unless a reschedule is
-  * pending.  TI_FLAGS($28) reads thread_info->flags ($28 holds the
-  * thread_info pointer).  An interrupt may arrive between the flags
-  * test and WAIT; BUILD_ROLLBACK_PROLOGUE closes that race by rewinding
-  * EPC to the start of this 32-byte-aligned region so the flags are
-  * re-tested on return from the interrupt.
-  */
- .align 5 /* 32 byte rollback region */
- LEAF(r4k_wait)
- .set push
- .set noreorder
- /* start of rollback region */
- LONG_L t0, TI_FLAGS($28)
- nop
- andi t0, _TIF_NEED_RESCHED
- bnez t0, 1f # reschedule pending: skip WAIT, return at once
- nop
- nop
- nop
- .set mips3
- wait
- /* end of rollback region (the region size must be power of two) */
- .set pop
- 1:
- jr ra
- END(r4k_wait)
- /*
-  * BUILD_ROLLBACK_PROLOGUE handler: emits rollback_\handler, an entry
-  * point that checks whether the interrupted EPC lies inside the
-  * 32-byte r4k_wait rollback region.  ori/xori rounds EPC down to a
-  * 32-byte boundary; if that equals the (32-byte-aligned) r4k_wait
-  * address, EPC is rewound to the start of r4k_wait so the
-  * need_resched test is re-executed on return.  Execution then falls
-  * through into the real \handler body that follows the macro.
-  */
- .macro BUILD_ROLLBACK_PROLOGUE handler
- FEXPORT(rollback_\handler)
- .set push
- .set noat
- MFC0 k0, CP0_EPC
- PTR_LA k1, r4k_wait
- ori k0, 0x1f /* 32 byte rollback region */
- xori k0, 0x1f
- bne k0, k1, 9f # EPC outside the region: leave it alone
- MTC0 k0, CP0_EPC # inside: rewind EPC to start of r4k_wait
- 9:
- .set pop
- .endm
- .align 5
- BUILD_ROLLBACK_PROLOGUE handle_int
- /*
-  * Common hardware-interrupt entry: save the full register frame,
-  * mask interrupts, record the pt_regs pointer in thread_info, and
-  * tail-call the platform dispatcher with ra preset so it returns
-  * straight to ret_from_irq.
-  */
- NESTED(handle_int, PT_SIZE, sp)
- #ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * Check to see if the interrupted code has just disabled
- * interrupts and ignore this interrupt for now if so.
- *
- * local_irq_disable() disables interrupts and then calls
- * trace_hardirqs_off() to track the state. If an interrupt is taken
- * after interrupts are disabled but before the state is updated
- * it will appear to restore_all that it is incorrectly returning with
- * interrupts disabled
- */
- .set push
- .set noat
- mfc0 k0, CP0_STATUS
- #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- and k0, ST0_IEP # previous interrupt-enable bit (R3000 status stack)
- bnez k0, 1f # was enabled: handle the interrupt normally
- mfc0 k0, CP0_EPC
- .set noreorder
- j k0 # return to interrupted code ...
- rfe # ... rfe in the delay slot pops the status stack
- #else
- and k0, ST0_IE
- bnez k0, 1f # IE set: handle the interrupt normally
- eret # IE already cleared: ignore this interrupt
- #endif
- 1:
- .set pop
- #endif
- SAVE_ALL
- CLI
- TRACE_IRQS_OFF
- LONG_L s0, TI_REGS($28) # s0 = previous pt_regs pointer (restored later)
- LONG_S sp, TI_REGS($28) # publish this frame as the current pt_regs
- PTR_LA ra, ret_from_irq # dispatcher "returns" to ret_from_irq
- j plat_irq_dispatch
- END(handle_int)
- __INIT
- /*
- * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
- * This is a dedicated interrupt exception vector which reduces the
- * interrupt processing overhead. The jump instruction will be replaced
- * at the initialization time.
- *
- * Be careful when changing this, it has to be at most 128 bytes
- * to fit into space reserved for the exception handler.
- */
- NESTED(except_vec4, 0, sp)
- 1: j 1b /* Dummy, will be replaced */
- END(except_vec4)
- /*
- * EJTAG debug exception handler.
- * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
- * unconditional jump to this vector.
- */
- NESTED(except_vec_ejtag_debug, 0, sp)
- j ejtag_debug_handler
- END(except_vec_ejtag_debug)
- __FINIT
- /*
- * Vectored interrupt handler.
- * This prototype is copied to ebase + n*IntCtl.VS and patched
- * to invoke the handler
- */
- BUILD_ROLLBACK_PROLOGUE except_vec_vi
- NESTED(except_vec_vi, 0, sp)
- SAVE_SOME
- SAVE_AT
- .set push
- .set noreorder
- #ifdef CONFIG_MIPS_MT_SMTC
- /*