PageRenderTime 51ms CodeModel.GetById 16ms RepoModel.GetById 1ms app.codeStats 0ms

/src/freebsd/sys/mips/mips/exception.S

https://bitbucket.org/killerpenguinassassins/open_distrib_devel
Assembly | 1243 lines | 1226 code | 17 blank | 0 comment | 11 complexity | 42def7474b53285a2ee3da7608b3f05e MD5 | raw file
Possible License(s): CC0-1.0, MIT, LGPL-2.0, LGPL-3.0, WTFPL, GPL-2.0, BSD-2-Clause, AGPL-3.0, CC-BY-SA-3.0, MPL-2.0, JSON, BSD-3-Clause-No-Nuclear-License-2014, LGPL-2.1, CPL-1.0, AGPL-1.0, 0BSD, ISC, Apache-2.0, GPL-3.0, IPL-1.0, MPL-2.0-no-copyleft-exception, BSD-3-Clause
  1. /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
  2. /*-
  3. * Copyright (c) 1992, 1993
  4. * The Regents of the University of California. All rights reserved.
  5. *
  6. * This code is derived from software contributed to Berkeley by
  7. * Digital Equipment Corporation and Ralph Campbell.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions
  11. * are met:
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 4. Neither the name of the University nor the names of its contributors
  18. * may be used to endorse or promote products derived from this software
  19. * without specific prior written permission.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  22. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  25. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  26. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  27. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  28. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * Copyright (C) 1989 Digital Equipment Corporation.
  34. * Permission to use, copy, modify, and distribute this software and
  35. * its documentation for any purpose and without fee is hereby granted,
  36. * provided that the above copyright notice appears in all copies.
  37. * Digital Equipment Corporation makes no representations about the
  38. * suitability of this software for any purpose. It is provided "as is"
  39. * without express or implied warranty.
  40. *
  41. * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
  42. * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
  43. * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
  44. * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
  45. * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
  46. * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
  47. * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
  48. * JNPR: exception.S,v 1.5 2007/01/08 04:58:37 katta
  49. * $FreeBSD$
  50. */
  51. /*
  52. * Contains code that is the first executed at boot time plus
  53. * assembly language support routines.
  54. */
  55. #include "opt_cputype.h"
  56. #include "opt_ddb.h"
  57. #include <machine/asm.h>
  58. #include <machine/cpu.h>
  59. #include <machine/regnum.h>
  60. #include <machine/cpuregs.h>
  61. #include <machine/pte.h>
  62. #include "opt_cputype.h"
  63. #include "assym.s"
  64. .set noreorder # Noreorder is default style!
  65. /*
  66. * Reasonable limit
  67. */
  68. #define INTRCNT_COUNT 128
  69. /*
  70. *----------------------------------------------------------------------------
  71. *
  72. * MipsTLBMiss --
  73. *
  74. * Vector code for the TLB-miss exception vector 0x80000000.
  75. *
  76. * This code is copied to the TLB exception vector address to
  77. * which the CPU jumps in response to an exception or a TLB miss.
  78. * NOTE: This code must be position independent!!!
  79. *
  80. *
  81. */
/*
 * MipsTLBMiss: stub copied to the TLB-miss exception vector
 * (0x80000000).  Must be position independent.  Loads the faulting
 * virtual address into k0 in the jump delay slot and transfers to
 * the shared refill handler MipsDoTLBMiss.
 */
VECTOR(MipsTLBMiss, unknown)
	.set push
	.set noat
	j	MipsDoTLBMiss
	MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address (delay slot)
	.set pop
VECTOR_END(MipsTLBMiss)
  89. /*
  90. *----------------------------------------------------------------------------
  91. *
  92. * MipsDoTLBMiss --
  93. *
  94. * This is the real TLB Miss Handler code.
  95. * 'segbase' points to the base of the segment table for user processes.
  96. *
  97. * Don't check for invalid pte's here. We load them as well and
  98. * let the processor trap to load the correct value after service.
  99. *----------------------------------------------------------------------------
  100. */
/*
 * MipsDoTLBMiss: real TLB refill handler, entered from the vector stub
 * with k0 = BadVAddr.  Walks segment table -> (pde level on n64) ->
 * page table, loads the even/odd PTE pair into TLB_LO0/LO1 and writes
 * a random TLB entry.  Invalid PTEs are deliberately loaded as well;
 * the subsequent TLB-invalid exception performs the real service.
 * Uses only k0/k1; no stack, position independent.
 */
.set push
.set noat
MipsDoTLBMiss:
	bltz	k0, 1f				#02: k0<0 -> 1f (kernel fault)
	PTR_SRL	k0, k0, SEGSHIFT - PTRSHIFT	#03: k0=seg offset (almost)
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_SEGBASE(k1)
	beqz	k1, 2f				#05: make sure segbase is not null
	andi	k0, k0, PDEPTRMASK		#06: k0=seg offset
	PTR_ADDU k1, k0, k1			#07: k1=seg entry address
	PTR_L	k1, 0(k1)			#08: k1=seg entry
	MFC0	k0, MIPS_COP_0_BAD_VADDR	#09: k0=bad address (again)
	beq	k1, zero, 2f			#0a: ==0 -- no page table
#ifdef __mips_n64
	/* Extra page-directory level on 64-bit kernels. */
	PTR_SRL	k0, PDRSHIFT - PTRSHIFT		# k0=VPN
	andi	k0, k0, PDEPTRMASK		# k0=pde offset
	PTR_ADDU k1, k0, k1			# k1=pde entry address
	PTR_L	k1, 0(k1)			# k1=pde entry
	MFC0	k0, MIPS_COP_0_BAD_VADDR	# k0=bad address (again)
	beq	k1, zero, 2f			# ==0 -- no page table
#endif
	PTR_SRL	k0, PAGE_SHIFT - PTESHIFT	#0b: k0=VPN (aka va>>10)
	andi	k0, k0, PTE2MASK		#0c: k0=page tab offset
	PTR_ADDU k1, k1, k0			#0d: k1=pte address
	PTE_L	k0, 0(k1)			#0e: k0=lo0 (even) pte
	PTE_L	k1, PTESIZE(k1)			#0f: k1=lo1 (odd) pte
	CLEAR_PTE_SWBITS(k0)
	PTE_MTC0 k0, MIPS_COP_0_TLB_LO0		#12: lo0 is loaded
	COP0_SYNC
	CLEAR_PTE_SWBITS(k1)
	PTE_MTC0 k1, MIPS_COP_0_TLB_LO1		#15: lo1 is loaded
	COP0_SYNC
	tlbwr					#1a: write to tlb
	HAZARD_DELAY
	eret					#1f: return from exception
1:	j	MipsTLBMissException		#20: kernel exception
	nop					#21: branch delay slot
2:	j	SlowFault			#22: no page table present
	nop					#23: branch delay slot
.set pop
/*
 * This code is copied to the general exception vector address to
 * handle all exceptions except RESET and TLBMiss.
 * NOTE: This code must be position independent!!!
 */
/*
 * MipsException: general exception vector stub.  Builds an index from
 * the exception code in CAUSE, biased by the user-mode bit of STATUS,
 * and jumps through machExceptionTable to the matching handler.
 */
VECTOR(MipsException, unknown)
/*
 * Find out what mode we came from and jump to the proper handler.
 */
	.set noat
	mfc0	k0, MIPS_COP_0_STATUS		# Get the status register
	mfc0	k1, MIPS_COP_0_CAUSE		# Get the cause register value.
	and	k0, k0, SR_KSU_USER		# test for user mode
						# sneaky but the bits are
						# with us........
	sll	k0, k0, 3			# shift user bit for cause index
	and	k1, k1, MIPS3_CR_EXC_CODE	# Mask out the cause bits.
	or	k1, k1, k0			# change index to user table
#if defined(__mips_n64)
	PTR_SLL	k1, k1, 1			# shift to get 8-byte offset
#endif
1:
	PTR_LA	k0, _C_LABEL(machExceptionTable)  # get base of the jump table
	PTR_ADDU k0, k0, k1			# Get the address of the
						# function entry.  Note that
						# the cause is already
						# shifted left by 2 bits so
						# we don't have to shift.
	PTR_L	k0, 0(k0)			# Get the function address
	nop
	j	k0				# Jump to the function.
	nop
	.set at
VECTOR_END(MipsException)
  175. /*
  176. * We couldn't find a TLB entry.
  177. * Find out what mode we came from and call the appropriate handler.
  178. */
/*
 * SlowFault: the fast refill path found no page table.  If the fault
 * came from user mode, dispatch to MipsUserGenException; otherwise
 * fall through into MipsKernGenException below.
 */
SlowFault:
	.set noat
	mfc0	k0, MIPS_COP_0_STATUS
	nop
	and	k0, k0, SR_KSU_USER
	bne	k0, zero, _C_LABEL(MipsUserGenException)
	nop
	.set at
/*
 * Fall through ...
 */
  190. /*----------------------------------------------------------------------------
  191. *
  192. * MipsKernGenException --
  193. *
  194. * Handle an exception from kernel mode.
  195. *
  196. * Results:
  197. * None.
  198. *
  199. * Side effects:
  200. * None.
  201. *
  202. *----------------------------------------------------------------------------
  203. */
/*
 * SAVE_REG(reg, offs, base): store 'reg' into the trapframe embedded
 * in a kernel exception stack frame ('offs' is a regnum.h index).
 */
#define SAVE_REG(reg, offs, base) \
REG_S reg, CALLFRAME_SIZ + (SZREG * offs) (base)

/*
 * CLEAR_STATUS: enter kernel mode with interrupts disabled and EXL
 * clear.  CPU-specific variants additionally force the 64-bit address
 * space bits (KX/SX/UX) and, on RMI/NLM, keep COP2 usable.
 */
#if defined(CPU_CNMIPS)
#define CLEAR_STATUS \
mfc0 a0, MIPS_COP_0_STATUS ;\
li a2, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX) ; \
or a0, a0, a2 ; \
li a2, ~(MIPS_SR_INT_IE | MIPS_SR_EXL | SR_KSU_USER) ; \
and a0, a0, a2 ; \
mtc0 a0, MIPS_COP_0_STATUS ; \
ITLBNOPFIX
#elif defined(CPU_RMI) || defined(CPU_NLM)
#define CLEAR_STATUS \
mfc0 a0, MIPS_COP_0_STATUS ;\
li a2, (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_COP_2_BIT) ; \
or a0, a0, a2 ; \
li a2, ~(MIPS_SR_INT_IE | MIPS_SR_EXL | SR_KSU_USER) ; \
and a0, a0, a2 ; \
mtc0 a0, MIPS_COP_0_STATUS ; \
ITLBNOPFIX
#else
#define CLEAR_STATUS \
mfc0 a0, MIPS_COP_0_STATUS ;\
li a2, ~(MIPS_SR_INT_IE | MIPS_SR_EXL | SR_KSU_USER) ; \
and a0, a0, a2 ; \
mtc0 a0, MIPS_COP_0_STATUS ; \
ITLBNOPFIX
#endif
  232. /*
  233. * Save CPU and CP0 register state.
  234. *
  235. * This is straightforward except for saving the exception program
  236. * counter. The ddb backtrace code looks for the first instruction
  237. * matching the form "sw ra, (off)sp" to figure out the address of the
  238. * calling function. So we must make sure that we save the exception
  239. * PC by staging it through 'ra' as opposed to any other register.
  240. */
/*
 * SAVE_CPU: save the full general-register and CP0 state into the
 * trapframe at CALLFRAME_SIZ(sp).  The exception PC is staged through
 * 'ra' (via t0) so ddb's backtrace heuristic, which looks for the
 * first "sw ra, (off)sp", picks up the faulting PC.  On exit a0
 * points at the trapframe and status is set for kernel mode with
 * interrupts off (CLEAR_STATUS).
 */
#define SAVE_CPU \
SAVE_REG(AT, AST, sp) ;\
.set at ; \
SAVE_REG(v0, V0, sp) ;\
SAVE_REG(v1, V1, sp) ;\
SAVE_REG(a0, A0, sp) ;\
SAVE_REG(a1, A1, sp) ;\
SAVE_REG(a2, A2, sp) ;\
SAVE_REG(a3, A3, sp) ;\
SAVE_REG(t0, T0, sp) ;\
SAVE_REG(t1, T1, sp) ;\
SAVE_REG(t2, T2, sp) ;\
SAVE_REG(t3, T3, sp) ;\
SAVE_REG(ta0, TA0, sp) ;\
SAVE_REG(ta1, TA1, sp) ;\
SAVE_REG(ta2, TA2, sp) ;\
SAVE_REG(ta3, TA3, sp) ;\
SAVE_REG(t8, T8, sp) ;\
SAVE_REG(t9, T9, sp) ;\
SAVE_REG(gp, GP, sp) ;\
SAVE_REG(s0, S0, sp) ;\
SAVE_REG(s1, S1, sp) ;\
SAVE_REG(s2, S2, sp) ;\
SAVE_REG(s3, S3, sp) ;\
SAVE_REG(s4, S4, sp) ;\
SAVE_REG(s5, S5, sp) ;\
SAVE_REG(s6, S6, sp) ;\
SAVE_REG(s7, S7, sp) ;\
SAVE_REG(s8, S8, sp) ;\
mflo v0 ;\
mfhi v1 ;\
mfc0 a0, MIPS_COP_0_STATUS ;\
mfc0 a1, MIPS_COP_0_CAUSE ;\
MFC0 a2, MIPS_COP_0_BAD_VADDR;\
MFC0 a3, MIPS_COP_0_EXC_PC ;\
SAVE_REG(v0, MULLO, sp) ;\
SAVE_REG(v1, MULHI, sp) ;\
SAVE_REG(a0, SR, sp) ;\
SAVE_REG(a1, CAUSE, sp) ;\
SAVE_REG(a2, BADVADDR, sp) ;\
move t0, ra ;\
move ra, a3 ;\
SAVE_REG(ra, PC, sp) ;\
move ra, t0 ;\
SAVE_REG(ra, RA, sp) ;\
PTR_ADDU v0, sp, KERN_EXC_FRAME_SIZE ;\
SAVE_REG(v0, SP, sp) ;\
CLEAR_STATUS ;\
PTR_ADDU a0, sp, CALLFRAME_SIZ ;\
ITLBNOPFIX
/*
 * RESTORE_REG(reg, offs, base): inverse of SAVE_REG — reload 'reg'
 * from the trapframe in the kernel exception stack frame.
 */
#define RESTORE_REG(reg, offs, base) \
REG_L reg, CALLFRAME_SIZ + (SZREG * offs) (base)

/*
 * RESTORE_CPU: restore register and CP0 state saved by SAVE_CPU and
 * pop the exception frame.  The saved SR is staged in k0 and written
 * last, after sp has been restored, so the status change cannot be
 * observed with a partially-restored frame.  v0 must hold the return
 * PC before the final mtc0 (it is moved to EXC_PC here, then v0 is
 * reloaded from the frame).
 */
#define RESTORE_CPU \
CLEAR_STATUS ;\
RESTORE_REG(k0, SR, sp) ;\
RESTORE_REG(t0, MULLO, sp) ;\
RESTORE_REG(t1, MULHI, sp) ;\
mtlo t0 ;\
mthi t1 ;\
MTC0 v0, MIPS_COP_0_EXC_PC ;\
.set noat ;\
RESTORE_REG(AT, AST, sp) ;\
RESTORE_REG(v0, V0, sp) ;\
RESTORE_REG(v1, V1, sp) ;\
RESTORE_REG(a0, A0, sp) ;\
RESTORE_REG(a1, A1, sp) ;\
RESTORE_REG(a2, A2, sp) ;\
RESTORE_REG(a3, A3, sp) ;\
RESTORE_REG(t0, T0, sp) ;\
RESTORE_REG(t1, T1, sp) ;\
RESTORE_REG(t2, T2, sp) ;\
RESTORE_REG(t3, T3, sp) ;\
RESTORE_REG(ta0, TA0, sp) ;\
RESTORE_REG(ta1, TA1, sp) ;\
RESTORE_REG(ta2, TA2, sp) ;\
RESTORE_REG(ta3, TA3, sp) ;\
RESTORE_REG(t8, T8, sp) ;\
RESTORE_REG(t9, T9, sp) ;\
RESTORE_REG(s0, S0, sp) ;\
RESTORE_REG(s1, S1, sp) ;\
RESTORE_REG(s2, S2, sp) ;\
RESTORE_REG(s3, S3, sp) ;\
RESTORE_REG(s4, S4, sp) ;\
RESTORE_REG(s5, S5, sp) ;\
RESTORE_REG(s6, S6, sp) ;\
RESTORE_REG(s7, S7, sp) ;\
RESTORE_REG(s8, S8, sp) ;\
RESTORE_REG(gp, GP, sp) ;\
RESTORE_REG(ra, RA, sp) ;\
PTR_ADDU sp, sp, KERN_EXC_FRAME_SIZE;\
mtc0 k0, MIPS_COP_0_STATUS
  332. /*
  333. * The kernel exception stack contains 18 saved general registers,
  334. * the status register and the multiply lo and high registers.
  335. * In addition, we set this up for linkage conventions.
  336. */
/* Size of the register save area and of the whole kernel exception frame. */
#define KERN_REG_SIZE (NUMSAVEREGS * SZREG)
#define KERN_EXC_FRAME_SIZE (CALLFRAME_SIZ + KERN_REG_SIZE + 16)

/*
 * MipsKernGenException: general exception taken in kernel mode.
 * Builds a trapframe on the kernel stack, calls trap(frame), merges
 * any interrupt/coprocessor mask changes made by the handler back
 * into the saved SR, then restores state and erets.
 */
NNON_LEAF(MipsKernGenException, KERN_EXC_FRAME_SIZE, ra)
	.set noat
	PTR_SUBU sp, sp, KERN_EXC_FRAME_SIZE
	.mask	0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
/*
 * Save CPU state, building 'frame'.
 */
	SAVE_CPU
/*
 * Call the exception handler.  a0 points at the saved frame.
 */
	PTR_LA	gp, _C_LABEL(_gp)
	PTR_LA	k0, _C_LABEL(trap)
	jalr	k0
	REG_S	a3, CALLFRAME_RA + KERN_REG_SIZE(sp)	# for debugging (delay slot)
/*
 * Update interrupt and CPU mask in saved status register.
 * Some interrupts could be disabled by intr filters if interrupts
 * are enabled later in the trap handler.
 */
	mfc0	a0, MIPS_COP_0_STATUS
	and	a0, a0, (MIPS_SR_INT_MASK|MIPS_SR_COP_USABILITY)
	RESTORE_REG(a1, SR, sp)
	and	a1, a1, ~(MIPS_SR_INT_MASK|MIPS_SR_COP_USABILITY)
	or	a1, a1, a0
	SAVE_REG(a1, SR, sp)
	RESTORE_CPU			# v0 contains the return address.
	sync
	eret
	.set at
END(MipsKernGenException)
/*
 * Store/load a register to/from the register save area embedded in
 * the user PCB ('base' is the pcb pointer, 'offs' a regnum.h index).
 */
#define SAVE_U_PCB_REG(reg, offs, base) \
REG_S reg, U_PCB_REGS + (SZREG * offs) (base)
#define RESTORE_U_PCB_REG(reg, offs, base) \
REG_L reg, U_PCB_REGS + (SZREG * offs) (base)
  375. /*----------------------------------------------------------------------------
  376. *
  377. * MipsUserGenException --
  378. *
  379. * Handle an exception from user mode.
  380. *
  381. * Results:
  382. * None.
  383. *
  384. * Side effects:
  385. * None.
  386. *
  387. *----------------------------------------------------------------------------
  388. */
/*
 * MipsUserGenException: general exception taken in user mode.  Saves
 * the user register state into the current PCB (CP0 reads are
 * interleaved with the stores to hide access latency), switches to
 * the kernel stack/gp, calls trap(frame), runs AST processing, then
 * restores user state and erets.
 */
NNON_LEAF(MipsUserGenException, CALLFRAME_SIZ, ra)
	.set noat
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
/*
 * Save all of the registers except for the kernel temporaries in u.u_pcb.
 */
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_CURPCB(k1)
	SAVE_U_PCB_REG(AT, AST, k1)
	.set at
	SAVE_U_PCB_REG(v0, V0, k1)
	SAVE_U_PCB_REG(v1, V1, k1)
	SAVE_U_PCB_REG(a0, A0, k1)
	mflo	v0
	SAVE_U_PCB_REG(a1, A1, k1)
	SAVE_U_PCB_REG(a2, A2, k1)
	SAVE_U_PCB_REG(a3, A3, k1)
	SAVE_U_PCB_REG(t0, T0, k1)
	mfhi	v1
	SAVE_U_PCB_REG(t1, T1, k1)
	SAVE_U_PCB_REG(t2, T2, k1)
	SAVE_U_PCB_REG(t3, T3, k1)
	SAVE_U_PCB_REG(ta0, TA0, k1)
	mfc0	a0, MIPS_COP_0_STATUS		# First arg is the status reg.
	SAVE_U_PCB_REG(ta1, TA1, k1)
	SAVE_U_PCB_REG(ta2, TA2, k1)
	SAVE_U_PCB_REG(ta3, TA3, k1)
	SAVE_U_PCB_REG(s0, S0, k1)
	mfc0	a1, MIPS_COP_0_CAUSE		# Second arg is the cause reg.
	SAVE_U_PCB_REG(s1, S1, k1)
	SAVE_U_PCB_REG(s2, S2, k1)
	SAVE_U_PCB_REG(s3, S3, k1)
	SAVE_U_PCB_REG(s4, S4, k1)
	MFC0	a2, MIPS_COP_0_BAD_VADDR	# Third arg is the fault addr
	SAVE_U_PCB_REG(s5, S5, k1)
	SAVE_U_PCB_REG(s6, S6, k1)
	SAVE_U_PCB_REG(s7, S7, k1)
	SAVE_U_PCB_REG(t8, T8, k1)
	MFC0	a3, MIPS_COP_0_EXC_PC		# Fourth arg is the pc.
	SAVE_U_PCB_REG(t9, T9, k1)
	SAVE_U_PCB_REG(gp, GP, k1)
	SAVE_U_PCB_REG(sp, SP, k1)
	SAVE_U_PCB_REG(s8, S8, k1)
	PTR_SUBU sp, k1, CALLFRAME_SIZ		# switch to kernel SP
	SAVE_U_PCB_REG(ra, RA, k1)
	SAVE_U_PCB_REG(v0, MULLO, k1)
	SAVE_U_PCB_REG(v1, MULHI, k1)
	SAVE_U_PCB_REG(a0, SR, k1)
	SAVE_U_PCB_REG(a1, CAUSE, k1)
	SAVE_U_PCB_REG(a2, BADVADDR, k1)
	SAVE_U_PCB_REG(a3, PC, k1)
	REG_S	a3, CALLFRAME_RA(sp)		# for debugging
	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
# Turn off fpu and enter kernel mode
	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS3_SR_KSU_MASK | MIPS_SR_INT_IE)
#if defined(CPU_CNMIPS)
	and	t0, t0, ~(MIPS_SR_COP_2_BIT)
	or	t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX | MIPS_SR_PX)
#elif defined(CPU_RMI) || defined(CPU_NLM)
	or	t0, t0, (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_COP_2_BIT)
#endif
	mtc0	t0, MIPS_COP_0_STATUS
	PTR_ADDU a0, k1, U_PCB_REGS
	ITLBNOPFIX
/*
 * Call the exception handler.
 */
	PTR_LA	k0, _C_LABEL(trap)
	jalr	k0
	nop
/*
 * Restore user registers and return.
 * First disable interrupts and set exception level.
 */
	DO_AST
	CLEAR_STATUS
/*
 * The use of k1 for storing the PCB pointer must be done only
 * after interrupts are disabled.  Otherwise it will get overwritten
 * by the interrupt code.
 */
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_CURPCB(k1)
/*
 * Update interrupt mask in saved status register.
 * Some interrupts could be enabled by an ithread scheduled by ast().
 */
	mfc0	a0, MIPS_COP_0_STATUS
	and	a0, a0, MIPS_SR_INT_MASK
	RESTORE_U_PCB_REG(a1, SR, k1)
	and	a1, a1, ~MIPS_SR_INT_MASK
	or	a1, a1, a0
	SAVE_U_PCB_REG(a1, SR, k1)
	RESTORE_U_PCB_REG(t0, MULLO, k1)
	RESTORE_U_PCB_REG(t1, MULHI, k1)
	mtlo	t0
	mthi	t1
	RESTORE_U_PCB_REG(a0, PC, k1)
	RESTORE_U_PCB_REG(v0, V0, k1)
	MTC0	a0, MIPS_COP_0_EXC_PC		# set return address
	RESTORE_U_PCB_REG(v1, V1, k1)
	RESTORE_U_PCB_REG(a0, A0, k1)
	RESTORE_U_PCB_REG(a1, A1, k1)
	RESTORE_U_PCB_REG(a2, A2, k1)
	RESTORE_U_PCB_REG(a3, A3, k1)
	RESTORE_U_PCB_REG(t0, T0, k1)
	RESTORE_U_PCB_REG(t1, T1, k1)
	RESTORE_U_PCB_REG(t2, T2, k1)
	RESTORE_U_PCB_REG(t3, T3, k1)
	RESTORE_U_PCB_REG(ta0, TA0, k1)
	RESTORE_U_PCB_REG(ta1, TA1, k1)
	RESTORE_U_PCB_REG(ta2, TA2, k1)
	RESTORE_U_PCB_REG(ta3, TA3, k1)
	RESTORE_U_PCB_REG(s0, S0, k1)
	RESTORE_U_PCB_REG(s1, S1, k1)
	RESTORE_U_PCB_REG(s2, S2, k1)
	RESTORE_U_PCB_REG(s3, S3, k1)
	RESTORE_U_PCB_REG(s4, S4, k1)
	RESTORE_U_PCB_REG(s5, S5, k1)
	RESTORE_U_PCB_REG(s6, S6, k1)
	RESTORE_U_PCB_REG(s7, S7, k1)
	RESTORE_U_PCB_REG(t8, T8, k1)
	RESTORE_U_PCB_REG(t9, T9, k1)
	RESTORE_U_PCB_REG(gp, GP, k1)
	RESTORE_U_PCB_REG(sp, SP, k1)
	RESTORE_U_PCB_REG(k0, SR, k1)
	RESTORE_U_PCB_REG(s8, S8, k1)
	RESTORE_U_PCB_REG(ra, RA, k1)
	.set noat
	RESTORE_U_PCB_REG(AT, AST, k1)
	mtc0	k0, MIPS_COP_0_STATUS		# still exception level
	ITLBNOPFIX
	sync
	eret
	.set at
END(MipsUserGenException)
/*
 * mips_wait: idle entry.  Checks sched_runnable() with interrupts
 * masked, then atomically re-enables interrupts and executes 'wait'.
 * The mtc0/bnez/nop/wait sequence is padded to exactly 16 bytes
 * (MipsWaitStart..MipsWaitEnd) so the interrupt path in MipsKernIntr
 * can recognize an interrupt landing inside it and advance EPC past
 * the 'wait', avoiding a missed-wakeup race.
 *
 * NOTE(review): the xori toggles MIPS_SR_INT_IE rather than clearing
 * it — this assumes interrupts are enabled on entry; confirm callers.
 */
.set push
.set noat
NON_LEAF(mips_wait, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	REG_S	ra, CALLFRAME_RA(sp)		# save RA
	mfc0	t0, MIPS_COP_0_STATUS
	xori	t1, t0, MIPS_SR_INT_IE		# mask interrupts (see NOTE above)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	jal	sched_runnable
	nop
	REG_L	ra, CALLFRAME_RA(sp)
	mfc0	t0, MIPS_COP_0_STATUS
	ori	t1, t0, MIPS_SR_INT_IE
	.align 4
GLOBAL(MipsWaitStart)			# this is 16 byte aligned
	mtc0	t1, MIPS_COP_0_STATUS
	bnez	v0, MipsWaitEnd			# something runnable: skip wait
	nop
	wait
GLOBAL(MipsWaitEnd)			# MipsWaitStart + 16
	jr	ra
	PTR_ADDU sp, sp, CALLFRAME_SIZ		# pop frame (delay slot)
END(mips_wait)
.set pop
  552. /*----------------------------------------------------------------------------
  553. *
  554. * MipsKernIntr --
  555. *
  556. * Handle an interrupt from kernel mode.
  557. * Interrupts use the standard kernel stack.
  558. * switch_exit sets up a kernel stack after exit so interrupts won't fail.
  559. *
  560. * Results:
  561. * None.
  562. *
  563. * Side effects:
  564. * None.
  565. *
  566. *----------------------------------------------------------------------------
  567. */
/*
 * MipsKernIntr: interrupt taken in kernel mode, on the standard
 * kernel stack.  If the interrupt landed inside the 16-byte
 * MipsWaitStart..MipsWaitEnd window, EPC is advanced past the 'wait'
 * instruction to close the race with mips_wait.  Then a trapframe is
 * built, cpu_intr(frame) is called, interrupt/coprocessor mask
 * changes are merged back into the saved SR, and state is restored.
 */
NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
	.set noat
	PTR_SUBU sp, sp, KERN_EXC_FRAME_SIZE
	.mask	0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
/*
 * Check for getting interrupts just before wait
 */
	MFC0	k0, MIPS_COP_0_EXC_PC
	ori	k0, 0xf
	xori	k0, 0xf				# 16 byte align
	PTR_LA	k1, MipsWaitStart
	bne	k0, k1, 1f
	nop
	PTR_ADDU k1, 16				# skip over wait
	MTC0	k1, MIPS_COP_0_EXC_PC
1:
/*
 * Save CPU state, building 'frame'.
 */
	SAVE_CPU
/*
 * Call the interrupt handler.  a0 points at the saved frame.
 */
	PTR_LA	gp, _C_LABEL(_gp)
	PTR_LA	k0, _C_LABEL(cpu_intr)
	jalr	k0
	REG_S	a3, CALLFRAME_RA + KERN_REG_SIZE(sp)	# for debugging (delay slot)
/*
 * Update interrupt and CPU mask in saved status register.
 * Some interrupts could be disabled by intr filters if interrupts
 * are enabled later in the trap handler.
 */
	mfc0	a0, MIPS_COP_0_STATUS
	and	a0, a0, (MIPS_SR_INT_MASK|MIPS_SR_COP_USABILITY)
	RESTORE_REG(a1, SR, sp)
	and	a1, a1, ~(MIPS_SR_INT_MASK|MIPS_SR_COP_USABILITY)
	or	a1, a1, a0
	SAVE_REG(a1, SR, sp)
	REG_L	v0, CALLFRAME_RA + KERN_REG_SIZE(sp)	# reload return PC for RESTORE_CPU
	RESTORE_CPU			# v0 contains the return address.
	sync
	eret
	.set at
END(MipsKernIntr)
  613. /*----------------------------------------------------------------------------
  614. *
  615. * MipsUserIntr --
  616. *
  617. * Handle an interrupt from user mode.
  618. * Note: we save minimal state in the u.u_pcb struct and use the standard
  619. * kernel stack since there has to be a u page if we came from user mode.
  620. * If there is a pending software interrupt, then save the remaining state
  621. * and call softintr(). This is all because if we call switch() inside
  622. * interrupt(), not all the user registers have been saved in u.u_pcb.
  623. *
  624. * Results:
  625. * None.
  626. *
  627. * Side effects:
  628. * None.
  629. *
  630. *----------------------------------------------------------------------------
  631. */
/*
 * MipsUserIntr: interrupt taken in user mode.  Saves the full user
 * register state into the current PCB, switches to the kernel
 * stack/gp, calls cpu_intr(frame), re-enables interrupts before AST
 * processing (to avoid SMP IPI deadlock), then restores user state
 * and erets.
 */
NNON_LEAF(MipsUserIntr, CALLFRAME_SIZ, ra)
	.set noat
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
/*
 * Save the relevant user registers into the u.u_pcb struct.
 */
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_CURPCB(k1)
	SAVE_U_PCB_REG(AT, AST, k1)
	.set at
	SAVE_U_PCB_REG(v0, V0, k1)
	SAVE_U_PCB_REG(v1, V1, k1)
	SAVE_U_PCB_REG(a0, A0, k1)
	SAVE_U_PCB_REG(a1, A1, k1)
	SAVE_U_PCB_REG(a2, A2, k1)
	SAVE_U_PCB_REG(a3, A3, k1)
	SAVE_U_PCB_REG(t0, T0, k1)
	SAVE_U_PCB_REG(t1, T1, k1)
	SAVE_U_PCB_REG(t2, T2, k1)
	SAVE_U_PCB_REG(t3, T3, k1)
	SAVE_U_PCB_REG(ta0, TA0, k1)
	SAVE_U_PCB_REG(ta1, TA1, k1)
	SAVE_U_PCB_REG(ta2, TA2, k1)
	SAVE_U_PCB_REG(ta3, TA3, k1)
	SAVE_U_PCB_REG(t8, T8, k1)
	SAVE_U_PCB_REG(t9, T9, k1)
	SAVE_U_PCB_REG(gp, GP, k1)
	SAVE_U_PCB_REG(sp, SP, k1)
	SAVE_U_PCB_REG(ra, RA, k1)
/*
 * save remaining user state in u.u_pcb.
 */
	SAVE_U_PCB_REG(s0, S0, k1)
	SAVE_U_PCB_REG(s1, S1, k1)
	SAVE_U_PCB_REG(s2, S2, k1)
	SAVE_U_PCB_REG(s3, S3, k1)
	SAVE_U_PCB_REG(s4, S4, k1)
	SAVE_U_PCB_REG(s5, S5, k1)
	SAVE_U_PCB_REG(s6, S6, k1)
	SAVE_U_PCB_REG(s7, S7, k1)
	SAVE_U_PCB_REG(s8, S8, k1)
	mflo	v0				# get lo/hi late to avoid stall
	mfhi	v1
	mfc0	a0, MIPS_COP_0_STATUS
	mfc0	a1, MIPS_COP_0_CAUSE
	MFC0	a3, MIPS_COP_0_EXC_PC
	SAVE_U_PCB_REG(v0, MULLO, k1)
	SAVE_U_PCB_REG(v1, MULHI, k1)
	SAVE_U_PCB_REG(a0, SR, k1)
	SAVE_U_PCB_REG(a1, CAUSE, k1)
	SAVE_U_PCB_REG(a3, PC, k1)		# PC in a3, note used later!
	PTR_SUBU sp, k1, CALLFRAME_SIZ		# switch to kernel SP
	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
# Turn off fpu, disable interrupts, set kernel mode, clear exception level.
	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_INT_IE | MIPS3_SR_KSU_MASK)
#ifdef CPU_CNMIPS
	and	t0, t0, ~(MIPS_SR_COP_2_BIT)
	or	t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX | MIPS_SR_PX)
#elif defined(CPU_RMI) || defined(CPU_NLM)
	or	t0, t0, (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_COP_2_BIT)
#endif
	mtc0	t0, MIPS_COP_0_STATUS
	ITLBNOPFIX
	PTR_ADDU a0, k1, U_PCB_REGS
/*
 * Call the interrupt handler.
 */
	PTR_LA	k0, _C_LABEL(cpu_intr)
	jalr	k0
	REG_S	a3, CALLFRAME_RA(sp)		# for debugging (delay slot)
/*
 * Enable interrupts before doing ast().
 *
 * On SMP kernels the AST processing might trigger IPI to other processors.
 * If that processor is also doing AST processing with interrupts disabled
 * then we may deadlock.
 */
	mfc0	a0, MIPS_COP_0_STATUS
	or	a0, a0, MIPS_SR_INT_IE
	mtc0	a0, MIPS_COP_0_STATUS
	ITLBNOPFIX
/*
 * DO_AST runs with interrupts enabled (see above).
 */
	DO_AST
/*
 * Restore user registers and return.
 */
	CLEAR_STATUS
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_CURPCB(k1)
/*
 * Update interrupt mask in saved status register.
 * Some interrupts could be disabled by intr filters.
 */
	mfc0	a0, MIPS_COP_0_STATUS
	and	a0, a0, MIPS_SR_INT_MASK
	RESTORE_U_PCB_REG(a1, SR, k1)
	and	a1, a1, ~MIPS_SR_INT_MASK
	or	a1, a1, a0
	SAVE_U_PCB_REG(a1, SR, k1)
	RESTORE_U_PCB_REG(s0, S0, k1)
	RESTORE_U_PCB_REG(s1, S1, k1)
	RESTORE_U_PCB_REG(s2, S2, k1)
	RESTORE_U_PCB_REG(s3, S3, k1)
	RESTORE_U_PCB_REG(s4, S4, k1)
	RESTORE_U_PCB_REG(s5, S5, k1)
	RESTORE_U_PCB_REG(s6, S6, k1)
	RESTORE_U_PCB_REG(s7, S7, k1)
	RESTORE_U_PCB_REG(s8, S8, k1)
	RESTORE_U_PCB_REG(t0, MULLO, k1)
	RESTORE_U_PCB_REG(t1, MULHI, k1)
	RESTORE_U_PCB_REG(t2, PC, k1)
	mtlo	t0
	mthi	t1
	MTC0	t2, MIPS_COP_0_EXC_PC		# set return address
	RESTORE_U_PCB_REG(v0, V0, k1)
	RESTORE_U_PCB_REG(v1, V1, k1)
	RESTORE_U_PCB_REG(a0, A0, k1)
	RESTORE_U_PCB_REG(a1, A1, k1)
	RESTORE_U_PCB_REG(a2, A2, k1)
	RESTORE_U_PCB_REG(a3, A3, k1)
	RESTORE_U_PCB_REG(t0, T0, k1)
	RESTORE_U_PCB_REG(t1, T1, k1)
	RESTORE_U_PCB_REG(t2, T2, k1)
	RESTORE_U_PCB_REG(t3, T3, k1)
	RESTORE_U_PCB_REG(ta0, TA0, k1)
	RESTORE_U_PCB_REG(ta1, TA1, k1)
	RESTORE_U_PCB_REG(ta2, TA2, k1)
	RESTORE_U_PCB_REG(ta3, TA3, k1)
	RESTORE_U_PCB_REG(t8, T8, k1)
	RESTORE_U_PCB_REG(t9, T9, k1)
	RESTORE_U_PCB_REG(gp, GP, k1)
	RESTORE_U_PCB_REG(k0, SR, k1)
	RESTORE_U_PCB_REG(sp, SP, k1)
	RESTORE_U_PCB_REG(ra, RA, k1)
	.set noat
	RESTORE_U_PCB_REG(AT, AST, k1)
	mtc0	k0, MIPS_COP_0_STATUS		# SR with EXL set.
	ITLBNOPFIX
	sync
	eret
	.set at
END(MipsUserIntr)
/*
 * MipsTLBInvalidException: a TLB entry matched but was marked invalid.
 * Re-walks the page tables (kernel_segmap for kernel addresses, the
 * per-CPU segbase for user addresses); if a valid PTE now exists, the
 * even/odd pair is loaded and the TLB entry is rewritten (indexed if
 * tlbp finds a match, random otherwise).  Otherwise falls through to
 * full exception processing at label 3, which also detects and
 * reports kernel stack overflow.
 */
NLEAF(MipsTLBInvalidException)
	.set push
	.set noat
	.set noreorder
	MFC0	k0, MIPS_COP_0_BAD_VADDR
	PTR_LI	k1, VM_MAXUSER_ADDRESS
	sltu	k1, k0, k1
	bnez	k1, 1f
	nop
	/* Kernel address. */
	lui	k1, %hi(kernel_segmap)		# k1=hi of segbase
	b	2f
	PTR_L	k1, %lo(kernel_segmap)(k1)	# k1=segment tab base (delay slot)
1:	/* User address. */
	GET_CPU_PCPU(k1)
	PTR_L	k1, PC_SEGBASE(k1)
2:	/* Validate page directory pointer. */
	beqz	k1, 3f
	nop
	PTR_SRL	k0, SEGSHIFT - PTRSHIFT		# k0=seg offset (almost)
	beq	k1, zero, MipsKernGenException	# ==0 -- no seg tab
	andi	k0, k0, PDEPTRMASK		#06: k0=seg offset (delay slot)
	PTR_ADDU k1, k0, k1			# k1=seg entry address
	PTR_L	k1, 0(k1)			# k1=seg entry
	/* Validate page table pointer. */
	beqz	k1, 3f
	nop
#ifdef __mips_n64
	/* Extra page-directory level on 64-bit kernels. */
	MFC0	k0, MIPS_COP_0_BAD_VADDR
	PTR_SRL	k0, PDRSHIFT - PTRSHIFT		# k0=pde offset (almost)
	beq	k1, zero, MipsKernGenException	# ==0 -- no pde tab
	andi	k0, k0, PDEPTRMASK		# k0=pde offset (delay slot)
	PTR_ADDU k1, k0, k1			# k1=pde entry address
	PTR_L	k1, 0(k1)			# k1=pde entry
	/* Validate pde table pointer. */
	beqz	k1, 3f
	nop
#endif
	MFC0	k0, MIPS_COP_0_BAD_VADDR	# k0=bad address (again)
	PTR_SRL	k0, PAGE_SHIFT - PTESHIFT	# k0=VPN
	andi	k0, k0, PTEMASK			# k0=page tab offset
	PTR_ADDU k1, k1, k0			# k1=pte address
	PTE_L	k0, 0(k1)			# k0=this PTE
	/* Validate page table entry. */
	andi	k0, PTE_V
	beqz	k0, 3f
	nop
	/* Check whether this is an even or odd entry. */
	andi	k0, k1, PTESIZE
	bnez	k0, odd_page
	nop
	PTE_L	k0, 0(k1)
	PTE_L	k1, PTESIZE(k1)
	CLEAR_PTE_SWBITS(k0)
	PTE_MTC0 k0, MIPS_COP_0_TLB_LO0
	COP0_SYNC
	CLEAR_PTE_SWBITS(k1)
	PTE_MTC0 k1, MIPS_COP_0_TLB_LO1
	COP0_SYNC
	b	tlb_insert_entry
	nop
odd_page:
	PTE_L	k0, -PTESIZE(k1)		# even PTE is one slot back
	PTE_L	k1, 0(k1)
	CLEAR_PTE_SWBITS(k0)
	PTE_MTC0 k0, MIPS_COP_0_TLB_LO0
	COP0_SYNC
	CLEAR_PTE_SWBITS(k1)
	PTE_MTC0 k1, MIPS_COP_0_TLB_LO1
	COP0_SYNC
tlb_insert_entry:
	tlbp					# probe for an existing entry
	HAZARD_DELAY
	mfc0	k0, MIPS_COP_0_TLB_INDEX
	bltz	k0, tlb_insert_random		# no match: pick a random slot
	nop
	tlbwi					# match: overwrite it in place
	eret
	ssnop
tlb_insert_random:
	tlbwr
	eret
	ssnop
3:
	/*
	 * Branch to the comprehensive exception processing.
	 */
	mfc0	k1, MIPS_COP_0_STATUS
	andi	k1, k1, SR_KSU_USER
	bnez	k1, _C_LABEL(MipsUserGenException)
	nop
	/*
	 * Check for kernel stack overflow.
	 */
	GET_CPU_PCPU(k1)
	PTR_L	k0, PC_CURTHREAD(k1)
	PTR_L	k0, TD_KSTACK(k0)
	sltu	k0, k0, sp
	bnez	k0, _C_LABEL(MipsKernGenException)
	nop
	/*
	 * Kernel stack overflow.
	 *
	 * Move to a valid stack before we call panic.  We use the boot stack
	 * for this purpose.
	 */
	GET_CPU_PCPU(k1)
	lw	k1, PC_CPUID(k1)
	sll	k1, k1, PAGE_SHIFT + 1
	PTR_LA	k0, _C_LABEL(pcpu_space)
	PTR_ADDU k0, PAGE_SIZE * 2
	PTR_ADDU k0, k0, k1
	/*
	 * Stash the original value of 'sp' so we can update trapframe later.
	 * We assume that SAVE_CPU does not trash 'k1'.
	 */
	move	k1, sp
	move	sp, k0
	PTR_SUBU sp, sp, KERN_EXC_FRAME_SIZE
	move	k0, ra
	move	ra, zero
	REG_S	ra, CALLFRAME_RA(sp)	/* stop the ddb backtrace right here */
	REG_S	zero, CALLFRAME_SP(sp)
	move	ra, k0
	SAVE_CPU
	/*
	 * Now restore the value of 'sp' at the time of the tlb exception in
	 * the trapframe.
	 */
	SAVE_REG(k1, SP, sp)
	/*
	 * Squelch any more overflow checks by setting the stack base to 0.
	 */
	GET_CPU_PCPU(k1)
	PTR_L	k0, PC_CURTHREAD(k1)
	PTR_S	zero, TD_KSTACK(k0)
	move	a1, a0
	PANIC("kernel stack overflow - trapframe at %p")
	/*
	 * This nop is necessary so that the 'ra' remains within the bounds
	 * of this handler.  Otherwise the ddb backtrace code will think that
	 * the panic() was called from MipsTLBMissException.
	 */
	nop
	.set pop
END(MipsTLBInvalidException)
  924. /*----------------------------------------------------------------------------
  925. *
  926. * MipsTLBMissException --
  927. *
  928. * Handle a TLB miss exception from kernel mode in kernel space.
929. * The BadVAddr, Context, and EntryHi registers contain the failed
  930. * virtual address.
  931. *
  932. * Results:
  933. * None.
  934. *
  935. * Side effects:
  936. * None.
  937. *
  938. *----------------------------------------------------------------------------
  939. */
/*
 * Fast TLB-refill path for misses taken in kernel mode on kernel
 * addresses.  Walks kernel_segmap (two levels, or three under
 * __mips_n64), loads the even/odd PTE pair for the faulting VPN2 and
 * writes a random TLB entry.  Runs entirely on k0/k1 — no other
 * register may be touched, and no stack is available here.  Any
 * failure (address above the mapped range, or a NULL directory/table
 * entry) falls through to MipsKernGenException for full processing.
 */
940. NLEAF(MipsTLBMissException)
941. .set noat
942. MFC0 k0, MIPS_COP_0_BAD_VADDR # k0=bad address
943. PTR_LI k1, VM_MAX_KERNEL_ADDRESS # check fault address against
944. sltu k1, k1, k0 # upper bound of kernel_segmap
945. bnez k1, MipsKernGenException # out of bound
946. lui k1, %hi(kernel_segmap) # k1=hi of segbase (branch delay slot)
947. PTR_SRL k0, SEGSHIFT - PTRSHIFT # k0=seg offset (almost)
948. PTR_L k1, %lo(kernel_segmap)(k1) # k1=segment tab base
949. beq k1, zero, MipsKernGenException # ==0 -- no seg tab
950. andi k0, k0, PDEPTRMASK # k0=seg offset (delay slot)
951. PTR_ADDU k1, k0, k1 # k1=seg entry address
952. PTR_L k1, 0(k1) # k1=seg entry
953. MFC0 k0, MIPS_COP_0_BAD_VADDR # k0=bad address (again)
954. beq k1, zero, MipsKernGenException # ==0 -- no page table
955. #ifdef __mips_n64
956. PTR_SRL k0, PDRSHIFT - PTRSHIFT # k0=VPN (n64: extra directory level)
957. andi k0, k0, PDEPTRMASK # k0=pde offset
958. PTR_ADDU k1, k0, k1 # k1=pde entry address
959. PTR_L k1, 0(k1) # k1=pde entry
960. MFC0 k0, MIPS_COP_0_BAD_VADDR # k0=bad address (again)
961. beq k1, zero, MipsKernGenException # ==0 -- no page table
962. #endif
963. PTR_SRL k0, PAGE_SHIFT - PTESHIFT # k0=VPN
964. andi k0, k0, PTE2MASK # k0=page tab offset (even/odd pair aligned)
965. PTR_ADDU k1, k1, k0 # k1=pte address
966. PTE_L k0, 0(k1) # k0=lo0 pte (even page)
967. PTE_L k1, PTESIZE(k1) # k1=lo1 pte (odd page)
968. CLEAR_PTE_SWBITS(k0) # strip software-only bits before hardware sees it
969. PTE_MTC0 k0, MIPS_COP_0_TLB_LO0 # lo0 is loaded
970. COP0_SYNC
971. CLEAR_PTE_SWBITS(k1)
972. PTE_MTC0 k1, MIPS_COP_0_TLB_LO1 # lo1 is loaded
973. COP0_SYNC
974. tlbwr # write to tlb at a random index
975. HAZARD_DELAY # settle CP0 hazard before eret
976. eret # return from exception
977. .set at
978. END(MipsTLBMissException)
  979. /*----------------------------------------------------------------------------
  980. *
  981. * MipsFPTrap --
  982. *
  983. * Handle a floating point Trap.
  984. *
  985. * MipsFPTrap(statusReg, causeReg, pc)
  986. * unsigned statusReg;
  987. * unsigned causeReg;
  988. * unsigned pc;
  989. *
  990. * Results:
  991. * None.
  992. *
  993. * Side effects:
  994. * None.
  995. *
  996. *----------------------------------------------------------------------------
  997. */
998. NON_LEAF(MipsFPTrap, CALLFRAME_SIZ, ra)
999. PTR_SUBU sp, sp, CALLFRAME_SIZ # allocate call frame
1000. mfc0 t0, MIPS_COP_0_STATUS
1001. REG_S ra, CALLFRAME_RA(sp)
1002. .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
1003. or t1, t0, MIPS_SR_COP_1_BIT # enable coprocessor 1 (FPU) access
1004. mtc0 t1, MIPS_COP_0_STATUS
1005. ITLBNOPFIX
1006. cfc1 t1, MIPS_FPU_CSR # stall til FP done
1007. cfc1 t1, MIPS_FPU_CSR # now get status
1008. nop
1009. sll t2, t1, (31 - 17) # unimplemented operation? (FCSR bit 17 -> sign)
1010. bgez t2, 3f # no, normal trap
1011. nop
1012. /*
1013. * We got an unimplemented operation trap so
1014. * fetch the instruction, compute the next PC and emulate the instruction.
1015. */
1016. bgez a1, 1f # Check the branch delay bit (cause bit 31).
1017. nop
1018. /*
1019. * The instruction is in the branch delay slot so the branch will have to
1020. * be emulated to get the resulting PC.
1021. */
1022. PTR_S a2, CALLFRAME_SIZ + 8(sp) # save exception PC across the call
1023. GET_CPU_PCPU(a0)
1024. #mips64 unsafe?
1025. PTR_L a0, PC_CURPCB(a0)
1026. PTR_ADDU a0, a0, U_PCB_REGS # first arg is ptr to CPU registers
1027. move a1, a2 # second arg is instruction PC
1028. move a2, t1 # third arg is floating point CSR
1029. PTR_LA t3, _C_LABEL(MipsEmulateBranch) # compute PC after branch
1030. jalr t3 # compute PC after branch
1031. move a3, zero # fourth arg is FALSE (delay slot)
1032. /*
1033. * Now load the floating-point instruction in the branch delay slot
1034. * to be emulated.  v0 = PC after the branch, from MipsEmulateBranch.
1035. */
1036. PTR_L a2, CALLFRAME_SIZ + 8(sp) # restore EXC pc
1037. b 2f
1038. lw a0, 4(a2) # a0 = coproc instruction (delay slot)
1039. /*
1040. * This is not in the branch delay slot so calculate the resulting
1041. * PC (epc + 4) into v0 and continue to MipsEmulateFP().
1042. */
1043. 1:
1044. lw a0, 0(a2) # a0 = coproc instruction
1045. #xxx mips64 unsafe?
1046. PTR_ADDU v0, a2, 4 # v0 = next pc
1047. 2:
1048. GET_CPU_PCPU(t2)
1049. PTR_L t2, PC_CURPCB(t2)
1050. SAVE_U_PCB_REG(v0, PC, t2) # save new pc in the pcb register area
1051. /*
1052. * Check to see if the instruction to be emulated is a floating-point
1053. * instruction.
1054. */
1055. srl a3, a0, MIPS_OPCODE_SHIFT # a3 = major opcode
1056. beq a3, MIPS_OPCODE_C1, 4f # this should never fail
1057. nop
1058. /*
1059. * Send a floating point exception signal to the current process.
1060. */
1061. 3:
1062. GET_CPU_PCPU(a0)
1063. PTR_L a0, PC_CURTHREAD(a0) # get current thread
1064. cfc1 a2, MIPS_FPU_CSR # code = FP exceptions
1065. ctc1 zero, MIPS_FPU_CSR # Clear exceptions
1066. PTR_LA t3, _C_LABEL(trapsignal)
1067. jalr t3
1068. li a1, SIGFPE # signal number (delay slot)
1069. b FPReturn
1070. nop
1071. /*
1072. * Finally, we can call MipsEmulateFP() where a0 is the instruction to emulate.
1073. */
1074. 4:
1075. PTR_LA t3, _C_LABEL(MipsEmulateFP)
1076. jalr t3
1077. nop
1078. /*
1079. * Turn off the floating point coprocessor and return.
1080. */
1081. FPReturn:
1082. mfc0 t0, MIPS_COP_0_STATUS
1083. PTR_L ra, CALLFRAME_RA(sp)
1084. and t0, t0, ~MIPS_SR_COP_1_BIT # disable COP1 again
1085. mtc0 t0, MIPS_COP_0_STATUS
1086. ITLBNOPFIX
1087. j ra
1088. PTR_ADDU sp, sp, CALLFRAME_SIZ # pop call frame (delay slot)
1089. END(MipsFPTrap)
  1090. /*
  1091. * Interrupt counters for vmstat.
  1092. */
1093. .data
1094. .globl intrcnt
1095. .globl sintrcnt
1096. .globl intrnames
1097. .globl sintrnames
1098. # Interrupt name table consumed by vmstat(8); one (MAXCOMLEN + 1)-byte
1099. # slot per counter, doubled (presumably hard/soft pairs — confirm
1100. # against the C code that fills this in).
1098. intrnames:
1099. .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
1100. sintrnames:
1101. #ifdef __mips_n64
1102. .quad INTRCNT_COUNT * (MAXCOMLEN + 1) * 2 # size of intrnames, in bytes
1103. #else
1104. .int INTRCNT_COUNT * (MAXCOMLEN + 1) * 2 # size of intrnames, in bytes
1105. #endif
1106. .align 4
1107. # Interrupt counters themselves: 4 bytes per counter, doubled to match
1108. # the name table above.
1107. intrcnt:
1108. .space INTRCNT_COUNT * 4 * 2
1109. sintrcnt:
1110. #ifdef __mips_n64
1111. .quad INTRCNT_COUNT * 4 * 2 # size of intrcnt, in bytes
1112. #else
1113. .int INTRCNT_COUNT * 4 * 2 # size of intrcnt, in bytes
1114. #endif
  1115. /*
  1116. * Vector to real handler in KSEG1.
  1117. */
  1118. .text
1119. VECTOR(MipsCache, unknown)
1120. PTR_LA k0, _C_LABEL(MipsCacheException) # handler's KSEG0 address
1121. li k1, MIPS_KSEG0_PHYS_MASK
1122. and k0, k1 # strip to physical address
1123. PTR_LI k1, MIPS_KSEG1_START
1124. or k0, k1 # rebase into KSEG1 (uncached alias)
1125. j k0 # run the real handler uncached
1126. nop
1127. VECTOR_END(MipsCache)
  1128. .set at
  1129. /*
  1130. * Panic on cache errors. A lot more could be done to recover
  1131. * from some types of errors but it is tricky.
  1132. */
1133. NESTED_NOPROFILE(MipsCacheException, KERN_EXC_FRAME_SIZE, ra)
1134. .set noat
1135. .mask 0x80000000, -4
1136. PTR_LA k0, _C_LABEL(panic) # return to panic
1137. PTR_LA a0, 9f # panicstr (label emitted by MSG below)
1138. MFC0 a1, MIPS_COP_0_ERROR_PC # 2nd arg: PC at the error
1139. mfc0 a2, MIPS_COP_0_CACHE_ERR # 3rd arg cache error
1140. MTC0 k0, MIPS_COP_0_ERROR_PC # set return address: eret lands in panic()
1141. mfc0 k0, MIPS_COP_0_STATUS # read current status
1142. li k1, MIPS_SR_DIAG_PE # ignore further errors
1143. or k0, k1
1144. mtc0 k0, MIPS_COP_0_STATUS # restore status
1145. COP0_SYNC
1146. eret # leave error level; "returns" into panic(a0, a1, a2)
1147. MSG("cache error @ EPC 0x%x CachErr 0x%x");
1148. .set at
1149. END(MipsCacheException)