/arch/frv/mm/tlb-miss.S

/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>

        .section .text
        .balign 4

        .globl  __entry_insn_mmu_miss
__entry_insn_mmu_miss:
        break
        nop

        .globl  __entry_insn_mmu_exception
__entry_insn_mmu_exception:
        break
        nop

        .globl  __entry_data_mmu_miss
__entry_data_mmu_miss:
        break
        nop

        .globl  __entry_data_mmu_exception
__entry_data_mmu_exception:
        break
        nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
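# A comment-only sketch of what the two fallback routines below do (this is
# illustrative pseudocode, not assembled code; the user variant further down
# simply skips the GR29 / __kernel_current_task handling):
#
#       sstep = (PCSR == &__break_tlb_miss_return_break);
#       if (sstep) {
#               /* the gdbstub parked the real return state while single-stepping
#                * over the miss handler; put it back before taking the fault */
#               PCSR = __break_tlb_miss_real_return_info[0];
#               PSR  = __break_tlb_miss_real_return_info[1];
#       }
#       CCR  = SCR2;                            /* recover the saved condition codes */
#       SCR2 = GR29;                            /* stash the faulting address (EAR0) */
#       GR29 = __kernel_current_task;           /* restore the current-task pointer */
#       goto sstep ? __entry_kernel_handle_mmu_fault_sstep
#                  : __entry_kernel_handle_mmu_fault;
#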
        .type   __tlb_kernel_fault,@function
__tlb_kernel_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p %hi(__break_tlb_miss_return_break),gr30
        setlo   %lo(__break_tlb_miss_return_break),gr30
        movsg   pcsr,gr31
        subcc   gr31,gr30,gr0,icc0
        beq     icc0,#0,__tlb_kernel_fault_sstep

        movsg   scr2,gr30
        movgs   gr30,ccr
        movgs   gr29,scr2                       /* save EAR0 value */
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi.p   @(gr29,#0),gr29                 /* restore GR29 */
        bra     __entry_kernel_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
        sethi.p %hi(__break_tlb_miss_real_return_info),gr30
        setlo   %lo(__break_tlb_miss_real_return_info),gr30
        lddi    @(gr30,0),gr30
        movgs   gr30,pcsr
        movgs   gr31,psr
        movsg   scr2,gr30
        movgs   gr30,ccr
        movgs   gr29,scr2                       /* save EAR0 value */
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi.p   @(gr29,#0),gr29                 /* restore GR29 */
        bra     __entry_kernel_handle_mmu_fault_sstep

        .size   __tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
        .type   __tlb_user_fault,@function
__tlb_user_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p %hi(__break_tlb_miss_return_break),gr30
        setlo   %lo(__break_tlb_miss_return_break),gr30
        movsg   pcsr,gr31
        subcc   gr31,gr30,gr0,icc0
        beq     icc0,#0,__tlb_user_fault_sstep

        movsg   scr2,gr30
        movgs   gr30,ccr
        bra     __entry_uspace_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
        sethi.p %hi(__break_tlb_miss_real_return_info),gr30
        setlo   %lo(__break_tlb_miss_real_return_info),gr30
        lddi    @(gr30,0),gr30
        movgs   gr30,pcsr
        movgs   gr31,psr
        movsg   scr2,gr30
        movgs   gr30,ccr
        bra     __entry_uspace_handle_mmu_fault_sstep

        .size   __tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
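# All four miss handlers below follow the same shape; here is a rough
# comment-only C-like sketch of this one (illustrative, not assembled code;
# the register/SPR names are those in the block comment above, and
# punt_IAMR1_DAMR1() merely stands for the tlbpr sequence in the body):
#
#       if (((EAR0 ^ SCR0) >> 26) != 0) {       /* cached PGE doesn't cover EAR0 */
#               pge = *(u32 *)(DAMLR3 + ((EAR0 >> 26) << 8));
#               if (!(pge & _PAGE_PRESENT))
#                       goto __tlb_kernel_fault;
#               if (pge & xAMPRx_SS)
#                       goto __itlb_k_bigpage;  /* superpage - not handled here */
#               DAMPR4 = pge | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
#               SCR0   = EAR0 & ~((1UL << 26) - 1);     /* record coverage base */
#       }
#       ptep = (u32 *)(DAMLR4 + ((EAR0 >> 12) & 0x3ffc));       /* EAR0[25:14] */
#       pte  = *ptep;
#       if (!(pte & _PAGE_PRESENT))
#               goto __tlb_kernel_fault;
#       *ptep = pte | _PAGE_ACCESSED;
#       punt_IAMR1_DAMR1();                     /* may branch to __tlb_kernel_fault */
#       IAMLR1 = DAMLR1 = (EAR0 & 0xfffff000) | CXNR;
#       IAMPR1 = DAMPR1 = pte & ~_PAGE_ACCESSED;
#       CCR  = SCR2;
#       GR29 = __kernel_current_task;
#       rett #0;
#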
        .globl  __entry_kernel_insn_tlb_miss
        .type   __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30                        /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30                 /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)                 /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
        # - IAMPR1 has no WP bit, and we mustn't lose WP information
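        # (roughly: if IAMPR1.V is set, point TPLR at IAMLR1 and use "tlbpr #4"
        #  to knock any matching entries out of the TLB and IAMR1/DAMR1, load
        #  TPPR from DAMPR1 with V forced back on, then "tlbpr #2" to write the
        #  punted translation into the TLB proper; TPXR.E set afterwards means
        #  the write failed and we bail to __tlb_kernel_fault)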
        movsg   iampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__itlb_k_nopunt         /* punt not required */

        movsg   iamlr1,gr31
        movgs   gr31,tplr                       /* set TPLR.CXN */
        tlbpr   gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        movsg   dampr1,gr31
        ori     gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movgs   gr31,tppr
        movsg   iamlr1,gr31                     /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0                  /* save to the TLB */
        movsg   tpxr,gr31                       /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

        # assemble the new TLB entry
        and     gr29,gr31,gr29
        movsg   cxnr,gr31
        or      gr29,gr31,gr29
        movgs   gr29,iamlr1                     /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr29,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi     @(gr29,#0),gr29
        rett    #0
        beq     icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
        srli    gr29,#26,gr31                   /* calculate PGE offset */
        slli    gr31,#8,gr31                    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30               /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__itlb_k_bigpage
        movgs   gr30,dampr4
        movgs   gr31,scr0

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra     __itlb_k_PTD_mapped

__itlb_k_bigpage:
        break
        nop

        .size   __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
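# This mirrors __entry_kernel_insn_tlb_miss above, just keyed on the data-side
# prediction: the hit test is roughly ((EAR0 ^ SCR1) >> 26) == 0, the cached
# PTD is reached through DAMLR5/DAMPR5, and a refilled PGE updates SCR1 rather
# than SCR0.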
        .globl  __entry_kernel_data_tlb_miss
        .type   __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30                        /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr5,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30                 /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)                 /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__dtlb_k_nopunt         /* punt not required */

        movsg   damlr1,gr31
        movgs   gr31,tplr                       /* set TPLR.CXN */
        tlbpr   gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        movsg   dampr1,gr31
        ori     gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movgs   gr31,tppr
        movsg   damlr1,gr31                     /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0                  /* save to the TLB */
        movsg   tpxr,gr31                       /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

        # assemble the new TLB entry
        and     gr29,gr31,gr29
        movsg   cxnr,gr31
        or      gr29,gr31,gr29
        movgs   gr29,iamlr1                     /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr29,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo   %lo(__kernel_current_task),gr29
        ldi     @(gr29,#0),gr29
        rett    #0
        beq     icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
        srli    gr29,#26,gr31                   /* calculate PGE offset */
        slli    gr31,#8,gr31                    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30               /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__dtlb_k_bigpage
        movgs   gr30,dampr5
        movgs   gr31,scr1

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra     __dtlb_k_PTD_mapped

__dtlb_k_bigpage:
        break
        nop

        .size   __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
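# Same walk as the kernel ITLB handler, but the faulting address arrives in
# GR28, nothing has to be reloaded into GR29 on exit, and the old IAMR1/DAMR1
# entry is punted straight into the TLB with "tlbpr #2" (no "#4" probe-delete
# pass first); a comment-only sketch of that tail:
#
#       if (DAMPR1 & xAMPRx_V) {
#               TPPR = DAMPR1;
#               TPLR = DAMLR1;
#               tlbpr #2;                       /* write the old entry into the TLB */
#               if (TPXR & TPXR_E)
#                       goto __tlb_user_fault;
#       }
#       IAMLR1 = DAMLR1 = (EAR0 & 0xfffff000) | CXNR;
#       IAMPR1 = DAMPR1 = pte & ~_PAGE_ACCESSED;
#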
        .globl  __entry_user_insn_tlb_miss
        .type   __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30                        /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30                 /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)                 /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1/DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__itlb_u_nopunt         /* punt not required */

        movsg   dampr1,gr31
        movgs   gr31,tppr
        movsg   damlr1,gr31                     /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0                  /* save to the TLB */
        movsg   tpxr,gr31                       /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

        # assemble the new TLB entry
        and     gr28,gr31,gr28
        movsg   cxnr,gr31
        or      gr28,gr31,gr28
        movgs   gr28,iamlr1                     /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr28,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        rett    #0
        beq     icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
        srli    gr28,#26,gr31                   /* calculate PGE offset */
        slli    gr31,#8,gr31                    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30               /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__itlb_u_bigpage
        movgs   gr30,dampr4
        movgs   gr31,scr0

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra     __itlb_u_PTD_mapped

__itlb_u_bigpage:
        break
        nop

        .size   __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
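# As the userspace ITLB handler above, except that a miss on the cached data
# PTD first cross-checks the insn-side PGE cache before walking the PGD;
# roughly (comment-only sketch):
#
#       if (((EAR0 ^ SCR1) >> 26) != 0) {       /* data PTD doesn't cover EAR0 */
#               if (((EAR0 ^ SCR0) >> 26) == 0)
#                       look the PTE up through DAMLR4;  /* __dtlb_u_using_iPTD */
#               else
#                       walk the PGD, load DAMPR5/SCR1;  /* __dtlb_u_iPGE_miss */
#       }
#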
        .globl  __entry_user_data_tlb_miss
        .type   __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo   %lo(0xe1200004),gr30
        st      gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo   %lo(0xffc00100),gr30
        sth     gr30,@(gr30,gr0)
        membar
#endif

        movsg   ccr,gr30                        /* save CCR */
        movgs   gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and     gr31,gr30,gr31
        movsg   damlr5,gr30

__dtlb_u_using_iPTD:
        add     gr30,gr31,gr31
        ldi     @(gr31,#0),gr30                 /* fetch the PTE */
        andicc  gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p   gr30,#_PAGE_ACCESSED,gr30
        beq     icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p   gr30,@(gr31,#0)                 /* update the PTE */
        andi    gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg   dampr1,gr31
        andicc  gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq     icc0,#0,__dtlb_u_nopunt         /* punt not required */

        movsg   dampr1,gr31
        movgs   gr31,tppr
        movsg   damlr1,gr31                     /* set TPLR.CXN */
        movgs   gr31,tplr
        tlbpr   gr31,gr0,#2,#0                  /* save to the TLB */
        movsg   tpxr,gr31                       /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos  #0xfffff000,gr31
        bne     icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

        # assemble the new TLB entry
        and     gr28,gr31,gr28
        movsg   cxnr,gr31
        or      gr28,gr31,gr28
        movgs   gr28,iamlr1                     /* xAMLR = address | context number */
        movgs   gr30,iampr1
        movgs   gr28,damlr1
        movgs   gr30,dampr1

        # return, restoring registers
        movsg   scr2,gr30
        movgs   gr30,ccr
        rett    #0
        beq     icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - first of all, check the insn PGE cache - we may well get a hit there
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
        movsg   scr0,gr31                       /* consult the insn-PGE-cache key */
        xor     gr28,gr31,gr31
        srlicc  gr31,#26,gr0,icc0
        srli    gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne     icc0,#0,__dtlb_u_iPGE_miss

        # what we're looking for is covered by the insn-PGE-cache
        setlos  0x3ffc,gr30
        and     gr31,gr30,gr31
        movsg   damlr4,gr30
        bra     __dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
        srli    gr28,#26,gr31                   /* calculate PGE offset */
        slli    gr31,#8,gr31                    /* and clear bottom bits */

        movsg   damlr3,gr30
        ld      @(gr31,gr30),gr30               /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc  gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p   gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq     icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        slli.p  gr31,#18,gr31
        bne     icc1,#0,__dtlb_u_bigpage
        movgs   gr30,dampr5
        movgs   gr31,scr1

        # we can now resume normal service
        setlos  0x3ffc,gr30
        srli.p  gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra     __dtlb_u_PTD_mapped

__dtlb_u_bigpage:
        break
        nop

        .size   __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss