PageRenderTime 47ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 1ms

/src/freebsd/sys/mips/mips/support.S

https://bitbucket.org/killerpenguinassassins/open_distrib_devel
Assembly | 1595 lines | 1520 code | 75 blank | 0 comment | 13 complexity | ab6cdbc0a3c3e5a9e70d02fd7b096fac MD5 | raw file
Possible License(s): CC0-1.0, MIT, LGPL-2.0, LGPL-3.0, WTFPL, GPL-2.0, BSD-2-Clause, AGPL-3.0, CC-BY-SA-3.0, MPL-2.0, JSON, BSD-3-Clause-No-Nuclear-License-2014, LGPL-2.1, CPL-1.0, AGPL-1.0, 0BSD, ISC, Apache-2.0, GPL-3.0, IPL-1.0, MPL-2.0-no-copyleft-exception, BSD-3-Clause
  1. /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
  2. /*-
  3. * Copyright (c) 1992, 1993
  4. * The Regents of the University of California. All rights reserved.
  5. *
  6. * This code is derived from software contributed to Berkeley by
  7. * Digital Equipment Corporation and Ralph Campbell.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions
  11. * are met:
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 4. Neither the name of the University nor the names of its contributors
  18. * may be used to endorse or promote products derived from this software
  19. * without specific prior written permission.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  22. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  25. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  26. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  27. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  28. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * Copyright (C) 1989 Digital Equipment Corporation.
  34. * Permission to use, copy, modify, and distribute this software and
  35. * its documentation for any purpose and without fee is hereby granted,
  36. * provided that the above copyright notice appears in all copies.
  37. * Digital Equipment Corporation makes no representations about the
  38. * suitability of this software for any purpose. It is provided "as is"
  39. * without express or implied warranty.
  40. *
  41. * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
  42. * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
  43. * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
  44. * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
  45. * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
  46. * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
  47. *
  48. * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
  49. * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
  50. * $FreeBSD$
  51. */
  52. /*
  53. * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
  54. * All rights reserved.
  55. *
  56. * Redistribution and use in source and binary forms, with or without
  57. * modification, are permitted provided that the following conditions
  58. * are met:
  59. * 1. Redistributions of source code must retain the above copyright
  60. * notice, this list of conditions and the following disclaimer.
  61. * 2. Redistributions in binary form must reproduce the above copyright
  62. * notice, this list of conditions and the following disclaimer in the
  63. * documentation and/or other materials provided with the distribution.
  64. * 3. All advertising materials mentioning features or use of this software
  65. * must display the following acknowledgement:
  66. * This product includes software developed by Jonathan R. Stone for
  67. * the NetBSD Project.
  68. * 4. The name of the author may not be used to endorse or promote products
  69. * derived from this software without specific prior written permission.
  70. *
  71. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
  72. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  73. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  74. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
  75. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  76. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  77. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  78. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  79. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  80. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  81. * SUCH DAMAGE.
  82. */
  83. /*
  84. * Contains code that is the first executed at boot time plus
  85. * assembly language support routines.
  86. */
  87. #include "opt_cputype.h"
  88. #include "opt_ddb.h"
  89. #include <sys/errno.h>
  90. #include <machine/asm.h>
  91. #include <machine/cpu.h>
  92. #include <machine/regnum.h>
  93. #include <machine/cpuregs.h>
  94. #include "assym.s"
  95. .set noreorder # Noreorder is default style!
  96. /*
  97. * Primitives
  98. */
  99. .text
  100. /*
  101. * See if access to addr with a len type instruction causes a machine check.
  102. * len is length of access (1=byte, 2=short, 4=int)
  103. *
  104. * badaddr(addr, len)
  105. * char *addr;
  106. * int len;
  107. */
  108. LEAF(badaddr)
# Probe the address in a0 with an access of width a1 (1 = byte, 2 = short,
# otherwise word).  pcb_onfault is pointed at baderr so that a machine
# check during the probe resumes there.  Returns v0 = 0 on success, 1 on fault.
# v1 = curpcb for the whole routine.
  109. PTR_LA v0, baderr
  110. GET_CPU_PCPU(v1)
  111. PTR_L v1, PC_CURPCB(v1)
  112. bne a1, 1, 2f
  113. PTR_S v0, U_PCB_ONFAULT(v1) # delay slot: arm onfault (runs on both paths)
  114. b 5f
  115. lbu v0, (a0) # delay slot: byte probe
  116. 2:
  117. bne a1, 2, 4f
  118. nop
  119. b 5f
  120. lhu v0, (a0) # delay slot: halfword probe
  121. 4:
  122. lw v0, (a0) # word probe
  123. 5:
  124. PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
  125. j ra
  126. move v0, zero # made it w/o errors
  127. baderr:
  128. j ra
  129. li v0, 1 # trap sends us here
  130. END(badaddr)
  131. /*
  132. * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
  133. * Copy a NIL-terminated string, at most maxlen characters long. Return the
  134. * number of characters copied (including the NIL) in *lencopied. If the
  135. * string is too long, return ENAMETOOLONG; else return 0.
  136. */
  137. LEAF(copystr)
# Copy a NUL-terminated string from a0 to a1, at most a2 bytes.
# If a3 != NULL, *a3 = number of bytes copied (including the NUL).
# Returns 0, or ENAMETOOLONG if a2 was exhausted first.
# t0 keeps the original maxlen so the copied count can be derived.
  138. move t0, a2
  139. beq a2, zero, 4f
  140. 1:
  141. lbu v0, 0(a0)
  142. PTR_SUBU a2, a2, 1
  143. beq v0, zero, 2f
  144. sb v0, 0(a1) # each byte until NIL (delay slot: store before testing count)
  145. PTR_ADDU a0, a0, 1
  146. bne a2, zero, 1b # less than maxlen
  147. PTR_ADDU a1, a1, 1
  148. 4:
  149. li v0, ENAMETOOLONG # run out of space
  150. 2:
  151. beq a3, zero, 3f # return num. of copied bytes
  152. PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL
  153. PTR_S a2, 0(a3)
  154. 3:
  155. j ra # v0 is 0 or ENAMETOOLONG
  156. nop
  157. END(copystr)
  158. /*
  159. * fillw(pat, addr, count)
  160. */
  161. LEAF(fillw)
# Store the 16-bit pattern a0 into a2 halfwords starting at a1.
# NOTE(review): assumes a2 > 0 on entry — a zero count would wrap; confirm callers.
  162. 1:
  163. PTR_ADDU a2, a2, -1
  164. sh a0, 0(a1)
  165. bne a2,zero, 1b
  166. PTR_ADDU a1, a1, 2 # delay slot: advance destination
  167. jr ra
  168. nop
  169. END(fillw)
  170. /*
  171. * Optimized memory zero code.
  172. * mem_zero_page(addr);
  173. */
  174. LEAF(mem_zero_page)
# Zero one page starting at a0 using 64-bit stores, 8 bytes per iteration.
# v0 counts down from PAGE_SIZE to 0.
  175. li v0, PAGE_SIZE
  176. 1:
  177. PTR_SUBU v0, 8
  178. sd zero, 0(a0)
  179. bne zero, v0, 1b
  180. PTR_ADDU a0, 8 # delay slot: advance pointer
  181. jr ra
  182. nop
  183. END(mem_zero_page)
  184. /*
  185. * Block I/O routines mainly used by I/O drivers.
  186. *
  187. * Args as: a0 = port
  188. * a1 = memory address
  189. * a2 = count
  190. */
  191. LEAF(insb)
# Read a2 bytes from port a0 into memory at a1.
# a2 is turned into the end address of the destination region.
  192. beq a2, zero, 2f
  193. PTR_ADDU a2, a1 # delay slot: a2 = end of destination
  194. 1:
  195. lbu v0, 0(a0)
  196. PTR_ADDU a1, 1
  197. bne a1, a2, 1b
  198. sb v0, -1(a1) # delay slot: store the byte just read
  199. 2:
  200. jr ra
  201. nop
  202. END(insb)
  203. LEAF(insw)
# Read a2 halfwords from port a0 into memory at a1.
# a2 is doubled (bytes) then turned into the destination end address.
  204. beq a2, zero, 2f
  205. PTR_ADDU a2, a2 # delay slot: a2 *= 2 (count -> bytes)
  206. PTR_ADDU a2, a1
  207. 1:
  208. lhu v0, 0(a0)
  209. PTR_ADDU a1, 2
  210. bne a1, a2, 1b
  211. sh v0, -2(a1) # delay slot: store the halfword just read
  212. 2:
  213. jr ra
  214. nop
  215. END(insw)
  216. LEAF(insl)
# Read a2 words from port a0 into memory at a1.
# a2 is scaled to bytes then turned into the destination end address.
  217. beq a2, zero, 2f
  218. sll a2, 2 # delay slot: a2 *= 4 (count -> bytes)
  219. PTR_ADDU a2, a1
  220. 1:
  221. lw v0, 0(a0)
  222. PTR_ADDU a1, 4
  223. bne a1, a2, 1b
  224. sw v0, -4(a1) # delay slot: store the word just read
  225. 2:
  226. jr ra
  227. nop
  228. END(insl)
  229. LEAF(outsb)
# Write a2 bytes from memory at a1 to port a0.
  230. beq a2, zero, 2f
  231. PTR_ADDU a2, a1 # delay slot: a2 = end of source
  232. 1:
  233. lbu v0, 0(a1)
  234. PTR_ADDU a1, 1
  235. bne a1, a2, 1b
  236. sb v0, 0(a0) # delay slot: write the byte to the port
  237. 2:
  238. jr ra
  239. nop
  240. END(outsb)
  241. LEAF(outsw)
# Write a2 halfwords from memory at a1 to port a0.
# A halfword-misaligned source is handled at 3: with LWHI/LWLO merges.
  242. beq a2, zero, 2f
  243. addu a2, a2 # delay slot: a2 *= 2 (count -> bytes)
  244. li v0, 1
  245. and v0, a1 # low bit of source address
  246. bne v0, zero, 3f # arghh, unaligned.
  247. addu a2, a1 # delay slot: a2 = end of source
  248. 1:
  249. lhu v0, 0(a1)
  250. addiu a1, 2
  251. bne a1, a2, 1b
  252. sh v0, 0(a0) # delay slot: write the halfword to the port
  253. 2:
  254. jr ra
  255. nop
  256. 3:
# Unaligned source: assemble a word across the boundary, low half goes out.
  257. LWHI v0, 0(a1)
  258. LWLO v0, 3(a1)
  259. addiu a1, 2
  260. bne a1, a2, 3b
  261. sh v0, 0(a0) # delay slot: write the halfword to the port
  262. jr ra
  263. nop
  264. END(outsw)
  265. LEAF(outsl)
# Write a2 words from memory at a1 to port a0.
# A word-misaligned source is handled at 3: with LWHI/LWLO merges.
  266. beq a2, zero, 2f
  267. sll a2, 2 # delay slot: a2 *= 4 (count -> bytes)
  268. li v0, 3
  269. and v0, a1 # low two bits of source address
  270. bne v0, zero, 3f # arghh, unaligned.
  271. addu a2, a1 # delay slot: a2 = end of source
  272. 1:
  273. lw v0, 0(a1)
  274. addiu a1, 4
  275. bne a1, a2, 1b
  276. sw v0, 0(a0) # delay slot: write the word to the port
  277. 2:
  278. jr ra
  279. nop
  280. 3:
# Unaligned source: assemble each word from two partial loads.
  281. LWHI v0, 0(a1)
  282. LWLO v0, 3(a1)
  283. addiu a1, 4
  284. bne a1, a2, 3b
  285. sw v0, 0(a0) # delay slot: write the word to the port
  286. jr ra
  287. nop
  288. END(outsl)
  289. /*
  290. * Copy a null terminated string from the user address space into
  291. * the kernel address space.
  292. *
  293. * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
  294. * caddr_t fromaddr;
  295. * caddr_t toaddr;
  296. * u_int maxlength;
  297. * u_int *lencopied;
  298. */
  299. NON_LEAF(copyinstr, CALLFRAME_SIZ, ra)
# copyinstr(uaddr, kaddr, maxlen, &lencopied): copy a NUL-terminated string
# from user space into the kernel via copystr(), with pcb_onfault armed so a
# user-space fault unwinds through copyerr (returns EFAULT).
# A kernel (negative) source address is rejected up front.
  300. PTR_SUBU sp, sp, CALLFRAME_SIZ
  301. .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
  302. PTR_LA v0, copyerr
  303. blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
  304. REG_S ra, CALLFRAME_RA(sp) # delay slot: save ra (runs on both paths)
  305. GET_CPU_PCPU(v1)
  306. PTR_L v1, PC_CURPCB(v1)
  307. jal _C_LABEL(copystr)
  308. PTR_S v0, U_PCB_ONFAULT(v1) # delay slot: arm onfault before copying
  309. REG_L ra, CALLFRAME_RA(sp)
  310. GET_CPU_PCPU(v1)
  311. PTR_L v1, PC_CURPCB(v1)
  312. PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault; v0 from copystr is the result
  313. j ra
  314. PTR_ADDU sp, sp, CALLFRAME_SIZ # delay slot: pop frame
  315. END(copyinstr)
  316. /*
  317. * Copy a null terminated string from the kernel address space into
  318. * the user address space.
  319. *
  320. * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
  321. * caddr_t fromaddr;
  322. * caddr_t toaddr;
  323. * u_int maxlength;
  324. * u_int *lencopied;
  325. */
  326. NON_LEAF(copyoutstr, CALLFRAME_SIZ, ra)
# copyoutstr(kaddr, uaddr, maxlen, &lencopied): copy a NUL-terminated string
# from the kernel to user space via copystr(), with pcb_onfault armed so a
# user-space fault unwinds through copyerr (returns EFAULT).
# A kernel (negative) destination address (a1) is rejected up front.
  327. PTR_SUBU sp, sp, CALLFRAME_SIZ
  328. .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
  329. PTR_LA v0, copyerr
  330. blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
  331. REG_S ra, CALLFRAME_RA(sp) # delay slot: save ra (runs on both paths)
  332. GET_CPU_PCPU(v1)
  333. PTR_L v1, PC_CURPCB(v1)
  334. jal _C_LABEL(copystr)
  335. PTR_S v0, U_PCB_ONFAULT(v1) # delay slot: arm onfault before copying
  336. REG_L ra, CALLFRAME_RA(sp)
  337. GET_CPU_PCPU(v1)
  338. PTR_L v1, PC_CURPCB(v1)
  339. PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault; v0 from copystr is the result
  340. j ra
  341. PTR_ADDU sp, sp, CALLFRAME_SIZ # delay slot: pop frame
  342. END(copyoutstr)
  343. /*
  344. * Copy specified amount of data from user space into the kernel
  345. * copyin(from, to, len)
  346. * caddr_t *from; (user source address)
  347. * caddr_t *to; (kernel destination address)
  348. * unsigned len;
  349. */
  350. NON_LEAF(copyin, CALLFRAME_SIZ, ra)
# copyin(uaddr, kaddr, len): copy len bytes from user space into the kernel
# via bcopy(), with pcb_onfault armed so a user fault returns EFAULT through
# copyerr.  A kernel (negative) source address is rejected up front.
# Returns 0 on success.
  351. PTR_SUBU sp, sp, CALLFRAME_SIZ
  352. .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
  353. PTR_LA v0, copyerr
  354. blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
  355. REG_S ra, CALLFRAME_RA(sp) # delay slot: save ra (runs on both paths)
  356. GET_CPU_PCPU(v1)
  357. PTR_L v1, PC_CURPCB(v1)
  358. jal _C_LABEL(bcopy)
  359. PTR_S v0, U_PCB_ONFAULT(v1) # delay slot: arm onfault before copying
  360. REG_L ra, CALLFRAME_RA(sp)
  361. GET_CPU_PCPU(v1)
  362. PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
  363. PTR_S zero, U_PCB_ONFAULT(v1)
  364. PTR_ADDU sp, sp, CALLFRAME_SIZ
  365. j ra
  366. move v0, zero # delay slot: success
  367. END(copyin)
  368. /*
  369. * Copy specified amount of data from kernel to the user space
  370. * copyout(from, to, len)
  371. * caddr_t *from; (kernel source address)
  372. * caddr_t *to; (user destination address)
  373. * unsigned len;
  374. */
  375. NON_LEAF(copyout, CALLFRAME_SIZ, ra)
# copyout(kaddr, uaddr, len): copy len bytes from the kernel to user space
# via bcopy(), with pcb_onfault armed so a user fault returns EFAULT through
# copyerr.  A kernel (negative) destination address (a1) is rejected up front.
# Returns 0 on success.
  376. PTR_SUBU sp, sp, CALLFRAME_SIZ
  377. .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
  378. PTR_LA v0, copyerr
  379. blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
  380. REG_S ra, CALLFRAME_RA(sp) # delay slot: save ra (runs on both paths)
  381. GET_CPU_PCPU(v1)
  382. PTR_L v1, PC_CURPCB(v1)
  383. jal _C_LABEL(bcopy)
  384. PTR_S v0, U_PCB_ONFAULT(v1) # delay slot: arm onfault before copying
  385. REG_L ra, CALLFRAME_RA(sp)
  386. GET_CPU_PCPU(v1)
  387. PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
  388. PTR_S zero, U_PCB_ONFAULT(v1)
  389. PTR_ADDU sp, sp, CALLFRAME_SIZ
  390. j ra
  391. move v0, zero # delay slot: success
  392. END(copyout)
  393. LEAF(copyerr)
# Fault target for the copy routines above: unwind their call frame and
# return EFAULT to their caller.  Assumes the copy routine's frame is
# still on the stack (ra was saved in the branch delay slot even on the
# early-reject path).
  394. REG_L ra, CALLFRAME_RA(sp)
  395. PTR_ADDU sp, sp, CALLFRAME_SIZ
  396. j ra
  397. li v0, EFAULT # return error
  398. END(copyerr)
  399. /*
  400. * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
  401. * user text space.
  402. * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
  403. * user data space.
  404. */
#ifdef __mips_n64
  406. LEAF(fuword64)
  407. ALEAF(fuword)
  408. ALEAF(fuiword)
# Fetch a 64-bit word from user address a0 with pcb_onfault armed; a fault
# returns -1 via fswberr.  On n64 this is also fuword/fuiword.
  409. PTR_LA v0, fswberr
  410. blt a0, zero, fswberr # make sure address is in user space
  411. nop
  412. GET_CPU_PCPU(v1)
  413. PTR_L v1, PC_CURPCB(v1)
  414. PTR_S v0, U_PCB_ONFAULT(v1)
  415. ld v0, 0(a0) # fetch word
  416. j ra
  417. PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm onfault
  418. END(fuword64)
#endif
  420. LEAF(fuword32)
#ifndef __mips_n64
  422. ALEAF(fuword)
  423. ALEAF(fuiword)
#endif
# Fetch a 32-bit word from user address a0 with pcb_onfault armed; a fault
# returns -1 via fswberr.  On o32 this is also fuword/fuiword.
  425. PTR_LA v0, fswberr
  426. blt a0, zero, fswberr # make sure address is in user space
  427. nop
  428. GET_CPU_PCPU(v1)
  429. PTR_L v1, PC_CURPCB(v1)
  430. PTR_S v0, U_PCB_ONFAULT(v1)
  431. lw v0, 0(a0) # fetch word
  432. j ra
  433. PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm onfault
  434. END(fuword32)
  435. LEAF(fusword)
  436. ALEAF(fuisword)
# Fetch an unsigned 16-bit value from user address a0; fault returns -1.
  437. PTR_LA v0, fswberr
  438. blt a0, zero, fswberr # make sure address is in user space
  439. nop
  440. GET_CPU_PCPU(v1)
  441. PTR_L v1, PC_CURPCB(v1)
  442. PTR_S v0, U_PCB_ONFAULT(v1)
  443. lhu v0, 0(a0) # fetch short
  444. j ra
  445. PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm onfault
  446. END(fusword)
  447. LEAF(fubyte)
  448. ALEAF(fuibyte)
# Fetch an unsigned byte from user address a0; fault returns -1.
  449. PTR_LA v0, fswberr
  450. blt a0, zero, fswberr # make sure address is in user space
  451. nop
  452. GET_CPU_PCPU(v1)
  453. PTR_L v1, PC_CURPCB(v1)
  454. PTR_S v0, U_PCB_ONFAULT(v1)
  455. lbu v0, 0(a0) # fetch byte
  456. j ra
  457. PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm onfault
  458. END(fubyte)
  459. LEAF(suword32)
#ifndef __mips_n64
  461. XLEAF(suword)
#endif
# Store the 32-bit value a1 at user address a0; returns 0, or -1 on fault
# (via fswberr).  On o32 this is also suword.
  463. PTR_LA v0, fswberr
  464. blt a0, zero, fswberr # make sure address is in user space
  465. nop
  466. GET_CPU_PCPU(v1)
  467. PTR_L v1, PC_CURPCB(v1)
  468. PTR_S v0, U_PCB_ONFAULT(v1)
  469. sw a1, 0(a0) # store word
  470. PTR_S zero, U_PCB_ONFAULT(v1)
  471. j ra
  472. move v0, zero # delay slot: success
  473. END(suword32)
#ifdef __mips_n64
  475. LEAF(suword64)
  476. XLEAF(suword)
# Store the 64-bit value a1 at user address a0; returns 0, or -1 on fault
# (via fswberr).  On n64 this is also suword.
  477. PTR_LA v0, fswberr
  478. blt a0, zero, fswberr # make sure address is in user space
  479. nop
  480. GET_CPU_PCPU(v1)
  481. PTR_L v1, PC_CURPCB(v1)
  482. PTR_S v0, U_PCB_ONFAULT(v1)
  483. sd a1, 0(a0) # store word
  484. PTR_S zero, U_PCB_ONFAULT(v1)
  485. j ra
  486. move v0, zero # delay slot: success
  487. END(suword64)
#endif
  489. /*
  490. * casuword(9)
  491. * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
  492. */
  493. /*
  494. * casuword32(9)
  495. * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
  496. * <a2>uint32_t newval)
  497. */
  498. LEAF(casuword32)
#ifndef __mips_n64
  500. XLEAF(casuword)
#endif
# Atomic compare-and-set on user word *a0: if *a0 == a1 (oldval), store a2
# (newval) with ll/sc, retrying on sc failure.  Returns the value read
# (== oldval on success), or -1 on mismatch; a fault returns -1 via fswberr.
  502. PTR_LA v0, fswberr
  503. blt a0, zero, fswberr # make sure address is in user space
  504. nop
  505. GET_CPU_PCPU(v1)
  506. PTR_L v1, PC_CURPCB(v1)
  507. PTR_S v0, U_PCB_ONFAULT(v1)
  508. 1:
  509. move t0, a2
  510. ll v0, 0(a0) # load-linked the current value
  511. bne a1, v0, 2f
  512. nop
  513. sc t0, 0(a0) # store word (conditional)
  514. beqz t0, 1b # sc failed: retry the whole ll/sc sequence
  515. nop
  516. j 3f
  517. nop
  518. 2:
  519. li v0, -1 # comparison failed
  520. 3:
  521. PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
  522. jr ra
  523. nop
  524. END(casuword32)
#ifdef __mips_n64
  526. LEAF(casuword64)
  527. XLEAF(casuword)
# 64-bit variant of casuword32 using lld/scd: if *a0 == a1 store a2,
# retrying on scd failure.  Returns the value read, or -1 on mismatch;
# a fault returns -1 via fswberr.
  528. PTR_LA v0, fswberr
  529. blt a0, zero, fswberr # make sure address is in user space
  530. nop
  531. GET_CPU_PCPU(v1)
  532. PTR_L v1, PC_CURPCB(v1)
  533. PTR_S v0, U_PCB_ONFAULT(v1)
  534. 1:
  535. move t0, a2
  536. lld v0, 0(a0) # load-linked (64-bit) the current value
  537. bne a1, v0, 2f
  538. nop
  539. scd t0, 0(a0) # store double word (conditional)
  540. beqz t0, 1b # scd failed: retry the whole lld/scd sequence
  541. nop
  542. j 3f
  543. nop
  544. 2:
  545. li v0, -1 # comparison failed
  546. 3:
  547. PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
  548. jr ra
  549. nop
  550. END(casuword64)
#endif
#if 0
  553. /* unused in FreeBSD */
  554. /*
  555. * Have to flush instruction cache afterwards.
  556. */
  557. LEAF(suiword)
# Dead code (compiled out): store word a1 to user *instruction* space a0,
# then tail-call the icache flush, which also sets v0 = 0.
  558. PTR_LA v0, fswberr
  559. blt a0, zero, fswberr # make sure address is in user space
  560. nop
  561. GET_CPU_PCPU(v1)
  562. PTR_L v1, PC_CURPCB(v1)
  563. PTR_S v0, U_PCB_ONFAULT(v1)
  564. sw a1, 0(a0) # store word
  565. PTR_S zero, U_PCB_ONFAULT(v1)
  566. j _C_LABEL(Mips_SyncICache) # FlushICache sets v0 = 0. (Ugly)
  567. li a1, 4 # size of word
  568. END(suiword)
#endif
  570. /*
  571. * Will have to flush the instruction cache if byte merging is done in hardware.
  572. */
  573. LEAF(susword)
  574. ALEAF(suisword)
# Store the 16-bit value a1 at user address a0; returns 0, or -1 on fault.
  575. PTR_LA v0, fswberr
  576. blt a0, zero, fswberr # make sure address is in user space
  577. nop
  578. GET_CPU_PCPU(v1)
  579. PTR_L v1, PC_CURPCB(v1)
  580. PTR_S v0, U_PCB_ONFAULT(v1)
  581. sh a1, 0(a0) # store short
  582. PTR_S zero, U_PCB_ONFAULT(v1)
  583. j ra
  584. move v0, zero # delay slot: success
  585. END(susword)
  586. LEAF(subyte)
  587. ALEAF(suibyte)
# Store the byte a1 at user address a0; returns 0, or -1 on fault.
  588. PTR_LA v0, fswberr
  589. blt a0, zero, fswberr # make sure address is in user space
  590. nop
  591. GET_CPU_PCPU(v1)
  592. PTR_L v1, PC_CURPCB(v1)
  593. PTR_S v0, U_PCB_ONFAULT(v1)
  594. sb a1, 0(a0) # store byte
  595. PTR_S zero, U_PCB_ONFAULT(v1)
  596. j ra
  597. move v0, zero # delay slot: success
  598. END(subyte)
  599. LEAF(fswberr)
# Common fault target for the fetch/store user primitives: return -1.
  600. j ra
  601. li v0, -1 # delay slot: error indication
  602. END(fswberr)
  603. /*
  604. * fuswintr and suswintr are just like fusword and susword except that if
  605. * the page is not in memory or would cause a trap, then we return an error.
  606. * The important thing is to prevent sleep() and switch().
  607. */
  608. LEAF(fuswintr)
# Interrupt-safe fusword: fetch an unsigned 16-bit value from user address
# a0, but on any fault (including a not-resident page) return -1 via
# fswintrberr rather than sleeping or switching.
  609. PTR_LA v0, fswintrberr
  610. blt a0, zero, fswintrberr # make sure address is in user space
  611. nop
  612. GET_CPU_PCPU(v1)
  613. PTR_L v1, PC_CURPCB(v1)
  614. PTR_S v0, U_PCB_ONFAULT(v1)
  615. lhu v0, 0(a0) # fetch short
  616. j ra
  617. PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm onfault
  618. END(fuswintr)
  619. LEAF(suswintr)
# Interrupt-safe susword: store the 16-bit value a1 at user address a0;
# returns 0, or -1 via fswintrberr on any fault (never sleeps).
  620. PTR_LA v0, fswintrberr
  621. blt a0, zero, fswintrberr # make sure address is in user space
  622. nop
  623. GET_CPU_PCPU(v1)
  624. PTR_L v1, PC_CURPCB(v1)
  625. PTR_S v0, U_PCB_ONFAULT(v1)
  626. sh a1, 0(a0) # store short
  627. PTR_S zero, U_PCB_ONFAULT(v1)
  628. j ra
  629. move v0, zero # delay slot: success
  630. END(suswintr)
  631. LEAF(fswintrberr)
# Fault target for fuswintr/suswintr: return -1.
  632. j ra
  633. li v0, -1 # delay slot: error indication
  634. END(fswintrberr)
  635. /*
  636. * memcpy(to, from, len)
  637. * {ov}bcopy(from, to, len)
  638. */
  639. LEAF(memcpy)
# memcpy(dst, src, len): argument order is swapped into bcopy's (src, dst)
# convention, then falls through into bcopy/ovbcopy below.
  640. .set noreorder
  641. move v0, a0 # swap from and to
  642. move a0, a1
  643. move a1, v0
  644. ALEAF(bcopy)
  645. ALEAF(ovbcopy)
# bcopy(src=a0, dst=a1, len=a2).  Overlap where src < dst < src+len is
# detected and copied backwards byte-by-byte; otherwise a forward copy is
# done, word-at-a-time when len >= 12, with LWHI/LWLO handling a
# misaligned source.
  646. .set noreorder
  647. PTR_ADDU t0, a0, a2 # t0 = end of s1 region
  648. sltu t1, a1, t0
  649. sltu t2, a0, a1
  650. and t1, t1, t2 # t1 = true if from < to < (from+len)
  651. beq t1, zero, forward # non overlapping, do forward copy
  652. slt t2, a2, 12 # check for small copy
  653. ble a2, zero, 2f
  654. PTR_ADDU t1, a1, a2 # t1 = end of to region
  655. 1:
  656. lb v1, -1(t0) # copy bytes backwards,
  657. PTR_SUBU t0, t0, 1 # doesnt happen often so do slow way
  658. PTR_SUBU t1, t1, 1
  659. bne t0, a0, 1b
  660. sb v1, 0(t1)
  661. 2:
  662. j ra
  663. nop
  664. forward:
  665. bne t2, zero, smallcpy # do a small bcopy
  666. xor v1, a0, a1 # compare low two bits of addresses
  667. and v1, v1, 3
  668. PTR_SUBU a3, zero, a1 # compute # bytes to word align address
  669. beq v1, zero, aligned # addresses can be word aligned
  670. and a3, a3, 3
# Source and destination have different alignment: align the destination,
# then copy words assembling each from an unaligned source.
  671. beq a3, zero, 1f
  672. PTR_SUBU a2, a2, a3 # subtract from remaining count
  673. LWHI v1, 0(a0) # get next 4 bytes (unaligned)
  674. LWLO v1, 3(a0)
  675. PTR_ADDU a0, a0, a3
  676. SWHI v1, 0(a1) # store 1, 2, or 3 bytes to align a1
  677. PTR_ADDU a1, a1, a3
  678. 1:
  679. and v1, a2, 3 # compute number of words left
  680. PTR_SUBU a3, a2, v1
  681. move a2, v1
  682. PTR_ADDU a3, a3, a0 # compute ending address
  683. 2:
  684. LWHI v1, 0(a0) # copy words a0 unaligned, a1 aligned
  685. LWLO v1, 3(a0)
  686. PTR_ADDU a0, a0, 4
  687. sw v1, 0(a1)
  688. PTR_ADDU a1, a1, 4
  689. bne a0, a3, 2b
  690. nop # We have to do this mmu-bug.
  691. b smallcpy
  692. nop
  693. aligned:
# Both pointers have the same misalignment: align them together, then
# copy whole words with plain lw/sw.
  694. beq a3, zero, 1f
  695. PTR_SUBU a2, a2, a3 # subtract from remaining count
  696. LWHI v1, 0(a0) # copy 1, 2, or 3 bytes to align
  697. PTR_ADDU a0, a0, a3
  698. SWHI v1, 0(a1)
  699. PTR_ADDU a1, a1, a3
  700. 1:
  701. and v1, a2, 3 # compute number of whole words left
  702. PTR_SUBU a3, a2, v1
  703. move a2, v1
  704. PTR_ADDU a3, a3, a0 # compute ending address
  705. 2:
  706. lw v1, 0(a0) # copy words
  707. PTR_ADDU a0, a0, 4
  708. sw v1, 0(a1)
  709. bne a0, a3, 2b
  710. PTR_ADDU a1, a1, 4
  711. smallcpy:
# Trailing (or entirely small) copy, one byte at a time.
  712. ble a2, zero, 2f
  713. PTR_ADDU a3, a2, a0 # compute ending address
  714. 1:
  715. lbu v1, 0(a0) # copy bytes
  716. PTR_ADDU a0, a0, 1
  717. sb v1, 0(a1)
  718. bne a0, a3, 1b
  719. PTR_ADDU a1, a1, 1 # MMU BUG ? can not do -1(a1) at 0x80000000!!
  720. 2:
  721. j ra
  722. nop
  723. END(memcpy)
  724. /*
  725. * memset(void *s1, int c, int len)
  726. * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
  727. */
  728. LEAF(memset)
# memset(s1=a0, c=a1, len=a2): returns s1 in v0.  For len >= 12 the fill
# byte is replicated into all four byte lanes of t1 and stored a word at a
# time (after SWHI-aligning the start); short fills go byte-by-byte.
  729. .set noreorder
  730. blt a2, 12, memsetsmallclr # small amount to clear?
  731. move v0, a0 # save s1 for result
  732. sll t1, a1, 8 # compute c << 8 in t1
  733. or t1, t1, a1 # compute c << 8 | c in t1
  734. sll t2, t1, 16 # shift that left 16
  735. or t1, t2, t1 # or together: c replicated in all 4 bytes
  736. PTR_SUBU t0, zero, a0 # compute # bytes to word align address
  737. and t0, t0, 3
  738. beq t0, zero, 1f # skip if word aligned
  739. PTR_SUBU a2, a2, t0 # subtract from remaining count
  740. SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
  741. PTR_ADDU a0, a0, t0
  742. 1:
  743. and v1, a2, 3 # compute number of whole words left
  744. PTR_SUBU t0, a2, v1
  745. PTR_SUBU a2, a2, t0
  746. PTR_ADDU t0, t0, a0 # compute ending address
  747. 2:
  748. PTR_ADDU a0, a0, 4 # clear words
#ifdef MIPS3_5900
  750. nop
  751. nop
  752. nop
  753. nop
#endif
  755. bne a0, t0, 2b # unrolling loop does not help
  756. sw t1, -4(a0) # since we are limited by memory speed
  757. memsetsmallclr:
# Byte-at-a-time fill for the remainder (or an entirely small request).
  758. ble a2, zero, 2f
  759. PTR_ADDU t0, a2, a0 # compute ending address
  760. 1:
  761. PTR_ADDU a0, a0, 1 # clear bytes
#ifdef MIPS3_5900
  763. nop
  764. nop
  765. nop
  766. nop
#endif
  768. bne a0, t0, 1b
  769. sb a1, -1(a0)
  770. 2:
  771. j ra
  772. nop
  773. .set reorder
  774. END(memset)
  775. /*
  776. * bzero(s1, n)
  777. */
  778. LEAF(bzero)
  779. ALEAF(blkclr)
# bzero(s1=a0, n=a1): zero n bytes.  For n >= 12, SWHI-align the start and
# store zero a word at a time; short requests go byte-by-byte.
  780. .set noreorder
  781. blt a1, 12, smallclr # small amount to clear?
  782. PTR_SUBU a3, zero, a0 # compute # bytes to word align address
  783. and a3, a3, 3
  784. beq a3, zero, 1f # skip if word aligned
  785. PTR_SUBU a1, a1, a3 # subtract from remaining count
  786. SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
  787. PTR_ADDU a0, a0, a3
  788. 1:
  789. and v0, a1, 3 # compute number of words left
  790. PTR_SUBU a3, a1, v0
  791. move a1, v0
  792. PTR_ADDU a3, a3, a0 # compute ending address
  793. 2:
  794. PTR_ADDU a0, a0, 4 # clear words
  795. bne a0, a3, 2b # unrolling loop does not help
  796. sw zero, -4(a0) # since we are limited by memory speed
  797. smallclr:
# Byte-at-a-time clear for the remainder (or an entirely small request).
  798. ble a1, zero, 2f
  799. PTR_ADDU a3, a1, a0 # compute ending address
  800. 1:
  801. PTR_ADDU a0, a0, 1 # clear bytes
  802. bne a0, a3, 1b
  803. sb zero, -1(a0)
  804. 2:
  805. j ra
  806. nop
  807. END(bzero)
  808. /*
  809. * bcmp(s1, s2, n)
  810. */
  811. LEAF(bcmp)
# bcmp(s1=a0, s2=a1, n=a2): returns 0 if the two regions are equal, 1
# otherwise (no ordering).  For n >= 16: if the pointers can be brought to
# a common word alignment, compare word-at-a-time; otherwise align a1 only
# and assemble a0's words with LWHI/LWLO.  Remainders compare byte-wise.
  812. .set noreorder
  813. blt a2, 16, smallcmp # is it worth any trouble?
  814. xor v0, a0, a1 # compare low two bits of addresses
  815. and v0, v0, 3
  816. PTR_SUBU a3, zero, a1 # compute # bytes to word align address
  817. bne v0, zero, unalignedcmp # not possible to align addresses
  818. and a3, a3, 3
  819. beq a3, zero, 1f
  820. PTR_SUBU a2, a2, a3 # subtract from remaining count
  821. move v0, v1 # init v0,v1 so unmodified bytes match
  822. LWHI v0, 0(a0) # read 1, 2, or 3 bytes
  823. LWHI v1, 0(a1)
  824. PTR_ADDU a1, a1, a3
  825. bne v0, v1, nomatch
  826. PTR_ADDU a0, a0, a3
  827. 1:
  828. and a3, a2, ~3 # compute number of whole words left
  829. PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
  830. PTR_ADDU a3, a3, a0 # compute ending address
  831. 2:
  832. lw v0, 0(a0) # compare words
  833. lw v1, 0(a1)
  834. PTR_ADDU a0, a0, 4
  835. bne v0, v1, nomatch
  836. PTR_ADDU a1, a1, 4
  837. bne a0, a3, 2b
  838. nop
  839. b smallcmp # finish remainder
  840. nop
  841. unalignedcmp:
# Pointers differ in alignment: byte-compare until a1 is word aligned,
# then compare words with a0 read via LWHI/LWLO.
  842. beq a3, zero, 2f
  843. PTR_SUBU a2, a2, a3 # subtract from remaining count
  844. PTR_ADDU a3, a3, a0 # compute ending address
  845. 1:
  846. lbu v0, 0(a0) # compare bytes until a1 word aligned
  847. lbu v1, 0(a1)
  848. PTR_ADDU a0, a0, 1
  849. bne v0, v1, nomatch
  850. PTR_ADDU a1, a1, 1
  851. bne a0, a3, 1b
  852. nop
  853. 2:
  854. and a3, a2, ~3 # compute number of whole words left
  855. PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
  856. PTR_ADDU a3, a3, a0 # compute ending address
  857. 3:
  858. LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
  859. LWLO v0, 3(a0)
  860. lw v1, 0(a1)
  861. PTR_ADDU a0, a0, 4
  862. bne v0, v1, nomatch
  863. PTR_ADDU a1, a1, 4
  864. bne a0, a3, 3b
  865. nop
  866. smallcmp:
# Byte-compare the remainder (or an entirely small request).
  867. ble a2, zero, match
  868. PTR_ADDU a3, a2, a0 # compute ending address
  869. 1:
  870. lbu v0, 0(a0)
  871. lbu v1, 0(a1)
  872. PTR_ADDU a0, a0, 1
  873. bne v0, v1, nomatch
  874. PTR_ADDU a1, a1, 1
  875. bne a0, a3, 1b
  876. nop
  877. match:
  878. j ra
  879. move v0, zero # equal
  880. nomatch:
  881. j ra
  882. li v0, 1 # different
  883. END(bcmp)
  884. /*
  885. * bit = ffs(value)
  886. */
  887. LEAF(ffs)
# ffs(a0): find-first-set.  Returns the 1-based index of the lowest set
# bit of a0, or 0 if a0 == 0 (the early exit keeps v0 = 0).
  888. .set noreorder
  889. beq a0, zero, 2f
  890. move v0, zero # delay slot: result for the a0 == 0 case
  891. 1:
  892. and v1, a0, 1 # bit set?
  893. addu v0, v0, 1
  894. beq v1, zero, 1b # no, continue
  895. srl a0, a0, 1 # delay slot: shift in the next bit
  896. 2:
  897. j ra
  898. nop
  899. END(ffs)
  900. LEAF(get_current_fp)
# Return the current frame pointer (register s8) in v0.
  901. j ra
  902. move v0, s8 # delay slot: v0 = frame pointer
  903. END(get_current_fp)
  904. LEAF(loadandclear)
# Atomically read the word at *a0 and set it to zero using ll/sc,
# retrying until the sc succeeds.  Returns the old value in v0.
  905. .set noreorder
  906. 1:
  907. ll v0, 0(a0) # load-linked old value
  908. move t0, zero
  909. sc t0, 0(a0) # conditionally store zero
  910. beq t0, zero, 1b # sc failed: retry
  911. nop
  912. j ra
  913. nop
  914. END(loadandclear)
#if 0
/*
 * Dead code (compiled out): hand-rolled 32-bit atomic primitives that
 * FreeBSD now provides elsewhere.  Kept for reference; all use the
 * standard ll/sc retry loop.
 */
  916. /*
  917. * u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
  918. * Atomically compare the value stored at p with cmpval
  919. * and if the two values are equal, update value *p with
  920. * newval. Return zero if compare failed, non-zero otherwise
  921. *
  922. */
  923. LEAF(atomic_cmpset_32)
  924. .set noreorder
  925. 1:
  926. ll t0, 0(a0)
  927. move v0, zero
  928. bne t0, a1, 2f
  929. move t1, a2 # delay slot: candidate new value
  930. sc t1, 0(a0)
  931. beq t1, zero, 1b # sc failed: retry
  932. or v0, v0, 1 # delay slot: success indication
  933. 2:
  934. j ra
  935. nop
  936. END(atomic_cmpset_32)
  937. /**
  938. * u_int32_t
  939. * atomic_readandclear_32(u_int32_t *a)
  940. * {
  941. * u_int32_t retval;
  942. * retval = *a;
  943. * *a = 0;
  944. * }
  945. */
  946. LEAF(atomic_readandclear_32)
  947. .set noreorder
  948. 1:
  949. ll t0, 0(a0)
  950. move t1, zero
  951. move v0, t0 # return the old value
  952. sc t1, 0(a0)
  953. beq t1, zero, 1b # sc failed: retry
  954. nop
  955. j ra
  956. nop
  957. END(atomic_readandclear_32)
  958. /**
  959. * void
  960. * atomic_set_32(u_int32_t *a, u_int32_t b)
  961. * {
  962. * *a |= b;
  963. * }
  964. */
  965. LEAF(atomic_set_32)
  966. .set noreorder
  967. 1:
  968. ll t0, 0(a0)
  969. or t0, t0, a1
  970. sc t0, 0(a0)
  971. beq t0, zero, 1b # sc failed: retry
  972. nop
  973. j ra
  974. nop
  975. END(atomic_set_32)
  976. /**
  977. * void
  978. * atomic_add_32(uint32_t *a, uint32_t b)
  979. * {
  980. * *a += b;
  981. * }
  982. */
  983. LEAF(atomic_add_32)
  984. .set noreorder
  985. srl a0, a0, 2 # round down address to be 32-bit aligned
  986. sll a0, a0, 2
  987. 1:
  988. ll t0, 0(a0)
  989. addu t0, t0, a1
  990. sc t0, 0(a0)
  991. beq t0, zero, 1b # sc failed: retry
  992. nop
  993. j ra
  994. nop
  995. END(atomic_add_32)
  996. /**
  997. * void
  998. * atomic_clear_32(u_int32_t *a, u_int32_t b)
  999. * {
  1000. * *a &= ~b;
  1001. * }
  1002. */
  1003. LEAF(atomic_clear_32)
  1004. .set noreorder
  1005. srl a0, a0, 2 # round down address to be 32-bit aligned
  1006. sll a0, a0, 2
  1007. nor a1, zero, a1 # a1 = ~b
  1008. 1:
  1009. ll t0, 0(a0)
  1010. and t0, t0, a1 # clear the requested bits
  1011. sc t0, 0(a0)
  1012. beq t0, zero, 1b # sc failed: retry
  1013. nop
  1014. j ra
  1015. nop
  1016. END(atomic_clear_32)
  1017. /**
  1018. * void
  1019. * atomic_subtract_32(uint16_t *a, uint16_t b)
  1020. * {
  1021. * *a -= b;
  1022. * }
  1023. */
  1024. LEAF(atomic_subtract_32)
  1025. .set noreorder
  1026. srl a0, a0, 2 # round down address to be 32-bit aligned
  1027. sll a0, a0, 2
  1028. 1:
  1029. ll t0, 0(a0)
  1030. subu t0, t0, a1
  1031. sc t0, 0(a0)
  1032. beq t0, zero, 1b # sc failed: retry
  1033. nop
  1034. j ra
  1035. nop
  1036. END(atomic_subtract_32)
#endif
  1038. /**
  1039. * void
  1040. * atomic_set_16(u_int16_t *a, u_int16_t b)
  1041. * {
  1042. * *a |= b;
  1043. * }
  1044. */
  1045. LEAF(atomic_set_16)
# Atomically OR the 16-bit value a1 into the halfword at a0 by operating
# ll/sc on the enclosing aligned 32-bit word (a0 is rounded down first;
# the OR of the masked a1 cannot disturb the other halfword's bits there).
# NOTE(review): which halfword a1's bits land in depends on the original
# a0 offset only through the word rounding — confirm callers pass
# word-relative masks as expected.
  1046. .set noreorder
  1047. srl a0, a0, 2 # round down address to be 32-bit aligned
  1048. sll a0, a0, 2
  1049. andi a1, a1, 0xffff # confine the OR to 16 bits
  1050. 1:
  1051. ll t0, 0(a0)
  1052. or t0, t0, a1
  1053. sc t0, 0(a0)
  1054. beq t0, zero, 1b # sc failed: retry
  1055. nop
  1056. j ra
  1057. nop
  1058. END(atomic_set_16)
  1059. /**
  1060. * void
  1061. * atomic_clear_16(u_int16_t *a, u_int16_t b)
  1062. * {
  1063. * *a &= ~b;
  1064. * }
  1065. */
  1066. LEAF(atomic_clear_16)
# Atomically clear the bits of a1 in the low 16 bits of the aligned 32-bit
# word enclosing a0, preserving the upper 16 bits, using an ll/sc loop.
  1067. .set noreorder
  1068. srl a0, a0, 2 # round down address to be 32-bit aligned
  1069. sll a0, a0, 2
  1070. nor a1, zero, a1 # a1 = ~b
  1071. 1:
  1072. ll t0, 0(a0)
  1073. move t1, t0
  1074. andi t1, t1, 0xffff # t1 has the original lower 16 bits
  1075. and t1, t1, a1 # t1 has the new lower 16 bits
  1076. srl t0, t0, 16 # preserve original top 16 bits
  1077. sll t0, t0, 16
  1078. or t0, t0, t1 # recombine halves
  1079. sc t0, 0(a0)
  1080. beq t0, zero, 1b # sc failed: retry
  1081. nop
  1082. j ra
  1083. nop
  1084. END(atomic_clear_16)
  1085. /**
  1086. * void
  1087. * atomic_subtract_16(uint16_t *a, uint16_t b)
  1088. * {
  1089. * *a -= b;
  1090. * }
  1091. */
  1092. LEAF(atomic_subtract_16)
# Atomically subtract a1 from the low 16 bits of the aligned 32-bit word
# enclosing a0, preserving the upper 16 bits, using an ll/sc loop.
  1093. .set noreorder
  1094. srl a0, a0, 2 # round down address to be 32-bit aligned
  1095. sll a0, a0, 2
  1096. 1:
  1097. ll t0, 0(a0)
  1098. move t1, t0
  1099. andi t1, t1, 0xffff # t1 has the original lower 16 bits
  1100. subu t1, t1, a1
  1101. andi t1, t1, 0xffff # t1 has the new lower 16 bits
  1102. srl t0, t0, 16 # preserve original top 16 bits
  1103. sll t0, t0, 16
  1104. or t0, t0, t1 # recombine halves
  1105. sc t0, 0(a0)
  1106. beq t0, zero, 1b # sc failed: retry
  1107. nop
  1108. j ra
  1109. nop
  1110. END(atomic_subtract_16)
  1111. /**
  1112. * void
  1113. * atomic_add_16(uint16_t *a, uint16_t b)
  1114. * {
  1115. * *a += b;
  1116. * }
  1117. */
  /*
   * Atomically add b to the halfword at *a, as an LL/SC
   * read-modify-write of the containing 32-bit word.  The sum is
   * re-masked to 16 bits so a carry cannot leak into the upper half.
   */
  1118. LEAF(atomic_add_16)
  1119. .set noreorder
  1120. srl a0, a0, 2 # round down address to be 32-bit aligned
  1121. sll a0, a0, 2
  1122. 1:
  1123. ll t0, 0(a0)
  1124. move t1, t0
  1125. andi t1, t1, 0xffff # t1 has the original lower 16 bits
  1126. addu t1, t1, a1
  1127. andi t1, t1, 0xffff # t1 has the new lower 16 bits
  1128. srl t0, t0, 16 # preserve original top 16 bits
  1129. sll t0, t0, 16
  1130. or t0, t0, t1 # recombine the halves
  1131. sc t0, 0(a0) # store-conditional; 0 on failure
  1132. beq t0, zero, 1b # retry on contention
  1133. nop # branch delay slot
  1134. j ra
  1135. nop # jump delay slot
  1136. END(atomic_add_16)
  1137. /**
  1138. * void
  1139. * atomic_add_8(uint8_t *a, uint8_t b)
  1140. * {
  1141. * *a += b;
  1142. * }
  1143. */
  /*
   * Atomically add b to the byte at *a, as an LL/SC read-modify-write
   * of the containing 32-bit word; the sum is re-masked to 8 bits so
   * a carry cannot leak into the upper 24 bits.
   * NOTE(review): like the _16 variants this always operates on the
   * low-order byte of the containing word -- confirm callers.
   */
  1144. LEAF(atomic_add_8)
  1145. .set noreorder
  1146. srl a0, a0, 2 # round down address to be 32-bit aligned
  1147. sll a0, a0, 2
  1148. 1:
  1149. ll t0, 0(a0)
  1150. move t1, t0
  1151. andi t1, t1, 0xff # t1 has the original lower 8 bits
  1152. addu t1, t1, a1
  1153. andi t1, t1, 0xff # t1 has the new lower 8 bits
  1154. srl t0, t0, 8 # preserve original top 24 bits
  1155. sll t0, t0, 8
  1156. or t0, t0, t1 # recombine
  1157. sc t0, 0(a0) # store-conditional; 0 on failure
  1158. beq t0, zero, 1b # retry on contention
  1159. nop # branch delay slot
  1160. j ra
  1161. nop # jump delay slot
  1162. END(atomic_add_8)
  1163. /**
  1164. * void
  1165. * atomic_subtract_8(uint8_t *a, uint8_t b)
  1166. * {
  1167. * *a -= b;
  1168. * }
  1169. */
  /*
   * Atomically subtract b from the byte at *a, as an LL/SC
   * read-modify-write of the containing 32-bit word; the result is
   * re-masked to 8 bits so a borrow cannot leak into the top 24 bits.
   */
  1170. LEAF(atomic_subtract_8)
  1171. .set noreorder
  1172. srl a0, a0, 2 # round down address to be 32-bit aligned
  1173. sll a0, a0, 2
  1174. 1:
  1175. ll t0, 0(a0)
  1176. move t1, t0
  1177. andi t1, t1, 0xff # t1 has the original lower 8 bits
  1178. subu t1, t1, a1
  1179. andi t1, t1, 0xff # t1 has the new lower 8 bits
  1180. srl t0, t0, 8 # preserve original top 24 bits
  1181. sll t0, t0, 8
  1182. or t0, t0, t1 # recombine
  1183. sc t0, 0(a0) # store-conditional; 0 on failure
  1184. beq t0, zero, 1b # retry on contention
  1185. nop # branch delay slot
  1186. j ra
  1187. nop # jump delay slot
  1188. END(atomic_subtract_8)
  1189. /*
  1190. * atomic 64-bit register read/write assembly language support routines.
  1191. */
  1192. .set noreorder # Noreorder is default style!
  1193. #if !defined(__mips_n64) && !defined(__mips_n32)
  1194. /*
  1195. * I don't know if these routines have the right number of
  1196. * NOPs in it for all processors. XXX
  1197. *
  1198. * Maybe it would be better to just leave this undefined in that case.
  1199. */
  /*
   * atomic_store_64: copy the 64-bit value at *a1 to *a0 as one
   * uninterrupted access.  On 32-bit ABIs an ld/sd pair is not
   * naturally atomic, so local interrupts are disabled around it; the
   * nop runs give the CP0 status write time to settle on older
   * pipelines (see the "right number of NOPs" caveat above).
   * NOTE(review): this guards only against the local CPU, not other
   * CPUs -- confirm that is sufficient for callers.
   */
  1200. LEAF(atomic_store_64)
  1201. mfc0 t1, MIPS_COP_0_STATUS # save current status
  1202. and t2, t1, ~MIPS_SR_INT_IE # clear interrupt-enable bit
  1203. mtc0 t2, MIPS_COP_0_STATUS # disable local interrupts
  1204. nop # CP0 hazard padding
  1205. nop
  1206. nop
  1207. nop
  1208. ld t0, (a1) # fetch 64-bit source
  1209. nop
  1210. nop
  1211. sd t0, (a0) # store 64-bit destination
  1212. nop
  1213. nop
  1214. mtc0 t1,MIPS_COP_0_STATUS # restore interrupt state
  1215. nop # CP0 hazard padding
  1216. nop
  1217. nop
  1218. nop
  1219. j ra
  1220. nop # jump delay slot
  1221. END(atomic_store_64)
  /*
   * atomic_load_64: read the 64-bit value at *a0 in one uninterrupted
   * access and deposit it at *a1.  Same local-interrupt-disable /
   * CP0-hazard-nop scheme as atomic_store_64 above.
   */
  1222. LEAF(atomic_load_64)
  1223. mfc0 t1, MIPS_COP_0_STATUS # save current status
  1224. and t2, t1, ~MIPS_SR_INT_IE # clear interrupt-enable bit
  1225. mtc0 t2, MIPS_COP_0_STATUS # disable local interrupts
  1226. nop # CP0 hazard padding
  1227. nop
  1228. nop
  1229. nop
  1230. ld t0, (a0) # fetch 64-bit source
  1231. nop
  1232. nop
  1233. sd t0, (a1) # deposit at destination
  1234. nop
  1235. nop
  1236. mtc0 t1,MIPS_COP_0_STATUS # restore interrupt state
  1237. nop # CP0 hazard padding
  1238. nop
  1239. nop
  1240. nop
  1241. j ra
  1242. nop # jump delay slot
  1243. END(atomic_load_64)
  1244. #endif
  1245. #if defined(DDB) || defined(DEBUG)
  /*
   * kdbpeek: fetch the 32-bit word at a0 on behalf of the debugger.
   * pcb_onfault is pointed at ddberr first, so a bad address returns
   * through the fault handler instead of panicking.  Unaligned
   * addresses take the LWHI/LWLO path.  Note the PTR_S instructions
   * sit in branch/jump delay slots: onfault is armed in the bne slot
   * and cleared in the jr slot.
   */
  1246. LEAF(kdbpeek)
  1247. PTR_LA v1, ddberr # fault landing pad
  1248. and v0, a0, 3 # unaligned ?
  1249. GET_CPU_PCPU(t1)
  1250. PTR_L t1, PC_CURPCB(t1) # t1 = current pcb
  1251. bne v0, zero, 1f
  1252. PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
  1253. lw v0, (a0) # aligned: plain word load
  1254. jr ra
  1255. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1256. 1:
  1257. LWHI v0, 0(a0) # unaligned load, two halves
  1258. LWLO v0, 3(a0)
  1259. jr ra
  1260. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1261. END(kdbpeek)
  /*
   * kdbpeekd: 64-bit variant of kdbpeek -- fetch the doubleword at a0
   * for the debugger with ddberr armed in pcb_onfault; unaligned
   * addresses use REG_LHI/REG_LLO.
   */
  1262. LEAF(kdbpeekd)
  1263. PTR_LA v1, ddberr # fault landing pad
  1264. and v0, a0, 3 # unaligned ?
  1265. GET_CPU_PCPU(t1)
  1266. PTR_L t1, PC_CURPCB(t1) # t1 = current pcb
  1267. bne v0, zero, 1f
  1268. PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
  1269. ld v0, (a0) # aligned: plain doubleword load
  1270. jr ra
  1271. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1272. 1:
  1273. REG_LHI v0, 0(a0) # unaligned load, two halves
  1274. REG_LLO v0, 7(a0)
  1275. jr ra
  1276. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1277. END(kdbpeekd)
  /*
   * ddberr: fault landing pad installed in pcb_onfault by the
   * kdbpeek/kdbpeekd/kdbpoke routines above; simply returns to the
   * caller when the debugger access faults.
   */
  1278. ddberr:
  1279. jr ra
  1280. nop # jump delay slot
  1281. #if defined(DDB)
  /*
   * kdbpoke: store the 32-bit value a1 at address a0 on behalf of the
   * debugger, with ddberr armed in pcb_onfault so bad addresses return
   * instead of panicking; unaligned stores use SWHI/SWLO.  As above,
   * the PTR_S arming/disarming sits in branch/jump delay slots.
   */
  1282. LEAF(kdbpoke)
  1283. PTR_LA v1, ddberr # fault landing pad
  1284. and v0, a0, 3 # unaligned ?
  1285. GET_CPU_PCPU(t1)
  1286. PTR_L t1, PC_CURPCB(t1) # t1 = current pcb
  1287. bne v0, zero, 1f
  1288. PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
  1289. sw a1, (a0) # aligned: plain word store
  1290. jr ra
  1291. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1292. 1:
  1293. SWHI a1, 0(a0) # unaligned store, two halves
  1294. SWLO a1, 3(a0)
  1295. jr ra
  1296. PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
  1297. END(kdbpoke)
  # esym: exported word, zero-initialized here.  Presumably the end-of-
  # kernel-symbols marker consumed by DDB -- confirm against its users.
  1298. .data
  1299. .globl esym
  1300. esym: .word 0
  1301. #endif /* DDB */
  1302. #endif /* DDB || DEBUG */
  1303. .text
  /*
   * breakpoint: trap into the debugger via a break instruction with
   * code MIPS_BREAK_SOVER_VAL, then return to the caller.
   */
  1304. LEAF(breakpoint)
  1305. break MIPS_BREAK_SOVER_VAL # trap to the debugger
  1306. jr ra
  1307. nop # jump delay slot
  1308. END(breakpoint)
  /*
   * setjmp(a0 = register save area): save the callee-saved registers
   * s0-s8, sp, ra plus the CP0 status word into the buffer, indexed by
   * the PREG_* slot constants (SZREG bytes per slot).  Returns 0.
   */
  1309. LEAF(setjmp)
  1310. mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
  1311. REG_S s0, (SZREG * PREG_S0)(a0)
  1312. REG_S s1, (SZREG * PREG_S1)(a0)
  1313. REG_S s2, (SZREG * PREG_S2)(a0)
  1314. REG_S s3, (SZREG * PREG_S3)(a0)
  1315. REG_S s4, (SZREG * PREG_S4)(a0)
  1316. REG_S s5, (SZREG * PREG_S5)(a0)
  1317. REG_S s6, (SZREG * PREG_S6)(a0)
  1318. REG_S s7, (SZREG * PREG_S7)(a0)
  1319. REG_S s8, (SZREG * PREG_S8)(a0)
  1320. REG_S sp, (SZREG * PREG_SP)(a0)
  1321. REG_S ra, (SZREG * PREG_RA)(a0)
  1322. REG_S v0, (SZREG * PREG_SR)(a0) # saved status word
  1323. jr ra
  1324. li v0, 0 # setjmp return
  1325. END(setjmp)
  /*
   * longjmp(a0 = register save area): restore the state saved by
   * setjmp -- callee-saved registers, sp, ra and the CP0 status word --
   * then return 1 at the saved ra.  ITLBNOPFIX is the post-mtc0
   * hazard barrier.
   */
  1326. LEAF(longjmp)
  1327. REG_L v0, (SZREG * PREG_SR)(a0) # saved status word
  1328. REG_L ra, (SZREG * PREG_RA)(a0)
  1329. REG_L s0, (SZREG * PREG_S0)(a0)
  1330. REG_L s1, (SZREG * PREG_S1)(a0)
  1331. REG_L s2, (SZREG * PREG_S2)(a0)
  1332. REG_L s3, (SZREG * PREG_S3)(a0)
  1333. REG_L s4, (SZREG * PREG_S4)(a0)
  1334. REG_L s5, (SZREG * PREG_S5)(a0)
  1335. REG_L s6, (SZREG * PREG_S6)(a0)
  1336. REG_L s7, (SZREG * PREG_S7)(a0)
  1337. REG_L s8, (SZREG * PREG_S8)(a0)
  1338. REG_L sp, (SZREG * PREG_SP)(a0)
  1339. mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
  1340. ITLBNOPFIX # CP0 write hazard barrier
  1341. jr ra
  1342. li v0, 1 # longjmp return
  1343. END(longjmp)
  /*
   * fusufault: common fault target for the user-space access
   * primitives (see casuptr below); returns -1 to the caller of the
   * faulting routine.
   * NOTE(review): the pcb pointer is loaded into t0 but never used --
   * presumably U_PCB_ONFAULT was meant to be cleared here; confirm
   * against upstream.
   * NOTE(review): no explicit nop fills the "j ra" delay slot; relies
   * on whatever END() emits / reorder settings -- verify.
   */
  1344. LEAF(fusufault)
  1345. GET_CPU_PCPU(t0)
  1346. lw t0, PC_CURTHREAD(t0)
  1347. lw t0, TD_PCB(t0) # t0 = current pcb (unused, see note)
  1348. li v0, -1 # error return
  1349. j ra
  1350. END(fusufault)
  1351. /* Define a new md function 'casuptr'. This atomically compares and sets
  1352. a pointer that is in user space. It will be used as the basic primitive
  1353. for a kernel supported user space lock implementation. */
  /*
   * casuptr(a0 = user pointer, a1 = expected old value, a2 = new value)
   * Atomic compare-and-set on a user-space word: store a2 iff the
   * current value equals a1; always return the value that was read
   * (v0), or -1 via fusufault on a bad user address.
   * FIX: the compare used "beq v0, a1, 2f", which skipped the store
   * when the values MATCHED and stored when they differed -- the exact
   * inverse of compare-and-set.  It must be bne.
   * NOTE(review): with a signed blt against VM_MAXUSER_ADDRESS the
   * range check appears ineffective for kernel (sign-bit) addresses --
   * validity actually relies on the onfault handler; confirm intent.
   * NOTE(review): the PTR_S at 2: sits in the delay slot of the sc
   * retry branch, so onfault is cleared even while spinning -- a fault
   * during a retry would not be caught; confirm acceptable.
   */
  1354. LEAF(casuptr)
  1355. PTR_LI t0, VM_MAXUSER_ADDRESS /* verify address validity */
  1356. blt a0, t0, fusufault /* trap faults */
  1357. nop
  1358. GET_CPU_PCPU(t1)
  1359. lw t1, PC_CURTHREAD(t1)
  1360. lw t1, TD_PCB(t1)
  1361. PTR_LA t2, fusufault
  1362. PTR_S t2, U_PCB_ONFAULT(t1) /* arm fault handler */
  1363. 1:
  1364. ll v0, 0(a0) /* try to load the old value */
  1365. bne v0, a1, 2f /* mismatch: return current value, no store */
  1366. move t0, a2 /* setup value to write (delay slot) */
  1367. sc t0, 0(a0) /* write if address still locked */
  1368. beq t0, zero, 1b /* if it failed, spin */
  1369. 2:
  1370. PTR_S zero, U_PCB_ONFAULT(t1) /* clean up */
  1371. j ra
  1372. END(casuptr)
  1373. #ifdef CPU_CNMIPS
  1374. /*
  1375. * void octeon_enable_shadow(void)
  1376. * turns on access to CC and CCRes
  1377. */
  /*
   * octeon_enable_shadow: write 0xf to the COP0 Info register, which
   * (per the header above) turns on access to CC and CCRes.
   */
  1378. LEAF(octeon_enable_shadow)
  1379. li t1, 0x0000000f
  1380. mtc0 t1, MIPS_COP_0_INFO
  1381. jr ra
  1382. nop # jump delay slot
  1383. END(octeon_enable_shadow)
  /*
   * octeon_get_shadow: return (v0) the current value of the COP0 Info
   * register set by octeon_enable_shadow above.
   */
  1384. LEAF(octeon_get_shadow)
  1385. mfc0 v0, MIPS_COP_0_INFO
  1386. jr ra
  1387. nop # jump delay slot
  1388. END(octeon_get_shadow)
  1389. /*
  1390. * octeon_set_control(addr, uint32_t val)
  1391. */
  /*
   * octeon_set_control(a0 = addr, a1 = val): read COP0 register 9
   * select 7 (presumably the Octeon CvmCtl register -- confirm), store
   * the previous value at *addr as a doubleword, then write val to the
   * register.  The dmfc0/dmtc0 are hand-encoded via .word because the
   * assembler mode in use cannot emit them directly.
   */
  1392. LEAF(octeon_set_control)
  1393. .set push
  1394. or t1, a1, zero # stash new value; a1 is clobbered below
  1395. /* dmfc0 a1, 9, 7*/
  1396. .word 0x40254807 # a1 = current COP0 9,7 value
  1397. sd a1, 0(a0) # save previous value for the caller
  1398. or a1, t1, zero # recover new value
  1399. /* dmtc0 a1, 9, 7*/
  1400. .word 0x40a54807 # write new value
  1401. jr ra
  1402. nop # jump delay slot
  1403. .set pop
  1404. END(octeon_set_control)
  1405. /*
  1406. * octeon_get_control(addr)
  1407. */
  /*
   * octeon_get_control(a0 = addr): read COP0 register 9 select 7
   * (presumably Octeon CvmCtl -- confirm) and store it at *addr as a
   * doubleword.  Clobbers a1.
   */
  1408. LEAF(octeon_get_control)
  1409. .set push
  1410. .set mips64r2
  1411. /* dmfc0 a1, 9, 7 */
  1412. .word 0x40254807 # hand-encoded: a1 = COP0 9,7
  1413. sd a1, 0(a0) # deposit at *addr
  1414. jr ra
  1415. nop # jump delay slot
  1416. .set pop
  1417. END(octeon_get_control)
  1418. #endif
  /*
   * mips3_ld(a0 = addr): return the 64-bit value at *addr.  On o32
   * the doubleword is loaded with interrupts disabled (ld uses the
   * full 64-bit register, which an interrupt could clobber) and then
   * split into the v0/v1 32-bit return pair according to endianness.
   * On n32/n64 it is a plain ld returned in v0.
   */
  1419. LEAF(mips3_ld)
  1420. .set push
  1421. .set noreorder
  1422. .set mips64
  1423. #if defined(__mips_o32)
  1424. mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
  1425. and t1, t0, ~(MIPS_SR_INT_IE)
  1426. mtc0 t1, MIPS_COP_0_STATUS
  1427. COP0_SYNC
  1428. nop # CP0 hazard padding
  1429. nop
  1430. nop
  1431. ld v0, 0(a0) # full 64-bit load
  1432. #if _BYTE_ORDER == _BIG_ENDIAN
  1433. dsll v1, v0, 32
  1434. dsra v1, v1, 32 # low word in v1
  1435. dsra v0, v0, 32 # high word in v0
  1436. #else
  1437. dsra v1, v0, 32 # high word in v1
  1438. dsll v0, v0, 32
  1439. dsra v0, v0, 32 # low word in v0
  1440. #endif
  1441. mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
  1442. COP0_SYNC
  1443. nop
  1444. #else /* !__mips_o32 */
  1445. ld v0, 0(a0)
  1446. #endif /* !__mips_o32 */
  1447. jr ra
  1448. nop # jump delay slot
  1449. .set pop
  1450. END(mips3_ld)
  /*
   * mips3_sd(a0 = addr, <64-bit value>): store a 64-bit value at
   * *addr.  On o32 the value arrives as the a2/a3 register pair (a1 is
   * ABI alignment padding); the halves are merged into one 64-bit
   * register per endianness and stored with interrupts disabled, since
   * an interrupt could clobber the upper register halves.  On n32/n64
   * it is a plain sd of a1.
   */
  1451. LEAF(mips3_sd)
  1452. .set push
  1453. .set mips64
  1454. .set noreorder
  1455. #if defined(__mips_o32)
  1456. mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
  1457. and t1, t0, ~(MIPS_SR_INT_IE)
  1458. mtc0 t1, MIPS_COP_0_STATUS
  1459. COP0_SYNC
  1460. nop # CP0 hazard padding
  1461. nop
  1462. nop
  1463. # NOTE: a1 is padding!
  1464. #if _BYTE_ORDER == _BIG_ENDIAN
  1465. dsll a2, a2, 32 # high word in a2
  1466. dsll a3, a3, 32 # low word in a3
  1467. dsrl a3, a3, 32
  1468. #else
  1469. dsll a2, a2, 32 # low word in a2
  1470. dsrl a2, a2, 32
  1471. dsll a3, a3, 32 # high word in a3
  1472. #endif
  1473. or a1, a2, a3 # merge halves into one 64-bit value
  1474. sd a1, 0(a0) # full 64-bit store
  1475. mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
  1476. COP0_SYNC
  1477. nop
  1478. #else /* !__mips_o32 */
  1479. sd a1, 0(a0)
  1480. #endif /* !__mips_o32 */
  1481. jr ra
  1482. nop # jump delay slot
  1483. .set pop
  1484. END(mips3_sd)