/arch/s390/kernel/entry64.S


/*
 *  arch/s390/kernel/entry64.S
 *    S390 low-level entry points.
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/config.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS  = STACK_FRAME_OVERHEAD
SP_ARGS    = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW     = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0      = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1      = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2      = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3      = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4      = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5      = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6      = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7      = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8      = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9      = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10     = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11     = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12     = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13     = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14     = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15     = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC     = STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP    = STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE    = STACK_FRAME_OVERHEAD + __PT_SIZE

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
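# STACK_SHIFT/STACK_SIZE describe the per-task kernel stack area; with
# 4KB pages and, typically, a THREAD_ORDER of 2 from <asm/thread_info.h>
# this would give 16KB stacks.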
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                 _TIF_RESTART_SVC | _TIF_SINGLE_STEP)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

#define BASED(name) name-system_call(%r13)
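# BASED(name) expands to name-system_call(%r13). SAVE_ALL_BASE loads
# the address of system_call into %r13 (larl %r13,system_call), so
# %r13 plus that displacement addresses 'name' wherever the kernel
# text sits: %r13 acts as the literal pool pointer.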
        .macro  STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        stpt    \lc_offset
#endif
        .endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
        lg      %r10,\lc_from
        slg     %r10,\lc_to
        alg     %r10,\lc_sum
        stg     %r10,\lc_sum
        .endm
#endif
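# UPDATE_VTIME accumulates \lc_from - \lc_to into \lc_sum. The CPU
# timer counts down while the CPU runs, so the difference between two
# timer values saved with stpt is the CPU time consumed between them.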
/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */
        .macro  SAVE_ALL_BASE savearea
        stmg    %r12,%r15,\savearea
        larl    %r13,system_call
        .endm

        .macro  SAVE_ALL psworg,savearea,sync
        la      %r12,\psworg
        .if     \sync
        tm      \psworg+1,0x01          # test problem state bit
        jz      2f                      # skip stack setup save
        lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
        .else
        tm      \psworg+1,0x01          # test problem state bit
        jnz     1f                      # from user -> load kernel stack
        clc     \psworg+8(8),BASED(.Lcritical_end)
        jhe     0f
        clc     \psworg+8(8),BASED(.Lcritical_start)
        jl      0f
        brasl   %r14,cleanup_critical
        tm      0(%r12),0x01            # retest problem state after cleanup
        jnz     1f
0:      lg      %r14,__LC_ASYNC_STACK   # are we already on the async. stack ?
        slgr    %r14,%r15
        srag    %r14,%r14,STACK_SHIFT
        jz      2f
1:      lg      %r15,__LC_ASYNC_STACK   # load async stack
        .endif
#ifdef CONFIG_CHECK_STACK
        j       3f
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        jz      stack_overflow
3:
#endif
2:      aghi    %r15,-SP_SIZE           # make room for registers & psw
        mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
        la      %r12,\psworg
        stg     %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
        icm     %r12,12,__LC_SVC_ILC
        stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        st      %r12,SP_ILC(%r15)
        mvc     SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
        la      %r12,0
        stg     %r12,__SF_BACKCHAIN(%r15)
        .endm
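# After SAVE_ALL, %r15 points to a pt_regs area on the proper kernel
# stack: user PSW, gprs 0-15 and orig_gpr2 are saved, and the st of
# %r12 deposits the ilc in SP_ILC and the lowcore offset of the old
# PSW in the adjacent SP_TRAP field, tagging the interruption source.
# The frame's back chain is cleared so stack traces terminate here.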
        .macro  RESTORE_ALL sync
        mvc     __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
        .if     !\sync
        ni      __LC_RETURN_PSW+1,0xfd  # clear wait state bit
        .endif
        lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
        STORE_TIMER __LC_EXIT_TIMER
        lpswe   __LC_RETURN_PSW         # back to caller
        .endm
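# For the asynchronous case RESTORE_ALL clears the wait bit in the
# return PSW: the interrupt may have arrived during an enabled wait,
# and the CPU should continue after the interrupt instead of dropping
# back into the wait state. lpswe then restores the full PSW in one
# operation.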
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
        .globl  __switch_to
__switch_to:
        tm      __THREAD_per+4(%r3),0xe8 # is the new process using per ?
        jz      __switch_to_noper       # if not we're fine
        stctg   %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
        je      __switch_to_noper       # we got away without bashing TLB's
        lctlg   %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
        stmg    %r6,%r15,__SF_GPRS(%r15) # store __switch_to registers of prev task
        stg     %r15,__THREAD_ksp(%r2)  # store kernel stack to prev->tss.ksp
        lg      %r15,__THREAD_ksp(%r3)  # load kernel stack from next->tss.ksp
        lmg     %r6,%r15,__SF_GPRS(%r15) # load __switch_to registers of next task
        stg     %r3,__LC_CURRENT        # __LC_CURRENT = current task struct
        lctl    %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
        lg      %r3,__THREAD_info(%r3)  # load thread_info from task struct
        stg     %r3,__LC_THREAD_INFO
        aghi    %r3,STACK_SIZE
        stg     %r3,__LC_KERNEL_STACK   # __LC_KERNEL_STACK = new kernel stack
        br      %r14
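# thread_info sits at the low end of each task's stack area, so the
# stack top stored in __LC_KERNEL_STACK is thread_info + STACK_SIZE.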
__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */
        .globl  system_call
system_call:
        STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
        SAVE_ALL_BASE __LC_SAVE_AREA
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
        llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      sysc_do_svc
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        slag    %r7,%r7,2               # *4 and test for svc 0
        jnz     sysc_nr_ok
        # svc 0: system call number in %r1
        cl      %r1,BASED(.Lnr_syscalls)
        jnl     sysc_nr_ok
        lgfr    %r7,%r1                 # clear high word in r1
        slag    %r7,%r7,2               # *4
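# Note: the svc instruction encodes numbers 0-255 only; svc 0 is the
# escape for larger numbers, which are passed in %r1 instead. The *4
# shift turns the number into an index into sys_call_table, whose
# entries are 4 bytes (.long) each.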
sysc_nr_ok:
        mvc     SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
        larl    %r10,sys_call_table
#ifdef CONFIG_S390_SUPPORT
        tm      SP_PSW+3(%r15),0x01     # are we running in 31 bit mode ?
        jo      sysc_noemu
        larl    %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        lgf     %r8,0(%r7,%r10)         # load address of system call routine
        jnz     sysc_tracesys
        basr    %r14,%r8                # call sys_xxxx
        stg     %r2,SP_R2(%r15)         # store return value (change R2 on stack)
                                        # ATTENTION: check sys_execve_glue before
                                        # changing anything here !!
sysc_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
        jno     sysc_leave
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jnz     sysc_work               # there is work to do (signals etc.)
sysc_leave:
        RESTORE_ALL 1
#
# recheck if there is more work to do
#
sysc_work_loop:
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jz      sysc_leave              # there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jo      sysc_reschedule
        tm      __TI_flags+7(%r9),_TIF_SIGPENDING
        jo      sysc_sigpending
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
        jo      sysc_singlestep
        j       sysc_leave
#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
        larl    %r14,sysc_work_loop
        jg      schedule                # return point is sysc_work_loop
#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        sgr     %r3,%r3                 # clear *oldset
        brasl   %r14,do_signal          # call do_signal
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
        jo      sysc_singlestep
        j       sysc_leave              # out of here, do NOT recheck
#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
        ni      __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        lg      %r7,SP_R2(%r15)         # load new svc number
        slag    %r7,%r7,2               # *4
        mvc     SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
        lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
        j       sysc_do_restart         # restart svc
#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        lhi     %r0,__LC_PGM_OLD_PSW
        sth     %r0,SP_TRAP(%r15)       # set trap indication to pgm check
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        larl    %r14,sysc_return        # load adr. of system return
        jg      do_single_step          # branch to do_single_step
__critical_end:
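#
# Between __critical_start and __critical_end the stack pointer and
# lowcore save areas are in flux. If an asynchronous interrupt hits
# this window, SAVE_ALL calls cleanup_critical (see below) to bring
# the interrupted sequence to a consistent point before building the
# new interrupt frame.
#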
#
# call syscall_trace before and after system call
# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,0
        srl     %r7,2
        stg     %r7,SP_R2(%r15)
        brasl   %r14,syscall_trace
        lghi    %r0,NR_syscalls
        clg     %r0,SP_R2(%r15)
        jnh     sysc_tracenogo
        lg      %r7,SP_R2(%r15)         # strace might have changed the
        sll     %r7,2                   # system call
        lgf     %r8,0(%r7,%r10)
sysc_tracego:
        lmg     %r3,%r6,SP_R3(%r15)
        lg      %r2,SP_ORIG_R2(%r15)
        basr    %r14,%r8                # call sys_xxx
        stg     %r2,SP_R2(%r15)         # store return value
sysc_tracenogo:
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        jz      sysc_return
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,1
        larl    %r14,sysc_return        # return point is sysc_return
        jg      syscall_trace
#
# a new process exits the kernel with ret_from_fork
#
        .globl  ret_from_fork
ret_from_fork:
        lg      %r13,__LC_SVC_NEW_PSW+8
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
        jo      0f
        stg     %r15,SP_R15(%r15)       # store stack pointer for new kthread
0:      brasl   %r14,schedule_tail
        stosm   24(%r15),0x03           # reenable interrupts
        j       sysc_return
#
# clone, fork, vfork, exec and sigreturn need glue,
# because they all expect pt_regs as parameter,
# but are called with different parameters.
# return-address is set up above
#
sys_clone_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        jg      sys_clone               # branch to sys_clone
#ifdef CONFIG_S390_SUPPORT
sys32_clone_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        jg      sys32_clone             # branch to sys32_clone
#endif
sys_fork_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        jg      sys_fork                # branch to sys_fork
sys_vfork_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        jg      sys_vfork               # branch to sys_vfork
sys_execve_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        lgr     %r12,%r14               # save return address
        brasl   %r14,sys_execve         # call sys_execve
        ltgr    %r2,%r2                 # check if execve failed
        bnz     0(%r12)                 # it did fail -> store result in gpr2
        b       6(%r12)                 # SKIP STG 2,SP_R2(15) in
                                        # system_call/sysc_tracesys
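# %r12 holds the return address inside system_call/sysc_tracesys,
# which points at the 6-byte stg that stores the syscall return value.
# On failure (gpr2 != 0) we branch to it so the error code lands in
# the saved gpr2; on success sys_execve has already set up the
# registers on the stack, so b 6(%r12) skips that stg.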
#ifdef CONFIG_S390_SUPPORT
sys32_execve_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        lgr     %r12,%r14               # save return address
        brasl   %r14,sys32_execve       # call sys32_execve
        ltgr    %r2,%r2                 # check if execve failed
        bnz     0(%r12)                 # it did fail -> store result in gpr2
        b       6(%r12)                 # SKIP STG 2,SP_R2(15) in
                                        # system_call/sysc_tracesys
#endif
sys_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys_sigreturn           # branch to sys_sigreturn
#ifdef CONFIG_S390_SUPPORT
sys32_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys32_sigreturn         # branch to sys32_sigreturn
#endif
sys_rt_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys_rt_sigreturn        # branch to sys_rt_sigreturn
#ifdef CONFIG_S390_SUPPORT
sys32_rt_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys32_rt_sigreturn      # branch to sys32_rt_sigreturn
#endif
#
# sigsuspend and rt_sigsuspend need pt_regs as an additional
# parameter and they have to skip the store of %r2 into the
# user register %r2 because the return value was set in
# sigsuspend and rt_sigsuspend already and must not be overwritten!
#
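# "Skip the store" is the la %r14,6(%r14) below: it advances the
# return address past the 6-byte stg in system_call/sysc_tracesys so
# the return value already placed in the pt_regs is not overwritten.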
sys_sigsuspend_glue:
        lgr     %r5,%r4                 # move mask back
        lgr     %r4,%r3                 # move history1 parameter
        lgr     %r3,%r2                 # move history0 parameter
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as first parameter
        la      %r14,6(%r14)            # skip store of return value
        jg      sys_sigsuspend          # branch to sys_sigsuspend
#ifdef CONFIG_S390_SUPPORT
sys32_sigsuspend_glue:
        llgfr   %r4,%r4                 # unsigned long
        lgr     %r5,%r4                 # move mask back
        lgfr    %r3,%r3                 # int
        lgr     %r4,%r3                 # move history1 parameter
        lgfr    %r2,%r2                 # int
        lgr     %r3,%r2                 # move history0 parameter
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as first parameter
        la      %r14,6(%r14)            # skip store of return value
        jg      sys32_sigsuspend        # branch to sys32_sigsuspend
#endif
sys_rt_sigsuspend_glue:
        lgr     %r4,%r3                 # move sigsetsize parameter
        lgr     %r3,%r2                 # move unewset parameter
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as first parameter
        la      %r14,6(%r14)            # skip store of return value
        jg      sys_rt_sigsuspend       # branch to sys_rt_sigsuspend
#ifdef CONFIG_S390_SUPPORT
sys32_rt_sigsuspend_glue:
        llgfr   %r3,%r3                 # size_t
        lgr     %r4,%r3                 # move sigsetsize parameter
        llgtr   %r2,%r2                 # sigset_emu31_t *
        lgr     %r3,%r2                 # move unewset parameter
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as first parameter
        la      %r14,6(%r14)            # skip store of return value
        jg      sys32_rt_sigsuspend     # branch to sys32_rt_sigsuspend
#endif
sys_sigaltstack_glue:
        la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys_sigaltstack         # branch to sys_sigaltstack
#ifdef CONFIG_S390_SUPPORT
sys32_sigaltstack_glue:
        la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
        jg      sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper
#endif
/*
 * Program check handler routine
 */
        .globl  pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
        STORE_TIMER __LC_SYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA
        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
        jnz     pgm_per                 # got per exception -> special case
        SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      pgm_no_vtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3
pgm_do_call:
        sll     %r8,3
        larl    %r1,pgm_check_table
        lg      %r1,0(%r8,%r1)          # load address of handler routine
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        larl    %r14,sysc_return
        br      %r1                     # branch to interrupt-handler
#
# handle per exception
#
pgm_per:
        tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
        jnz     pgm_per_std             # ok, normal per event from user space
# ok, it's one of the special cases, now we need to find out which one
        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
        je      pgm_svcper
# no interesting special case, ignore PER event
        lmg     %r12,%r15,__LC_SAVE_AREA
        lpswe   __LC_PGM_OLD_PSW
#
# Normal per exception
#
pgm_per_std:
        SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      pgm_no_vtime2
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        lg      %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3                 # clear per-event-bit and ilc
        je      sysc_return
        j       pgm_do_call
#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      pgm_no_vtime3
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
        llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        lg      %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        j       sysc_do_svc
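# The single stepped svc now takes the normal system call path with
# TIF_SINGLE_STEP set, so sysc_work routes through sysc_singlestep
# once the call completes and the PER event is delivered after the
# svc, as intended.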
/*
 * IO interrupt handler routine
 */
        .globl  io_int_handler
io_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+32
        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      io_no_vtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        brasl   %r14,do_IRQ             # call standard irq handler
io_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
#ifdef CONFIG_PREEMPT
        jno     io_preempt              # no -> check for preemptive scheduling
#else
        jno     io_leave                # no -> skip resched & signal
#endif
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
        jnz     io_work                 # there is work to do (signals etc.)
io_leave:
        RESTORE_ALL 0
#ifdef CONFIG_PREEMPT
io_preempt:
        icm     %r0,15,__TI_precount(%r9)
        jnz     io_leave
# switch to kernel stack
        lg      %r1,SP_R15(%r15)
        aghi    %r1,-SP_SIZE
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr     %r15,%r1
io_resume_loop:
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jno     io_leave
        larl    %r1,.Lc_pactive
        mvc     __TI_precount(4,%r9),0(%r1)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        brasl   %r14,schedule           # call schedule
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        xc      __TI_precount(4,%r9),__TI_precount(%r9)
        j       io_resume_loop
#endif
#
# switch to kernel stack, then check TIF bits
#
io_work:
        lg      %r1,__LC_KERNEL_STACK
        aghi    %r1,-SP_SIZE
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr     %r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
#
io_work_loop:
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jo      io_reschedule
        tm      __TI_flags+7(%r9),_TIF_SIGPENDING
        jo      io_sigpending
        j       io_leave
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        brasl   %r14,schedule           # call scheduler
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
        jz      io_leave                # there is no work to do
        j       io_work_loop
#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        slgr    %r3,%r3                 # clear *oldset
        brasl   %r14,do_signal          # call do_signal
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        j       sysc_leave              # out of here, do NOT recheck
/*
 * External interrupt handler routine
 */
        .globl  ext_int_handler
ext_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+32
        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      ext_no_vtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        llgh    %r3,__LC_EXT_INT_CODE   # get interruption code
        brasl   %r14,do_extint
        j       io_return
/*
 * Machine check handler routines
 */
        .globl  mcck_int_handler
mcck_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA+64
        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      mcck_no_vtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
        brasl   %r14,s390_do_machine_check
mcck_return:
        RESTORE_ALL 0
#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
        .globl  restart_int_handler
restart_int_handler:
        lg      %r15,__LC_SAVE_AREA+120 # load ksp
        lghi    %r10,__LC_CREGS_SAVE_AREA
        lctlg   %c0,%c15,0(%r10)        # get new ctl regs
        lghi    %r10,__LC_AREGS_SAVE_AREA
        lam     %a0,%a15,0(%r10)
        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
        jg      start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl  restart_int_handler
restart_int_handler:
        basr    %r1,0
restart_base:
        lpswe   restart_crash-restart_base(%r1)
        .align  8
restart_crash:
        .long   0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif
#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
        lg      %r15,__LC_PANIC_STACK   # change to panic stack
        aghi    %r15,-SP_SIZE           # make room for registers & psw
        mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
        stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        la      %r1,__LC_SAVE_AREA
        chi     %r12,__LC_SVC_OLD_PSW
        je      0f
        chi     %r12,__LC_PGM_OLD_PSW
        je      0f
        la      %r1,__LC_SAVE_AREA+32   # async handlers save to +32
0:      mvc     SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        jg      kernel_stack_overflow
#endif
cleanup_table_system_call:
        .quad   system_call, sysc_do_svc
cleanup_table_sysc_return:
        .quad   sysc_return, sysc_leave
cleanup_table_sysc_leave:
        .quad   sysc_leave, sysc_work_loop
cleanup_table_sysc_work_loop:
        .quad   sysc_work_loop, sysc_reschedule

cleanup_critical:
        clc     8(8,%r12),BASED(cleanup_table_system_call)
        jl      0f
        clc     8(8,%r12),BASED(cleanup_table_system_call+8)
        jl      cleanup_system_call
0:
        clc     8(8,%r12),BASED(cleanup_table_sysc_return)
        jl      0f
        clc     8(8,%r12),BASED(cleanup_table_sysc_return+8)
        jl      cleanup_sysc_return
0:
        clc     8(8,%r12),BASED(cleanup_table_sysc_leave)
        jl      0f
        clc     8(8,%r12),BASED(cleanup_table_sysc_leave+8)
        jl      cleanup_sysc_leave
0:
        clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop)
        jl      0f
        clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
        jl      cleanup_sysc_return
0:
        br      %r14
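# cleanup_critical compares the interrupted PSW address (8(%r12))
# against the address ranges listed above. If it hit one of them, the
# matching cleanup routine rolls the half-done entry/exit sequence
# forward or back and leaves an adjusted PSW at __LC_RETURN_PSW, with
# %r12 pointing to it, so that SAVE_ALL can retry from a consistent
# state.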
cleanup_system_call:
        mvc     __LC_RETURN_PSW(16),0(%r12)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
        jh      0f
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:      clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
        jhe     cleanup_vtime
#endif
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
        jh      0f
        mvc     __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
0:      stg     %r13,__LC_SAVE_AREA+40
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
        stg     %r15,__LC_SAVE_AREA+56
        llgh    %r7,__LC_SVC_INT_CODE
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
        jhe     cleanup_stime
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      cleanup_novtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
        jh      cleanup_update
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
        mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
        la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_system_call_insn:
        .quad   sysc_saveall
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .quad   system_call
        .quad   sysc_vtime
        .quad   sysc_stime
        .quad   sysc_update
#endif
cleanup_sysc_return:
        mvc     __LC_RETURN_PSW(8),0(%r12)
        mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
        la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_sysc_leave:
        clc     8(8,%r12),BASED(cleanup_sysc_leave_insn)
        je      0f
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
        clc     8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
        je      0f
#endif
        mvc     __LC_RETURN_PSW(16),SP_PSW(%r15)
        mvc     __LC_SAVE_AREA+32(32),SP_R12(%r15)
        lmg     %r0,%r11,SP_R0(%r15)
        lg      %r15,SP_R15(%r15)
0:      la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .quad   sysc_leave + 16
#endif
        .quad   sysc_leave + 12
/*
 * Integer constants
 */
        .align  4
.Lconst:
.Lc_pactive:    .long PREEMPT_ACTIVE
.Lnr_syscalls:  .long NR_syscalls
.L0x0130:       .short 0x130
.L0x0140:       .short 0x140
.L0x0150:       .short 0x150
.L0x0160:       .short 0x160
.L0x0170:       .short 0x170
.Lcritical_start:
        .quad   __critical_start
.Lcritical_end:
        .quad   __critical_end

#define SYSCALL(esa,esame,emu) .long esame
        .globl  sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
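# The table entries are 4-byte .longs loaded with lgf, which sign
# extends to 64 bit; this presumably relies on the kernel text
# residing in the first 2GB. With CONFIG_S390_SUPPORT a second table
# built from the 'emu' column of syscalls.S provides the 31 bit
# compat entry points.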
#ifdef CONFIG_S390_SUPPORT
#define SYSCALL(esa,esame,emu) .long emu
        .globl  sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif