
/arch/powerpc/platforms/cell/spufs/run.c

http://github.com/torvalds/linux
// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
        struct spu_context *ctx = spu->ctx;

        /*
         * It should be impossible to preempt a context while an exception
         * is being processed, since the context switch code is specially
         * coded to deal with interrupts ... But, just in case, sanity check
         * the context pointer.  It is OK to return doing nothing since
         * the exception will be regenerated when the context is resumed.
         */
        if (ctx) {
                /* Copy exception arguments into module specific structure */
                switch (irq) {
                case 0:
                        ctx->csa.class_0_pending = spu->class_0_pending;
                        ctx->csa.class_0_dar = spu->class_0_dar;
                        break;
                case 1:
                        ctx->csa.class_1_dsisr = spu->class_1_dsisr;
                        ctx->csa.class_1_dar = spu->class_1_dar;
                        break;
                case 2:
                        break;
                }

                /* ensure that the exception status has hit memory before a
                 * thread waiting on the context's stop queue is woken */
                smp_wmb();

                wake_up_all(&ctx->stop_wq);
        }
}
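
/*
 * Tell the run loop whether the context has anything to handle: the SPU
 * has actually stopped (the status register is re-read while
 * SPU_STATUS_RUNNING is still set), a scheduler notification is pending,
 * or a class 0/1 exception has been recorded by spufs_stop_callback()
 * above.
 */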
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
        u64 dsisr;
        u32 stopped;

        stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
        *stat = ctx->ops->status_read(ctx);
        if (*stat & stopped) {
                /*
                 * If the spu hasn't finished stopping, we need to
                 * re-read the register to get the stopped value.
                 */
                if (*stat & SPU_STATUS_RUNNING)
                        goto top;
                return 1;
        }

        if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                return 1;

        dsisr = ctx->csa.class_1_dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                return 1;

        if (ctx->csa.class_0_pending)
                return 1;

        return 0;
}
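
/*
 * Load the isolated-mode loader into the SPU: purge the MFC DMA queue,
 * temporarily take the SPE out of problem state so the loader is
 * accessible, pass the loader's address through the signal notification
 * registers and start the SPU with the ISOLATE bit set, then wait for
 * the load to complete before restoring problem state.
 */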
static int spu_setup_isolated(struct spu_context *ctx)
{
        int ret;
        u64 __iomem *mfc_cntl;
        u64 sr1;
        u32 status;
        unsigned long timeout;
        const u32 status_loading = SPU_STATUS_RUNNING
                | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

        ret = -ENODEV;
        if (!isolated_loader)
                goto out;

        /*
         * We need to exclude userspace access to the context.
         *
         * To protect against memory access we invalidate all ptes
         * and make sure the pagefault handlers block on the mutex.
         */
        spu_unmap_mappings(ctx);

        mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

        /* purge the MFC DMA queue to ensure no spurious accesses before we
         * enter kernel mode */
        timeout = jiffies + HZ;
        out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
        while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
                        != MFC_CNTL_PURGE_DMA_COMPLETE) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                                        __func__);
                        ret = -EIO;
                        goto out;
                }
                cond_resched();
        }

        /* clear purge status */
        out_be64(mfc_cntl, 0);

        /* put the SPE in kernel mode to allow access to the loader */
        sr1 = spu_mfc_sr1_get(ctx->spu);
        sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

        /* start the loader */
        ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
        ctx->ops->signal2_write(ctx,
                        (unsigned long)isolated_loader & 0xffffffff);

        ctx->ops->runcntl_write(ctx,
                        SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
        ret = 0;
        timeout = jiffies + HZ;
        while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                        status_loading) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout waiting for loader\n",
                                        __func__);
                        ret = -EIO;
                        goto out_drop_priv;
                }
                cond_resched();
        }

        if (!(status & SPU_STATUS_RUNNING)) {
                /* If isolated LOAD has failed: run SPU, we will get a stop-and
                 * signal later. */
                pr_debug("%s: isolated LOAD failed\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
                ret = -EACCES;
                goto out_drop_priv;
        }

        if (!(status & SPU_STATUS_ISOLATED_STATE)) {
                /* This isn't allowed by the CBEA, but check anyway */
                pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
                ret = -EINVAL;
                goto out_drop_priv;
        }

out_drop_priv:
        /* Finished accessing the loader. Drop kernel mode */
        sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

out:
        return ret;
}
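
/*
 * Get a context ready to run: schedule it in (synchronously for NOSCHED
 * contexts), run the isolated-mode loader if requested, otherwise set
 * single-step/normal mode and the entry NPC, and finally write the run
 * control register to start the SPU.
 */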
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
        unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
        int ret;

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        /*
         * NOSCHED is synchronous scheduling with respect to the caller.
         * The caller waits for the context to be loaded.
         */
        if (ctx->flags & SPU_CREATE_NOSCHED) {
                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                }
        }

        /*
         * Apply special setup as required.
         */
        if (ctx->flags & SPU_CREATE_ISOLATE) {
                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
                        ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /*
                 * If userspace has set the runcntrl register (eg, to
                 * issue an isolated exit), we need to re-set it here
                 */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
        } else {
                unsigned long privcntl;

                if (test_thread_flag(TIF_SINGLESTEP))
                        privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                else
                        privcntl = SPU_PRIVCNTL_MODE_NORMAL;

                ctx->ops->privcntl_write(ctx, privcntl);
                ctx->ops->npc_write(ctx, *npc);
        }

        ctx->ops->runcntl_write(ctx, runcntl);

        if (ctx->flags & SPU_CREATE_NOSCHED) {
                spuctx_switch_state(ctx, SPU_UTIL_USER);
        } else {
                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                } else {
                        spuctx_switch_state(ctx, SPU_UTIL_USER);
                }
        }

        set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        return 0;
}
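
/*
 * Tear down after the run loop: read back the final status and NPC for
 * the caller, drop the context from the run queue and release it, and
 * report -ERESTARTSYS if a signal is pending.
 */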
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
                        u32 *status)
{
        int ret = 0;

        spu_del_from_rq(ctx);

        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);

        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
        spu_release(ctx);

        if (signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
                                 unsigned int *npc)
{
        int ret;

        switch (*spu_ret) {
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                /*
                 * Enter the regular syscall restarting for
                 * sys_spu_run, then restart the SPU syscall
                 * callback.
                 */
                *npc -= 8;
                ret = -ERESTARTSYS;
                break;
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * Restart block is too hard for now, just return -EINTR
                 * to the SPU.
                 * ERESTARTNOHAND comes from sys_pause, we also return
                 * -EINTR from there.
                 * Assume that we need to be restarted ourselves though.
                 */
                *spu_ret = -EINTR;
                ret = -ERESTARTSYS;
                break;
        default:
                printk(KERN_WARNING "%s: unexpected return code %ld\n",
                        __func__, *spu_ret);
                ret = 0;
        }
        return ret;
}
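
/*
 * Handle the 0x2104 stop-and-signal used for PPE-assisted calls: the
 * word following the stop instruction holds a local-store offset to a
 * struct spu_syscall_block.  Fetch it, run the system call on the PPE
 * side without pinning the SPU, write the result back into local store
 * and restart the SPU just past the indirect pointer.
 */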
static int spu_process_callback(struct spu_context *ctx)
{
        struct spu_syscall_block s;
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
        int ret;

        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
        ls = (void __iomem *)ctx->ops->get_ls(ctx);
        ls_pointer = in_be32(ls + npc);
        if (ls_pointer > (LS_SIZE - sizeof(s)))
                return -EFAULT;
        memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

        /* do actual syscall without pinning the spu */
        ret = 0;
        spu_ret = -ENOSYS;
        npc += 4;

        if (s.nr_ret < NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
                mutex_lock(&ctx->state_mutex);
                if (ret == -ERESTARTSYS)
                        return ret;
        }

        /* need to re-get the ls, as it may have changed when we released the
         * spu */
        ls = (void __iomem *)ctx->ops->get_ls(ctx);

        /* write result, jump over indirect pointer */
        memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
        ctx->ops->npc_write(ctx, npc);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        return ret;
}
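
/*
 * Main body of the spu_run(2) system call: start the context, then sleep
 * until the SPU stops and handle whatever caused the stop (syscall
 * callbacks, class 0/1 exceptions, scheduler notifications, signals),
 * repeating until the SPU stops for good.  The final SPU status word is
 * handed back to the caller unless the call has to be restarted.
 */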
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
        int ret;
        struct spu *spu;
        u32 status;

        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;

        ctx->event_return = 0;

        ret = spu_acquire(ctx);
        if (ret)
                goto out_unlock;

        spu_enable_spu(ctx);

        spu_update_sched_info(ctx);

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                goto out;
        }

        do {
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                if (unlikely(ret)) {
                        /*
                         * This is nasty: we need the state_mutex for all the
                         * bookkeeping even if the syscall was interrupted by
                         * a signal. ewww.
                         */
                        mutex_lock(&ctx->state_mutex);
                        break;
                }
                spu = ctx->spu;
                if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                                &ctx->sched_flags))) {
                        if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
                                spu_switch_notify(spu, ctx);
                                continue;
                        }
                }

                spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

                if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
                    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
                        ret = spu_process_callback(ctx);
                        if (ret)
                                break;
                        status &= ~SPU_STATUS_STOPPED_BY_STOP;
                }
                ret = spufs_handle_class1(ctx);
                if (ret)
                        break;

                ret = spufs_handle_class0(ctx);
                if (ret)
                        break;

                if (signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT |
                                      SPU_STATUS_SINGLE_STEP)));

        spu_disable_spu(ctx);
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);

        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
                ctx->stats.libassist++;

        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
             ((status & SPU_STATUS_STOPPED_BY_HALT) ||
              (status & SPU_STATUS_SINGLE_STEP) ||
              ((status & SPU_STATUS_STOPPED_BY_STOP) &&
               (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
                ret = status;

        /* Note: we don't need to force_sig SIGTRAP on single-step
         * since we have TIF_SINGLESTEP set, thus the kernel will do
         * it upon return from the syscall anyway.
         */
        if (unlikely(status & SPU_STATUS_SINGLE_STEP))
                ret = -ERESTARTSYS;

        else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
                force_sig(SIGTRAP);
                ret = -ERESTARTSYS;
        }

out:
        *event = ctx->event_return;
out_unlock:
        mutex_unlock(&ctx->run_mutex);
        return ret;
}
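
/*
 * For reference, user space reaches spufs_run_spu() through the raw
 * spu_create(2)/spu_run(2) syscalls, normally hidden behind libspe.
 * A minimal sketch, assuming spufs is mounted at /spu and using an
 * illustrative context name:
 *
 *	int fd = syscall(__NR_spu_create, "/spu/example", 0, 0700);
 *	// ... load the SPU program into the context's local store ...
 *	unsigned int npc = 0, event = 0;
 *	int status = syscall(__NR_spu_run, fd, &npc, &event);
 *
 * The value returned by spu_run is the final SPU status word computed
 * above, or a negative errno.
 */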