
/arch/powerpc/platforms/cell/spufs/switch.c

http://github.com/mirrors/linux


   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * spu_switch.c
   4 *
   5 * (C) Copyright IBM Corp. 2005
   6 *
   7 * Author: Mark Nutter <mnutter@us.ibm.com>
   8 *
   9 * Host-side part of SPU context switch sequence outlined in
  10 * Synergistic Processor Element, Book IV.
  11 *
   12 * A fully preemptive switch of an SPE is very expensive in terms
  13 * of time and system resources.  SPE Book IV indicates that SPE
  14 * allocation should follow a "serially reusable device" model,
  15 * in which the SPE is assigned a task until it completes.  When
   16 * this is not possible, this sequence may be used to preemptively
  17 * save, and then later (optionally) restore the context of a
  18 * program executing on an SPE.
  19 */
  20
  21#include <linux/export.h>
  22#include <linux/errno.h>
  23#include <linux/hardirq.h>
  24#include <linux/sched.h>
  25#include <linux/kernel.h>
  26#include <linux/mm.h>
  27#include <linux/vmalloc.h>
  28#include <linux/smp.h>
  29#include <linux/stddef.h>
  30#include <linux/unistd.h>
  31
  32#include <asm/io.h>
  33#include <asm/spu.h>
  34#include <asm/spu_priv1.h>
  35#include <asm/spu_csa.h>
  36#include <asm/mmu_context.h>
  37
  38#include "spufs.h"
  39
  40#include "spu_save_dump.h"
  41#include "spu_restore_dump.h"
  42
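/* Polling helpers: POLL_WHILE_TRUE() spins with cpu_relax() for up to
 * RELAX_SPIN_COUNT iterations while the condition holds, then yields the
 * CPU and retries.  The #if 0 variant is a plain busy-wait kept for
 * debugging.
 */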
  43#if 0
  44#define POLL_WHILE_TRUE(_c) {				\
  45    do {						\
  46    } while (_c);					\
  47  }
  48#else
  49#define RELAX_SPIN_COUNT				1000
  50#define POLL_WHILE_TRUE(_c) {				\
  51    do {						\
  52	int _i;						\
  53	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
  54	    cpu_relax();				\
  55	}						\
  56	if (unlikely(_c)) yield();			\
  57	else break;					\
  58    } while (_c);					\
  59  }
  60#endif				/* debug */
  61
  62#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
  63
  64static inline void acquire_spu_lock(struct spu *spu)
  65{
  66	/* Save, Step 1:
  67	 * Restore, Step 1:
  68	 *    Acquire SPU-specific mutual exclusion lock.
  69	 *    TBD.
  70	 */
  71}
  72
  73static inline void release_spu_lock(struct spu *spu)
  74{
  75	/* Restore, Step 76:
  76	 *    Release SPU-specific mutual exclusion lock.
  77	 *    TBD.
  78	 */
  79}
  80
  81static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
  82{
  83	struct spu_problem __iomem *prob = spu->problem;
  84	u32 isolate_state;
  85
  86	/* Save, Step 2:
  87	 * Save, Step 6:
   88	 *     If any field of SPU_Status[E,L,IS] is '1', this
  89	 *     SPU is in isolate state and cannot be context
  90	 *     saved at this time.
  91	 */
  92	isolate_state = SPU_STATUS_ISOLATED_STATE |
  93	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
  94	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
  95}
  96
  97static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
  98{
  99	/* Save, Step 3:
 100	 * Restore, Step 2:
 101	 *     Save INT_Mask_class0 in CSA.
 102	 *     Write INT_MASK_class0 with value of 0.
 103	 *     Save INT_Mask_class1 in CSA.
 104	 *     Write INT_MASK_class1 with value of 0.
 105	 *     Save INT_Mask_class2 in CSA.
 106	 *     Write INT_MASK_class2 with value of 0.
 107	 *     Synchronize all three interrupts to be sure
 108	 *     we no longer execute a handler on another CPU.
 109	 */
 110	spin_lock_irq(&spu->register_lock);
 111	if (csa) {
 112		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
 113		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
 114		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
 115	}
 116	spu_int_mask_set(spu, 0, 0ul);
 117	spu_int_mask_set(spu, 1, 0ul);
 118	spu_int_mask_set(spu, 2, 0ul);
 119	eieio();
 120	spin_unlock_irq(&spu->register_lock);
 121
 122	/*
 123	 * This flag needs to be set before calling synchronize_irq so
 124	 * that the update will be visible to the relevant handlers
 125	 * via a simple load.
 126	 */
 127	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 128	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
 129	synchronize_irq(spu->irqs[0]);
 130	synchronize_irq(spu->irqs[1]);
 131	synchronize_irq(spu->irqs[2]);
 132}
 133
 134static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
 135{
 136	/* Save, Step 4:
 137	 * Restore, Step 25.
 138	 *    Set a software watchdog timer, which specifies the
 139	 *    maximum allowable time for a context save sequence.
 140	 *
  141	 *    For the present, this implementation will not set a global
 142	 *    watchdog timer, as virtualization & variable system load
 143	 *    may cause unpredictable execution times.
 144	 */
 145}
 146
 147static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
 148{
 149	/* Save, Step 5:
 150	 * Restore, Step 3:
 151	 *     Inhibit user-space access (if provided) to this
 152	 *     SPU by unmapping the virtual pages assigned to
 153	 *     the SPU memory-mapped I/O (MMIO) for problem
 154	 *     state. TBD.
 155	 */
 156}
 157
 158static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
 159{
 160	/* Save, Step 7:
 161	 * Restore, Step 5:
 162	 *     Set a software context switch pending flag.
 163	 *     Done above in Step 3 - disable_interrupts().
 164	 */
 165}
 166
 167static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
 168{
 169	struct spu_priv2 __iomem *priv2 = spu->priv2;
 170
 171	/* Save, Step 8:
 172	 *     Suspend DMA and save MFC_CNTL.
 173	 */
 174	switch (in_be64(&priv2->mfc_control_RW) &
 175	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
 176	case MFC_CNTL_SUSPEND_IN_PROGRESS:
 177		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 178				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 179				 MFC_CNTL_SUSPEND_COMPLETE);
 180		fallthrough;
 181	case MFC_CNTL_SUSPEND_COMPLETE:
 182		if (csa)
 183			csa->priv2.mfc_control_RW =
 184				in_be64(&priv2->mfc_control_RW) |
 185				MFC_CNTL_SUSPEND_DMA_QUEUE;
 186		break;
 187	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
 188		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
 189		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 190				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 191				 MFC_CNTL_SUSPEND_COMPLETE);
 192		if (csa)
 193			csa->priv2.mfc_control_RW =
 194				in_be64(&priv2->mfc_control_RW) &
 195				~MFC_CNTL_SUSPEND_DMA_QUEUE &
 196				~MFC_CNTL_SUSPEND_MASK;
 197		break;
 198	}
 199}
 200
 201static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
 202{
 203	struct spu_problem __iomem *prob = spu->problem;
 204
 205	/* Save, Step 9:
 206	 *     Save SPU_Runcntl in the CSA.  This value contains
 207	 *     the "Application Desired State".
 208	 */
 209	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
 210}
 211
 212static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
 213{
 214	/* Save, Step 10:
 215	 *     Save MFC_SR1 in the CSA.
 216	 */
 217	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
 218}
 219
 220static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
 221{
 222	struct spu_problem __iomem *prob = spu->problem;
 223
 224	/* Save, Step 11:
 225	 *     Read SPU_Status[R], and save to CSA.
 226	 */
 227	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
 228		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
 229	} else {
 230		u32 stopped;
 231
 232		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
 233		eieio();
 234		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
 235				SPU_STATUS_RUNNING);
 236		stopped =
 237		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
 238		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
 239		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
 240			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
 241		else
 242			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
 243	}
 244}
 245
 246static inline void save_mfc_stopped_status(struct spu_state *csa,
 247		struct spu *spu)
 248{
 249	struct spu_priv2 __iomem *priv2 = spu->priv2;
 250	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
 251			MFC_CNTL_DMA_QUEUES_EMPTY;
 252
 253	/* Save, Step 12:
 254	 *     Read MFC_CNTL[Ds].  Update saved copy of
 255	 *     CSA.MFC_CNTL[Ds].
 256	 *
 257	 * update: do the same with MFC_CNTL[Q].
 258	 */
 259	csa->priv2.mfc_control_RW &= ~mask;
 260	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 261}
 262
 263static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
 264{
 265	struct spu_priv2 __iomem *priv2 = spu->priv2;
 266
 267	/* Save, Step 13:
 268	 *     Write MFC_CNTL[Dh] set to a '1' to halt
 269	 *     the decrementer.
 270	 */
 271	out_be64(&priv2->mfc_control_RW,
 272		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
 273	eieio();
 274}
 275
 276static inline void save_timebase(struct spu_state *csa, struct spu *spu)
 277{
 278	/* Save, Step 14:
 279	 *    Read PPE Timebase High and Timebase low registers
 280	 *    and save in CSA.  TBD.
 281	 */
 282	csa->suspend_time = get_cycles();
 283}
 284
 285static inline void remove_other_spu_access(struct spu_state *csa,
 286					   struct spu *spu)
 287{
 288	/* Save, Step 15:
 289	 *     Remove other SPU access to this SPU by unmapping
 290	 *     this SPU's pages from their address space.  TBD.
 291	 */
 292}
 293
 294static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
 295{
 296	struct spu_problem __iomem *prob = spu->problem;
 297
 298	/* Save, Step 16:
 299	 * Restore, Step 11.
 300	 *     Write SPU_MSSync register. Poll SPU_MSSync[P]
 301	 *     for a value of 0.
 302	 */
 303	out_be64(&prob->spc_mssync_RW, 1UL);
 304	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
 305}
 306
 307static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
 308{
 309	/* Save, Step 17:
 310	 * Restore, Step 12.
 311	 * Restore, Step 48.
 312	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
 313	 *     Then issue a PPE sync instruction.
 314	 */
 315	spu_tlb_invalidate(spu);
 316	mb();
 317}
 318
 319static inline void handle_pending_interrupts(struct spu_state *csa,
 320					     struct spu *spu)
 321{
 322	/* Save, Step 18:
 323	 *     Handle any pending interrupts from this SPU
 324	 *     here.  This is OS or hypervisor specific.  One
 325	 *     option is to re-enable interrupts to handle any
 326	 *     pending interrupts, with the interrupt handlers
 327	 *     recognizing the software Context Switch Pending
 328	 *     flag, to ensure the SPU execution or MFC command
 329	 *     queue is not restarted.  TBD.
 330	 */
 331}
 332
 333static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
 334{
 335	struct spu_priv2 __iomem *priv2 = spu->priv2;
 336	int i;
 337
 338	/* Save, Step 19:
 339	 *     If MFC_Cntl[Se]=0 then save
 340	 *     MFC command queues.
 341	 */
 342	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
 343		for (i = 0; i < 8; i++) {
 344			csa->priv2.puq[i].mfc_cq_data0_RW =
 345			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
 346			csa->priv2.puq[i].mfc_cq_data1_RW =
 347			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
 348			csa->priv2.puq[i].mfc_cq_data2_RW =
 349			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
 350			csa->priv2.puq[i].mfc_cq_data3_RW =
 351			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
 352		}
 353		for (i = 0; i < 16; i++) {
 354			csa->priv2.spuq[i].mfc_cq_data0_RW =
 355			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
 356			csa->priv2.spuq[i].mfc_cq_data1_RW =
 357			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
 358			csa->priv2.spuq[i].mfc_cq_data2_RW =
 359			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
 360			csa->priv2.spuq[i].mfc_cq_data3_RW =
 361			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
 362		}
 363	}
 364}
 365
 366static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
 367{
 368	struct spu_problem __iomem *prob = spu->problem;
 369
 370	/* Save, Step 20:
 371	 *     Save the PPU_QueryMask register
 372	 *     in the CSA.
 373	 */
 374	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
 375}
 376
 377static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
 378{
 379	struct spu_problem __iomem *prob = spu->problem;
 380
 381	/* Save, Step 21:
 382	 *     Save the PPU_QueryType register
 383	 *     in the CSA.
 384	 */
 385	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
 386}
 387
 388static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
 389{
 390	struct spu_problem __iomem *prob = spu->problem;
 391
 392	/* Save the Prxy_TagStatus register in the CSA.
 393	 *
 394	 * It is unnecessary to restore dma_tagstatus_R, however,
 395	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
 396	 * we must save it.
 397	 */
 398	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
 399}
 400
 401static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
 402{
 403	struct spu_priv2 __iomem *priv2 = spu->priv2;
 404
 405	/* Save, Step 22:
 406	 *     Save the MFC_CSR_TSQ register
 407	 *     in the LSCSA.
 408	 */
 409	csa->priv2.spu_tag_status_query_RW =
 410	    in_be64(&priv2->spu_tag_status_query_RW);
 411}
 412
 413static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
 414{
 415	struct spu_priv2 __iomem *priv2 = spu->priv2;
 416
 417	/* Save, Step 23:
 418	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
 419	 *     registers in the CSA.
 420	 */
 421	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
 422	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
 423}
 424
 425static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
 426{
 427	struct spu_priv2 __iomem *priv2 = spu->priv2;
 428
 429	/* Save, Step 24:
 430	 *     Save the MFC_CSR_ATO register in
 431	 *     the CSA.
 432	 */
 433	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
 434}
 435
 436static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
 437{
 438	/* Save, Step 25:
 439	 *     Save the MFC_TCLASS_ID register in
 440	 *     the CSA.
 441	 */
 442	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
 443}
 444
 445static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
 446{
 447	/* Save, Step 26:
 448	 * Restore, Step 23.
 449	 *     Write the MFC_TCLASS_ID register with
 450	 *     the value 0x10000000.
 451	 */
 452	spu_mfc_tclass_id_set(spu, 0x10000000);
 453	eieio();
 454}
 455
 456static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
 457{
 458	struct spu_priv2 __iomem *priv2 = spu->priv2;
 459
 460	/* Save, Step 27:
 461	 * Restore, Step 14.
 462	 *     Write MFC_CNTL[Pc]=1 (purge queue).
 463	 */
 464	out_be64(&priv2->mfc_control_RW,
 465			MFC_CNTL_PURGE_DMA_REQUEST |
 466			MFC_CNTL_SUSPEND_MASK);
 467	eieio();
 468}
 469
 470static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
 471{
 472	struct spu_priv2 __iomem *priv2 = spu->priv2;
 473
 474	/* Save, Step 28:
 475	 *     Poll MFC_CNTL[Ps] until value '11' is read
 476	 *     (purge complete).
 477	 */
 478	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 479			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
 480			 MFC_CNTL_PURGE_DMA_COMPLETE);
 481}
 482
 483static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
 484{
 485	/* Save, Step 30:
 486	 * Restore, Step 18:
 487	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
 488	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
 489	 *     OS specific environment.
 490	 *
 491	 *     Implementation note: The SPU-side code
 492	 *     for save/restore is privileged, so the
 493	 *     MFC_SR1[Pr] bit is not set.
 494	 *
 495	 */
 496	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
 497			      MFC_STATE1_RELOCATE_MASK |
 498			      MFC_STATE1_BUS_TLBIE_MASK));
 499}
 500
 501static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
 502{
 503	struct spu_problem __iomem *prob = spu->problem;
 504
 505	/* Save, Step 31:
 506	 *     Save SPU_NPC in the CSA.
 507	 */
 508	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
 509}
 510
 511static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
 512{
 513	struct spu_priv2 __iomem *priv2 = spu->priv2;
 514
 515	/* Save, Step 32:
 516	 *     Save SPU_PrivCntl in the CSA.
 517	 */
 518	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
 519}
 520
 521static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
 522{
 523	struct spu_priv2 __iomem *priv2 = spu->priv2;
 524
 525	/* Save, Step 33:
 526	 * Restore, Step 16:
 527	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
 528	 */
 529	out_be64(&priv2->spu_privcntl_RW, 0UL);
 530	eieio();
 531}
 532
 533static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
 534{
 535	struct spu_priv2 __iomem *priv2 = spu->priv2;
 536
 537	/* Save, Step 34:
 538	 *     Save SPU_LSLR in the CSA.
 539	 */
 540	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
 541}
 542
 543static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
 544{
 545	struct spu_priv2 __iomem *priv2 = spu->priv2;
 546
 547	/* Save, Step 35:
 548	 * Restore, Step 17.
 549	 *     Reset SPU_LSLR.
 550	 */
 551	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
 552	eieio();
 553}
 554
 555static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
 556{
 557	struct spu_priv2 __iomem *priv2 = spu->priv2;
 558
 559	/* Save, Step 36:
 560	 *     Save SPU_Cfg in the CSA.
 561	 */
 562	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
 563}
 564
 565static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
 566{
 567	/* Save, Step 37:
 568	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
 569	 *     Not performed by this implementation.
 570	 */
 571}
 572
 573static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
 574{
 575	/* Save, Step 38:
 576	 *     Save RA_GROUP_ID register and the
  577	 *     RA_ENABLE register in the CSA.
 578	 */
 579	csa->priv1.resource_allocation_groupID_RW =
 580		spu_resource_allocation_groupID_get(spu);
 581	csa->priv1.resource_allocation_enable_RW =
 582		spu_resource_allocation_enable_get(spu);
 583}
 584
 585static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
 586{
 587	struct spu_problem __iomem *prob = spu->problem;
 588
 589	/* Save, Step 39:
 590	 *     Save MB_Stat register in the CSA.
 591	 */
 592	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
 593}
 594
 595static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
 596{
 597	struct spu_problem __iomem *prob = spu->problem;
 598
 599	/* Save, Step 40:
 600	 *     Save the PPU_MB register in the CSA.
 601	 */
 602	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
 603}
 604
 605static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
 606{
 607	struct spu_priv2 __iomem *priv2 = spu->priv2;
 608
 609	/* Save, Step 41:
 610	 *     Save the PPUINT_MB register in the CSA.
 611	 */
 612	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
 613}
 614
 615static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
 616{
 617	struct spu_priv2 __iomem *priv2 = spu->priv2;
 618	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
 619	int i;
 620
 621	/* Save, Step 42:
 622	 */
 623
 624	/* Save CH 1, without channel count */
 625	out_be64(&priv2->spu_chnlcntptr_RW, 1);
 626	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
 627
 628	/* Save the following CH: [0,3,4,24,25,27] */
 629	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
 630		idx = ch_indices[i];
 631		out_be64(&priv2->spu_chnlcntptr_RW, idx);
 632		eieio();
 633		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
 634		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
 635		out_be64(&priv2->spu_chnldata_RW, 0UL);
 636		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
 637		eieio();
 638	}
 639}
 640
 641static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
 642{
 643	struct spu_priv2 __iomem *priv2 = spu->priv2;
 644	int i;
 645
 646	/* Save, Step 43:
 647	 *     Save SPU Read Mailbox Channel.
 648	 */
 649	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
 650	eieio();
 651	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
 652	for (i = 0; i < 4; i++) {
 653		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
 654	}
 655	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
 656	eieio();
 657}
 658
 659static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
 660{
 661	struct spu_priv2 __iomem *priv2 = spu->priv2;
 662
 663	/* Save, Step 44:
 664	 *     Save MFC_CMD Channel.
 665	 */
 666	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
 667	eieio();
 668	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
 669	eieio();
 670}
 671
 672static inline void reset_ch(struct spu_state *csa, struct spu *spu)
 673{
 674	struct spu_priv2 __iomem *priv2 = spu->priv2;
 675	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
 676	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
 677	u64 idx;
 678	int i;
 679
 680	/* Save, Step 45:
 681	 *     Reset the following CH: [21, 23, 28, 30]
 682	 */
 683	for (i = 0; i < 4; i++) {
 684		idx = ch_indices[i];
 685		out_be64(&priv2->spu_chnlcntptr_RW, idx);
 686		eieio();
 687		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
 688		eieio();
 689	}
 690}
 691
 692static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
 693{
 694	struct spu_priv2 __iomem *priv2 = spu->priv2;
 695
 696	/* Save, Step 46:
 697	 * Restore, Step 25.
 698	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
 699	 */
 700	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
 701}
 702
 703static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
 704		unsigned int *code, int code_size)
 705{
 706	/* Save, Step 47:
 707	 * Restore, Step 30.
 708	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
 709	 *     register, then initialize SLB_VSID and SLB_ESID
 710	 *     to provide access to SPU context save code and
 711	 *     LSCSA.
 712	 *
 713	 *     This implementation places both the context
 714	 *     switch code and LSCSA in kernel address space.
 715	 *
  716	 *     Further, this implementation assumes that
  717	 *     MFC_SR1[R]=1 (in other words, it assumes that address
  718	 *     translation is desired by the OS environment).
 719	 */
 720	spu_invalidate_slbs(spu);
 721	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
 722}
 723
 724static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
 725{
 726	/* Save, Step 48:
 727	 * Restore, Step 23.
 728	 *     Change the software context switch pending flag
 729	 *     to context switch active.  This implementation does
  730	 *     not use a switch active flag.
 731	 *
 732	 * Now that we have saved the mfc in the csa, we can add in the
 733	 * restart command if an exception occurred.
 734	 */
 735	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
 736		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
 737	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 738	mb();
 739}
 740
 741static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
 742{
 743	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
 744	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
 745
 746	/* Save, Step 49:
 747	 * Restore, Step 22:
 748	 *     Reset and then enable interrupts, as
 749	 *     needed by OS.
 750	 *
 751	 *     This implementation enables only class1
 752	 *     (translation) interrupts.
 753	 */
 754	spin_lock_irq(&spu->register_lock);
 755	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
 756	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
 757	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
 758	spu_int_mask_set(spu, 0, 0ul);
 759	spu_int_mask_set(spu, 1, class1_mask);
 760	spu_int_mask_set(spu, 2, 0ul);
 761	spin_unlock_irq(&spu->register_lock);
 762}
 763
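/* Helper used by the save and restore sequences below: issue an MFC DMA
 * transfer between local storage and effective address 'ea', splitting
 * the request into chunks of at most MFC_MAX_DMA_SIZE bytes and retrying
 * each enqueue while the MFC command status indicates the command was
 * not accepted.
 */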
 764static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
 765			       unsigned int ls_offset, unsigned int size,
 766			       unsigned int tag, unsigned int rclass,
 767			       unsigned int cmd)
 768{
 769	struct spu_problem __iomem *prob = spu->problem;
 770	union mfc_tag_size_class_cmd command;
 771	unsigned int transfer_size;
 772	volatile unsigned int status = 0x0;
 773
 774	while (size > 0) {
 775		transfer_size =
 776		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
 777		command.u.mfc_size = transfer_size;
 778		command.u.mfc_tag = tag;
 779		command.u.mfc_rclassid = rclass;
 780		command.u.mfc_cmd = cmd;
 781		do {
 782			out_be32(&prob->mfc_lsa_W, ls_offset);
 783			out_be64(&prob->mfc_ea_W, ea);
 784			out_be64(&prob->mfc_union_W.all64, command.all64);
 785			status =
 786			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
 787			if (unlikely(status & 0x2)) {
 788				cpu_relax();
 789			}
 790		} while (status & 0x3);
 791		size -= transfer_size;
 792		ea += transfer_size;
 793		ls_offset += transfer_size;
 794	}
 795	return 0;
 796}
 797
 798static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
 799{
 800	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
 801	unsigned int ls_offset = 0x0;
 802	unsigned int size = 16384;
 803	unsigned int tag = 0;
 804	unsigned int rclass = 0;
 805	unsigned int cmd = MFC_PUT_CMD;
 806
 807	/* Save, Step 50:
 808	 *     Issue a DMA command to copy the first 16K bytes
 809	 *     of local storage to the CSA.
 810	 */
 811	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
 812}
 813
 814static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
 815{
 816	struct spu_problem __iomem *prob = spu->problem;
 817
 818	/* Save, Step 51:
 819	 * Restore, Step 31.
 820	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
 821	 *     point address of context save code in local
 822	 *     storage.
 823	 *
 824	 *     This implementation uses SPU-side save/restore
 825	 *     programs with entry points at LSA of 0.
 826	 */
 827	out_be32(&prob->spu_npc_RW, 0);
 828	eieio();
 829}
 830
 831static inline void set_signot1(struct spu_state *csa, struct spu *spu)
 832{
 833	struct spu_problem __iomem *prob = spu->problem;
 834	union {
 835		u64 ull;
 836		u32 ui[2];
 837	} addr64;
 838
 839	/* Save, Step 52:
 840	 * Restore, Step 32:
 841	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
 842	 *    of the CSA.LSCSA effective address.
 843	 */
 844	addr64.ull = (u64) csa->lscsa;
 845	out_be32(&prob->signal_notify1, addr64.ui[0]);
 846	eieio();
 847}
 848
 849static inline void set_signot2(struct spu_state *csa, struct spu *spu)
 850{
 851	struct spu_problem __iomem *prob = spu->problem;
 852	union {
 853		u64 ull;
 854		u32 ui[2];
 855	} addr64;
 856
 857	/* Save, Step 53:
 858	 * Restore, Step 33:
 859	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
 860	 *    of the CSA.LSCSA effective address.
 861	 */
 862	addr64.ull = (u64) csa->lscsa;
 863	out_be32(&prob->signal_notify2, addr64.ui[1]);
 864	eieio();
 865}
 866
 867static inline void send_save_code(struct spu_state *csa, struct spu *spu)
 868{
 869	unsigned long addr = (unsigned long)&spu_save_code[0];
 870	unsigned int ls_offset = 0x0;
 871	unsigned int size = sizeof(spu_save_code);
 872	unsigned int tag = 0;
 873	unsigned int rclass = 0;
 874	unsigned int cmd = MFC_GETFS_CMD;
 875
 876	/* Save, Step 54:
 877	 *     Issue a DMA command to copy context save code
 878	 *     to local storage and start SPU.
 879	 */
 880	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
 881}
 882
 883static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
 884{
 885	struct spu_problem __iomem *prob = spu->problem;
 886
 887	/* Save, Step 55:
 888	 * Restore, Step 38.
 889	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
 890	 *     and issue eieio instruction.
 891	 */
 892	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
 893	eieio();
 894}
 895
 896static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
 897{
 898	struct spu_problem __iomem *prob = spu->problem;
 899	u32 mask = MFC_TAGID_TO_TAGMASK(0);
 900	unsigned long flags;
 901
 902	/* Save, Step 56:
 903	 * Restore, Step 39.
 905	 * Restore, Step 46.
 906	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
 907	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
 908	 *     Complete Interrupt.  Write INT_Stat_Class0 or
 909	 *     INT_Stat_Class2 with value of 'handled'.
 910	 */
 911	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
 912
 913	local_irq_save(flags);
 914	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
 915	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
 916	local_irq_restore(flags);
 917}
 918
 919static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
 920{
 921	struct spu_problem __iomem *prob = spu->problem;
 922	unsigned long flags;
 923
 924	/* Save, Step 57:
 925	 * Restore, Step 40.
 926	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
 927	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
 928	 *     or INT_Stat_class2 with value of handled.
 929	 */
 930	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 931
 932	local_irq_save(flags);
 933	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
 934	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
 935	local_irq_restore(flags);
 936}
 937
 938static inline int check_save_status(struct spu_state *csa, struct spu *spu)
 939{
 940	struct spu_problem __iomem *prob = spu->problem;
 941	u32 complete;
 942
 943	/* Save, Step 54:
 944	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
 945	 *     context save succeeded, otherwise context save
 946	 *     failed.
 947	 */
 948	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
 949		    SPU_STATUS_STOPPED_BY_STOP);
 950	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
 951}
 952
 953static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
 954{
 955	/* Restore, Step 4:
 956	 *    If required, notify the "using application" that
 957	 *    the SPU task has been terminated.  TBD.
 958	 */
 959}
 960
 961static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
 962		struct spu *spu)
 963{
 964	struct spu_priv2 __iomem *priv2 = spu->priv2;
 965
 966	/* Restore, Step 7:
 967	 *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
 968	 *     the queue and halt the decrementer.
 969	 */
 970	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
 971		 MFC_CNTL_DECREMENTER_HALTED);
 972	eieio();
 973}
 974
 975static inline void wait_suspend_mfc_complete(struct spu_state *csa,
 976					     struct spu *spu)
 977{
 978	struct spu_priv2 __iomem *priv2 = spu->priv2;
 979
 980	/* Restore, Step 8:
 981	 * Restore, Step 47.
 982	 *     Poll MFC_CNTL[Ss] until 11 is returned.
 983	 */
 984	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 985			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 986			 MFC_CNTL_SUSPEND_COMPLETE);
 987}
 988
 989static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
 990{
 991	struct spu_problem __iomem *prob = spu->problem;
 992
 993	/* Restore, Step 9:
 994	 *    If SPU_Status[R]=1, stop SPU execution
 995	 *    and wait for stop to complete.
 996	 *
 997	 *    Returns       1 if SPU_Status[R]=1 on entry.
 998	 *                  0 otherwise
 999	 */
1000	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
1001		if (in_be32(&prob->spu_status_R) &
1002		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
1003			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1004					SPU_STATUS_RUNNING);
1005		}
1006		if ((in_be32(&prob->spu_status_R) &
1007		     SPU_STATUS_ISOLATED_LOAD_STATUS)
1008		    || (in_be32(&prob->spu_status_R) &
1009			SPU_STATUS_ISOLATED_STATE)) {
1010			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1011			eieio();
1012			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1013					SPU_STATUS_RUNNING);
1014			out_be32(&prob->spu_runcntl_RW, 0x2);
1015			eieio();
1016			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1017					SPU_STATUS_RUNNING);
1018		}
1019		if (in_be32(&prob->spu_status_R) &
1020		    SPU_STATUS_WAITING_FOR_CHANNEL) {
1021			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1022			eieio();
1023			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1024					SPU_STATUS_RUNNING);
1025		}
1026		return 1;
1027	}
1028	return 0;
1029}
1030
1031static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1032{
1033	struct spu_problem __iomem *prob = spu->problem;
1034
1035	/* Restore, Step 10:
1036	 *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
1037	 *    release SPU from isolate state.
1038	 */
1039	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1040		if (in_be32(&prob->spu_status_R) &
1041		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
1042			spu_mfc_sr1_set(spu,
1043					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1044			eieio();
1045			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1046			eieio();
1047			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1048					SPU_STATUS_RUNNING);
1049		}
1050		if ((in_be32(&prob->spu_status_R) &
1051		     SPU_STATUS_ISOLATED_LOAD_STATUS)
1052		    || (in_be32(&prob->spu_status_R) &
1053			SPU_STATUS_ISOLATED_STATE)) {
1054			spu_mfc_sr1_set(spu,
1055					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1056			eieio();
1057			out_be32(&prob->spu_runcntl_RW, 0x2);
1058			eieio();
1059			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1060					SPU_STATUS_RUNNING);
1061		}
1062	}
1063}
1064
1065static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
1066{
1067	struct spu_priv2 __iomem *priv2 = spu->priv2;
1068	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1069	u64 idx;
1070	int i;
1071
1072	/* Restore, Step 20:
1073	 */
1074
1075	/* Reset CH 1 */
1076	out_be64(&priv2->spu_chnlcntptr_RW, 1);
1077	out_be64(&priv2->spu_chnldata_RW, 0UL);
1078
1079	/* Reset the following CH: [0,3,4,24,25,27] */
1080	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1081		idx = ch_indices[i];
1082		out_be64(&priv2->spu_chnlcntptr_RW, idx);
1083		eieio();
1084		out_be64(&priv2->spu_chnldata_RW, 0UL);
1085		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
1086		eieio();
1087	}
1088}
1089
1090static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
1091{
1092	struct spu_priv2 __iomem *priv2 = spu->priv2;
1093	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
1094	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
1095	u64 idx;
1096	int i;
1097
1098	/* Restore, Step 21:
1099	 *     Reset the following CH: [21, 23, 28, 29, 30]
1100	 */
1101	for (i = 0; i < 5; i++) {
1102		idx = ch_indices[i];
1103		out_be64(&priv2->spu_chnlcntptr_RW, idx);
1104		eieio();
1105		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1106		eieio();
1107	}
1108}
1109
1110static inline void setup_spu_status_part1(struct spu_state *csa,
1111					  struct spu *spu)
1112{
1113	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
1114	u32 status_I = SPU_STATUS_INVALID_INSTR;
1115	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
1116	u32 status_S = SPU_STATUS_SINGLE_STEP;
1117	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
1118	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
1119	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP;
1120	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR;
1121	u32 status_code;
1122
1123	/* Restore, Step 27:
1124	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
1125	 *     instruction sequence to the end of the SPU based restore
1126	 *     code (after the "context restored" stop and signal) to
1127	 *     restore the correct SPU status.
1128	 *
1129	 *     NOTE: Rather than modifying the SPU executable, we
1130	 *     instead add a new 'stopped_status' field to the
1131	 *     LSCSA.  The SPU-side restore reads this field and
1132	 *     takes the appropriate action when exiting.
1133	 */
1134
1135	status_code =
1136	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
1137	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
1138
1139		/* SPU_Status[P,I]=1 - Illegal Instruction followed
1140		 * by Stop and Signal instruction, followed by 'br -4'.
1141		 *
1142		 */
1143		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
1144		csa->lscsa->stopped_status.slot[1] = status_code;
1145
1146	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
1147
1148		/* SPU_Status[P,H]=1 - Halt Conditional, followed
1149		 * by Stop and Signal instruction, followed by
1150		 * 'br -4'.
1151		 */
1152		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
1153		csa->lscsa->stopped_status.slot[1] = status_code;
1154
1155	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
1156
1157		/* SPU_Status[S,P]=1 - Stop and Signal instruction
1158		 * followed by 'br -4'.
1159		 */
1160		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
1161		csa->lscsa->stopped_status.slot[1] = status_code;
1162
1163	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
1164
1165		/* SPU_Status[S,I]=1 - Illegal instruction followed
1166		 * by 'br -4'.
1167		 */
1168		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
1169		csa->lscsa->stopped_status.slot[1] = status_code;
1170
1171	} else if ((csa->prob.spu_status_R & status_P) == status_P) {
1172
1173		/* SPU_Status[P]=1 - Stop and Signal instruction
1174		 * followed by 'br -4'.
1175		 */
1176		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
1177		csa->lscsa->stopped_status.slot[1] = status_code;
1178
1179	} else if ((csa->prob.spu_status_R & status_H) == status_H) {
1180
1181		/* SPU_Status[H]=1 - Halt Conditional, followed
1182		 * by 'br -4'.
1183		 */
1184		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
1185
1186	} else if ((csa->prob.spu_status_R & status_S) == status_S) {
1187
1188		/* SPU_Status[S]=1 - Two nop instructions.
1189		 */
1190		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
1191
1192	} else if ((csa->prob.spu_status_R & status_I) == status_I) {
1193
1194		/* SPU_Status[I]=1 - Illegal instruction followed
1195		 * by 'br -4'.
1196		 */
1197		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
1198
1199	}
1200}
1201
1202static inline void setup_spu_status_part2(struct spu_state *csa,
1203					  struct spu *spu)
1204{
1205	u32 mask;
1206
1207	/* Restore, Step 28:
1208	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
1209	 *     add a 'br *' instruction to the end of
1210	 *     the SPU based restore code.
1211	 *
1212	 *     NOTE: Rather than modifying the SPU executable, we
1213	 *     instead add a new 'stopped_status' field to the
1214	 *     LSCSA.  The SPU-side restore reads this field and
1215	 *     takes the appropriate action when exiting.
1216	 */
1217	mask = SPU_STATUS_INVALID_INSTR |
1218	    SPU_STATUS_SINGLE_STEP |
1219	    SPU_STATUS_STOPPED_BY_HALT |
1220	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1221	if (!(csa->prob.spu_status_R & mask)) {
1222		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
1223	}
1224}
1225
1226static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
1227{
1228	/* Restore, Step 29:
1229	 *     Restore RA_GROUP_ID register and the
 1230	 *     RA_ENABLE register from the CSA.
1231	 */
1232	spu_resource_allocation_groupID_set(spu,
1233			csa->priv1.resource_allocation_groupID_RW);
1234	spu_resource_allocation_enable_set(spu,
1235			csa->priv1.resource_allocation_enable_RW);
1236}
1237
1238static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
1239{
1240	unsigned long addr = (unsigned long)&spu_restore_code[0];
1241	unsigned int ls_offset = 0x0;
1242	unsigned int size = sizeof(spu_restore_code);
1243	unsigned int tag = 0;
1244	unsigned int rclass = 0;
1245	unsigned int cmd = MFC_GETFS_CMD;
1246
1247	/* Restore, Step 37:
1248	 *     Issue MFC DMA command to copy context
1249	 *     restore code to local storage.
1250	 */
1251	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1252}
1253
1254static inline void setup_decr(struct spu_state *csa, struct spu *spu)
1255{
1256	/* Restore, Step 34:
1257	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
1258	 *     running) then adjust decrementer, set
1259	 *     decrementer running status in LSCSA,
1260	 *     and set decrementer "wrapped" status
1261	 *     in LSCSA.
1262	 */
1263	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
1264		cycles_t resume_time = get_cycles();
1265		cycles_t delta_time = resume_time - csa->suspend_time;
1266
1267		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
1268		if (csa->lscsa->decr.slot[0] < delta_time) {
1269			csa->lscsa->decr_status.slot[0] |=
1270				 SPU_DECR_STATUS_WRAPPED;
1271		}
1272
1273		csa->lscsa->decr.slot[0] -= delta_time;
1274	} else {
1275		csa->lscsa->decr_status.slot[0] = 0;
1276	}
1277}
1278
1279static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
1280{
1281	/* Restore, Step 35:
1282	 *     Copy the CSA.PU_MB data into the LSCSA.
1283	 */
1284	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
1285}
1286
1287static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
1288{
1289	/* Restore, Step 36:
1290	 *     Copy the CSA.PUINT_MB data into the LSCSA.
1291	 */
1292	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
1293}
1294
1295static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
1296{
1297	struct spu_problem __iomem *prob = spu->problem;
1298	u32 complete;
1299
1300	/* Restore, Step 40:
1301	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
1302	 *     context restore succeeded, otherwise context restore
1303	 *     failed.
1304	 */
1305	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
1306		    SPU_STATUS_STOPPED_BY_STOP);
1307	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
1308}
1309
1310static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
1311{
1312	struct spu_priv2 __iomem *priv2 = spu->priv2;
1313
1314	/* Restore, Step 41:
1315	 *     Restore SPU_PrivCntl from the CSA.
1316	 */
1317	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
1318	eieio();
1319}
1320
1321static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
1322{
1323	struct spu_problem __iomem *prob = spu->problem;
1324	u32 mask;
1325
1326	/* Restore, Step 42:
1327	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
1328	 *     restore the error or single step state.
1329	 */
1330	mask = SPU_STATUS_INVALID_INSTR |
1331	    SPU_STATUS_SINGLE_STEP |
1332	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
1333	if (csa->prob.spu_status_R & mask) {
1334		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1335		eieio();
1336		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1337				SPU_STATUS_RUNNING);
1338	}
1339}
1340
1341static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
1342{
1343	struct spu_problem __iomem *prob = spu->problem;
1344	u32 mask;
1345
1346	/* Restore, Step 43:
1347	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
1348	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
1349	 *     then write '00' to SPU_RunCntl[R0R1] and wait
1350	 *     for SPU_Status[R]=0.
1351	 */
1352	mask = SPU_STATUS_INVALID_INSTR |
1353	    SPU_STATUS_SINGLE_STEP |
1354	    SPU_STATUS_STOPPED_BY_HALT |
1355	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1356	if (!(csa->prob.spu_status_R & mask)) {
1357		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1358		eieio();
1359		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
1360				 SPU_STATUS_RUNNING);
1361		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1362		eieio();
1363		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1364				SPU_STATUS_RUNNING);
1365	}
1366}
1367
1368static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
1369{
1370	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
1371	unsigned int ls_offset = 0x0;
1372	unsigned int size = 16384;
1373	unsigned int tag = 0;
1374	unsigned int rclass = 0;
1375	unsigned int cmd = MFC_GET_CMD;
1376
1377	/* Restore, Step 44:
1378	 *     Issue a DMA command to restore the first
1379	 *     16kb of local storage from CSA.
1380	 */
1381	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1382}
1383
1384static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
1385{
1386	struct spu_priv2 __iomem *priv2 = spu->priv2;
1387
1388	/* Restore, Step 47.
1389	 *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
1390	 *     the queue.
1391	 */
1392	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
1393	eieio();
1394}
1395
1396static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1397{
1398	/* Restore, Step 49:
1399	 *     Write INT_MASK_class0 with value of 0.
1400	 *     Write INT_MASK_class1 with value of 0.
1401	 *     Write INT_MASK_class2 with value of 0.
1402	 *     Write INT_STAT_class0 with value of -1.
1403	 *     Write INT_STAT_class1 with value of -1.
1404	 *     Write INT_STAT_class2 with value of -1.
1405	 */
1406	spin_lock_irq(&spu->register_lock);
1407	spu_int_mask_set(spu, 0, 0ul);
1408	spu_int_mask_set(spu, 1, 0ul);
1409	spu_int_mask_set(spu, 2, 0ul);
1410	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
1411	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
1412	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
1413	spin_unlock_irq(&spu->register_lock);
1414}
1415
1416static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
1417{
1418	struct spu_priv2 __iomem *priv2 = spu->priv2;
1419	int i;
1420
1421	/* Restore, Step 50:
1422	 *     If MFC_Cntl[Se]!=0 then restore
1423	 *     MFC command queues.
1424	 */
1425	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
1426		for (i = 0; i < 8; i++) {
1427			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
1428				 csa->priv2.puq[i].mfc_cq_data0_RW);
1429			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
1430				 csa->priv2.puq[i].mfc_cq_data1_RW);
1431			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
1432				 csa->priv2.puq[i].mfc_cq_data2_RW);
1433			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
1434				 csa->priv2.puq[i].mfc_cq_data3_RW);
1435		}
1436		for (i = 0; i < 16; i++) {
1437			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
1438				 csa->priv2.spuq[i].mfc_cq_data0_RW);
1439			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
1440				 csa->priv2.spuq[i].mfc_cq_data1_RW);
1441			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
1442				 csa->priv2.spuq[i].mfc_cq_data2_RW);
1443			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
1444				 csa->priv2.spuq[i].mfc_cq_data3_RW);
1445		}
1446	}
1447	eieio();
1448}
1449
1450static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
1451{
1452	struct spu_problem __iomem *prob = spu->problem;
1453
1454	/* Restore, Step 51:
1455	 *     Restore the PPU_QueryMask register from CSA.
1456	 */
1457	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
1458	eieio();
1459}
1460
1461static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
1462{
1463	struct spu_problem __iomem *prob = spu->problem;
1464
1465	/* Restore, Step 52:
1466	 *     Restore the PPU_QueryType register from CSA.
1467	 */
1468	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
1469	eieio();
1470}
1471
1472static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
1473{
1474	struct spu_priv2 __iomem *priv2 = spu->priv2;
1475
1476	/* Restore, Step 53:
1477	 *     Restore the MFC_CSR_TSQ register from CSA.
1478	 */
1479	out_be64(&priv2->spu_tag_status_query_RW,
1480		 csa->priv2.spu_tag_status_query_RW);
1481	eieio();
1482}
1483
1484static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
1485{
1486	struct spu_priv2 __iomem *priv2 = spu->priv2;
1487
1488	/* Restore, Step 54:
1489	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
1490	 *     registers from CSA.
1491	 */
1492	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
1493	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
1494	eieio();
1495}
1496
1497static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
1498{
1499	struct spu_priv2 __iomem *priv2 = spu->priv2;
1500
1501	/* Restore, Step 55:
1502	 *     Restore the MFC_CSR_ATO register from CSA.
1503	 */
1504	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
1505}
1506
1507static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
1508{
1509	/* Restore, Step 56:
1510	 *     Restore the MFC_TCLASS_ID register from CSA.
1511	 */
1512	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
1513	eieio();
1514}
1515
1516static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
1517{
1518	u64 ch0_cnt, ch0_data;
1519	u64 ch1_data;
1520
1521	/* Restore, Step 57:
1522	 *    Set the Lock Line Reservation Lost Event by:
1523	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
1524	 *      2. If CSA.SPU_Channel_0_Count=0 and
1525	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
1526	 *         CSA.SPU_Event_Status[Lr]=0 then set
1527	 *         CSA.SPU_Event_Status_Count=1.
1528	 */
1529	ch0_cnt = csa->spu_chnlcnt_RW[0];
1530	ch0_data = csa->spu_chnldata_RW[0];
1531	ch1_data = csa->spu_chnldata_RW[1];
1532	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
1533	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
1534	    (ch1_data & MFC_LLR_LOST_EVENT)) {
1535		csa->spu_chnlcnt_RW[0] = 1;
1536	}
1537}
1538
1539static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1540{
1541	/* Restore, Step 58:
1542	 *     If the status of the CSA software decrementer
1543	 *     "wrapped" flag is set, OR in a '1' to
1544	 *     CSA.SPU_Event_Status[Tm].
1545	 */
1546	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
1547		return;
1548
1549	if ((csa->spu_chnlcnt_RW[0] == 0) &&
1550	    (csa->spu_chnldata_RW[1] & 0x20) &&
1551	    !(csa->spu_chnldata_RW[0] & 0x20))
1552		csa->spu_chnlcnt_RW[0] = 1;
1553
1554	csa->spu_chnldata_RW[0] |= 0x20;
1555}
1556
1557static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
1558{
1559	struct spu_priv2 __iomem *priv2 = spu->priv2;
1560	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1561	int i;
1562
1563	/* Restore, Step 59:
1564	 *	Restore the following CH: [0,3,4,24,25,27]
1565	 */
1566	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1567		idx = ch_indices[i];
1568		out_be64(&priv2->spu_chnlcntptr_RW, idx);
1569		eieio();
1570		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
1571		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
1572		eieio();
1573	}
1574}
1575
1576static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
1577{
1578	struct spu_priv2 __iomem *priv2 = spu->priv2;
1579	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
1580	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
1581	u64 idx;
1582	int i;
1583
1584	/* Restore, Step 60:
1585	 *     Restore the following CH: [9,21,23].
1586	 */
1587	ch_counts[0] = 1UL;
1588	ch_counts[1] = csa->spu_chnlcnt_RW[21];
1589	ch_counts[2] = 1UL;
1590	for (i = 0; i < 3; i++) {
1591		idx = ch_indices[i];
1592		out_be64(&priv2->spu_chnlcntptr_RW, idx);
1593		eieio();
1594		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1595		eieio();
1596	}
1597}
1598
1599static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
1600{
1601	struct spu_priv2 __iomem *priv2 = spu->priv2;
1602
1603	/* Restore, Step 61:
1604	 *     Restore the SPU_LSLR register from CSA.
1605	 */
1606	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
1607	eieio();
1608}
1609
1610static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
1611{
1612	struct spu_priv2 __iomem *priv2 = spu->priv2;
1613
1614	/* Restore, Step 62:
1615	 *     Restore the SPU_Cfg register from CSA.
1616	 */
1617	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
1618	eieio();
1619}
1620
1621static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
1622{
1623	/* Restore, Step 63:
1624	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
1625	 *     Not performed by this implementation.
1626	 */
1627}
1628
1629static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
1630{
1631	struct spu_problem __iomem *prob = spu->problem;
1632
1633	/* Restore, Step 64:
1634	 *     Restore SPU_NPC from CSA.
1635	 */
1636	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
1637	eieio();
1638}
1639
1640static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
1641{
1642	struct spu_priv2 __iomem *priv2 = spu->priv2;
1643	int i;
1644
1645	/* Restore, Step 65:
1646	 *     Restore MFC_RdSPU_MB from CSA.
1647	 */
1648	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
1649	eieio();
1650	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
1651	for (i = 0; i < 4; i++) {
1652		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
1653	}
1654	eieio();
1655}
1656
1657static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
1658{
1659	struct spu_problem __iomem *prob = spu->problem;
1660	u32 dummy = 0;
1661
1662	/* Restore, Step 66:
1663	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
1664	 *     read from the PPU_MB register.
1665	 */
1666	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
1667		dummy = in_be32(&prob->pu_mb_R);
1668		eieio();
1669	}
1670}
1671
1672static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1673{
1674	struct spu_priv2 __iomem *priv2 = spu->priv2;
1675	u64 dummy = 0UL;
1676
1677	/* Restore, Step 66:
1678	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
1679	 *     read from the PPUINT_MB register.
1680	 */
1681	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
1682		dummy = in_be64(&priv2->puint_mb_R);
1683		eieio();
1684		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
1685		eieio();
1686	}
1687}
1688
1689static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
1690{
1691	/* Restore, Step 69:
1692	 *     Restore the MFC_SR1 register from CSA.
1693	 */
1694	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
1695	eieio();
1696}
1697
1698static inline void set_int_route(struct spu_state *csa, struct spu *spu)
1699{
1700	struct spu_context *ctx = spu->ctx;
1701
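	/* Route this SPU's interrupts to the CPU where the context last ran. */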
1702	spu_cpu_affinity_set(spu, ctx->last_ran);
1703}
1704
1705static inline void restore_other_spu_access(struct spu_state *csa,
1706					    struct spu *spu)
1707{
1708	/* Restore, Step 70:
1709	 *     Restore other SPU mappings to this SPU. TBD.
1710	 */
1711}
1712
1713static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
1714{
1715	struct spu_problem __iomem *prob = spu->problem;
1716
1717	/* Restore, Step 71:
1718	 *     If CSA.SPU_Status[R]=1 then write
1719	 *     SPU_RunCntl[R0R1]='01'.
1720	 */
1721	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
1722		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1723		eieio();
1724	}
1725}
1726
1727static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
1728{
1729	struct spu_priv2 __iomem *priv2 = spu->priv2;
1730
1731	/* Restore, Step 72:
 1732	 *    Restore the MFC_CNTL register from the CSA.
1733	 */
1734	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
1735	eieio();
1736
1737	/*
1738	 * The queue is put back into the same state that was evident prior to
1739	 * the context switch. The suspend flag is added to the saved state in
1740	 * the csa, if the operational state was suspending or suspended. In
1741	 * this case, the code that suspended the mfc is responsible for
1742	 * continuing it. Note that SPE faults do not change the operational
1743	 * state of the spu.
1744	 */
1745}
1746
1747static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
1748{
1749	/* Restore, Step 73:
1750	 *     Enable user-space access (if provided) to this
1751	 *     SPU by mapping the virtual pages assigned to
1752	 *     the SPU memory-mapped I/O (MMIO) for problem
1753	 *     state. TBD.
1754	 */
1755}
1756
1757static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
1758{
1759	/* Restore, Step 74:
1760	 *     Reset the "context switch active" flag.
1761	 *     Not performed by this implementation.
1762	 */
1763}
1764
1765static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
1766{
1767	/* Restore, Step 75:
1768	 *     Re-enable SPU interrupts.
1769	 */
1770	spin_lock_irq(&spu->register_lock);
1771	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
1772	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
1773	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
1774	spin_unlock_irq(&spu->register_lock);
1775}
1776
1777static int quiece_spu(struct spu_state *prev, struct spu *spu)
1778{
1779	/*
1780	 * Combined steps 2-18 of SPU context save sequence, which
1781	 * quiesce the SPU state (disable SPU execution, MFC command
1782	 * queues, decre…

Large files are truncated; the remainder of this file is available in the repository linked above.