PageRenderTime 46ms CodeModel.GetById 13ms app.highlight 28ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/arm/mach-omap2/sleep34xx.S

https://github.com/AICP/kernel_asus_grouper
Assembly | 614 lines | 599 code | 15 blank | 0 comment | 8 complexity | d4845d62fdc9702130ac78b994d7caa8 MD5 | raw file
  1/*
  2 * (C) Copyright 2007
  3 * Texas Instruments
  4 * Karthik Dasu <karthik-dp@ti.com>
  5 *
  6 * (C) Copyright 2004
  7 * Texas Instruments, <www.ti.com>
  8 * Richard Woodruff <r-woodruff2@ti.com>
  9 *
 10 * This program is free software; you can redistribute it and/or
 11 * modify it under the terms of the GNU General Public License as
 12 * published by the Free Software Foundation; either version 2 of
 13 * the License, or (at your option) any later version.
 14 *
 15 * This program is distributed in the hope that it will be useful,
 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 18 * GNU General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 23 * MA 02111-1307 USA
 24 */
 25#include <linux/linkage.h>
 26#include <asm/assembler.h>
 27#include <plat/sram.h>
 28#include <mach/io.h>
 29
 30#include "cm2xxx_3xxx.h"
 31#include "prm2xxx_3xxx.h"
 32#include "sdrc.h"
 33#include "control.h"
 34
 35/*
 36 * Registers access definitions
 37 */
/*
 * NOTE(review): by the file's naming convention, _P-suffixed macros appear
 * to be physical addresses (used while running with the MMU off, e.g. in
 * the OFF-mode resume path) and _V-suffixed ones virtual addresses --
 * confirm against plat/omap34xx.h before relying on this.
 */
/* Scratchpad semaphore word used to serialise with the secure side */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
/* PRM registers: previous power state of CORE, MPU power state control */
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
/* CM idle-status registers polled in the non-OFF resume path */
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
/* Control module: device type status, 36xx SDRAM RTA control */
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this as correct place is available */
#define SCRATCHPAD_MEM_OFFS	0x310	/* offset into the wakeup scratchpad */
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
/* SDRC registers touched around idle and by the es3_sdrc_fix workaround */
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
 66
 67/*
 68 * This file needs be built unconditionally as ARM to interoperate correctly
 69 * with non-Thumb-2-capable firmware.
 70 */
 71	.arm
 72
 73/*
 74 * API functions
 75 */
 76
 77	.text
 78/*
 79 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 80 * This function sets up a flag that will allow for this toggling to take
 81 * place on 3630. Hopefully some version in the future may not need this.
 82 */
/*
 * void enable_omap3630_toggle_l2_on_restore(void)
 *
 * Sets the l2dis_3630 flag (a word local to this file) to 1 so that the
 * omap3_restore path will disable and later re-enable the L2 cache.
 * Clobbers: r1, r2.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	adrl	r2, l2dis_3630	@ may be too distant for plain adr
	str	r1, [r2]	@ l2dis_3630 = 1
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
 91
	.text
/*
 * Function to call rom code to save secure ram context
 *
 * In:  r0 = physical SDRAM address to save the secure context to
 *      (stored into the second word of api_params below).
 * This code is copied to SRAM and run from there (see the _sz word at the
 * end), so the api_params address is rebased from its link-time location
 * into SRAM physical space before being handed to the ROM code.
 * Clobbers: r0-r3, r6, r12 (plus whatever the SMC monitor clobbers).
 */
	.align	3
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12		@ keep only the low 16 bits (offset)
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12		@ rebase params pointer into SRAM
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop				@ NOTE(review): nops after smc look like
	nop				@ required padding around the monitor
	nop				@ call -- confirm before removing
	nop
	ldmfd	sp!, {r4 - r11, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1	@ [1] is patched with the SDRAM addr
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context	@ byte size, for the SRAM copy
126
127/*
128 * ======================
129 * == Idle entry point ==
130 * ======================
131 */
132
133/*
134 * Forces OMAP into idle state
135 *
136 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
137 * and executes the WFI instruction. Calling WFI effectively changes the
138 * power domains states to the desired target power states.
139 *
140 *
141 * Notes:
142 * - only the minimum set of functions gets copied to internal SRAM at boot
143 *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
144 *   pointers in SDRAM or SRAM are called depending on the desired low power
145 *   target state.
146 * - when the OMAP wakes up it continues at different execution points
147 *   depending on the low power mode (non-OFF vs OFF modes),
148 *   cf. 'Resume path for xxx mode' comments.
149 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * r0 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (Even L1 is retained we clean it along with L2)
	 *   3 - Both L1 and L2 lost and logic lost
	 */

	/*
	 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
	 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
	 */
	ldr	r4, omap3_do_wfi_sram_addr
	ldr	r5, [r4]		@ r5 = address of the SRAM copy
	cmp	r0, #0x0		@ If no context save required,
	bxeq	r5			@  jump to the WFI code in SRAM


	/* Otherwise fall through to the save context code */
save_context_wfi:
	/*
	 * jump out to kernel flush routine
	 *  - reuse that code is better
	 *  - it executes in a cached space so is faster than refetch per-block
	 *  - should be faster and will change with kernel
	 *  - 'might' have to copy address, load and jump to it
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc			@ manual link (lr = addr after bx)
	bx	r1			@ call v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb				@ ensure the SCTLR write takes effect

	/*
	 * Invalidate L1 data cache. Even though only invalidate is
	 * necessary exported flush API is used here. Doing clean
	 * on already clean cache would be almost NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
	 * This sequence switches back to ARM.  Note that .align may insert a
	 * nop: bx pc needs to be word-aligned in order to work.
	 */
 THUMB(	.thumb		)
 THUMB(	.align		)
 THUMB(	bx	pc	)
 THUMB(	nop		)
	.arm

	b	omap3_do_wfi		@ OFF path: WFI executed from SDRAM

/*
 * Local variables
 */
omap3_do_wfi_sram_addr:
	.word omap3_do_wfi_sram		@ filled with the SRAM copy's address
kernel_flush:
	.word v7_flush_dcache_all
225
226/* ===================================
227 * == WFI instruction => Enter idle ==
228 * ===================================
229 */
230
231/*
232 * Do WFI instruction
233 * Includes the resume path for non-OFF modes
234 *
235 * This code gets copied to internal SRAM and is accessible
236 * from both SDRAM and SRAM:
237 * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
238 * - executed from SDRAM for OFF mode (omap3_do_wfi).
239 */
	.align	3
ENTRY(omap3_do_wfi)
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	/*
	 * NOTE(review): padding after WFI -- presumably covers the case
	 * where WFI falls through immediately (wake event already pending);
	 * confirm against the TRM/errata before changing.
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/*
 * This function implements the erratum ID i581 WA:
 *  SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1			@ bit 0: DPLL3 (core DPLL) locked?
	beq	wait_dpll3_lock		@ spin until locked

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2		@ bit 1: SDRC still idle/not ready?
	bne	wait_sdrc_ready		@ spin until SDRC reports ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40		@ undo the self-refresh-on-idle enable
	str	r5, [r4]

/*
 * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
 * base instead.
 * Be careful not to clobber r7 when maintaing this code.
 */

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4		@ DLLA lock-mode bit
	bne	exit_nonoff_modes	@ Return if locked
	/* wait till dll locks */
	adr	r7, kick_counter	@ r7 = base for counter stores (see above)
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1		@ bump diagnostic wait counter
	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8			@ poll budget before kicking the DLL
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll		@ timed out: disable/re-enable the DLL
	ldr	r5, [r4]
	and	r5, r5, #0x4		@ isolate DLLA lock status bit
	cmp	r5, #0x4
	bne	wait_dll_lock
	b	exit_nonoff_modes	@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb				@ complete the disable before re-enable
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1		@ bump diagnostic kick counter
	str	r4, [r7]		@ kick_counter
	b	wait_dll_lock_timed

exit_nonoff_modes:
	/* Re-enable C-bit if needed */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return

/*
 * Local variables
 */
sdrc_power:
	.word	SDRC_POWER_V
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
	/*
	 * When exporting to userspace while the counters are in SRAM,
	 * these 2 words need to be at the end to facilitate retrival!
	 */
kick_counter:
	.word	0			@ times the DLL had to be kicked
wait_dll_lock_counter:
	.word	0			@ times we entered the lock-wait loop

ENTRY(omap3_do_wfi_sz)
	.word	. - omap3_do_wfi	@ byte size, for the SRAM copy
390
391
392/*
393 * ==============================
394 * == Resume path for OFF mode ==
395 * ==============================
396 */
397
398/*
399 * The restore_* functions are called by the ROM code
400 *  when back from WFI in OFF mode.
401 * Cf. the get_*restore_pointer functions.
402 *
403 *  restore_es3: applies to 34xx >= ES3.0
404 *  restore_3630: applies to 36xx
405 *  restore: common code for 3xxx
406 *
407 * Note: when back from CORE and MPU OFF mode we are running
408 *  from SDRAM, without MMU, without the caches and prediction.
409 *  Also the SRAM content has been cleared.
410 */
/*
 * Resume entry for 34xx >= ES3.0, called by the ROM code after OFF mode.
 * If CORE was previously OFF, copies es3_sdrc_fix (erratum i443 WA) into
 * SRAM and runs it there before continuing with the common restore.
 */
ENTRY(omap3_restore_es3)
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3	@ low 2 bits = previous CORE power state
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2	@ bytes -> words (size is word-aligned)
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1		@ run es3_sdrc_fix from SRAM
	b	omap3_restore	@ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)
430
/*
 * Resume entry for 36xx, called by the ROM code after OFF mode.
 * If CORE was previously OFF, disables SDRAM RTA before falling through
 * to the common omap3_restore code (no return here: execution continues
 * past ENDPROC into omap3_restore).
 */
ENTRY(omap3_restore_3630)
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3	@ low 2 bits = previous CORE power state
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]
ENDPROC(omap3_restore_3630)
442
443	/* Fall through to common code for the remaining logic */
444
/*
 * Common 3xxx resume code, reached from the ROM code (directly or via the
 * es3/3630 variants above). Runs from SDRAM with MMU/caches off. Handles
 * L2 invalidation / aux-control restore (via SMC on HS devices, per the
 * device type read from CONTROL_STAT), then jumps to the generic
 * cpu_resume.
 */
ENTRY(omap3_restore)
	/*
	 * Read the pwstctrl register to check the reason for mpu reset.
	 * This tells us what was lost.
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3	@ low 2 bits = target MPU power state
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache (aux ctrl bit 1)
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700	@ extract device-type field
	cmp	r1, #0x300	@ GP device?
	beq	l2_inv_gp	@ GP: invalidate L2 with plain SMC services
	/* HS device: invalidate L2 through the PPA */
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
					@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov r12, #0x1			@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]		@ saved-register area in scratchpad
	ldr	r0, [r3,#4]		@ saved aux control value
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#12]		@ saved L2 aux control value
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:

	/* Now branch to the common CPU resume function */
	b	cpu_resume
ENDPROC(omap3_restore)

	.ltorg				@ flush assembler literal pool here

/*
 * Local variables
 */
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
l2dis_3630:
	.word	0			@ set by enable_omap3630_toggle_l2_on_restore
556
557/*
558 * Internal functions
559 */
560
561/*
562 * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
563 * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
564 */
	.text
	.align	3
/*
 * Erratum i443 workaround (34xx >= ES3.0), run from SRAM: clear the SDRC
 * "part access blocked" bit, perform a dummy read/write-back of the MR and
 * EMR2 registers of both chip-selects, and issue a manual autorefresh to
 * each, so the SDRC is sane before SDRAM is touched.
 * Clobbers: r4, r5.
 */
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

/*
 * Local variables
 */
	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix	@ byte size, for the SRAM copy