
/drivers/scsi/bfa/bfa_ioc_ct.c

https://bitbucket.org/wisechild/galaxy-nexus
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)      \
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH    16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

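/*
 * Layout of the ioc_fail_sync register as encoded by the macros above:
 * the low 16 bits hold one "sync acked" bit per PCI function, and the
 * high 16 bits hold the matching "sync required" bits.  For example,
 * for PCI function 2:
 *
 *	bfa_ioc_ct_sync_pos(ioc)      == 0x00000004
 *	bfa_ioc_ct_sync_reqd_pos(ioc) == 0x00040000
 */
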
/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;

/*
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;

	ioc->ioc_hwif = &hwif_ct;
}

/*
 * Return true if the current driver's firmware matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/*
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/*
	 * If BIOS boot (flash based), do not increment the usage count.
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is uninitialized.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active.
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

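/*
 * Note: the usage count in ioc_usage_reg is guarded by the hardware
 * semaphore at ioc_usage_sem_reg (HOST_SEM1_REG, see reg_init below):
 * bfa_ioc_sem_get() (in bfa_ioc.c) acquires it, and writing 1 to the
 * semaphore register releases it, as in the lock above and the unlock
 * below.
 */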
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * Firmware lock is relevant only for CNA.
	 */
	if (!ioc->cna)
		return;

	/*
	 * If BIOS boot (flash based), do not decrement the usage count.
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * Decrement the usage count.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on heartbeat (HB) failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Read back to flush the posted writes so the halt takes effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

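/*
 * Example of the mappings above: PCI function 1 on port 0 uses
 * HOSTFN1_LPU0_MBOX0_CMD_STAT as its host-side mailbox command/status
 * register and LPU0_HOSTFN1_MBOX0_CMD_STAT as the LPU-side one; on
 * port 1 the same function gets the iocreg_mbcmd_p1[] pair instead
 * (see how reg_init below indexes by pcifn and selects by port_id).
 */
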
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * SRAM memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * Err set reg: for notification of HB failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
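
/*
 * FNC_PERS_REG packs one 8-bit personality field per PCI function and
 * FNC_PERS_FN_SHIFT() selects it; e.g. for PCI function 2 the shift is
 * 16, so its port-map and interrupt-mode bits sit in bits 16..23.
 * Both bfa_ioc_ct_map_port() and bfa_ioc_ct_isr_mode_set() below
 * decode this field.
 */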
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For Catapult, base the port id on the personality register and IOC type.
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time.  If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

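/*
 * A worked example of the handshake in bfa_ioc_ct_sync_complete()
 * (illustrative numbers, not from the source): suppose PCI functions
 * 0 and 1 have both joined the failure sync, so sync_reqd == 0x3, and
 * function 0 has already acked, so sync_ackd == 0x1.  When function 1
 * gets here it counts its own position as acked (0x3), sees
 * sync_reqd == sync_ackd, clears the acked half of ioc_fail_sync,
 * marks both IOC fwstate registers BFI_IOC_FAIL and returns BFA_TRUE
 * so that reinitialization can proceed.
 */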
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for the hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}

/*
 * Check the firmware state to know if pll_init has been completed already
 */
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(void __iomem *rb)
{
	if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
	    (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
		return BFA_TRUE;

	return BFA_FALSE;
}

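/*
 * One-time ASIC PLL setup.  The sequence below: select FC or FCoE
 * operating mode, force both IOC fwstates to UNINIT, mask and clear
 * host interrupts, program both app PLLs (the 312 MHz "slow" and
 * 425 MHz "fast" clocks, judging by the register names) with logic
 * soft reset asserted, enable them, delay, release the soft resets,
 * release the LMEM reset (and, in non-FC mode, pulse the PMM 1T
 * resets), and finally run the eDRAM built-in memory test (MBIST).
 */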
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
	u32	pll_sclk, pll_fclk, r32;

	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
			rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
			rb + APP_PLL_425_CTL_REG);
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
			rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
			rb + APP_PLL_425_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
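
/*
 * Illustrative call sequence (a sketch; the real sequencing lives in
 * bfa_ioc.c, not in this file):
 *
 *	bfa_ioc_set_ct_hwif(ioc);		hook CT-specific ops
 *	bfa_ioc_ct_pll_init(rb, fcmode);	one-time PLL/memory init
 *	ioc->ioc_hwif->ioc_reg_init(ioc);	per-function register map
 *
 * pll_init is presumably serialized across PCI functions through the
 * ioc_init_sem_reg semaphore so that only one function performs it;
 * bfa_ioc_ct_pll_init_complete() lets latecomers skip it.
 */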