/*
 * drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
 * Source tree: https://gitlab.com/sunny256/linux
 */
  1/*
  2 * Copyright 2017 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Xiangliang.Yu@amd.com
 23 */
 24
 25#include "amdgpu.h"
 26#include "vi.h"
 27#include "bif/bif_5_0_d.h"
 28#include "bif/bif_5_0_sh_mask.h"
 29#include "vid.h"
 30#include "gca/gfx_8_0_d.h"
 31#include "gca/gfx_8_0_sh_mask.h"
 32#include "gmc_v8_0.h"
 33#include "gfx_v8_0.h"
 34#include "sdma_v3_0.h"
 35#include "tonga_ih.h"
 36#include "gmc/gmc_8_2_d.h"
 37#include "gmc/gmc_8_2_sh_mask.h"
 38#include "oss/oss_3_0_d.h"
 39#include "oss/oss_3_0_sh_mask.h"
 40#include "gca/gfx_8_0_sh_mask.h"
 41#include "dce/dce_10_0_d.h"
 42#include "dce/dce_10_0_sh_mask.h"
 43#include "smu/smu_7_1_3_d.h"
 44#include "mxgpu_vi.h"
 45
 46/* VI golden setting */
/*
 * Fiji MGCG/CGCG (clock-gating) init sequence for SR-IOV VFs.
 * Consumed by amdgpu_program_register_sequence(); entries are
 * {register, mask, value} triples — presumably AND-mask / OR-value
 * semantics, verify against amdgpu_program_register_sequence().
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	/* GRBM_GFX_INDEX re-selected before the SM/CP entries below */
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
 96
/*
 * Fiji golden register settings (rev a10) for SR-IOV VFs.
 * {register, mask, value} triples consumed by
 * amdgpu_program_register_sequence().
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
124
/*
 * Fiji common golden settings (raster config, SPI CU reservations).
 * {register, mask, value} triples consumed by
 * amdgpu_program_register_sequence().
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
137
/*
 * Tonga MGCG/CGCG (clock-gating) init sequence for SR-IOV VFs.
 * Unlike the Fiji table, this one programs the per-CU CGTS control
 * registers (CU0..CU7) individually.  Entries are {register, mask,
 * value} triples consumed by amdgpu_program_register_sequence().
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE,   0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL,        0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0,       0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4,        0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL,        0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0,       0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL,             0xffffffff, 0x00000100,
	/* GRBM_GFX_INDEX re-selected before the per-CU CGTS block */
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG,         0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL,     0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL,       0xffffffff, 0x0020003c,
	mmPCIE_INDEX,               0xffffffff, 0x0140001c,
	mmPCIE_DATA,                0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,          0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4,           0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL,   0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL,      0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS,          0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0,       0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL,    0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL,          0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL,           0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL,           0xff000ff0, 0x00000100,
};
227
/*
 * Tonga golden register settings (rev a11) for SR-IOV VFs.
 * {register, mask, value} triples consumed by
 * amdgpu_program_register_sequence().
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
267
/*
 * Tonga common golden settings (raster config, SPI CU reservations).
 * {register, mask, value} triples consumed by
 * amdgpu_program_register_sequence().
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX,               0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG,          0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1,        0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG,               0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
277
278void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
279{
280	switch (adev->asic_type) {
281	case CHIP_FIJI:
282		amdgpu_program_register_sequence(adev,
283						 xgpu_fiji_mgcg_cgcg_init,
284						 (const u32)ARRAY_SIZE(
285						 xgpu_fiji_mgcg_cgcg_init));
286		amdgpu_program_register_sequence(adev,
287						 xgpu_fiji_golden_settings_a10,
288						 (const u32)ARRAY_SIZE(
289						 xgpu_fiji_golden_settings_a10));
290		amdgpu_program_register_sequence(adev,
291						 xgpu_fiji_golden_common_all,
292						 (const u32)ARRAY_SIZE(
293						 xgpu_fiji_golden_common_all));
294		break;
295	case CHIP_TONGA:
296		amdgpu_program_register_sequence(adev,
297						 xgpu_tonga_mgcg_cgcg_init,
298						 (const u32)ARRAY_SIZE(
299						 xgpu_tonga_mgcg_cgcg_init));
300		amdgpu_program_register_sequence(adev,
301						 xgpu_tonga_golden_settings_a11,
302						 (const u32)ARRAY_SIZE(
303						 xgpu_tonga_golden_settings_a11));
304		amdgpu_program_register_sequence(adev,
305						 xgpu_tonga_golden_common_all,
306						 (const u32)ARRAY_SIZE(
307						 xgpu_tonga_golden_common_all));
308		break;
309	default:
310		BUG_ON("Doesn't support chip type.\n");
311		break;
312	}
313}
314
315/*
316 * Mailbox communication between GPU hypervisor and VFs
317 */
318static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
319{
320	u32 reg;
321	int timeout = VI_MAILBOX_TIMEDOUT;
322	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
323
324	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
325	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
326	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
327
328	/*Wait for RCV_MSG_VALID to be 0*/
329	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
330	while (reg & mask) {
331		if (timeout <= 0) {
332			pr_err("RCV_MSG_VALID is not cleared\n");
333			break;
334		}
335		mdelay(1);
336		timeout -=1;
337
338		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
339	}
340}
341
342static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
343{
344	u32 reg;
345
346	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
347	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
348			    TRN_MSG_VALID, val ? 1 : 0);
349	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
350}
351
352static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
353				      enum idh_request req)
354{
355	u32 reg;
356
357	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
358	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
359			    MSGBUF_DATA, req);
360	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
361
362	xgpu_vi_mailbox_set_valid(adev, true);
363}
364
365static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
366				   enum idh_event event)
367{
368	u32 reg;
369	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
370
371	/* workaround: host driver doesn't set VALID for CMPL now */
372	if (event != IDH_FLR_NOTIFICATION_CMPL) {
373		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
374		if (!(reg & mask))
375			return -ENOENT;
376	}
377
378	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
379	if (reg != event)
380		return -ENOENT;
381
382	/* send ack to PF */
383	xgpu_vi_mailbox_send_ack(adev);
384
385	return 0;
386}
387
388static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
389{
390	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
391	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
392	u32 reg;
393
394	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
395	while (!(reg & mask)) {
396		if (timeout <= 0) {
397			pr_err("Doesn't get ack from pf.\n");
398			r = -ETIME;
399			break;
400		}
401		mdelay(5);
402		timeout -= 5;
403
404		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
405	}
406
407	return r;
408}
409
410static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
411{
412	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
413
414	r = xgpu_vi_mailbox_rcv_msg(adev, event);
415	while (r) {
416		if (timeout <= 0) {
417			pr_err("Doesn't get ack from pf.\n");
418			r = -ETIME;
419			break;
420		}
421		mdelay(5);
422		timeout -= 5;
423
424		r = xgpu_vi_mailbox_rcv_msg(adev, event);
425	}
426
427	return r;
428}
429
430static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
431					enum idh_request request)
432{
433	int r;
434
435	xgpu_vi_mailbox_trans_msg(adev, request);
436
437	/* start to poll ack */
438	r = xgpu_vi_poll_ack(adev);
439	if (r)
440		return r;
441
442	xgpu_vi_mailbox_set_valid(adev, false);
443
444	/* start to check msg if request is idh_req_gpu_init_access */
445	if (request == IDH_REQ_GPU_INIT_ACCESS ||
446		request == IDH_REQ_GPU_FINI_ACCESS ||
447		request == IDH_REQ_GPU_RESET_ACCESS) {
448		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
449		if (r)
450			pr_err("Doesn't get ack from pf, continue\n");
451	}
452
453	return 0;
454}
455
456static int xgpu_vi_request_reset(struct amdgpu_device *adev)
457{
458	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
459}
460
461static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
462					   bool init)
463{
464	enum idh_request req;
465
466	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
467	return xgpu_vi_send_access_requests(adev, req);
468}
469
470static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
471					   bool init)
472{
473	enum idh_request req;
474	int r = 0;
475
476	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
477	r = xgpu_vi_send_access_requests(adev, req);
478
479	return r;
480}
481
482/* add support mailbox interrupts */
/* TRN_MSG_ACK interrupt handler — nothing to do beyond a debug trace. */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");

	return 0;
}
490
491static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
492				       struct amdgpu_irq_src *src,
493				       unsigned type,
494				       enum amdgpu_interrupt_state state)
495{
496	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
497
498	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
499			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
500	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
501
502	return 0;
503}
504
505static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
506{
507	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
508	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
509
510	/* wait until RCV_MSG become 3 */
511	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
512		pr_err("failed to recieve FLR_CMPL\n");
513		return;
514	}
515
516	/* Trigger recovery due to world switch failure */
517	amdgpu_sriov_gpu_reset(adev, NULL);
518}
519
520static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
521				       struct amdgpu_irq_src *src,
522				       unsigned type,
523				       enum amdgpu_interrupt_state state)
524{
525	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
526
527	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
528			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
529	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
530
531	return 0;
532}
533
/*
 * Mailbox receive interrupt handler.  When TDR (lockup timeout) is
 * disabled, checks for an FLR notification from the host and defers
 * the actual reset handling to flr_work.  Always returns 0.
 */
static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see what event we get */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now; other events are ignored */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}
552
/* IRQ source callbacks for the mailbox TRN_MSG_ACK interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
557
/* IRQ source callbacks for the mailbox RCV_MSG_VALID interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
562
/*
 * Wire the ack/receive mailbox interrupt sources to their callback
 * tables.  Both sources expose a single interrupt type.
 */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
570
/*
 * Register the two mailbox interrupt source ids with the IH.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	/* src id 135: presumably the mailbox "msg valid" source — confirm against the VI IV src-id list */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	/* src id 138: presumably the mailbox "msg ack" source — confirm against the VI IV src-id list */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
587
/*
 * Enable both mailbox interrupts and set up the FLR work item.
 * On partial failure the already-enabled rcv irq is released again.
 * Returns 0 on success or the amdgpu_irq_get() error.
 */
int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		/* undo the rcv_irq get so refcounts stay balanced */
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);

	return 0;
}
605
/* Disable both mailbox interrupts (counterpart of xgpu_vi_mailbox_get_irq). */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
611
/* SR-IOV virt ops exported for VI-family VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
	.reset_gpu		= xgpu_vi_request_reset,
	.trans_msg		= NULL, /* Does not need to trans VF errors to host. */
};