/drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int…