/include/trace/events/block.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		errors			)
		__array(  char,		rwbs,	6		)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs_rq(__entry->rwbs, rq);
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
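/*
 * Example: attaching a kernel probe to one of the events above. This is
 * an illustrative sketch, not part of the original header; it assumes
 * the register_trace_<event>() helpers generated for each DEFINE_EVENT()
 * and a probe signature matching TP_PROTO() (no private-data argument,
 * as in kernels of this vintage). blktrace (kernel/trace/blktrace.c)
 * hooks these events in exactly this way.
 *
 *	static void my_rq_complete(struct request_queue *q,
 *				   struct request *rq)
 *	{
 *		pr_info("complete: %llu + %u err=%d\n",
 *			(unsigned long long)blk_rq_pos(rq),
 *			blk_rq_sectors(rq), rq->errors);
 *	}
 *
 *	ret = register_trace_block_rq_complete(my_rq_complete);
 *	...
 *	unregister_trace_block_rq_complete(my_rq_complete);
 */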
DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  unsigned int,	bytes			)
		__array(  char,		rwbs,	6		)
		__array(  char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
		__entry->bytes     = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;

		blk_fill_rwbs_rq(__entry->rwbs, rq);
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
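/*
 * Example: these events can also be consumed from userspace without any
 * kernel code, through the generic event tracing interface (a sketch,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/block/block_bio_bounce/enable
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Each line of output is rendered using the TP_printk() format of the
 * corresponding event.
 */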
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);
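/*
 * Example: a probe for block_bio_queue, showing how the bio fields that
 * the block_bio event class records are derived (an illustrative sketch;
 * the field names are those of struct bio in this kernel era):
 *
 *	static void my_bio_queue(struct request_queue *q, struct bio *bio)
 *	{
 *		pr_info("queue: dev %d,%d sector %llu nr_sector %u\n",
 *			MAJOR(bio->bi_bdev->bd_dev),
 *			MINOR(bio->bi_bdev->bd_dev),
 *			(unsigned long long)bio->bi_sector,
 *			bio->bi_size >> 9);
 *	}
 *
 *	register_trace_block_bio_queue(my_bio_queue);
 */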
DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_sector : 0;
		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq	= q->rq.count[READ] + q->rq.count[WRITE];
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug_timer - timed release of operation requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
DEFINE_EVENT(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
);

/**
 * block_unplug_io - release of operation requests in request queue
 * @q: request queue to unplug
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug_io,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
);
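/*
 * Example output (illustrative, with made-up task and timestamp values):
 * with the plug/unplug events enabled, trace_pipe lines follow the
 * TP_printk() formats above, e.g.:
 *
 *	kjournald-1207  [000]  4012.276573: block_plug: [kjournald]
 *	kjournald-1207  [000]  4012.276665: block_unplug_io: [kjournald] 3
 *
 * where the trailing "3" is nr_rq, the number of read plus write
 * requests counted in the queue at unplug time.
 */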
/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operations crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev				)
		__field( sector_t,	sector				)
		__field( sector_t,	new_sector			)
		__array( char,		rwbs,		6		)
		__array( char,		comm,		TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_remap - map request for a partition to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a partition on a block device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
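/*
 * Example: decoding a block_remap line (hypothetical numbers). A 4 KiB
 * write to sector 2048 of /dev/sda1 (8,1), with the partition starting
 * at sector 2048 of /dev/sda (8,0), would be reported as:
 *
 *	8,0 W 4096 + 8 <- (8,1) 2048
 *
 * i.e. the new device and sector come first, and the original
 * (partition-relative) device and sector follow the "<-".
 */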
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs_rq(__entry->rwbs, rq);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
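/*
 * Example: a minimal module-level consumer of block_rq_remap (an
 * illustrative sketch, not shipped with the kernel; it assumes the
 * single-argument tracepoint registration API of this kernel era):
 *
 *	#include <linux/module.h>
 *	#include <linux/blkdev.h>
 *	#include <trace/events/block.h>
 *
 *	static void probe_rq_remap(struct request_queue *q,
 *				   struct request *rq, dev_t dev,
 *				   sector_t from)
 *	{
 *		pr_info("rq remap: %llu <- (%d,%d) %llu\n",
 *			(unsigned long long)blk_rq_pos(rq),
 *			MAJOR(dev), MINOR(dev),
 *			(unsigned long long)from);
 *	}
 *
 *	static int __init remap_watch_init(void)
 *	{
 *		return register_trace_block_rq_remap(probe_rq_remap);
 *	}
 *
 *	static void __exit remap_watch_exit(void)
 *	{
 *		unregister_trace_block_rq_remap(probe_rq_remap);
 *		tracepoint_synchronize_unregister();
 *	}
 *
 *	module_init(remap_watch_init);
 *	module_exit(remap_watch_exit);
 *	MODULE_LICENSE("GPL");
 */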