Blame view
include/trace/events/block.h
18.2 KB
d0b6e04a4
|
1 2 |
#undef TRACE_SYSTEM #define TRACE_SYSTEM block |
55782138e
|
3 4 5 6 7 |
#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BLOCK_H #include <linux/blktrace_api.h> #include <linux/blkdev.h> |
5305cb830
|
8 |
#include <linux/buffer_head.h> |
55782138e
|
9 |
#include <linux/tracepoint.h> |
c09c47cae
|
10 |
#define RWBS_LEN 8 |
5305cb830
|
/*
 * Common event layout for buffer_head tracepoints: records the backing
 * device, starting block number and size of the buffer operated on.
 */
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry (
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  size_t,	size			)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);
77ca1e029
|
60 |
/*
 * Common event layout for request tracepoints that also record
 * rq->errors.  For REQ_TYPE_BLOCK_PC requests, sector and nr_sector
 * are reported as 0 and the raw command buffer is dumped via
 * blk_dump_cmd() instead.
 */
DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		errors			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
	),

	TP_fast_assign(
		/* dev is 0 when the request has no gendisk attached yet */
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);
881245dcf
|
/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
55782138e
|
107 |
|
881245dcf
|
/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q.  For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
55782138e
|
123 |
|
881245dcf
|
/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of operation request has been completed by the device driver.  If
 * the @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		 unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		errors			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		/* nr_sector reflects only the completed portion (nr_bytes) */
		__entry->nr_sector = nr_bytes >> 9;
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);
77ca1e029
|
168 |
/*
 * Common event layout for request insert/issue tracepoints.  Unlike
 * block_rq_with_error there is no errors field; instead the issuing
 * task's comm is recorded.  For REQ_TYPE_BLOCK_PC requests the byte
 * count is reported in @bytes and sector/nr_sector are 0; for normal
 * requests it is the other way around.
 */
DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  unsigned int,	bytes			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,         comm,   TASK_COMM_LEN   )
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
881245dcf
|
/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
881245dcf
|
/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
fe63b94a4
|
232 |
|
881245dcf
|
/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		/* bi_bdev may be NULL here; report dev 0 in that case */
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
881245dcf
|
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);
8c1cf6bb0
|
307 |
/*
 * Common event layout for bio merge tracepoints: records the bio being
 * merged (device, start sector, length, rwbs) and the current task's
 * comm.  Note @rq, the request merged into, is not recorded.
 */
DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
881245dcf
|
/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);
881245dcf
|
/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);
55782138e
|
364 |
|
881245dcf
|
/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
77ca1e029
|
399 |
/*
 * Common event layout for request-allocation tracepoints.  @bio may be
 * NULL (e.g. a barrier request), in which case dev/sector/nr_sector
 * are reported as 0.
 *
 * NOTE(review): blk_fill_rwbs() is passed __entry->nr_sector (sectors)
 * here, whereas the sibling events pass a byte count — confirm this is
 * intentional.
 */
DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
        ),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
881245dcf
|
/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);
55782138e
|
441 |
|
881245dcf
|
/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for an request struct to become
 * available.  This tracepoint event is generated each time the
 * process goes to sleep waiting for request struct become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);
881245dcf
|
/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver.  Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);
77ca1e029
|
483 |
/*
 * Common event layout for unplug tracepoints: records the number of
 * requests in the queue (@depth) and the current task's comm.  The
 * @explicit flag is accepted but not stored in the trace entry.
 */
DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
881245dcf
|
501 |
/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);
881245dcf
|
/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests.  The newly created @bio request starts at
 * @new_sector.  This split may be required due to hardware limitation
 * such as operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev				)
		__field( sector_t,	sector				)
		__field( sector_t,	new_sector			)
		__array( char,		rwbs,		RWBS_LEN	)
		__array( char,		comm,		TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);
881245dcf
|
556 |
/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		/* dev/sector describe the new (raw) location; old_* the original */
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
881245dcf
|
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped.  The block
 * operation request @rq holds the current information and @from hold
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__field( unsigned int,	nr_bios		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);
55782138e
|
642 643 644 645 |
#endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |