|
7 | 7 | #include <linux/blk-copy.h> |
8 | 8 | #include <linux/blk-mq.h> |
9 | 9 |
|
/**
 * struct blkdev_copy_onload_ctx - state of a single onloaded copy operation
 * @params: Data copy parameters (devices, ranges, completion callback).
 * @read_work: Work item that issues the read bio for the next chunk.
 * @write_work: Work item that writes out the data staged in @buf.
 * @buf: Bounce buffer the data is staged through (kmalloc'd, so
 *       physically contiguous — see bio_map_buf()).
 * @buf_len: Length in bytes of @buf.
 * @offset: Current copying offset in bytes. Range: [0, @params->len).
 * @chunk: Size in bytes of the chunk of data that is being copied.
 */
struct blkdev_copy_onload_ctx {
	struct blk_copy_params *params;
	struct work_struct read_work;
	struct work_struct write_work;
	void *buf;
	ssize_t buf_len;
	loff_t offset;
	loff_t chunk;
};
| 29 | + |
10 | 30 | /* End all bios in the @ctx->bios list with status @ctx->status. */ |
11 | 31 | static void blkdev_end_bios(struct bio_copy_offload_ctx *ctx) |
12 | 32 | { |
@@ -353,3 +373,212 @@ int blkdev_copy_offload(struct blk_copy_params *params) |
353 | 373 | return -EIOCBQUEUED; |
354 | 374 | } |
355 | 375 | EXPORT_SYMBOL_GPL(blkdev_copy_offload); |
| 376 | + |
| 377 | +static void *blkdev_copy_alloc_buf(size_t req_size, size_t *alloc_size) |
| 378 | +{ |
| 379 | + unsigned int min_size = PAGE_SIZE; |
| 380 | + char *buf; |
| 381 | + |
| 382 | + while (req_size >= min_size) { |
| 383 | + buf = kmalloc(req_size, GFP_NOIO | __GFP_NOWARN); |
| 384 | + if (buf) { |
| 385 | + *alloc_size = req_size; |
| 386 | + return buf; |
| 387 | + } |
| 388 | + req_size >>= 1; |
| 389 | + } |
| 390 | + |
| 391 | + return NULL; |
| 392 | +} |
| 393 | + |
| 394 | +static struct bio *bio_map_buf(void *buf, unsigned int len) |
| 395 | +{ |
| 396 | + struct page *page; |
| 397 | + struct bio *bio; |
| 398 | + static const uint16_t nr_vecs = 1; |
| 399 | + |
| 400 | + bio = bio_kmalloc(nr_vecs, GFP_NOIO); |
| 401 | + if (!bio) |
| 402 | + return NULL; |
| 403 | + bio_init_inline(bio, /*bdev=*/NULL, /*max_vecs=*/nr_vecs, /*opf=*/0); |
| 404 | + |
| 405 | + page = virt_to_page(buf); |
| 406 | + if (bio_add_page(bio, page, len, offset_in_page(buf)) < len) { |
| 407 | + /* we don't support partial mappings */ |
| 408 | + bio_uninit(bio); |
| 409 | + kfree(bio); |
| 410 | + WARN_ON_ONCE(true); |
| 411 | + return NULL; |
| 412 | + } |
| 413 | + |
| 414 | + return bio; |
| 415 | +} |
| 416 | + |
| 417 | +static void blkdev_write_done(struct bio *bio) |
| 418 | +{ |
| 419 | + struct blkdev_copy_onload_ctx *ctx = bio->bi_copy_ctx; |
| 420 | + struct blk_copy_params *params = ctx->params; |
| 421 | + blk_status_t sts = bio->bi_status; |
| 422 | + |
| 423 | + kfree(bio); |
| 424 | + |
| 425 | + if (sts) { |
| 426 | + params->status = sts; |
| 427 | + params->end_io(params); |
| 428 | + return; |
| 429 | + } |
| 430 | + |
| 431 | + ctx->offset += ctx->chunk; |
| 432 | + |
| 433 | + schedule_work(&ctx->read_work); |
| 434 | +} |
| 435 | + |
/*
 * Translate a linear copy @offset (bytes from the start of the copy) into a
 * position within the destination segment list. Returns 0 if @offset lies
 * beyond the last segment.
 *
 * NOTE(review): the return type is sector_t, yet the value is built as
 * out_segs[].pos plus a byte offset and the caller shifts it right by
 * SECTOR_SHIFT — confirm .pos is in bytes, otherwise the caller's shift is
 * wrong.
 * NOTE(review): unlike blkdev_offset_to_in_pos(), this does not clamp the
 * chunk to the segment's remaining length — verify a chunk can never
 * straddle two output segments.
 */
static sector_t blkdev_offset_to_out_pos(const struct blk_copy_params *params,
					 loff_t offset)
{
	for (int i = 0; i < params->out_nseg; i++) {
		loff_t rem = params->out_segs[i].len - offset;

		if (rem > 0)
			return params->out_segs[i].pos + offset;
		offset -= params->out_segs[i].len;
	}
	return 0;
}
| 448 | + |
| 449 | +static void blkdev_write_work(struct work_struct *work) |
| 450 | +{ |
| 451 | + struct blkdev_copy_onload_ctx *ctx = |
| 452 | + container_of(work, typeof(*ctx), read_work); |
| 453 | + struct blk_copy_params *params = ctx->params; |
| 454 | + struct bio *bio; |
| 455 | + loff_t out_pos; |
| 456 | + |
| 457 | + out_pos = blkdev_offset_to_out_pos(params, ctx->offset); |
| 458 | + |
| 459 | + bio = bio_map_buf(ctx->buf, ctx->buf_len); |
| 460 | + if (!bio) { |
| 461 | + params->status = BLK_STS_AGAIN; |
| 462 | + params->end_io(params); |
| 463 | + return; |
| 464 | + } |
| 465 | + bio->bi_opf = REQ_OP_WRITE; |
| 466 | + bio_set_dev(bio, params->out_bdev); |
| 467 | + bio->bi_iter.bi_sector = out_pos >> SECTOR_SHIFT; |
| 468 | + bio->bi_iter.bi_size = ctx->chunk; |
| 469 | + bio->bi_end_io = blkdev_write_done; |
| 470 | + bio->bi_copy_ctx = ctx; |
| 471 | + submit_bio(bio); |
| 472 | +} |
| 473 | + |
| 474 | +static void blkdev_read_done(struct bio *bio) |
| 475 | +{ |
| 476 | + struct blkdev_copy_onload_ctx *ctx = bio->bi_copy_ctx; |
| 477 | + struct blk_copy_params *params = ctx->params; |
| 478 | + blk_status_t sts = bio->bi_status; |
| 479 | + |
| 480 | + kfree(bio); |
| 481 | + |
| 482 | + if (sts) { |
| 483 | + params->status = sts; |
| 484 | + params->end_io(params); |
| 485 | + return; |
| 486 | + } |
| 487 | + |
| 488 | + schedule_work(&ctx->write_work); |
| 489 | +} |
| 490 | + |
/*
 * Translate a linear copy @offset (bytes from the start of the copy) into a
 * position within the source segment list, clamping *@chunk so it does not
 * run past the end of the containing segment. Sets *@chunk to 0 and returns
 * 0 if @offset lies beyond the last segment.
 *
 * NOTE(review): the return type is sector_t, yet the value is built as
 * in_segs[].pos plus a byte offset and the caller shifts it right by
 * SECTOR_SHIFT — confirm .pos is in bytes, otherwise the caller's shift is
 * wrong.
 */
static sector_t blkdev_offset_to_in_pos(const struct blk_copy_params *params,
					loff_t offset, loff_t *chunk)
{
	for (int i = 0; i < params->in_nseg; i++) {
		loff_t rem = params->in_segs[i].len - offset;

		if (rem > 0) {
			if (*chunk > rem)
				*chunk = rem;
			return params->in_segs[i].pos + offset;
		}
		offset -= params->in_segs[i].len;
	}
	*chunk = 0;
	return 0;
}
| 507 | + |
| 508 | +static void blkdev_read_work(struct work_struct *work) |
| 509 | +{ |
| 510 | + struct blkdev_copy_onload_ctx *ctx = |
| 511 | + container_of(work, typeof(*ctx), read_work); |
| 512 | + struct blk_copy_params *params = ctx->params; |
| 513 | + loff_t offset = ctx->offset; |
| 514 | + sector_t in_pos; |
| 515 | + struct bio *bio; |
| 516 | + |
| 517 | + ctx->chunk = min(ctx->buf_len, params->len - offset); |
| 518 | + if (ctx->chunk) |
| 519 | + in_pos = blkdev_offset_to_in_pos(params, offset, &ctx->chunk); |
| 520 | + if (ctx->chunk == 0) { |
| 521 | + params->end_io(params); |
| 522 | + return; |
| 523 | + } |
| 524 | + |
| 525 | + bio = bio_map_buf(ctx->buf, ctx->buf_len); |
| 526 | + if (!bio) { |
| 527 | + params->status = BLK_STS_AGAIN; |
| 528 | + params->end_io(params); |
| 529 | + return; |
| 530 | + } |
| 531 | + bio->bi_opf = REQ_OP_READ; |
| 532 | + bio_set_dev(bio, params->in_bdev); |
| 533 | + bio->bi_iter.bi_sector = in_pos >> SECTOR_SHIFT; |
| 534 | + bio->bi_iter.bi_size = ctx->chunk; |
| 535 | + bio->bi_end_io = blkdev_read_done; |
| 536 | + bio->bi_copy_ctx = ctx; |
| 537 | + submit_bio(bio); |
| 538 | +} |
| 539 | + |
| 540 | +/** |
| 541 | + * blkdev_copy_onload - asynchronously copy data between two block devices using |
| 542 | + * read and write operations. |
| 543 | + * @params: Input and output block devices, input and output ranges and |
| 544 | + * completion callback pointer. |
| 545 | + * Return: 0 upon success; -EIOCBQUEUED if the completion callback function will |
| 546 | + * be called or has already been called. |
| 547 | + */ |
| 548 | +int blkdev_copy_onload(struct blk_copy_params *params) |
| 549 | +{ |
| 550 | + loff_t max_hw_bytes = |
| 551 | + min(queue_max_hw_sectors(params->in_bdev->bd_queue), |
| 552 | + queue_max_hw_sectors(params->out_bdev->bd_queue)) << |
| 553 | + SECTOR_SHIFT; |
| 554 | + struct blkdev_copy_onload_ctx *ctx; |
| 555 | + loff_t len; |
| 556 | + int ret; |
| 557 | + |
| 558 | + ret = blkdev_copy_check_params(params, &len); |
| 559 | + if (ret) |
| 560 | + return ret; |
| 561 | + |
| 562 | + params->len = len; |
| 563 | + |
| 564 | + ctx = kzalloc_obj(*ctx); |
| 565 | + if (!ctx) |
| 566 | + return -ENOMEM; |
| 567 | + |
| 568 | + INIT_WORK(&ctx->read_work, blkdev_read_work); |
| 569 | + INIT_WORK(&ctx->write_work, blkdev_write_work); |
| 570 | + ctx->params = params; |
| 571 | + |
| 572 | + ctx->buf = blkdev_copy_alloc_buf(min(max_hw_bytes, len), &ctx->buf_len); |
| 573 | + if (!ctx->buf) |
| 574 | + goto err; |
| 575 | + |
| 576 | + blkdev_read_work(&ctx->read_work); |
| 577 | + |
| 578 | + return -EIOCBQUEUED; |
| 579 | + |
| 580 | +err: |
| 581 | + kfree(ctx); |
| 582 | + return -ENOMEM; |
| 583 | +} |
| 584 | +EXPORT_SYMBOL_GPL(blkdev_copy_onload); |
0 commit comments