// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blkdev.h>

#include "../dm-core.h"
#include "pcache_internal.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "cache.h"
#include "dm_pcache.h"

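/* Free the resources allocated by backing_dev_init(). */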
static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
{
	kmem_cache_destroy(backing_dev->backing_req_cache);
}

static void req_submit_fn(struct work_struct *work);
static void req_complete_fn(struct work_struct *work);

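/*
 * Allocate the backing request cache and initialize the submit and
 * complete lists, their locks, and the work items that drain them.
 */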
static int backing_dev_init(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	backing_dev->backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
	if (!backing_dev->backing_req_cache) {
		ret = -ENOMEM;
		goto err;
	}

	INIT_LIST_HEAD(&backing_dev->submit_list);
	INIT_LIST_HEAD(&backing_dev->complete_list);
	spin_lock_init(&backing_dev->submit_lock);
	spin_lock_init(&backing_dev->complete_lock);
	INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
	INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);

	return 0;
err:
	return ret;
}

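/*
 * Open the backing block device via dm_get_device() and record its
 * size in sectors.
 */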
static int backing_dev_open(struct pcache_backing_dev *backing_dev, const char *path)
{
	struct dm_pcache *pcache = BACKING_DEV_TO_PCACHE(backing_dev);
	int ret;

	ret = dm_get_device(pcache->ti, path,
			    BLK_OPEN_READ | BLK_OPEN_WRITE, &backing_dev->dm_dev);
	if (ret) {
		pcache_dev_err(pcache, "failed to open dm_dev: %s: %d", path, ret);
		goto err;
	}
	backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);

	return 0;
err:
	return ret;
}

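/* Drop the device reference taken in backing_dev_open(). */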
static void backing_dev_close(struct pcache_backing_dev *backing_dev)
{
	struct dm_pcache *pcache = BACKING_DEV_TO_PCACHE(backing_dev);

	dm_put_device(pcache->ti, backing_dev->dm_dev);
}

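/*
 * Bring up the backing device: initialize request handling, then open
 * the device at @backing_dev_path.
 */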
int backing_dev_start(struct dm_pcache *pcache, const char *backing_dev_path)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = backing_dev_init(pcache);
	if (ret)
		goto err;

	ret = backing_dev_open(backing_dev, backing_dev_path);
	if (ret)
		goto destroy_backing_dev;

	return 0;

destroy_backing_dev:
	backing_dev_exit(backing_dev);
err:
	return ret;
}

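/*
 * Tear down the backing device. Both work items are flushed first, so
 * no request may remain queued on either list when the device is
 * closed.
 */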
void backing_dev_stop(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;

	flush_work(&backing_dev->req_submit_work);
	flush_work(&backing_dev->req_complete_work);

	/* There should be no inflight backing_dev_request */
	BUG_ON(!list_empty(&backing_dev->submit_list));
	BUG_ON(!list_empty(&backing_dev->complete_list));

	backing_dev_close(backing_dev);
	backing_dev_exit(backing_dev);
}

/* pcache_backing_dev_req functions */
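/*
 * Complete a backing request: invoke the caller's end_req callback,
 * drop the reference on the upper request (or free the bvec array for
 * KMEM requests), then release the request itself.
 */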
void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (backing_req->end_req)
		backing_req->end_req(backing_req, backing_req->ret);

	switch (backing_req->type) {
	case BACKING_DEV_REQ_TYPE_REQ:
		pcache_req_put(backing_req->req.upper_req, backing_req->ret);
		break;
	case BACKING_DEV_REQ_TYPE_KMEM:
		kfree(backing_req->kmem.bvecs);
		break;
	default:
		BUG();
	}

	kmem_cache_free(backing_dev->backing_req_cache, backing_req);
}

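/*
 * Work function that drains the complete_list. The list is spliced to
 * a local list under complete_lock (irqsave, since completions may
 * arrive from interrupt context), so requests can be ended without
 * holding the lock.
 */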
static void req_complete_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
	struct pcache_backing_dev_req *backing_req;
	unsigned long flags;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&backing_dev->complete_lock, flags);
	list_splice_init(&backing_dev->complete_list, &tmp_list);
	spin_unlock_irqrestore(&backing_dev->complete_lock, flags);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		backing_dev_req_end(backing_req);
	}
}

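/*
 * bi_end_io callback: record the bio status, move the request onto the
 * complete_list, and kick the completion worker.
 */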
static void backing_dev_bio_end(struct bio *bio)
{
	struct pcache_backing_dev_req *backing_req = bio->bi_private;
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	backing_req->ret = bio->bi_status;

	spin_lock(&backing_dev->complete_lock);
	list_move_tail(&backing_req->node, &backing_dev->complete_list);
	spin_unlock(&backing_dev->complete_lock);

	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
}

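/*
 * Work function that drains the submit_list, issuing each queued bio
 * with submit_bio_noacct().
 */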
static void req_submit_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock(&backing_dev->submit_lock);
	list_splice_init(&backing_dev->submit_list, &tmp_list);
	spin_unlock(&backing_dev->submit_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		submit_bio_noacct(&backing_req->bio);
	}
}

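/*
 * Submit a backing request. In the direct case the bio is issued from
 * the caller's context; otherwise it is queued on the submit_list and
 * issued later by the submit worker.
 */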
void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (direct) {
		submit_bio_noacct(&backing_req->bio);
		return;
	}

	spin_lock(&backing_dev->submit_lock);
	list_add_tail(&backing_req->node, &backing_dev->submit_list);
	spin_unlock(&backing_dev->submit_lock);

	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
}

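/*
 * Build a REQ-type backing request: clone the upper request's bio,
 * trim the clone to the sector-aligned range [req_off, req_off + len),
 * and hold a reference on the upper request until completion.
 */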
static struct pcache_backing_dev_req *req_type_req_create(struct pcache_backing_dev *backing_dev,
							  struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct pcache_backing_dev_req *backing_req;
	struct bio *clone, *orig = pcache_req->bio;
	u32 off = opts->req.req_off;
	u32 len = opts->req.len;
	int ret;

	backing_req = kmem_cache_zalloc(backing_dev->backing_req_cache, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	ret = bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);
	if (ret)
		goto err_free_req;

	backing_req->type = BACKING_DEV_REQ_TYPE_REQ;

	clone = &backing_req->bio;
	BUG_ON(off & SECTOR_MASK);
	BUG_ON(len & SECTOR_MASK);
	bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);

	clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
	clone->bi_private = backing_req;
	clone->bi_end_io = backing_dev_bio_end;

	backing_req->backing_dev = backing_dev;
	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;

	pcache_req_get(pcache_req);
	backing_req->req.upper_req = pcache_req;
	backing_req->req.bio_off = off;

	return backing_req;

err_free_req:
	kmem_cache_free(backing_dev->backing_req_cache, backing_req);
	return NULL;
}

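/*
 * Map a kernel buffer (kmalloc or vmalloc) into @bio page by page.
 * vmalloc addresses are flushed and translated with vmalloc_to_page();
 * the buffer must fit within the bio's preallocated bvecs.
 */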
static void bio_map(struct bio *bio, void *base, size_t size)
{
	if (is_vmalloc_addr(base))
		flush_kernel_vmap_range(base, size);

	while (size) {
		struct page *page = is_vmalloc_addr(base)
				    ? vmalloc_to_page(base)
				    : virt_to_page(base);
		unsigned int offset = offset_in_page(base);
		unsigned int len = min_t(size_t, PAGE_SIZE - offset, size);

		BUG_ON(!bio_add_page(bio, page, len, offset));
		size -= len;
		base += len;
	}
}

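/*
 * Build a KMEM-type backing request: allocate enough bvecs to cover
 * the kernel buffer, map it into a freshly initialized bio, and point
 * the bio at @backing_off on the backing device.
 */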
static struct pcache_backing_dev_req *kmem_type_req_create(struct pcache_backing_dev *backing_dev,
							   struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;
	struct bio *backing_bio;
	u32 n_vecs = DIV_ROUND_UP(opts->kmem.len, PAGE_SIZE);

	backing_req = kmem_cache_zalloc(backing_dev->backing_req_cache, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	backing_req->kmem.bvecs = kcalloc(n_vecs, sizeof(struct bio_vec), opts->gfp_mask);
	if (!backing_req->kmem.bvecs)
		goto err_free_req;

	backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;

	bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
		 n_vecs, opts->kmem.opf);

	backing_bio = &backing_req->bio;
	bio_map(backing_bio, opts->kmem.data, opts->kmem.len);

	backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
	backing_bio->bi_private = backing_req;
	backing_bio->bi_end_io = backing_dev_bio_end;

	backing_req->backing_dev = backing_dev;
	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;

	return backing_req;

err_free_req:
	kmem_cache_free(backing_dev->backing_req_cache, backing_req);
	return NULL;
}

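/* Dispatch request creation by type (REQ or KMEM). */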
struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
						      struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_create(backing_dev, opts);
	else if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_create(backing_dev, opts);

	return NULL;
}

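/* Flush the backing device's volatile write cache. */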
void backing_dev_flush(struct pcache_backing_dev *backing_dev)
{
	blkdev_issue_flush(backing_dev->dm_dev->bdev);
}