// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"
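
/* Completion path for a user data write: end the original bios attached to
 * each ring buffer entry covered by the request, free pages used for
 * padding, advance the sync pointer and release the request. Returns the
 * position of the next entry to be synced.
 */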
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct bio *original_bio;
        struct pblk_rb *rwb = &pblk->rwb;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;
                int pos = c_ctx->sentry + i;
                int flags;

                w_ctx = pblk_rb_w_ctx(rwb, pos);
                flags = READ_ONCE(w_ctx->flags);

                if (flags & PBLK_FLUSH_ENTRY) {
                        flags &= ~PBLK_FLUSH_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_dec(&rwb->inflight_flush_point);
#endif
                }

                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                    c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return ret;
}
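
/* Complete a write request that was queued out of order on the completion
 * list (see pblk_complete_write()).
 */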
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}
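
/* Writes must complete in order with respect to the ring buffer sync
 * pointer. If this request is not the next one in line, park it on
 * pblk->compl_list and drain the list once the preceding requests finish.
 */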
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
        pblk_up_rq(pblk, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
                               int rqd_ppas)
{
        struct pblk_line *line;
        struct ppa_addr map_ppa = *ppa;
        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
        __le64 *lba_list;
        u64 paddr;
        int done = 0;
        int n = 0;

        line = pblk_ppa_to_line(pblk, *ppa);
        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

        spin_lock(&line->lock);

        while (!done) {
                paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

                if (!test_and_set_bit(paddr, line->map_bitmap))
                        line->left_msecs--;

                if (n < rqd_ppas && lba_list[paddr] != addr_empty)
                        line->nr_valid_lbas--;

                lba_list[paddr] = addr_empty;

                if (!test_and_set_bit(paddr, line->invalid_bitmap))
                        le32_add_cpu(line->vsc, -1);

                done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

                n++;
        }

        line->w_err_gc->has_write_err = 1;
        spin_unlock(&line->lock);
}
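
/* Make the ring buffer entries of a failed write submittable again:
 * drop L2P mappings that have been overwritten in the meantime and
 * release the line references taken when the entries were first mapped.
 */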
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
                                  unsigned int nr_entries)
{
        struct pblk_rb *rb = &pblk->rwb;
        struct pblk_rb_entry *entry;
        struct pblk_line *line;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr ppa_l2p;
        int flags;
        unsigned int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_entries; i++) {
                entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
                w_ctx = &entry->w_ctx;

                /* Check if the lba has been overwritten */
                if (w_ctx->lba != ADDR_EMPTY) {
                        ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
                        if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
                                w_ctx->lba = ADDR_EMPTY;
                }

                /* Mark the entry as submittable again */
                flags = READ_ONCE(w_ctx->flags);
                flags |= PBLK_WRITTEN_DATA;
                /* Release flags on write context. Protect from writes */
                smp_store_release(&w_ctx->flags, flags);

                /* Decrease the reference count to the line as we will
                 * re-map these entries
                 */
                line = pblk_ppa_to_line(pblk, w_ctx->ppa);
                atomic_dec(&line->sec_to_update);
                kref_put(&line->ref, pblk_line_put);
        }
        spin_unlock(&pblk->trans_lock);
}
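
/* Queue a copy of the failed request's context so that the write thread
 * picks these sectors up again on its next pass (see pblk_submit_write()).
 */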
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *r_ctx;

        r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
        if (!r_ctx)
                return;

        r_ctx->lun_bitmap = NULL;
        r_ctx->sentry = c_ctx->sentry;
        r_ctx->nr_valid = c_ctx->nr_valid;
        r_ctx->nr_padded = c_ctx->nr_padded;

        spin_lock(&pblk->resubmit_lock);
        list_add_tail(&r_ctx->list, &pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
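
/* Recovery work for a failed user data write: log the error, invalidate
 * the remaining sectors of the affected chunk, queue the data for
 * resubmission and tear down the original request.
 */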
static void pblk_submit_rec(struct work_struct *work)
{
        struct pblk_rec_ctx *recovery =
                        container_of(work, struct pblk_rec_ctx, ws_rec);
        struct pblk *pblk = recovery->pblk;
        struct nvm_rq *rqd = recovery->rqd;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        pblk_log_write_err(pblk, rqd);

        pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
        pblk_queue_resubmit(pblk, c_ctx);

        pblk_up_rq(pblk, c_ctx->lun_bitmap);
        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                    c_ctx->nr_padded);
        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);
        mempool_free(recovery, &pblk->rec_pool);

        atomic_dec(&pblk->inflight_io);
        pblk_write_kick(pblk);
}
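
/* Called from the write end-of-IO path on error. Recovery is deferred to
 * a workqueue because parts of it (e.g., the GFP_KERNEL allocation in
 * pblk_queue_resubmit()) may sleep.
 */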
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_rec_ctx *recovery;

        recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pblk_err(pblk, "could not allocate recovery work\n");
                return;
        }

        recovery->pblk = pblk;
        recovery->rqd = rqd;

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);
}
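
/* End-of-IO handler for user data writes: kick off recovery on error,
 * otherwise complete the request in ring buffer order.
 */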
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_end_w_fail(pblk, rqd);
                return;
        } else {
                if (trace_pblk_chunk_state_enabled())
                        pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
        }

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}
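
/* End-of-IO handler for line metadata (emeta) writes. A write error only
 * marks the line so it can be recovered through GC; once all emeta sectors
 * of the line have synced, line closure is scheduled.
 */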
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int sync;

        pblk_up_chunk(pblk, ppa_list[0]);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
                line->w_err_gc->has_write_err = 1;
        } else {
                if (trace_pblk_chunk_state_enabled())
                        pblk_check_chunk_state_update(pblk, rqd);
        }

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                GFP_ATOMIC, pblk->close_wq);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
}
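
/* Fill in the fields common to all pblk write requests and allocate the
 * out-of-band metadata for the request.
 */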
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->is_seq = 1;
        rqd->private = pblk;
        rqd->end_io = end_io;

        return pblk_alloc_rqd_meta(pblk, rqd);
}
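
/* Set up a user data write: allocate the LUN bitmap used for the per-LUN
 * write semaphores and map the request's sectors to physical addresses,
 * scheduling an erase on the next line if blocks are still pending erase.
 */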
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                  valid, 0);
        else
                ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                        valid, erase_ppa);

        return ret;
}
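
/* Calculate how many sectors to submit with this request, honoring the
 * device's minimum write size and any outstanding flush point.
 */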
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}
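
/* Build and submit one emeta write for the given line, min_write_pgs
 * sectors at a time. The line is removed from the emeta list once the
 * whole emeta buffer has been submitted.
 */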
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct ppa_addr *ppa_list;
        struct pblk_g_ctx *m_ctx;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->csecs;
        data = ((void *)emeta->buf) + emeta->mem;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_rqd;

        ppa_list = nvm_rq_to_ppa_list(rqd);
        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        spin_lock(&l_mg->close_lock);
        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0])
                list_del(&meta_line->list);
        spin_unlock(&l_mg->close_lock);

        pblk_down_chunk(pblk, ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd, data);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        pblk_up_chunk(pblk, ppa_list[0]);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        /* Restore the line onto the emeta list it was taken from */
        list_add(&meta_line->list, &l_mg->emeta_list);
        spin_unlock(&l_mg->close_lock);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
                                       struct pblk_line *meta_line,
                                       struct nvm_rq *data_rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
        struct pblk_line *data_line = pblk_line_get_data(pblk);
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int pos_opt;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regards to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in metadata and data I/Os colliding.
         * In this case, modify the distance so that it is no longer optimal,
         * but moves toward the optimal in the right direction.
         */
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

        if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
                                test_bit(pos_opt, data_line->blk_bitmap))
                return true;

        if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
                data_line->meta_distance--;

        return false;
}
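
/* Return the line whose emeta should be written together with this data
 * request, or NULL if no emeta is pending or if scheduling it now would
 * not place it well relative to the data I/O.
 */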
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
                                                    struct nvm_rq *data_rqd)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (meta_line->emeta->mem >= lm->emeta_len[0]) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
                return NULL;

        return meta_line;
}
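
/* Submit one I/O set: the user data write itself, an asynchronous erase
 * on the next line if one is due, and an emeta write for a previous data
 * line if one can be scheduled alongside.
 */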
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr erase_ppa;
        struct pblk_line *meta_line;
        int err;

        pblk_ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
        if (err) {
                pblk_err(pblk, "could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        meta_line = pblk_should_submit_meta_io(pblk, rqd);

        /* Submit data write for current data line */
        err = pblk_submit_io(pblk, rqd, NULL);
        if (err) {
                pblk_err(pblk, "data I/O submission failed: %d\n", err);
                return NVM_IO_ERR;
        }

        if (!pblk_ppa_empty(erase_ppa)) {
                /* Submit erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        if (meta_line) {
                /* Submit metadata write for previous data line */
                err = pblk_submit_meta_io(pblk, meta_line);
                if (err) {
                        pblk_err(pblk, "metadata I/O submission failed: %d",
                                        err);
                        return NVM_IO_ERR;
                }
        }

        return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
                                    c_ctx->nr_padded);
}
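
/* Main write path: take sectors either from the resubmit list (failed
 * writes are retried first) or from the write buffer, pack them into a
 * bio and submit. *secs_left is set when work was submitted, so the
 * caller keeps iterating without going to sleep.
 */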
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush, packed_meta_pgs;
        unsigned long pos;
        unsigned int resubmit;

        *secs_left = 0;

        spin_lock(&pblk->resubmit_lock);
        resubmit = !list_empty(&pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

        /* Resubmit failed writes first */
        if (resubmit) {
                struct pblk_c_ctx *r_ctx;

                spin_lock(&pblk->resubmit_lock);
                r_ctx = list_first_entry(&pblk->resubmit_list,
                                        struct pblk_c_ctx, list);
                list_del(&r_ctx->list);
                spin_unlock(&pblk->resubmit_lock);

                secs_avail = r_ctx->nr_valid;
                pos = r_ctx->sentry;

                pblk_prepare_resubmit(pblk, pos, secs_avail);
                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                secs_avail);

                kfree(r_ctx);
        } else {
                /* If there are no sectors in the cache,
                 * flushes (bios without data) will be cleared on
                 * the cache threads
                 */
                secs_avail = pblk_rb_read_count(&pblk->rwb);
                if (!secs_avail)
                        return 0;

                secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
                if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
                        return 0;

                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                        secs_to_flush);
                if (secs_to_sync > pblk->max_write_pgs) {
                        pblk_err(pblk, "bad buffer sync calculation\n");
                        return 0;
                }

                secs_to_com = (secs_to_sync > secs_avail) ?
                        secs_avail : secs_to_sync;
                pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
        }

        packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
        bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
                                secs_avail)) {
                pblk_err(pblk, "corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        *secs_left = 1;
        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return -EINTR;
}
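
/* Write thread: drain the write buffer until the thread is stopped,
 * sleeping whenever there is nothing to submit and backing off after an
 * unrecoverable submission error.
 */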
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;
        int secs_left;
        int write_failure = 0;

        while (!kthread_should_stop()) {
                if (!write_failure) {
                        write_failure = pblk_submit_write(pblk, &secs_left);

                        if (secs_left)
                                continue;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}