Diffstat (limited to 'drivers/lightnvm/rrpc.c')
 drivers/lightnvm/rrpc.c | 203
 1 file changed, 130 insertions(+), 73 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22de..3ab6495c3fd8 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+ div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
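The invalidate path now reduces a physical address modulo sectors per block, matching the sector-granular invalid_pages bitmap. A minimal userspace sketch of the same arithmetic, assuming an illustrative geometry (4 sectors per page, 256 pages per block):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed geometry, for illustration only */
	const uint32_t sec_per_pg = 4;
	const uint32_t pgs_per_blk = 256;
	const uint32_t sec_per_blk = sec_per_pg * pgs_per_blk; /* 1024 */

	uint64_t addr = 5000;	/* a sector-granular physical address */

	/* equivalent of div_u64_rem(addr, sec_per_blk, &pg_offset) */
	uint32_t pg_offset = (uint32_t)(addr % sec_per_blk);

	/* with the old pgs_per_blk divisor, distinct sectors of one
	 * block would alias onto the same bit and trip the WARN_ON */
	printf("bit %u of a %u-bit bitmap\n", pg_offset, sec_per_blk);
	return 0;
}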
@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->pgs_per_blk);
+ return (rblk->next_page == rrpc->dev->sec_per_blk);
}
+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+ struct nvm_block *blk = rblk->parent;
+ int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+ return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct nvm_block *blk = rblk->parent;
- return blk->id * rrpc->dev->pgs_per_blk;
+ return blk->id * rrpc->dev->sec_per_blk;
}
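block_to_addr() yields a device-global starting address, while the new block_to_rel_addr() first folds the block id back into the range of blocks owned by this target's LUNs, so the result can index a per-target table. A standalone sketch, with assumed (illustrative) geometry:

#include <stdio.h>
#include <stdint.h>

/* assumed geometry, for illustration only */
#define BLKS_PER_LUN	1024u
#define SEC_PER_BLK	1024u
#define NR_LUNS		2u	/* LUNs owned by this target */

/* global: absolute address of the block's first sector */
static uint64_t block_to_addr(uint64_t blk_id)
{
	return blk_id * SEC_PER_BLK;
}

/* relative: address within this target's slice of the device */
static uint64_t block_to_rel_addr(uint64_t blk_id)
{
	return (blk_id % (BLKS_PER_LUN * NR_LUNS)) * SEC_PER_BLK;
}

int main(void)
{
	uint64_t id = 2050;	/* a block past the target's LUN range */

	printf("global %llu, relative %llu\n",
	       (unsigned long long)block_to_addr(id),
	       (unsigned long long)block_to_rel_addr(id));
	return 0;
}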
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+ div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
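This hunk fixes a swapped divisor in the linear-to-generic decomposition: after the sector field is peeled off, the page field must be reduced modulo pages per block, not sectors per block. The same peel-off in plain C, under an assumed geometry:

#include <stdio.h>
#include <stdint.h>

/* assumed geometry, for illustration only */
#define SEC_PER_PG	4u
#define PGS_PER_BLK	256u

int main(void)
{
	uint64_t ppa = 123456;	/* linear sector address */
	uint32_t sec, pg;

	sec = ppa % SEC_PER_PG;		/* sector within the page */
	ppa /= SEC_PER_PG;
	pg = ppa % PGS_PER_BLK;		/* the fix: modulo pages/block */
	ppa /= PGS_PER_BLK;		/* remainder selects blk/lun/ch */

	printf("sec=%u pg=%u rest=%llu\n", sec, pg,
	       (unsigned long long)ppa);
	return 0;
}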
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
return NULL;
}
- rblk = &rlun->blocks[blk->id];
+ rblk = rrpc_get_rblk(rlun, blk->id);
list_add_tail(&rblk->list, &rlun->open_list);
spin_unlock(&lun->lock);
blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+ bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
struct bio *bio;
struct page *page;
int slot;
- int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+ int nr_sec_per_blk = rrpc->dev->sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
- if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+ if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
return 0;
bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_pgs_per_blk)) < nr_pgs_per_blk) {
+ nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+ phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
try:
spin_lock(&rrpc->rev_lock);
@@ -381,7 +391,7 @@ finished:
mempool_free(page, rrpc->page_pool);
bio_put(bio);
- if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+ if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
pr_err("nvm: failed to garbage collect block\n");
return -EIO;
}
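The GC loop treats invalid_pages as a per-sector bitmap and relocates every sector whose bit is still clear. A toy version of the scan, with a 64-bit word standing in for the kernel bitmap and a hand-rolled stand-in for find_first_zero_bit():

#include <stdio.h>
#include <stdint.h>

#define SEC_PER_BLK 64u	/* assumed, sized to one word here */

static unsigned find_first_zero(uint64_t map, unsigned nbits)
{
	for (unsigned i = 0; i < nbits; i++)
		if (!(map & (1ull << i)))
			return i;
	return nbits;
}

int main(void)
{
	/* all sectors invalid except slots 1 and 3 */
	uint64_t invalid = ~0ull & ~((1ull << 1) | (1ull << 3));
	unsigned slot;

	while ((slot = find_first_zero(invalid, SEC_PER_BLK)) < SEC_PER_BLK) {
		printf("move valid sector at slot %u\n", slot);
		invalid |= 1ull << slot;	/* sector handled */
	}
	return 0;
}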
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_lun *lun = rblk->parent->lun;
+ struct nvm_block *blk = rblk->parent;
struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
+ spin_lock(&lun->lock);
+ lun->nr_open_blocks--;
+ lun->nr_closed_blocks++;
+ blk->state &= ~NVM_BLK_ST_OPEN;
+ blk->state |= NVM_BLK_ST_CLOSED;
+ list_move_tail(&rblk->list, &rlun->closed_list);
+ spin_unlock(&lun->lock);
+
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
- BUG_ON(laddr >= rrpc->nr_pages);
+ BUG_ON(laddr >= rrpc->nr_sects);
gp = &rrpc->trans_map[laddr];
spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
- struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&lun->lock);
- lun->nr_open_blocks--;
- lun->nr_closed_blocks++;
- blk->state &= ~NVM_BLK_ST_OPEN;
- blk->state |= NVM_BLK_ST_CLOSED;
- list_move_tail(&rblk->list, &rlun->closed_list);
- spin_unlock(&lun->lock);
-
+ if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
rrpc_run_gc(rrpc, rblk);
- }
}
}
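The open-to-closed bookkeeping moves out of the write completion path and into rrpc_gc_queue() above; completion now only counts committed sectors and schedules GC once the block fills. A sketch of the counter logic, with an assumed block size:

#include <stdio.h>

#define SEC_PER_BLK 8u	/* assumed, for illustration */

int main(void)
{
	unsigned cmnt_size = 0;

	/* each completed write commits one sector; the block is handed
	 * to GC exactly once, when the last sector lands */
	for (unsigned i = 0; i < SEC_PER_BLK; i++) {
		cmnt_size++;	/* atomic_inc_return() in the driver */
		if (cmnt_size == SEC_PER_BLK)
			printf("block full at %u sectors, run GC\n",
			       cmnt_size);
	}
	return 0;
}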
@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+ BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
return NVM_IO_REQUEUE;
- BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+ BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
@@ -958,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work)
static void rrpc_gc_free(struct rrpc *rrpc)
{
- struct rrpc_lun *rlun;
- int i;
-
if (rrpc->krqd_wq)
destroy_workqueue(rrpc->krqd_wq);
if (rrpc->kgc_wq)
destroy_workqueue(rrpc->kgc_wq);
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- if (!rlun->blocks)
- break;
- vfree(rlun->blocks);
- }
}
static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1007,21 +1000,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
struct nvm_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
u64 elba = slba + nlb;
u64 i;
- if (unlikely(elba > dev->total_pages)) {
+ if (unlikely(elba > dev->total_secs)) {
pr_err("nvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
for (i = 0; i < nlb; i++) {
u64 pba = le64_to_cpu(entries[i]);
+ unsigned int mod;
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
- if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+ if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -1033,8 +1026,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
if (!pba)
continue;
+ div_u64_rem(pba, rrpc->nr_sects, &mod);
+
addr[i].addr = pba;
- raddr[pba].addr = slba + i;
+ raddr[mod].addr = slba + i;
}
return 0;
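The reverse translation table is sized per target, so a device-global pba must be reduced modulo nr_sects before indexing it. The remainder computation in isolation, with an assumed target size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t nr_sects = 4096;	/* assumed per-target size */
	uint64_t pba = 12345;		/* device-global physical addr */
	uint32_t mod;

	/* equivalent of div_u64_rem(pba, nr_sects, &mod) */
	mod = (uint32_t)(pba % nr_sects);

	printf("raddr[%u] <- lba for pba %llu\n", mod,
	       (unsigned long long)pba);
	return 0;
}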
@@ -1044,18 +1039,21 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
+ u64 slba;
int ret;
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+ slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
+ rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
if (!rrpc->trans_map)
return -ENOMEM;
rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_pages);
+ * rrpc->nr_sects);
if (!rrpc->rev_trans_map)
return -ENOMEM;
- for (i = 0; i < rrpc->nr_pages; i++) {
+ for (i = 0; i < rrpc->nr_sects; i++) {
struct rrpc_addr *p = &rrpc->trans_map[i];
struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
@@ -1067,8 +1065,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
+ rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
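Since the target now starts at an area offset kept in 512-byte units, the L2P table is fetched from slba instead of 0. The shift rrpc->soffset >> (ilog2(dev->sec_size) - 9) converts that offset into device sectors; a sketch assuming 4096-byte device sectors:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned sec_size = 4096;	/* assumed device sector size */
	uint64_t soffset = 65536;	/* area offset, 512-byte units */
	unsigned shift = 0;

	/* open-coded ilog2(sec_size) */
	for (unsigned s = sec_size; s > 1; s >>= 1)
		shift++;		/* 12 for 4096 */

	uint64_t slba = soffset >> (shift - 9);	/* / (4096 / 512) */

	printf("slba = %llu device sectors\n",
	       (unsigned long long)slba);	/* 8192 */
	return 0;
}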
@@ -1077,7 +1075,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
}
-
/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
@@ -1132,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvm_lun *lun;
+ struct rrpc_lun *rlun;
+ int i;
+
+ if (!rrpc->luns)
+ return;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ lun = rlun->parent;
+ if (!lun)
+ break;
+ dev->mt->release_lun(dev, lun->id);
+ vfree(rlun->blocks);
+ }
+
kfree(rrpc->luns);
}
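Teardown now pairs each reserve_lun() taken in rrpc_luns_init() (below) with a release_lun(), stopping at the first rlun whose parent was never set, which is exactly the state a failed init leaves behind. A toy model of that pairing; reserve_lun()/release_lun() here are local stand-ins, not the media manager's API:

#include <stdio.h>

#define NR_LUNS 4

static int reserved[NR_LUNS];	/* toy reservation table */

static int reserve_lun(int id)
{
	if (reserved[id])
		return -1;	/* already allocated */
	reserved[id] = 1;
	return 0;
}

static void release_lun(int id)
{
	reserved[id] = 0;
}

int main(void)
{
	int parent[NR_LUNS] = { 0 };	/* set once reserve+get succeed */
	int i;

	for (i = 0; i < NR_LUNS; i++) {
		if (reserve_lun(i))
			break;
		parent[i] = 1;
	}

	/* mirrors rrpc_luns_free: stop at the first lun that was
	 * never instantiated */
	for (i = 0; i < NR_LUNS; i++) {
		if (!parent[i])
			break;
		release_lun(i);
		printf("released lun %d\n", i);
	}
	return 0;
}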
@@ -1139,9 +1153,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
struct nvm_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
- int i, j;
+ int i, j, ret = -EINVAL;
- if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1155,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+ int lunid = lun_begin + i;
+ struct nvm_lun *lun;
- rlun = &rrpc->luns[i];
- rlun->rrpc = rrpc;
- rlun->parent = lun;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
+ if (dev->mt->reserve_lun(dev, lunid)) {
+ pr_err("rrpc: lun %u is already allocated\n", lunid);
+ goto err;
+ }
- rrpc->total_blocks += dev->blks_per_lun;
- rrpc->nr_pages += dev->sec_per_lun;
+ lun = dev->mt->get_lun(dev, lunid);
+ if (!lun)
+ goto err;
+ rlun = &rrpc->luns[i];
+ rlun->parent = lun;
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
rrpc->dev->blks_per_lun);
- if (!rlun->blocks)
+ if (!rlun->blocks) {
+ ret = -ENOMEM;
goto err;
+ }
for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1184,11 +1199,43 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
INIT_LIST_HEAD(&rblk->prio);
spin_lock_init(&rblk->lock);
}
+
+ rlun->rrpc = rrpc;
+ INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+
+ INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+ spin_lock_init(&rlun->lock);
+
+ rrpc->total_blocks += dev->blks_per_lun;
+ rrpc->nr_sects += dev->sec_per_lun;
+
}
return 0;
err:
- return -ENOMEM;
+ return ret;
+}
+
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_sects * dev->sec_size;
+
+ size >>= 9;
+
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
}
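rrpc_area_init() asks the media manager for the target's slice of the device, sized in 512-byte sectors. The size computation alone, under an assumed geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed geometry, for illustration only */
	const uint64_t nr_sects = 1ull << 20;	/* device sectors */
	const unsigned sec_size = 4096;		/* bytes per sector */

	uint64_t size = nr_sects * sec_size;	/* bytes */
	size >>= 9;				/* 512-byte sectors */

	printf("reserve %llu 512B sectors\n",
	       (unsigned long long)size);	/* 8388608 */
	return 0;
}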
static void rrpc_free(struct rrpc *rrpc)
@@ -1197,6 +1244,7 @@ static void rrpc_free(struct rrpc *rrpc)
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);
kfree(rrpc);
}
@@ -1221,9 +1269,9 @@ static sector_t rrpc_capacity(void *private)
/* cur, gc, and two emergency blocks for each lun */
reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
- provisioned = rrpc->nr_pages - reserved;
+ provisioned = rrpc->nr_sects - reserved;
- if (reserved > rrpc->nr_pages) {
+ if (reserved > rrpc->nr_sects) {
pr_err("rrpc: not enough space available to expose storage.\n");
return 0;
}
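Capacity subtracts a fixed reservation of cur, gc, and two emergency blocks per LUN before exposing the rest. A worked example with assumed numbers, including the guard against the reservation exceeding the device:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed geometry, for illustration only */
	const unsigned nr_luns = 4;
	const unsigned max_pages_per_blk = 256;
	const uint64_t nr_sects = 1ull << 20;

	/* cur, gc, and two emergency blocks for each lun */
	uint64_t reserved = (uint64_t)nr_luns * max_pages_per_blk * 4;

	if (reserved > nr_sects) {
		printf("not enough space to expose storage\n");
		return 1;
	}
	printf("provision %llu of %llu sectors\n",
	       (unsigned long long)(nr_sects - reserved),
	       (unsigned long long)nr_sects);
	return 0;
}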
@@ -1242,10 +1290,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
struct nvm_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
- u64 paddr, pladdr;
+ u64 bpaddr, paddr, pladdr;
- for (offset = 0; offset < dev->pgs_per_blk; offset++) {
- paddr = block_to_addr(rrpc, rblk) + offset;
+ bpaddr = block_to_rel_addr(rrpc, rblk);
+ for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
if (pladdr == ADDR_EMPTY)
@@ -1317,6 +1366,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;
if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1342,6 +1392,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
@@ -1386,7 +1443,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+ rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));