author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-07 14:42:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-07 14:42:05 -0700
commit     513a4befae06c4469abfb836e8f71977de58c636 (patch)
tree       18cc7d0b01a7fd2352de734e99a4ca5c29ad5fac /drivers/nvme/target
parent     87840a2b7e048018d18d60bdac5c09224de85370 (diff)
parent     997198ba1ed691c09457120576c27dbd953d0557 (diff)
Merge branch 'for-4.9/block' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
 "This is the main pull request for block layer changes in 4.9. As
  mentioned at the last merge window, I've changed things up and now do
  just one branch for core block layer changes, and driver changes.
  This avoids dependencies between the two branches. Outside of this
  main pull request, there are two topical branches coming as well.

  This pull request contains:

   - A set of fixes, and a conversion to blk-mq, of nbd. From Josef.

   - Set of fixes and updates for lightnvm from Matias, Simon, and
     Arnd. Followup dependency fix from Geert.

   - General fixes from Bart, Baoyou, Guoqing, and Linus W.

   - CFQ async write starvation fix from Glauber.

   - Add support for delayed kick of the requeue list, from Mike.

   - Pull out the scalable bitmap code from blk-mq-tag.c and make it
     generally available under the name of sbitmap. Only blk-mq-tag
     uses it for now, but the blk-mq scheduling bits will use it as
     well. From Omar.

   - bdev thaw error propagation from Pierre.

   - Improve the blk polling statistics, and allow the user to clear
     them. From Stephen.

   - Set of minor cleanups from Christoph in block/blk-mq.

   - Set of cleanups and optimizations from me for block/blk-mq.

   - Various nvme/nvmet/nvmeof fixes from the various folks"

* 'for-4.9/block' of git://git.kernel.dk/linux-block: (54 commits)
  fs/block_dev.c: return the right error in thaw_bdev()
  nvme: Pass pointers, not dma addresses, to nvme_get/set_features()
  nvme/scsi: Remove power management support
  nvmet: Make dsm number of ranges zero based
  nvmet: Use direct IO for writes
  admin-cmd: Added smart-log command support.
  nvme-fabrics: Add host_traddr options field to host infrastructure
  nvme-fabrics: revise host transport option descriptions
  nvme-fabrics: rework nvmf_get_address() for variable options
  nbd: use BLK_MQ_F_BLOCKING
  blkcg: Annotate blkg_hint correctly
  cfq: fix starvation of asynchronous writes
  blk-mq: add flag for drivers wanting blocking ->queue_rq()
  blk-mq: remove non-blocking pass in blk_mq_map_request
  blk-mq: get rid of manual run of queue with __blk_mq_run_hw_queue()
  block: export bio_free_pages to other modules
  lightnvm: propagate device_add() error code
  lightnvm: expose device geometry through sysfs
  lightnvm: control life of nvm_dev in driver
  blk-mq: register device instead of disk
  ...
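One of the commits in the log above, "block: export bio_free_pages to other modules", makes an existing block-layer helper callable from drivers. A minimal usage sketch follows; it is not taken from this series, and the handler name and the assumption that the driver allocated the pages itself are hypothetical. A driver that attached its own pages to a bio via bio_add_page() can release them in its completion handler instead of open-coding the loop:

    #include <linux/bio.h>

    /* Hypothetical bi_end_io handler; assumes the driver itself allocated
     * the pages it added to this bio, so it also owns freeing them. */
    static void example_read_endio(struct bio *bio)
    {
            bio_free_pages(bio);    /* free every page attached to the bio */
            bio_put(bio);           /* drop the driver's reference to the bio */
    }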
Diffstat (limited to 'drivers/nvme/target')
 -rw-r--r--  drivers/nvme/target/admin-cmd.c  88
 -rw-r--r--  drivers/nvme/target/io-cmd.c      3
 2 files changed, 90 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 47c564b5a289..7ab9c9381b98 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
+#include <asm/unaligned.h>
#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
@@ -29,8 +30,84 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ struct nvmet_ns *ns;
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+
+ status = NVME_SC_SUCCESS;
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
+ pr_err("nvmet : Could not find namespace id : %d\n",
+ le32_to_cpu(req->cmd->get_log_page.nsid));
+ goto out;
+ }
+
+ host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+ nvmet_put_namespace(ns);
+out:
+ return status;
+}
+
+static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ u64 host_reads = 0, host_writes = 0;
+ u64 data_units_read = 0, data_units_written = 0;
+ struct nvmet_ns *ns;
+ struct nvmet_ctrl *ctrl;
+
+ status = NVME_SC_SUCCESS;
+ ctrl = req->sq->ctrl;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read +=
+ part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written +=
+ part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ }
+ rcu_read_unlock();
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+ return status;
+}
+
+static u16 nvmet_get_smart_log(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+
+ WARN_ON(req == NULL || slog == NULL);
+ if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+ status = nvmet_get_smart_log_all(req, slog);
+ else
+ status = nvmet_get_smart_log_nsid(req, slog);
+ return status;
+}
+
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
+ struct nvme_smart_log *smart_log;
size_t data_len = nvmet_get_log_page_len(req->cmd);
void *buf;
u16 status = 0;
@@ -59,6 +136,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
* available (e.g. units or commands read/written) those aren't
* persistent over power loss.
*/
+ if (data_len != sizeof(*smart_log)) {
+ status = NVME_SC_INTERNAL;
+ goto err;
+ }
+ smart_log = buf;
+ status = nvmet_get_smart_log(req, smart_log);
+ if (status) {
+ memset(buf, '\0', data_len);
+ goto err;
+ }
break;
case 0x03:
/*
@@ -73,6 +160,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, buf, data_len);
+err:
kfree(buf);
out:
nvmet_req_complete(req, status);
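
The new helpers above fill in each counter with put_unaligned_le64() because struct nvme_smart_log keeps these statistics as 16-byte little-endian byte arrays rather than native u64 fields. A minimal host-side sketch of the inverse operation (a hypothetical helper, not part of this patch), reading back the low 64 bits of the host-reads counter:

    #include <asm/unaligned.h>
    #include <linux/nvme.h>

    /* Hypothetical decoder: returns the low 64 bits of the 128-bit
     * "host reads" counter filled in by the target code above. */
    static u64 example_smart_host_reads(struct nvme_smart_log *slog)
    {
            return get_unaligned_le64(&slog->host_reads[0]);
    }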
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 2cd069b691ae..4a96c2049b7b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -58,6 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
if (req->cmd->rw.opcode == nvme_cmd_write) {
op = REQ_OP_WRITE;
+ op_flags = WRITE_ODIRECT;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op_flags |= REQ_FUA;
} else {
@@ -205,7 +206,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_execute_dsm;
- req->data_len = le32_to_cpu(cmd->dsm.nr) *
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
default:
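
The DSM hunk above implements "nvmet: Make dsm number of ranges zero based": the command's NR field counts ranges zero-based, so a value of 0 means one range and the expected payload covers nr + 1 ranges. A minimal sketch of that length computation (a hypothetical helper, not part of the patch):

    #include <linux/nvme.h>

    /* Hypothetical helper mirroring the fixed line above: convert the
     * little-endian, zero-based NR field and size the DSM payload. */
    static size_t example_dsm_data_len(struct nvme_command *cmd)
    {
            return (le32_to_cpu(cmd->dsm.nr) + 1) *
                    sizeof(struct nvme_dsm_range);
    }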