[PATCH] nvme: Move nvme_setup_cmd before host_pathing check

From: Minsik Jeon

Date: Fri Mar 20 2026 - 01:48:07 EST


We were checking host_pathing_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace-cmd's nvme_complete_rq() event.

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch reorders the logic to first call nvme_setup_cmd(), then
perform the host_pathing_error check.

Co-authored-by: Beomsoo Kim <beomsooo.kim@xxxxxxxxxxx>
Co-authored-by: Eunsoo Lee <euns212.lee@xxxxxxxxxxx>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@xxxxxxxxxxx>
Signed-off-by: Minsik Jeon <hmi.jeon@xxxxxxxxxxx>
---
drivers/nvme/host/apple.c | 6 +++---
drivers/nvme/host/fc.c | 8 ++++----
drivers/nvme/host/pci.c | 8 ++++----
drivers/nvme/host/rdma.c | 8 ++++----
drivers/nvme/host/tcp.c | 8 ++++----
drivers/nvme/target/loop.c | 6 +++---
6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index ed61b97fde59..2a28c992d024 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -783,13 +783,13 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!READ_ONCE(q->enabled)))
return BLK_STS_IOERR;

- if (!nvme_check_ready(&anv->ctrl, req, true))
- return nvme_fail_nonready_command(&anv->ctrl, req);
-
ret = nvme_setup_cmd(ns, req);
if (ret)
return ret;

+ if (!nvme_check_ready(&anv->ctrl, req, true))
+ return nvme_fail_nonready_command(&anv->ctrl, req);
+
if (blk_rq_nr_phys_segments(req)) {
ret = apple_nvme_map_data(anv, req, cmnd);
if (ret)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e1bb4707183c..8ea37102a836 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2762,14 +2762,14 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
u32 data_len;
blk_status_t ret;

- if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
- !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
-
ret = nvme_setup_cmd(ns, rq);
if (ret)
return ret;

+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+ !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
/*
* nvme core doesn't quite treat the rq opaquely. Commands such
* as WRITE ZEROES will return a non-zero rq payload_bytes yet
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b78ba239c8ea..ad0363f7e681 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1376,10 +1376,6 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->meta_total_len = 0;
iod->nr_dma_vecs = 0;

- ret = nvme_setup_cmd(req->q->queuedata, req);
- if (ret)
- return ret;
-
if (blk_rq_nr_phys_segments(req)) {
ret = nvme_map_data(req);
if (ret)
@@ -1418,6 +1414,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
return BLK_STS_IOERR;

+ ret = nvme_setup_cmd(req->q->queuedata, req);
+ if (ret)
+ return ret;
+
if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
return nvme_fail_nonready_command(&dev->ctrl, req);

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 57111139e84f..96248d81237e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2005,6 +2005,10 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,

WARN_ON_ONCE(rq->tag < 0);

+ ret = nvme_setup_cmd(ns, rq);
+ if (ret)
+ return ret;
+
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

@@ -2020,10 +2024,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);

- ret = nvme_setup_cmd(ns, rq);
- if (ret)
- goto unmap_qe;
-
nvme_start_request(rq);

if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 243dab830dc8..1a3640e81b8f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2705,10 +2705,6 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
blk_status_t ret;

- ret = nvme_setup_cmd(ns, rq);
- if (ret)
- return ret;
-
req->state = NVME_TCP_SEND_CMD_PDU;
req->status = cpu_to_le16(NVME_SC_SUCCESS);
req->offset = 0;
@@ -2768,6 +2764,10 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
blk_status_t ret;

+ ret = nvme_setup_cmd(ns, rq);
+ if (ret)
+ return ret;
+
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4b3f4f11928d..475b532d08e8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -140,13 +140,13 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
blk_status_t ret;

- if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
- return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
-
ret = nvme_setup_cmd(ns, req);
if (ret)
return ret;

+ if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
+
nvme_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = queue->ctrl->port;
--
2.52.0