Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-23 07:12:09 +00:00
nvme-tcp: fix possible crash in write_zeroes processing
We cannot look at blk_rq_payload_bytes without first checking that the request has mappable physical segments (i.e. blk_rq_nr_phys_segments(rq) != 0), and only then take the request payload bytes. Getting this wrong caused us to send an incorrect SGL to the target, or even dereference a non-existent buffer if we actually got to the data send sequence (when the data was in-capsule).

Reported-by: Tony Asleson <tasleson@redhat.com>
Suggested-by: Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
d038dd815f
commit
25e5cb780e
1 changed file with 7 additions and 6 deletions
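Why the guard is needed is easiest to see in isolation. Below is a minimal standalone model of the fixed mapping decision; it is a sketch, not the driver code, and the struct, helper, and values are invented stand-ins for the block-layer API (blk_rq_nr_phys_segments(), blk_rq_payload_bytes(), rq_data_dir()). The actual patch follows.

/*
 * Standalone model of the fix -- NOT the kernel code. The struct,
 * helper, and values are illustrative stand-ins for the block-layer
 * API.
 */
#include <stdbool.h>
#include <stdio.h>

enum sgl_type { SGL_NULL, SGL_INLINE, SGL_MAPPED };

struct model_rq {
        unsigned int nr_phys_segments;  /* blk_rq_nr_phys_segments(rq) */
        unsigned int payload_bytes;     /* blk_rq_payload_bytes(rq) */
        bool is_write;                  /* rq_data_dir(rq) == WRITE */
};

/*
 * The fixed decision: a request with no mappable segments gets a NULL
 * SGL, and the data length is derived from payload_bytes only when
 * segments exist, so Write Zeroes can never be treated as carrying
 * in-capsule data.
 */
static enum sgl_type map_data(const struct model_rq *rq,
                              unsigned int inline_data_size)
{
        unsigned int data_len = rq->nr_phys_segments ?
                                        rq->payload_bytes : 0;

        if (!rq->nr_phys_segments)
                return SGL_NULL;
        if (rq->is_write && data_len <= inline_data_size)
                return SGL_INLINE;
        return SGL_MAPPED;
}

int main(void)
{
        /*
         * Write Zeroes: the block layer reports a nonzero payload
         * length (the byte range to zero) but zero physical segments.
         * Before the fix, data_len was taken unconditionally from
         * payload_bytes, so such a request got a data SGL and the
         * send path could dereference a buffer that does not exist.
         */
        struct model_rq write_zeroes = {
                .nr_phys_segments = 0,
                .payload_bytes = 1024 * 1024,
                .is_write = true,
        };
        /* An ordinary small write that fits in-capsule. */
        struct model_rq small_write = {
                .nr_phys_segments = 1,
                .payload_bytes = 512,
                .is_write = true,
        };

        printf("write_zeroes -> %d (expect %d, SGL_NULL)\n",
               map_data(&write_zeroes, 4096), SGL_NULL);
        printf("small_write  -> %d (expect %d, SGL_INLINE)\n",
               map_data(&small_write, 4096), SGL_INLINE);
        return 0;
}

The design point mirrors the patch: blk_rq_nr_phys_segments() answers "is there data to map at all", and blk_rq_payload_bytes() is only meaningful once that test passes.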
drivers/nvme/host/tcp.c

@@ -174,16 +174,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 {
         struct request *rq;
-        unsigned int bytes;
 
         if (unlikely(nvme_tcp_async_req(req)))
                 return false; /* async events don't have a request */
 
         rq = blk_mq_rq_from_pdu(req);
-        bytes = blk_rq_payload_bytes(rq);
 
-        return rq_data_dir(rq) == WRITE && bytes &&
-                bytes <= nvme_tcp_inline_data_size(req->queue);
+        return rq_data_dir(rq) == WRITE && req->data_len &&
+                req->data_len <= nvme_tcp_inline_data_size(req->queue);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -2164,7 +2162,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 
         c->common.flags |= NVME_CMD_SGL_METABUF;
 
-        if (rq_data_dir(rq) == WRITE && req->data_len &&
+        if (!blk_rq_nr_phys_segments(rq))
+                nvme_tcp_set_sg_null(c);
+        else if (rq_data_dir(rq) == WRITE &&
             req->data_len <= nvme_tcp_inline_data_size(queue))
                 nvme_tcp_set_sg_inline(queue, c, req->data_len);
         else
@@ -2191,7 +2191,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
         req->data_sent = 0;
         req->pdu_len = 0;
         req->pdu_sent = 0;
-        req->data_len = blk_rq_payload_bytes(rq);
+        req->data_len = blk_rq_nr_phys_segments(rq) ?
+                                blk_rq_payload_bytes(rq) : 0;
         req->curr_bio = rq->bio;
 
         if (rq_data_dir(rq) == WRITE &&