From 49d24398327e32265eccdeec4baeb5a6a609c0bd Mon Sep 17 00:00:00 2001
From: Uday Shankar <ushankar@purestorage.com>
Date: Tue, 28 Feb 2023 17:06:55 -0700
Subject: [PATCH] blk-mq: enforce op-specific segment limits in
 blk_insert_cloned_request

The block layer might merge together discard requests up until the
max_discard_segments limit is hit, but blk_insert_cloned_request checks
the segment count against max_segments regardless of the req op. This
can result in errors like the following when discards are issued
through a DM device and max_discard_segments exceeds max_segments for
the queue of the chosen underlying device.

blk_insert_cloned_request: over max segments limit. (256 > 129)

Fix this by looking at the req_op and enforcing the appropriate segment
limit - max_discard_segments for REQ_OP_DISCARDs and max_segments for
everything else.

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230301000655.48112-1-ushankar@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-merge.c | 7 -------
 block/blk-mq.c    | 7 ++++---
 block/blk.h       | 7 +++++++
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 808b58129d3e..ff72edd7ee03 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -586,13 +586,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
-static inline unsigned int blk_rq_get_max_segments(struct request *rq)
-{
-	if (req_op(rq) == REQ_OP_DISCARD)
-		return queue_max_discard_segments(rq->q);
-	return queue_max_segments(rq->q);
-}
-
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 						  sector_t offset)
 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 08093d4348dd..7c6f812323af 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3000,6 +3000,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+	unsigned int max_segments = blk_rq_get_max_segments(rq);
 	blk_status_t ret;
 
 	if (blk_rq_sectors(rq) > max_sectors) {
@@ -3026,9 +3027,9 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_segments(q)) {
-		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
-			__func__, rq->nr_phys_segments, queue_max_segments(q));
+	if (rq->nr_phys_segments > max_segments) {
+		printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
+			__func__, rq->nr_phys_segments, max_segments);
 		return BLK_STS_IOERR;
 	}
 
diff --git a/block/blk.h b/block/blk.h
index e835f21d48af..cc4e8873dfde 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -156,6 +156,13 @@ static inline bool blk_discard_mergable(struct request *req)
 	return false;
 }
 
+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+	if (req_op(rq) == REQ_OP_DISCARD)
+		return queue_max_discard_segments(rq->q);
+	return queue_max_segments(rq->q);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 						     enum req_op op)
 {
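
(Illustrative note, not part of the patch: the sketch below is a minimal,
self-contained userspace model of the op-specific limit selection the patch
introduces. The enum, struct, field names, and numeric limits are invented
for illustration only; in the kernel the limits come from the underlying
device's request_queue.)

#include <stdio.h>

/* Stand-ins for the kernel's request op and queue limits. */
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

struct queue_limits {
	unsigned int max_segments;		/* e.g. 129 on the underlying device */
	unsigned int max_discard_segments;	/* e.g. 256 */
};

/* Mirrors the idea of blk_rq_get_max_segments(): discards get their own limit. */
static unsigned int get_max_segments(enum req_op op, const struct queue_limits *lim)
{
	if (op == REQ_OP_DISCARD)
		return lim->max_discard_segments;
	return lim->max_segments;
}

int main(void)
{
	struct queue_limits lim = { .max_segments = 129, .max_discard_segments = 256 };
	unsigned int nr_phys_segments = 256;	/* a merged discard request */
	unsigned int limit = get_max_segments(REQ_OP_DISCARD, &lim);

	/*
	 * Before the patch this request was compared against max_segments (129)
	 * and rejected with "over max segments limit. (256 > 129)"; with the
	 * op-specific limit it is accepted.
	 */
	if (nr_phys_segments > limit)
		printf("over max segments limit. (%u > %u)\n", nr_phys_segments, limit);
	else
		printf("discard with %u segments accepted (limit %u)\n",
		       nr_phys_segments, limit);
	return 0;
}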