blk-mq: enforce op-specific segment limits in blk_insert_cloned_request
The block layer might merge together discard requests up until the max_discard_segments limit is hit, but blk_insert_cloned_request checks the segment count against max_segments regardless of the req op. This can result in errors like the following when discards are issued through a DM device and max_discard_segments exceeds max_segments for the queue of the chosen underlying device:

    blk_insert_cloned_request: over max segments limit. (256 > 129)

Fix this by looking at the req_op and enforcing the appropriate segment limit - max_discard_segments for REQ_OP_DISCARDs and max_segments for everything else.

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230301000655.48112-1-ushankar@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 49d2439832
parent 326ac2c513
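To make the behavior change concrete before the diff: the sketch below is a standalone userspace model, not kernel code. The names sim_request, SIM_OP_DISCARD, MAX_SEGMENTS, and MAX_DISCARD_SEGMENTS are hypothetical stand-ins for the kernel's struct request, REQ_OP_DISCARD, and the queue limits, with the values taken from the error quoted in the commit message (256 > 129). It shows why checking every request against max_segments rejects a merged discard that the op-aware check accepts.

/*
 * Standalone model of the segment-limit check, for illustration only.
 * Types and constants are hypothetical stand-ins for kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

enum sim_req_op { SIM_OP_READ, SIM_OP_WRITE, SIM_OP_DISCARD };

struct sim_request {
	enum sim_req_op op;
	unsigned int nr_phys_segments;
};

/* Limits matching the error quoted above: 256 > 129. */
#define MAX_SEGMENTS		129u
#define MAX_DISCARD_SEGMENTS	256u

/* Old behavior: every op is checked against max_segments. */
static bool old_check(const struct sim_request *rq)
{
	return rq->nr_phys_segments <= MAX_SEGMENTS;
}

/* New behavior: discards are checked against max_discard_segments. */
static unsigned int sim_get_max_segments(const struct sim_request *rq)
{
	if (rq->op == SIM_OP_DISCARD)
		return MAX_DISCARD_SEGMENTS;
	return MAX_SEGMENTS;
}

static bool new_check(const struct sim_request *rq)
{
	return rq->nr_phys_segments <= sim_get_max_segments(rq);
}

int main(void)
{
	/* A merged discard with 256 segments, as in the reported error. */
	struct sim_request discard = { SIM_OP_DISCARD, 256 };

	printf("old check: %s\n", old_check(&discard) ? "accepted" : "rejected");
	printf("new check: %s\n", new_check(&discard) ? "accepted" : "rejected");
	return 0;
}

Built with any C compiler, this model reports the discard as rejected by the old check and accepted by the new one, mirroring the fix in the diff below.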
@@ -586,13 +586,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
-static inline unsigned int blk_rq_get_max_segments(struct request *rq)
-{
-	if (req_op(rq) == REQ_OP_DISCARD)
-		return queue_max_discard_segments(rq->q);
-	return queue_max_segments(rq->q);
-}
-
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 						  sector_t offset)
 {
@@ -3000,6 +3000,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+	unsigned int max_segments = blk_rq_get_max_segments(rq);
 	blk_status_t ret;
 
 	if (blk_rq_sectors(rq) > max_sectors) {
@@ -3026,9 +3027,9 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_segments(q)) {
-		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
-			__func__, rq->nr_phys_segments, queue_max_segments(q));
+	if (rq->nr_phys_segments > max_segments) {
+		printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
+			__func__, rq->nr_phys_segments, max_segments);
 		return BLK_STS_IOERR;
 	}
 
@@ -156,6 +156,13 @@ static inline bool blk_discard_mergable(struct request *req)
 	return false;
 }
 
+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+	if (req_op(rq) == REQ_OP_DISCARD)
+		return queue_max_discard_segments(rq->q);
+	return queue_max_segments(rq->q);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 						  enum req_op op)
 {