diff --git a/block/blk-merge.c b/block/blk-merge.c index 1cd480ee2d77..b8315dba857f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -167,6 +167,13 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, *segs = nsegs; if (do_split) { + /* + * Bio splitting may cause subtle trouble such as hang when doing sync + * iopoll in direct IO routine. Given that the performance gain of iopoll + * for big IO can be trivial, disable iopoll when a split is needed. + */ + bio->bi_opf &= ~REQ_HIPRI; + new = bio_split(bio, sectors, GFP_NOIO, bs); if (new) bio = new; diff --git a/block/blk-mq.c b/block/blk-mq.c index 275ffd3aa809..1d1b99f6976c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1975,6 +1975,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) struct blk_plug *plug; struct request *same_queue_rq = NULL; blk_qc_t cookie; + bool hipri; blk_queue_bounce(q, &bio); @@ -1992,6 +1993,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq_qos_throttle(q, bio, NULL); + hipri = bio->bi_opf & REQ_HIPRI; + data.cmd_flags = bio->bi_opf; rq = blk_mq_get_request(q, bio, &data); if (unlikely(!rq)) { @@ -2074,6 +2077,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_sched_insert_request(rq, false, true, true); } + if (!hipri) + return BLK_QC_T_NONE; return cookie; }