diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index fb018fffffdc..4afeda45df15 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -238,7 +238,6 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
  * blk_rq_dma_map_iter_next - map the next DMA segment for a request
  * @req:	request to map
  * @dma_dev:	device to map to
- * @state:	DMA IOVA state
  * @iter:	block layer DMA iterator
  *
  * Iterate to the next mapping after a previous call to
@@ -253,7 +252,7 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
  * returned in @iter.status.
  */
 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, struct blk_dma_iter *iter)
+		struct blk_dma_iter *iter)
 {
 	struct phys_vec vec;
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0e4caeab739c..9fc4a60280a0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -823,7 +823,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 	if (iter->len)
 		return true;
-	if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
+	if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
 		return false;
 	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
 		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
@@ -1010,8 +1010,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
 		}
 		nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
 		iod->total_len += iter->len;
-	} while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state,
-			iter));
+	} while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
 
 	nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
 	if (unlikely(iter->status))
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index cb88fc791fbd..214c181ff2c9 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -28,7 +28,7 @@ struct blk_dma_iter {
 bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 		struct dma_iova_state *state, struct blk_dma_iter *iter);
 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, struct blk_dma_iter *iter);
+		struct blk_dma_iter *iter);
 
 /**
  * blk_rq_dma_map_coalesce - were all segments coalesced?