Skip to content

Commit b84621d

Browse files
isilence authored and axboe committed
io_uring/zcrx: allocate sgtable for umem areas
Currently, dma addresses for umem areas are stored directly in niovs. It's memory efficient but inconvenient. I need a better format 1) to share code with dmabuf areas, and 2) for disentangling page, folio and niov sizes. dmabuf already provides sg_table, create one for user memory as well. Signed-off-by: Pavel Begunkov <[email protected]> Reviewed-by: David Wei <[email protected]> Link: https://lore.kernel.org/r/f3c15081827c1bf5427d3a2e693bc526476b87ee.1751466461.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <[email protected]>
1 parent 54e89a9 commit b84621d

2 files changed

Lines changed: 28 additions & 51 deletions

File tree

io_uring/zcrx.c

Lines changed: 27 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
161161
struct io_uring_zcrx_area_reg *area_reg)
162162
{
163163
struct page **pages;
164-
int nr_pages;
164+
int nr_pages, ret;
165165

166166
if (area_reg->dmabuf_fd)
167167
return -EINVAL;
@@ -172,6 +172,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
172172
if (IS_ERR(pages))
173173
return PTR_ERR(pages);
174174

175+
ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
176+
0, nr_pages << PAGE_SHIFT,
177+
GFP_KERNEL_ACCOUNT);
178+
if (ret)
179+
return ret;
180+
175181
mem->pages = pages;
176182
mem->nr_folios = nr_pages;
177183
mem->size = area_reg->len;
@@ -186,6 +192,7 @@ static void io_release_area_mem(struct io_zcrx_mem *mem)
186192
}
187193
if (mem->pages) {
188194
unpin_user_pages(mem->pages, mem->nr_folios);
195+
sg_free_table(&mem->page_sg_table);
189196
kvfree(mem->pages);
190197
}
191198
}
@@ -207,67 +214,36 @@ static int io_import_area(struct io_zcrx_ifq *ifq,
207214
return io_import_umem(ifq, mem, area_reg);
208215
}
209216

210-
static void io_zcrx_unmap_umem(struct io_zcrx_ifq *ifq,
211-
struct io_zcrx_area *area, int nr_mapped)
212-
{
213-
int i;
214-
215-
for (i = 0; i < nr_mapped; i++) {
216-
netmem_ref netmem = net_iov_to_netmem(&area->nia.niovs[i]);
217-
dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
218-
219-
dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
220-
DMA_FROM_DEVICE, IO_DMA_ATTR);
221-
}
222-
}
223-
224-
static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
225-
struct io_zcrx_area *area, int nr_mapped)
217+
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
218+
struct io_zcrx_area *area)
226219
{
227220
int i;
228221

229-
if (area->mem.is_dmabuf)
230-
io_release_dmabuf(&area->mem);
231-
else
232-
io_zcrx_unmap_umem(ifq, area, nr_mapped);
222+
guard(mutex)(&ifq->dma_lock);
223+
if (!area->is_mapped)
224+
return;
225+
area->is_mapped = false;
233226

234227
for (i = 0; i < area->nia.num_niovs; i++)
235228
net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
236-
}
237-
238-
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
239-
{
240-
guard(mutex)(&ifq->dma_lock);
241229

242-
if (area->is_mapped)
243-
__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
244-
area->is_mapped = false;
230+
if (area->mem.is_dmabuf) {
231+
io_release_dmabuf(&area->mem);
232+
} else {
233+
dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
234+
DMA_FROM_DEVICE, IO_DMA_ATTR);
235+
}
245236
}
246237

247-
static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
238+
static unsigned io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
248239
{
249-
int i;
250-
251-
for (i = 0; i < area->nia.num_niovs; i++) {
252-
struct net_iov *niov = &area->nia.niovs[i];
253-
dma_addr_t dma;
254-
255-
dma = dma_map_page_attrs(ifq->dev, area->mem.pages[i], 0,
256-
PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR);
257-
if (dma_mapping_error(ifq->dev, dma))
258-
break;
259-
if (net_mp_niov_set_dma_addr(niov, dma)) {
260-
dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
261-
DMA_FROM_DEVICE, IO_DMA_ATTR);
262-
break;
263-
}
264-
}
240+
int ret;
265241

266-
if (i != area->nia.num_niovs) {
267-
__io_zcrx_unmap_area(ifq, area, i);
268-
return -EINVAL;
269-
}
270-
return 0;
242+
ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
243+
DMA_FROM_DEVICE, IO_DMA_ATTR);
244+
if (ret < 0)
245+
return ret;
246+
return io_populate_area_dma(ifq, area, &area->mem.page_sg_table, 0);
271247
}
272248

273249
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)

io_uring/zcrx.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ struct io_zcrx_mem {
1414

1515
struct page **pages;
1616
unsigned long nr_folios;
17+
struct sg_table page_sg_table;
1718

1819
struct dma_buf_attachment *attach;
1920
struct dma_buf *dmabuf;

0 commit comments

Comments (0)