@@ -161,7 +161,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
161161 struct io_uring_zcrx_area_reg * area_reg )
162162{
163163 struct page * * pages ;
164- int nr_pages ;
164+ int nr_pages , ret ;
165165
166166 if (area_reg -> dmabuf_fd )
167167 return - EINVAL ;
@@ -172,6 +172,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
172172 if (IS_ERR (pages ))
173173 return PTR_ERR (pages );
174174
175+ ret = sg_alloc_table_from_pages (& mem -> page_sg_table , pages , nr_pages ,
176+ 0 , nr_pages << PAGE_SHIFT ,
177+ GFP_KERNEL_ACCOUNT );
178+ if (ret )
179+ return ret ;
180+
175181 mem -> pages = pages ;
176182 mem -> nr_folios = nr_pages ;
177183 mem -> size = area_reg -> len ;
@@ -186,6 +192,7 @@ static void io_release_area_mem(struct io_zcrx_mem *mem)
186192 }
187193 if (mem -> pages ) {
188194 unpin_user_pages (mem -> pages , mem -> nr_folios );
195+ sg_free_table (& mem -> page_sg_table );
189196 kvfree (mem -> pages );
190197 }
191198}
@@ -207,67 +214,36 @@ static int io_import_area(struct io_zcrx_ifq *ifq,
207214 return io_import_umem (ifq , mem , area_reg );
208215}
209216
210- static void io_zcrx_unmap_umem (struct io_zcrx_ifq * ifq ,
211- struct io_zcrx_area * area , int nr_mapped )
212- {
213- int i ;
214-
215- for (i = 0 ; i < nr_mapped ; i ++ ) {
216- netmem_ref netmem = net_iov_to_netmem (& area -> nia .niovs [i ]);
217- dma_addr_t dma = page_pool_get_dma_addr_netmem (netmem );
218-
219- dma_unmap_page_attrs (ifq -> dev , dma , PAGE_SIZE ,
220- DMA_FROM_DEVICE , IO_DMA_ATTR );
221- }
222- }
223-
224- static void __io_zcrx_unmap_area (struct io_zcrx_ifq * ifq ,
225- struct io_zcrx_area * area , int nr_mapped )
217+ static void io_zcrx_unmap_area (struct io_zcrx_ifq * ifq ,
218+ struct io_zcrx_area * area )
226219{
227220 int i ;
228221
229- if ( area -> mem . is_dmabuf )
230- io_release_dmabuf ( & area -> mem );
231- else
232- io_zcrx_unmap_umem ( ifq , area , nr_mapped ) ;
222+ guard ( mutex )( & ifq -> dma_lock );
223+ if (! area -> is_mapped )
224+ return ;
225+ area -> is_mapped = false ;
233226
234227 for (i = 0 ; i < area -> nia .num_niovs ; i ++ )
235228 net_mp_niov_set_dma_addr (& area -> nia .niovs [i ], 0 );
236- }
237-
238- static void io_zcrx_unmap_area (struct io_zcrx_ifq * ifq , struct io_zcrx_area * area )
239- {
240- guard (mutex )(& ifq -> dma_lock );
241229
242- if (area -> is_mapped )
243- __io_zcrx_unmap_area (ifq , area , area -> nia .num_niovs );
244- area -> is_mapped = false;
230+ if (area -> mem .is_dmabuf ) {
231+ io_release_dmabuf (& area -> mem );
232+ } else {
233+ dma_unmap_sgtable (ifq -> dev , & area -> mem .page_sg_table ,
234+ DMA_FROM_DEVICE , IO_DMA_ATTR );
235+ }
245236}
246237
247- static int io_zcrx_map_area_umem (struct io_zcrx_ifq * ifq , struct io_zcrx_area * area )
238+ static unsigned io_zcrx_map_area_umem (struct io_zcrx_ifq * ifq , struct io_zcrx_area * area )
248239{
249- int i ;
250-
251- for (i = 0 ; i < area -> nia .num_niovs ; i ++ ) {
252- struct net_iov * niov = & area -> nia .niovs [i ];
253- dma_addr_t dma ;
254-
255- dma = dma_map_page_attrs (ifq -> dev , area -> mem .pages [i ], 0 ,
256- PAGE_SIZE , DMA_FROM_DEVICE , IO_DMA_ATTR );
257- if (dma_mapping_error (ifq -> dev , dma ))
258- break ;
259- if (net_mp_niov_set_dma_addr (niov , dma )) {
260- dma_unmap_page_attrs (ifq -> dev , dma , PAGE_SIZE ,
261- DMA_FROM_DEVICE , IO_DMA_ATTR );
262- break ;
263- }
264- }
240+ int ret ;
265241
266- if ( i != area -> nia . num_niovs ) {
267- __io_zcrx_unmap_area ( ifq , area , i );
268- return - EINVAL ;
269- }
270- return 0 ;
242+ ret = dma_map_sgtable ( ifq -> dev , & area -> mem . page_sg_table ,
243+ DMA_FROM_DEVICE , IO_DMA_ATTR );
244+ if ( ret < 0 )
245+ return ret ;
246+ return io_populate_area_dma ( ifq , area , & area -> mem . page_sg_table , 0 ) ;
271247}
272248
273249static int io_zcrx_map_area (struct io_zcrx_ifq * ifq , struct io_zcrx_area * area )