Skip to content

Commit bf8488f

Browse files
committed
4k iommu hack
1 parent 1a9452f commit bf8488f

6 files changed

Lines changed: 167 additions & 42 deletions

File tree

drivers/iommu/apple-dart.c

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,6 @@ struct apple_dart_hw {
203203
* @lock: lock for hardware operations involving this dart
204204
* @pgsize: pagesize supported by this DART
205205
* @supports_bypass: indicates if this DART supports bypass mode
206-
* @force_bypass: force bypass mode due to pagesize mismatch?
207206
* @locked: indicates if this DART is locked
208207
* @sid2group: maps stream ids to iommu_groups
209208
* @iommu: iommu core device
@@ -225,7 +224,6 @@ struct apple_dart {
225224
u32 pgsize;
226225
u32 num_streams;
227226
u32 supports_bypass : 1;
228-
u32 force_bypass : 1;
229227
u32 locked : 1;
230228
u32 four_level : 1;
231229

@@ -774,8 +772,6 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
774772
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
775773
struct apple_dart *dart0 = cfg->stream_maps[0].dart;
776774

777-
if (dart0->force_bypass && domain->type != IOMMU_DOMAIN_IDENTITY)
778-
return -EINVAL;
779775
if (!dart0->supports_bypass && domain->type == IOMMU_DOMAIN_IDENTITY)
780776
return -EINVAL;
781777
if (dart0->locked && domain->type != IOMMU_DOMAIN_DMA)
@@ -892,8 +888,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
892888
if (cfg_dart) {
893889
if (cfg_dart->supports_bypass != dart->supports_bypass)
894890
return -EINVAL;
895-
if (cfg_dart->force_bypass != dart->force_bypass)
896-
return -EINVAL;
897891
if (cfg_dart->pgsize != dart->pgsize)
898892
return -EINVAL;
899893
}
@@ -1036,10 +1030,6 @@ static int apple_dart_def_domain_type(struct device *dev)
10361030
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
10371031
struct apple_dart *dart = cfg->stream_maps[0].dart;
10381032

1039-
WARN_ON(dart->force_bypass && dart->locked);
1040-
1041-
if (dart->force_bypass)
1042-
return IOMMU_DOMAIN_IDENTITY;
10431033
if (dart->locked)
10441034
return IOMMU_DOMAIN_DMA;
10451035
if (dart->supports_bypass)
@@ -1277,8 +1267,6 @@ static int apple_dart_probe(struct platform_device *pdev)
12771267
goto err_clk_disable;
12781268
}
12791269

1280-
dart->force_bypass = dart->pgsize > PAGE_SIZE;
1281-
12821270
dart->locked = apple_dart_is_locked(dart);
12831271
if (!dart->locked) {
12841272
ret = apple_dart_hw_reset(dart);
@@ -1306,8 +1294,8 @@ static int apple_dart_probe(struct platform_device *pdev)
13061294

13071295
dev_info(
13081296
&pdev->dev,
1309-
"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, locked: %d, AS %d -> %d] initialized\n",
1310-
dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass, dart->locked,
1297+
"DART [pagesize %x, %d streams, bypass support: %d, locked: %d, AS %d -> %d] initialized\n",
1298+
dart->pgsize, dart->num_streams, dart->supports_bypass, dart->locked,
13111299
dart->ias, dart->oas);
13121300
return 0;
13131301

drivers/iommu/dma-iommu.c

Lines changed: 113 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,14 @@
1919
#include <linux/iommu.h>
2020
#include <linux/iova.h>
2121
#include <linux/irq.h>
22+
#include <linux/kernel.h>
2223
#include <linux/list_sort.h>
2324
#include <linux/memremap.h>
2425
#include <linux/mm.h>
2526
#include <linux/mutex.h>
2627
#include <linux/of_iommu.h>
2728
#include <linux/pci.h>
29+
#include <linux/pfn.h>
2830
#include <linux/scatterlist.h>
2931
#include <linux/spinlock.h>
3032
#include <linux/swiotlb.h>
@@ -740,6 +742,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
740742
{
741743
struct page **pages;
742744
unsigned int i = 0, nid = dev_to_node(dev);
745+
unsigned int j;
746+
unsigned long min_order = __fls(order_mask);
747+
unsigned int min_order_size = 1U << min_order;
743748

744749
order_mask &= (2U << MAX_ORDER) - 1;
745750
if (!order_mask)
@@ -776,15 +781,38 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
776781
split_page(page, order);
777782
break;
778783
}
779-
if (!page) {
780-
__iommu_dma_free_pages(pages, i);
781-
return NULL;
784+
785+
/*
786+
* If we have no valid page here we might be trying to allocate
787+
* the last block consisting of 1<<order pages (to guarantee
788+
* alignment) but actually need less pages than that.
789+
* In that case we just try to allocate the entire block and
790+
* directly free the spillover pages again.
791+
*/
792+
if (!page && !order_mask && count < min_order_size) {
793+
page = alloc_pages_node(nid, gfp, min_order);
794+
if (!page)
795+
goto free_pages;
796+
split_page(page, min_order);
797+
798+
for (j = count; j < min_order_size; ++j)
799+
__free_page(page + j);
800+
801+
order_size = count;
782802
}
803+
804+
if (!page)
805+
goto free_pages;
806+
783807
count -= order_size;
784808
while (order_size--)
785809
pages[i++] = page++;
786810
}
787811
return pages;
812+
813+
free_pages:
814+
__iommu_dma_free_pages(pages, i);
815+
return NULL;
788816
}
789817

790818
/*
@@ -801,16 +829,28 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
801829
bool coherent = dev_is_dma_coherent(dev);
802830
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
803831
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
832+
struct sg_append_table sgt_append = {};
833+
struct scatterlist *last_sg;
804834
struct page **pages;
805835
dma_addr_t iova;
836+
phys_addr_t orig_s_phys;
837+
size_t orig_s_len, orig_s_off, s_iova_off, iova_size;
806838
ssize_t ret;
807839

808840
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
809841
iommu_deferred_attach(dev, domain))
810842
return NULL;
811843

812844
min_size = alloc_sizes & -alloc_sizes;
813-
if (min_size < PAGE_SIZE) {
845+
if (iovad->granule > PAGE_SIZE) {
846+
if (size < iovad->granule) {
847+
/* ensure a single contiguous allocation */
848+
min_size = ALIGN(size, PAGE_SIZE*(1U<<get_order(size)));
849+
alloc_sizes = min_size;
850+
}
851+
852+
size = PAGE_ALIGN(size);
853+
} else if (min_size < PAGE_SIZE) {
814854
min_size = PAGE_SIZE;
815855
alloc_sizes |= PAGE_SIZE;
816856
} else {
@@ -825,8 +865,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
825865
if (!pages)
826866
return NULL;
827867

828-
size = iova_align(iovad, size);
829-
iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
868+
iova_size = iova_align(iovad, size);
869+
iova = iommu_dma_alloc_iova(domain, iova_size, dev->coherent_dma_mask, dev);
830870
if (!iova)
831871
goto out_free_pages;
832872

@@ -837,8 +877,12 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
837877
*/
838878
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
839879

840-
if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
880+
/* append_table is only used to get a pointer to the last entry */
881+
if (sg_alloc_append_table_from_pages(&sgt_append, pages, count, 0,
882+
iova_size, UINT_MAX, 0, gfp))
841883
goto out_free_iova;
884+
memcpy(sgt, &sgt_append.sgt, sizeof(*sgt));
885+
last_sg = sgt_append.prv;
842886

843887
if (!(ioprot & IOMMU_CACHE)) {
844888
struct scatterlist *sg;
@@ -847,20 +891,58 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
847891
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
848892
arch_dma_prep_coherent(sg_page(sg), sg->length);
849893
}
850-
894+
if (iovad->granule > PAGE_SIZE) {
895+
if (size < iovad->granule) {
896+
/*
897+
* we only have a single sg list entry here that is
898+
* likely not aligned to iovad->granule. adjust the
899+
* entry to represent the encapsulating IOMMU page
900+
* and then later restore everything to its original
901+
* values, similar to the impedance matching done in
902+
* iommu_dma_map_sg.
903+
*/
904+
orig_s_phys = sg_phys(sgt->sgl);
905+
orig_s_len = sgt->sgl->length;
906+
orig_s_off = sgt->sgl->offset;
907+
s_iova_off = iova_offset(iovad, orig_s_phys);
908+
909+
sg_set_page(sgt->sgl,
910+
pfn_to_page(PHYS_PFN(orig_s_phys - s_iova_off)),
911+
iova_align(iovad, orig_s_len + s_iova_off),
912+
sgt->sgl->offset & ~s_iova_off);
913+
} else {
914+
/*
915+
* convince iommu_map_sg_atomic to map the last block
916+
* even though it may be too small.
917+
*/
918+
orig_s_len = last_sg->length;
919+
last_sg->length = iova_align(iovad, last_sg->length);
920+
}
921+
}
851922
ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
852923
gfp);
853-
if (ret < 0 || ret < size)
924+
if (ret < 0 || ret < iova_size)
854925
goto out_free_sg;
926+
if (iovad->granule > PAGE_SIZE) {
927+
if (size < iovad->granule) {
928+
sg_set_page(sgt->sgl,
929+
pfn_to_page(PHYS_PFN(orig_s_phys)),
930+
orig_s_len, orig_s_off);
931+
932+
iova += s_iova_off;
933+
} else {
934+
last_sg->length = orig_s_len;
935+
}
936+
}
855937

856938
sgt->sgl->dma_address = iova;
857-
sgt->sgl->dma_length = size;
939+
sgt->sgl->dma_length = iova_size;
858940
return pages;
859941

860942
out_free_sg:
861943
sg_free_table(sgt);
862944
out_free_iova:
863-
iommu_dma_free_iova(cookie, iova, size, NULL);
945+
iommu_dma_free_iova(cookie, iova, iova_size, NULL);
864946
out_free_pages:
865947
__iommu_dma_free_pages(pages, count);
866948
return NULL;
@@ -1098,8 +1180,9 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
10981180
continue;
10991181
}
11001182

1101-
s->offset += s_iova_off;
1102-
s->length = s_length;
1183+
sg_set_page(s,
1184+
pfn_to_page(PHYS_PFN(sg_phys(s) + s_iova_off)),
1185+
s_length, s_iova_off & ~PAGE_MASK);
11031186

11041187
/*
11051188
* Now fill in the real DMA data. If...
@@ -1138,16 +1221,18 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
11381221
static void __invalidate_sg(struct scatterlist *sg, int nents)
11391222
{
11401223
struct scatterlist *s;
1224+
phys_addr_t orig_paddr;
11411225
int i;
11421226

11431227
for_each_sg(sg, s, nents, i) {
11441228
if (sg_is_dma_bus_address(s)) {
11451229
sg_dma_unmark_bus_address(s);
1146-
} else {
1147-
if (sg_dma_address(s) != DMA_MAPPING_ERROR)
1148-
s->offset += sg_dma_address(s);
1149-
if (sg_dma_len(s))
1150-
s->length = sg_dma_len(s);
1230+
} else if (sg_dma_len(s)) {
1231+
orig_paddr = sg_phys(s) + sg_dma_address(s);
1232+
sg_set_page(s,
1233+
pfn_to_page(PHYS_PFN(orig_paddr)),
1234+
sg_dma_len(s),
1235+
sg_dma_address(s) & ~PAGE_MASK);
11511236
}
11521237
sg_dma_address(s) = DMA_MAPPING_ERROR;
11531238
sg_dma_len(s) = 0;
@@ -1228,7 +1313,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
12281313
* stashing the unaligned parts in the as-yet-unused DMA fields.
12291314
*/
12301315
for_each_sg(sg, s, nents, i) {
1231-
size_t s_iova_off = iova_offset(iovad, s->offset);
1316+
phys_addr_t s_phys = sg_phys(s);
1317+
size_t s_iova_off = iova_offset(iovad, s_phys);
12321318
size_t s_length = s->length;
12331319
size_t pad_len = (mask - iova_len + 1) & mask;
12341320

@@ -1258,10 +1344,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
12581344

12591345
sg_dma_address(s) = s_iova_off;
12601346
sg_dma_len(s) = s_length;
1261-
s->offset -= s_iova_off;
12621347
s_length = iova_align(iovad, s_length + s_iova_off);
1263-
s->length = s_length;
1264-
1348+
sg_set_page(s, pfn_to_page(PHYS_PFN(s_phys - s_iova_off)),
1349+
s_length, s->offset & ~s_iova_off);
12651350
/*
12661351
* Due to the alignment of our single IOVA allocation, we can
12671352
* depend on these assumptions about the segment boundary mask:
@@ -1522,9 +1607,15 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
15221607
void *cpu_addr, dma_addr_t dma_addr, size_t size,
15231608
unsigned long attrs)
15241609
{
1610+
struct iommu_domain *domain = iommu_get_dma_domain(dev);
1611+
struct iommu_dma_cookie *cookie = domain->iova_cookie;
1612+
struct iova_domain *iovad = &cookie->iovad;
15251613
struct page *page;
15261614
int ret;
15271615

1616+
if (iovad->granule > PAGE_SIZE)
1617+
return -ENXIO;
1618+
15281619
if (is_vmalloc_addr(cpu_addr)) {
15291620
struct page **pages = dma_common_find_pages(cpu_addr);
15301621

drivers/iommu/iommu.c

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ static int __iommu_attach_group(struct iommu_domain *domain,
9898
struct iommu_group *group);
9999
static int __iommu_group_set_domain(struct iommu_group *group,
100100
struct iommu_domain *new_domain);
101+
static int iommu_group_do_set_platform_dma(struct device *dev, void *data);
101102
static int iommu_create_device_direct_mappings(struct iommu_group *group,
102103
struct device *dev);
103104
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -2010,6 +2011,24 @@ static void __iommu_group_set_core_domain(struct iommu_group *group)
20102011
WARN(ret, "iommu driver failed to attach the default/blocking domain");
20112012
}
20122013

2014+
static int iommu_check_page_size(struct iommu_domain *domain,
2015+
struct device *dev)
2016+
{
2017+
bool trusted = !(dev_is_pci(dev) && to_pci_dev(dev)->untrusted);
2018+
2019+
if (!iommu_is_paging_domain(domain))
2020+
return 0;
2021+
if (iommu_is_large_pages_domain(domain) && trusted)
2022+
return 0;
2023+
2024+
if (!(domain->pgsize_bitmap & (PAGE_SIZE | (PAGE_SIZE - 1)))) {
2025+
pr_warn("IOMMU pages cannot exactly represent CPU pages.\n");
2026+
return -EFAULT;
2027+
}
2028+
2029+
return 0;
2030+
}
2031+
20132032
static int __iommu_attach_device(struct iommu_domain *domain,
20142033
struct device *dev)
20152034
{
@@ -2022,6 +2041,19 @@ static int __iommu_attach_device(struct iommu_domain *domain,
20222041
if (ret)
20232042
return ret;
20242043
dev->iommu->attach_deferred = 0;
2044+
2045+
/*
2046+
* Check that CPU pages can be represented by the IOVA granularity.
2047+
* This has to be done after ops->attach_dev since many IOMMU drivers
2048+
* only limit domain->pgsize_bitmap after having attached the first
2049+
* device.
2050+
*/
2051+
ret = iommu_check_page_size(domain, dev);
2052+
if (ret) {
2053+
iommu_group_do_set_platform_dma(dev, NULL);
2054+
return ret;
2055+
}
2056+
20252057
trace_attach_device_to_domain(dev);
20262058
return 0;
20272059
}

drivers/iommu/iova.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -54,10 +54,11 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
5454
{
5555
/*
5656
* IOVA granularity will normally be equal to the smallest
57-
* supported IOMMU page size; both *must* be capable of
58-
* representing individual CPU pages exactly.
57+
* supported IOMMU page size; while both usually are capable of
58+
* representing individual CPU pages exactly, the IOVA allocator
59+
* supports any granularity that is an exact power of two.
5960
*/
60-
BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
61+
BUG_ON(!is_power_of_2(granule));
6162

6263
spin_lock_init(&iovad->iova_rbtree_lock);
6364
iovad->rbroot = RB_ROOT;

0 commit comments

Comments
 (0)