@@ -170,140 +170,137 @@ def from_adt(cls, u, path):
170170# self.iomap_at(stream, iova, addr, size)
171171# return iova
172172
def iomap_at(self, stream, iova, addr, size):
    """Map the physical range [addr, addr+size) at IOVA ``iova`` for ``stream``.

    Walks (and lazily allocates) the two-level page tables behind the
    stream's TTBR register, writes the leaf PTEs, and flushes every page
    table that was touched.  Raises if the stream is bypassed, in an
    unknown mode, or if either address is not page-aligned.
    """
    # Nothing to do for an empty mapping.
    if size == 0:
        return

    # Lazily enable the stream the first time it is mapped.
    # ENABLE_STREAMS is a bank of 32-bit registers, one bit per stream.
    if not (self.enabled_streams & (1 << stream)):
        self.enabled_streams |= (1 << stream)
        self.regs.ENABLE_STREAMS[stream // 32].val |= (1 << (stream % 32))

    tcr = self.regs.TCR[stream].reg

    # Only translate mode is supported here; refuse bypass / unknown modes.
    if tcr.BYPASS_DART and not tcr.TRANSLATE_ENABLE:
        raise Exception("Stream is bypassed in DART")

    if tcr.BYPASS_DART or not tcr.TRANSLATE_ENABLE:
        raise Exception(f"Unknown DART mode {tcr}")

    if addr & (self.PAGE_SIZE - 1):
        raise Exception(f"Unaligned PA {addr:#x}")

    if iova & (self.PAGE_SIZE - 1):
        raise Exception(f"Unaligned IOVA {iova:#x}")

    map_start = align_down(iova, self.PAGE_SIZE)
    map_end = align_up(iova + size, self.PAGE_SIZE)

    # Physical addresses of every page table we modify, for flushing.
    dirty = set()

    for page in range(map_start, map_end, self.PAGE_SIZE):
        paddr = addr + page - map_start

        # Allocate the L1 table on first use and point the TTBR at it.
        ttbr = self.regs.TTBR[stream].reg
        if not ttbr.VALID:
            l1addr = self.u.memalign(self.PAGE_SIZE, self.PAGE_SIZE)
            self.pt_cache[l1addr] = [0] * self.Lx_SIZE
            ttbr.VALID = 1
            ttbr.ADDR = l1addr >> self.PAGE_BITS
            self.regs.TTBR[stream].reg = ttbr

        cached, l1 = self.get_pt(ttbr.ADDR << self.PAGE_BITS)
        l1idx = (page >> self.L1_OFF) & self.IDX_MASK
        l1pte = PTE(l1[l1idx])
        if not l1pte.VALID:
            # Allocate a fresh L2 table and install its descriptor in L1.
            l2addr = self.u.memalign(self.PAGE_SIZE, self.PAGE_SIZE)
            self.pt_cache[l2addr] = [0] * self.Lx_SIZE
            l1pte = PTE(OFFSET=l2addr >> self.PAGE_BITS, VALID=1)
            l1[l1idx] = l1pte.value
            dirty.add(ttbr.ADDR << self.PAGE_BITS)
        else:
            l2addr = l1pte.OFFSET << self.PAGE_BITS

        dirty.add(l1pte.OFFSET << self.PAGE_BITS)

        # Write the leaf PTE for this page.
        # SP_START/SP_END presumably cover the full sub-page protection
        # range — TODO confirm against the DART register definitions.
        cached, l2 = self.get_pt(l2addr)
        l2idx = (page >> self.L2_OFF) & self.IDX_MASK
        self.pt_cache[l2addr][l2idx] = PTE(
            SP_START=0, SP_END=0xfff,
            OFFSET=paddr >> self.PAGE_BITS, VALID=1).value

    for table in dirty:
        self.flush_pt(table)
234+
def iotranslate(self, stream, start, size):
    """Translate IOVA range [start, start+size) for ``stream`` into a list
    of ``(phys_addr, length)`` tuples.

    Unmapped spans are reported as ``(None, length)``.  Contiguous
    physical pages (and adjacent unmapped pages) are coalesced into a
    single range.  Raises on unknown DART modes; a fully bypassed stream
    returns the input range unchanged.
    """
    if size == 0:
        return []

    tcr = self.regs.TCR[stream].reg

    if tcr.BYPASS_DART and not tcr.TRANSLATE_ENABLE:
        # FIXME this may not be correct
        return [(start, size)]

    if tcr.BYPASS_DART or not tcr.TRANSLATE_ENABLE:
        raise Exception(f"Unknown DART mode {tcr}")

    # Mask to the IOVA space width (36 bits here — TODO confirm).
    start = start & 0xfffffffff

    start_page = align_down(start, self.PAGE_SIZE)
    start_off = start - start_page
    end = start + size
    end_page = align_up(end, self.PAGE_SIZE)
    # Bytes of the final page actually covered by the request.
    end_size = end - (end_page - self.PAGE_SIZE)

    # Per-page physical addresses; None marks an unmapped page.
    pages = []

    for page in range(start_page, end_page, self.PAGE_SIZE):
        ttbr = self.regs.TTBR[stream].reg
        if not ttbr.VALID:
            pages.append(None)
            continue

        # Walk L1; on a stale cached miss, retry with an uncached read.
        cached, l1 = self.get_pt(ttbr.ADDR << self.PAGE_BITS)
        l1pte = PTE(l1[(page >> self.L1_OFF) & self.IDX_MASK])
        if not l1pte.VALID and cached:
            cached, l1 = self.get_pt(ttbr.ADDR << self.PAGE_BITS, uncached=True)
            l1pte = PTE(l1[(page >> self.L1_OFF) & self.IDX_MASK])
        if not l1pte.VALID:
            pages.append(None)
            continue

        # Walk L2 the same way.
        cached, l2 = self.get_pt(l1pte.OFFSET << self.PAGE_BITS)
        l2pte = PTE(l2[(page >> self.L2_OFF) & self.IDX_MASK])
        if not l2pte.VALID and cached:
            cached, l2 = self.get_pt(l1pte.OFFSET << self.PAGE_BITS, uncached=True)
            l2pte = PTE(l2[(page >> self.L2_OFF) & self.IDX_MASK])
        if not l2pte.VALID:
            pages.append(None)
            continue

        pages.append(l2pte.OFFSET << self.PAGE_BITS)

    # Coalesce physically-contiguous pages (and runs of unmapped pages).
    ranges = []

    for page in pages:
        if not ranges:
            ranges.append((page, self.PAGE_SIZE))
            continue
        laddr, lsize = ranges[-1]
        if ((page is None and laddr is None) or
            (page is not None and laddr == (page - lsize))):
            ranges[-1] = laddr, lsize + self.PAGE_SIZE
        else:
            ranges.append((page, self.PAGE_SIZE))

    # Trim the tail of the last range to the requested end.
    ranges[-1] = (ranges[-1][0], ranges[-1][1] - self.PAGE_SIZE + end_size)

    if start_off:
        # Fix: compare against None explicitly — a physical address of 0
        # is falsy, and the old truthiness test would wrongly report the
        # first range as unmapped instead of offsetting it.
        ranges[0] = (ranges[0][0] + start_off if ranges[0][0] is not None else None,
                     ranges[0][1] - start_off)

    return ranges
307304
308305 def get_pt (self , addr , uncached = False ):
309306 cached = True
0 commit comments