@@ -192,7 +192,7 @@ static void hv_update_fiq(void)
192192 _msr(sr_tkn(sr), regs[rt]); \
193193 return true;
194194
195- static bool hv_handle_msr (struct exc_info * ctx , u64 iss )
195+ static bool hv_handle_msr_unlocked (struct exc_info * ctx , u64 iss )
196196{
197197 u64 reg = iss & (ESR_ISS_MSR_OP0 | ESR_ISS_MSR_OP2 | ESR_ISS_MSR_OP1 | ESR_ISS_MSR_CRn |
198198 ESR_ISS_MSR_CRm );
@@ -267,6 +267,27 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
267267 SYSREG_PASS (sys_reg (1 , 0 , 8 , 1 , 2 )) // TLBI ASIDE1OS
268268 SYSREG_PASS (sys_reg (1 , 0 , 8 , 5 , 1 )) // TLBI RVAE1OS
269269
270+ case SYSREG_ISS (SYS_IMP_APL_IPI_SR_EL1 ):
271+ if (is_read )
272+ regs [rt ] = PERCPU (ipi_pending ) ? IPI_SR_PENDING : 0 ;
273+ else if (regs [rt ] & IPI_SR_PENDING )
274+ PERCPU (ipi_pending ) = false;
275+ return true;
276+
277+ /* shadow the interrupt mode and state flag */
278+ case SYSREG_ISS (SYS_IMP_APL_PMCR0 ):
279+ if (is_read ) {
280+ u64 val = (mrs (SYS_IMP_APL_PMCR0 ) & ~PMCR0_IMODE_MASK ) | PERCPU (pmc_irq_mode );
281+ regs [rt ] =
282+ val | (PERCPU (pmc_pending ) ? PMCR0_IACT : 0 ) | PERCPU (exc_entry_pmcr0_cnt );
283+ } else {
284+ PERCPU (pmc_pending ) = !!(regs [rt ] & PMCR0_IACT );
285+ PERCPU (pmc_irq_mode ) = regs [rt ] & PMCR0_IMODE_MASK ;
286+ PERCPU (exc_entry_pmcr0_cnt ) = regs [rt ] & PMCR0_CNT_MASK ;
287+ msr (SYS_IMP_APL_PMCR0 , regs [rt ] & ~PERCPU (exc_entry_pmcr0_cnt ));
288+ }
289+ return true;
290+
270291 /*
271292 * Handle this one here because m1n1/Linux (will) use it for explicit cpuidle.
272293 * We can pass it through; going into deep sleep doesn't break the HV since we
@@ -285,6 +306,27 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
285306 /* clang-format off */
286307 /* IPI handling */
287308 SYSREG_PASS (SYS_IMP_APL_IPI_CR_EL1 )
309+ /* M1RACLES reg, handle here due to silly 12.0 "mitigation" */
310+ case SYSREG_ISS (sys_reg (3 , 5 , 15 , 10 , 1 )):
311+ if (is_read )
312+ regs [rt ] = 0 ;
313+ return true;
314+ }
315+ return false;
316+ }
317+
318+ static bool hv_handle_msr (struct exc_info * ctx , u64 iss )
319+ {
320+ u64 reg = iss & (ESR_ISS_MSR_OP0 | ESR_ISS_MSR_OP2 | ESR_ISS_MSR_OP1 | ESR_ISS_MSR_CRn |
321+ ESR_ISS_MSR_CRm );
322+ u64 rt = FIELD_GET (ESR_ISS_MSR_Rt , iss );
323+ bool is_read = iss & ESR_ISS_MSR_DIR ;
324+
325+ u64 * regs = ctx -> regs ;
326+
327+ regs [31 ] = 0 ;
328+
329+ switch (reg ) {
288330 /* clang-format on */
289331 case SYSREG_ISS (SYS_IMP_APL_IPI_RR_LOCAL_EL1 ): {
290332 assert (!is_read );
@@ -308,25 +350,6 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
308350 }
309351 }
310352 return false;
311- case SYSREG_ISS (SYS_IMP_APL_IPI_SR_EL1 ):
312- if (is_read )
313- regs [rt ] = PERCPU (ipi_pending ) ? IPI_SR_PENDING : 0 ;
314- else if (regs [rt ] & IPI_SR_PENDING )
315- PERCPU (ipi_pending ) = false;
316- return true;
317- /* shadow the interrupt mode and state flag */
318- case SYSREG_ISS (SYS_IMP_APL_PMCR0 ):
319- if (is_read ) {
320- u64 val = (mrs (SYS_IMP_APL_PMCR0 ) & ~PMCR0_IMODE_MASK ) | PERCPU (pmc_irq_mode );
321- regs [rt ] =
322- val | (PERCPU (pmc_pending ) ? PMCR0_IACT : 0 ) | PERCPU (exc_entry_pmcr0_cnt );
323- } else {
324- PERCPU (pmc_pending ) = !!(regs [rt ] & PMCR0_IACT );
325- PERCPU (pmc_irq_mode ) = regs [rt ] & PMCR0_IMODE_MASK ;
326- PERCPU (exc_entry_pmcr0_cnt ) = regs [rt ] & PMCR0_CNT_MASK ;
327- msr (SYS_IMP_APL_PMCR0 , regs [rt ] & ~PERCPU (exc_entry_pmcr0_cnt ));
328- }
329- return true;
330353#ifdef DEBUG_PMU_IRQ
331354 case SYSREG_ISS (SYS_IMP_APL_PMC0 ):
332355 if (is_read ) {
@@ -338,17 +361,12 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
338361 }
339362 return true;
340363#endif
341- /* M1RACLES reg, handle here due to silly 12.0 "mitigation" */
342- case SYSREG_ISS (sys_reg (3 , 5 , 15 , 10 , 1 )):
343- if (is_read )
344- regs [rt ] = 0 ;
345- return true;
346364 }
347365
348366 return false;
349367}
350368
351- static void hv_exc_entry (struct exc_info * ctx )
369+ static void hv_get_context (struct exc_info * ctx )
352370{
353371 ctx -> spsr = hv_get_spsr ();
354372 ctx -> elr = hv_get_elr ();
@@ -362,7 +380,10 @@ static void hv_exc_entry(struct exc_info *ctx)
362380 ctx -> mpidr = mrs (MPIDR_EL1 );
363381
364382 sysop ("isb" );
383+ }
365384
// Prepare hypervisor-side execution state after the guest context has been
// captured (callers invoke hv_get_context() first, then hv_exc_entry()).
static void hv_exc_entry(void)
{
    // Enable SErrors in the HV, but only if not already pending:
    // unmasking with one pending would take it immediately.
    // ISR_EL1 bit 8 ('A') = SError pending; "msr daifclr, 4" clears
    // PSTATE.A, unmasking SError delivery at this EL.
    if (!(mrs(ISR_EL1) & 0x100))
        sysop("msr daifclr, 4");
}
@@ -397,10 +418,36 @@ static void hv_exc_exit(struct exc_info *ctx)
397418void hv_exc_sync (struct exc_info * ctx )
398419{
399420 hv_wdt_breadcrumb ('S' );
400- hv_exc_entry (ctx );
421+ hv_get_context (ctx );
401422 bool handled = false;
402423 u32 ec = FIELD_GET (ESR_EC , ctx -> esr );
403424
425+ switch (ec ) {
426+ case ESR_EC_MSR :
427+ hv_wdt_breadcrumb ('m' );
428+ handled = hv_handle_msr_unlocked (ctx , FIELD_GET (ESR_ISS , ctx -> esr ));
429+ break ;
430+ case ESR_EC_IMPDEF :
431+ hv_wdt_breadcrumb ('a' );
432+ switch (FIELD_GET (ESR_ISS , ctx -> esr )) {
433+ case ESR_ISS_IMPDEF_MSR :
434+ handled = hv_handle_msr_unlocked (ctx , ctx -> afsr1 );
435+ break ;
436+ }
437+ break ;
438+ }
439+
440+ if (handled ) {
441+ hv_wdt_breadcrumb ('#' );
442+ ctx -> elr += 4 ;
443+ hv_set_elr (ctx -> elr );
444+ hv_update_fiq ();
445+ hv_wdt_breadcrumb ('s' );
446+ return ;
447+ }
448+
449+ hv_exc_entry ();
450+
404451 switch (ec ) {
405452 case ESR_EC_DABORT_LOWER :
406453 hv_wdt_breadcrumb ('D' );
@@ -439,7 +486,8 @@ void hv_exc_sync(struct exc_info *ctx)
// Lower-EL IRQ exception vector: capture the trapped guest's context,
// prepare HV state, then hand the IRQ to the proxy for handling and
// restore guest state on the way out.
// NOTE: call order is fixed — hv_get_context() must precede hv_exc_entry(),
// and hv_exc_exit() must run after the proxy returns.
void hv_exc_irq(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('I'); // watchdog breadcrumb: IRQ path entered
    hv_get_context(ctx);    // snapshot guest spsr/elr/... into ctx
    hv_exc_entry();         // unmask SError inside the HV (if none pending)
    hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_IRQ, NULL);
    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('i'); // breadcrumb: IRQ path completed
}
@@ -469,7 +517,8 @@ void hv_exc_fiq(struct exc_info *ctx)
469517
470518 // Slow (single threaded) path
471519 hv_wdt_breadcrumb ('F' );
472- hv_exc_entry (ctx );
520+ hv_get_context (ctx );
521+ hv_exc_entry ();
473522
474523 // Only poll for HV events in the interruptible CPU
475524 if (tick ) {
@@ -521,7 +570,8 @@ void hv_exc_fiq(struct exc_info *ctx)
// Lower-EL SError exception vector: mirror of hv_exc_irq() for SErrors —
// capture guest context, prepare HV state, proxy the SError, restore.
// NOTE: call order is fixed — hv_get_context() must precede hv_exc_entry(),
// and hv_exc_exit() must run after the proxy returns.
void hv_exc_serr(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('E'); // watchdog breadcrumb: SError path entered
    hv_get_context(ctx);    // snapshot guest spsr/elr/... into ctx
    hv_exc_entry();         // unmask SError inside the HV (if none pending)
    hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_SERROR, NULL);
    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('e'); // breadcrumb: SError path completed
}
0 commit comments