Skip to content

Commit 7ed6b85

Browse files
Merge pull request #95 from ErichDonGubler/stragglers-oof
Fix several unreleased straggler issues 😅
2 parents e6b5b32 + c293e24 commit 7ed6b85

2 files changed

Lines changed: 64 additions & 63 deletions

File tree

moz-webgpu-cts/src/main.rs

Lines changed: 61 additions & 60 deletions
Original file line number | Diff line number | Diff line change
@@ -57,20 +57,20 @@ struct Cli {
5757

5858
#[derive(Debug, Parser)]
5959
enum Subcommand {
60-
/// Adjust test expectations in metadata, optionally using `wptreport.json` reports from CI
61-
/// runs covering Firefox's implementation of WebGPU.
60+
/// Adjust expected test outcomes in metadata, optionally using `wptreport.json` reports from
61+
/// CI runs covering Firefox's implementation of WebGPU.
6262
///
6363
/// As Firefox's behavior changes, one generally expects CTS test outcomes to change. When you
64-
/// are testing your own changes in CI, you can use this subcommand to update expectations
64+
/// are testing your own changes in CI, you can use this subcommand to update expected outcomes
6565
/// automatically with the following steps:
6666
///
67-
/// 1. Run `moz-webgpu-cts process-reports --preset=new-fx …` against the first complete set of
67+
/// 1. Run `moz-webgpu-cts update-expected --preset=new-fx …` against the first complete set of
6868
/// reports you gather from CI with your new Firefox build. This will adjust for new
6969
/// permanent outcomes, and may capture some (but not all) intermittent outcomes.
7070
///
7171
/// 2. There may still exist intermittent issues that you do not discover in CI run(s) from the
7272
/// previous step. As you discover them in further CI runs on the same build of Firefox,
73-
/// adjust expected outcomes to match by running `moz-webgpu-cts process-reports
73+
/// adjust expected outcomes to match by running `moz-webgpu-cts update-expected
7474
/// --preset=same-fx …` against the runs' new reports. Repeat as necessary.
7575
///
7676
/// With both steps, you may delete the local copies of these reports after being processed
@@ -80,10 +80,12 @@ enum Subcommand {
8080
UpdateExpected {
8181
/// Direct paths to report files to be processed.
8282
report_paths: Vec<PathBuf>,
83-
/// Cross-platform `wax` globs to enumerate report files to be processed.
83+
/// Cross-platform [`wax` globs] to enumerate report files to be processed.
8484
///
8585
/// N.B. for Windows users: backslashes are used strictly for escaped characters, and
8686
/// forward slashes (`/`) are the only valid path separator for these globs.
87+
///
88+
/// [`wax` globs]: https://github.com/olson-sean-k/wax/blob/master/README.md#patterns
8789
#[clap(long = "glob", value_name = "REPORT_GLOB")]
8890
report_globs: Vec<String>,
8991
/// The heuristic for resolving differences between current metadata and processed reports.
@@ -711,39 +713,30 @@ fn run(cli: Cli) -> ExitCode {
711713
ExitCode::SUCCESS
712714
}
713715
Subcommand::Fixup => {
714-
log::info!("formatting metadata in-place…");
715-
let raw_test_files_by_path = read_and_parse_all_metadata(&gecko_checkout);
716-
let mut err_found = false;
717-
for res in raw_test_files_by_path {
718-
let (path, mut file) = match res {
719-
Ok(ok) => ok,
720-
Err(AlreadyReportedToCommandline) => {
721-
err_found = true;
722-
continue;
723-
}
724-
};
725-
726-
for test in file.tests.values_mut() {
727-
for subtest in &mut test.subtests.values_mut() {
728-
if let Some(expected) = subtest.properties.expected.as_mut() {
729-
for (_, expected) in expected.iter_mut() {
730-
taint_subtest_timeouts_by_suspicion(expected);
716+
log::info!("fixing up metadata in-place…");
717+
let err_found = read_and_parse_all_metadata(&gecko_checkout)
718+
.map(|res| {
719+
res.and_then(|(path, mut file)| {
720+
for test in file.tests.values_mut() {
721+
for subtest in &mut test.subtests.values_mut() {
722+
if let Some(expected) = subtest.properties.expected.as_mut() {
723+
for (_, expected) in expected.iter_mut() {
724+
taint_subtest_timeouts_by_suspicion(expected);
725+
}
726+
}
731727
}
732728
}
733-
}
734-
}
735-
736-
match write_to_file(&path, metadata::format_file(&file)) {
737-
Ok(()) => (),
738-
Err(AlreadyReportedToCommandline) => {
739-
err_found = true;
740-
}
741-
};
742-
}
743729

730+
write_to_file(&path, metadata::format_file(&file))
731+
})
732+
})
733+
.fold(false, |err_found, res| match res {
734+
Ok(()) => err_found,
735+
Err(AlreadyReportedToCommandline) => true,
736+
});
744737
if err_found {
745738
log::error!(concat!(
746-
"found one or more failures while formatting metadata, ",
739+
"found one or more failures while fixing up metadata, ",
747740
"see above for more details"
748741
));
749742
ExitCode::FAILURE
@@ -758,7 +751,8 @@ fn run(cli: Cli) -> ExitCode {
758751
orig_path: Arc<PathBuf>,
759752
inner: Test,
760753
}
761-
let tests_by_name = match read_and_parse_all_metadata(&gecko_checkout)
754+
let mut err_found = false;
755+
let tests_by_name = read_and_parse_all_metadata(&gecko_checkout)
762756
.map_ok(
763757
|(
764758
path,
@@ -789,11 +783,17 @@ fn run(cli: Cli) -> ExitCode {
789783
},
790784
)
791785
.flatten_ok()
792-
.collect::<Result<BTreeMap<_, _>, _>>()
793-
{
794-
Ok(paths) => paths,
795-
Err(AlreadyReportedToCommandline) => return ExitCode::FAILURE,
796-
};
786+
.filter_map(|res| match res {
787+
Ok(ok) => Some(ok),
788+
Err(AlreadyReportedToCommandline) => {
789+
err_found = true;
790+
None
791+
}
792+
})
793+
.collect::<BTreeMap<_, _>>();
794+
if err_found {
795+
return ExitCode::FAILURE;
796+
}
797797

798798
log::info!(concat!(
799799
"finished parsing of interesting properties ",
@@ -980,12 +980,12 @@ fn run(cli: Cli) -> ExitCode {
980980
if let Some(expected) = expected {
981981
fn analyze_test_outcome<F>(
982982
test_name: &Arc<String>,
983-
expectation: Expected<TestOutcome>,
983+
expected: Expected<TestOutcome>,
984984
mut receiver: F,
985985
) where
986986
F: FnMut(&mut dyn FnMut(&mut PerPlatformAnalysis)),
987987
{
988-
for outcome in expectation.iter() {
988+
for outcome in expected.iter() {
989989
match outcome {
990990
TestOutcome::Ok => (),
991991
// We skip this because this test _should_ contain subtests with
@@ -995,23 +995,23 @@ fn run(cli: Cli) -> ExitCode {
995995
insert_in_test_set(
996996
&mut analysis.tests_with_crashes,
997997
test_name,
998-
expectation,
998+
expected,
999999
outcome,
10001000
)
10011001
}),
10021002
TestOutcome::Error => receiver(&mut |analysis| {
10031003
insert_in_test_set(
10041004
&mut analysis.tests_with_runner_errors,
10051005
test_name,
1006-
expectation,
1006+
expected,
10071007
outcome,
10081008
)
10091009
}),
10101010
TestOutcome::Skip => receiver(&mut |analysis| {
10111011
insert_in_test_set(
10121012
&mut analysis.tests_with_disabled_or_skip,
10131013
test_name,
1014-
expectation,
1014+
expected,
10151015
outcome,
10161016
)
10171017
}),
@@ -1020,8 +1020,8 @@ fn run(cli: Cli) -> ExitCode {
10201020
}
10211021

10221022
let apply_to_specific_platforms =
1023-
|analysis: &mut Analysis, platform, expectation| {
1024-
analyze_test_outcome(&test_name, expectation, |f| {
1023+
|analysis: &mut Analysis, platform, expected| {
1024+
analyze_test_outcome(&test_name, expected, |f| {
10251025
analysis.for_platform_mut(platform, f)
10261026
})
10271027
};
@@ -1094,13 +1094,10 @@ fn run(cli: Cli) -> ExitCode {
10941094
}
10951095

10961096
let apply_to_specific_platforms =
1097-
|analysis: &mut Analysis, platform, expectation| {
1098-
analyze_subtest_outcome(
1099-
&test_name,
1100-
&subtest_name,
1101-
expectation,
1102-
|f| analysis.for_platform_mut(platform, f),
1103-
)
1097+
|analysis: &mut Analysis, platform, expected| {
1098+
analyze_subtest_outcome(&test_name, &subtest_name, expected, |f| {
1099+
analysis.for_platform_mut(platform, f)
1100+
})
11041101
};
11051102

11061103
for ((platform, _build_profile), expected) in expected.iter() {
@@ -1548,12 +1545,16 @@ fn write_to_file(path: &Path, contents: impl Display) -> Result<(), AlreadyRepor
15481545
/// deterministic if executed, but consistently exceed the timeout window offered by the test
15491546
/// runner.
15501547
fn taint_subtest_timeouts_by_suspicion(expected: &mut Expected<SubtestOutcome>) {
1551-
static PRINTED_WARNING: AtomicBool = AtomicBool::new(false);
1552-
let already_printed_warning = PRINTED_WARNING.swap(true, atomic::Ordering::Relaxed);
1553-
if !already_printed_warning {
1554-
log::info!("encountered at least one case where taint-by-suspicion is being applied…")
1555-
}
1556-
if !expected.is_disjoint(SubtestOutcome::Timeout | SubtestOutcome::NotRun) {
1548+
let timeout_and_notrun =
1549+
Expected::intermittent(SubtestOutcome::Timeout | SubtestOutcome::NotRun).unwrap();
1550+
if !expected.is_disjoint(timeout_and_notrun.inner())
1551+
&& !expected.is_superset(&timeout_and_notrun)
1552+
{
1553+
static PRINTED_WARNING: AtomicBool = AtomicBool::new(false);
1554+
let already_printed_warning = PRINTED_WARNING.swap(true, atomic::Ordering::Relaxed);
1555+
if !already_printed_warning {
1556+
log::info!("encountered at least one case where taint-by-suspicion is being applied…")
1557+
}
15571558
*expected |= SubtestOutcome::Timeout | SubtestOutcome::NotRun;
15581559
}
15591560
}

moz-webgpu-cts/src/shared.rs

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -61,9 +61,9 @@ where
6161
}
6262
}
6363

64-
fn inner(&self) -> &EnumSet<Out> {
64+
pub fn inner(&self) -> EnumSet<Out> {
6565
let Self(inner) = self;
66-
inner
66+
*inner
6767
}
6868

6969
pub fn len(&self) -> NonZeroUsize {
@@ -93,7 +93,7 @@ where
9393
where
9494
Out: std::fmt::Debug + Default + EnumSetType,
9595
{
96-
self.inner().is_superset(*rep.inner())
96+
self.inner().is_superset(rep.inner())
9797
}
9898
}
9999

0 commit comments

Comments (0)