From a0d3a43f6024c0b628ee6c76b904541466c6cc08 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Tue, 21 Apr 2026 21:57:31 +0100 Subject: [PATCH 01/26] Compute graph as build progresses --- .../livestate/LiveGraphLifecycle.java | 50 +++++++++++ .../livestate/LiveGraphPopulator.java | 69 +++++++++++++++ .../livestate/LiveGraphRegistry.java | 82 ++++++++++++++++++ .../livestate/LiveGraphSnapshot.java | 11 +++ .../livestate/LiveGraphState.java | 67 +++++++++++++++ .../treescanner/PipelineNodeGraphAdapter.java | 10 +++ .../treescanner/PipelineNodeTreeScanner.java | 52 ++++++++---- .../utils/PipelineGraphApi.java | 28 ++++++- .../utils/PipelineStepApi.java | 7 ++ .../livestate/LiveGraphLifecycleTest.java | 83 +++++++++++++++++++ 10 files changed, 440 insertions(+), 19 deletions(-) create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java create mode 100644 src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java new file mode 100644 index 000000000..314622147 --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -0,0 +1,50 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import edu.umd.cs.findbugs.annotations.NonNull; +import hudson.Extension; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import 
org.jenkinsci.plugins.workflow.flow.FlowExecutionListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Creates {@link LiveGraphState} entries at execution start / resume and evicts them on + * completion. Without this, entries are still created lazily by {@link LiveGraphPopulator} + * on first {@code onNewHead}, but {@code onResumed} guarantees the catch-up scan happens + * once up-front rather than at the first event after restart. + */ +@Extension +public class LiveGraphLifecycle extends FlowExecutionListener { + + private static final Logger logger = LoggerFactory.getLogger(LiveGraphLifecycle.class); + + @Override + public void onRunning(@NonNull FlowExecution execution) { + try { + LiveGraphRegistry.get().getOrCreate(execution); + } catch (Throwable t) { + logger.warn("pipeline-graph-view live state onRunning failed", t); + } + } + + @Override + public void onResumed(@NonNull FlowExecution execution) { + try { + LiveGraphState state = LiveGraphRegistry.get().getOrCreate(execution); + if (state != null) { + LiveGraphPopulator.catchUp(execution, state); + } + } catch (Throwable t) { + logger.warn("pipeline-graph-view live state onResumed failed", t); + } + } + + @Override + public void onCompleted(@NonNull FlowExecution execution) { + try { + LiveGraphRegistry.get().remove(execution); + } catch (Throwable t) { + logger.warn("pipeline-graph-view live state onCompleted failed", t); + } + } +} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java new file mode 100644 index 000000000..08c81b56d --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java @@ -0,0 +1,69 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import hudson.Extension; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.flow.GraphListener; 
+import org.jenkinsci.plugins.workflow.graph.FlowNode; +import org.jenkinsci.plugins.workflow.graphanalysis.DepthFirstScanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extension that captures every new {@link FlowNode} across every running execution and + * feeds it to the corresponding {@link LiveGraphState}. The downstream {@code PipelineGraphApi} + * path reads a snapshot of that state instead of walking the whole execution each time. + * + *

We use {@link GraphListener.Synchronous} rather than the async variant because callers + * expect "once a node is a head, the next API read reflects it" — async delivery creates a + * lag window where the snapshot is behind the execution, which breaks tests that check state + * at precise trigger points and would surprise anyone hitting the REST API after an event. + * The work done under the monitor is trivial ({@code ArrayList}/{@code HashSet} additions), + * so the CPS VM thread is not meaningfully blocked. Every code path is still wrapped in + * try/catch and poisons the state on failure so a bug here can never disrupt a build. + */ +@Extension +public class LiveGraphPopulator implements GraphListener.Synchronous { + + private static final Logger logger = LoggerFactory.getLogger(LiveGraphPopulator.class); + + @Override + public void onNewHead(FlowNode node) { + LiveGraphState state = null; + try { + FlowExecution execution = node.getExecution(); + state = LiveGraphRegistry.get().getOrCreate(execution); + if (state == null) { + return; // feature disabled or execution not a WorkflowRun + } + // Lazy initial catch-up: if the listener is seeing nodes for an execution it's + // never observed (plugin upgrade mid-build, Jenkins resume without onResumed + // firing first), the early history is already in the FlowExecution's storage. + // Backfill it once before processing this event. + if (state.size() == 0 && !state.hasSeen(node.getId())) { + catchUp(execution, state); + } + state.addNode(node); + } catch (Throwable t) { + // A thrown exception here propagates into the CPS VM and can abort the build. + // Poison the state so subsequent reads fall back to the scanner; log the failure + // but never rethrow. 
+ logger.warn("pipeline-graph-view live state failed; falling back to scanner", t); + if (state != null) { + state.poison(); + } + } + } + + static void catchUp(FlowExecution execution, LiveGraphState state) { + try { + DepthFirstScanner scanner = new DepthFirstScanner(); + scanner.setup(execution.getCurrentHeads()); + for (FlowNode existing : scanner) { + state.addNode(existing); + } + } catch (Throwable t) { + logger.warn("pipeline-graph-view live state catch-up failed; poisoning", t); + state.poison(); + } + } +} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java new file mode 100644 index 000000000..08c17c3d0 --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -0,0 +1,82 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import java.time.Duration; +import jenkins.util.SystemProperties; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; + +/** + * Singleton holding one {@link LiveGraphState} per in-progress run. + * Entries are created on demand by the listener / lifecycle code, removed on completion, + * and otherwise bounded by a Caffeine LRU so abandoned entries (deleted runs, listener + * bugs) don't leak. + */ +public final class LiveGraphRegistry { + + private static final LiveGraphRegistry INSTANCE = new LiveGraphRegistry(); + + public static LiveGraphRegistry get() { + return INSTANCE; + } + + private final Cache states = Caffeine.newBuilder() + .maximumSize(256) + .expireAfterAccess(Duration.ofMinutes(30)) + .build(); + + LiveGraphRegistry() {} + + /** + * Escape hatch. 
Setting this system property to {@code false} makes + * {@link #snapshot(WorkflowRun)} always return {@code null}, forcing callers to use the + * scanner fallback. Useful if a regression lands in the live-state path. + */ + private static boolean disabled() { + return !SystemProperties.getBoolean(LiveGraphRegistry.class.getName() + ".enabled", true); + } + + LiveGraphState getOrCreate(FlowExecution execution) { + if (disabled()) { + return null; + } + String key = keyFor(execution); + if (key == null) { + return null; + } + return states.get(key, LiveGraphState::new); + } + + /** + * Returns a snapshot of the live state for this run, or {@code null} if none exists + * (feature disabled, state never populated, state poisoned). Callers must treat + * {@code null} as "fall back to the scanner path." + */ + public LiveGraphSnapshot snapshot(WorkflowRun run) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? 
null : state.snapshot(); + } + + void remove(FlowExecution execution) { + String key = keyFor(execution); + if (key != null) { + states.invalidate(key); + } + } + + private static String keyFor(FlowExecution execution) { + try { + Object exec = execution.getOwner().getExecutable(); + if (exec instanceof WorkflowRun run) { + return run.getExternalizableId(); + } + return null; + } catch (Exception e) { + return null; + } + } +} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java new file mode 100644 index 000000000..608b4eb07 --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java @@ -0,0 +1,11 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import java.util.List; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +/** + * Immutable projection of a {@link LiveGraphState} at a point in time. + * Held briefly outside the state's monitor so HTTP callers can construct DTOs without + * blocking the CPS VM thread that's feeding the live state. + */ +public record LiveGraphSnapshot(List nodes, List workspaceNodes) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java new file mode 100644 index 000000000..ab4508e28 --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -0,0 +1,67 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +/** + * Per-run mutable state built up by {@link LiveGraphPopulator} as {@code GraphListener} + * events arrive. 
Reads and writes are serialised on the instance monitor; holders should + * snapshot and release quickly — the writer is the CPS VM thread and must not block. + * + *

This is a Phase 1 state: it records the raw node list plus the subset that carry a + * {@link WorkspaceAction}. Snapshotting returns immutable copies of these so the downstream + * relationship-finder / graph-builder path runs without the monitor. + */ +final class LiveGraphState { + + private final String runId; + private final List nodes = new ArrayList<>(); + private final Set seenIds = new HashSet<>(); + private final List workspaceNodes = new ArrayList<>(); + private volatile boolean poisoned = false; + + LiveGraphState(String runId) { + this.runId = runId; + } + + String getRunId() { + return runId; + } + + synchronized void addNode(FlowNode node) { + if (!seenIds.add(node.getId())) { + return; + } + nodes.add(node); + if (node.getAction(WorkspaceAction.class) != null) { + workspaceNodes.add(node); + } + } + + synchronized boolean hasSeen(String nodeId) { + return seenIds.contains(nodeId); + } + + synchronized int size() { + return nodes.size(); + } + + synchronized LiveGraphSnapshot snapshot() { + if (poisoned) { + return null; + } + return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes)); + } + + void poison() { + poisoned = true; + } + + boolean isPoisoned() { + return poisoned; + } +} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java index 88dfae3a0..9b2c1c938 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java @@ -4,11 +4,13 @@ import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphBuilderApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepBuilderApi; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; 
import java.util.stream.Collectors; +import org.jenkinsci.plugins.workflow.graph.FlowNode; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,6 +36,14 @@ public PipelineNodeGraphAdapter(WorkflowRun run) { treeScanner = new PipelineNodeTreeScanner(run); } + /** + * Builds the adapter over a pre-collected node set rather than walking the execution + * graph. Used by the live-state path. + */ + public PipelineNodeGraphAdapter(WorkflowRun run, Collection preCollectedNodes) { + treeScanner = new PipelineNodeTreeScanner(run, preCollectedNodes); + } + private final Object pipelineLock = new Object(); private final Object stepLock = new Object(); private final Object remapLock = new Object(); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index fb08fb94a..b1df5cc4b 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -59,6 +59,18 @@ public PipelineNodeTreeScanner(@NonNull WorkflowRun run) { this.build(); } + /** + * Alternate constructor that uses a caller-supplied node collection instead of walking + * the execution graph with a {@link DepthFirstScanner}. Intended for use by the + * live-state path, which has already observed every node via {@code GraphListener}. + */ + public PipelineNodeTreeScanner(@NonNull WorkflowRun run, @NonNull Collection nodes) { + this.run = run; + this.execution = run.getExecution(); + this.declarative = run.getAction(ExecutionModelAction.class) != null; + this.buildFrom(nodes); + } + /** * Builds the flow node graph. 
*/ @@ -67,22 +79,7 @@ public void build() { logger.debug("Building graph"); } if (execution != null) { - Collection nodes = getAllNodes(); - NodeRelationshipFinder finder = new NodeRelationshipFinder(); - Map relationships = finder.getNodeRelationships(nodes); - GraphBuilder builder = new GraphBuilder(nodes, relationships, this.run, this.execution); - if (isDebugEnabled) { - logger.debug("Original nodes:"); - logger.debug("{}", builder.getNodes()); - } - this.stageNodeMap = builder.getStageMapping(); - this.stepNodeMap = builder.getStepMapping(); - List remappedNodes = new ArrayList<>(this.stageNodeMap.values()); - remappedNodes.addAll(this.stepNodeMap.values()); - if (isDebugEnabled) { - logger.debug("Remapped nodes:"); - logger.debug("{}", remappedNodes); - } + buildFrom(getAllNodes()); } else { this.stageNodeMap = new LinkedHashMap<>(); this.stepNodeMap = new LinkedHashMap<>(); @@ -92,6 +89,29 @@ public void build() { } } + private void buildFrom(Collection nodes) { + if (execution == null || nodes.isEmpty()) { + this.stageNodeMap = new LinkedHashMap<>(); + this.stepNodeMap = new LinkedHashMap<>(); + return; + } + NodeRelationshipFinder finder = new NodeRelationshipFinder(); + Map relationships = finder.getNodeRelationships(nodes); + GraphBuilder builder = new GraphBuilder(nodes, relationships, this.run, this.execution); + if (isDebugEnabled) { + logger.debug("Original nodes:"); + logger.debug("{}", builder.getNodes()); + } + this.stageNodeMap = builder.getStageMapping(); + this.stepNodeMap = builder.getStepMapping(); + if (isDebugEnabled) { + List remappedNodes = new ArrayList<>(this.stageNodeMap.values()); + remappedNodes.addAll(this.stepNodeMap.values()); + logger.debug("Remapped nodes:"); + logger.debug("{}", remappedNodes); + } + } + /** * Gets all the nodes that are reachable in the graph. 
*/ diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 7c9103aa5..8c6cf0de1 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -2,6 +2,10 @@ import static java.util.Collections.emptyList; +import edu.umd.cs.findbugs.annotations.CheckForNull; +import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry; +import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphSnapshot; +import io.jenkins.plugins.pipelinegraphview.treescanner.PipelineNodeGraphAdapter; import java.io.IOException; import java.util.*; import java.util.function.Function; @@ -19,6 +23,14 @@ public class PipelineGraphApi { private static final Logger logger = LoggerFactory.getLogger(PipelineGraphApi.class); private final transient WorkflowRun run; + /** + * Non-null when the live-state path supplied a pre-collected list of nodes carrying a + * {@link WorkspaceAction}. Avoids the per-stage {@code DepthFirstScanner} walk inside + * {@link #getStageNode(FlowNodeWrapper)}. + */ + @CheckForNull + private transient List workspaceNodesOverride; + public PipelineGraphApi(WorkflowRun run) { this.run = run; } @@ -113,12 +125,13 @@ private PipelineGraph createTree(PipelineGraphBuilderApi builder) { return new PipelineGraph(stageResults, complete); } - private static String getStageNode(FlowNodeWrapper flowNodeWrapper) { + private String getStageNode(FlowNodeWrapper flowNodeWrapper) { FlowNode flowNode = flowNodeWrapper.getNode(); - DepthFirstScanner scan = new DepthFirstScanner(); logger.debug("Checking node {}", flowNode); FlowExecution execution = flowNode.getExecution(); - for (FlowNode n : scan.allNodes(execution)) { + Iterable candidates = + workspaceNodesOverride != null ? 
workspaceNodesOverride : new DepthFirstScanner().allNodes(execution); + for (FlowNode n : candidates) { WorkspaceAction ws = n.getAction(WorkspaceAction.class); if (ws != null) { logger.debug("Found workspace node: {}", n); @@ -161,6 +174,15 @@ public PipelineGraph createTree() { } PipelineGraph computeTree() { + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + try { + workspaceNodesOverride = snapshot.workspaceNodes(); + return createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes())); + } finally { + workspaceNodesOverride = null; + } + } return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run)); } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 69d9f8f60..02738125c 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -1,5 +1,8 @@ package io.jenkins.plugins.pipelinegraphview.utils; +import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry; +import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphSnapshot; +import io.jenkins.plugins.pipelinegraphview.treescanner.PipelineNodeGraphAdapter; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -121,6 +124,10 @@ public PipelineStepList getAllSteps() { PipelineStepList computeAllSteps() { // Look up the completed state before computing steps. 
boolean runIsComplete = !run.isBuilding(); + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + return getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); + } return getAllSteps(CachedPipelineNodeGraphAdaptor.instance.getFor(run), runIsComplete); } } diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java new file mode 100644 index 000000000..ebea1087b --- /dev/null +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java @@ -0,0 +1,83 @@ +package io.jenkins.plugins.pipelinegraphview.livestate; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +import hudson.model.Result; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphApi; +import io.jenkins.plugins.pipelinegraphview.utils.TestUtils; +import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.jenkinsci.plugins.workflow.test.steps.SemaphoreStep; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.jvnet.hudson.test.JenkinsRule; +import org.jvnet.hudson.test.junit.jupiter.WithJenkins; + +@WithJenkins +class LiveGraphLifecycleTest { + + private JenkinsRule j; + + @BeforeEach + void setUp(JenkinsRule j) { + this.j = j; + } + + @Test + void liveStatePopulatesDuringBuildAndEvictsOnCompletion() throws Exception { + WorkflowJob job = j.createProject(WorkflowJob.class, "live"); + job.setDefinition(new CpsFlowDefinition( + "stage('one') { echo 
'hello' }\n" + + "stage('gate') { semaphore 'wait' }\n" + + "stage('two') { echo 'done' }\n", + true)); + WorkflowRun run = job.scheduleBuild2(0).waitForStart(); + try { + SemaphoreStep.waitForStart("wait/1", run); + + LiveGraphSnapshot midSnapshot = LiveGraphRegistry.get().snapshot(run); + assertThat("live snapshot is present while run is building", midSnapshot, is(notNullValue())); + assertThat( + "live snapshot contains nodes observed so far", + midSnapshot.nodes().size(), + is(greaterThan(0))); + + // The snapshot path should produce a correct in-progress tree. + PipelineGraph midGraph = new PipelineGraphApi(run).createTree(); + assertThat(midGraph, is(notNullValue())); + } finally { + SemaphoreStep.success("wait/1", null); + j.waitForCompletion(run); + } + j.assertBuildStatus(Result.SUCCESS, run); + + // onCompleted evicts the live state; subsequent reads fall through to the scanner + // path (which, combined with the #885 disk cache, handles completed runs). + assertThat( + "live snapshot is evicted once the run completes", + LiveGraphRegistry.get().snapshot(run), + is(nullValue())); + } + + @Test + void featureFlagDisabledReturnsEmptySnapshot() throws Exception { + System.setProperty("io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry.enabled", "false"); + try { + WorkflowRun run = TestUtils.createAndRunJob(j, "flag-off", "smokeTest.jenkinsfile", Result.FAILURE); + assertThat(LiveGraphRegistry.get().snapshot(run), is(nullValue())); + + // The scanner fallback still produces a usable graph. 
+ PipelineGraph graph = new PipelineGraphApi(run).createTree(); + assertThat(graph, is(notNullValue())); + assertThat(graph.complete, is(true)); + } finally { + System.clearProperty("io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry.enabled"); + } + } +} From ecf93cf4aeb123af5c44d76d398a731a894ff0a1 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 11:22:49 +0100 Subject: [PATCH 02/26] Allow bigger cache for busier instances --- .../pipelinegraphview/livestate/LiveGraphRegistry.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 08c17c3d0..755ff207b 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -21,8 +21,10 @@ public static LiveGraphRegistry get() { return INSTANCE; } + private static final int CACHE_MAX_SIZE = SystemProperties.getInteger(LiveGraphRegistry.class.getName() + ".size", 512); + private final Cache states = Caffeine.newBuilder() - .maximumSize(256) + .maximumSize(CACHE_MAX_SIZE) .expireAfterAccess(Duration.ofMinutes(30)) .build(); From d47ee6b308981af570b603ac3b440b2bcfeb48e2 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 13:10:12 +0100 Subject: [PATCH 03/26] Fix static init order so CACHE_MAX_SIZE is set before INSTANCE --- .../pipelinegraphview/livestate/LiveGraphRegistry.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 755ff207b..8148902d6 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ 
b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -15,14 +15,19 @@ */ public final class LiveGraphRegistry { + // IMPORTANT: keep CACHE_MAX_SIZE declared BEFORE INSTANCE. Static fields initialise in + // source order, and the instance's Caffeine builder reads CACHE_MAX_SIZE — if INSTANCE + // were declared first, CACHE_MAX_SIZE would still be 0 during construction and Caffeine + // would evict every entry immediately (maximumSize(0) means "no entries allowed"). + private static final int CACHE_MAX_SIZE = + SystemProperties.getInteger(LiveGraphRegistry.class.getName() + ".size", 512); + private static final LiveGraphRegistry INSTANCE = new LiveGraphRegistry(); public static LiveGraphRegistry get() { return INSTANCE; } - private static final int CACHE_MAX_SIZE = SystemProperties.getInteger(LiveGraphRegistry.class.getName() + ".size", 512); - private final Cache states = Caffeine.newBuilder() .maximumSize(CACHE_MAX_SIZE) .expireAfterAccess(Duration.ofMinutes(30)) From 7e410c8eadb724f1d082d44e0b0367bd6fb553fb Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 13:46:00 +0100 Subject: [PATCH 04/26] Phase 2 --- .../livestate/LiveGraphRegistry.java | 46 +++++++++- .../livestate/LiveGraphSnapshot.java | 6 +- .../livestate/LiveGraphState.java | 62 +++++++++---- .../utils/PipelineGraphApi.java | 8 +- .../utils/PipelineStepApi.java | 8 +- .../livestate/LiveGraphLifecycleTest.java | 86 ++++++++++++++++++- 6 files changed, 195 insertions(+), 21 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 8148902d6..89dd59359 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -2,6 +2,8 @@ import com.github.benmanes.caffeine.cache.Cache; import 
com.github.benmanes.caffeine.cache.Caffeine; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import java.time.Duration; import jenkins.util.SystemProperties; import org.jenkinsci.plugins.workflow.flow.FlowExecution; @@ -52,7 +54,7 @@ LiveGraphState getOrCreate(FlowExecution execution) { if (key == null) { return null; } - return states.get(key, LiveGraphState::new); + return states.get(key, k -> new LiveGraphState()); } /** @@ -75,6 +77,48 @@ void remove(FlowExecution execution) { } } + /** + * Returns a previously-cached {@link PipelineGraph} for this run if it was computed at + * or after {@code minVersion}, otherwise {@code null}. Use {@link LiveGraphSnapshot#version()} + * as the argument — cache entries older than the caller's snapshot are rejected. + */ + public PipelineGraph cachedGraph(WorkflowRun run, long minVersion) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? null : state.cachedGraph(minVersion); + } + + /** Stores a {@link PipelineGraph} computed from a snapshot at {@code version}. */ + public void cacheGraph(WorkflowRun run, long version, PipelineGraph graph) { + if (disabled()) { + return; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + if (state != null) { + state.cacheGraph(version, graph); + } + } + + public PipelineStepList cachedAllSteps(WorkflowRun run, long minVersion) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? 
null : state.cachedAllSteps(minVersion); + } + + public void cacheAllSteps(WorkflowRun run, long version, PipelineStepList steps) { + if (disabled()) { + return; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + if (state != null) { + state.cacheAllSteps(version, steps); + } + } + private static String keyFor(FlowExecution execution) { try { Object exec = execution.getOwner().getExecutable(); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java index 608b4eb07..e6e4dca03 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java @@ -7,5 +7,9 @@ * Immutable projection of a {@link LiveGraphState} at a point in time. * Held briefly outside the state's monitor so HTTP callers can construct DTOs without * blocking the CPS VM thread that's feeding the live state. + * + *

{@code version} is a monotonically-increasing counter that bumps on every new flow + * node. It lets HTTP callers key the output cache so repeat polls between node arrivals + * return the cached {@code PipelineGraph} / {@code PipelineStepList} without rebuilding. */ -public record LiveGraphSnapshot(List nodes, List workspaceNodes) {} +public record LiveGraphSnapshot(List nodes, List workspaceNodes, long version) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index ab4508e28..1936ddb64 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -1,5 +1,7 @@ package io.jenkins.plugins.pipelinegraphview.livestate; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -12,25 +14,27 @@ * events arrive. Reads and writes are serialised on the instance monitor; holders should * snapshot and release quickly — the writer is the CPS VM thread and must not block. * - *

This is a Phase 1 state: it records the raw node list plus the subset that carry a - * {@link WorkspaceAction}. Snapshotting returns immutable copies of these so the downstream - * relationship-finder / graph-builder path runs without the monitor. + *

+ * <p>Two things live here:
+ * <ul>
*/ final class LiveGraphState { - private final String runId; private final List nodes = new ArrayList<>(); private final Set seenIds = new HashSet<>(); private final List workspaceNodes = new ArrayList<>(); + private long version = 0; private volatile boolean poisoned = false; - LiveGraphState(String runId) { - this.runId = runId; - } - - String getRunId() { - return runId; - } + // Output cache. Volatile reference to an immutable (version, value) tuple so readers + // and writers never see a torn pair. + private volatile VersionedCache cachedGraph; + private volatile VersionedCache cachedAllSteps; synchronized void addNode(FlowNode node) { if (!seenIds.add(node.getId())) { @@ -40,6 +44,7 @@ synchronized void addNode(FlowNode node) { if (node.getAction(WorkspaceAction.class) != null) { workspaceNodes.add(node); } + version++; } synchronized boolean hasSeen(String nodeId) { @@ -54,14 +59,41 @@ synchronized LiveGraphSnapshot snapshot() { if (poisoned) { return null; } - return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes)); + return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); + } + + /** + * Returns the cached graph if it was computed at or after {@code minVersion}. Returning + * a newer cache than requested is intentional — the caller's snapshot can only become + * staler, so a newer output is strictly more accurate. + */ + PipelineGraph cachedGraph(long minVersion) { + VersionedCache cached = cachedGraph; + return (cached != null && cached.version >= minVersion) ? cached.value : null; + } + + void cacheGraph(long version, PipelineGraph graph) { + VersionedCache current = cachedGraph; + if (current == null || current.version < version) { + cachedGraph = new VersionedCache<>(version, graph); + } + } + + PipelineStepList cachedAllSteps(long minVersion) { + VersionedCache cached = cachedAllSteps; + return (cached != null && cached.version >= minVersion) ? 
cached.value : null; + } + + void cacheAllSteps(long version, PipelineStepList steps) { + VersionedCache current = cachedAllSteps; + if (current == null || current.version < version) { + cachedAllSteps = new VersionedCache<>(version, steps); + } } void poison() { poisoned = true; } - boolean isPoisoned() { - return poisoned; - } + private record VersionedCache(long version, T value) {} } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 8c6cf0de1..5aa1ad716 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -176,9 +176,15 @@ public PipelineGraph createTree() { PipelineGraph computeTree() { LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { + PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, snapshot.version()); + if (cached != null) { + return cached; + } try { workspaceNodesOverride = snapshot.workspaceNodes(); - return createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes())); + PipelineGraph computed = createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes())); + LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); + return computed; } finally { workspaceNodesOverride = null; } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 02738125c..e90e284ef 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -126,7 +126,13 @@ PipelineStepList computeAllSteps() { boolean runIsComplete = !run.isBuilding(); LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != 
null) { - return getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); + PipelineStepList cached = LiveGraphRegistry.get().cachedAllSteps(run, snapshot.version()); + if (cached != null) { + return cached; + } + PipelineStepList computed = getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); + LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); + return computed; } return getAllSteps(CachedPipelineNodeGraphAdaptor.instance.getFor(run), runIsComplete); } diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java index ebea1087b..482e90b6b 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java @@ -3,12 +3,16 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import hudson.model.Result; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphApi; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import io.jenkins.plugins.pipelinegraphview.utils.TestUtils; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.job.WorkflowJob; @@ -22,6 +26,8 @@ @WithJenkins class LiveGraphLifecycleTest { + private static final String ENABLED_PROPERTY = LiveGraphRegistry.class.getName() + ".enabled"; + private JenkinsRule j; @BeforeEach @@ -47,6 +53,7 @@ void 
liveStatePopulatesDuringBuildAndEvictsOnCompletion() throws Exception { "live snapshot contains nodes observed so far", midSnapshot.nodes().size(), is(greaterThan(0))); + assertThat("snapshot exposes the state's version counter", midSnapshot.version(), is(greaterThan(0L))); // The snapshot path should produce a correct in-progress tree. PipelineGraph midGraph = new PipelineGraphApi(run).createTree(); @@ -67,7 +74,7 @@ void liveStatePopulatesDuringBuildAndEvictsOnCompletion() throws Exception { @Test void featureFlagDisabledReturnsEmptySnapshot() throws Exception { - System.setProperty("pipelinegraphview.livestate.enabled", "false"); + System.setProperty(ENABLED_PROPERTY, "false"); try { WorkflowRun run = TestUtils.createAndRunJob(j, "flag-off", "smokeTest.jenkinsfile", Result.FAILURE); assertThat(LiveGraphRegistry.get().snapshot(run), is(nullValue())); @@ -77,7 +84,82 @@ void featureFlagDisabledReturnsEmptySnapshot() throws Exception { assertThat(graph, is(notNullValue())); assertThat(graph.complete, is(true)); } finally { - System.clearProperty("pipelinegraphview.livestate.enabled"); + System.clearProperty(ENABLED_PROPERTY); + } + } + + @Test + void repeatCallsReturnCachedDtoWhenNoNewNodes() throws Exception { + WorkflowJob job = j.createProject(WorkflowJob.class, "cache-hit"); + job.setDefinition(new CpsFlowDefinition( + "stage('one') { echo 'hello' }\n" + + "stage('gate') { semaphore 'wait' }\n" + + "stage('two') { echo 'done' }\n", + true)); + WorkflowRun run = job.scheduleBuild2(0).waitForStart(); + try { + SemaphoreStep.waitForStart("wait/1", run); + + PipelineGraphApi graphApi = new PipelineGraphApi(run); + PipelineStepApi stepApi = new PipelineStepApi(run); + + PipelineGraph firstGraph = graphApi.createTree(); + PipelineGraph secondGraph = graphApi.createTree(); + PipelineStepList firstSteps = stepApi.getAllSteps(); + PipelineStepList secondSteps = stepApi.getAllSteps(); + + // Same underlying state version → cache hit → identical instance. 
+ assertThat("repeat createTree returns cached instance", secondGraph, is(sameInstance(firstGraph))); + assertThat("repeat getAllSteps returns cached instance", secondSteps, is(sameInstance(firstSteps))); + } finally { + SemaphoreStep.success("wait/1", null); + j.waitForCompletion(run); + } + } + + @Test + void newNodeInvalidatesOutputCache() throws Exception { + WorkflowJob job = j.createProject(WorkflowJob.class, "cache-miss"); + job.setDefinition(new CpsFlowDefinition( + "stage('a') { semaphore 'gateA' }\n" + "stage('b') { semaphore 'gateB' }\n", true)); + WorkflowRun run = job.scheduleBuild2(0).waitForStart(); + try { + SemaphoreStep.waitForStart("gateA/1", run); + PipelineGraphApi api = new PipelineGraphApi(run); + + PipelineGraph beforeAdvance = api.createTree(); + LiveGraphSnapshot snapshotBefore = LiveGraphRegistry.get().snapshot(run); + assertThat("snapshot present before advance", snapshotBefore, is(notNullValue())); + long versionBefore = snapshotBefore.version(); + + // Advance the pipeline — new flow nodes are added which bump the state's version + // and should invalidate the cached DTO. + SemaphoreStep.success("gateA/1", null); + SemaphoreStep.waitForStart("gateB/1", run); + + LiveGraphSnapshot snapshotAfter = LiveGraphRegistry.get().snapshot(run); + assertThat("snapshot present after advance", snapshotAfter, is(notNullValue())); + assertThat("new nodes bumped the version", snapshotAfter.version(), is(greaterThan(versionBefore))); + + PipelineGraph afterAdvance = api.createTree(); + assertThat( + "cache was invalidated; a fresh graph was computed", + afterAdvance, + is(not(sameInstance(beforeAdvance)))); + } finally { + // Release both gates so the build can always complete even if an assertion + // threw before we reached the second semaphore. + try { + SemaphoreStep.success("gateA/1", null); + } catch (Exception ignored) { + // Already released or never reached — fine. 
+ } + try { + SemaphoreStep.success("gateB/1", null); + } catch (Exception ignored) { + // Ditto. + } + j.waitForCompletion(run); } } } From 681d36f9a40748b257068affad99f91e07abbc08 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 21:35:29 +0100 Subject: [PATCH 05/26] Cleanup log messages --- .../pipelinegraphview/livestate/LiveGraphLifecycle.java | 6 +++--- .../pipelinegraphview/livestate/LiveGraphPopulator.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 314622147..02de9121c 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -23,7 +23,7 @@ public void onRunning(@NonNull FlowExecution execution) { try { LiveGraphRegistry.get().getOrCreate(execution); } catch (Throwable t) { - logger.warn("pipeline-graph-view live state onRunning failed", t); + logger.warn("onRunning failed", t); } } @@ -35,7 +35,7 @@ public void onResumed(@NonNull FlowExecution execution) { LiveGraphPopulator.catchUp(execution, state); } } catch (Throwable t) { - logger.warn("pipeline-graph-view live state onResumed failed", t); + logger.warn("onResumed failed", t); } } @@ -44,7 +44,7 @@ public void onCompleted(@NonNull FlowExecution execution) { try { LiveGraphRegistry.get().remove(execution); } catch (Throwable t) { - logger.warn("pipeline-graph-view live state onCompleted failed", t); + logger.warn("onCompleted failed", t); } } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java index 08c81b56d..1895deea2 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java +++ 
b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java @@ -47,7 +47,7 @@ public void onNewHead(FlowNode node) { // A thrown exception here propagates into the CPS VM and can abort the build. // Poison the state so subsequent reads fall back to the scanner; log the failure // but never rethrow. - logger.warn("pipeline-graph-view live state failed; falling back to scanner", t); + logger.warn("live state failed; falling back to scanner", t); if (state != null) { state.poison(); } @@ -62,7 +62,7 @@ static void catchUp(FlowExecution execution, LiveGraphState state) { state.addNode(existing); } } catch (Throwable t) { - logger.warn("pipeline-graph-view live state catch-up failed; poisoning", t); + logger.warn("catch-up failed; poisoning state", t); state.poison(); } } From b8464cd3e4d4c0824f9eded11ca0c954a852b9f4 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 21:45:30 +0100 Subject: [PATCH 06/26] Persist to disk the result --- .../livestate/LiveGraphLifecycle.java | 45 ++++++++++++++++--- .../livestate/LiveGraphState.java | 21 ++++++--- .../utils/PipelineGraphApi.java | 2 +- .../utils/PipelineGraphViewCache.java | 18 ++++++++ .../utils/PipelineStepApi.java | 2 +- .../livestate/LiveGraphLifecycleTest.java | 8 +++- .../utils/PipelineGraphViewCacheTest.java | 6 ++- 7 files changed, 85 insertions(+), 17 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 02de9121c..626c5015e 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -2,16 +2,22 @@ import edu.umd.cs.findbugs.annotations.NonNull; import hudson.Extension; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; +import 
io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphApi; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphViewCache; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.flow.FlowExecutionListener; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Creates {@link LiveGraphState} entries at execution start / resume and evicts them on - * completion. Without this, entries are still created lazily by {@link LiveGraphPopulator} - * on first {@code onNewHead}, but {@code onResumed} guarantees the catch-up scan happens - * once up-front rather than at the first event after restart. + * Creates {@link LiveGraphState} entries at execution start / resume and handles the + * handoff to the on-disk cache at completion. Without the completion handoff, the first + * HTTP read after the build finishes would fall through to a full scanner sweep — wasting + * the work the live-state path already did. */ @Extension public class LiveGraphLifecycle extends FlowExecutionListener { @@ -42,9 +48,36 @@ public void onResumed(@NonNull FlowExecution execution) { @Override public void onCompleted(@NonNull FlowExecution execution) { try { - LiveGraphRegistry.get().remove(execution); + WorkflowRun run = workflowRunFor(execution); + if (run != null) { + // Compute one last time while the live state is still in the registry; this + // path hits the snapshot/cache rather than the scanner. + PipelineGraph graph = new PipelineGraphApi(run).computeTree(); + PipelineStepList allSteps = new PipelineStepApi(run).computeAllSteps(); + // WorkflowRun.isBuilding() can still be true here even though FlowExecution + // is complete; rebuild the step list with runIsComplete=true so the persisted + // copy reflects reality. 
PipelineGraph.complete comes from + // FlowExecution.isComplete() and is already correct. + PipelineStepList finalSteps = new PipelineStepList(allSteps.steps, true); + PipelineGraphViewCache.get().seed(run, graph, finalSteps); + } } catch (Throwable t) { - logger.warn("onCompleted failed", t); + logger.warn("seeding disk cache on completion failed", t); + } finally { + try { + LiveGraphRegistry.get().remove(execution); + } catch (Throwable t) { + logger.warn("state eviction on completion failed", t); + } + } + } + + private static WorkflowRun workflowRunFor(FlowExecution execution) { + try { + Object exec = execution.getOwner().getExecutable(); + return exec instanceof WorkflowRun r ? r : null; + } catch (Exception e) { + return null; } } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 1936ddb64..0f303a06b 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -16,8 +16,11 @@ * *

 * <p>Two things live here:
 * <ul>
    - *
- * <li>The raw {@link FlowNode} list (plus the {@link WorkspaceAction}-carrying subset)
- * that the downstream relationship-finder / graph-builder path consumes.
  • + *
+ * <li>The raw {@link FlowNode} list that the downstream relationship-finder /
+ * graph-builder path consumes. The {@link WorkspaceAction}-carrying subset used by
+ * agent detection is derived at snapshot time by iterating this list, not at
+ * add-time — {@code WorkspaceAction} can be attached to a node after {@code onNewHead}
+ * fires.
  • *
 * <li>A small cache of the last-computed {@link PipelineGraph} / {@link PipelineStepList}
 * keyed by a monotonic version counter that bumps on every {@link #addNode}. HTTP
 * polls that hit between node arrivals return the cached DTO verbatim — no rebuild.
  • @@ -27,7 +30,6 @@ final class LiveGraphState { private final List nodes = new ArrayList<>(); private final Set seenIds = new HashSet<>(); - private final List workspaceNodes = new ArrayList<>(); private long version = 0; private volatile boolean poisoned = false; @@ -41,9 +43,6 @@ synchronized void addNode(FlowNode node) { return; } nodes.add(node); - if (node.getAction(WorkspaceAction.class) != null) { - workspaceNodes.add(node); - } version++; } @@ -59,6 +58,16 @@ synchronized LiveGraphSnapshot snapshot() { if (poisoned) { return null; } + // Filter for WorkspaceAction at snapshot time rather than at addNode time: + // WorkspaceAction is attached to a block-start node when the workspace is allocated, + // which can happen AFTER onNewHead has already fired for that node. A snapshot-time + // scan always observes the latest action state on each captured FlowNode. + List workspaceNodes = new ArrayList<>(); + for (FlowNode n : nodes) { + if (n.getAction(WorkspaceAction.class) != null) { + workspaceNodes.add(n); + } + } return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 5aa1ad716..c37d549ae 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -173,7 +173,7 @@ public PipelineGraph createTree() { return PipelineGraphViewCache.get().getGraph(run, this::computeTree); } - PipelineGraph computeTree() { + public PipelineGraph computeTree() { LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, snapshot.version()); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java 
b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java index bf1ccd444..c1ac37ef2 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java @@ -69,6 +69,24 @@ public PipelineStepList getAllSteps(WorkflowRun run, Supplier } } + /** + * Writes a final graph and step list directly to disk, bypassing the {@code isBuilding} + * guard used by {@link #getGraph} / {@link #getAllSteps}. Intended for use at + * {@code FlowExecutionListener.onCompleted}, where the execution is known complete but + * {@code WorkflowRun.isBuilding()} may not have flipped yet. Calling this avoids wasting + * the work already done by the live-state path: without it, the first read after + * completion falls through to a full scanner sweep. + */ + public void seed(WorkflowRun run, PipelineGraph graph, PipelineStepList allSteps) { + CachedPayload payload = load(run); + synchronized (payload) { + payload.graph = graph; + payload.allSteps = allSteps; + payload.schemaVersion = SCHEMA_VERSION; + write(run, payload); + } + } + private CachedPayload load(WorkflowRun run) { return memCache.get(run.getExternalizableId(), k -> readFromDisk(run)); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index e90e284ef..4e06d9af5 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -121,7 +121,7 @@ public PipelineStepList getAllSteps() { return PipelineGraphViewCache.get().getAllSteps(run, this::computeAllSteps); } - PipelineStepList computeAllSteps() { + public PipelineStepList computeAllSteps() { // Look up the completed state before computing steps. 
boolean runIsComplete = !run.isBuilding(); LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java index 482e90b6b..aea23f141 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycleTest.java @@ -14,6 +14,7 @@ import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import io.jenkins.plugins.pipelinegraphview.utils.TestUtils; +import java.io.File; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; @@ -64,12 +65,15 @@ void liveStatePopulatesDuringBuildAndEvictsOnCompletion() throws Exception { } j.assertBuildStatus(Result.SUCCESS, run); - // onCompleted evicts the live state; subsequent reads fall through to the scanner - // path (which, combined with the #885 disk cache, handles completed runs). + // onCompleted evicts the live state; subsequent reads fall through to the disk cache + // that onCompleted seeded (so the first post-completion request doesn't re-scan). 
assertThat( "live snapshot is evicted once the run completes", LiveGraphRegistry.get().snapshot(run), is(nullValue())); + File cacheFile = new File(run.getRootDir(), "pipeline-graph-view-cache.xml"); + assertThat("disk cache file was written at completion", cacheFile.exists(), is(true)); + assertThat("disk cache file is non-empty", cacheFile.length(), is(greaterThan(0L))); } @Test diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCacheTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCacheTest.java index 5d2e10af7..d7fd6233c 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCacheTest.java +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCacheTest.java @@ -44,7 +44,11 @@ void setUp(JenkinsRule j) { void coldCache_computesAndWritesFile() throws Exception { WorkflowRun run = TestUtils.createAndRunJob(j, "cold", "smokeTest.jenkinsfile", Result.FAILURE); File cacheFile = new File(run.getRootDir(), CACHE_FILE_NAME); - assertThat("no cache file before first call", cacheFile.exists(), is(false)); + // LiveGraphLifecycle#onCompleted now seeds the on-disk cache at build completion, + // so the file usually exists by this point. Delete it to exercise the cold-cache + // path of getGraph without changing the rest of the test's intent. 
+ Files.deleteIfExists(cacheFile.toPath()); + cache.invalidateMemory(); AtomicInteger computes = new AtomicInteger(); PipelineGraph graph = cache.getGraph(run, () -> { From 23e72c5ac46c89195361c115b9b4590c45262a41 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 21:51:22 +0100 Subject: [PATCH 07/26] Reverse nodes order --- .../pipelinegraphview/livestate/LiveGraphState.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 0f303a06b..3f263c2a8 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -62,8 +62,14 @@ synchronized LiveGraphSnapshot snapshot() { // WorkspaceAction is attached to a block-start node when the workspace is allocated, // which can happen AFTER onNewHead has already fired for that node. A snapshot-time // scan always observes the latest action state on each captured FlowNode. + // + // The list is built newest-first (reverse insertion order) to match the iteration + // order of DepthFirstScanner (from current heads backward): PipelineGraphApi#getStageNode + // returns on the first match, and for nested agents the innermost workspace is the + // more-specific match — the one a later-created inner `node {}` block sits in. 
List workspaceNodes = new ArrayList<>(); - for (FlowNode n : nodes) { + for (int i = nodes.size() - 1; i >= 0; i--) { + FlowNode n = nodes.get(i); if (n.getAction(WorkspaceAction.class) != null) { workspaceNodes.add(n); } From 2cc823da5c150002409283b7af3e5d0daf3be8fc Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Wed, 22 Apr 2026 21:58:23 +0100 Subject: [PATCH 08/26] Fix races --- .../livestate/LiveGraphState.java | 40 +++++++++++-------- .../utils/PipelineGraphApi.java | 37 +++++++---------- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 3f263c2a8..e1f81b36d 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -33,10 +33,16 @@ final class LiveGraphState { private long version = 0; private volatile boolean poisoned = false; - // Output cache. Volatile reference to an immutable (version, value) tuple so readers - // and writers never see a torn pair. - private volatile VersionedCache cachedGraph; - private volatile VersionedCache cachedAllSteps; + // Memoise the last snapshot. Poll-frequent readers at the same version skip the O(N) + // copy and workspace scan under the monitor — important because the writer (addNode) + // is the CPS VM thread and must not block for long. + private LiveGraphSnapshot lastSnapshot; + + // Output cache. Mutations synchronise on the instance monitor so concurrent writers + // cannot regress a newer version with an older one (check-then-set on a volatile + // reference alone would be racy). 
+ private VersionedCache cachedGraph; + private VersionedCache cachedAllSteps; synchronized void addNode(FlowNode node) { if (!seenIds.add(node.getId())) { @@ -58,6 +64,9 @@ synchronized LiveGraphSnapshot snapshot() { if (poisoned) { return null; } + if (lastSnapshot != null && lastSnapshot.version() == version) { + return lastSnapshot; + } // Filter for WorkspaceAction at snapshot time rather than at addNode time: // WorkspaceAction is attached to a block-start node when the workspace is allocated, // which can happen AFTER onNewHead has already fired for that node. A snapshot-time @@ -74,7 +83,8 @@ synchronized LiveGraphSnapshot snapshot() { workspaceNodes.add(n); } } - return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); + lastSnapshot = new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); + return lastSnapshot; } /** @@ -82,26 +92,22 @@ synchronized LiveGraphSnapshot snapshot() { * a newer cache than requested is intentional — the caller's snapshot can only become * staler, so a newer output is strictly more accurate. */ - PipelineGraph cachedGraph(long minVersion) { - VersionedCache cached = cachedGraph; - return (cached != null && cached.version >= minVersion) ? cached.value : null; + synchronized PipelineGraph cachedGraph(long minVersion) { + return (cachedGraph != null && cachedGraph.version >= minVersion) ? cachedGraph.value : null; } - void cacheGraph(long version, PipelineGraph graph) { - VersionedCache current = cachedGraph; - if (current == null || current.version < version) { + synchronized void cacheGraph(long version, PipelineGraph graph) { + if (cachedGraph == null || cachedGraph.version < version) { cachedGraph = new VersionedCache<>(version, graph); } } - PipelineStepList cachedAllSteps(long minVersion) { - VersionedCache cached = cachedAllSteps; - return (cached != null && cached.version >= minVersion) ? 
cached.value : null; + synchronized PipelineStepList cachedAllSteps(long minVersion) { + return (cachedAllSteps != null && cachedAllSteps.version >= minVersion) ? cachedAllSteps.value : null; } - void cacheAllSteps(long version, PipelineStepList steps) { - VersionedCache current = cachedAllSteps; - if (current == null || current.version < version) { + synchronized void cacheAllSteps(long version, PipelineStepList steps) { + if (cachedAllSteps == null || cachedAllSteps.version < version) { cachedAllSteps = new VersionedCache<>(version, steps); } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index c37d549ae..e7849d5e5 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -23,19 +23,12 @@ public class PipelineGraphApi { private static final Logger logger = LoggerFactory.getLogger(PipelineGraphApi.class); private final transient WorkflowRun run; - /** - * Non-null when the live-state path supplied a pre-collected list of nodes carrying a - * {@link WorkspaceAction}. Avoids the per-stage {@code DepthFirstScanner} walk inside - * {@link #getStageNode(FlowNodeWrapper)}. 
- */ - @CheckForNull - private transient List workspaceNodesOverride; - public PipelineGraphApi(WorkflowRun run) { this.run = run; } - private List getPipelineNodes(PipelineGraphBuilderApi builder) { + private List getPipelineNodes( + PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { return builder.getPipelineNodes().stream() .map(flowNodeWrapper -> new PipelineStageInternal( flowNodeWrapper.getId(), // TODO no need to parse it BO returns a string even though the @@ -49,7 +42,7 @@ private List getPipelineNodes(PipelineGraphBuilderApi bui flowNodeWrapper.getDisplayName(), // TODO blue ocean uses timing information: "Passed in 0s" flowNodeWrapper.isSynthetic(), flowNodeWrapper.getTiming(), - getStageNode(flowNodeWrapper))) + getStageNode(flowNodeWrapper, workspaceNodes))) .collect(Collectors.toList()); } @@ -64,7 +57,7 @@ private Function mapper( }; } - private PipelineGraph createTree(PipelineGraphBuilderApi builder) { + private PipelineGraph createTree(PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { FlowExecution execution = run.getExecution(); if (execution == null) { // If we don't have an execution - e.g. if the Pipeline has a syntax error - @@ -78,7 +71,7 @@ private PipelineGraph createTree(PipelineGraphBuilderApi builder) { // We want to remap children here, so we don't update the parents of the // original objects - as // these are completely new representations. 
- List stages = getPipelineNodes(builder); + List stages = getPipelineNodes(builder, workspaceNodes); // Get InputAction once for all stages InputAction inputAction = run.getAction(InputAction.class); @@ -125,12 +118,14 @@ private PipelineGraph createTree(PipelineGraphBuilderApi builder) { return new PipelineGraph(stageResults, complete); } - private String getStageNode(FlowNodeWrapper flowNodeWrapper) { + private static String getStageNode(FlowNodeWrapper flowNodeWrapper, @CheckForNull List workspaceNodes) { FlowNode flowNode = flowNodeWrapper.getNode(); logger.debug("Checking node {}", flowNode); FlowExecution execution = flowNode.getExecution(); + // When the caller supplies a pre-filtered workspace-node list (the live-state path), + // iterate that; otherwise fall back to scanning the whole execution graph. Iterable candidates = - workspaceNodesOverride != null ? workspaceNodesOverride : new DepthFirstScanner().allNodes(execution); + workspaceNodes != null ? workspaceNodes : new DepthFirstScanner().allNodes(execution); for (FlowNode n : candidates) { WorkspaceAction ws = n.getAction(WorkspaceAction.class); if (ws != null) { @@ -180,15 +175,11 @@ public PipelineGraph computeTree() { if (cached != null) { return cached; } - try { - workspaceNodesOverride = snapshot.workspaceNodes(); - PipelineGraph computed = createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes())); - LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); - return computed; - } finally { - workspaceNodesOverride = null; - } + PipelineGraph computed = + createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes()), snapshot.workspaceNodes()); + LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); + return computed; } - return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run)); + return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run), null); } } From 964e9d88ece7b33560ca4ee8819916ff584e11ef Mon Sep 17 00:00:00 2001 From: Tim 
Jacomb Date: Wed, 22 Apr 2026 22:10:31 +0100 Subject: [PATCH 09/26] Remove catchUp --- .../livestate/LiveGraphLifecycle.java | 12 +++++++++++- .../livestate/LiveGraphPopulator.java | 19 ++++++++++++------- .../livestate/LiveGraphState.java | 13 ++++++++++++- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 626c5015e..13ea08f5f 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -27,7 +27,12 @@ public class LiveGraphLifecycle extends FlowExecutionListener { @Override public void onRunning(@NonNull FlowExecution execution) { try { - LiveGraphRegistry.get().getOrCreate(execution); + // Fresh execution — no history to catch up, so mark ready immediately. The + // listener will populate nodes as they arrive via onNewHead. + LiveGraphState state = LiveGraphRegistry.get().getOrCreate(execution); + if (state != null) { + state.markReady(); + } } catch (Throwable t) { logger.warn("onRunning failed", t); } @@ -36,9 +41,14 @@ public void onRunning(@NonNull FlowExecution execution) { @Override public void onResumed(@NonNull FlowExecution execution) { try { + // Resume after a Jenkins restart. The execution's persisted graph may contain + // nodes from before the restart that our in-memory state doesn't know about, + // so catch up here (safe — this runs on a Jenkins event thread, not the CPS VM) + // before flipping the state to ready. 
LiveGraphState state = LiveGraphRegistry.get().getOrCreate(execution); if (state != null) { LiveGraphPopulator.catchUp(execution, state); + state.markReady(); } } catch (Throwable t) { logger.warn("onResumed failed", t); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java index 1895deea2..729be9878 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java @@ -20,6 +20,13 @@ * The work done under the monitor is trivial ({@code ArrayList}/{@code HashSet} additions), * so the CPS VM thread is not meaningfully blocked. Every code path is still wrapped in * try/catch and poisons the state on failure so a bug here can never disrupt a build. + * + *

    The javadoc also forbids blocking work here, which rules out any {@code DepthFirstScanner} + * catch-up walk inside this method. {@link LiveGraphState} starts unready and is only + * marked ready by {@link LiveGraphLifecycle} (on an event thread, not the CPS VM). If + * {@code onNewHead} fires for an execution the lifecycle never saw (e.g. plugin installed + * mid-build), the state stays unready, {@code snapshot()} keeps returning {@code null}, + * and HTTP readers fall back to the scanner for the remainder of that run. */ @Extension public class LiveGraphPopulator implements GraphListener.Synchronous { @@ -35,13 +42,6 @@ public void onNewHead(FlowNode node) { if (state == null) { return; // feature disabled or execution not a WorkflowRun } - // Lazy initial catch-up: if the listener is seeing nodes for an execution it's - // never observed (plugin upgrade mid-build, Jenkins resume without onResumed - // firing first), the early history is already in the FlowExecution's storage. - // Backfill it once before processing this event. - if (state.size() == 0 && !state.hasSeen(node.getId())) { - catchUp(execution, state); - } state.addNode(node); } catch (Throwable t) { // A thrown exception here propagates into the CPS VM and can abort the build. @@ -54,6 +54,11 @@ public void onNewHead(FlowNode node) { } } + /** + * Backfills the live state from the execution's persisted graph. Called only from + * {@link LiveGraphLifecycle#onResumed}, which runs on a Jenkins event thread — never + * from a {@link GraphListener.Synchronous} path on the CPS VM. 
+ */ static void catchUp(FlowExecution execution, LiveGraphState state) { try { DepthFirstScanner scanner = new DepthFirstScanner(); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index e1f81b36d..41f5754b7 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -33,6 +33,13 @@ final class LiveGraphState { private long version = 0; private volatile boolean poisoned = false; + // Starts unready. Lifecycle callbacks ({@link LiveGraphLifecycle}) run on Jenkins event + // threads and flip this on after they've done any needed catch-up. If lifecycle never + // fires for an execution (plugin installed while the build was running), the state stays + // unready and HTTP readers fall back to the scanner path — we can't safely backfill from + // the CPS VM thread inside {@code onNewHead}. + private boolean ready = false; + // Memoise the last snapshot. Poll-frequent readers at the same version skip the O(N) // copy and workspace scan under the monitor — important because the writer (addNode) // is the CPS VM thread and must not block for long. 
@@ -61,7 +68,7 @@ synchronized int size() { } synchronized LiveGraphSnapshot snapshot() { - if (poisoned) { + if (poisoned || !ready) { return null; } if (lastSnapshot != null && lastSnapshot.version() == version) { @@ -116,5 +123,9 @@ void poison() { poisoned = true; } + synchronized void markReady() { + ready = true; + } + private record VersionedCache(long version, T value) {} } From ec01eff0be4b8fc6acabfa0770c02fbed50004fe Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 09:32:02 +0100 Subject: [PATCH 10/26] Address copilot review --- .../livestate/LiveGraphState.java | 17 ++++++----------- .../utils/PipelineGraphApi.java | 9 +++++++++ .../utils/PipelineStepApi.java | 8 ++++++++ 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 41f5754b7..aa7515128 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -40,14 +40,13 @@ final class LiveGraphState { // the CPS VM thread inside {@code onNewHead}. private boolean ready = false; - // Memoise the last snapshot. Poll-frequent readers at the same version skip the O(N) - // copy and workspace scan under the monitor — important because the writer (addNode) - // is the CPS VM thread and must not block for long. - private LiveGraphSnapshot lastSnapshot; - // Output cache. Mutations synchronise on the instance monitor so concurrent writers // cannot regress a newer version with an older one (check-then-set on a volatile - // reference alone would be racy). + // reference alone would be racy). 
Repeat polls at the same version hit this cache and + // skip rebuilding the DTOs entirely — that's the real savings, not snapshot-level + // memoisation (which would make the WorkspaceAction subset stale: actions can be + // attached to an existing FlowNode after onNewHead has already fired, without bumping + // the node-count version). private VersionedCache cachedGraph; private VersionedCache cachedAllSteps; @@ -71,9 +70,6 @@ synchronized LiveGraphSnapshot snapshot() { if (poisoned || !ready) { return null; } - if (lastSnapshot != null && lastSnapshot.version() == version) { - return lastSnapshot; - } // Filter for WorkspaceAction at snapshot time rather than at addNode time: // WorkspaceAction is attached to a block-start node when the workspace is allocated, // which can happen AFTER onNewHead has already fired for that node. A snapshot-time @@ -90,8 +86,7 @@ synchronized LiveGraphSnapshot snapshot() { workspaceNodes.add(n); } } - lastSnapshot = new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); - return lastSnapshot; + return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); } /** diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index e7849d5e5..7dcdfc360 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -16,6 +16,8 @@ import org.jenkinsci.plugins.workflow.graphanalysis.DepthFirstScanner; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.jenkinsci.plugins.workflow.support.steps.input.InputAction; +import org.kohsuke.accmod.Restricted; +import org.kohsuke.accmod.restrictions.NoExternalUse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -168,6 +170,13 @@ public PipelineGraph createTree() { return 
PipelineGraphViewCache.get().getGraph(run, this::computeTree); } + /** + * Internal: direct uncached compute path, for use by + * {@link io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphLifecycle} to build a + * final graph at completion without going through {@code PipelineGraphViewCache} (which + * would skip caching while {@code WorkflowRun.isBuilding()} is true). + */ + @Restricted(NoExternalUse.class) public PipelineGraph computeTree() { LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 4e06d9af5..549820536 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -8,6 +8,8 @@ import java.util.stream.Collectors; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.jenkinsci.plugins.workflow.support.steps.input.InputStep; +import org.kohsuke.accmod.Restricted; +import org.kohsuke.accmod.restrictions.NoExternalUse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -121,6 +123,12 @@ public PipelineStepList getAllSteps() { return PipelineGraphViewCache.get().getAllSteps(run, this::computeAllSteps); } + /** + * Internal: direct uncached compute path, for use by + * {@link io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphLifecycle} to build a + * final step list at completion without going through {@code PipelineGraphViewCache}. + */ + @Restricted(NoExternalUse.class) public PipelineStepList computeAllSteps() { // Look up the completed state before computing steps. 
boolean runIsComplete = !run.isBuilding(); From a31dcfd971f70e46f21ef34c3e112d94e7e2f454 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 09:37:44 +0100 Subject: [PATCH 11/26] Cleanup javadoc --- .../livestate/LiveGraphLifecycle.java | 24 ++++++--------- .../livestate/LiveGraphPopulator.java | 22 ++++---------- .../livestate/LiveGraphRegistry.java | 1 - .../livestate/LiveGraphSnapshot.java | 10 ++----- .../livestate/LiveGraphState.java | 29 ++++--------------- .../treescanner/PipelineNodeGraphAdapter.java | 5 +--- .../treescanner/PipelineNodeTreeScanner.java | 5 ++-- .../utils/PipelineGraphApi.java | 9 +----- .../utils/PipelineStepApi.java | 6 +--- 9 files changed, 28 insertions(+), 83 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 13ea08f5f..a7a15d1ab 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -14,10 +14,9 @@ import org.slf4j.LoggerFactory; /** - * Creates {@link LiveGraphState} entries at execution start / resume and handles the - * handoff to the on-disk cache at completion. Without the completion handoff, the first - * HTTP read after the build finishes would fall through to a full scanner sweep — wasting - * the work the live-state path already did. + * Creates and readies {@link LiveGraphState} entries at execution start / resume, and + * hands the final graph off to the on-disk cache at completion so the first post-build + * read doesn't need a fresh scanner sweep. 
*/ @Extension public class LiveGraphLifecycle extends FlowExecutionListener { @@ -27,8 +26,7 @@ public class LiveGraphLifecycle extends FlowExecutionListener { @Override public void onRunning(@NonNull FlowExecution execution) { try { - // Fresh execution — no history to catch up, so mark ready immediately. The - // listener will populate nodes as they arrive via onNewHead. + // Fresh execution — no prior nodes to catch up. LiveGraphState state = LiveGraphRegistry.get().getOrCreate(execution); if (state != null) { state.markReady(); @@ -41,10 +39,9 @@ public void onRunning(@NonNull FlowExecution execution) { @Override public void onResumed(@NonNull FlowExecution execution) { try { - // Resume after a Jenkins restart. The execution's persisted graph may contain - // nodes from before the restart that our in-memory state doesn't know about, - // so catch up here (safe — this runs on a Jenkins event thread, not the CPS VM) - // before flipping the state to ready. + // Resumed after a Jenkins restart: the execution's persisted graph already holds + // nodes we never saw live. Running here (not on the CPS VM) makes a scanner walk + // safe. LiveGraphState state = LiveGraphRegistry.get().getOrCreate(execution); if (state != null) { LiveGraphPopulator.catchUp(execution, state); @@ -60,14 +57,11 @@ public void onCompleted(@NonNull FlowExecution execution) { try { WorkflowRun run = workflowRunFor(execution); if (run != null) { - // Compute one last time while the live state is still in the registry; this - // path hits the snapshot/cache rather than the scanner. PipelineGraph graph = new PipelineGraphApi(run).computeTree(); PipelineStepList allSteps = new PipelineStepApi(run).computeAllSteps(); // WorkflowRun.isBuilding() can still be true here even though FlowExecution - // is complete; rebuild the step list with runIsComplete=true so the persisted - // copy reflects reality. PipelineGraph.complete comes from - // FlowExecution.isComplete() and is already correct. 
+ // is complete; rebuild with runIsComplete=true so the persisted copy matches + // reality. PipelineGraph.complete already reflects FlowExecution.isComplete(). PipelineStepList finalSteps = new PipelineStepList(allSteps.steps, true); PipelineGraphViewCache.get().seed(run, graph, finalSteps); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java index 729be9878..4fdec1706 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java @@ -10,23 +10,13 @@ /** * Extension that captures every new {@link FlowNode} across every running execution and - * feeds it to the corresponding {@link LiveGraphState}. The downstream {@code PipelineGraphApi} - * path reads a snapshot of that state instead of walking the whole execution each time. + * feeds it to the corresponding {@link LiveGraphState}. * - *

    We use {@link GraphListener.Synchronous} rather than the async variant because callers - * expect "once a node is a head, the next API read reflects it" — async delivery creates a - * lag window where the snapshot is behind the execution, which breaks tests that check state - * at precise trigger points and would surprise anyone hitting the REST API after an event. - * The work done under the monitor is trivial ({@code ArrayList}/{@code HashSet} additions), - * so the CPS VM thread is not meaningfully blocked. Every code path is still wrapped in - * try/catch and poisons the state on failure so a bug here can never disrupt a build. - * - *

    The javadoc also forbids blocking work here, which rules out any {@code DepthFirstScanner} - * catch-up walk inside this method. {@link LiveGraphState} starts unready and is only - * marked ready by {@link LiveGraphLifecycle} (on an event thread, not the CPS VM). If - * {@code onNewHead} fires for an execution the lifecycle never saw (e.g. plugin installed - * mid-build), the state stays unready, {@code snapshot()} keeps returning {@code null}, - * and HTTP readers fall back to the scanner for the remainder of that run. + *

    We use {@link GraphListener.Synchronous} because callers expect that once a node is a + * head, the next API read reflects it — async delivery would create a lag window where the + * snapshot is behind the execution. The listener therefore does the minimum under the + * monitor and never scans: any catch-up belongs in {@link LiveGraphLifecycle}, on a Jenkins + * event thread. */ @Extension public class LiveGraphPopulator implements GraphListener.Synchronous { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 89dd59359..5e83a39fa 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -90,7 +90,6 @@ public PipelineGraph cachedGraph(WorkflowRun run, long minVersion) { return state == null ? null : state.cachedGraph(minVersion); } - /** Stores a {@link PipelineGraph} computed from a snapshot at {@code version}. */ public void cacheGraph(WorkflowRun run, long version, PipelineGraph graph) { if (disabled()) { return; diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java index e6e4dca03..980813a97 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java @@ -4,12 +4,8 @@ import org.jenkinsci.plugins.workflow.graph.FlowNode; /** - * Immutable projection of a {@link LiveGraphState} at a point in time. - * Held briefly outside the state's monitor so HTTP callers can construct DTOs without - * blocking the CPS VM thread that's feeding the live state. - * - *

    {@code version} is a monotonically-increasing counter that bumps on every new flow - * node. It lets HTTP callers key the output cache so repeat polls between node arrivals - * return the cached {@code PipelineGraph} / {@code PipelineStepList} without rebuilding. + * Immutable projection of a {@link LiveGraphState} at a point in time. {@code version} is + * a monotonic counter that bumps on every new flow node, so callers can use it as a cache + * key for computed DTOs. */ public record LiveGraphSnapshot(List nodes, List workspaceNodes, long version) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index aa7515128..1d7837d07 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -13,18 +13,6 @@ * Per-run mutable state built up by {@link LiveGraphPopulator} as {@code GraphListener} * events arrive. Reads and writes are serialised on the instance monitor; holders should * snapshot and release quickly — the writer is the CPS VM thread and must not block. - * - *

    Two things live here: - *

      - *
    • The raw {@link FlowNode} list that the downstream relationship-finder / - * graph-builder path consumes. The {@link WorkspaceAction}-carrying subset used by - * agent detection is derived at snapshot time by iterating this list, not at - * add-time — {@code WorkspaceAction} can be attached to a node after {@code onNewHead} - * fires.
    • - *
    • A small cache of the last-computed {@link PipelineGraph} / {@link PipelineStepList} - * keyed by a monotonic version counter that bumps on every {@link #addNode}. HTTP - * polls that hit between node arrivals return the cached DTO verbatim — no rebuild.
    • - *
    */ final class LiveGraphState { @@ -33,20 +21,13 @@ final class LiveGraphState { private long version = 0; private volatile boolean poisoned = false; - // Starts unready. Lifecycle callbacks ({@link LiveGraphLifecycle}) run on Jenkins event - // threads and flip this on after they've done any needed catch-up. If lifecycle never - // fires for an execution (plugin installed while the build was running), the state stays - // unready and HTTP readers fall back to the scanner path — we can't safely backfill from - // the CPS VM thread inside {@code onNewHead}. + // Starts unready. Only the lifecycle ({@link LiveGraphLifecycle}) flips it on, after any + // needed catch-up — callers of {@link #snapshot} treat a null return as "fall back to + // the scanner", so a partial state that never gets the ready handshake stays invisible. private boolean ready = false; - // Output cache. Mutations synchronise on the instance monitor so concurrent writers - // cannot regress a newer version with an older one (check-then-set on a volatile - // reference alone would be racy). Repeat polls at the same version hit this cache and - // skip rebuilding the DTOs entirely — that's the real savings, not snapshot-level - // memoisation (which would make the WorkspaceAction subset stale: actions can be - // attached to an existing FlowNode after onNewHead has already fired, without bumping - // the node-count version). + // Mutations synchronise on the instance monitor so concurrent writers cannot regress a + // newer version with an older one (check-then-set on a volatile reference would be racy). 
private VersionedCache cachedGraph; private VersionedCache cachedAllSteps; diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java index 9b2c1c938..22f596a54 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java @@ -36,10 +36,7 @@ public PipelineNodeGraphAdapter(WorkflowRun run) { treeScanner = new PipelineNodeTreeScanner(run); } - /** - * Builds the adapter over a pre-collected node set rather than walking the execution - * graph. Used by the live-state path. - */ + /** Builds the adapter over a pre-collected node set rather than walking the execution. */ public PipelineNodeGraphAdapter(WorkflowRun run, Collection preCollectedNodes) { treeScanner = new PipelineNodeTreeScanner(run, preCollectedNodes); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index b1df5cc4b..744e7edaa 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -60,9 +60,8 @@ public PipelineNodeTreeScanner(@NonNull WorkflowRun run) { } /** - * Alternate constructor that uses a caller-supplied node collection instead of walking - * the execution graph with a {@link DepthFirstScanner}. Intended for use by the - * live-state path, which has already observed every node via {@code GraphListener}. + * Builds from a caller-supplied node collection, skipping the {@link DepthFirstScanner} + * walk. The caller is responsible for having observed every node already. 
*/ public PipelineNodeTreeScanner(@NonNull WorkflowRun run, @NonNull Collection nodes) { this.run = run; diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 7dcdfc360..bed3f9b0f 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -124,8 +124,6 @@ private static String getStageNode(FlowNodeWrapper flowNodeWrapper, @CheckForNul FlowNode flowNode = flowNodeWrapper.getNode(); logger.debug("Checking node {}", flowNode); FlowExecution execution = flowNode.getExecution(); - // When the caller supplies a pre-filtered workspace-node list (the live-state path), - // iterate that; otherwise fall back to scanning the whole execution graph. Iterable candidates = workspaceNodes != null ? workspaceNodes : new DepthFirstScanner().allNodes(execution); for (FlowNode n : candidates) { @@ -170,12 +168,7 @@ public PipelineGraph createTree() { return PipelineGraphViewCache.get().getGraph(run, this::computeTree); } - /** - * Internal: direct uncached compute path, for use by - * {@link io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphLifecycle} to build a - * final graph at completion without going through {@code PipelineGraphViewCache} (which - * would skip caching while {@code WorkflowRun.isBuilding()} is true). - */ + /** Uncached compute path; callers are responsible for any caching. 
*/ @Restricted(NoExternalUse.class) public PipelineGraph computeTree() { LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 549820536..4594eb488 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -123,11 +123,7 @@ public PipelineStepList getAllSteps() { return PipelineGraphViewCache.get().getAllSteps(run, this::computeAllSteps); } - /** - * Internal: direct uncached compute path, for use by - * {@link io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphLifecycle} to build a - * final step list at completion without going through {@code PipelineGraphViewCache}. - */ + /** Uncached compute path; callers are responsible for any caching. */ @Restricted(NoExternalUse.class) public PipelineStepList computeAllSteps() { // Look up the completed state before computing steps. 
From c4f02efaa8fb76915ec65d7b4a77507ad032ac80 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 09:54:57 +0100 Subject: [PATCH 12/26] Tweaks --- .../livestate/LiveGraphLifecycle.java | 32 +++++++++++--- .../livestate/LiveGraphPopulator.java | 12 ++++- .../livestate/LiveGraphRegistry.java | 30 ++++++++++--- .../livestate/LiveGraphState.java | 44 +++++++++++++------ .../utils/PipelineGraphApi.java | 28 +++++++++--- .../utils/PipelineStepApi.java | 28 +++++++++--- 6 files changed, 134 insertions(+), 40 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index a7a15d1ab..e561892bb 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -2,11 +2,13 @@ import edu.umd.cs.findbugs.annotations.NonNull; import hudson.Extension; +import io.jenkins.plugins.pipelinegraphview.treescanner.PipelineNodeGraphAdapter; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphViewCache; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; +import java.util.ArrayList; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.flow.FlowExecutionListener; import org.jenkinsci.plugins.workflow.job.WorkflowRun; @@ -57,13 +59,29 @@ public void onCompleted(@NonNull FlowExecution execution) { try { WorkflowRun run = workflowRunFor(execution); if (run != null) { - PipelineGraph graph = new PipelineGraphApi(run).computeTree(); - PipelineStepList allSteps = new PipelineStepApi(run).computeAllSteps(); - // WorkflowRun.isBuilding() can still be true here 
even though FlowExecution - // is complete; rebuild with runIsComplete=true so the persisted copy matches - // reality. PipelineGraph.complete already reflects FlowExecution.isComplete(). - PipelineStepList finalSteps = new PipelineStepList(allSteps.steps, true); - PipelineGraphViewCache.get().seed(run, graph, finalSteps); + PipelineGraph graph; + PipelineStepList allSteps; + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + // Share a single adapter (and therefore a single tree-scanner pass) for + // both graph and steps rather than paying the cost twice. + PipelineNodeGraphAdapter adapter = new PipelineNodeGraphAdapter(run, snapshot.nodes()); + // runIsComplete=true here directly: WorkflowRun.isBuilding() can still be + // true even though FlowExecution is complete. PipelineGraph.complete is + // already derived from FlowExecution.isComplete() inside createTreeFrom. + graph = new PipelineGraphApi(run).createTreeFrom(adapter, snapshot.workspaceNodes()); + allSteps = new PipelineStepApi(run).getAllStepsFrom(adapter, true); + } else { + // No live state (feature disabled, poisoned, plugin installed mid-build). + // Fall back to the scanner-backed paths and rebuild steps with + // runIsComplete=true to keep the persisted copy consistent. Defensively + // copy the list: PipelineStepList.steps is publicly mutable, and `raw` + // may be held by the DTO cache or returned to a concurrent HTTP reader. 
+ graph = new PipelineGraphApi(run).computeTree(); + PipelineStepList raw = new PipelineStepApi(run).computeAllSteps(); + allSteps = new PipelineStepList(new ArrayList<>(raw.steps), true); + } + PipelineGraphViewCache.get().seed(run, graph, allSteps); } } catch (Throwable t) { logger.warn("seeding disk cache on completion failed", t); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java index 4fdec1706..6ee9e2bf7 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphPopulator.java @@ -1,6 +1,8 @@ package io.jenkins.plugins.pipelinegraphview.livestate; import hudson.Extension; +import java.util.ArrayList; +import java.util.List; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.flow.GraphListener; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -53,8 +55,16 @@ static void catchUp(FlowExecution execution, LiveGraphState state) { try { DepthFirstScanner scanner = new DepthFirstScanner(); scanner.setup(execution.getCurrentHeads()); + // The scanner walks from current heads backward (newest→oldest). Add nodes in + // the reverse of that order so the state's insertion order matches the + // oldest→newest sequence the onNewHead path maintains — LiveGraphState#snapshot + // reverses again to surface workspace candidates newest-first. 
+ List scanned = new ArrayList<>(); for (FlowNode existing : scanner) { - state.addNode(existing); + scanned.add(existing); + } + for (int i = scanned.size() - 1; i >= 0; i--) { + state.addNode(scanned.get(i)); } } catch (Throwable t) { logger.warn("catch-up failed; poisoning state", t); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 5e83a39fa..cb1ca08f3 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -14,6 +14,16 @@ * Entries are created on demand by the listener / lifecycle code, removed on completion, * and otherwise bounded by a Caffeine LRU so abandoned entries (deleted runs, listener * bugs) don't leak. + * + *

    Operator knobs: + *

      + *
    • {@code io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry.enabled} + * ({@code boolean}, default {@code true}) — set to {@code false} to disable the + * live-state path entirely and force scanner fallback.
    • + *
    • {@code io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry.size} + * ({@code int}, default {@code 512}) — max concurrent in-progress runs tracked. + * Extra runs use the scanner path until an entry evicts.
    • + *
    */ public final class LiveGraphRegistry { @@ -37,11 +47,7 @@ public static LiveGraphRegistry get() { LiveGraphRegistry() {} - /** - * Escape hatch. Setting this system property to {@code false} makes - * {@link #snapshot(WorkflowRun)} always return {@code null}, forcing callers to use the - * scanner fallback. Useful if a regression lands in the live-state path. - */ + /** See the {@code .enabled} knob documented on the class javadoc. */ private static boolean disabled() { return !SystemProperties.getBoolean(LiveGraphRegistry.class.getName() + ".enabled", true); } @@ -57,6 +63,20 @@ LiveGraphState getOrCreate(FlowExecution execution) { return states.get(key, k -> new LiveGraphState()); } + /** + * Returns the state's current version, or {@code null} if there's no usable state + * (feature disabled, not populated, poisoned, or not yet ready). Cheap — lets callers + * short-circuit to {@link #cachedGraph}/{@link #cachedAllSteps} without paying for a + * full snapshot first. + */ + public Long currentVersion(WorkflowRun run) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? null : state.currentVersion(); + } + /** * Returns a snapshot of the live state for this run, or {@code null} if none exists * (feature disabled, state never populated, state poisoned). 
Callers must treat diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 1d7837d07..db3a6421a 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -47,27 +47,45 @@ synchronized int size() { return nodes.size(); } - synchronized LiveGraphSnapshot snapshot() { + /** + * Cheap version read for cache-hit short-circuits — same readiness/poison semantics as + * {@link #snapshot()}, but skips the O(N) copy. Callers check this first and only take + * a full snapshot on cache miss. + */ + synchronized Long currentVersion() { if (poisoned || !ready) { return null; } - // Filter for WorkspaceAction at snapshot time rather than at addNode time: - // WorkspaceAction is attached to a block-start node when the workspace is allocated, - // which can happen AFTER onNewHead has already fired for that node. A snapshot-time - // scan always observes the latest action state on each captured FlowNode. - // - // The list is built newest-first (reverse insertion order) to match the iteration - // order of DepthFirstScanner (from current heads backward): PipelineGraphApi#getStageNode - // returns on the first match, and for nested agents the innermost workspace is the - // more-specific match — the one a later-created inner `node {}` block sits in. + return version; + } + + LiveGraphSnapshot snapshot() { + // Release the monitor before running the WorkspaceAction scan: each + // getAction(WorkspaceAction.class) walks the FlowNode's action list, so doing N of + // them under the lock the CPS VM also uses would block pipeline execution for O(N). 
+ List nodesCopy; + long v; + synchronized (this) { + if (poisoned || !ready) { + return null; + } + nodesCopy = List.copyOf(nodes); + v = version; + } + // Filter for WorkspaceAction at snapshot time, not at addNode time: a node can have + // WorkspaceAction attached after onNewHead fires (when the workspace is allocated), + // so scanning here always observes the latest action state. Newest-first ordering + // matches DepthFirstScanner (from current heads backward); PipelineGraphApi#getStageNode + // returns on the first match, and for nested agents the innermost workspace — the + // one a later-created inner `node {}` block sits in — is the more-specific match. List workspaceNodes = new ArrayList<>(); - for (int i = nodes.size() - 1; i >= 0; i--) { - FlowNode n = nodes.get(i); + for (int i = nodesCopy.size() - 1; i >= 0; i--) { + FlowNode n = nodesCopy.get(i); if (n.getAction(WorkspaceAction.class) != null) { workspaceNodes.add(n); } } - return new LiveGraphSnapshot(List.copyOf(nodes), List.copyOf(workspaceNodes), version); + return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), v); } /** diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index bed3f9b0f..95a9b9eb2 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -171,17 +171,31 @@ public PipelineGraph createTree() { /** Uncached compute path; callers are responsible for any caching. 
*/ @Restricted(NoExternalUse.class) public PipelineGraph computeTree() { - LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); - if (snapshot != null) { - PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, snapshot.version()); + // Check the cache first using the cheap version read — if we already have a graph + // for the current state, we can skip the O(N) snapshot copy entirely. + Long currentVersion = LiveGraphRegistry.get().currentVersion(run); + if (currentVersion != null) { + PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, currentVersion); if (cached != null) { return cached; } - PipelineGraph computed = - createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes()), snapshot.workspaceNodes()); - LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); - return computed; + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + PipelineGraph computed = + createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes()), snapshot.workspaceNodes()); + LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); + return computed; + } } return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run), null); } + + /** + * Builds a {@link PipelineGraph} from a caller-supplied adapter and workspace-node list. + * Doesn't touch the live-state DTO cache — caller owns caching. 
+ */ + @Restricted(NoExternalUse.class) + public PipelineGraph createTreeFrom(PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { + return createTree(builder, workspaceNodes); + } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 4594eb488..9d640b80b 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -126,18 +126,32 @@ public PipelineStepList getAllSteps() { /** Uncached compute path; callers are responsible for any caching. */ @Restricted(NoExternalUse.class) public PipelineStepList computeAllSteps() { - // Look up the completed state before computing steps. boolean runIsComplete = !run.isBuilding(); - LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); - if (snapshot != null) { - PipelineStepList cached = LiveGraphRegistry.get().cachedAllSteps(run, snapshot.version()); + // Check the cache first using the cheap version read — if we already have steps + // for the current state, we can skip the O(N) snapshot copy entirely. 
+ Long currentVersion = LiveGraphRegistry.get().currentVersion(run); + if (currentVersion != null) { + PipelineStepList cached = LiveGraphRegistry.get().cachedAllSteps(run, currentVersion); if (cached != null) { return cached; } - PipelineStepList computed = getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); - LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); - return computed; + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + PipelineStepList computed = + getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); + LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); + return computed; + } } return getAllSteps(CachedPipelineNodeGraphAdaptor.instance.getFor(run), runIsComplete); } + + /** + * Builds a {@link PipelineStepList} from a caller-supplied adapter. Doesn't touch the + * live-state DTO cache — caller owns caching. + */ + @Restricted(NoExternalUse.class) + public PipelineStepList getAllStepsFrom(PipelineStepBuilderApi builder, boolean runIsComplete) { + return getAllSteps(builder, runIsComplete); + } } From 3d728c745785cb5853470de8c098d16f1f7f93b1 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 10:38:58 +0100 Subject: [PATCH 13/26] Performance improvements --- .../livestate/LiveGraphLifecycle.java | 6 +- .../livestate/LiveGraphSnapshot.java | 18 +++- .../livestate/LiveGraphState.java | 19 ++++- .../treescanner/PipelineNodeGraphAdapter.java | 16 +++- .../treescanner/PipelineNodeTreeScanner.java | 36 ++++++-- .../utils/PipelineGraphApi.java | 84 ++++++++++++++----- .../utils/PipelineStepApi.java | 5 +- 7 files changed, 143 insertions(+), 41 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index e561892bb..38ae41787 100644 --- 
a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -65,11 +65,13 @@ public void onCompleted(@NonNull FlowExecution execution) { if (snapshot != null) { // Share a single adapter (and therefore a single tree-scanner pass) for // both graph and steps rather than paying the cost twice. - PipelineNodeGraphAdapter adapter = new PipelineNodeGraphAdapter(run, snapshot.nodes()); + PipelineNodeGraphAdapter adapter = + new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()); // runIsComplete=true here directly: WorkflowRun.isBuilding() can still be // true even though FlowExecution is complete. PipelineGraph.complete is // already derived from FlowExecution.isComplete() inside createTreeFrom. - graph = new PipelineGraphApi(run).createTreeFrom(adapter, snapshot.workspaceNodes()); + graph = new PipelineGraphApi(run) + .createTreeFrom(adapter, snapshot.workspaceNodes(), snapshot.enclosingIdsByNodeId()); allSteps = new PipelineStepApi(run).getAllStepsFrom(adapter, true); } else { // No live state (feature disabled, poisoned, plugin installed mid-build). diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java index 980813a97..d89b06b73 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java @@ -1,11 +1,21 @@ package io.jenkins.plugins.pipelinegraphview.livestate; import java.util.List; +import java.util.Map; import org.jenkinsci.plugins.workflow.graph.FlowNode; /** - * Immutable projection of a {@link LiveGraphState} at a point in time. 
{@code version} is - * a monotonic counter that bumps on every new flow node, so callers can use it as a cache - * key for computed DTOs. + * Immutable projection of a {@link LiveGraphState} at a point in time. + * + *

    {@code enclosingIdsByNodeId} carries each captured node's ancestor chain so graph + * construction can resolve parentage without touching the execution's FlowNode storage + * (and its read lock, which contends with the running build's writes). + * + *

    {@code version} is a monotonic counter that bumps on every new flow node, so callers + * can use it as a cache key for computed DTOs. */ -public record LiveGraphSnapshot(List nodes, List workspaceNodes, long version) {} +public record LiveGraphSnapshot( + List nodes, + List workspaceNodes, + Map> enclosingIdsByNodeId, + long version) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index db3a6421a..9c9845484 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -3,8 +3,10 @@ import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -18,6 +20,11 @@ final class LiveGraphState { private final List nodes = new ArrayList<>(); private final Set seenIds = new HashSet<>(); + // Node ID → its enclosing-block IDs (innermost first). Populated at add time on the + // CPS VM thread, so HTTP graph builds don't need to acquire the storage read lock to + // resolve ancestry. Without this, each findParentNode call goes back to storage and + // contends with the write lock the running build is holding. + private final Map> enclosingIdsByNodeId = new HashMap<>(); private long version = 0; private volatile boolean poisoned = false; @@ -36,6 +43,14 @@ synchronized void addNode(FlowNode node) { return; } nodes.add(node); + // Capture ancestry now. On the CPS VM thread (the usual onNewHead caller) this is a + // reentrant read against the already-held storage write lock — effectively free. 
+ // A node's enclosing chain never changes after creation, so we only pay this once. + try { + enclosingIdsByNodeId.put(node.getId(), List.copyOf(node.getAllEnclosingIds())); + } catch (Throwable ignored) { + enclosingIdsByNodeId.put(node.getId(), List.of()); + } version++; } @@ -64,12 +79,14 @@ LiveGraphSnapshot snapshot() { // getAction(WorkspaceAction.class) walks the FlowNode's action list, so doing N of // them under the lock the CPS VM also uses would block pipeline execution for O(N). List nodesCopy; + Map> enclosingCopy; long v; synchronized (this) { if (poisoned || !ready) { return null; } nodesCopy = List.copyOf(nodes); + enclosingCopy = Map.copyOf(enclosingIdsByNodeId); v = version; } // Filter for WorkspaceAction at snapshot time, not at addNode time: a node can have @@ -85,7 +102,7 @@ LiveGraphSnapshot snapshot() { workspaceNodes.add(n); } } - return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), v); + return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), enclosingCopy, v); } /** diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java index 22f596a54..120196823 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeGraphAdapter.java @@ -1,5 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.treescanner; +import edu.umd.cs.findbugs.annotations.CheckForNull; import io.jenkins.plugins.pipelinegraphview.utils.FlowNodeWrapper; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphBuilderApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepBuilderApi; @@ -38,7 +39,20 @@ public PipelineNodeGraphAdapter(WorkflowRun run) { /** Builds the adapter over a pre-collected node set rather than walking the execution. 
*/ public PipelineNodeGraphAdapter(WorkflowRun run, Collection preCollectedNodes) { - treeScanner = new PipelineNodeTreeScanner(run, preCollectedNodes); + this(run, preCollectedNodes, null); + } + + /** + * Builds the adapter over a pre-collected node set plus a pre-computed + * {@code nodeId → enclosingIds} map. Supplying the map lets graph construction resolve + * ancestry without touching the execution's node storage (and its read lock, which + * contends with the running build's writes). + */ + public PipelineNodeGraphAdapter( + WorkflowRun run, + Collection preCollectedNodes, + @CheckForNull Map> enclosingIdsByNodeId) { + treeScanner = new PipelineNodeTreeScanner(run, preCollectedNodes, enclosingIdsByNodeId); } private final Object pipelineLock = new Object(); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index 744e7edaa..1e66d32f6 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -61,13 +61,18 @@ public PipelineNodeTreeScanner(@NonNull WorkflowRun run) { /** * Builds from a caller-supplied node collection, skipping the {@link DepthFirstScanner} - * walk. The caller is responsible for having observed every node already. + * walk. The caller is responsible for having observed every node already. Pass + * {@code enclosingIdsByNodeId} to skip the storage read lock during ancestry resolution + * (the live-state path captures this on the CPS VM thread). 
*/ - public PipelineNodeTreeScanner(@NonNull WorkflowRun run, @NonNull Collection nodes) { + public PipelineNodeTreeScanner( + @NonNull WorkflowRun run, + @NonNull Collection nodes, + @CheckForNull Map> enclosingIdsByNodeId) { this.run = run; this.execution = run.getExecution(); this.declarative = run.getAction(ExecutionModelAction.class) != null; - this.buildFrom(nodes); + this.buildFrom(nodes, enclosingIdsByNodeId); } /** @@ -78,7 +83,7 @@ public void build() { logger.debug("Building graph"); } if (execution != null) { - buildFrom(getAllNodes()); + buildFrom(getAllNodes(), null); } else { this.stageNodeMap = new LinkedHashMap<>(); this.stepNodeMap = new LinkedHashMap<>(); @@ -88,7 +93,7 @@ public void build() { } } - private void buildFrom(Collection nodes) { + private void buildFrom(Collection nodes, @CheckForNull Map> enclosingIdsByNodeId) { if (execution == null || nodes.isEmpty()) { this.stageNodeMap = new LinkedHashMap<>(); this.stepNodeMap = new LinkedHashMap<>(); @@ -96,7 +101,7 @@ private void buildFrom(Collection nodes) { } NodeRelationshipFinder finder = new NodeRelationshipFinder(); Map relationships = finder.getNodeRelationships(nodes); - GraphBuilder builder = new GraphBuilder(nodes, relationships, this.run, this.execution); + GraphBuilder builder = new GraphBuilder(nodes, relationships, this.run, this.execution, enclosingIdsByNodeId); if (isDebugEnabled) { logger.debug("Original nodes:"); logger.debug("{}", builder.getNodes()); @@ -173,6 +178,11 @@ private static class GraphBuilder { @NonNull private final FlowExecution execution; + // Optional pre-computed ancestry. When present, findParentNode avoids the storage + // read lock that FlowNode#getAllEnclosingIds would take. + @CheckForNull + private final Map> enclosingIdsByNodeId; + private final Map wrappedNodeMap = new LinkedHashMap<>(); // These two are populated when required using by filtering unwanted nodes from // 'wrappedNodeMap' into a new map. 
@@ -194,12 +204,14 @@ public GraphBuilder( Collection nodes, @NonNull Map relationships, @NonNull WorkflowRun run, - @NonNull FlowExecution execution) { + @NonNull FlowExecution execution, + @CheckForNull Map> enclosingIdsByNodeId) { this.nodes = nodes; this.relationships = relationships; this.run = run; this.inputAction = run.getAction(InputAction.class); this.execution = execution; + this.enclosingIdsByNodeId = enclosingIdsByNodeId; buildGraph(); } @@ -480,7 +492,15 @@ private void assignParent(@NonNull FlowNodeWrapper wrappedNode, @CheckForNull Fl */ private @CheckForNull FlowNodeWrapper findParentNode( @NonNull FlowNodeWrapper child, @NonNull Map wrappedNodeMap) { - List enclosingIds = child.getNode().getAllEnclosingIds(); + // Prefer the pre-computed ancestry: FlowNode#getAllEnclosingIds reaches back into + // the execution's storage, which contends with the running build's write lock + // and can make HTTP requests block for minutes on a large graph. + List enclosingIds; + if (enclosingIdsByNodeId != null) { + enclosingIds = enclosingIdsByNodeId.getOrDefault(child.getId(), List.of()); + } else { + enclosingIds = child.getNode().getAllEnclosingIds(); + } Set knownNodes = wrappedNodeMap.keySet(); for (String possibleParentId : enclosingIds) { if (isDebugEnabled) { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 95a9b9eb2..951ac4e86 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -30,7 +30,9 @@ public PipelineGraphApi(WorkflowRun run) { } private List getPipelineNodes( - PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { + PipelineGraphBuilderApi builder, + @CheckForNull List workspaceNodes, + @CheckForNull Map> enclosingIdsByNodeId) { return builder.getPipelineNodes().stream() 
.map(flowNodeWrapper -> new PipelineStageInternal( flowNodeWrapper.getId(), // TODO no need to parse it BO returns a string even though the @@ -44,7 +46,7 @@ private List getPipelineNodes( flowNodeWrapper.getDisplayName(), // TODO blue ocean uses timing information: "Passed in 0s" flowNodeWrapper.isSynthetic(), flowNodeWrapper.getTiming(), - getStageNode(flowNodeWrapper, workspaceNodes))) + getStageNode(flowNodeWrapper, workspaceNodes, enclosingIdsByNodeId))) .collect(Collectors.toList()); } @@ -59,7 +61,10 @@ private Function mapper( }; } - private PipelineGraph createTree(PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { + private PipelineGraph createTree( + PipelineGraphBuilderApi builder, + @CheckForNull List workspaceNodes, + @CheckForNull Map> enclosingIdsByNodeId) { FlowExecution execution = run.getExecution(); if (execution == null) { // If we don't have an execution - e.g. if the Pipeline has a syntax error - @@ -73,7 +78,7 @@ private PipelineGraph createTree(PipelineGraphBuilderApi builder, @CheckForNull // We want to remap children here, so we don't update the parents of the // original objects - as // these are completely new representations. 
- List stages = getPipelineNodes(builder, workspaceNodes); + List stages = getPipelineNodes(builder, workspaceNodes, enclosingIdsByNodeId); // Get InputAction once for all stages InputAction inputAction = run.getAction(InputAction.class); @@ -120,34 +125,51 @@ private PipelineGraph createTree(PipelineGraphBuilderApi builder, @CheckForNull return new PipelineGraph(stageResults, complete); } - private static String getStageNode(FlowNodeWrapper flowNodeWrapper, @CheckForNull List workspaceNodes) { + private static String getStageNode( + FlowNodeWrapper flowNodeWrapper, + @CheckForNull List workspaceNodes, + @CheckForNull Map> enclosingIdsByNodeId) { FlowNode flowNode = flowNodeWrapper.getNode(); logger.debug("Checking node {}", flowNode); FlowExecution execution = flowNode.getExecution(); Iterable candidates = workspaceNodes != null ? workspaceNodes : new DepthFirstScanner().allNodes(execution); + // Prefer pre-computed ancestry over FlowNode#getAllEnclosingIds / getEnclosingId / + // execution.getNode, all of which acquire the storage read lock and can block for + // minutes on a large graph when the CPS VM is writing frequently. + List flowNodeEnclosingIds = enclosingIdsFor(flowNode, enclosingIdsByNodeId); for (FlowNode n : candidates) { WorkspaceAction ws = n.getAction(WorkspaceAction.class); if (ws != null) { logger.debug("Found workspace node: {}", n); + List nEnclosingIds = enclosingIdsFor(n, enclosingIdsByNodeId); + String nDirectEnclosingId = nEnclosingIds.isEmpty() ? null : nEnclosingIds.get(0); boolean isWorkspaceNode = Objects.equals(n.getId(), flowNode.getId()) - || Objects.equals(n.getEnclosingId(), flowNode.getId()) - || flowNode.getAllEnclosingIds().contains(n.getId()); + || Objects.equals(nDirectEnclosingId, flowNode.getId()) + || flowNodeEnclosingIds.contains(n.getId()); - // Parallel stages have a sub-stage, so we need to check the 3rd parent for a match + // Parallel stages have a sub-stage, so we need to check the 3rd parent for a match. 
+ // Original code walked three levels via execution.getNode(...); the third-level + // enclosing ID is simply enclosingIds[2] in the pre-computed list. if (flowNodeWrapper.getType() == FlowNodeWrapper.NodeType.PARALLEL) { - try { - if (n.getEnclosingId() != null) { - FlowNode p = execution.getNode(n.getEnclosingId()); - if (p != null && p.getEnclosingId() != null) { - p = execution.getNode(p.getEnclosingId()); + if (enclosingIdsByNodeId != null) { + if (nEnclosingIds.size() >= 3) { + isWorkspaceNode = Objects.equals(flowNode.getId(), nEnclosingIds.get(2)); + } + } else { + try { + if (n.getEnclosingId() != null) { + FlowNode p = execution.getNode(n.getEnclosingId()); if (p != null && p.getEnclosingId() != null) { - isWorkspaceNode = Objects.equals(flowNode.getId(), p.getEnclosingId()); + p = execution.getNode(p.getEnclosingId()); + if (p != null && p.getEnclosingId() != null) { + isWorkspaceNode = Objects.equals(flowNode.getId(), p.getEnclosingId()); + } } } + } catch (IOException e) { + throw new RuntimeException(e); } - } catch (IOException e) { - throw new RuntimeException(e); } } @@ -164,6 +186,16 @@ private static String getStageNode(FlowNodeWrapper flowNodeWrapper, @CheckForNul return null; } + private static List enclosingIdsFor(FlowNode node, @CheckForNull Map> map) { + if (map != null) { + List pre = map.get(node.getId()); + if (pre != null) { + return pre; + } + } + return node.getAllEnclosingIds(); + } + public PipelineGraph createTree() { return PipelineGraphViewCache.get().getGraph(run, this::computeTree); } @@ -181,21 +213,27 @@ public PipelineGraph computeTree() { } LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { - PipelineGraph computed = - createTree(new PipelineNodeGraphAdapter(run, snapshot.nodes()), snapshot.workspaceNodes()); + PipelineGraph computed = createTree( + new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), + snapshot.workspaceNodes(), + 
snapshot.enclosingIdsByNodeId()); LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); return computed; } } - return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run), null); + return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run), null, null); } /** - * Builds a {@link PipelineGraph} from a caller-supplied adapter and workspace-node list. - * Doesn't touch the live-state DTO cache — caller owns caching. + * Builds a {@link PipelineGraph} from a caller-supplied adapter, workspace-node list, + * and optional pre-computed ancestry map. Doesn't touch the live-state DTO cache — + * caller owns caching. */ @Restricted(NoExternalUse.class) - public PipelineGraph createTreeFrom(PipelineGraphBuilderApi builder, @CheckForNull List workspaceNodes) { - return createTree(builder, workspaceNodes); + public PipelineGraph createTreeFrom( + PipelineGraphBuilderApi builder, + @CheckForNull List workspaceNodes, + @CheckForNull Map> enclosingIdsByNodeId) { + return createTree(builder, workspaceNodes, enclosingIdsByNodeId); } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 9d640b80b..2f677a102 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -137,8 +137,9 @@ public PipelineStepList computeAllSteps() { } LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { - PipelineStepList computed = - getAllSteps(new PipelineNodeGraphAdapter(run, snapshot.nodes()), runIsComplete); + PipelineStepList computed = getAllSteps( + new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), + runIsComplete); LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); return computed; } From 
87be5d3a69cb44097b5e9401d7ed85a9d8217fdb Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 11:27:12 +0100 Subject: [PATCH 14/26] Optimisations --- .../livestate/LiveGraphLifecycle.java | 7 +- .../livestate/LiveGraphRegistry.java | 23 ++++ .../livestate/LiveGraphSnapshot.java | 6 + .../livestate/LiveGraphState.java | 39 ++++++- .../treescanner/NodeRelationshipFinder.java | 51 +++++++-- .../treescanner/PipelineNodeTreeScanner.java | 2 +- .../utils/PipelineGraphApi.java | 34 ++++-- .../utils/PipelineStepApi.java | 105 ++++++++++++++---- 8 files changed, 218 insertions(+), 49 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 38ae41787..92130e472 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -72,7 +72,12 @@ public void onCompleted(@NonNull FlowExecution execution) { // already derived from FlowExecution.isComplete() inside createTreeFrom. graph = new PipelineGraphApi(run) .createTreeFrom(adapter, snapshot.workspaceNodes(), snapshot.enclosingIdsByNodeId()); - allSteps = new PipelineStepApi(run).getAllStepsFrom(adapter, true); + allSteps = new PipelineStepApi(run) + .getAllStepsFrom( + adapter, + true, + snapshot.hideFromViewBlockStartIds(), + snapshot.enclosingIdsByNodeId()); } else { // No live state (feature disabled, poisoned, plugin installed mid-build). 
// Fall back to the scanner-backed paths and rebuild steps with diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index cb1ca08f3..7ecd1e2b6 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -138,6 +138,29 @@ public void cacheAllSteps(WorkflowRun run, long version, PipelineStepList steps) } } + /** + * Returns a per-run monitor that callers can synchronise on to dedup concurrent graph + * rebuilds. Null when the live state isn't present (caller just computes directly). + */ + @edu.umd.cs.findbugs.annotations.CheckForNull + public Object graphComputeLock(WorkflowRun run) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? null : state.graphComputeLock(); + } + + /** See {@link #graphComputeLock(WorkflowRun)} — the matching lock for the steps path. */ + @edu.umd.cs.findbugs.annotations.CheckForNull + public Object allStepsComputeLock(WorkflowRun run) { + if (disabled()) { + return null; + } + LiveGraphState state = states.getIfPresent(run.getExternalizableId()); + return state == null ? 
null : state.allStepsComputeLock(); + } + private static String keyFor(FlowExecution execution) { try { Object exec = execution.getOwner().getExecutable(); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java index d89b06b73..339b65ccc 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphSnapshot.java @@ -2,6 +2,7 @@ import java.util.List; import java.util.Map; +import java.util.Set; import org.jenkinsci.plugins.workflow.graph.FlowNode; /** @@ -11,6 +12,10 @@ * construction can resolve parentage without touching the execution's FlowNode storage * (and its read lock, which contends with the running build's writes). * + *

    {@code hideFromViewBlockStartIds} is the set of {@code hideFromView} block-start IDs, + * so callers can derive a step's "hidden" flag by intersecting with the step's enclosing + * IDs — no per-step storage walk required. + * *

    {@code version} is a monotonic counter that bumps on every new flow node, so callers * can use it as a cache key for computed DTOs. */ @@ -18,4 +23,5 @@ public record LiveGraphSnapshot( List nodes, List workspaceNodes, Map> enclosingIdsByNodeId, + Set hideFromViewBlockStartIds, long version) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 9c9845484..9c7e72b09 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -1,5 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.livestate; +import io.jenkins.plugins.pipelinegraphview.steps.HideFromViewStep; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import java.util.ArrayList; @@ -9,7 +10,9 @@ import java.util.Map; import java.util.Set; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; +import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; +import org.jenkinsci.plugins.workflow.steps.StepDescriptor; /** * Per-run mutable state built up by {@link LiveGraphPopulator} as {@code GraphListener} @@ -25,6 +28,10 @@ final class LiveGraphState { // resolve ancestry. Without this, each findParentNode call goes back to storage and // contends with the write lock the running build is holding. private final Map> enclosingIdsByNodeId = new HashMap<>(); + // IDs of BlockStartNodes that wrap a {@code hideFromView} step. Lets HTTP readers check + // whether a step is "hidden" by scanning this set against the step's enclosing IDs, + // instead of calling FlowNode#iterateEnclosingBlocks which goes to storage per step. 
+ private final Set hideFromViewBlockStartIds = new HashSet<>(); private long version = 0; private volatile boolean poisoned = false; @@ -38,6 +45,14 @@ final class LiveGraphState { private VersionedCache cachedGraph; private VersionedCache cachedAllSteps; + // Per-compute dedup locks. N concurrent HTTP threads rebuilding the same graph + // multiplies CPU cost by N (they each do the same O(nodes) work). With these, the + // first thread computes and caches; the rest block here, then re-check cache and + // usually find the fresh result. Separate locks for tree vs steps — they're independent + // computations and a slow tree rebuild shouldn't starve steps (or vice versa). + private final Object graphComputeLock = new Object(); + private final Object allStepsComputeLock = new Object(); + synchronized void addNode(FlowNode node) { if (!seenIds.add(node.getId())) { return; @@ -51,6 +66,18 @@ synchronized void addNode(FlowNode node) { } catch (Throwable ignored) { enclosingIdsByNodeId.put(node.getId(), List.of()); } + // Record hideFromView block starts once, here, so readers never need to walk a + // node's enclosing blocks through storage to tell whether the node is hidden. + if (node instanceof StepStartNode stepStartNode) { + try { + StepDescriptor descriptor = stepStartNode.getDescriptor(); + if (descriptor != null && HideFromViewStep.class.getName().equals(descriptor.getId())) { + hideFromViewBlockStartIds.add(node.getId()); + } + } catch (Throwable ignored) { + // Descriptor lookup is best-effort; fall back to "not hidden". + } + } version++; } @@ -80,6 +107,7 @@ LiveGraphSnapshot snapshot() { // them under the lock the CPS VM also uses would block pipeline execution for O(N). 
List nodesCopy; Map> enclosingCopy; + Set hideFromViewCopy; long v; synchronized (this) { if (poisoned || !ready) { @@ -87,6 +115,7 @@ LiveGraphSnapshot snapshot() { } nodesCopy = List.copyOf(nodes); enclosingCopy = Map.copyOf(enclosingIdsByNodeId); + hideFromViewCopy = Set.copyOf(hideFromViewBlockStartIds); v = version; } // Filter for WorkspaceAction at snapshot time, not at addNode time: a node can have @@ -102,7 +131,7 @@ LiveGraphSnapshot snapshot() { workspaceNodes.add(n); } } - return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), enclosingCopy, v); + return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), enclosingCopy, hideFromViewCopy, v); } /** @@ -138,5 +167,13 @@ synchronized void markReady() { ready = true; } + Object graphComputeLock() { + return graphComputeLock; + } + + Object allStepsComputeLock() { + return allStepsComputeLock; + } + private record VersionedCache(long version, T value) {} } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/NodeRelationshipFinder.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/NodeRelationshipFinder.java index 500489370..843bc3ef4 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/NodeRelationshipFinder.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/NodeRelationshipFinder.java @@ -42,7 +42,19 @@ public class NodeRelationshipFinder { private LinkedHashMap relationships = new LinkedHashMap<>(); - public NodeRelationshipFinder() {} + // Pre-computed ancestry (node id → enclosing ids, innermost first). When present, this + // class never calls FlowNode#getEnclosingId / #getEnclosingBlocks, both of which hit the + // execution's storage read lock and contend with the running build's writes. 
+ @CheckForNull + private final Map> enclosingIdsByNodeId; + + public NodeRelationshipFinder() { + this(null); + } + + public NodeRelationshipFinder(@CheckForNull Map> enclosingIdsByNodeId) { + this.enclosingIdsByNodeId = enclosingIdsByNodeId; + } /** * Determines the relationship between FlowNodes {@link FlowNode#getParents()}. @@ -96,7 +108,7 @@ private void handleBlockStart(@NonNull FlowNode node) { } private void addSeenNodes(FlowNode node) { - String enclosingId = node.getEnclosingId(); + String enclosingId = firstEnclosingIdOf(node); if (!seenChildNodes.containsKey(enclosingId)) { seenChildNodes.put(enclosingId, new ArrayDeque<>()); } @@ -111,11 +123,13 @@ private FlowNode getAfterNode(FlowNode node) { FlowNode after; // The after node is the last child of the enclosing node, except for the last node in // a block, then it's the last node in the enclosing nodes list (likely, this blocks end node). - FlowNode parentStartNode = getFirstEnclosingNode(node); - ArrayDeque laterSiblings = getProcessedChildren(parentStartNode); - if (parentStartNode != null && laterSiblings.isEmpty()) { - // If there are no later siblings, get the parents later sibling. - ArrayDeque parentsLaterSiblings = getProcessedChildren(getFirstEnclosingNode(parentStartNode)); + String parentStartId = firstEnclosingIdOf(node); + ArrayDeque laterSiblings = getProcessedChildren(parentStartId); + if (parentStartId != null && laterSiblings.isEmpty()) { + // If there are no later siblings, get the parent's later siblings via the + // grandparent id. + String grandparentId = secondEnclosingIdOf(node); + ArrayDeque parentsLaterSiblings = getProcessedChildren(grandparentId); after = parentsLaterSiblings.isEmpty() ? 
null : parentsLaterSiblings.peek(); if (isDebugEnabled) { logger.debug(parentsLaterSiblings.toString()); @@ -130,14 +144,27 @@ private FlowNode getAfterNode(FlowNode node) { } @CheckForNull - private BlockStartNode getFirstEnclosingNode(FlowNode node) { + private String firstEnclosingIdOf(FlowNode node) { + if (enclosingIdsByNodeId != null) { + List ids = enclosingIdsByNodeId.get(node.getId()); + return (ids != null && !ids.isEmpty()) ? ids.get(0) : null; + } + return node.getEnclosingId(); + } + + @CheckForNull + private String secondEnclosingIdOf(FlowNode node) { + if (enclosingIdsByNodeId != null) { + List ids = enclosingIdsByNodeId.get(node.getId()); + return (ids != null && ids.size() >= 2) ? ids.get(1) : null; + } List enclosingBlocks = node.getEnclosingBlocks(); - return enclosingBlocks.isEmpty() ? null : enclosingBlocks.get(0); + return enclosingBlocks.size() >= 2 ? enclosingBlocks.get(1).getId() : null; } - private ArrayDeque getProcessedChildren(@CheckForNull FlowNode node) { - if (node != null && seenChildNodes.containsKey(node.getId())) { - return seenChildNodes.get(node.getId()); + private ArrayDeque getProcessedChildren(@CheckForNull String nodeId) { + if (nodeId != null && seenChildNodes.containsKey(nodeId)) { + return seenChildNodes.get(nodeId); } return new ArrayDeque<>(); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index 1e66d32f6..1268fe926 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -99,7 +99,7 @@ private void buildFrom(Collection nodes, @CheckForNull Map(); return; } - NodeRelationshipFinder finder = new NodeRelationshipFinder(); + NodeRelationshipFinder finder = new NodeRelationshipFinder(enclosingIdsByNodeId); Map relationships = 
finder.getNodeRelationships(nodes); GraphBuilder builder = new GraphBuilder(nodes, relationships, this.run, this.execution, enclosingIdsByNodeId); if (isDebugEnabled) { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 951ac4e86..7b1d5c16a 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -203,22 +203,36 @@ public PipelineGraph createTree() { /** Uncached compute path; callers are responsible for any caching. */ @Restricted(NoExternalUse.class) public PipelineGraph computeTree() { - // Check the cache first using the cheap version read — if we already have a graph - // for the current state, we can skip the O(N) snapshot copy entirely. + // Fast path: cache hit without locking. Long currentVersion = LiveGraphRegistry.get().currentVersion(run); if (currentVersion != null) { PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, currentVersion); if (cached != null) { return cached; } - LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); - if (snapshot != null) { - PipelineGraph computed = createTree( - new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), - snapshot.workspaceNodes(), - snapshot.enclosingIdsByNodeId()); - LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); - return computed; + } + // Slow path: serialise concurrent rebuilds for this run. Without this, N concurrent + // HTTP readers each spend O(nodes) CPU on the same computation. + Object lock = LiveGraphRegistry.get().graphComputeLock(run); + if (lock != null) { + synchronized (lock) { + // Re-check — another thread likely computed while we waited. 
+ Long retryVersion = LiveGraphRegistry.get().currentVersion(run); + if (retryVersion != null) { + PipelineGraph cached = LiveGraphRegistry.get().cachedGraph(run, retryVersion); + if (cached != null) { + return cached; + } + } + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + PipelineGraph computed = createTree( + new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), + snapshot.workspaceNodes(), + snapshot.enclosingIdsByNodeId()); + LiveGraphRegistry.get().cacheGraph(run, snapshot.version(), computed); + return computed; + } } } return createTree(CachedPipelineNodeGraphAdaptor.instance.getFor(run), null, null); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index 2f677a102..4bfd6d48a 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -1,10 +1,13 @@ package io.jenkins.plugins.pipelinegraphview.utils; +import edu.umd.cs.findbugs.annotations.CheckForNull; import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphRegistry; import io.jenkins.plugins.pipelinegraphview.livestate.LiveGraphSnapshot; import io.jenkins.plugins.pipelinegraphview.treescanner.PipelineNodeGraphAdapter; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.jenkinsci.plugins.workflow.support.steps.input.InputStep; @@ -21,7 +24,11 @@ public PipelineStepApi(WorkflowRun run) { this.run = run; } - private List parseSteps(List stepNodes, String stageId) { + private List parseSteps( + List stepNodes, + String stageId, + @CheckForNull Set hideFromViewBlockStartIds, + @CheckForNull Map> enclosingIdsByNodeId) { if 
(logger.isDebugEnabled()) { logger.debug("PipelineStepApi steps: '{}'.", stepNodes); } @@ -56,8 +63,8 @@ private List parseSteps(List stepNodes, String st } } - // Extract feature flags from parent nodes - Map flags = flowNodeWrapper.getFeatureFlags(); + Map flags = + resolveFlags(flowNodeWrapper, hideFromViewBlockStartIds, enclosingIdsByNodeId); return new PipelineStep( flowNodeWrapper.getId(), @@ -73,6 +80,32 @@ private List parseSteps(List stepNodes, String st .collect(Collectors.toList()); } + /** + * Derives per-step feature flags. Prefers the snapshot-provided sets over + * {@link FlowNodeWrapper#getFeatureFlags()}, which would otherwise call + * {@code FlowNode#iterateEnclosingBlocks()} once per step — O(depth) storage reads + * per step, and the HTTP reader bottleneck on large in-progress graphs. + */ + private static Map resolveFlags( + FlowNodeWrapper wrapper, + @CheckForNull Set hideFromViewBlockStartIds, + @CheckForNull Map> enclosingIdsByNodeId) { + if (hideFromViewBlockStartIds != null && enclosingIdsByNodeId != null) { + Map flags = new HashMap<>(); + List enclosing = enclosingIdsByNodeId.get(wrapper.getId()); + if (enclosing != null) { + for (String id : enclosing) { + if (hideFromViewBlockStartIds.contains(id)) { + flags.put("hidden", Boolean.TRUE); + break; + } + } + } + return flags; + } + return wrapper.getFeatureFlags(); + } + private PipelineInputStep mapInputStep(InputStep inputStep) { if (inputStep == null) { return null; @@ -91,19 +124,16 @@ static String cleanTextContent(String text) { return text.trim(); } - private PipelineStepList getSteps(String stageId, PipelineStepBuilderApi builder, boolean runIsComplete) { - List stepNodes = builder.getStageSteps(stageId); - PipelineStepList steps = new PipelineStepList(parseSteps(stepNodes, stageId), runIsComplete); - steps.sort(); - return steps; - } - - /* Returns a PipelineStepList, sorted by stageId and Id. 
*/ - private PipelineStepList getAllSteps(PipelineStepBuilderApi builder, boolean runIsComplete) { + private PipelineStepList getAllSteps( + PipelineStepBuilderApi builder, + boolean runIsComplete, + @CheckForNull Set hideFromViewBlockStartIds, + @CheckForNull Map> enclosingIdsByNodeId) { Map> stepNodes = builder.getAllSteps(); PipelineStepList allSteps = new PipelineStepList(runIsComplete); for (Map.Entry> entry : stepNodes.entrySet()) { - allSteps.addAll(parseSteps(entry.getValue(), entry.getKey())); + allSteps.addAll( + parseSteps(entry.getValue(), entry.getKey(), hideFromViewBlockStartIds, enclosingIdsByNodeId)); } allSteps.sort(); return allSteps; @@ -127,24 +157,38 @@ public PipelineStepList getAllSteps() { @Restricted(NoExternalUse.class) public PipelineStepList computeAllSteps() { boolean runIsComplete = !run.isBuilding(); - // Check the cache first using the cheap version read — if we already have steps - // for the current state, we can skip the O(N) snapshot copy entirely. + // Fast path: cache hit without locking. Long currentVersion = LiveGraphRegistry.get().currentVersion(run); if (currentVersion != null) { PipelineStepList cached = LiveGraphRegistry.get().cachedAllSteps(run, currentVersion); if (cached != null) { return cached; } - LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); - if (snapshot != null) { - PipelineStepList computed = getAllSteps( - new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), - runIsComplete); - LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); - return computed; + } + // Slow path: serialise concurrent rebuilds for this run — see computeTree for why. 
+ Object lock = LiveGraphRegistry.get().allStepsComputeLock(run); + if (lock != null) { + synchronized (lock) { + Long retryVersion = LiveGraphRegistry.get().currentVersion(run); + if (retryVersion != null) { + PipelineStepList cached = LiveGraphRegistry.get().cachedAllSteps(run, retryVersion); + if (cached != null) { + return cached; + } + } + LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); + if (snapshot != null) { + PipelineStepList computed = getAllSteps( + new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()), + runIsComplete, + snapshot.hideFromViewBlockStartIds(), + snapshot.enclosingIdsByNodeId()); + LiveGraphRegistry.get().cacheAllSteps(run, snapshot.version(), computed); + return computed; + } } } - return getAllSteps(CachedPipelineNodeGraphAdaptor.instance.getFor(run), runIsComplete); + return getAllSteps(CachedPipelineNodeGraphAdaptor.instance.getFor(run), runIsComplete, null, null); } /** @@ -153,6 +197,19 @@ public PipelineStepList computeAllSteps() { */ @Restricted(NoExternalUse.class) public PipelineStepList getAllStepsFrom(PipelineStepBuilderApi builder, boolean runIsComplete) { - return getAllSteps(builder, runIsComplete); + return getAllStepsFrom(builder, runIsComplete, null, null); + } + + /** + * Same as {@link #getAllStepsFrom(PipelineStepBuilderApi, boolean)} but with pre-computed + * snapshot data so feature-flag resolution doesn't need to walk enclosing blocks per step. 
+ */ + @Restricted(NoExternalUse.class) + public PipelineStepList getAllStepsFrom( + PipelineStepBuilderApi builder, + boolean runIsComplete, + @CheckForNull Set hideFromViewBlockStartIds, + @CheckForNull Map> enclosingIdsByNodeId) { + return getAllSteps(builder, runIsComplete, hideFromViewBlockStartIds, enclosingIdsByNodeId); } } From be53e670dab168ff2a892c243092dc6808eb3610 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 11:49:39 +0100 Subject: [PATCH 15/26] Add perf observer --- docs/examples/300k-nodes.jenkinsfile | 57 ++++++++ docs/examples/medium-pipeline.jenkinsfile | 86 ++++++++++++ docs/examples/perf-observer.sh | 158 ++++++++++++++++++++++ 3 files changed, 301 insertions(+) create mode 100644 docs/examples/300k-nodes.jenkinsfile create mode 100644 docs/examples/medium-pipeline.jenkinsfile create mode 100755 docs/examples/perf-observer.sh diff --git a/docs/examples/300k-nodes.jenkinsfile b/docs/examples/300k-nodes.jenkinsfile new file mode 100644 index 000000000..d965a355f --- /dev/null +++ b/docs/examples/300k-nodes.jenkinsfile @@ -0,0 +1,57 @@ +// Stress-test pipeline that produces roughly 300,000 FlowNodes across three +// bulk parallel phases, with short sleep windows between them so you can poll +// the REST API and watch latency as the graph grows. +// +// Give Jenkins at least 2 GB heap before running — each FlowNode retains a few +// KB, and GC thrash will muddy any measurement you take from it. +// +// Knobs (multiplicative): +// BRANCHES - parallel branches per phase (default 50) +// STAGES_PER_SECTION - sequential stages inside each branch per phase (default 20) +// STEPS_PER_STAGE - echo steps per stage (default 100) +// Defaults yield 3 * 50 * 20 * 100 = 300,000 echo FlowNodes, plus ~6,000 +// stage/parallel wrapping nodes. +// +// Runtime is dominated by CPS step-evaluation (a few ms per echo), so the +// defaults take 30–45 minutes on a typical controller. 
Drop STEPS_PER_STAGE +// for faster iteration while keeping the structural shape. + +BRANCHES = 50 +STAGES_PER_SECTION = 20 +STEPS_PER_STAGE = 100 + +def bulkSection(int branches, int stagesPerBranch, int stepsPerStage, String label) { + def work = [:] + for (int i = 0; i < branches; i++) { + def idx = i + work["${label}-b${idx}"] = { + for (int j = 0; j < stagesPerBranch; j++) { + stage("${label}-b${idx}-s${j}") { + for (int k = 0; k < stepsPerStage; k++) { + echo "." + } + } + } + } + } + parallel work +} + +node { + stage('Warmup') { + echo "BRANCHES=${BRANCHES} STAGES_PER_SECTION=${STAGES_PER_SECTION} STEPS_PER_STAGE=${STEPS_PER_STAGE}" + echo "Expected echo nodes: ~${3 * BRANCHES * STAGES_PER_SECTION * STEPS_PER_STAGE}" + } + + stage('Settle (tiny graph)') { sleep 30 } + stage('Bulk 1/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's1') } + + stage('Settle (~100k nodes)') { sleep 30 } + stage('Bulk 2/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's2') } + + stage('Settle (~200k nodes)') { sleep 30 } + stage('Bulk 3/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's3') } + + stage('Settle (~300k nodes, still building)') { sleep 30 } + stage('Settle (completed)') { sleep 60 } +} diff --git a/docs/examples/medium-pipeline.jenkinsfile b/docs/examples/medium-pipeline.jenkinsfile new file mode 100644 index 000000000..95cecbeb5 --- /dev/null +++ b/docs/examples/medium-pipeline.jenkinsfile @@ -0,0 +1,86 @@ +// A medium-sized pipeline exercising sequential stages, flat parallel, nested +// parallel, and wide branch counts. Useful when you want to see the Pipeline +// Overview render a non-trivial graph without waiting on a stress test. +// +// Shape: ~70 user-visible stages, a few hundred FlowNodes. Runs in a minute or +// two. Tune the sleep values and branch counts for bigger or smaller graphs. 
+ +def runStage(String name, int sleepSeconds) { + stage(name) { + echo "enter: ${name}" + sleep sleepSeconds + writeFile file: "out/${name.replaceAll('[^A-Za-z0-9]', '_')}.txt", text: name + echo "exit: ${name}" + } +} + +node { + runStage('Checkout', 2) + runStage('Install dependencies', 3) + runStage('Lint', 2) + runStage('Format check', 2) + runStage('Static analysis', 2) + + stage('Build matrix') { + def matrix = [:] + ['linux', 'macos', 'windows', 'freebsd', 'illumos', 'aix'].each { os -> + matrix["Build ${os}"] = { + runStage("${os}: fetch", 1) + runStage("${os}: compile", 3) + runStage("${os}: unit tests", 2) + runStage("${os}: package", 1) + } + } + parallel matrix + } + + stage('Integration') { + parallel( + 'api': { + parallel( + 'api: smoke': { runStage('api smoke', 2) }, + 'api: contract': { runStage('api contract', 3) }, + 'api: regression': { runStage('api regression', 4) }, + ) + }, + 'browser': { + parallel( + 'browser: chrome': { runStage('chrome', 3) }, + 'browser: firefox': { runStage('firefox', 3) }, + 'browser: safari': { runStage('safari', 3) }, + 'browser: edge': { runStage('edge', 3) }, + ) + }, + 'perf': { + runStage('perf: baseline', 2) + runStage('perf: load', 4) + runStage('perf: stress', 5) + }, + 'security': { + runStage('dep audit', 2) + runStage('sast', 3) + runStage('dast', 4) + }, + ) + } + + stage('Publish artifacts') { + def publishers = [:] + // C-style loop: (1..8).each would push an IntRange through a parallel + // closure, which is not CPS-serialisable and fails at checkpoint. 
+ for (int i = 1; i <= 8; i++) { + def idx = i + publishers["Publish region ${idx}"] = { + runStage("upload region ${idx}", 1) + runStage("verify region ${idx}", 1) + } + } + parallel publishers + } + + runStage('Release notes', 1) + runStage('Changelog', 2) + runStage('Tag', 2) + runStage('Announce', 1) + runStage('Cleanup', 1) +} diff --git a/docs/examples/perf-observer.sh b/docs/examples/perf-observer.sh new file mode 100755 index 000000000..d2075c44c --- /dev/null +++ b/docs/examples/perf-observer.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash +# +# perf-observer.sh — polls /tree and /allSteps for a running build and prints a +# latency summary at the end. Use it to compare two configurations (e.g. before +# vs. after a change) by running it once per build and diffing the two summaries. +# +# Usage: +# ./perf-observer.sh JENKINS_URL JOB_NAME BUILD_NUMBER [LABEL] +# +# Optional env vars: +# JENKINS_AUTH=user:token pass-through to curl --user +# POLL_INTERVAL=3 seconds between poll rounds (default 3) +# +# Example: +# +# ./perf-observer.sh http://localhost:8080/jenkins perf 1 baseline +# ./perf-observer.sh http://localhost:8080/jenkins perf 2 after-change +# +# Raw samples are written to /tmp/pgv-perf-

    The previous implementation called {@code getStageSteps} per stage, each scan running + * {@code steps.filter(s -> s.getParents().contains(stage))}. That's O(stages × steps); on a + * 300k-node run with ~6000 stages this was the dominant cost of the {@code /allSteps} + * endpoint — 90+ seconds of CPU per request observed in production jstacks. + */ @NonNull public Map> getAllSteps() { Map> stageNodeStepMap = new LinkedHashMap<>(); for (String stageId : stageNodeMap.keySet()) { - stageNodeStepMap.put(stageId, getStageSteps(stageId)); + stageNodeStepMap.put(stageId, new ArrayList<>()); + } + for (FlowNodeWrapper step : stepNodeMap.values()) { + List parents = step.getParents(); + if (parents.isEmpty()) { + continue; + } + if (parents.size() == 1) { + List bucket = stageNodeStepMap.get(parents.get(0).getId()); + if (bucket != null) { + bucket.add(step); + } + continue; + } + // Rare: multi-parent step. Match the legacy contains() semantics without adding the + // same step twice to the same stage's bucket. 
+ Set seen = new HashSet<>(parents.size()); + for (FlowNodeWrapper parent : parents) { + if (seen.add(parent.getId())) { + List bucket = stageNodeStepMap.get(parent.getId()); + if (bucket != null) { + bucket.add(step); + } + } + } + } + FlowNodeWrapper.NodeComparator comparator = new FlowNodeWrapper.NodeComparator(); + for (List bucket : stageNodeStepMap.values()) { + bucket.sort(comparator); } return stageNodeStepMap; } From 7bf0c63ac9a1d807264c105cee71c6763ef17bfa Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 13:17:34 +0100 Subject: [PATCH 21/26] Cleanup javadoc --- .../livestate/LiveGraphLifecycle.java | 17 +++--- .../livestate/LiveGraphRegistry.java | 6 +-- .../livestate/LiveGraphState.java | 54 ++++++++----------- .../treescanner/PipelineNodeTreeScanner.java | 16 ++---- .../utils/PipelineGraphApi.java | 11 ++-- .../utils/PipelineGraphViewCache.java | 6 +-- .../utils/PipelineStepApi.java | 8 +-- 7 files changed, 44 insertions(+), 74 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java index 92130e472..1aba0b3fa 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphLifecycle.java @@ -63,13 +63,12 @@ public void onCompleted(@NonNull FlowExecution execution) { PipelineStepList allSteps; LiveGraphSnapshot snapshot = LiveGraphRegistry.get().snapshot(run); if (snapshot != null) { - // Share a single adapter (and therefore a single tree-scanner pass) for - // both graph and steps rather than paying the cost twice. + // Share a single adapter so both graph and step builds reuse one + // tree-scanner pass. 
PipelineNodeGraphAdapter adapter = new PipelineNodeGraphAdapter(run, snapshot.nodes(), snapshot.enclosingIdsByNodeId()); - // runIsComplete=true here directly: WorkflowRun.isBuilding() can still be - // true even though FlowExecution is complete. PipelineGraph.complete is - // already derived from FlowExecution.isComplete() inside createTreeFrom. + // Force runIsComplete=true for the step list: WorkflowRun.isBuilding() can + // still return true here even though the execution is complete. graph = new PipelineGraphApi(run) .createTreeFrom(adapter, snapshot.workspaceNodes(), snapshot.enclosingIdsByNodeId()); allSteps = new PipelineStepApi(run) @@ -79,11 +78,9 @@ public void onCompleted(@NonNull FlowExecution execution) { snapshot.hideFromViewBlockStartIds(), snapshot.enclosingIdsByNodeId()); } else { - // No live state (feature disabled, poisoned, plugin installed mid-build). - // Fall back to the scanner-backed paths and rebuild steps with - // runIsComplete=true to keep the persisted copy consistent. Defensively - // copy the list: PipelineStepList.steps is publicly mutable, and `raw` - // may be held by the DTO cache or returned to a concurrent HTTP reader. + // Fall back to the scanner-backed paths. Defensively copy the step list: + // PipelineStepList.steps is publicly mutable and `raw` may be aliased + // with a concurrent HTTP reader's cached entry. 
graph = new PipelineGraphApi(run).computeTree(); PipelineStepList raw = new PipelineStepApi(run).computeAllSteps(); allSteps = new PipelineStepList(new ArrayList<>(raw.steps), true); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java index 7ecd1e2b6..b28d36da4 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphRegistry.java @@ -27,10 +27,8 @@ */ public final class LiveGraphRegistry { - // IMPORTANT: keep CACHE_MAX_SIZE declared BEFORE INSTANCE. Static fields initialise in - // source order, and the instance's Caffeine builder reads CACHE_MAX_SIZE — if INSTANCE - // were declared first, CACHE_MAX_SIZE would still be 0 during construction and Caffeine - // would evict every entry immediately (maximumSize(0) means "no entries allowed"). + // Must be declared before INSTANCE: the Caffeine builder reads this during INSTANCE + // construction, and Caffeine's maximumSize(0) means "no entries allowed". private static final int CACHE_MAX_SIZE = SystemProperties.getInteger(LiveGraphRegistry.class.getName() + ".size", 512); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 9c7e72b09..8a5da4963 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -23,33 +23,27 @@ final class LiveGraphState { private final List nodes = new ArrayList<>(); private final Set seenIds = new HashSet<>(); - // Node ID → its enclosing-block IDs (innermost first). 
Populated at add time on the - // CPS VM thread, so HTTP graph builds don't need to acquire the storage read lock to - // resolve ancestry. Without this, each findParentNode call goes back to storage and - // contends with the write lock the running build is holding. + // Node ID → its enclosing-block IDs (innermost first). Populated at add time on the CPS + // VM thread so HTTP graph builds don't contend with the storage write lock to resolve + // ancestry. private final Map> enclosingIdsByNodeId = new HashMap<>(); - // IDs of BlockStartNodes that wrap a {@code hideFromView} step. Lets HTTP readers check - // whether a step is "hidden" by scanning this set against the step's enclosing IDs, - // instead of calling FlowNode#iterateEnclosingBlocks which goes to storage per step. + // IDs of BlockStartNodes that wrap a {@code hideFromView} step. Lets HTTP readers decide + // whether a step is hidden by intersecting with the step's enclosing IDs, avoiding a + // per-step {@code iterateEnclosingBlocks} walk through storage. private final Set hideFromViewBlockStartIds = new HashSet<>(); private long version = 0; private volatile boolean poisoned = false; - // Starts unready. Only the lifecycle ({@link LiveGraphLifecycle}) flips it on, after any - // needed catch-up — callers of {@link #snapshot} treat a null return as "fall back to - // the scanner", so a partial state that never gets the ready handshake stays invisible. + // Starts unready; {@link LiveGraphLifecycle} flips it on after any catch-up. Readers + // treat a null {@link #snapshot} as "fall back to the scanner", so a state that hasn't + // been marked ready stays invisible. private boolean ready = false; - // Mutations synchronise on the instance monitor so concurrent writers cannot regress a - // newer version with an older one (check-then-set on a volatile reference would be racy). private VersionedCache cachedGraph; private VersionedCache cachedAllSteps; - // Per-compute dedup locks. 
N concurrent HTTP threads rebuilding the same graph - // multiplies CPU cost by N (they each do the same O(nodes) work). With these, the - // first thread computes and caches; the rest block here, then re-check cache and - // usually find the fresh result. Separate locks for tree vs steps — they're independent - // computations and a slow tree rebuild shouldn't starve steps (or vice versa). + // Serialise concurrent rebuilds so N HTTP readers don't each do the same O(nodes) work. + // Separate locks for tree vs steps — a slow tree rebuild must not starve steps. private final Object graphComputeLock = new Object(); private final Object allStepsComputeLock = new Object(); @@ -58,16 +52,15 @@ synchronized void addNode(FlowNode node) { return; } nodes.add(node); - // Capture ancestry now. On the CPS VM thread (the usual onNewHead caller) this is a - // reentrant read against the already-held storage write lock — effectively free. - // A node's enclosing chain never changes after creation, so we only pay this once. + // Capture ancestry once, at add time. The enclosing chain never changes after + // creation, so HTTP readers can resolve parentage from this map instead of going + // back to storage. try { enclosingIdsByNodeId.put(node.getId(), List.copyOf(node.getAllEnclosingIds())); } catch (Throwable ignored) { enclosingIdsByNodeId.put(node.getId(), List.of()); } - // Record hideFromView block starts once, here, so readers never need to walk a - // node's enclosing blocks through storage to tell whether the node is hidden. + // See the hideFromViewBlockStartIds field comment for why this is captured here. if (node instanceof StepStartNode stepStartNode) { try { StepDescriptor descriptor = stepStartNode.getDescriptor(); @@ -91,8 +84,7 @@ synchronized int size() { /** * Cheap version read for cache-hit short-circuits — same readiness/poison semantics as - * {@link #snapshot()}, but skips the O(N) copy. 
Callers check this first and only take - * a full snapshot on cache miss. + * {@link #snapshot()}, but skips the O(N) copy. */ synchronized Long currentVersion() { if (poisoned || !ready) { @@ -102,9 +94,6 @@ synchronized Long currentVersion() { } LiveGraphSnapshot snapshot() { - // Release the monitor before running the WorkspaceAction scan: each - // getAction(WorkspaceAction.class) walks the FlowNode's action list, so doing N of - // them under the lock the CPS VM also uses would block pipeline execution for O(N). List nodesCopy; Map> enclosingCopy; Set hideFromViewCopy; @@ -118,12 +107,11 @@ LiveGraphSnapshot snapshot() { hideFromViewCopy = Set.copyOf(hideFromViewBlockStartIds); v = version; } - // Filter for WorkspaceAction at snapshot time, not at addNode time: a node can have - // WorkspaceAction attached after onNewHead fires (when the workspace is allocated), - // so scanning here always observes the latest action state. Newest-first ordering - // matches DepthFirstScanner (from current heads backward); PipelineGraphApi#getStageNode - // returns on the first match, and for nested agents the innermost workspace — the - // one a later-created inner `node {}` block sits in — is the more-specific match. + // Scan for WorkspaceAction outside the monitor: each getAction call walks the node's + // action list, and a node can gain WorkspaceAction after onNewHead fires (when the + // workspace is allocated), so resolving here observes the latest state. Newest-first + // order matches DepthFirstScanner so PipelineGraphApi#getStageNode picks the + // innermost workspace for nested agents. 
List workspaceNodes = new ArrayList<>(); for (int i = nodesCopy.size() - 1; i >= 0; i--) { FlowNode n = nodesCopy.get(i); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index 2d04aea96..f414173d8 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -135,14 +135,6 @@ public List getStageSteps(String startNodeId) { return getAllSteps().getOrDefault(startNodeId, new ArrayList<>()); } - /** - * Buckets every step into its parent stage's list in a single O(steps) pass. - * - *

    The previous implementation called {@code getStageSteps} per stage, each scan running - * {@code steps.filter(s -> s.getParents().contains(stage))}. That's O(stages × steps); on a - * 300k-node run with ~6000 stages this was the dominant cost of the {@code /allSteps} - * endpoint — 90+ seconds of CPU per request observed in production jstacks. - */ @NonNull public Map> getAllSteps() { Map> stageNodeStepMap = new LinkedHashMap<>(); @@ -161,8 +153,7 @@ public Map> getAllSteps() { } continue; } - // Rare: multi-parent step. Match the legacy contains() semantics without adding the - // same step twice to the same stage's bucket. + // A step with multiple parents belongs to every matching stage, but only once. Set seen = new HashSet<>(parents.size()); for (FlowNodeWrapper parent : parents) { if (seen.add(parent.getId())) { @@ -518,9 +509,8 @@ private void assignParent(@NonNull FlowNodeWrapper wrappedNode, @CheckForNull Fl */ private @CheckForNull FlowNodeWrapper findParentNode( @NonNull FlowNodeWrapper child, @NonNull Map wrappedNodeMap) { - // Prefer the pre-computed ancestry: FlowNode#getAllEnclosingIds reaches back into - // the execution's storage, which contends with the running build's write lock - // and can make HTTP requests block for minutes on a large graph. + // Prefer the pre-computed ancestry: FlowNode#getAllEnclosingIds goes through the + // storage read lock and contends with the running build's writes. 
List enclosingIds; if (enclosingIdsByNodeId != null) { enclosingIds = enclosingIdsByNodeId.getOrDefault(child.getId(), List.of()); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java index 7b1d5c16a..cff563e89 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphApi.java @@ -134,9 +134,9 @@ private static String getStageNode( FlowExecution execution = flowNode.getExecution(); Iterable candidates = workspaceNodes != null ? workspaceNodes : new DepthFirstScanner().allNodes(execution); - // Prefer pre-computed ancestry over FlowNode#getAllEnclosingIds / getEnclosingId / - // execution.getNode, all of which acquire the storage read lock and can block for - // minutes on a large graph when the CPS VM is writing frequently. + // Prefer pre-computed ancestry: FlowNode#getAllEnclosingIds / getEnclosingId / + // execution.getNode all acquire the storage read lock, which contends with the + // running build's writes. List flowNodeEnclosingIds = enclosingIdsFor(flowNode, enclosingIdsByNodeId); for (FlowNode n : candidates) { WorkspaceAction ws = n.getAction(WorkspaceAction.class); @@ -148,9 +148,8 @@ private static String getStageNode( || Objects.equals(nDirectEnclosingId, flowNode.getId()) || flowNodeEnclosingIds.contains(n.getId()); - // Parallel stages have a sub-stage, so we need to check the 3rd parent for a match. - // Original code walked three levels via execution.getNode(...); the third-level - // enclosing ID is simply enclosingIds[2] in the pre-computed list. + // For parallel stages the stage wrapper sits three levels above the workspace + // node (branch → parallel block → sub-stage → workspace). 
if (flowNodeWrapper.getType() == FlowNodeWrapper.NodeType.PARALLEL) { if (enclosingIdsByNodeId != null) { if (nEnclosingIds.size() >= 3) { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java index c1ac37ef2..a75c28365 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraphViewCache.java @@ -71,11 +71,9 @@ public PipelineStepList getAllSteps(WorkflowRun run, Supplier /** * Writes a final graph and step list directly to disk, bypassing the {@code isBuilding} - * guard used by {@link #getGraph} / {@link #getAllSteps}. Intended for use at + * guard on {@link #getGraph} / {@link #getAllSteps}. Intended for * {@code FlowExecutionListener.onCompleted}, where the execution is known complete but - * {@code WorkflowRun.isBuilding()} may not have flipped yet. Calling this avoids wasting - * the work already done by the live-state path: without it, the first read after - * completion falls through to a full scanner sweep. + * {@code WorkflowRun.isBuilding()} may not have flipped yet. */ public void seed(WorkflowRun run, PipelineGraph graph, PipelineStepList allSteps) { CachedPayload payload = load(run); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java index fa9897525..c71e5ba8c 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepApi.java @@ -81,10 +81,10 @@ private List parseSteps( } /** - * Derives per-step feature flags. 
Prefers the snapshot-provided sets over - * {@link FlowNodeWrapper#getFeatureFlags()}, which would otherwise call - * {@code FlowNode#iterateEnclosingBlocks()} once per step — O(depth) storage reads - * per step, and the HTTP reader bottleneck on large in-progress graphs. + * Derives per-step feature flags from the snapshot-provided sets when available, falling + * back to {@link FlowNodeWrapper#getFeatureFlags()}. The fallback walks each step's + * enclosing blocks through FlowNode storage, which contends with the running build's + * write lock. */ private static Map resolveFlags( FlowNodeWrapper wrapper, From d8369de0d43ea2dadac74712b59cba549b2525ce Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 13:23:55 +0100 Subject: [PATCH 22/26] Use jackson for steps --- pom.xml | 4 + .../PipelineConsoleViewAction.java | 7 +- .../pipelinegraphview/utils/PipelineStep.java | 2 +- .../utils/PipelineStepListJsonWriter.java | 94 +++++++++++++++++++ .../utils/PipelineStepListJsonWriterTest.java | 86 +++++++++++++++++ 5 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java create mode 100644 src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java diff --git a/pom.xml b/pom.xml index 6085ae5dc..e423d9300 100644 --- a/pom.xml +++ b/pom.xml @@ -63,6 +63,10 @@ io.jenkins.plugins ionicons-api + + io.jenkins.plugins + jackson3-api + org.jenkins-ci.plugins display-url-api diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java index d9945de59..0eb3467d9 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java @@ -23,6 +23,7 @@ import 
io.jenkins.plugins.pipelinegraphview.utils.PipelineStep; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepListJsonWriter; import jakarta.servlet.ServletException; import java.io.IOException; import java.util.ArrayList; @@ -108,12 +109,10 @@ private JSONObject getSteps(String nodeId) { public void getAllSteps(StaplerRequest2 req, StaplerResponse2 rsp) throws IOException, ServletException { run.checkPermission(Item.READ); PipelineStepList steps = stepApi.getAllSteps(); - JSONObject json = JSONObject.fromObject(steps, jsonConfig); - HttpResponse response = HttpResponses.okJSON(json); - rsp.setStatus(200); + rsp.setContentType("application/json;charset=UTF-8"); setCache(rsp, steps.runIsComplete); - response.generateResponse(req, rsp, null); + PipelineStepListJsonWriter.write(steps, rsp.getOutputStream()); } private void setCache(StaplerResponse2 rsp, boolean complete) { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java index e9591bed6..9e7146592 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java @@ -7,7 +7,7 @@ public class PipelineStep extends AbstractPipelineNode { final String stageId; - private final PipelineInputStep inputStep; + final PipelineInputStep inputStep; private final Map flags; public PipelineStep( diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java new file mode 100644 index 000000000..4a4588508 --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java @@ -0,0 +1,94 @@ +package 
io.jenkins.plugins.pipelinegraphview.utils; + +import java.io.OutputStream; +import tools.jackson.core.JsonGenerator; +import tools.jackson.databind.ObjectMapper; +import tools.jackson.databind.SerializationContext; +import tools.jackson.databind.ValueSerializer; +import tools.jackson.databind.json.JsonMapper; +import tools.jackson.databind.module.SimpleModule; + +/** + * Serialises {@link PipelineStepList} to JSON via Jackson. The wire format matches + * {@link PipelineStepList.PipelineStepListJsonProcessor} exactly — notably, fields whose value + * is {@code null} are omitted entirely rather than written as {@code null}. + */ +public final class PipelineStepListJsonWriter { + + private static final ObjectMapper MAPPER = buildMapper(); + + private PipelineStepListJsonWriter() {} + + public static void write(PipelineStepList list, OutputStream out) { + MAPPER.writeValue(out, list); + } + + private static ObjectMapper buildMapper() { + SimpleModule module = new SimpleModule("PipelineStepList"); + module.addSerializer(PipelineStepList.class, new StepListSerializer()); + module.addSerializer(PipelineStep.class, new StepSerializer()); + module.addSerializer(PipelineInputStep.class, new InputStepSerializer()); + module.addSerializer(PipelineState.class, new StateSerializer()); + return JsonMapper.builder().addModule(module).build(); + } + + private static final class StepListSerializer extends ValueSerializer { + @Override + public void serialize(PipelineStepList list, JsonGenerator g, SerializationContext ctx) { + g.writeStartObject(); + g.writeArrayPropertyStart("steps"); + for (PipelineStep step : list.steps) { + ctx.writeValue(g, step); + } + g.writeEndArray(); + g.writeBooleanProperty("runIsComplete", list.runIsComplete); + g.writeEndObject(); + } + } + + private static final class StepSerializer extends ValueSerializer { + @Override + public void serialize(PipelineStep step, JsonGenerator g, SerializationContext ctx) { + g.writeStartObject(); + 
g.writeStringProperty("id", step.id); + g.writeStringProperty("name", step.name); + g.writeName("state"); + ctx.writeValue(g, step.state); + g.writeStringProperty("type", step.type); + g.writeStringProperty("title", step.title); + g.writeNumberProperty("pauseDurationMillis", step.timingInfo.getPauseDurationMillis()); + g.writeNumberProperty("startTimeMillis", step.getStartTimeMillis()); + Long total = step.getTotalDurationMillis(); + if (total != null) { + g.writeNumberProperty("totalDurationMillis", total.longValue()); + } + g.writeStringProperty("stageId", step.stageId); + if (step.inputStep != null) { + g.writeName("inputStep"); + ctx.writeValue(g, step.inputStep); + } + g.writePOJOProperty("flags", step.getFlags()); + g.writeEndObject(); + } + } + + private static final class InputStepSerializer extends ValueSerializer { + @Override + public void serialize(PipelineInputStep input, JsonGenerator g, SerializationContext ctx) { + g.writeStartObject(); + g.writeStringProperty("message", input.message()); + g.writeStringProperty("cancel", input.cancel()); + g.writeStringProperty("id", input.id()); + g.writeStringProperty("ok", input.ok()); + g.writeBooleanProperty("parameters", input.parameters()); + g.writeEndObject(); + } + } + + private static final class StateSerializer extends ValueSerializer { + @Override + public void serialize(PipelineState state, JsonGenerator g, SerializationContext ctx) { + g.writeString(state.toString()); + } + } +} diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java new file mode 100644 index 000000000..a921c5dbf --- /dev/null +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java @@ -0,0 +1,86 @@ +package io.jenkins.plugins.pipelinegraphview.utils; + +import static org.hamcrest.MatcherAssert.assertThat; +import static 
org.hamcrest.Matchers.equalTo; + +import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; +import java.io.ByteArrayOutputStream; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import net.sf.json.JSONObject; +import net.sf.json.JsonConfig; +import org.junit.jupiter.api.Test; + +/** + * Pins the Jackson writer against the legacy net.sf.json bean-processor output so a + * wire-format regression fails this test rather than the frontend. + */ +class PipelineStepListJsonWriterTest { + + @Test + void matchesLegacyOutputForRunningAndCompletedSteps() throws Exception { + assertMatches(build()); + } + + @Test + void matchesLegacyOutputForEmptyList() throws Exception { + assertMatches(new PipelineStepList(true)); + } + + private static void assertMatches(PipelineStepList list) throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PipelineStepListJsonWriter.write(list, baos); + String jacksonOutput = baos.toString("UTF-8"); + + JsonConfig config = new JsonConfig(); + PipelineStepList.PipelineStepListJsonProcessor.configure(config); + String legacyOutput = JSONObject.fromObject(list, config).toString(); + + // Jackson may emit keys/values in a different order than net.sf.json, so compare + // parsed trees rather than raw strings. 
+ JSONObject parsedJackson = JSONObject.fromObject(jacksonOutput); + JSONObject parsedLegacy = JSONObject.fromObject(legacyOutput); + assertThat(parsedJackson, equalTo(parsedLegacy)); + } + + private static PipelineStepList build() { + Map hiddenFlag = new LinkedHashMap<>(); + hiddenFlag.put("hidden", Boolean.TRUE); + + PipelineStep running = new PipelineStep( + "3", + "echo hello", + PipelineState.RUNNING, + "STEP", + "Print Message", + "2", + null, + new TimingInfo(0, 0, 1_700_000_000_000L), + Map.of()); + + PipelineStep completed = new PipelineStep( + "4", + "sh build.sh", + PipelineState.SUCCESS, + "STEP", + "Shell Script", + "2", + null, + new TimingInfo(1500, 0, 1_700_000_000_500L), + hiddenFlag); + + PipelineStep withInput = new PipelineStep( + "5", + "Deploy?", + PipelineState.PAUSED, + "STEP", + "", + "2", + new PipelineInputStep("Deploy to prod?", "Cancel", "input-1", "Proceed", true), + new TimingInfo(0, 200, 1_700_000_001_000L), + Map.of()); + + return new PipelineStepList(List.of(running, completed, withInput), false); + } +} From 21f7ce3a33be766d596ca6f9bcd6b5d1b616a840 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 13:36:59 +0100 Subject: [PATCH 23/26] Simplify jackson code --- .../utils/AbstractPipelineNode.java | 11 ++- .../utils/PipelineState.java | 2 + .../utils/PipelineStepListJsonWriter.java | 87 +++---------------- .../utils/PipelineStepListJsonWriterTest.java | 33 +++++-- 4 files changed, 48 insertions(+), 85 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java index 676194753..f3617a263 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java @@ -1,5 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.utils; +import 
com.fasterxml.jackson.annotation.JsonIgnore; import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; import net.sf.json.JSONObject; import net.sf.json.JsonConfig; @@ -12,7 +13,11 @@ public class AbstractPipelineNode { final String title; public final String id; private final long pauseDurationMillis; - private final long totalDurationMillis; + // Cached value; the serialised view is the null-aware getTotalDurationMillis() below. + @JsonIgnore + private final long cachedTotalDurationMillis; + + @JsonIgnore final TimingInfo timingInfo; public AbstractPipelineNode( @@ -25,7 +30,7 @@ public AbstractPipelineNode( this.timingInfo = timingInfo; // These values won't change for a given TimingInfo. this.pauseDurationMillis = timingInfo.getPauseDurationMillis(); - this.totalDurationMillis = timingInfo.getTotalDurationMillis(); + this.cachedTotalDurationMillis = timingInfo.getTotalDurationMillis(); } public long getStartTimeMillis() { @@ -33,7 +38,7 @@ public long getStartTimeMillis() { } public Long getTotalDurationMillis() { - return state.isInProgress() ? null : totalDurationMillis; + return state.isInProgress() ? 
null : cachedTotalDurationMillis; } abstract static class AbstractPipelineNodeJsonProcessor implements JsonBeanProcessor { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java index ecbf0b883..450880e64 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java @@ -1,5 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.utils; +import com.fasterxml.jackson.annotation.JsonValue; import hudson.model.Result; import java.util.Locale; import net.sf.json.JsonConfig; @@ -65,6 +66,7 @@ public static PipelineState of(NodeRunStatus status) { }; } + @JsonValue @Override public String toString() { return name().toLowerCase(Locale.ROOT); diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java index 4a4588508..962e97421 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java @@ -1,94 +1,27 @@ package io.jenkins.plugins.pipelinegraphview.utils; +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonInclude; import java.io.OutputStream; -import tools.jackson.core.JsonGenerator; import tools.jackson.databind.ObjectMapper; -import tools.jackson.databind.SerializationContext; -import tools.jackson.databind.ValueSerializer; import tools.jackson.databind.json.JsonMapper; -import tools.jackson.databind.module.SimpleModule; /** - * Serialises {@link PipelineStepList} to JSON via Jackson. 
The wire format matches - * {@link PipelineStepList.PipelineStepListJsonProcessor} exactly — notably, fields whose value - * is {@code null} are omitted entirely rather than written as {@code null}. + * Serialises {@link PipelineStepList} to JSON via Jackson. The DTO classes carry Jackson + * annotations so the wire format matches {@link PipelineStepList.PipelineStepListJsonProcessor} + * used by the net.sf.json path — notably, fields whose value is {@code null} are omitted rather + * than written as {@code null}. */ public final class PipelineStepListJsonWriter { - private static final ObjectMapper MAPPER = buildMapper(); + private static final ObjectMapper MAPPER = JsonMapper.builder() + .changeDefaultPropertyInclusion(inc -> inc.withValueInclusion(JsonInclude.Include.NON_NULL)) + .changeDefaultVisibility(v -> v.withFieldVisibility(JsonAutoDetect.Visibility.ANY)) + .build(); private PipelineStepListJsonWriter() {} public static void write(PipelineStepList list, OutputStream out) { MAPPER.writeValue(out, list); } - - private static ObjectMapper buildMapper() { - SimpleModule module = new SimpleModule("PipelineStepList"); - module.addSerializer(PipelineStepList.class, new StepListSerializer()); - module.addSerializer(PipelineStep.class, new StepSerializer()); - module.addSerializer(PipelineInputStep.class, new InputStepSerializer()); - module.addSerializer(PipelineState.class, new StateSerializer()); - return JsonMapper.builder().addModule(module).build(); - } - - private static final class StepListSerializer extends ValueSerializer { - @Override - public void serialize(PipelineStepList list, JsonGenerator g, SerializationContext ctx) { - g.writeStartObject(); - g.writeArrayPropertyStart("steps"); - for (PipelineStep step : list.steps) { - ctx.writeValue(g, step); - } - g.writeEndArray(); - g.writeBooleanProperty("runIsComplete", list.runIsComplete); - g.writeEndObject(); - } - } - - private static final class StepSerializer extends ValueSerializer { - @Override - 
public void serialize(PipelineStep step, JsonGenerator g, SerializationContext ctx) { - g.writeStartObject(); - g.writeStringProperty("id", step.id); - g.writeStringProperty("name", step.name); - g.writeName("state"); - ctx.writeValue(g, step.state); - g.writeStringProperty("type", step.type); - g.writeStringProperty("title", step.title); - g.writeNumberProperty("pauseDurationMillis", step.timingInfo.getPauseDurationMillis()); - g.writeNumberProperty("startTimeMillis", step.getStartTimeMillis()); - Long total = step.getTotalDurationMillis(); - if (total != null) { - g.writeNumberProperty("totalDurationMillis", total.longValue()); - } - g.writeStringProperty("stageId", step.stageId); - if (step.inputStep != null) { - g.writeName("inputStep"); - ctx.writeValue(g, step.inputStep); - } - g.writePOJOProperty("flags", step.getFlags()); - g.writeEndObject(); - } - } - - private static final class InputStepSerializer extends ValueSerializer { - @Override - public void serialize(PipelineInputStep input, JsonGenerator g, SerializationContext ctx) { - g.writeStartObject(); - g.writeStringProperty("message", input.message()); - g.writeStringProperty("cancel", input.cancel()); - g.writeStringProperty("id", input.id()); - g.writeStringProperty("ok", input.ok()); - g.writeBooleanProperty("parameters", input.parameters()); - g.writeEndObject(); - } - } - - private static final class StateSerializer extends ValueSerializer { - @Override - public void serialize(PipelineState state, JsonGenerator g, SerializationContext ctx) { - g.writeString(state.toString()); - } - } } diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java index a921c5dbf..60ac0b924 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java +++ 
b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java @@ -5,9 +5,11 @@ import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; import java.io.ByteArrayOutputStream; +import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import net.sf.json.JSONObject; import net.sf.json.JsonConfig; import org.junit.jupiter.api.Test; @@ -37,11 +39,32 @@ private static void assertMatches(PipelineStepList list) throws Exception { PipelineStepList.PipelineStepListJsonProcessor.configure(config); String legacyOutput = JSONObject.fromObject(list, config).toString(); - // Jackson may emit keys/values in a different order than net.sf.json, so compare - // parsed trees rather than raw strings. - JSONObject parsedJackson = JSONObject.fromObject(jacksonOutput); - JSONObject parsedLegacy = JSONObject.fromObject(legacyOutput); - assertThat(parsedJackson, equalTo(parsedLegacy)); + // Compare as sorted maps so a difference in key order (which isn't part of the wire + // contract) doesn't fail the test — only structural / value differences matter. 
+ assertThat(canonicalize(jacksonOutput), equalTo(canonicalize(legacyOutput))); + } + + @SuppressWarnings("unchecked") + private static Object canonicalize(String json) { + Object parsed = JSONObject.fromObject(json); + return canonicalize(parsed); + } + + @SuppressWarnings("unchecked") + private static Object canonicalize(Object node) { + if (node instanceof Map map) { + Map sorted = new TreeMap<>(); + map.forEach((k, v) -> sorted.put(String.valueOf(k), canonicalize(v))); + return sorted; + } + if (node instanceof List list) { + List out = new ArrayList<>(list.size()); + for (Object v : list) { + out.add(canonicalize(v)); + } + return out; + } + return node; } private static PipelineStepList build() { From cdb9eab7c3e4def5d0f06f92881071ad505e3585 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 14:06:25 +0100 Subject: [PATCH 24/26] Use solely jackson --- .../PipelineConsoleViewAction.java | 32 ++--- .../MultiPipelineGraphViewAction.java | 16 +-- .../multipipelinegraphview/PipelineRun.java | 31 +--- .../utils/AbstractPipelineNode.java | 24 +--- .../utils/PipelineGraph.java | 21 --- .../utils/PipelineInputStep.java | 26 +--- .../utils/PipelineJsonWriter.java | 30 ++++ .../utils/PipelineStage.java | 30 +--- .../utils/PipelineState.java | 22 --- .../pipelinegraphview/utils/PipelineStep.java | 26 ---- .../utils/PipelineStepList.java | 22 --- .../utils/PipelineStepListJsonWriter.java | 27 ---- .../utils/PipelineJsonWriterTest.java | 133 ++++++++++++++++++ .../utils/PipelineStateTest.java | 39 +---- .../utils/PipelineStepListJsonWriterTest.java | 109 -------------- 15 files changed, 191 insertions(+), 397 deletions(-) create mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriter.java delete mode 100644 src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java create mode 100644 src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java delete mode 100644 
src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java index 0eb3467d9..d597d7e9d 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/consoleview/PipelineConsoleViewAction.java @@ -19,11 +19,11 @@ import io.jenkins.plugins.pipelinegraphview.cards.items.TestResultRunDetailsItem; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraphApi; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineJsonWriter; import io.jenkins.plugins.pipelinegraphview.utils.PipelineNodeUtil; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStep; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepApi; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; -import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepListJsonWriter; import jakarta.servlet.ServletException; import java.io.IOException; import java.util.ArrayList; @@ -32,7 +32,6 @@ import jenkins.model.Jenkins; import jenkins.model.Tab; import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; import org.jenkinsci.plugins.pipeline.modeldefinition.actions.RestartDeclarativePipelineAction; import org.jenkinsci.plugins.workflow.cps.replay.ReplayAction; import org.jenkinsci.plugins.workflow.flow.FlowExecution; @@ -53,12 +52,6 @@ public class PipelineConsoleViewAction extends Tab { public static final int CACHE_AGE = (int) TimeUnit.DAYS.toSeconds(1); private static final Logger logger = LoggerFactory.getLogger(PipelineConsoleViewAction.class); - private static final JsonConfig jsonConfig = new JsonConfig(); - - static { - 
PipelineStepList.PipelineStepListJsonProcessor.configure(jsonConfig); - PipelineGraph.PipelineGraphJsonProcessor.configure(jsonConfig); - } private final PipelineGraphApi graphApi; private final WorkflowRun run; @@ -85,20 +78,18 @@ public String getUrlName() { // running). @GET @WebMethod(name = "steps") - public HttpResponse getSteps(StaplerRequest2 req) { + public void getSteps(StaplerRequest2 req, StaplerResponse2 rsp) throws IOException, ServletException { run.checkPermission(Item.READ); String nodeId = req.getParameter("nodeId"); - if (nodeId != null) { - return HttpResponses.okJSON(getSteps(nodeId)); - } else { - return HttpResponses.errorJSON("Error getting console text"); + if (nodeId == null) { + HttpResponses.errorJSON("Error getting console text").generateResponse(req, rsp, null); + return; } - } - - private JSONObject getSteps(String nodeId) { logger.debug("getSteps was passed nodeId '{}'.", nodeId); PipelineStepList steps = stepApi.getSteps(nodeId); - return JSONObject.fromObject(steps, jsonConfig); + rsp.setStatus(200); + rsp.setContentType("application/json;charset=UTF-8"); + PipelineJsonWriter.write(steps, rsp.getOutputStream()); } // Return all steps to: @@ -112,7 +103,7 @@ public void getAllSteps(StaplerRequest2 req, StaplerResponse2 rsp) throws IOExce rsp.setStatus(200); rsp.setContentType("application/json;charset=UTF-8"); setCache(rsp, steps.runIsComplete); - PipelineStepListJsonWriter.write(steps, rsp.getOutputStream()); + PipelineJsonWriter.write(steps, rsp.getOutputStream()); } private void setCache(StaplerResponse2 rsp, boolean complete) { @@ -393,11 +384,10 @@ public void getTree(StaplerRequest2 req, StaplerResponse2 rsp) throws IOExceptio run.checkPermission(Item.READ); PipelineGraph tree = graphApi.createTree(); - HttpResponse response = HttpResponses.okJSON(JSONObject.fromObject(tree, jsonConfig)); - rsp.setStatus(200); + rsp.setContentType("application/json;charset=UTF-8"); setCache(rsp, tree.complete); - response.generateResponse(req, 
rsp, null); + PipelineJsonWriter.write(tree, rsp.getOutputStream()); } // Icon related methods these may appear as unused but are used by /lib/hudson/buildCaption.jelly diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/MultiPipelineGraphViewAction.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/MultiPipelineGraphViewAction.java index 0d898b22e..2fe0db5e2 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/MultiPipelineGraphViewAction.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/MultiPipelineGraphViewAction.java @@ -3,20 +3,17 @@ import hudson.model.Action; import hudson.model.Item; import hudson.security.Permission; -import hudson.util.HttpResponses; import hudson.util.RunList; import io.jenkins.plugins.pipelinegraphview.PipelineGraphViewConfiguration; +import io.jenkins.plugins.pipelinegraphview.utils.PipelineJsonWriter; import jakarta.servlet.ServletException; import jakarta.servlet.http.HttpServletResponse; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import net.sf.json.JSONArray; -import net.sf.json.JsonConfig; import org.jenkins.ui.icon.IconSpec; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.kohsuke.stapler.HttpResponse; import org.kohsuke.stapler.StaplerRequest2; import org.kohsuke.stapler.StaplerResponse2; import org.kohsuke.stapler.WebMethod; @@ -62,12 +59,6 @@ public boolean isShowStageDurations() { return PipelineGraphViewConfiguration.get().isShowStageDurations(); } - private static final JsonConfig jsonConfig = new JsonConfig(); - - static { - PipelineRun.PipelineRunJsonProcessor.configure(jsonConfig); - } - @GET @WebMethod(name = "runs") public void getRuns(StaplerRequest2 req, StaplerResponse2 rsp) throws ServletException, IOException { @@ -92,14 +83,13 @@ public void getRuns(StaplerRequest2 req, 
StaplerResponse2 rsp) throws ServletExc return; } - HttpResponse response = HttpResponses.okJSON(JSONArray.fromObject(pipelineRuns, jsonConfig)); - if (etag != null) { rsp.setHeader("ETag", etag); } rsp.setHeader("Cache-Control", "no-cache"); rsp.setStatus(200); - response.generateResponse(req, rsp, null); + rsp.setContentType("application/json;charset=UTF-8"); + PipelineJsonWriter.write(pipelineRuns, rsp.getOutputStream()); } public String getJobUrl() { diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/PipelineRun.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/PipelineRun.java index 71d8f0032..d0a2ceeb1 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/PipelineRun.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/multipipelinegraphview/PipelineRun.java @@ -2,11 +2,9 @@ import static io.jenkins.plugins.pipelinegraphview.utils.ChangesUtil.getChanges; +import com.fasterxml.jackson.annotation.JsonIgnore; import edu.umd.cs.findbugs.annotations.NonNull; import io.jenkins.plugins.pipelinegraphview.utils.PipelineState; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonBeanProcessor; import org.jenkinsci.plugins.workflow.job.WorkflowRun; public class PipelineRun { @@ -20,6 +18,9 @@ public class PipelineRun { private final long timestamp; private final long duration; private final int changesCount; + + // Not part of the wire format — only used server-side via {@link #isBuilding()}. 
+ @JsonIgnore private final boolean building; @NonNull @@ -53,6 +54,7 @@ public PipelineRun(WorkflowRun run) { this.building = building; } + @JsonIgnore public boolean isBuilding() { return building; } @@ -70,27 +72,4 @@ public String etag() { + '|' + this.result.name(); } - - public static class PipelineRunJsonProcessor implements JsonBeanProcessor { - - public static void configure(JsonConfig config) { - config.registerJsonBeanProcessor(PipelineRun.class, new PipelineRunJsonProcessor()); - PipelineState.PipelineStateJsonProcessor.configure(config); - } - - @Override - public JSONObject processBean(Object bean, JsonConfig jsonConfig) { - if (!(bean instanceof PipelineRun run)) { - return null; - } - JSONObject json = new JSONObject(); - json.element("id", run.id); - json.element("displayName", run.displayName); - json.element("timestamp", run.timestamp); - json.element("duration", run.duration); - json.element("changesCount", run.changesCount); - json.element("result", run.result, jsonConfig); - return json; - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java index f3617a263..9126c53be 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/AbstractPipelineNode.java @@ -2,9 +2,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonBeanProcessor; public class AbstractPipelineNode { final String name; @@ -13,6 +10,7 @@ public class AbstractPipelineNode { final String title; public final String id; private final long pauseDurationMillis; + // Cached value; the serialised view is the null-aware getTotalDurationMillis() below. 
@JsonIgnore private final long cachedTotalDurationMillis; @@ -40,24 +38,4 @@ public long getStartTimeMillis() { public Long getTotalDurationMillis() { return state.isInProgress() ? null : cachedTotalDurationMillis; } - - abstract static class AbstractPipelineNodeJsonProcessor implements JsonBeanProcessor { - - protected static void baseConfigure(JsonConfig config) { - config.registerJsonValueProcessor(PipelineState.class, new PipelineState.PipelineStateJsonProcessor()); - } - - protected JSONObject create(AbstractPipelineNode node, JsonConfig config) { - JSONObject json = new JSONObject(); - json.element("id", node.id); - json.element("name", node.name); - json.element("state", node.state, config); - json.element("type", node.type); - json.element("title", node.title); - json.element("pauseDurationMillis", node.pauseDurationMillis); - json.element("startTimeMillis", node.getStartTimeMillis()); - json.element("totalDurationMillis", node.getTotalDurationMillis()); - return json; - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraph.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraph.java index 1e5ca995a..42099a97a 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraph.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineGraph.java @@ -1,9 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.utils; import java.util.List; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonBeanProcessor; public class PipelineGraph { @@ -14,22 +11,4 @@ public PipelineGraph(List stages, boolean complete) { this.stages = stages; this.complete = complete; } - - public static class PipelineGraphJsonProcessor implements JsonBeanProcessor { - public static void configure(JsonConfig config) { - config.registerJsonBeanProcessor(PipelineGraph.class, new PipelineGraphJsonProcessor()); - 
PipelineStage.PipelineStageJsonProcessor.configure(config); - } - - @Override - public JSONObject processBean(Object bean, JsonConfig jsonConfig) { - if (!(bean instanceof PipelineGraph graph)) { - return null; - } - JSONObject json = new JSONObject(); - json.element("complete", graph.complete); - json.element("stages", graph.stages, jsonConfig); - return json; - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineInputStep.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineInputStep.java index 0178d4fb4..ab6f7d414 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineInputStep.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineInputStep.java @@ -1,27 +1,3 @@ package io.jenkins.plugins.pipelinegraphview.utils; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonBeanProcessor; - -public record PipelineInputStep(String message, String cancel, String id, String ok, boolean parameters) { - public static class PipelineInputStepJsonProcessor implements JsonBeanProcessor { - @Override - public JSONObject processBean(Object bean, JsonConfig jsonConfig) { - if (!(bean instanceof PipelineInputStep input)) { - return null; - } - JSONObject json = new JSONObject(); - json.element("message", input.message()); - json.element("cancel", input.cancel()); - json.element("id", input.id()); - json.element("ok", input.ok()); - json.element("parameters", input.parameters()); - return json; - } - - public static void configure(JsonConfig config) { - config.registerJsonBeanProcessor(PipelineInputStep.class, new PipelineInputStepJsonProcessor()); - } - } -} +public record PipelineInputStep(String message, String cancel, String id, String ok, boolean parameters) {} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriter.java new 
file mode 100644 index 000000000..7f4e4a02e --- /dev/null +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriter.java @@ -0,0 +1,30 @@ +package io.jenkins.plugins.pipelinegraphview.utils; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonInclude; +import java.io.OutputStream; +import java.util.Map; +import tools.jackson.databind.ObjectMapper; +import tools.jackson.databind.json.JsonMapper; + +/** + * Writes plugin DTOs to JSON via Jackson, wrapped in the Stapler {@code okJSON} envelope + * ({@code {"status":"ok","data":...}}) that the frontend expects. + * + *

    DTOs carry Jackson annotations to control the wire format — null values are omitted + * (matching the historical bean-processor output) and fields are read regardless of visibility + * so DTOs can keep package-private fields. + */ +public final class PipelineJsonWriter { + + private static final ObjectMapper MAPPER = JsonMapper.builder() + .changeDefaultPropertyInclusion(inc -> inc.withValueInclusion(JsonInclude.Include.NON_NULL)) + .changeDefaultVisibility(v -> v.withFieldVisibility(JsonAutoDetect.Visibility.ANY)) + .build(); + + private PipelineJsonWriter() {} + + public static void write(Object data, OutputStream out) { + MAPPER.writeValue(out, Map.of("status", "ok", "data", data)); + } +} diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java index ad0c4669d..e682712e4 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java @@ -2,17 +2,20 @@ import static io.jenkins.plugins.pipelinegraphview.consoleview.PipelineConsoleViewAction.URL_NAME; +import com.fasterxml.jackson.annotation.JsonProperty; import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; import java.util.List; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; public class PipelineStage extends AbstractPipelineNode { final List children; private final String seqContainerName; private final PipelineStage nextSibling; + + // The legacy wire format spells this {@code isSequential}, not {@code sequential}. 
+ @JsonProperty("isSequential") private final boolean sequential; + final boolean synthetic; private final boolean placeholder; final String agent; @@ -44,27 +47,4 @@ public PipelineStage( this.url = "/" + runUrl + URL_NAME + "/?selected-node=" + id; } - public static class PipelineStageJsonProcessor extends AbstractPipelineNodeJsonProcessor { - public static void configure(JsonConfig config) { - baseConfigure(config); - config.registerJsonBeanProcessor(PipelineStage.class, new PipelineStageJsonProcessor()); - } - - @Override - public JSONObject processBean(Object bean, JsonConfig config) { - if (!(bean instanceof PipelineStage stage)) { - return null; - } - JSONObject json = create(stage, config); - json.element("children", stage.children, config); - json.element("seqContainerName", stage.seqContainerName); - json.element("nextSibling", stage.nextSibling, config); - json.element("isSequential", stage.sequential); - json.element("synthetic", stage.synthetic); - json.element("placeholder", stage.placeholder); - json.element("agent", stage.agent); - json.element("url", stage.url); - return json; - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java index 450880e64..fc8b13a9c 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineState.java @@ -3,8 +3,6 @@ import com.fasterxml.jackson.annotation.JsonValue; import hudson.model.Result; import java.util.Locale; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonValueProcessor; import org.jenkinsci.plugins.workflow.job.WorkflowRun; public enum PipelineState { @@ -71,24 +69,4 @@ public static PipelineState of(NodeRunStatus status) { public String toString() { return name().toLowerCase(Locale.ROOT); } - - public static class PipelineStateJsonProcessor implements JsonValueProcessor { - 
public static void configure(JsonConfig config) { - config.registerJsonValueProcessor(PipelineState.class, new PipelineStateJsonProcessor()); - } - - @Override - public Object processArrayValue(Object value, JsonConfig jsonConfig) { - return json(value); - } - - @Override - public Object processObjectValue(String key, Object value, JsonConfig jsonConfig) { - return json(value); - } - - private static String json(Object value) { - return !(value instanceof PipelineState state) ? null : state.toString(); - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java index 9e7146592..04274a2b1 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStep.java @@ -2,8 +2,6 @@ import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; import java.util.Map; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; public class PipelineStep extends AbstractPipelineNode { final String stageId; @@ -29,28 +27,4 @@ public PipelineStep( public Map getFlags() { return flags; } - - public static class PipelineStepJsonProcessor extends AbstractPipelineNodeJsonProcessor { - - public static void configure(JsonConfig config) { - baseConfigure(config); - config.registerJsonBeanProcessor(PipelineStep.class, new PipelineStepJsonProcessor()); - PipelineInputStep.PipelineInputStepJsonProcessor.configure(config); - } - - @Override - public JSONObject processBean(Object bean, JsonConfig jsonConfig) { - if (!(bean instanceof PipelineStep step)) { - return null; - } - JSONObject json = create(step, jsonConfig); - - json.element("stageId", step.stageId); - if (step.inputStep != null) { - json.element("inputStep", step.inputStep, jsonConfig); - } - json.element("flags", step.flags); - return json; - } - } } diff --git 
a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepList.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepList.java index ebb03bf4d..e239e0741 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepList.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepList.java @@ -2,9 +2,6 @@ import java.util.ArrayList; import java.util.List; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import net.sf.json.processors.JsonBeanProcessor; public class PipelineStepList { @@ -34,23 +31,4 @@ public void sort() { public void addAll(List steps) { this.steps.addAll(steps); } - - public static class PipelineStepListJsonProcessor implements JsonBeanProcessor { - - public static void configure(JsonConfig config) { - config.registerJsonBeanProcessor(PipelineStepList.class, new PipelineStepListJsonProcessor()); - PipelineStep.PipelineStepJsonProcessor.configure(config); - } - - @Override - public JSONObject processBean(Object bean, JsonConfig jsonConfig) { - if (!(bean instanceof PipelineStepList stepList)) { - return null; - } - JSONObject json = new JSONObject(); - json.element("steps", stepList.steps, jsonConfig); - json.element("runIsComplete", stepList.runIsComplete, jsonConfig); - return json; - } - } } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java deleted file mode 100644 index 962e97421..000000000 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriter.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.jenkins.plugins.pipelinegraphview.utils; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonInclude; -import java.io.OutputStream; -import tools.jackson.databind.ObjectMapper; -import tools.jackson.databind.json.JsonMapper; - -/** - * 
Serialises {@link PipelineStepList} to JSON via Jackson. The DTO classes carry Jackson - * annotations so the wire format matches {@link PipelineStepList.PipelineStepListJsonProcessor} - * used by the net.sf.json path — notably, fields whose value is {@code null} are omitted rather - * than written as {@code null}. - */ -public final class PipelineStepListJsonWriter { - - private static final ObjectMapper MAPPER = JsonMapper.builder() - .changeDefaultPropertyInclusion(inc -> inc.withValueInclusion(JsonInclude.Include.NON_NULL)) - .changeDefaultVisibility(v -> v.withFieldVisibility(JsonAutoDetect.Visibility.ANY)) - .build(); - - private PipelineStepListJsonWriter() {} - - public static void write(PipelineStepList list, OutputStream out) { - MAPPER.writeValue(out, list); - } -} diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java new file mode 100644 index 000000000..fe7368546 --- /dev/null +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java @@ -0,0 +1,133 @@ +package io.jenkins.plugins.pipelinegraphview.utils; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; +import java.io.ByteArrayOutputStream; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import net.sf.json.JSONArray; +import net.sf.json.JSONObject; +import org.junit.jupiter.api.Test; + +/** + * Pins the {@code /allSteps} wire format. The frontend depends on this shape; if you're + * changing something here you're changing a public HTTP contract. 
+ */ +class PipelineJsonWriterTest { + + @Test + void stepListSerializesToExpectedShape() throws Exception { + Map hiddenFlag = new LinkedHashMap<>(); + hiddenFlag.put("hidden", Boolean.TRUE); + + PipelineStep running = new PipelineStep( + "3", + "echo hello", + PipelineState.RUNNING, + "STEP", + "Print Message", + "2", + null, + new TimingInfo(0, 0, 1_700_000_000_000L), + Map.of()); + PipelineStep completed = new PipelineStep( + "4", + "sh build.sh", + PipelineState.SUCCESS, + "STEP", + "Shell Script", + "2", + null, + new TimingInfo(1500, 0, 1_700_000_000_500L), + hiddenFlag); + PipelineStep withInput = new PipelineStep( + "5", + "Deploy?", + PipelineState.PAUSED, + "STEP", + "", + "2", + new PipelineInputStep("Deploy to prod?", "Cancel", "input-1", "Proceed", true), + new TimingInfo(0, 200, 1_700_000_001_000L), + Map.of()); + + JSONObject json = serialize(new PipelineStepList(List.of(running, completed, withInput), false)); + assertThat(json.getBoolean("runIsComplete"), is(false)); + + JSONArray steps = json.getJSONArray("steps"); + assertThat(steps.size(), is(3)); + + JSONObject first = steps.getJSONObject(0); + assertThat(first.getString("id"), is("3")); + assertThat(first.getString("state"), is("running")); + assertThat(first.getString("stageId"), is("2")); + // totalDurationMillis omitted for in-progress steps. 
+ assertThat(first.has("totalDurationMillis"), is(false)); + assertThat(first.has("inputStep"), is(false)); + assertThat(first.getJSONObject("flags").isEmpty(), is(true)); + + JSONObject second = steps.getJSONObject(1); + assertThat(second.getLong("totalDurationMillis"), is(1500L)); + assertThat(second.getJSONObject("flags").getBoolean("hidden"), is(true)); + + JSONObject third = steps.getJSONObject(2); + assertThat(third.getString("state"), is("paused")); + JSONObject input = third.getJSONObject("inputStep"); + assertThat(input.getString("message"), is("Deploy to prod?")); + assertThat(input.getBoolean("parameters"), is(true)); + } + + @Test + void emptyStepListSerializesToExpectedShape() throws Exception { + JSONObject json = serialize(new PipelineStepList(true)); + assertThat(json.getBoolean("runIsComplete"), is(true)); + assertThat(json.getJSONArray("steps").isEmpty(), is(true)); + } + + @Test + void stageSerializesIsSequentialKey() throws Exception { + PipelineStage stage = new PipelineStage( + "7", + "Build", + List.of(), + PipelineState.SUCCESS, + "STAGE", + "", + null, + null, + true, + false, + false, + new TimingInfo(500, 0, 1_700_000_002_000L), + "built-in", + "job/example/1/"); + + PipelineGraph graph = new PipelineGraph(List.of(stage), true); + JSONObject json = serialize(graph); + assertThat(json.getBoolean("complete"), is(true)); + JSONObject s = json.getJSONArray("stages").getJSONObject(0); + assertThat(s, is(notNullValue())); + assertThat(s.getBoolean("isSequential"), is(true)); + assertThat(s.has("sequential"), is(false)); + assertThat(s.has("nextSibling"), is(false)); + assertThat(s.has("seqContainerName"), is(false)); + assertThat(s.getString("agent"), is("built-in")); + } + + /** + * Returns the {@code data} payload from the Stapler envelope that {@link PipelineJsonWriter} + * emits. Tests elsewhere assert the envelope ({@code status}/{@code data}) shape. 
+ */ + private static JSONObject serialize(Object value) throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PipelineJsonWriter.write(value, baos); + JSONObject envelope = JSONObject.fromObject(baos.toString("UTF-8")); + assertThat(envelope.getString("status"), is("ok")); + return envelope.getJSONObject("data"); + } +} diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStateTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStateTest.java index 8685fb59e..973312889 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStateTest.java +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStateTest.java @@ -3,10 +3,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.stream.Stream; -import net.sf.json.JSONArray; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.CsvSource; @@ -53,12 +49,6 @@ private static Stream whenNotFinished() { Arguments.arguments(BlueRun.BlueRunState.NOT_BUILT, PipelineState.NOT_BUILT)); } - private static final JsonConfig CONFIG = new JsonConfig(); - - static { - PipelineState.PipelineStateJsonProcessor.configure(CONFIG); - } - @ParameterizedTest @CsvSource({ "QUEUED, queued", @@ -73,32 +63,7 @@ private static Stream whenNotFinished() { "UNKNOWN, unknown", "ABORTED, aborted" }) - void objectSerialization(String input, String expected) { - PipelineState status = PipelineState.valueOf(input); - - String serialized = JSONObject.fromObject(new TestBean(status), CONFIG).toString(); - - assertEquals("{\"state\":\"%s\"}".formatted(expected), serialized); - } - - @Test - void arrayValueSerialization() { - PipelineState[] states = {PipelineState.RUNNING, PipelineState.SUCCESS}; - - String serialized = 
JSONArray.fromObject(states, CONFIG).toString(); - - assertEquals("[\"running\",\"success\"]", serialized); - } - - public static class TestBean { - private final PipelineState state; - - public TestBean(PipelineState state) { - this.state = state; - } - - public PipelineState getState() { - return state; - } + void toStringReturnsLowercaseName(String input, String expected) { + assertEquals(expected, PipelineState.valueOf(input).toString()); } } diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java deleted file mode 100644 index 60ac0b924..000000000 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStepListJsonWriterTest.java +++ /dev/null @@ -1,109 +0,0 @@ -package io.jenkins.plugins.pipelinegraphview.utils; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; - -import io.jenkins.plugins.pipelinegraphview.analysis.TimingInfo; -import java.io.ByteArrayOutputStream; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import net.sf.json.JSONObject; -import net.sf.json.JsonConfig; -import org.junit.jupiter.api.Test; - -/** - * Pins the Jackson writer against the legacy net.sf.json bean-processor output so a - * wire-format regression fails this test rather than the frontend. 
- */ -class PipelineStepListJsonWriterTest { - - @Test - void matchesLegacyOutputForRunningAndCompletedSteps() throws Exception { - assertMatches(build()); - } - - @Test - void matchesLegacyOutputForEmptyList() throws Exception { - assertMatches(new PipelineStepList(true)); - } - - private static void assertMatches(PipelineStepList list) throws Exception { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - PipelineStepListJsonWriter.write(list, baos); - String jacksonOutput = baos.toString("UTF-8"); - - JsonConfig config = new JsonConfig(); - PipelineStepList.PipelineStepListJsonProcessor.configure(config); - String legacyOutput = JSONObject.fromObject(list, config).toString(); - - // Compare as sorted maps so a difference in key order (which isn't part of the wire - // contract) doesn't fail the test — only structural / value differences matter. - assertThat(canonicalize(jacksonOutput), equalTo(canonicalize(legacyOutput))); - } - - @SuppressWarnings("unchecked") - private static Object canonicalize(String json) { - Object parsed = JSONObject.fromObject(json); - return canonicalize(parsed); - } - - @SuppressWarnings("unchecked") - private static Object canonicalize(Object node) { - if (node instanceof Map map) { - Map sorted = new TreeMap<>(); - map.forEach((k, v) -> sorted.put(String.valueOf(k), canonicalize(v))); - return sorted; - } - if (node instanceof List list) { - List out = new ArrayList<>(list.size()); - for (Object v : list) { - out.add(canonicalize(v)); - } - return out; - } - return node; - } - - private static PipelineStepList build() { - Map hiddenFlag = new LinkedHashMap<>(); - hiddenFlag.put("hidden", Boolean.TRUE); - - PipelineStep running = new PipelineStep( - "3", - "echo hello", - PipelineState.RUNNING, - "STEP", - "Print Message", - "2", - null, - new TimingInfo(0, 0, 1_700_000_000_000L), - Map.of()); - - PipelineStep completed = new PipelineStep( - "4", - "sh build.sh", - PipelineState.SUCCESS, - "STEP", - "Shell Script", - "2", - 
null, - new TimingInfo(1500, 0, 1_700_000_000_500L), - hiddenFlag); - - PipelineStep withInput = new PipelineStep( - "5", - "Deploy?", - PipelineState.PAUSED, - "STEP", - "", - "2", - new PipelineInputStep("Deploy to prod?", "Cancel", "input-1", "Proceed", true), - new TimingInfo(0, 200, 1_700_000_001_000L), - Map.of()); - - return new PipelineStepList(List.of(running, completed, withInput), false); - } -} From f05db98da7ed5d89712c54cacc4259c36e7a6dfc Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 14:24:15 +0100 Subject: [PATCH 25/26] Spotless --- .../treescanner/PipelineNodeTreeScanner.java | 6 +++--- .../plugins/pipelinegraphview/utils/PipelineStage.java | 1 - .../pipelinegraphview/utils/PipelineJsonWriterTest.java | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java index f414173d8..7ebbd080a 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/treescanner/PipelineNodeTreeScanner.java @@ -109,8 +109,7 @@ private void buildFrom(Collection nodes, @CheckForNull Map> getAllSteps() { continue; } if (parents.size() == 1) { - List bucket = stageNodeStepMap.get(parents.get(0).getId()); + List bucket = + stageNodeStepMap.get(parents.get(0).getId()); if (bucket != null) { bucket.add(step); } diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java index e682712e4..a4a508ee8 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineStage.java @@ -46,5 +46,4 @@ public PipelineStage( this.agent = agent; this.url = "/" + runUrl + URL_NAME + 
"/?selected-node=" + id; } - } diff --git a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java index fe7368546..bed526e7c 100644 --- a/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java +++ b/src/test/java/io/jenkins/plugins/pipelinegraphview/utils/PipelineJsonWriterTest.java @@ -1,7 +1,6 @@ package io.jenkins.plugins.pipelinegraphview.utils; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; From a93df1f8f09dac77dc04cab280d023884fefb587 Mon Sep 17 00:00:00 2001 From: Tim Jacomb Date: Thu, 23 Apr 2026 14:36:18 +0100 Subject: [PATCH 26/26] Address review feedback --- .../livestate/LiveGraphState.java | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java index 8a5da4963..d2c661f58 100644 --- a/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java +++ b/src/main/java/io/jenkins/plugins/pipelinegraphview/livestate/LiveGraphState.java @@ -4,11 +4,12 @@ import io.jenkins.plugins.pipelinegraphview.utils.PipelineGraph; import io.jenkins.plugins.pipelinegraphview.utils.PipelineStepList; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -16,8 +17,10 @@ /** * Per-run mutable state built up by {@link 
LiveGraphPopulator} as {@code GraphListener} - * events arrive. Reads and writes are serialised on the instance monitor; holders should - * snapshot and release quickly — the writer is the CPS VM thread and must not block. + * events arrive. Writes run on the CPS VM thread and must not block, so the instance monitor + * is only held around the append to the nodes list + version bump; the ancestry map and + * hideFromView set are {@link ConcurrentHashMap}-backed so readers can publish them without + * copying. */ final class LiveGraphState { @@ -25,12 +28,13 @@ final class LiveGraphState { private final Set seenIds = new HashSet<>(); // Node ID → its enclosing-block IDs (innermost first). Populated at add time on the CPS // VM thread so HTTP graph builds don't contend with the storage write lock to resolve - // ancestry. - private final Map> enclosingIdsByNodeId = new HashMap<>(); + // ancestry. ConcurrentHashMap so {@link #snapshot} can publish the live map (wrapped + // unmodifiable) without copying — readers iterate weakly-consistent views. + private final Map> enclosingIdsByNodeId = new ConcurrentHashMap<>(); // IDs of BlockStartNodes that wrap a {@code hideFromView} step. Lets HTTP readers decide // whether a step is hidden by intersecting with the step's enclosing IDs, avoiding a // per-step {@code iterateEnclosingBlocks} walk through storage. - private final Set hideFromViewBlockStartIds = new HashSet<>(); + private final Set hideFromViewBlockStartIds = ConcurrentHashMap.newKeySet(); private long version = 0; private volatile boolean poisoned = false; @@ -94,17 +98,17 @@ synchronized Long currentVersion() { } LiveGraphSnapshot snapshot() { + // Only the nodes list needs to be copied — enclosingIds and hideFromView are on + // concurrent structures we can publish by reference. Keeping the monitor-held + // section down to a single array copy means addNode (on the CPS VM thread) almost + // never blocks on a snapshot. 
List nodesCopy; - Map> enclosingCopy; - Set hideFromViewCopy; long v; synchronized (this) { if (poisoned || !ready) { return null; } - nodesCopy = List.copyOf(nodes); - enclosingCopy = Map.copyOf(enclosingIdsByNodeId); - hideFromViewCopy = Set.copyOf(hideFromViewBlockStartIds); + nodesCopy = new ArrayList<>(nodes); v = version; } // Scan for WorkspaceAction outside the monitor: each getAction call walks the node's @@ -119,7 +123,12 @@ LiveGraphSnapshot snapshot() { workspaceNodes.add(n); } } - return new LiveGraphSnapshot(nodesCopy, List.copyOf(workspaceNodes), enclosingCopy, hideFromViewCopy, v); + return new LiveGraphSnapshot( + Collections.unmodifiableList(nodesCopy), + Collections.unmodifiableList(workspaceNodes), + Collections.unmodifiableMap(enclosingIdsByNodeId), + Collections.unmodifiableSet(hideFromViewBlockStartIds), + v); } /**