Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -25,3 +25,6 @@ src/main/webapp/js/bundles

# macOS
.DS_Store

results.txt

57 changes: 57 additions & 0 deletions docs/examples/300k-nodes.jenkinsfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
// Stress-test pipeline that produces roughly 300,000 FlowNodes across three
// bulk parallel phases, with short sleep windows between them so you can poll
// the REST API and watch latency as the graph grows.
//
// Give Jenkins at least 2 GB heap before running — each FlowNode retains a few
// KB, and GC thrash will muddy any measurement you take from it.
//
// Knobs (multiplicative):
// BRANCHES - parallel branches per phase (default 50)
// STAGES_PER_SECTION - sequential stages inside each branch per phase (default 20)
// STEPS_PER_STAGE - echo steps per stage (default 100)
// Defaults yield 3 * 50 * 20 * 100 = 300,000 echo FlowNodes, plus ~6,000
// stage/parallel wrapping nodes.
//
// Runtime is dominated by CPS step-evaluation (a few ms per echo), so the
// defaults take 30–45 minutes on a typical controller. Drop STEPS_PER_STAGE
// for faster iteration while keeping the structural shape.

// Parallel branches per bulk phase.
BRANCHES = 50
// Sequential stages inside each branch, per phase.
STAGES_PER_SECTION = 20
// Echo steps per stage; the main multiplier for total FlowNode count.
STEPS_PER_STAGE = 100

// Runs one bulk phase: `branches` parallel branches, each executing
// `stagesPerBranch` sequential stages of `stepsPerStage` echo steps.
// `label` prefixes every branch/stage name so the three phases stay
// distinguishable in the graph.
def bulkSection(int branches, int stagesPerBranch, int stepsPerStage, String label) {
    def branchBodies = [:]
    int b = 0
    while (b < branches) {
        // Capture the counter in a per-iteration local: the closure runs
        // later, and referencing the loop variable directly would observe
        // its final value in every branch.
        def branchNum = b
        branchBodies["${label}-b${branchNum}"] = {
            int s = 0
            while (s < stagesPerBranch) {
                stage("${label}-b${branchNum}-s${s}") {
                    for (int n = 0; n < stepsPerStage; n++) {
                        echo "."
                    }
                }
                s++
            }
        }
        b++
    }
    parallel branchBodies
}

node {
    // Log the effective knob values and the expected echo-node count so the
    // build console documents what this run measured.
    stage('Warmup') {
        echo "BRANCHES=${BRANCHES} STAGES_PER_SECTION=${STAGES_PER_SECTION} STEPS_PER_STAGE=${STEPS_PER_STAGE}"
        echo "Expected echo nodes: ~${3 * BRANCHES * STAGES_PER_SECTION * STEPS_PER_STAGE}"
    }

    // Small helper so each sleep window reads as a single line below.
    def settle = { String label, int seconds ->
        stage(label) { sleep seconds }
    }

    settle('Settle (tiny graph)', 30)
    stage('Bulk 1/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's1') }

    settle('Settle (~100k nodes)', 30)
    stage('Bulk 2/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's2') }

    settle('Settle (~200k nodes)', 30)
    stage('Bulk 3/3') { bulkSection(BRANCHES, STAGES_PER_SECTION, STEPS_PER_STAGE, 's3') }

    settle('Settle (~300k nodes, still building)', 30)
    settle('Settle (completed)', 60)
}
86 changes: 86 additions & 0 deletions docs/examples/medium-pipeline.jenkinsfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
// A medium-sized pipeline exercising sequential stages, flat parallel, nested
// parallel, and wide branch counts. Useful when you want to see the Pipeline
// Overview render a non-trivial graph without waiting on a stress test.
//
// Shape: ~70 user-visible stages, a few hundred FlowNodes. Runs in a minute or
// two. Tune the sleep values and branch counts for bigger or smaller graphs.

// Runs a named stage that logs entry/exit, sleeps for `sleepSeconds`, and
// drops a marker file under out/ (stage name sanitised to a safe filename).
def runStage(String name, int sleepSeconds) {
    stage(name) {
        echo "enter: ${name}"
        sleep sleepSeconds
        // Collapse anything outside [A-Za-z0-9] so the name is filesystem-safe.
        def marker = name.replaceAll('[^A-Za-z0-9]', '_')
        writeFile file: "out/${marker}.txt", text: name
        echo "exit: ${name}"
    }
}

node {
    // Sequential preamble: five plain stages run back-to-back.
    runStage('Checkout', 2)
    runStage('Install dependencies', 3)
    runStage('Lint', 2)
    runStage('Format check', 2)
    runStage('Static analysis', 2)

    // Flat parallel: six OS branches, each running four sequential stages.
    // NOTE(review): this `.each` builds the map eagerly (closures are stored,
    // not run inside `.each`), unlike the IntRange case avoided in
    // 'Publish artifacts' below — confirm against the CPS serialisation note there.
    stage('Build matrix') {
        def matrix = [:]
        ['linux', 'macos', 'windows', 'freebsd', 'illumos', 'aix'].each { os ->
            matrix["Build ${os}"] = {
                runStage("${os}: fetch", 1)
                runStage("${os}: compile", 3)
                runStage("${os}: unit tests", 2)
                runStage("${os}: package", 1)
            }
        }
        parallel matrix
    }

    // Nested parallel: 'api' and 'browser' fan out again inside the outer
    // parallel; 'perf' and 'security' run sequential stages within a branch.
    stage('Integration') {
        parallel(
            'api': {
                parallel(
                    'api: smoke': { runStage('api smoke', 2) },
                    'api: contract': { runStage('api contract', 3) },
                    'api: regression': { runStage('api regression', 4) },
                )
            },
            'browser': {
                parallel(
                    'browser: chrome': { runStage('chrome', 3) },
                    'browser: firefox': { runStage('firefox', 3) },
                    'browser: safari': { runStage('safari', 3) },
                    'browser: edge': { runStage('edge', 3) },
                )
            },
            'perf': {
                runStage('perf: baseline', 2)
                runStage('perf: load', 4)
                runStage('perf: stress', 5)
            },
            'security': {
                runStage('dep audit', 2)
                runStage('sast', 3)
                runStage('dast', 4)
            },
        )
    }

    // Wide parallel: eight publish branches of two sequential stages each.
    stage('Publish artifacts') {
        def publishers = [:]
        // C-style loop: (1..8).each would push an IntRange through a parallel
        // closure, which is not CPS-serialisable and fails at checkpoint.
        for (int i = 1; i <= 8; i++) {
            // Per-iteration capture: the closure must not see the loop
            // variable's final value.
            def idx = i
            publishers["Publish region ${idx}"] = {
                runStage("upload region ${idx}", 1)
                runStage("verify region ${idx}", 1)
            }
        }
        parallel publishers
    }

    // Sequential tail.
    runStage('Release notes', 1)
    runStage('Changelog', 2)
    runStage('Tag', 2)
    runStage('Announce', 1)
    runStage('Cleanup', 1)
}
158 changes: 158 additions & 0 deletions docs/examples/perf-observer.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
#!/usr/bin/env bash
#
# perf-observer.sh — polls /tree and /allSteps for a running build and prints a
# latency summary at the end. Use it to compare two configurations (e.g. before
# vs. after a change) by running it once per build and diffing the two summaries.
#
# Usage:
# ./perf-observer.sh JENKINS_URL JOB_NAME BUILD_NUMBER [LABEL]
#
# Optional env vars:
# JENKINS_AUTH=user:token pass-through to curl --user
# POLL_INTERVAL=3 seconds between poll rounds (default 3)
#
# Example:
#
# ./perf-observer.sh http://localhost:8080/jenkins perf 1 baseline
# ./perf-observer.sh http://localhost:8080/jenkins perf 2 after-change
#
# Raw samples are written to /tmp/pgv-perf-<LABEL>.csv for later analysis.

# Abort on command failure, unset variables, and mid-pipeline errors.
set -euo pipefail

if [[ $# -lt 3 ]]; then
  echo "Usage: $0 JENKINS_URL JOB_NAME BUILD_NUMBER [LABEL]" >&2
  exit 2
fi

# ${1%/} drops one trailing slash so URL joins below don't double up.
JENKINS_URL="${1%/}"
JOB_NAME="$2"
BUILD_NUMBER="$3"
LABEL="${4:-run}"
POLL_INTERVAL="${POLL_INTERVAL:-3}"
# NOTE(review): predictable path in world-writable /tmp; fine for local
# benchmarking, but consider mktemp on shared hosts.
CSV="/tmp/pgv-perf-${LABEL}.csv"

# Endpoints under .../stages are the ones being measured; API_URL is only
# used to detect whether the build is still running.
BASE="${JENKINS_URL}/job/${JOB_NAME}/${BUILD_NUMBER}/stages"
API_URL="${JENKINS_URL}/job/${JOB_NAME}/${BUILD_NUMBER}/api/json?tree=result,inProgress"

# Empty array when unauthenticated: "${AUTH[@]}" then expands to no args,
# so the curl calls need no conditional.
AUTH=()
if [[ -n "${JENKINS_AUTH:-}" ]]; then
  AUTH=(--user "$JENKINS_AUTH")
fi

echo "Observing: $BASE"
echo "Label: $LABEL"
echo "CSV: $CSV"
echo "Interval: ${POLL_INTERVAL}s"
echo

# Truncate any previous run and write the CSV header.
echo "sample_ts,endpoint,http_code,time_ms,bytes" > "$CSV"

# Takes one timed sample of $BASE/<endpoint>: appends a CSV row to $CSV and
# prints a human-readable line. Uses globals BASE, AUTH, CSV.
measure() {
  local endpoint="$1"
  local url="$BASE/$endpoint"
  local out
  # Capture curl's -w metrics. On failure, fall back via assignment rather
  # than `|| echo`: curl prints its -w string to stdout even when it exits
  # non-zero, so `$(curl ... || echo "000,0,0")` could capture BOTH lines
  # and write a corrupt multi-line CSV record.
  if ! out=$(curl -so /dev/null --compressed -w '%{http_code},%{time_total},%{size_download}' \
      "${AUTH[@]}" "$url" 2>/dev/null); then
    out="000,0,0"
  fi
  # Split "code,seconds,bytes" with parameter expansion (no subshells).
  local http_code="${out%%,*}"
  local rest="${out#*,}"
  local time_s="${rest%%,*}"
  local bytes="${rest#*,}"
  local time_ms
  # curl reports seconds; convert to milliseconds with one decimal.
  time_ms=$(awk -v t="$time_s" 'BEGIN { printf "%.1f", t * 1000 }')
  local ts
  ts=$(date +%s)
  echo "$ts,$endpoint,$http_code,$time_ms,$bytes" >> "$CSV"
  printf '  %s  %-10s %s %8sms %8s bytes\n' \
    "$(date +%H:%M:%S)" "$endpoint" "$http_code" "$time_ms" "$bytes"
}

# Prints the build's result/inProgress JSON from $API_URL; prints an empty
# string when the endpoint is unreachable.
build_state() {
  local resp
  resp=$(curl -s "${AUTH[@]}" "$API_URL" 2>/dev/null) || resp=""
  echo "$resp"
}

# Succeeds (exit 0) iff the build API currently reports "inProgress":true.
is_running() {
  case "$(build_state)" in
    *'"inProgress":true'*) return 0 ;;
    *) return 1 ;;
  esac
}

# Give the build up to ~60s (30 tries x 2s) to enter the in-progress state.
echo "Waiting for build to enter in-progress state..."
for _ in $(seq 1 30); do
  if is_running; then break; fi
  sleep 2
done

# Distinguish "never reached Jenkins" (abort) from "build already finished"
# (degrade to post-completion samples only).
if ! is_running; then
  state=$(build_state)
  if [[ -z "$state" ]]; then
    echo "ERROR: could not reach $API_URL" >&2
    exit 1
  fi
  echo "NOTE: build is not in-progress (state: $state). Taking a few post-completion samples only."
fi

echo
echo "Polling while build runs..."
echo

# One sample of each endpoint per round while the build is live.
while is_running; do
  measure tree
  measure allSteps
  sleep "$POLL_INTERVAL"
done

# A few extra samples after completion, to capture post-build latency
# (graph is final; caches may behave differently).
echo
echo "Build complete. Taking three post-completion samples..."
for _ in 1 2 3; do
  measure tree
  measure allSteps
  sleep 1
done

echo
echo "================ Summary: '$LABEL' ================"
# Quoted 'PY' delimiter: the shell does not expand anything inside the
# heredoc; the CSV path is passed as argv[1] instead.
python3 - "$CSV" <<'PY'
import csv, sys

csv_path = sys.argv[1]
# endpoint -> list of time_ms for HTTP 200 samples
samples = {}
# (endpoint, http_code) -> count of non-200 samples
warnings = {}

with open(csv_path) as f:
    for row in csv.DictReader(f):
        ep = row["endpoint"]
        code = row["http_code"]
        try:
            t = float(row["time_ms"])
        except ValueError:
            # Skip rows whose time field isn't numeric.
            # NOTE(review): a truncated row gives None here, which raises
            # TypeError (uncaught) rather than ValueError — confirm rows are
            # always complete.
            continue
        if code != "200":
            warnings[(ep, code)] = warnings.get((ep, code), 0) + 1
            continue
        samples.setdefault(ep, []).append(t)

def pct(sorted_vals, p):
    # Nearest-rank percentile over a pre-sorted list: round p% of the index
    # range to the closest position, clamped to valid indices.
    if not sorted_vals:
        return 0.0
    idx = int(round((p / 100.0) * (len(sorted_vals) - 1)))
    idx = max(0, min(len(sorted_vals) - 1, idx))
    return sorted_vals[idx]

# One latency line per endpoint, in alphabetical order.
for ep in sorted(samples):
    vals = sorted(samples[ep])
    n = len(vals)
    print(f"  {ep:10s} n={n:4d} "
          f"min={vals[0]:8.1f}ms "
          f"median={pct(vals, 50):8.1f}ms "
          f"p95={pct(vals, 95):8.1f}ms "
          f"max={vals[-1]:8.1f}ms")

# Non-200 responses are reported but excluded from the latency stats above.
if warnings:
    print()
    for (ep, code), count in sorted(warnings.items()):
        print(f"  {ep:10s} WARN  {count}x non-200 ({code})")
PY
echo "====================================================="
echo
echo "Raw samples: $CSV"
4 changes: 4 additions & 0 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,10 @@
<groupId>io.jenkins.plugins</groupId>
<artifactId>ionicons-api</artifactId>
</dependency>
<dependency>
<groupId>io.jenkins.plugins</groupId>
<artifactId>jackson3-api</artifactId>
</dependency>
<dependency>
<groupId>org.jenkins-ci.plugins</groupId>
<artifactId>display-url-api</artifactId>
Expand Down
Loading
Loading