-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathexample-expb.yaml
More file actions
133 lines (126 loc) · 5.67 KB
/
example-expb.yaml
File metadata and controls
133 lines (126 loc) · 5.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
---
# Pull images before execution
pull_images: true
# Optional: Cap CPU frequency to prevent turbo boost during benchmarks.
# Set to your CPU's nominal/base frequency in kHz (e.g., 3800000 for 3.8 GHz).
# Useful when the turbo boost sysfs toggle is not available (e.g., amd-pstate-epp driver).
# Original frequencies are restored after each scenario.
# cpu_max_frequency_khz: 3800000
# HT/SMT siblings of cpuset and infra_cpuset cores are auto-detected and offlined
# during benchmarks for cache isolation. Override with explicit list if needed:
# offline_cpus: [9, 10, 11, 12, 13, 14, 15]
# Docker images to use (pin to specific versions for reproducible benchmarks)
images:
  k6: grafana/k6:1.6.1
  alloy: grafana/alloy:v1.13.2
  payload_server: python:3.12-slim
# Paths for the work directory and outputs directory
# (payloads/fcus jsonl files are configured per-scenario below)
paths:
  work: ./work
  outputs: ./outputs
# Optional: Export options for scenarios metrics
export:
  # Prometheus remote write
  prometheus_remote_write:
    # Prometheus remote write endpoint
    endpoint: http://localhost:9090/api/v1/write
    # Optional: Basic auth for the remote write endpoint
    basic_auth:
      username: expb
      password: expb
    # Optional: Tags to add to the metrics
    tags:
      - key1=value1
      - key2=value2
  # Grafana Pyroscope
  pyroscope:
    # Pyroscope endpoint
    endpoint: http://localhost:4040
    # Optional: Basic auth for the remote write endpoint
    basic_auth:
      username: expb
      password: expb
    # Optional: Tags to add to the metrics
    tags:
      - key1=value1
      - key2=value2
# Resources for the docker container
resources:
  cpu: 4
  mem: 32g
  download_speed: 50mbit
  upload_speed: 15mbit
  # Optional: Pin execution client container to specific CPU cores (Docker --cpuset-cpus)
  # Eliminates CPU cache thrashing from scheduler migration. Strongly recommended for stable benchmarks.
  # Use `lscpu -e=CPU,CORE,SOCKET` to verify your topology. Ensure pinned cores don't share
  # physical cores with infra_cpuset. Avoid cores 0-1 as they typically handle hardware interrupts.
  # Example for 8-physical/16-logical CPU: cores 2-5 + their HT siblings 10-13 = 8 threads on 4 dedicated physical cores.
  cpuset: "2-5,10-13"
  # Optional: Pin infrastructure containers (K6, Alloy, payload server) to separate CPU cores
  # Prevents infra from stealing cycles from the execution client under test.
  # Should use different physical cores than cpuset to avoid contention.
  infra_cpuset: "0-1,8-9"
  # Optional: Memory swappiness for the execution client container (0-100). Defaults to host setting.
  # Low values preserve heap memory but aggressively evict page cache, which can hurt I/O-heavy workloads.
  # Only set this if you understand your workload's memory profile.
  # mem_swappiness: 10
# Scenarios to execute
scenarios:
  # Scenario name
  example:
    # Required: Client type
    client: nethermind  # Available clients: nethermind, besu, erigon, geth, reth
    # Required: Snapshot source for the selected client and network (either a path or zfs snapshot name)
    snapshot_source: ./snapshots/nethermind
    # Required: Path to the new payloads requests.
    payloads: ./payloads.jsonl  # Each line is a payload request body
    # Required: Path to the new forkchoice update requests.
    fcus: ./fcus.jsonl  # Each line is a forkchoice update request body
    # Optional: Network to use for the scenario. Defaults to mainnet
    network: mainnet
    # Optional: Snapshot backend to use. Available snapshot backends: overlay, zfs, copy. Defaults to overlay
    snapshot_backend: overlay
    # Optional: Snapshot path for copy backend. If defined, this path will be used instead of work_dir/snapshot. Only used when snapshot_backend is copy
    snapshot_path: ./work_dir/snapshot
    # Optional: Number of times to repeat the scenario. Defaults to 1.
    repeat: 1
    # Required: Number of payloads to run
    amount: 100
    # Optional: Delay between payloads requests in seconds. Defaults to 0 seconds.
    delay: 0
    # Optional: Delay between warmup payloads requests in seconds. If not defined, uses the general delay.
    warmup_delay: 0
    # Optional: Duration of the scenario. Defaults to 10 minutes.
    duration: 10m
    # Optional: Duration of the warmup (k6 setup duration). Defaults to 10 minutes.
    warmup_duration: 10m
    # Optional: Number of payloads to skip from the payloads file
    skip: 0
    # Optional: Number of payloads to execute as warmup (no metrics will be collected for those)
    warmup: 0
    # Optional: Wait time for client startup in seconds. Defaults to 30 seconds.
    startup_wait: 30
    # Optional: Wait time between warmup and benchmark requests in seconds. Defaults to 0 seconds.
    warmup_wait: 0
    # Optional: Client image
    image: ethpandaops/nethermind:performance
    # Optional: Client extra flags
    extra_flags:
      - --flag1=value1
    # Optional: Extra environment variables
    extra_env:
      KEY1: VALUE1
    # Optional: Extra volumes to mount into the docker container
    extra_volumes:
      # Volume name
      volume1:
        # Volume bind path inside the container
        bind: /extra_volume1
        # Optional: Host path on the host, if not provided the test output directory will be used as base path plus the volume name
        source: ./extra_volume1
        # Optional: Volume mode
        mode: rw
    # Optional: Commands to run in the docker container during the test execution
    # These will be executed using `docker exec` after the execution client is started and stopped once the test is done
    # Each command will be executed in a separate thread which can affect main process performance when too many commands are executed
    extra_commands:
      - ./extra_command1