Skip to content

Commit f5752b0

Browse files
author
rodrigo.nogueira
committed
refactor(benchmark): use factory functions for loop isolation
Each benchmark test now creates a fresh alru_cache instance via factory functions instead of reusing global decorated functions. This ensures each test runs with its own cache bound to the test's event loop, preventing RuntimeError from the new event loop affinity check.
1 parent 54890fd commit f5752b0

1 file changed

Lines changed: 79 additions & 52 deletions

File tree

benchmark.py

Lines changed: 79 additions & 52 deletions
Original file line number | Diff line number | Diff line change
@@ -38,54 +38,78 @@ def run_the_loop(fn, *args, **kwargs):
3838

3939

4040
# Bounded cache (LRU)
41-
@alru_cache(maxsize=128)
42-
async def cached_func(x):
41+
async def _cached_func(x):
4342
return x
4443

4544

46-
@alru_cache(maxsize=16, ttl=0.01)
47-
async def cached_func_ttl(x):
45+
def create_cached_func():
46+
return alru_cache(maxsize=128)(_cached_func)
47+
48+
49+
async def _cached_func_ttl(x):
4850
return x
4951

5052

53+
def create_cached_func_ttl():
54+
return alru_cache(maxsize=16, ttl=0.01)(_cached_func_ttl)
55+
56+
5157
# Unbounded cache (no maxsize)
52-
@alru_cache()
53-
async def cached_func_unbounded(x):
58+
async def _cached_func_unbounded(x):
5459
return x
5560

5661

57-
@alru_cache(ttl=0.01)
58-
async def cached_func_unbounded_ttl(x):
62+
def create_cached_func_unbounded():
63+
return alru_cache()(_cached_func_unbounded)
64+
65+
66+
async def _cached_func_unbounded_ttl(x):
5967
return x
6068

6169

70+
def create_cached_func_unbounded_ttl():
71+
return alru_cache(ttl=0.01)(_cached_func_unbounded_ttl)
72+
73+
6274
class Methods:
63-
@alru_cache(maxsize=128)
6475
async def cached_meth(self, x):
6576
return x
6677

67-
@alru_cache(maxsize=16, ttl=0.01)
6878
async def cached_meth_ttl(self, x):
6979
return x
7080

71-
@alru_cache()
7281
async def cached_meth_unbounded(self, x):
7382
return x
7483

75-
@alru_cache(ttl=0.01)
7684
async def cached_meth_unbounded_ttl(self, x):
7785
return x
7886

7987

88+
def create_cached_meth():
89+
return alru_cache(maxsize=128)(Methods.cached_meth)
90+
91+
92+
def create_cached_meth_ttl():
93+
return alru_cache(maxsize=16, ttl=0.01)(Methods.cached_meth_ttl)
94+
95+
96+
def create_cached_meth_unbounded():
97+
return alru_cache()(Methods.cached_meth_unbounded)
98+
99+
100+
def create_cached_meth_unbounded_ttl():
101+
return alru_cache(ttl=0.01)(Methods.cached_meth_unbounded_ttl)
102+
103+
80104
async def uncached_func(x):
81105
return x
82106

83107

84108
funcs_no_ttl = [
85-
cached_func,
86-
cached_func_unbounded,
87-
Methods.cached_meth,
88-
Methods.cached_meth_unbounded,
109+
create_cached_func,
110+
create_cached_func_unbounded,
111+
create_cached_meth,
112+
create_cached_meth_unbounded,
89113
]
90114
no_ttl_ids = [
91115
"func-bounded",
@@ -95,10 +119,10 @@ async def uncached_func(x):
95119
]
96120

97121
funcs_ttl = [
98-
cached_func_ttl,
99-
cached_func_unbounded_ttl,
100-
Methods.cached_meth_ttl,
101-
Methods.cached_meth_unbounded_ttl,
122+
create_cached_func_ttl,
123+
create_cached_func_unbounded_ttl,
124+
create_cached_meth_ttl,
125+
create_cached_meth_unbounded_ttl,
102126
]
103127
ttl_ids = [
104128
"func-bounded-ttl",
@@ -111,13 +135,13 @@ async def uncached_func(x):
111135
all_ids = [*no_ttl_ids, *ttl_ids]
112136

113137

114-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
138+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
115139
def test_cache_hit_benchmark(
116140
benchmark: BenchmarkFixture,
117141
run_loop: Callable[..., Any],
118-
func: _LRUCacheWrapper[Any],
142+
factory: Callable[[], _LRUCacheWrapper[Any]],
119143
) -> None:
120-
# Populate cache
144+
func = factory()
121145
keys = list(range(10))
122146
for key in keys:
123147
run_loop(func, key)
@@ -130,14 +154,14 @@ async def run() -> None:
130154
benchmark(run_loop, run)
131155

132156

133-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
157+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
134158
def test_cache_miss_benchmark(
135159
benchmark: BenchmarkFixture,
136160
run_loop: Callable[..., Any],
137-
func: _LRUCacheWrapper[Any],
161+
factory: Callable[[], _LRUCacheWrapper[Any]],
138162
) -> None:
163+
func = factory()
139164
unique_objects = [object() for _ in range(128)]
140-
func.cache_clear()
141165

142166
async def run() -> None:
143167
for obj in unique_objects:
@@ -146,37 +170,39 @@ async def run() -> None:
146170
benchmark(run_loop, run)
147171

148172

149-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
173+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
150174
def test_cache_clear_benchmark(
151175
benchmark: BenchmarkFixture,
152176
run_loop: Callable[..., Any],
153-
func: _LRUCacheWrapper[Any],
177+
factory: Callable[[], _LRUCacheWrapper[Any]],
154178
) -> None:
179+
func = factory()
155180
for i in range(100):
156181
run_loop(func, i)
157182

158183
benchmark(func.cache_clear)
159184

160185

161-
@pytest.mark.parametrize("func_ttl", funcs_ttl, ids=ttl_ids)
186+
@pytest.mark.parametrize("factory", funcs_ttl, ids=ttl_ids)
162187
def test_cache_ttl_expiry_benchmark(
163188
benchmark: BenchmarkFixture,
164189
run_loop: Callable[..., Any],
165-
func_ttl: _LRUCacheWrapper[Any],
190+
factory: Callable[[], _LRUCacheWrapper[Any]],
166191
) -> None:
192+
func_ttl = factory()
167193
run_loop(func_ttl, 99)
168194
run_loop(asyncio.sleep, 0.02)
169195

170196
benchmark(run_loop, func_ttl, 99)
171197

172198

173-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
199+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
174200
def test_cache_invalidate_benchmark(
175201
benchmark: BenchmarkFixture,
176202
run_loop: Callable[..., Any],
177-
func: _LRUCacheWrapper[Any],
203+
factory: Callable[[], _LRUCacheWrapper[Any]],
178204
) -> None:
179-
# Populate cache
205+
func = factory()
180206
keys = list(range(123, 321))
181207
for i in keys:
182208
run_loop(func, i)
@@ -189,13 +215,13 @@ def run() -> None:
189215
invalidate(i)
190216

191217

192-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
218+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
193219
def test_cache_info_benchmark(
194220
benchmark: BenchmarkFixture,
195221
run_loop: Callable[..., Any],
196-
func: _LRUCacheWrapper[Any],
222+
factory: Callable[[], _LRUCacheWrapper[Any]],
197223
) -> None:
198-
# Populate cache
224+
func = factory()
199225
keys = list(range(1000))
200226
for i in keys:
201227
run_loop(func, i)
@@ -208,13 +234,13 @@ def run() -> None:
208234
cache_info()
209235

210236

211-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
237+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
212238
def test_concurrent_cache_hit_benchmark(
213239
benchmark: BenchmarkFixture,
214240
run_loop: Callable[..., Any],
215-
func: _LRUCacheWrapper[Any],
241+
factory: Callable[[], _LRUCacheWrapper[Any]],
216242
) -> None:
217-
# Populate cache
243+
func = factory()
218244
keys = list(range(600, 700))
219245
for key in keys:
220246
run_loop(func, key)
@@ -230,15 +256,15 @@ async def gather_coros():
230256
def test_cache_fill_eviction_benchmark(
231257
benchmark: BenchmarkFixture, run_loop: Callable[..., Any]
232258
) -> None:
233-
# Populate cache
259+
func = create_cached_func()
234260
for i in range(-128, 0):
235-
run_loop(cached_func, i)
261+
run_loop(func, i)
236262

237263
keys = list(range(5000))
238264

239265
async def fill():
240266
for k in keys:
241-
await cached_func(k)
267+
await func(k)
242268

243269
benchmark(run_loop, fill)
244270

@@ -252,20 +278,20 @@ async def fill():
252278
# The relevant internal methods do not exist on _LRUCacheWrapperInstanceMethod,
253279
# so we can skip methods for this part of the benchmark suite.
254280
# We also skip wrappers with ttl because it raises KeyError.
255-
only_funcs_no_ttl = all_funcs[:2]
256-
func_ids_no_ttl = all_ids[:2]
281+
only_funcs_no_ttl = funcs_no_ttl[:2]
282+
func_ids_no_ttl = no_ttl_ids[:2]
257283

258284

259-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
285+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
260286
def test_internal_cache_hit_microbenchmark(
261287
benchmark: BenchmarkFixture,
262288
run_loop: Callable[..., Any],
263-
func: _LRUCacheWrapper[Any],
289+
factory: Callable[[], _LRUCacheWrapper[Any]],
264290
) -> None:
265291
"""Directly benchmark _cache_hit (internal, sync) using parameterized funcs."""
292+
func = factory()
266293
cache_hit = func._cache_hit
267294

268-
# Populate cache
269295
keys = list(range(128))
270296
for i in keys:
271297
run_loop(func, i)
@@ -276,11 +302,12 @@ def run() -> None:
276302
cache_hit(i)
277303

278304

279-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
305+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
280306
def test_internal_cache_miss_microbenchmark(
281-
benchmark: BenchmarkFixture, func: _LRUCacheWrapper[Any]
307+
benchmark: BenchmarkFixture, factory: Callable[[], _LRUCacheWrapper[Any]]
282308
) -> None:
283309
"""Directly benchmark _cache_miss (internal, sync) using parameterized funcs."""
310+
func = factory()
284311
cache_miss = func._cache_miss
285312

286313
@benchmark
@@ -289,17 +316,17 @@ def run() -> None:
289316
cache_miss(i)
290317

291318

292-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
319+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
293320
@pytest.mark.parametrize("task_state", ["finished", "cancelled", "exception"])
294321
def test_internal_task_done_callback_microbenchmark(
295322
benchmark: BenchmarkFixture,
296323
loop: asyncio.BaseEventLoop,
297-
func: _LRUCacheWrapper[Any],
324+
factory: Callable[[], _LRUCacheWrapper[Any]],
298325
task_state: str,
299326
) -> None:
300327
"""Directly benchmark _task_done_callback (internal, sync) using parameterized funcs and task states."""
328+
func = factory()
301329

302-
# Create a dummy coroutine and task
303330
async def dummy_coro():
304331
if task_state == "exception":
305332
raise ValueError("test exception")

0 commit comments

Comments (0)