Skip to content

Commit a684036

Browse files
author
rodrigo.nogueira
committed
refactor(benchmark): use factory functions for loop isolation
Each benchmark test now creates a fresh alru_cache instance via factory functions instead of reusing global decorated functions. This ensures each test runs with its own cache bound to the test's event loop, preventing RuntimeError from the new event loop affinity check.
1 parent 54890fd commit a684036

1 file changed

Lines changed: 108 additions & 68 deletions

File tree

benchmark.py

Lines changed: 108 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66

77
from async_lru import _LRUCacheWrapper, alru_cache
88

9-
109
try:
1110
from pytest_codspeed import BenchmarkFixture
1211
except ImportError: # pragma: no branch # only hit in cibuildwheel
@@ -17,10 +16,17 @@
1716

1817
@pytest.fixture
1918
def loop():
20-
loop = asyncio.new_event_loop()
21-
asyncio.set_event_loop(loop)
22-
yield loop
23-
loop.close()
19+
# Save current loop to restore after the test
20+
try:
21+
old_loop = asyncio.get_running_loop()
22+
except RuntimeError:
23+
old_loop = None
24+
new_loop = asyncio.new_event_loop()
25+
asyncio.set_event_loop(new_loop)
26+
yield new_loop
27+
new_loop.close()
28+
if old_loop is not None:
29+
asyncio.set_event_loop(old_loop)
2430

2531

2632
@pytest.fixture
@@ -38,54 +44,84 @@ def run_the_loop(fn, *args, **kwargs):
3844

3945

4046
# Bounded cache (LRU)
41-
@alru_cache(maxsize=128)
42-
async def cached_func(x):
47+
async def _cached_func(x):
4348
return x
4449

4550

46-
@alru_cache(maxsize=16, ttl=0.01)
47-
async def cached_func_ttl(x):
51+
def create_cached_func():
52+
return alru_cache(maxsize=128)(_cached_func)
53+
54+
55+
async def _cached_func_ttl(x):
4856
return x
4957

5058

59+
def create_cached_func_ttl():
60+
return alru_cache(maxsize=16, ttl=0.01)(_cached_func_ttl)
61+
62+
5163
# Unbounded cache (no maxsize)
52-
@alru_cache()
53-
async def cached_func_unbounded(x):
64+
async def _cached_func_unbounded(x):
5465
return x
5566

5667

57-
@alru_cache(ttl=0.01)
58-
async def cached_func_unbounded_ttl(x):
68+
def create_cached_func_unbounded():
69+
return alru_cache()(_cached_func_unbounded)
70+
71+
72+
async def _cached_func_unbounded_ttl(x):
5973
return x
6074

6175

62-
class Methods:
63-
@alru_cache(maxsize=128)
64-
async def cached_meth(self, x):
65-
return x
76+
def create_cached_func_unbounded_ttl():
77+
return alru_cache(ttl=0.01)(_cached_func_unbounded_ttl)
78+
79+
80+
def create_cached_meth():
81+
class MethodsInstance:
82+
@alru_cache(maxsize=128)
83+
async def cached_meth(self, x):
84+
return x
6685

67-
@alru_cache(maxsize=16, ttl=0.01)
68-
async def cached_meth_ttl(self, x):
69-
return x
86+
return MethodsInstance().cached_meth
7087

71-
@alru_cache()
72-
async def cached_meth_unbounded(self, x):
73-
return x
7488

75-
@alru_cache(ttl=0.01)
76-
async def cached_meth_unbounded_ttl(self, x):
77-
return x
89+
def create_cached_meth_ttl():
90+
class MethodsInstance:
91+
@alru_cache(maxsize=16, ttl=0.01)
92+
async def cached_meth_ttl(self, x):
93+
return x
94+
95+
return MethodsInstance().cached_meth_ttl
96+
97+
98+
def create_cached_meth_unbounded():
99+
class MethodsInstance:
100+
@alru_cache()
101+
async def cached_meth_unbounded(self, x):
102+
return x
103+
104+
return MethodsInstance().cached_meth_unbounded
105+
106+
107+
def create_cached_meth_unbounded_ttl():
108+
class MethodsInstance:
109+
@alru_cache(ttl=0.01)
110+
async def cached_meth_unbounded_ttl(self, x):
111+
return x
112+
113+
return MethodsInstance().cached_meth_unbounded_ttl
78114

79115

80116
async def uncached_func(x):
81117
return x
82118

83119

84120
funcs_no_ttl = [
85-
cached_func,
86-
cached_func_unbounded,
87-
Methods.cached_meth,
88-
Methods.cached_meth_unbounded,
121+
create_cached_func,
122+
create_cached_func_unbounded,
123+
create_cached_meth,
124+
create_cached_meth_unbounded,
89125
]
90126
no_ttl_ids = [
91127
"func-bounded",
@@ -95,10 +131,10 @@ async def uncached_func(x):
95131
]
96132

97133
funcs_ttl = [
98-
cached_func_ttl,
99-
cached_func_unbounded_ttl,
100-
Methods.cached_meth_ttl,
101-
Methods.cached_meth_unbounded_ttl,
134+
create_cached_func_ttl,
135+
create_cached_func_unbounded_ttl,
136+
create_cached_meth_ttl,
137+
create_cached_meth_unbounded_ttl,
102138
]
103139
ttl_ids = [
104140
"func-bounded-ttl",
@@ -111,13 +147,13 @@ async def uncached_func(x):
111147
all_ids = [*no_ttl_ids, *ttl_ids]
112148

113149

114-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
150+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
115151
def test_cache_hit_benchmark(
116152
benchmark: BenchmarkFixture,
117153
run_loop: Callable[..., Any],
118-
func: _LRUCacheWrapper[Any],
154+
factory: Callable[[], _LRUCacheWrapper[Any]],
119155
) -> None:
120-
# Populate cache
156+
func = factory()
121157
keys = list(range(10))
122158
for key in keys:
123159
run_loop(func, key)
@@ -130,14 +166,15 @@ async def run() -> None:
130166
benchmark(run_loop, run)
131167

132168

133-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
169+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
134170
def test_cache_miss_benchmark(
135171
benchmark: BenchmarkFixture,
136172
run_loop: Callable[..., Any],
137-
func: _LRUCacheWrapper[Any],
173+
factory: Callable[[], _LRUCacheWrapper[Any]],
138174
) -> None:
139-
unique_objects = [object() for _ in range(128)]
140-
func.cache_clear()
175+
func = factory()
176+
# Use 2048 objects (16x maxsize=128) to force evictions and measure actual misses
177+
unique_objects = [object() for _ in range(2048)]
141178

142179
async def run() -> None:
143180
for obj in unique_objects:
@@ -146,37 +183,39 @@ async def run() -> None:
146183
benchmark(run_loop, run)
147184

148185

149-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
186+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
150187
def test_cache_clear_benchmark(
151188
benchmark: BenchmarkFixture,
152189
run_loop: Callable[..., Any],
153-
func: _LRUCacheWrapper[Any],
190+
factory: Callable[[], _LRUCacheWrapper[Any]],
154191
) -> None:
192+
func = factory()
155193
for i in range(100):
156194
run_loop(func, i)
157195

158196
benchmark(func.cache_clear)
159197

160198

161-
@pytest.mark.parametrize("func_ttl", funcs_ttl, ids=ttl_ids)
199+
@pytest.mark.parametrize("factory", funcs_ttl, ids=ttl_ids)
162200
def test_cache_ttl_expiry_benchmark(
163201
benchmark: BenchmarkFixture,
164202
run_loop: Callable[..., Any],
165-
func_ttl: _LRUCacheWrapper[Any],
203+
factory: Callable[[], _LRUCacheWrapper[Any]],
166204
) -> None:
205+
func_ttl = factory()
167206
run_loop(func_ttl, 99)
168207
run_loop(asyncio.sleep, 0.02)
169208

170209
benchmark(run_loop, func_ttl, 99)
171210

172211

173-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
212+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
174213
def test_cache_invalidate_benchmark(
175214
benchmark: BenchmarkFixture,
176215
run_loop: Callable[..., Any],
177-
func: _LRUCacheWrapper[Any],
216+
factory: Callable[[], _LRUCacheWrapper[Any]],
178217
) -> None:
179-
# Populate cache
218+
func = factory()
180219
keys = list(range(123, 321))
181220
for i in keys:
182221
run_loop(func, i)
@@ -189,13 +228,13 @@ def run() -> None:
189228
invalidate(i)
190229

191230

192-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
231+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
193232
def test_cache_info_benchmark(
194233
benchmark: BenchmarkFixture,
195234
run_loop: Callable[..., Any],
196-
func: _LRUCacheWrapper[Any],
235+
factory: Callable[[], _LRUCacheWrapper[Any]],
197236
) -> None:
198-
# Populate cache
237+
func = factory()
199238
keys = list(range(1000))
200239
for i in keys:
201240
run_loop(func, i)
@@ -208,37 +247,37 @@ def run() -> None:
208247
cache_info()
209248

210249

211-
@pytest.mark.parametrize("func", all_funcs, ids=all_ids)
250+
@pytest.mark.parametrize("factory", all_funcs, ids=all_ids)
212251
def test_concurrent_cache_hit_benchmark(
213252
benchmark: BenchmarkFixture,
214253
run_loop: Callable[..., Any],
215-
func: _LRUCacheWrapper[Any],
254+
factory: Callable[[], _LRUCacheWrapper[Any]],
216255
) -> None:
217-
# Populate cache
256+
func = factory()
218257
keys = list(range(600, 700))
219258
for key in keys:
220259
run_loop(func, key)
221260

222261
async def gather_coros():
223262
gather = asyncio.gather
224263
for _ in range(10):
225-
return await gather(*map(func, keys))
264+
_ = await gather(*map(func, keys))
226265

227266
benchmark(run_loop, gather_coros)
228267

229268

230269
def test_cache_fill_eviction_benchmark(
231270
benchmark: BenchmarkFixture, run_loop: Callable[..., Any]
232271
) -> None:
233-
# Populate cache
272+
func = create_cached_func()
234273
for i in range(-128, 0):
235-
run_loop(cached_func, i)
274+
run_loop(func, i)
236275

237276
keys = list(range(5000))
238277

239278
async def fill():
240279
for k in keys:
241-
await cached_func(k)
280+
await func(k)
242281

243282
benchmark(run_loop, fill)
244283

@@ -252,20 +291,20 @@ async def fill():
252291
# The relevant internal methods do not exist on _LRUCacheWrapperInstanceMethod,
253292
# so we can skip methods for this part of the benchmark suite.
254293
# We also skip wrappers with ttl because it raises KeyError.
255-
only_funcs_no_ttl = all_funcs[:2]
256-
func_ids_no_ttl = all_ids[:2]
294+
only_funcs_no_ttl = funcs_no_ttl[:2]
295+
func_ids_no_ttl = no_ttl_ids[:2]
257296

258297

259-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
298+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
260299
def test_internal_cache_hit_microbenchmark(
261300
benchmark: BenchmarkFixture,
262301
run_loop: Callable[..., Any],
263-
func: _LRUCacheWrapper[Any],
302+
factory: Callable[[], _LRUCacheWrapper[Any]],
264303
) -> None:
265304
"""Directly benchmark _cache_hit (internal, sync) using parameterized funcs."""
305+
func = factory()
266306
cache_hit = func._cache_hit
267307

268-
# Populate cache
269308
keys = list(range(128))
270309
for i in keys:
271310
run_loop(func, i)
@@ -276,11 +315,12 @@ def run() -> None:
276315
cache_hit(i)
277316

278317

279-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
318+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
280319
def test_internal_cache_miss_microbenchmark(
281-
benchmark: BenchmarkFixture, func: _LRUCacheWrapper[Any]
320+
benchmark: BenchmarkFixture, factory: Callable[[], _LRUCacheWrapper[Any]]
282321
) -> None:
283322
"""Directly benchmark _cache_miss (internal, sync) using parameterized funcs."""
323+
func = factory()
284324
cache_miss = func._cache_miss
285325

286326
@benchmark
@@ -289,17 +329,17 @@ def run() -> None:
289329
cache_miss(i)
290330

291331

292-
@pytest.mark.parametrize("func", only_funcs_no_ttl, ids=func_ids_no_ttl)
332+
@pytest.mark.parametrize("factory", only_funcs_no_ttl, ids=func_ids_no_ttl)
293333
@pytest.mark.parametrize("task_state", ["finished", "cancelled", "exception"])
294334
def test_internal_task_done_callback_microbenchmark(
295335
benchmark: BenchmarkFixture,
296336
loop: asyncio.BaseEventLoop,
297-
func: _LRUCacheWrapper[Any],
337+
factory: Callable[[], _LRUCacheWrapper[Any]],
298338
task_state: str,
299339
) -> None:
300340
"""Directly benchmark _task_done_callback (internal, sync) using parameterized funcs and task states."""
341+
func = factory()
301342

302-
# Create a dummy coroutine and task
303343
async def dummy_coro():
304344
if task_state == "exception":
305345
raise ValueError("test exception")

0 commit comments

Comments (0)