|
2 | 2 | import pytest |
3 | 3 | import time |
4 | 4 |
|
| 5 | +from sqlalchemy import text |
5 | 6 | from sqlalchemy.exc import ProgrammingError |
6 | 7 | from filelock import FileLock |
7 | 8 | from pgmq_sqlalchemy import PGMQueue |
@@ -286,6 +287,12 @@ def test_set_vt_to_smaller_value(pgmq_setup_teardown: PGMQ_WITH_QUEUE): |
286 | 287 | assert pgmq.read(queue_name) is not None |
287 | 288 |
|
288 | 289 |
|
def test_set_vt_not_exist(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Calling set_vt with a msg_id that is not in the queue returns None."""
    pgmq, queue_name = pgmq_setup_teardown
    missing_msg_id = 999
    result = pgmq.set_vt(queue_name, missing_msg_id, 20)
    assert result is None
| 294 | + |
| 295 | + |
289 | 296 | def test_pop(pgmq_setup_teardown: PGMQ_WITH_QUEUE): |
290 | 297 | pgmq, queue_name = pgmq_setup_teardown |
291 | 298 | msg = MSG |
@@ -427,3 +434,158 @@ def test_metrics_all_queues(pgmq_setup_teardown: PGMQ_WITH_QUEUE): |
427 | 434 | assert queue_2.queue_length == 2 |
428 | 435 | assert queue_1.total_messages == 3 |
429 | 436 | assert queue_2.total_messages == 2 |
| 437 | + |
| 438 | + |
# Tests for detach_archive method
@pgmq_deps
def test_detach_archive(pgmq_fixture, db_session):
    """Test detach_archive: the archive table is detached from the pgmq
    extension, but its archived data must remain readable afterwards.

    Cleanup drops both tables manually, since a detached archive is no
    longer managed by ``pgmq.drop_queue``.
    """
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    pgmq.create_queue(queue_name)
    msg_id = pgmq.send(queue_name, MSG)
    pgmq.archive(queue_name, msg_id)

    # Detach archive should not raise an error
    pgmq.detach_archive(queue_name)

    # Read the archive to ensure it still exists after detaching
    archived_msg = pgmq.read_archive(queue_name)
    assert archived_msg is not None
    assert archived_msg.msg_id == msg_id

    # Cleanup: after detaching, the archive table is no longer part of the
    # extension; the queue table must first be detached, then both dropped.
    # The statements are defined once so the sync and async paths cannot
    # drift apart.
    cleanup_statements = [
        # Drop archive table (already detached from the extension)
        f"DROP TABLE IF EXISTS pgmq.a_{queue_name} CASCADE;",
        # Detach and drop queue table
        f"ALTER EXTENSION pgmq DROP TABLE pgmq.q_{queue_name};",
        f"DROP TABLE IF EXISTS pgmq.q_{queue_name} CASCADE;",
    ]
    if pgmq.is_async:

        async def cleanup():
            async with pgmq.session_maker() as session:
                for stmt in cleanup_statements:
                    await session.execute(text(stmt))
                await session.commit()

        pgmq.loop.run_until_complete(cleanup())
    else:
        with pgmq.session_maker() as session:
            for stmt in cleanup_statements:
                session.execute(text(stmt))
            session.commit()
| 490 | + |
# Tests for read_archive methods
def test_read_archive(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Archiving one message of a batch makes it readable from the archive."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    pgmq.archive(queue_name, sent_ids[0])
    archived = pgmq.read_archive(queue_name)
    assert archived is not None
    assert archived.msg_id == sent_ids[0]
    assert archived.message == MSG
| 502 | + |
def test_read_archive_empty(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Reading the archive of a queue with nothing archived yields None."""
    pgmq, queue_name = pgmq_setup_teardown
    assert pgmq.read_archive(queue_name) is None
| 507 | + |
| 508 | + |
def test_read_archive_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """All archived messages come back in send order with their payloads."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG] * 3)
    pgmq.archive_batch(queue_name, sent_ids)
    archived = pgmq.read_archive_batch(queue_name, batch_size=10)
    assert archived is not None
    assert len(archived) == 3
    assert [item.msg_id for item in archived] == sent_ids
    assert all(item.message == MSG for item in archived)
| 520 | + |
| 521 | + |
def test_read_archive_batch_empty(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """A batch read against an empty archive yields None, not an empty list."""
    pgmq, queue_name = pgmq_setup_teardown
    assert pgmq.read_archive_batch(queue_name, batch_size=10) is None
| 526 | + |
| 527 | + |
def test_read_archive_batch_limit(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """batch_size caps how many archived messages a single read returns."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG] * 5)
    pgmq.archive_batch(queue_name, sent_ids)
    archived = pgmq.read_archive_batch(queue_name, batch_size=3)
    assert archived is not None
    assert len(archived) == 3
| 536 | + |
| 537 | + |
# Tests for time-based partitioned queues
@pgmq_deps
def test_create_time_based_partitioned_queue(pgmq_fixture, db_session):
    """A queue partitioned by a time interval is created successfully."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    pgmq.create_partitioned_queue(
        queue_name,
        partition_interval="1 day",
        retention_interval="7 days",
    )
    assert check_queue_exists(db_session, queue_name) is True
| 547 | + |
| 548 | + |
@pgmq_deps
def test_create_time_based_partitioned_queue_various_intervals(
    pgmq_fixture, db_session
):
    """Hour- and week-based partition intervals are both accepted."""
    pgmq: PGMQueue = pgmq_fixture

    # (partition_interval, retention_interval) pairs exercised in order:
    # hourly first, then weekly.
    interval_pairs = [
        ("1 hour", "24 hours"),
        ("1 week", "4 weeks"),
    ]
    for partition_interval, retention_interval in interval_pairs:
        queue_name = f"test_queue_{uuid.uuid4().hex}"
        pgmq.create_partitioned_queue(
            queue_name,
            partition_interval=partition_interval,
            retention_interval=retention_interval,
        )
        assert check_queue_exists(db_session, queue_name) is True
| 568 | + |
| 569 | + |
@pgmq_deps
def test_create_partitioned_queue_invalid_time_interval(pgmq_fixture):
    """A non-parseable time interval string raises ValueError."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    with pytest.raises(ValueError, match="Invalid time-based partition interval"):
        pgmq.create_partitioned_queue(
            queue_name,
            partition_interval="invalid interval",
            retention_interval="7 days",
        )
| 581 | + |
| 582 | + |
@pgmq_deps
def test_create_partitioned_queue_invalid_numeric_interval(pgmq_fixture):
    """A negative numeric partition interval raises ValueError."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    with pytest.raises(ValueError, match="Numeric partition interval must be positive"):
        pgmq.create_partitioned_queue(
            queue_name,
            partition_interval=-100,
            retention_interval=100000,
        )
0 commit comments