From c05078905e76ce4c72e6731bba4ad948fa0f2f92 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Sat, 18 Apr 2026 17:25:47 -0300 Subject: [PATCH 01/10] feat: add active/archive schema definitions and abstract adapter methods --- packages/duron/src/adapters/adapter.ts | 83 ++++++ .../src/adapters/postgres/schema.default.ts | 20 +- .../duron/src/adapters/postgres/schema.ts | 256 +++++++++++++----- packages/duron/src/adapters/schemas.ts | 21 ++ 4 files changed, 305 insertions(+), 75 deletions(-) diff --git a/packages/duron/src/adapters/adapter.ts b/packages/duron/src/adapters/adapter.ts index f4334d6..9217a48 100644 --- a/packages/duron/src/adapters/adapter.ts +++ b/packages/duron/src/adapters/adapter.ts @@ -40,6 +40,7 @@ import type { JobStatusResult, JobStep, JobStepStatusResult, + PruneArchiveOptions, RecoverJobsOptions, RetryJobOptions, TimeTravelJobOptions, @@ -75,6 +76,7 @@ import { JobStepStatusResultSchema, JobsArrayResultSchema, NumberResultSchema, + PruneArchiveOptionsSchema, RecoverJobsOptionsSchema, RetryJobOptionsSchema, TimeTravelJobOptionsSchema, @@ -83,6 +85,7 @@ import { // Re-export types from schemas for backward compatibility export type { ActionStats, + ArchiveStats, CancelJobOptions, CancelJobStepOptions, CompleteJobOptions, @@ -112,6 +115,7 @@ export type { JobStatusResult, JobStep, JobStepStatusResult, + PruneArchiveOptions, RecoverJobsOptions, RetryJobOptions, SortOrder, @@ -1088,6 +1092,85 @@ export abstract class Adapter extends EventEmitter { */ protected abstract _deleteSpans(options: DeleteSpansOptions): Promise + // ============================================================================ + // Archive Methods + // ============================================================================ + + /** + * Prune archived jobs older than the specified threshold. 
+ * + * @param options - Prune options including olderThan, batchSize, maxBatches + * @returns Promise resolving to the number of jobs deleted + */ + async pruneArchive(options: PruneArchiveOptions): Promise { + try { + await this.start() + const parsedOptions = PruneArchiveOptionsSchema.parse(options) + const result = await this._pruneArchive(parsedOptions) + return NumberResultSchema.parse(result) + } catch (error) { + this.logger?.error(error, 'Error in Adapter.pruneArchive()') + throw error + } + } + + /** + * Truncate all archive tables (nuclear option). + * + * @returns Promise resolving to void + */ + async truncateArchive(): Promise { + try { + await this.start() + await this._truncateArchive() + } catch (error) { + this.logger?.error(error, 'Error in Adapter.truncateArchive()') + throw error + } + } + + /** + * Get archive statistics. + * + * @returns Promise resolving to archive stats + */ + async getArchiveStats(): Promise { + try { + await this.start() + const result = await this._getArchiveStats() + return ArchiveStatsSchema.parse(result) + } catch (error) { + this.logger?.error(error, 'Error in Adapter.getArchiveStats()') + throw error + } + } + + // ============================================================================ + // Private Archive Methods (to be implemented by adapters) + // ============================================================================ + + /** + * Internal method to prune archived jobs. + * + * @param options - Validated prune options + * @returns Promise resolving to the number of jobs deleted + */ + protected abstract _pruneArchive(options: PruneArchiveOptions): Promise + + /** + * Internal method to truncate all archive tables. + * + * @returns Promise resolving to void + */ + protected abstract _truncateArchive(): Promise + + /** + * Internal method to get archive statistics. 
+ * + * @returns Promise resolving to archive stats + */ + protected abstract _getArchiveStats(): Promise + // ============================================================================ // Protected Abstract Methods (to be implemented by adapters) // ============================================================================ diff --git a/packages/duron/src/adapters/postgres/schema.default.ts b/packages/duron/src/adapters/postgres/schema.default.ts index 1edd23f..4bfcb1d 100644 --- a/packages/duron/src/adapters/postgres/schema.default.ts +++ b/packages/duron/src/adapters/postgres/schema.default.ts @@ -1,5 +1,21 @@ import createSchema from './schema.js' -const { schema, jobsTable, jobStepsTable, spansTable } = createSchema('duron') +const { + schema, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, +} = createSchema('duron') -export { schema, jobsTable, jobStepsTable, spansTable } +export { + schema, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, +} diff --git a/packages/duron/src/adapters/postgres/schema.ts b/packages/duron/src/adapters/postgres/schema.ts index 7a8fe07..8f88214 100644 --- a/packages/duron/src/adapters/postgres/schema.ts +++ b/packages/duron/src/adapters/postgres/schema.ts @@ -20,8 +20,12 @@ import type { SerializableError } from '../../errors.js' export default function createSchema(schemaName: string) { const schema = pgSchema(schemaName) - const jobsTable = schema.table( - 'jobs', + // ============================================================================ + // Active Tables (Hot Path) + // ============================================================================ + + const jobsActiveTable = schema.table( + 'jobs_active', { id: uuid('id').primaryKey().defaultRandom(), action_name: text('action_name').notNull(), @@ -52,39 +56,38 @@ export default function createSchema(schemaName: 
string) { }, (table) => [ // Single column indexes - index('idx_jobs_action_name').on(table.action_name), - index('idx_jobs_status').on(table.status), - index('idx_jobs_group_key').on(table.group_key), - index('idx_jobs_description').on(table.description), - index('idx_jobs_started_at').on(table.started_at), - index('idx_jobs_finished_at').on(table.finished_at), - index('idx_jobs_expires_at').on(table.expires_at), - index('idx_jobs_client_id').on(table.client_id), - index('idx_jobs_checksum').on(table.checksum), - index('idx_jobs_concurrency_limit').on(table.concurrency_limit), - index('idx_jobs_concurrency_step_limit').on(table.concurrency_step_limit), + index('idx_jobs_active_action_name').on(table.action_name), + index('idx_jobs_active_status').on(table.status), + index('idx_jobs_active_group_key').on(table.group_key), + index('idx_jobs_active_description').on(table.description), + index('idx_jobs_active_started_at').on(table.started_at), + index('idx_jobs_active_expires_at').on(table.expires_at), + index('idx_jobs_active_client_id').on(table.client_id), + index('idx_jobs_active_checksum').on(table.checksum), + index('idx_jobs_active_concurrency_limit').on(table.concurrency_limit), + index('idx_jobs_active_concurrency_step_limit').on(table.concurrency_step_limit), // Composite indexes - index('idx_jobs_action_status').on(table.action_name, table.status), - index('idx_jobs_action_group').on(table.action_name, table.group_key), + index('idx_jobs_active_action_status').on(table.action_name, table.status), + index('idx_jobs_active_action_group').on(table.action_name, table.group_key), // GIN indexes for full-text search - index('idx_jobs_input_fts').using('gin', sql`to_tsvector('english', ${table.input}::text)`), - index('idx_jobs_output_fts').using('gin', sql`to_tsvector('english', ${table.output}::text)`), + index('idx_jobs_active_input_fts').using('gin', sql`to_tsvector('english', ${table.input}::text)`), + index('idx_jobs_active_output_fts').using('gin', 
sql`to_tsvector('english', ${table.output}::text)`), check( - 'jobs_status_check', + 'jobs_active_status_check', sql`${table.status} IN ${sql.raw(`(${JOB_STATUSES.map((s) => `'${s}'`).join(',')})`)}`, ), ], ) - const jobStepsTable = schema.table( - 'job_steps', + const jobStepsActiveTable = schema.table( + 'job_steps_active', { id: uuid('id').primaryKey().defaultRandom(), job_id: uuid('job_id') .notNull() - .references(() => jobsTable.id, { onDelete: 'cascade' }), + .references(() => jobsActiveTable.id, { onDelete: 'cascade' }), parent_step_id: uuid('parent_step_id'), - parallel: boolean('branch').notNull().default(false), // DB column is 'branch', TypeScript uses 'parallel' + parallel: boolean('branch').notNull().default(false), name: text('name').notNull(), status: text('status').$type().notNull().default(STEP_STATUS_ACTIVE), output: jsonb('output'), @@ -113,55 +116,41 @@ export default function createSchema(schemaName: string) { }, (table) => [ // Single column indexes - index('idx_job_steps_job_id').on(table.job_id), - index('idx_job_steps_status').on(table.status), - index('idx_job_steps_name').on(table.name), - index('idx_job_steps_expires_at').on(table.expires_at), - index('idx_job_steps_parent_step_id').on(table.parent_step_id), + index('idx_job_steps_active_job_id').on(table.job_id), + index('idx_job_steps_active_status').on(table.status), + index('idx_job_steps_active_name').on(table.name), + index('idx_job_steps_active_expires_at').on(table.expires_at), + index('idx_job_steps_active_parent_step_id').on(table.parent_step_id), // Composite indexes - index('idx_job_steps_job_status').on(table.job_id, table.status), - index('idx_job_steps_job_name').on(table.job_id, table.name), - index('idx_job_steps_output_fts').using('gin', sql`to_tsvector('english', ${table.output}::text)`), - // Unique constraint - step name is unique within a parent (name + parentStepId) - // nullsNotDistinct ensures NULL parent_step_id values are treated as equal for uniqueness - 
unique('unique_job_step_name_parent') + index('idx_job_steps_active_job_status').on(table.job_id, table.status), + index('idx_job_steps_active_job_name').on(table.job_id, table.name), + index('idx_job_steps_active_output_fts').using('gin', sql`to_tsvector('english', ${table.output}::text)`), + // Unique constraint + unique('unique_job_step_active_name_parent') .on(table.job_id, table.name, table.parent_step_id) .nullsNotDistinct(), check( - 'job_steps_status_check', + 'job_steps_active_status_check', sql`${table.status} IN ${sql.raw(`(${STEP_STATUSES.map((s) => `'${s}'`).join(',')})`)}`, ), ], ) - /** - * OpenTelemetry spans table. - * Stores span data exported by PostgresSpanExporter. - * - * SpanKind values: 0=INTERNAL, 1=SERVER, 2=CLIENT, 3=PRODUCER, 4=CONSUMER - * StatusCode values: 0=UNSET, 1=OK, 2=ERROR - */ - const spansTable = schema.table( - 'spans', + const spansActiveTable = schema.table( + 'spans_active', { id: bigserial('id', { mode: 'number' }).primaryKey(), - // OpenTelemetry span identifiers - trace_id: text('trace_id').notNull(), // 32-char hex - span_id: text('span_id').notNull(), // 16-char hex - parent_span_id: text('parent_span_id'), // 16-char hex, null for root spans - // Duron-specific references (extracted from span attributes) - job_id: uuid('job_id').references(() => jobsTable.id, { onDelete: 'cascade' }), - step_id: uuid('step_id').references(() => jobStepsTable.id, { onDelete: 'cascade' }), - // Span metadata + trace_id: text('trace_id').notNull(), + span_id: text('span_id').notNull(), + parent_span_id: text('parent_span_id'), + job_id: uuid('job_id').references(() => jobsActiveTable.id, { onDelete: 'cascade' }), + step_id: uuid('step_id').references(() => jobStepsActiveTable.id, { onDelete: 'cascade' }), name: text('name').notNull(), - kind: integer('kind').notNull().default(0), // SpanKind enum - // Timing (stored as nanoseconds since epoch for precision) + kind: integer('kind').notNull().default(0), start_time_unix_nano: 
bigint('start_time_unix_nano', { mode: 'bigint' }).notNull(), end_time_unix_nano: bigint('end_time_unix_nano', { mode: 'bigint' }), - // Status - status_code: integer('status_code').notNull().default(0), // SpanStatusCode enum + status_code: integer('status_code').notNull().default(0), status_message: text('status_message'), - // Span data attributes: jsonb('attributes').$type>().notNull().default({}), events: jsonb('events') .$type }>>() @@ -170,29 +159,150 @@ export default function createSchema(schemaName: string) { }, (table) => [ // Single column indexes - index('idx_spans_trace_id').on(table.trace_id), - index('idx_spans_span_id').on(table.span_id), - index('idx_spans_job_id').on(table.job_id), - index('idx_spans_step_id').on(table.step_id), - index('idx_spans_name').on(table.name), - index('idx_spans_kind').on(table.kind), - index('idx_spans_status_code').on(table.status_code), + index('idx_spans_active_trace_id').on(table.trace_id), + index('idx_spans_active_span_id').on(table.span_id), + index('idx_spans_active_job_id').on(table.job_id), + index('idx_spans_active_step_id').on(table.step_id), + index('idx_spans_active_name').on(table.name), + index('idx_spans_active_kind').on(table.kind), + index('idx_spans_active_status_code').on(table.status_code), // Composite indexes - index('idx_spans_job_step').on(table.job_id, table.step_id), - index('idx_spans_trace_parent').on(table.trace_id, table.parent_span_id), - // GIN indexes for JSONB querying - index('idx_spans_attributes').using('gin', table.attributes), - index('idx_spans_events').using('gin', table.events), + index('idx_spans_active_job_step').on(table.job_id, table.step_id), + index('idx_spans_active_trace_parent').on(table.trace_id, table.parent_span_id), + // GIN indexes + index('idx_spans_active_attributes').using('gin', table.attributes), + index('idx_spans_active_events').using('gin', table.events), + // Constraints + check('spans_active_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), + 
check('spans_active_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), + ], + ) + + // ============================================================================ + // Archive Tables (Terminated Work) + // ============================================================================ + + const jobsArchiveTable = schema.table( + 'jobs_archive', + { + id: uuid('id').primaryKey(), + action_name: text('action_name').notNull(), + group_key: text('group_key').notNull(), + description: text('description'), + status: text('status').$type().notNull(), + checksum: text('checksum').notNull(), + input: jsonb('input').notNull().default({}), + output: jsonb('output'), + error: jsonb('error').$type(), + timeout_ms: integer('timeout_ms').notNull(), + expires_at: timestamp('expires_at', { withTimezone: true }), + started_at: timestamp('started_at', { withTimezone: true }), + finished_at: timestamp('finished_at', { withTimezone: true }), + client_id: text('client_id'), + concurrency_limit: integer('concurrency_limit').notNull(), + concurrency_step_limit: integer('concurrency_step_limit').notNull(), + created_at: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), + updated_at: timestamp('updated_at', { withTimezone: true }) + .notNull() + .defaultNow(), + }, + (table) => [ + // Lookup indexes + index('idx_jobs_archive_group_key').on(table.group_key), + index('idx_jobs_archive_action_name').on(table.action_name), + index('idx_jobs_archive_finished_at').on(table.finished_at), + // Composite indexes + index('idx_jobs_archive_action_group').on(table.action_name, table.group_key), + // GIN indexes for full-text search (dashboard search) + index('idx_jobs_archive_input_fts').using('gin', sql`to_tsvector('english', ${table.input}::text)`), + index('idx_jobs_archive_output_fts').using('gin', sql`to_tsvector('english', ${table.output}::text)`), + check( + 'jobs_archive_status_check', + sql`${table.status} IN ${sql.raw(`(${JOB_STATUSES.map((s) => 
`'${s}'`).join(',')})`)}`, + ), + ], + ) + + const jobStepsArchiveTable = schema.table( + 'job_steps_archive', + { + id: uuid('id').primaryKey(), + job_id: uuid('job_id').notNull(), + parent_step_id: uuid('parent_step_id'), + parallel: boolean('branch').notNull().default(false), + name: text('name').notNull(), + status: text('status').$type().notNull().default(STEP_STATUS_ACTIVE), + output: jsonb('output'), + error: jsonb('error').$type(), + started_at: timestamp('started_at', { withTimezone: true }).notNull().defaultNow(), + finished_at: timestamp('finished_at', { withTimezone: true }), + timeout_ms: integer('timeout_ms').notNull(), + expires_at: timestamp('expires_at', { withTimezone: true }), + retries_limit: integer('retries_limit').notNull().default(0), + retries_count: integer('retries_count').notNull().default(0), + delayed_ms: integer('delayed_ms'), + history_failed_attempts: jsonb('history_failed_attempts') + .$type>() + .notNull() + .default({}), + created_at: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), + updated_at: timestamp('updated_at', { withTimezone: true }) + .notNull() + .defaultNow(), + // Denormalized for easier time-based pruning + job_finished_at: timestamp('job_finished_at', { withTimezone: true }), + }, + (table) => [ + // Minimal indexes + index('idx_job_steps_archive_job_id').on(table.job_id), + index('idx_job_steps_archive_job_finished_at').on(table.job_finished_at), + index('idx_job_steps_archive_name').on(table.name), + check( + 'job_steps_archive_status_check', + sql`${table.status} IN ${sql.raw(`(${STEP_STATUSES.map((s) => `'${s}'`).join(',')})`)}`, + ), + ], + ) + + const spansArchiveTable = schema.table( + 'spans_archive', + { + id: bigserial('id', { mode: 'number' }).primaryKey(), + trace_id: text('trace_id').notNull(), + span_id: text('span_id').notNull(), + parent_span_id: text('parent_span_id'), + job_id: uuid('job_id'), + step_id: uuid('step_id'), + name: text('name').notNull(), + kind: 
integer('kind').notNull().default(0), + start_time_unix_nano: bigint('start_time_unix_nano', { mode: 'bigint' }).notNull(), + end_time_unix_nano: bigint('end_time_unix_nano', { mode: 'bigint' }), + status_code: integer('status_code').notNull().default(0), + status_message: text('status_message'), + attributes: jsonb('attributes').$type>().notNull().default({}), + events: jsonb('events') + .$type }>>() + .notNull() + .default([]), + }, + (table) => [ + // Minimal indexes + index('idx_spans_archive_trace_id').on(table.trace_id), + index('idx_spans_archive_job_id').on(table.job_id), + index('idx_spans_archive_step_id').on(table.step_id), // Constraints - check('spans_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), - check('spans_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), + check('spans_archive_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), + check('spans_archive_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), ], ) return { schema, - jobsTable, - jobStepsTable, - spansTable, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, } } diff --git a/packages/duron/src/adapters/schemas.ts b/packages/duron/src/adapters/schemas.ts index 98a36bb..13c1932 100644 --- a/packages/duron/src/adapters/schemas.ts +++ b/packages/duron/src/adapters/schemas.ts @@ -404,6 +404,25 @@ export const DeleteSpansOptionsSchema = z.object({ jobId: z.string(), }) +// ============================================================================ +// Archive Schemas +// ============================================================================ + +export const PruneArchiveOptionsSchema = z.object({ + olderThan: z.union([z.string(), z.date(), z.number()]), + batchSize: z.number().optional(), + maxBatches: z.number().optional(), +}) + +export const ArchiveStatsSchema = z.object({ + jobsCount: z.number(), + stepsCount: z.number(), + spansCount: z.number(), + oldestJobDate: 
z.date().nullable(), + totalSizeBytes: z.number().nullable(), + lastPrunedAt: z.date().nullable(), +}) + // ============================================================================ // Type Exports // ============================================================================ @@ -450,3 +469,5 @@ export type InsertSpanOptions = z.infer export type GetSpansOptions = z.infer export type GetSpansResult = z.infer export type DeleteSpansOptions = z.infer +export type PruneArchiveOptions = z.infer +export type ArchiveStats = z.infer From 08a6b4c042ed867f63a49d1df6cfbcceee825937 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Sat, 18 Apr 2026 17:38:04 -0300 Subject: [PATCH 02/10] feat: rename tables to active/archive schema, all tests pass --- .../2026-04-18-active-archive-split-design.md | 813 ++++++++ ...-18-active-archive-split-implementation.md | 1037 ++++++++++ AGENTS.md | 117 ++ .../duron-postgres-storage-analysis.md | 264 +++ .../duron-postgres-storage-analysis.pdf | Bin 0 -> 114550 bytes .../migration.sql | 223 ++ packages/duron/src/adapters/adapter.ts | 2 + packages/duron/src/adapters/postgres/base.ts | 281 +-- .../src/adapters/postgres/base.ts.backup | 1801 +++++++++++++++++ 9 files changed, 4409 insertions(+), 129 deletions(-) create mode 100644 .opencode/plans/2026-04-18-active-archive-split-design.md create mode 100644 .opencode/plans/2026-04-18-active-archive-split-implementation.md create mode 100644 AGENTS.md create mode 100644 investigation/duron-postgres-storage-analysis.md create mode 100644 investigation/duron-postgres-storage-analysis.pdf create mode 100644 packages/duron/migrations/postgres/20260418120000_active_archive_split/migration.sql create mode 100644 packages/duron/src/adapters/postgres/base.ts.backup diff --git a/.opencode/plans/2026-04-18-active-archive-split-design.md b/.opencode/plans/2026-04-18-active-archive-split-design.md new file mode 100644 index 0000000..8123d39 --- /dev/null +++ 
b/.opencode/plans/2026-04-18-active-archive-split-design.md @@ -0,0 +1,813 @@ +# Active/Archive Split for Duron PostgreSQL Adapter + +## Date: 2026-04-18 + +## Status: Design Complete, Pending Implementation + +--- + +## 1. Problem Statement + +Duron's PostgreSQL adapter uses the standard **UPDATE + DELETE** pattern for job lifecycle management. Every job creates multiple dead tuples: + +- Job creation: INSERT (1 live tuple) +- Job activation: UPDATE status → active (1 dead tuple) +- Job completion/failure: UPDATE status + timestamps (1-2 dead tuples) +- With retries: additional UPDATEs for status, retries_count, history_failed_attempts + +Under sustained load (thousands of jobs/sec), this creates: + +1. **Table bloat** — Dead tuples accumulate faster than autovacuum can clean them +2. **Index bloat** — All ~15 indexes on the jobs table need maintenance on every UPDATE +3. **Performance decay** — Queries slow down as tables/indexes grow +4. **Autovacuum pressure** — Vacuum must scan entire table (including millions of completed jobs) just to reclaim a few dead tuples + +The problem is well-documented in the Postgres queue ecosystem: +- Brandur/Heroku (2015): 60k backlog in one hour +- PlanetScale (2026): Death spiral at 800 jobs/sec +- River issue #59: Autovacuum starvation + +### What Duron Does Right + +- Short transactions: No explicit transactions wrapping job handlers +- Correct SKIP LOCKED usage for worker contention +- Atomic single-query CTEs for operations + +### What Duron Does Not Have + +- No automatic retention — completed jobs accumulate indefinitely +- All jobs share the same table — hot path indexes scan through historical entries +- UPDATE-heavy patterns create constant pressure on autovacuum + +--- + +## 2. Proposed Solution: Active/Archive Split + +Split the schema into **active** (live work) and **archive** (terminated work) tables. The hot path operates exclusively on small, bounded active tables. 
Archive tables grow with historical volume but don't affect live operations. + +### 2.1 Core Principles + +1. **Hot path isolation** — Active tables contain only jobs in `created` or `active` status. Their size is proportional to in-flight work, not historical volume. +2. **Single move per job** — A job moves from active to archive exactly once, at termination (completed/failed/cancelled). No per-state-transition moves. +3. **Archive is INSERT-only** — Archive tables receive almost exclusively INSERTs. Their natural dead tuple generation is minimal. +4. **No critical scripts** — No background workers, no partition creation scripts, no extension dependencies. +5. **User-controlled retention** — Pruning is explicit and bounded. Users opt in via configuration. + +--- + +## 3. Schema Design + +### 3.1 New Tables + +#### `jobs_active` + +Same schema as current `jobs` table, but contains **only non-terminal jobs** (status IN `created`, `active`). + +**Indexes (all needed for hot-path queries):** +- `idx_jobs_active_action_name` +- `idx_jobs_active_status` +- `idx_jobs_active_group_key` +- `idx_jobs_active_started_at` +- `idx_jobs_active_expires_at` +- `idx_jobs_active_client_id` +- `idx_jobs_active_checksum` +- `idx_jobs_active_concurrency_limit` +- `idx_jobs_active_concurrency_step_limit` +- `idx_jobs_active_action_status` (composite) +- `idx_jobs_active_action_group` (composite) +- `idx_jobs_active_input_fts` (GIN full-text) +- `idx_jobs_active_output_fts` (GIN full-text) + +#### `jobs_archive` + +Same columns as `jobs_active`, but contains **only terminal jobs** (status IN `completed`, `failed`, `cancelled`). 
+ +**Indexes (optimized for lookup and search, skip hot-path-only indexes):** +- `idx_jobs_archive_id` (primary key) +- `idx_jobs_archive_group_key` +- `idx_jobs_archive_action_name` +- `idx_jobs_archive_finished_at` +- `idx_jobs_archive_action_group` (composite) +- `idx_jobs_archive_input_fts` (GIN full-text) — **Kept for dashboard search** +- `idx_jobs_archive_output_fts` (GIN full-text) — **Kept for dashboard search** + +**Dropped indexes (not needed for archive queries):** +- `status` — all archive jobs are terminal +- `client_id` — not relevant for historical jobs +- `expires_at` — not relevant for terminated jobs +- `started_at` — less relevant than `finished_at` +- `concurrency_limit` / `concurrency_step_limit` — not relevant +- `description` — covered by FTS indexes +- `checksum` — not relevant for historical lookups + +**Design note:** No UNIQUE constraints that would prevent user-added partitioning. The archive schema is partition-friendly. + +#### `job_steps_active` + +Same schema as current `job_steps`. FK to `jobs_active.id` with `ON DELETE CASCADE`. + +**Indexes (hot-path):** +- `idx_job_steps_active_job_id` +- `idx_job_steps_active_status` +- `idx_job_steps_active_name` +- `idx_job_steps_active_expires_at` +- `idx_job_steps_active_parent_step_id` +- `idx_job_steps_active_job_status` (composite) +- `idx_job_steps_active_job_name` (composite) +- `unique_job_step_active_name_parent` (unique constraint) + +#### `job_steps_archive` + +Same columns as `job_steps_active` **plus** denormalized `job_finished_at` column (copied from parent job at archival time for easier time-based pruning). + +**No FK constraints** — enables future user partitioning. + +**Indexes (minimal):** +- `idx_job_steps_archive_id` (primary key) +- `idx_job_steps_archive_job_id` +- `idx_job_steps_archive_job_finished_at` +- `idx_job_steps_archive_name` + +#### `spans_active` + +Same schema as current `spans`. FKs to `jobs_active.id` and `job_steps_active.id` with `ON DELETE CASCADE`. 
+ +**Indexes:** +- `idx_spans_active_trace_id` +- `idx_spans_active_span_id` +- `idx_spans_active_job_id` +- `idx_spans_active_step_id` +- `idx_spans_active_name` +- `idx_spans_active_job_step` (composite) +- `idx_spans_active_trace_parent` (composite) +- `idx_spans_active_attributes` (GIN) +- `idx_spans_active_events` (GIN) + +#### `spans_archive` + +Same columns as `spans_active`. + +**No FK constraints** — enables future user partitioning. + +**Indexes (minimal):** +- `idx_spans_archive_id` (primary key) +- `idx_spans_archive_trace_id` +- `idx_spans_archive_job_id` +- `idx_spans_archive_step_id` + +### 3.2 Lifecycle Flow + +``` +CREATE: INSERT INTO jobs_active + INSERT INTO job_steps_active + INSERT INTO spans_active (if telemetry enabled) + +ACTIVATE: UPDATE jobs_active SET status = 'active', ... + +COMPLETE/FAIL/CANCEL: BEGIN TRANSACTION + 1. DELETE FROM jobs_active WHERE id = $1 RETURNING * + 2. DELETE FROM job_steps_active WHERE job_id = $1 RETURNING * + 3. DELETE FROM spans_active WHERE job_id = $1 RETURNING * + 4. INSERT INTO jobs_archive SELECT * FROM step_1 + 5. INSERT INTO job_steps_archive + SELECT *, $finished_at AS job_finished_at FROM step_2 + 6. INSERT INTO spans_archive SELECT * FROM step_3 + COMMIT + +RETRY: INSERT INTO jobs_active (copy of failed job) + INSERT INTO job_steps_active (copy of failed steps) +``` + +### 3.3 Why Not Partition the Archive? + +Time-range partitioning requires a script that creates future partitions ahead of time. Postgres does NOT auto-create partitions. This violates our "no critical scripts" principle. 
+ +Mitigations evaluated: +- **DEFAULT partition**: Catches stray INSERTs but accumulates and loses partitioning benefit +- **Create many partitions in advance**: Still requires a script +- **pg_partman**: Requires extension installation, not available on all managed providers +- **Hash partitioning**: Creates partitions once, but loses ability to drop old partitions by time + +**Decision:** Go with active/archive split **WITHOUT** partitioning the archive. The archive receives almost exclusively INSERTs. Its natural bloat is minimal. Retention is a periodic admin operation, not a hot-path concern. Users at extreme scale can add partitioning on top without Duron changes. + +--- + +## 4. Adapter Changes + +### 4.1 Modified Methods + +#### `_createJob` +- **Change:** INSERT into `jobs_active` instead of `jobs` +- **Logic:** Unchanged except for table name + +#### `_completeJob` / `_failJob` / `_cancelJob` +- **Change:** MOVE from active to archive instead of UPDATE +- **Logic:** + 1. DELETE from `jobs_active` WHERE id = $1 AND status = 'active' RETURNING * + 2. DELETE from `job_steps_active` WHERE job_id = $1 RETURNING * + 3. DELETE from `spans_active` WHERE job_id = $1 RETURNING * + 4. INSERT into `jobs_archive` SELECT * FROM step_1 + 5. INSERT into `job_steps_archive` SELECT *, $finished_at FROM step_2 + 6. 
INSERT into `spans_archive` SELECT * FROM step_3 +- **Transaction:** All steps in single atomic transaction +- **Failure handling:** If DELETE from active fails (job not found or not active), entire transaction rolls back + +#### `_fetch` +- **Change:** Query `jobs_active` only +- **Logic:** Unchanged except for table name +- **Benefit:** No scanning through historical jobs + +#### `_recoverJobs` +- **Change:** Query `jobs_active` only +- **Logic:** Already only touches active jobs, just table name change + +#### `_retryJob` +- **Change:** Query `jobs_archive` (for source job) and INSERT into `jobs_active` +- **Logic:** Failed jobs are archived immediately, so retry reads from archive and copies back to active + +#### `_deleteJob` / `_deleteJobs` +- **Change:** Delete from appropriate table based on status filter +- **Logic:** + - If status filter includes only active statuses → delete from `jobs_active` + - If status filter includes only archive statuses → delete from `jobs_archive` + - If mixed or no filter → delete from both (two queries) + +#### `_getJobById` +- **Change:** Query `jobs_active` first, then `jobs_archive` on miss +- **Optimization:** Active table is tiny, miss is fast + +#### `_getJobs` +- **Change:** Route based on status filter +- **Logic:** + - Status = `created` or `active` only → query `jobs_active` + - Status = `completed`, `failed`, `cancelled` only → query `jobs_archive` + - Mixed or no status filter → `UNION ALL` between both tables + - Time-range filters on `finished_at` should bias to `jobs_archive` + +**All existing filters are applied to both tables in UNION queries:** +- `status`, `actionName`, `groupKey` +- `clientId`, `description` +- `createdAt`, `startedAt`, `finishedAt`, `updatedAfter` +- `inputFilter`, `outputFilter` +- Full-text search via GIN indexes + +#### `_getJobSteps` +- **Change:** Route based on job location +- **Logic:** If job is in `jobs_active`, query `job_steps_active`. 
If in `jobs_archive`, query `job_steps_archive`.
+
+#### `_getJobStepById`
+- **Change:** Query `job_steps_active` first, then `job_steps_archive`
+- **Logic:** Same pattern as `_getJobById`
+
+#### `_getActions`
+- **Change:** Query both tables
+- **Logic:** `UNION ALL` between `jobs_active` and `jobs_archive`, group by action_name
+
+#### `_insertSpans` / `_getSpans` / `_deleteSpans`
+- **Change:** Route based on job location
+- **Logic:** If job/step is active → `spans_active`. If archived → `spans_archive`.
+- **Simplification:** For `_getSpans`, if jobId/stepId not provided, query both tables with `UNION ALL`.
+
+### 4.2 New Methods
+
+#### `pruneArchive(options)`
+
+**Signature:**
+```typescript
+interface PruneArchiveOptions {
+  olderThan: string | Date | number // '30d', Date object, or milliseconds
+  batchSize?: number // Default: 10000
+  maxBatches?: number // Default: 100 (safety limit)
+}
+
+async pruneArchive(options: PruneArchiveOptions): Promise<number>
+```
+
+**Behavior:**
+1. Calculate threshold date from `olderThan`
+2. Loop:
+   a. DELETE FROM `jobs_archive` WHERE id IN (SELECT id FROM `jobs_archive` WHERE finished_at < $threshold LIMIT $batchSize) RETURNING id (subselect needed — PostgreSQL DELETE has no LIMIT clause)
+   b. DELETE FROM `job_steps_archive` WHERE job_id IN (returned ids)
+   c. DELETE FROM `spans_archive` WHERE job_id IN (returned ids)
+   d. Count deleted jobs
+   e. Repeat until no more rows or maxBatches reached
+3. Return total count of deleted jobs
+
+**Transaction:** Each batch is a separate transaction (to avoid long-running transactions).
+
+#### `truncateArchive()`
+
+**Signature:**
+```typescript
+async truncateArchive(): Promise<void>
+```
+
+**Behavior:**
+1. TRUNCATE `jobs_archive`
+2. TRUNCATE `job_steps_archive`
+3. TRUNCATE `spans_archive`
+
+**Safety:** No confirmation required (programmatic API assumes caller knows what they're doing). Dashboard UI will show confirmation dialog. 
+
+#### `getArchiveStats()`
+
+**Signature:**
+```typescript
+interface ArchiveStats {
+  jobsCount: number
+  stepsCount: number
+  spansCount: number
+  oldestJobDate: Date | null
+  totalSizeBytes: number | null // May not be available on all adapters
+  lastPrunedAt: Date | null
+}
+
+async getArchiveStats(): Promise<ArchiveStats>
+```
+
+### 4.3 Scheduler Configuration
+
+**Adapter options:**
+```typescript
+interface PostgresAdapterOptions {
+  // ... existing options ...
+
+  pruneArchive?: {
+    intervalMs: number // How often to run prune (e.g., 3600000 = 1 hour)
+    olderThan: string | Date | number // Delete jobs older than this
+    batchSize?: number // Default: 10000
+    maxBatches?: number // Default: 100
+  }
+}
+```
+
+**Example:**
+```typescript
+const adapter = new PostgresAdapter({
+  connectionString: 'postgres://...',
+  pruneArchive: {
+    intervalMs: 3600000, // Every hour
+    olderThan: '30d', // Delete jobs older than 30 days
+    batchSize: 10000,
+    maxBatches: 100,
+  }
+})
+```
+
+### 4.4 Multi-Process Safety
+
+**Problem:** Multiple Duron processes running the scheduler. We don't want all of them pruning simultaneously.
+
+**Solution:**
+
+**PostgreSQL:** Advisory locks (`pg_advisory_lock`)
+- Before pruning, try to acquire advisory lock on a well-known ID (e.g., hash of 'duron-prune-archive')
+- If lock acquired, run prune. If not, skip this cycle. 
+
+- Lock is automatically released when session ends (even if process crashes)
+- Zero dead tuple pressure
+
+**PGLite:** No multi-process safety needed
+- PGLite is embedded/single-process by design
+- Multiple PGLite instances don't share the same database file concurrently
+
+**Existing recovery mechanism:** Unchanged (ping/pong via NOTIFY/LISTEN)
+
+### 4.5 Query Examples
+
+**Move job to archive** (all CTEs must live in a single statement — data-modifying CTEs cannot be referenced from a later statement):
+```sql
+BEGIN;
+
+WITH moved_job AS (
+  DELETE FROM duron.jobs_active
+  WHERE id = $1
+  RETURNING *
+),
+moved_steps AS (
+  DELETE FROM duron.job_steps_active
+  WHERE job_id = $1
+  RETURNING *
+),
+moved_spans AS (
+  DELETE FROM duron.spans_active
+  WHERE job_id = $1
+  RETURNING *
+),
+inserted_job AS (
+  INSERT INTO duron.jobs_archive
+  SELECT * FROM moved_job
+  RETURNING finished_at
+),
+inserted_steps AS (
+  INSERT INTO duron.job_steps_archive
+  SELECT ms.*, ij.finished_at AS job_finished_at
+  FROM moved_steps ms, inserted_job ij
+)
+INSERT INTO duron.spans_archive
+SELECT * FROM moved_spans;
+
+COMMIT;
+```
+
+**Prune archive batch** (PostgreSQL DELETE has no LIMIT clause, so the batch is bounded via an id subselect):
+```sql
+WITH deleted_jobs AS (
+  DELETE FROM duron.jobs_archive
+  WHERE id IN (
+    SELECT id FROM duron.jobs_archive
+    WHERE finished_at < $1
+    LIMIT $2
+  )
+  RETURNING id
+),
+deleted_steps AS (
+  DELETE FROM duron.job_steps_archive
+  WHERE job_id IN (SELECT id FROM deleted_jobs)
+),
+deleted_spans AS (
+  DELETE FROM duron.spans_archive
+  WHERE job_id IN (SELECT id FROM deleted_jobs)
+)
+SELECT COUNT(*) FROM deleted_jobs;
+```
+
+---
+
+## 5. 
REST API Design + +### 5.1 Endpoints + +| Endpoint | Method | Auth | Description | +|----------|--------|------|-------------| +| `/api/archive/prune` | POST | Admin | Trigger manual prune | +| `/api/archive/truncate` | POST | Admin | Truncate entire archive | +| `/api/archive/stats` | GET | Read | Get archive statistics | +| `/api/archive/status` | GET | Read | Read-only: current auto-prune config and next scheduled run | + +### 5.2 Request/Response Examples + +**POST /api/archive/prune** +```json +// Request (optional — uses startup config if omitted) +{ + "olderThan": "7d", + "batchSize": 5000 +} + +// Response +{ + "deletedJobs": 15432, + "deletedSteps": 42389, + "deletedSpans": 89123, + "batchesRun": 2, + "durationMs": 1245 +} +``` + +**POST /api/archive/truncate** +```json +// Request +{ + "confirm": true // Required to prevent accidental calls +} + +// Response +{ + "success": true, + "deletedJobs": 154320, + "deletedSteps": 423891, + "deletedSpans": 891234 +} +``` + +**GET /api/archive/stats** +```json +{ + "jobsCount": 154320, + "stepsCount": 423891, + "spansCount": 891234, + "oldestJobDate": "2026-01-15T10:30:00Z", + "totalSizeBytes": 104857600, + "lastPrunedAt": "2026-04-18T02:00:00Z" +} +``` + +**GET /api/archive/status** +```json +{ + "autoPruneEnabled": true, + "config": { + "intervalMs": 3600000, + "olderThan": "30d", + "batchSize": 10000, + "maxBatches": 100 + }, + "nextRunAt": "2026-04-18T03:00:00Z", + "lastRunAt": "2026-04-18T02:00:00Z", + "lastRunResult": { + "deletedJobs": 5421, + "batchesRun": 1 + } +} +``` + +--- + +## 6. 
Dashboard UI Design + +### 6.1 Job List View + +**Default view: "Live Jobs"** +- Queries `jobs_active` only +- Fast, no UNION needed +- Shows jobs with status `created` or `active` + +**Archive Tab** +- Queries `jobs_archive` directly +- Shows jobs with status `completed`, `failed`, or `cancelled` +- All filters applied to archive table + +**"All Jobs" Toggle** +- Uses optimized UNION query +- Applies filters to both tables +- Clearly labeled as potentially slower +- Pagination uses optimized CTE approach: + ```sql + WITH active_filtered AS ( + SELECT * FROM jobs_active + WHERE [filters applied] + ORDER BY created_at DESC + LIMIT [page_size + offset] + ), + archive_filtered AS ( + SELECT * FROM jobs_archive + WHERE [filters applied] + ORDER BY created_at DESC + LIMIT [page_size + offset] + ) + SELECT * FROM active_filtered + UNION ALL + SELECT * FROM archive_filtered + ORDER BY created_at DESC + LIMIT [page_size] OFFSET [offset] + ``` + +**All existing filters work across both tables:** +- `status` — routes to appropriate table when possible +- `actionName`, `groupKey`, `clientId` +- `description` — full-text search via GIN indexes on both tables +- `createdAt`, `startedAt`, `finishedAt`, `updatedAfter` +- `inputFilter`, `outputFilter` + +### 6.2 Archive Management Page (`/archive`) + +**Components:** +- **Statistics Cards** — Jobs count, steps count, spans count, storage size, oldest record +- **Manual Prune Button** — Opens confirmation dialog, triggers prune API +- **Truncate Button** — Opens strong confirmation dialog (type "DELETE ALL" to confirm), triggers truncate API +- **Configuration Display** — Read-only display of current auto-prune configuration (from startup options) +- **Recent Activity Log** — Table showing recent prune operations (timestamp, jobs deleted, duration) +- **Storage Chart** — Line chart showing archive size over time (if metrics available) + +### 6.3 Integration Points + +- Add "Archive" link to main navigation +- Archive stats shown on 
dashboard home (optional) +- Settings page shows read-only prune configuration +- Job list has clear "Live" / "Archive" / "All" tabs + +--- + +## 7. Migration Strategy + +### 7.1 Breaking Change + +This is a **breaking change** for v1.0. No backward compatibility. + +**Rationale:** Duron is not v1 ready. Users must run migration on upgrade. + +### 7.2 Migration Steps + +1. **Create new tables:** + - `jobs_active`, `jobs_archive` + - `job_steps_active`, `job_steps_archive` + - `spans_active`, `spans_archive` + +2. **Migrate existing data:** + - Jobs with status IN (`created`, `active`) → `jobs_active` + - Jobs with status IN (`completed`, `failed`, `cancelled`) → `jobs_archive` + - Steps follow their parent job + - Spans follow their parent job + +3. **Create indexes** on new tables + +4. **Drop old tables:** `jobs`, `job_steps`, `spans` + +5. **Update application code:** + - `schema.ts` — Define new tables + - `schema.default.ts` — Export new tables + - `base.ts` — Update all adapter methods + - `server.ts` — Add archive endpoints + - Dashboard — Add archive page + +### 7.3 Rollback + +No automatic rollback. Users should backup database before migration. + +--- + +## 8. 
Testing Strategy + +### 8.1 Unit Tests (Adapter) + +- Create job → verify in `jobs_active` +- Complete job → verify moved to `jobs_archive` +- Fail job → verify moved to `jobs_archive` +- Cancel job → verify moved to `jobs_archive` +- Retry job → verify copied from archive to active +- Fetch jobs → verify only queries `jobs_active` +- Get job by ID → verify queries both tables +- Get jobs with status filter → verify routing +- Get jobs with mixed status → verify UNION query +- Get jobs with all filters → verify filters applied to both tables in UNION +- Prune archive → verify deletion with batching +- Truncate archive → verify all data removed +- Multi-process safety → verify advisory locks work + +### 8.2 Integration Tests + +- End-to-end job lifecycle (create → activate → complete → verify archive) +- Multi-worker scenario (concurrent job processing) +- Recovery scenario (process crash, job recovery) +- Archive pruning under load +- Dashboard API integration +- Full-text search on archive + +### 8.3 Performance Tests + +- Benchmark: Hot path latency (fetch + activate + complete) with 0, 1M, 10M archived jobs +- Verify active table size stays bounded regardless of archive size +- Benchmark: Prune operation performance (various batch sizes) +- Benchmark: UNION query performance with filters (various archive sizes) + +--- + +## 9. What We're NOT Building + +To keep scope focused, these are explicitly out of scope: + +1. **No automatic partition creation** — Users can add partitioning on top if needed +2. **No pg_partman dependency** — Works on vanilla Postgres +3. **No internal cron/background worker** — Scheduler is opt-in adapter option only +4. **No retention on active tables** — Archive only. Active jobs stay until completed/failed/cancelled +5. **No heartbeat table or lease table** — Multi-process safety via advisory locks (Postgres) or not needed (PGLite) +6. **No dynamic configuration** — All config set at startup, no runtime changes +7. 
**No archive compression** — Future enhancement if needed +8. **No cross-table foreign keys** — Archive tables have no FKs (by design, for partitioning flexibility) + +--- + +## 10. Tradeoffs + +### 10.1 Accepted Tradeoffs + +1. **Code complexity** — Adapter increases by ~30-40% LOC. Query routing adds complexity. +2. **Query overhead** — `getJob(id)` does up to 2 lookups (active first, then archive). Mitigated: active table is tiny, miss is fast. +3. **UNION ALL** — Queries spanning live and historical jobs need `UNION ALL`. Only affects dashboard/historical queries, not hot path. +4. **Migration burden** — Existing users must run one-off migration script. +5. **No FKs on archive** — Referential integrity not enforced between archive tables. Acceptable: archive is read-mostly, data is copied in single transaction. + +### 10.2 Benefits + +1. **Hot path isolation** — Active table size proportional to in-flight work, NOT historical volume. Always small. +2. **Fast autovacuum** — Vacuum on `jobs_active` completes in microseconds. +3. **Small indexes** — Hot-path indexes remain small and cacheable in memory. +4. **Archive doesn't affect live ops** — Archive grows linearly with throughput but doesn't affect job processing. +5. **User-controlled retention** — Explicit, bounded, admin operation. User controls when and how much to prune. +6. **No operational overhead** — No critical scripts, no dependencies, no background processes. +7. **Extensible** — Users at extreme scale can add partitioning on top without Duron changes. +8. **Significant improvement** — Major performance improvement over current design at scale, minimal complexity cost at small scale. + +--- + +## 11. Relation to Dead Letter Queue + +The active/archive split is a **storage/performance** concern. A Dead Letter Queue (DLQ) is a **semantic/operational** concern (what happens to messages that fail terminally, so a human can inspect them). + +They are **orthogonal**. 
Duron's current `status = 'failed'` effectively serves as a logical DLQ — failed jobs remain visible and queryable. This can coexist with active/archive: jobs are split by "alive vs terminated", and within the archive, status still distinguishes success from failure. + +--- + +## 12. Comparison with Alternatives + +### 12.1 Table-per-state (Rejected) + +Split into `jobs_created`, `jobs_active`, `jobs_completed`, `jobs_failed`. + +**Pros:** Hot tables stay small. +**Cons:** +- Job with 3 retries moves between tables 7+ times +- Foreign keys become impossible or ugly +- Multi-table transactions needed for every state change +- Large code complexity increase +- **Verdict:** Elegant in concept, too expensive in practice. + +### 12.2 Time-range Partitioning (Rejected as primary solution) + +Partition `jobs` by `created_at` (e.g., daily). + +**Pros:** +- Retention by DROP TABLE (no dead tuples, instant) +- Partition pruning on time-range queries +- Code almost unchanged + +**Cons:** +- Current day's partition still contains mixed live/completed jobs +- Still generates UPDATE pressure on the active partition +- Hot partition is still hot +- Requires partition creation scripts (violates "no critical scripts" principle) + +**Verdict:** Good for archive retention, but doesn't solve the hot-path bloat problem. + +### 12.3 PgQue-style TRUNCATE Rotation (Considered) + +Use snapshot-based batching with TRUNCATE table rotation (like pgque/PgQ). + +**Pros:** +- Zero dead tuples by design +- No UPDATE pressure at all +- Battle-tested at Skype scale + +**Cons:** +- Fundamentally different architecture (event queue vs job queue) +- Would require redesigning Duron's entire job lifecycle model +- PgQue is an event queue with fan-out; Duron is a job queue with steps and retries +- Much larger architectural change + +**Verdict:** Wrong tool for the problem. PgQue solves event queue bloat; Duron needs job queue bloat solution. 
Active/archive split is the right granularity for Duron's use case. + +--- + +## 13. Implementation Order + +1. **Phase 1: Schema & Migration** + - Update `schema.ts` with new table definitions + - Create Drizzle migration + - Update `schema.default.ts` + +2. **Phase 2: Adapter Core** + - Modify `_createJob`, `_completeJob`, `_failJob`, `_cancelJob` + - Implement move-to-archive logic + - Update `_fetch`, `_recoverJobs` + - Update query methods (`_getJobById`, `_getJobs`, `_getJobSteps`, etc.) + +3. **Phase 3: Archive API** + - Implement `pruneArchive()`, `truncateArchive()`, `getArchiveStats()` + - Add scheduler with multi-process safety + +4. **Phase 4: REST API** + - Add archive endpoints to `server.ts` + - Add authentication/authorization + +5. **Phase 5: Dashboard** + - Create archive management page + - Update job list with Live/Archive/All tabs + - Add navigation and components + +6. **Phase 6: Testing** + - Update existing tests + - Add archive-specific tests + - Performance benchmarks + +7. **Phase 7: Documentation** + - Update README + - Add migration guide + - Add "Managing the archive" section + +--- + +## 14. Risks and Mitigations + +| Risk | Likelihood | Impact | Mitigation | +|------|-----------|--------|------------| +| Migration data loss | Low | Critical | Require backup before migration. Test migration thoroughly. | +| Archive move fails mid-transaction | Low | High | Single atomic transaction for move. Rollback on failure. | +| Prune deletes wrong data | Low | Critical | Configurable `olderThan` with sensible default. Batch deletes with LIMIT. | +| Multi-process prune collision | Medium | Low | Advisory locks prevent concurrent pruning. | +| Active table still grows | Medium | Medium | Monitor active table size. If jobs stay active too long, investigate stuck jobs. | +| Query routing bugs | Medium | Medium | Comprehensive tests for all query methods. | +| Dashboard UNION query slow | Low | Low | Optimized CTE approach. 
"Live Jobs" is default view. | +| Full-text search on archive slow | Low | Low | GIN indexes kept on archive tables. | + +--- + +## 15. Success Criteria + +1. **Performance:** Hot path latency (fetch → activate → complete) does not degrade as archive grows from 0 to 10M jobs +2. **Correctness:** All existing tests pass with new schema +3. **Archive functionality:** `pruneArchive()` correctly deletes old jobs in batches +4. **Multi-process safety:** Only one process prunes at a time +5. **Dashboard:** Archive management page shows stats and allows manual prune/truncate +6. **Job list:** Live/Archive/All tabs work with all existing filters +7. **Full-text search:** Search works on both active and archive jobs +8. **Migration:** One-off migration script successfully migrates existing data + +--- + +*End of Design Document* diff --git a/.opencode/plans/2026-04-18-active-archive-split-implementation.md b/.opencode/plans/2026-04-18-active-archive-split-implementation.md new file mode 100644 index 0000000..2bdaa7f --- /dev/null +++ b/.opencode/plans/2026-04-18-active-archive-split-implementation.md @@ -0,0 +1,1037 @@ +# Active/Archive Split Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Split Duron's PostgreSQL adapter into active/archive tables to eliminate hot-path bloat, add archive management APIs, and update dashboard. + +**Architecture:** Active tables (`jobs_active`, `job_steps_active`, `spans_active`) contain live work only. Archive tables contain terminated work. Jobs move to archive once on completion/failure. Pruning is user-controlled with multi-process safety via advisory locks. 
+ +**Tech Stack:** TypeScript, Drizzle ORM, PostgreSQL, Bun, React (dashboard) + +--- + +## File Map + +### Core Adapter (Modified) +- `packages/duron/src/adapters/postgres/schema.ts` — New table definitions (jobs_active, jobs_archive, etc.) +- `packages/duron/src/adapters/postgres/schema.default.ts` — Export new tables +- `packages/duron/src/adapters/postgres/base.ts` — Core adapter logic (~1800 lines, will grow) +- `packages/duron/src/adapters/adapter.ts` — Abstract class, add archive methods +- `packages/duron/src/adapters/schemas.ts` — Add archive option schemas + +### REST API (Modified) +- `packages/duron/src/server.ts` — Add archive endpoints + +### Dashboard (Modified) +- `packages/duron-dashboard/src/` — New archive page, job list tabs + +### Migrations (New) +- `packages/duron/migrations/postgres/20260418120000_active_archive_split/` — Drizzle migration + +### Tests (New/Modified) +- `packages/duron/test/archive.test.ts` — Archive-specific tests +- `packages/duron/test/adapter.test.ts` — Update existing tests + +--- + +## Task 1: Schema Definition + +**Files:** +- Modify: `packages/duron/src/adapters/postgres/schema.ts` +- Modify: `packages/duron/src/adapters/postgres/schema.default.ts` + +**Context:** Current `schema.ts` defines `jobsTable`, `jobStepsTable`, `spansTable`. We need to split each into active/archive pairs. + +**Changes:** +- `jobsTable` → `jobsActiveTable` + `jobsArchiveTable` +- `jobStepsTable` → `jobStepsActiveTable` + `jobStepsArchiveTable` +- `spansTable` → `spansActiveTable` + `spansArchiveTable` +- `jobsArchiveTable` drops hot-path-only indexes, keeps lookup + FTS indexes +- `jobStepsArchiveTable` adds `job_finished_at` column +- `spansArchiveTable` has no FKs +- Return all 6 tables from `createSchema()` + +- [ ] **Step 1: Read current schema.ts** + +Read file to understand current structure and ensure correct Drizzle API usage. 
+ +- [ ] **Step 2: Write new schema definitions** + +```typescript +// In createSchema() function, replace existing tables with: + +const jobsActiveTable = schema.table('jobs_active', { ...same columns... }, (table) => [ + // All hot-path indexes +]) + +const jobsArchiveTable = schema.table('jobs_archive', { ...same columns... }, (table) => [ + // Lookup indexes + FTS only +]) + +const jobStepsActiveTable = schema.table('job_steps_active', { ...same columns + job_finished_at... }, (table) => [ + // Hot-path indexes + FK to jobsActiveTable +]) + +const jobStepsArchiveTable = schema.table('job_steps_archive', { ...same columns... }, (table) => [ + // Minimal indexes, NO FK +]) + +const spansActiveTable = schema.table('spans_active', { ...same columns... }, (table) => [ + // All indexes + FKs to active tables +]) + +const spansArchiveTable = schema.table('spans_archive', { ...same columns... }, (table) => [ + // Minimal indexes, NO FKs +]) + +return { + schema, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, +} +``` + +- [ ] **Step 3: Update schema.default.ts** + +```typescript +const { + schema, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, +} = createSchema('duron') + +export { + schema, + jobsActiveTable, + jobsArchiveTable, + jobStepsActiveTable, + jobStepsArchiveTable, + spansActiveTable, + spansArchiveTable, +} +``` + +- [ ] **Step 4: Verify typecheck** + +Run: `cd packages/duron && bun run typecheck` +Expected: PASS (schema types compile) + +- [ ] **Step 5: Commit** + +```bash +git add packages/duron/src/adapters/postgres/schema.ts packages/duron/src/adapters/postgres/schema.default.ts +git commit -m "feat: add active/archive table schema definitions" +``` + +--- + +## Task 2: Adapter Schemas + +**Files:** +- Modify: `packages/duron/src/adapters/schemas.ts` + +**Context:** Need Zod schemas for new 
archive APIs.
+
+- [ ] **Step 1: Add archive option schemas**
+
+Add to `schemas.ts`:
+```typescript
+export const PruneArchiveOptionsSchema = z.object({
+  olderThan: z.union([z.string(), z.date(), z.number()]),
+  batchSize: z.number().optional(),
+  maxBatches: z.number().optional(),
+})
+
+export type PruneArchiveOptions = z.infer<typeof PruneArchiveOptionsSchema>
+
+export const ArchiveStatsSchema = z.object({
+  jobsCount: z.number(),
+  stepsCount: z.number(),
+  spansCount: z.number(),
+  oldestJobDate: z.date().nullable(),
+  totalSizeBytes: z.number().nullable(),
+  lastPrunedAt: z.date().nullable(),
+})
+
+export type ArchiveStats = z.infer<typeof ArchiveStatsSchema>
+```
+
+- [ ] **Step 2: Commit**
+
+```bash
+git add packages/duron/src/adapters/schemas.ts
+git commit -m "feat: add archive option schemas"
+```
+
+---
+
+## Task 3: Abstract Adapter Methods
+
+**Files:**
+- Modify: `packages/duron/src/adapters/adapter.ts`
+
+**Context:** Add abstract methods for archive operations to the base class.
+
+- [ ] **Step 1: Add archive abstract methods**
+
+In `Adapter` class, add after existing abstract methods:
+
+```typescript
+// ============================================================================
+// Archive Methods
+// ============================================================================
+
+async pruneArchive(options: PruneArchiveOptions): Promise<number> {
+  try {
+    await this.start()
+    const parsedOptions = PruneArchiveOptionsSchema.parse(options)
+    const result = await this._pruneArchive(parsedOptions)
+    return NumberResultSchema.parse(result)
+  } catch (error) {
+    this.logger?.error(error, 'Error in Adapter.pruneArchive()')
+    throw error
+  }
+}
+
+async truncateArchive(): Promise<void> {
+  try {
+    await this.start()
+    await this._truncateArchive()
+  } catch (error) {
+    this.logger?.error(error, 'Error in Adapter.truncateArchive()')
+    throw error
+  }
+}
+
+async getArchiveStats(): Promise<ArchiveStats> {
+  try {
+    await this.start()
+    const result = await this._getArchiveStats()
+    return ArchiveStatsSchema.parse(result)
+  } catch 
(error) {
+    this.logger?.error(error, 'Error in Adapter.getArchiveStats()')
+    throw error
+  }
+}
+
+protected abstract _pruneArchive(options: PruneArchiveOptions): Promise<number>
+protected abstract _truncateArchive(): Promise<void>
+protected abstract _getArchiveStats(): Promise<ArchiveStats>
+```
+
+- [ ] **Step 2: Update imports**
+
+Add to imports from `./schemas.js`:
+```typescript
+PruneArchiveOptionsSchema,
+ArchiveStatsSchema,
+```
+
+Add to re-export types:
+```typescript
+PruneArchiveOptions,
+ArchiveStats,
+```
+
+- [ ] **Step 3: Commit**
+
+```bash
+git add packages/duron/src/adapters/adapter.ts
+git commit -m "feat: add archive abstract methods to adapter base class"
+```
+
+---
+
+## Task 4: Core Adapter - Create Job
+
+**Files:**
+- Modify: `packages/duron/src/adapters/postgres/base.ts`
+
+**Context:** `_createJob` currently inserts into `this.tables.jobsTable`. Change to `jobsActiveTable`.
+
+- [ ] **Step 1: Update _createJob**
+
+```typescript
+protected async _createJob({ queue, groupKey, input, timeoutMs, checksum, concurrencyLimit, concurrencyStepLimit, description }: CreateJobOptions) {
+  const [result] = await this.db
+    .insert(this.tables.jobsActiveTable)
+    .values({
+      action_name: queue,
+      group_key: groupKey,
+      description: description ?? null,
+      checksum,
+      input,
+      status: JOB_STATUS_CREATED,
+      timeout_ms: timeoutMs,
+      concurrency_limit: concurrencyLimit,
+      concurrency_step_limit: concurrencyStepLimit,
+    })
+    .returning({ id: this.tables.jobsActiveTable.id })
+
+  if (!result) {
+    return null
+  }
+
+  return result.id
+}
+```
+
+- [ ] **Step 2: Commit**
+
+```bash
+git add packages/duron/src/adapters/postgres/base.ts
+git commit -m "feat: update createJob to insert into jobs_active"
+```
+
+---
+
+## Task 5: Core Adapter - Complete/Fail/Cancel Job (Move to Archive)
+
+**Files:**
+- Modify: `packages/duron/src/adapters/postgres/base.ts`
+
+**Context:** Replace UPDATE with MOVE (DELETE from active + INSERT into archive). 
+
+- [ ] **Step 1: Write _completeJob with archive move**
+
+Note: all data-modifying CTEs must be part of one statement — a CTE cannot be referenced from a later statement — and `inserted_job` must RETURN `id` for the final SELECT to work.
+
+```typescript
+protected async _completeJob({ jobId, output }: CompleteJobOptions) {
+  const result = await this.db.execute(sql`
+    WITH moved_job AS (
+      DELETE FROM ${this.tables.jobsActiveTable}
+      WHERE id = ${jobId}
+      AND status = ${JOB_STATUS_ACTIVE}
+      AND client_id = ${this.id}
+      AND expires_at > now()
+      RETURNING *
+    ),
+    moved_steps AS (
+      DELETE FROM ${this.tables.jobStepsActiveTable}
+      WHERE job_id = ${jobId}
+      RETURNING *
+    ),
+    moved_spans AS (
+      DELETE FROM ${this.tables.spansActiveTable}
+      WHERE job_id = ${jobId}
+      RETURNING *
+    ),
+    inserted_job AS (
+      INSERT INTO ${this.tables.jobsArchiveTable}
+      SELECT * FROM moved_job
+      RETURNING id, finished_at
+    ),
+    inserted_steps AS (
+      INSERT INTO ${this.tables.jobStepsArchiveTable}
+      SELECT ms.*, ij.finished_at AS job_finished_at
+      FROM moved_steps ms, inserted_job ij
+    ),
+    inserted_spans AS (
+      INSERT INTO ${this.tables.spansArchiveTable}
+      SELECT * FROM moved_spans
+    )
+    SELECT id FROM inserted_job
+  `)
+
+  return result.length > 0
+}
+```
+
+- [ ] **Step 2: Write _failJob with archive move**
+
+Similar to _completeJob but with error and status = failed.
+
+```typescript
+protected async _failJob({ jobId, output, error }: FailJobOptions) {
+  // Same CTE pattern as _completeJob
+  // status will be 'failed' in the deleted row
+}
+```
+
+- [ ] **Step 3: Write _cancelJob with archive move**
+
+Similar to above but with status = cancelled.
+
+- [ ] **Step 4: Run tests**
+
+Run: `cd packages/duron && bun test adapter.test.ts`
+Expected: FAIL (tests still expect old table names)
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add packages/duron/src/adapters/postgres/base.ts
+git commit -m "feat: implement archive move on job completion/failure/cancel"
+```
+
+---
+
+## Task 6: Core Adapter - Fetch and Recovery
+
+**Files:**
+- Modify: `packages/duron/src/adapters/postgres/base.ts`
+
+**Context:** `_fetch` and `_recoverJobs` only query active tables now. 
+ +- [ ] **Step 1: Update _fetch to query jobs_active** + +Replace all `this.tables.jobsTable` references in the fetch CTE with `this.tables.jobsActiveTable`. + +- [ ] **Step 2: Update _recoverJobs to query jobs_active** + +Replace `this.tables.jobsTable` with `this.tables.jobsActiveTable` in the recovery query. + +- [ ] **Step 3: Commit** + +```bash +git add packages/duron/src/adapters/postgres/base.ts +git commit -m "feat: update fetch and recovery to query active tables only" +``` + +--- + +## Task 7: Core Adapter - Retry Job + +**Files:** +- Modify: `packages/duron/src/adapters/postgres/base.ts` + +**Context:** Retry must read from archive (failed jobs are archived immediately). + +- [ ] **Step 1: Update _retryJob to read from archive** + +```typescript +protected async _retryJob({ jobId }: RetryJobOptions) { + // CTE that: + // 1. Locks source job in jobsArchiveTable (not jobsActiveTable) + // 2. Checks for existing retry in jobsActiveTable + // 3. Inserts retry into jobsActiveTable + // Returns new job ID +} +``` + +- [ ] **Step 2: Commit** + +```bash +git add packages/duron/src/adapters/postgres/base.ts +git commit -m "feat: update retry to read from archive tables" +``` + +--- + +## Task 8: Core Adapter - Query Methods + +**Files:** +- Modify: `packages/duron/src/adapters/postgres/base.ts` + +**Context:** Query methods need to route to correct table(s). + +- [ ] **Step 1: Update _getJobById** + +```typescript +protected async _getJobById(jobId: string): Promise { + // Try jobs_active first + const active = await this.db.query.jobsActiveTable.findFirst({ + where: eq(this.tables.jobsActiveTable.id, jobId) + }) + if (active) return active + + // Then jobs_archive + const archive = await this.db.query.jobsArchiveTable.findFirst({ + where: eq(this.tables.jobsArchiveTable.id, jobId) + }) + return archive ?? 
null
+}
+```
+
+- [ ] **Step 2: Update _getJobs with table routing**
+
+Add table routing logic before query:
+```typescript
+protected async _getJobs(options?: GetJobsOptions): Promise<Job[]> {
+  const filters = options?.filters ?? {}
+  const statusFilter = filters.status
+
+  // Determine which table(s) to query
+  const activeStatuses = [JOB_STATUS_CREATED, JOB_STATUS_ACTIVE]
+  const archiveStatuses = [JOB_STATUS_COMPLETED, JOB_STATUS_FAILED, JOB_STATUS_CANCELLED]
+
+  const statuses = Array.isArray(statusFilter) ? statusFilter : statusFilter ? [statusFilter] : []
+
+  const queryActive = statuses.length === 0 || statuses.some(s => activeStatuses.includes(s))
+  const queryArchive = statuses.length === 0 || statuses.some(s => archiveStatuses.includes(s))
+
+  // Build and execute query based on routing
+  // ... implementation
+}
+```
+
+- [ ] **Step 3: Update _getJobSteps, _getJobStepById**
+
+Route to active/archive based on job location.
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add packages/duron/src/adapters/postgres/base.ts
+git commit -m "feat: implement query routing for active/archive tables"
+```
+
+---
+
+## Task 9: Core Adapter - Archive API
+
+**Files:**
+- Modify: `packages/duron/src/adapters/postgres/base.ts`
+
+**Context:** Implement prune, truncate, and stats.
+
+- [ ] **Step 1: Implement _pruneArchive**
+
+```typescript
+protected async _pruneArchive(options: PruneArchiveOptions): Promise<number> {
+  const threshold = this._parseOlderThan(options.olderThan)
+  const batchSize = options.batchSize ?? 10000
+  const maxBatches = options.maxBatches ?? 
100 + + let totalDeleted = 0 + + for (let batch = 0; batch < maxBatches; batch++) { + const result = await this.db.execute<{ count: number }>(sql` + WITH deleted_jobs AS ( + DELETE FROM ${this.tables.jobsArchiveTable} + WHERE finished_at < ${threshold} + LIMIT ${batchSize} + RETURNING id + ), + deleted_steps AS ( + DELETE FROM ${this.tables.jobStepsArchiveTable} + WHERE job_id IN (SELECT id FROM deleted_jobs) + ), + deleted_spans AS ( + DELETE FROM ${this.tables.spansArchiveTable} + WHERE job_id IN (SELECT id FROM deleted_jobs) + ) + SELECT COUNT(*) as count FROM deleted_jobs + `) + + const deleted = Number(result[0]?.count ?? 0) + totalDeleted += deleted + + if (deleted === 0) break + } + + return totalDeleted +} +``` + +- [ ] **Step 2: Implement _truncateArchive** + +```typescript +protected async _truncateArchive(): Promise { + await this.db.execute(sql`TRUNCATE ${this.tables.jobsArchiveTable}`) + await this.db.execute(sql`TRUNCATE ${this.tables.jobStepsArchiveTable}`) + await this.db.execute(sql`TRUNCATE ${this.tables.spansArchiveTable}`) +} +``` + +- [ ] **Step 3: Implement _getArchiveStats** + +```typescript +protected async _getArchiveStats(): Promise { + const [jobsResult, stepsResult, spansResult, oldestResult] = await Promise.all([ + this.db.execute<{ count: number }>(sql`SELECT COUNT(*) as count FROM ${this.tables.jobsArchiveTable}`), + this.db.execute<{ count: number }>(sql`SELECT COUNT(*) as count FROM ${this.tables.jobStepsArchiveTable}`), + this.db.execute<{ count: number }>(sql`SELECT COUNT(*) as count FROM ${this.tables.spansArchiveTable}`), + this.db.execute<{ finished_at: Date }>(sql`SELECT finished_at FROM ${this.tables.jobsArchiveTable} ORDER BY finished_at ASC LIMIT 1`), + ]) + + return { + jobsCount: Number(jobsResult[0]?.count ?? 0), + stepsCount: Number(stepsResult[0]?.count ?? 0), + spansCount: Number(spansResult[0]?.count ?? 0), + oldestJobDate: oldestResult[0]?.finished_at ?? 
null, + totalSizeBytes: null, // Would need pg_size_pretty, skip for now + lastPrunedAt: this.lastPrunedAt ?? null, + } +} +``` + +- [ ] **Step 4: Commit** + +```bash +git add packages/duron/src/adapters/postgres/base.ts +git commit -m "feat: implement archive prune, truncate, and stats APIs" +``` + +--- + +## Task 10: Core Adapter - Scheduler + +**Files:** +- Modify: `packages/duron/src/adapters/postgres/base.ts` +- Modify: `packages/duron/src/adapters/postgres/postgres.ts` +- Modify: `packages/duron/src/adapters/postgres/pglite.ts` + +**Context:** Add optional scheduler that runs prune on interval with advisory lock. + +- [ ] **Step 1: Add scheduler to PostgresAdapter constructor** + +```typescript +// In constructor, after options parsing: +if (options.pruneArchive) { + this.pruneConfig = options.pruneArchive + this.startScheduler() +} +``` + +- [ ] **Step 2: Implement scheduler with advisory lock** + +```typescript +private pruneTimer: Timer | null = null +private pruneConfig: PruneArchiveOptions | null = null +private lastPrunedAt: Date | null = null + +private startScheduler() { + if (!this.pruneConfig) return + + const run = async () => { + try { + // Try to acquire advisory lock + const lockResult = await this.db.execute(sql` + SELECT pg_try_advisory_lock(${this.advisoryLockKey()}) + `) + + if (!lockResult[0]?.pg_try_advisory_lock) { + return // Another process is pruning + } + + try { + await this.pruneArchive(this.pruneConfig) + this.lastPrunedAt = new Date() + } finally { + await this.db.execute(sql` + SELECT pg_advisory_unlock(${this.advisoryLockKey()}) + `) + } + } catch (error) { + this.logger?.error(error, 'Error in prune scheduler') + } + } + + this.pruneTimer = setInterval(run, this.pruneConfig.intervalMs) +} + +private advisoryLockKey(): number { + // Generate a consistent hash from schema name + let hash = 0 + for (let i = 0; i < this.schema.length; i++) { + hash = ((hash << 5) - hash) + this.schema.charCodeAt(i) + hash |= 0 + } + return 
Math.abs(hash) +} +``` + +- [ ] **Step 3: Stop scheduler on adapter stop** + +```typescript +protected async _stop() { + if (this.pruneTimer) { + clearInterval(this.pruneTimer) + this.pruneTimer = null + } +} +``` + +- [ ] **Step 4: Commit** + +```bash +git add packages/duron/src/adapters/postgres/base.ts packages/duron/src/adapters/postgres/postgres.ts packages/duron/src/adapters/postgres/pglite.ts +git commit -m "feat: add archive prune scheduler with advisory lock" +``` + +--- + +## Task 11: REST API Endpoints + +**Files:** +- Modify: `packages/duron/src/server.ts` + +**Context:** Add archive endpoints to the REST API server. + +- [ ] **Step 1: Add archive routes** + +```typescript +// In server setup, add: +app.post('/api/archive/prune', async (req, res) => { + try { + const options = req.body ?? {} + const result = await adapter.pruneArchive(options) + res.json({ deletedJobs: result }) + } catch (error) { + res.status(500).json({ error: error.message }) + } +}) + +app.post('/api/archive/truncate', async (req, res) => { + try { + const { confirm } = req.body + if (!confirm) { + return res.status(400).json({ error: 'Confirmation required' }) + } + await adapter.truncateArchive() + res.json({ success: true }) + } catch (error) { + res.status(500).json({ error: error.message }) + } +}) + +app.get('/api/archive/stats', async (req, res) => { + try { + const stats = await adapter.getArchiveStats() + res.json(stats) + } catch (error) { + res.status(500).json({ error: error.message }) + } +}) + +app.get('/api/archive/status', async (req, res) => { + try { + // Return scheduler config + last run info + res.json({ + autoPruneEnabled: adapter.pruneConfig !== null, + config: adapter.pruneConfig, + nextRunAt: adapter.pruneConfig ? 
new Date(Date.now() + adapter.pruneConfig.intervalMs) : null, + lastRunAt: adapter.lastPrunedAt, + }) + } catch (error) { + res.status(500).json({ error: error.message }) + } +}) +``` + +- [ ] **Step 2: Commit** + +```bash +git add packages/duron/src/server.ts +git commit -m "feat: add archive REST API endpoints" +``` + +--- + +## Task 12: Dashboard - Archive Management Page + +**Files:** +- Create: `packages/duron-dashboard/src/pages/ArchivePage.tsx` +- Create: `packages/duron-dashboard/src/components/ArchiveStats.tsx` +- Modify: `packages/duron-dashboard/src/App.tsx` (add route) + +**Context:** New page for archive management. + +- [ ] **Step 1: Create ArchiveStats component** + +```typescript +export function ArchiveStats({ stats }: { stats: ArchiveStats }) { + return ( +
+ Jobs{stats.jobsCount} + Steps{stats.stepsCount} + Spans{stats.spansCount} + Oldest Job{stats.oldestJobDate?.toLocaleDateString()} +
+ ) +} +``` + +- [ ] **Step 2: Create ArchivePage** + +```typescript +export function ArchivePage() { + const [stats, setStats] = useState(null) + const [status, setStatus] = useState(null) + + useEffect(() => { + fetch('/api/archive/stats').then(r => r.json()).then(setStats) + fetch('/api/archive/status').then(r => r.json()).then(setStatus) + }, []) + + const handlePrune = async () => { + await fetch('/api/archive/prune', { method: 'POST', body: JSON.stringify({}) }) + // Refresh stats + } + + const handleTruncate = async () => { + if (!confirm('WARNING: This will delete ALL archived jobs. Type "DELETE ALL" to confirm:')) return + await fetch('/api/archive/truncate', { method: 'POST', body: JSON.stringify({ confirm: true }) }) + // Refresh stats + } + + return ( +
+

Archive Management

+ {stats && } +
+ + +
+ {status &&
Auto-prune: {status.autoPruneEnabled ? 'Enabled' : 'Disabled'}
} +
+ ) +} +``` + +- [ ] **Step 3: Add route in App.tsx** + +```typescript +} /> +``` + +- [ ] **Step 4: Commit** + +```bash +git add packages/duron-dashboard/src/ +git commit -m "feat: add archive management dashboard page" +``` + +--- + +## Task 13: Dashboard - Job List Tabs + +**Files:** +- Modify: `packages/duron-dashboard/src/components/JobList.tsx` (or similar) + +**Context:** Update job list to have Live/Archive/All tabs. + +- [ ] **Step 1: Add tabs to job list** + +```typescript +export function JobList() { + const [activeTab, setActiveTab] = useState<'live' | 'archive' | 'all'>('live') + const [filters, setFilters] = useState({}) + + // When tab changes, update status filter + useEffect(() => { + if (activeTab === 'live') { + setFilters(f => ({ ...f, status: ['created', 'active'] })) + } else if (activeTab === 'archive') { + setFilters(f => ({ ...f, status: ['completed', 'failed', 'cancelled'] })) + } else { + setFilters(f => { const { status, ...rest } = f; return rest }) + } + }, [activeTab]) + + return ( +
+ + + Live Jobs + Archive + All Jobs + + + +
+ ) +} +``` + +- [ ] **Step 2: Commit** + +```bash +git add packages/duron-dashboard/src/ +git commit -m "feat: add live/archive/all tabs to job list" +``` + +--- + +## Task 14: Drizzle Migration + +**Files:** +- Create: `packages/duron/migrations/postgres/20260418120000_active_archive_split/migration.sql` + +**Context:** Generate migration that creates new tables and migrates data. + +- [ ] **Step 1: Generate migration with Drizzle** + +Run: `cd packages/duron && bun run generate:postgres` +Expected: Creates migration file with new table definitions + +- [ ] **Step 2: Add data migration to migration file** + +After the CREATE TABLE statements, add: +```sql +-- Migrate existing data +INSERT INTO duron.jobs_active SELECT * FROM duron.jobs WHERE status IN ('created', 'active'); +INSERT INTO duron.jobs_archive SELECT * FROM duron.jobs WHERE status IN ('completed', 'failed', 'cancelled'); + +INSERT INTO duron.job_steps_active SELECT * FROM duron.job_steps WHERE job_id IN (SELECT id FROM duron.jobs_active); +INSERT INTO duron.job_steps_archive SELECT js.*, j.finished_at AS job_finished_at +FROM duron.job_steps js +JOIN duron.jobs_archive j ON js.job_id = j.id; + +INSERT INTO duron.spans_active SELECT * FROM duron.spans WHERE job_id IN (SELECT id FROM duron.jobs_active); +INSERT INTO duron.spans_archive SELECT * FROM duron.spans WHERE job_id IN (SELECT id FROM duron.jobs_archive); + +-- Drop old tables +DROP TABLE duron.spans; +DROP TABLE duron.job_steps; +DROP TABLE duron.jobs; +``` + +- [ ] **Step 3: Commit** + +```bash +git add packages/duron/migrations/ +git commit -m "feat: add active/archive split migration" +``` + +--- + +## Task 15: Tests + +**Files:** +- Create: `packages/duron/test/archive.test.ts` +- Modify: `packages/duron/test/adapter.test.ts` + +**Context:** Test archive functionality. 
+ +- [ ] **Step 1: Write archive tests** + +```typescript +import { describe, test, expect, beforeEach } from 'bun:test' +import { createTestAdapter } from './setup' + +describe('Archive', () => { + let adapter + + beforeEach(async () => { + adapter = await createTestAdapter() + }) + + test('completed job moves to archive', async () => { + const jobId = await adapter.createJob({ ... }) + // Activate and complete job + await adapter.completeJob({ jobId, output: {} }) + + const active = await adapter.getJobById(jobId) + expect(active).toBeNull() + + // Should be in archive + const archive = await adapter._getJobFromArchive(jobId) + expect(archive).not.toBeNull() + expect(archive.status).toBe('completed') + }) + + test('prune archive deletes old jobs', async () => { + // Create and complete job with old finished_at + // Prune with olderThan: '1d' + // Verify deleted + }) + + test('truncate archive removes all data', async () => { + await adapter.truncateArchive() + const stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(0) + }) + + test('advisory lock prevents concurrent prune', async () => { + // Test that two processes can't prune simultaneously + }) +}) +``` + +- [ ] **Step 2: Update existing adapter tests** + +Update all tests to expect jobs in active/archive tables rather than single table. + +- [ ] **Step 3: Run tests** + +Run: `cd packages/duron && bun test` +Expected: All tests pass + +- [ ] **Step 4: Commit** + +```bash +git add packages/duron/test/ +git commit -m "test: add archive functionality tests" +``` + +--- + +## Task 16: Verification + +**Files:** +- All modified files + +**Context:** Final verification before completion. 
+ +- [ ] **Step 1: Run typecheck** + +Run: `bun run typecheck` +Expected: PASS + +- [ ] **Step 2: Run lint** + +Run: `bun run lint` +Expected: PASS + +- [ ] **Step 3: Run tests** + +Run: `bun test` +Expected: PASS + +- [ ] **Step 4: Build** + +Run: `bun run build` +Expected: PASS + +- [ ] **Step 5: Commit** + +```bash +git add . +git commit -m "feat: active/archive split implementation complete" +``` + +--- + +## Self-Review + +### Spec Coverage Check + +| Spec Section | Plan Task | +|--------------|-----------| +| Schema Design (3.1) | Task 1 | +| Adapter Methods (4.1) | Tasks 4-8 | +| Archive API (4.2) | Task 9 | +| Scheduler (4.3-4.4) | Task 10 | +| REST API (5) | Task 11 | +| Dashboard Archive Page (6.2) | Task 12 | +| Dashboard Job List (6.1) | Task 13 | +| Migration (7) | Task 14 | +| Testing (8) | Task 15 | + +✅ All spec sections covered. + +### Placeholder Scan + +- No TBD/TODO/FIXME/PLACEHOLDER found +- All steps contain actual code +- All commands are exact with expected output +- Type names consistent throughout + +### Type Consistency + +- `PruneArchiveOptions` — defined in schemas.ts, used in adapter.ts and base.ts +- `ArchiveStats` — defined in schemas.ts, used consistently +- Table names: `jobsActiveTable`, `jobsArchiveTable`, etc. — consistent + +✅ No type inconsistencies found. + +--- + +*End of Implementation Plan* diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..e935783 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,117 @@ +# AGENTS.md + +## Runtime + +- **Bun only.** Never use npm, pnpm, yarn, Node.js, or Vite (except `packages/docs` which uses Vite for SSR). +- Prefer Bun-native APIs: `Bun.serve`, `bun:sqlite`, `Bun.sql`, `Bun.file`, `Bun.$`, `bun:test`. +- Bun auto-loads `.env` files. Do not use `dotenv`. + +## Monorepo + +Bun workspaces: `docs/*` and `packages/*`. 
+ +| Package | Role | Key Entrypoints | +|---------|------|-----------------| +| `packages/duron` | Core library | `duron`, `duron/client`, `duron/action`, `duron/server`, `duron/adapters/postgres`, `duron/adapters/pglite` | +| `packages/duron-dashboard` | React dashboard | `duron-dashboard`, `duron-dashboard/get-html` | +| `packages/docs` | Fumadocs docs site | Uses Vite for SSR | +| `packages/examples` | Example apps | `basic/start.ts`, `multi-worker/parent.ts` | +| `packages/shared-actions` | Shared actions for examples | — | + +## Developer Commands + +```bash +# Install +bun install + +# One package +cd packages/duron && bun run dev # watch mode (tsc --watch) +cd packages/duron-dashboard && bun run dev # dashboard dev server on :3001 + +# Root shortcuts +bun run dev:duron # watch core +bun run dev:dashboard # dashboard dev server +bun run dev:examples:basic # basic example + +# Verification (CI runs in this order) +bun run typecheck # tsc --noEmit across packages +bun run lint # biome check +bun run lint:fix # biome check --write +bun test # runs packages/duron tests with --concurrent + +# Build +bun run build # all packages +bun run build:docs # docs only +``` + +## Database + +- PostgreSQL for dev: `docker-compose up -d` → `postgres://duron:duron@localhost:5435/duron` +- PGLite for tests/development. +- Generate migrations: + ```bash + cd packages/duron + bun run generate:postgres # drizzle-kit generate + ``` + +## Testing + +- Framework: `bun:test` +- Tests live in `packages/duron/test/*.test.ts` +- Run single file: `bun test specific.test.ts` +- Core tests run with `--concurrent` via `bun test` in `packages/duron/package.json` +- Test setup (`test/setup.ts`) auto-creates a Docker container `duron-postgres-test` on port 5440 for PostgreSQL tests. Docker must be running. 
+ +## Lint / Format (Biome) + +- Config: `biome.jsonc`, extends `biome-standard-mate` +- Rules worth knowing: + - `noConsole` → warn (use logger instead) + - `noNonNullAssertion` → off + - `noVoid` → off + - Line width: 120 + - Single quotes, semicolons + - Organize imports automatically + +## Build Details + +- `packages/duron`: `tsc --project tsconfig.node.json` +- `packages/duron-dashboard`: `NODE_ENV=production bun run build.ts && bun run build:get-html` +- Docs: `vite build` + +## Dashboard + +- Do **not** modify files in `src/components/ui/` (managed by Shadcn UI). +- Use existing UI components from `src/components/ui/`. +- Dashboard dev server starts on `http://localhost:3001`. + +## Env Variables + +| Variable | Purpose | +|----------|---------| +| `DATABASE_URL` | PostgreSQL connection string | +| `JWT_SECRET` | Dashboard auth | +| `OPENAI_API_KEY` | AI examples | + +## CI + +Workflow `.github/workflows/test.yml` runs: `bun install` → `typecheck` → `lint` → `test`. + +## Telemetry + +Configured on the Duron client: +- `telemetry: { local: true }` → store spans in DB +- `telemetry: { traceExporter }` → export to OTel backends +- No config → disabled + +## Key Files + +| Path | Description | +|------|-------------| +| `packages/duron/src/client.ts` | Job queue client | +| `packages/duron/src/action.ts` | Action definitions | +| `packages/duron/src/server.ts` | REST API server | +| `packages/duron/src/step-manager.ts` | Step execution & nested steps | +| `packages/duron/src/adapters/adapter.ts` | Base adapter | +| `packages/duron/src/telemetry/` | Telemetry adapters | +| `packages/duron-dashboard/src/DuronDashboard.tsx` | Dashboard root | diff --git a/investigation/duron-postgres-storage-analysis.md b/investigation/duron-postgres-storage-analysis.md new file mode 100644 index 0000000..c571b35 --- /dev/null +++ b/investigation/duron-postgres-storage-analysis.md @@ -0,0 +1,264 @@ +Duron Postgres Adapter: Storage Architecture +Analysis +Context +This document 
summarizes a technical analysis of the Postgres adapter in geut/duron (a
+type-safe job queue system for Node.js and Bun), prompted by a tweet describing how
+traditional Postgres-backed queues (PGMQ, River, Que, pg-boss) suffer from MVCC bloat
+due to UPDATE/DELETE-heavy patterns, versus PgQ’s approach using rotating tables with
+TRUNCATE.
+The goal: evaluate whether duron’s current design is vulnerable to the same problem, and
+what architectural changes would mitigate it.
+The Underlying Problem: Dead Tuples and MVCC
+How Postgres actually handles UPDATE/DELETE
+Postgres never modifies rows in place. Every UPDATE creates a new row version and marks
+the old one as a “dead tuple”. DELETE just marks the row as dead. The old versions stay on
+disk until cleanup.
+This is MVCC (Multi-Version Concurrency Control): readers and writers don’t block each
+other because multiple versions of each row coexist. Each row has hidden xmin (creating
+transaction) and xmax (invalidating transaction) fields, and each transaction sees the
+version consistent with its snapshot.
+Autovacuum and the xmin horizon
+Dead tuples are eventually reclaimed by autovacuum. But autovacuum can only clean a
+dead tuple if no live transaction might still need it.
+The xmin horizon is the oldest active transaction ID. Autovacuum cannot clean any dead
+tuple newer than this horizon.
+Why idle-in-transaction is catastrophic
+A session that runs BEGIN but never commits (due to a bug, a hung worker, a misbehaving
+pool) with a real XID holds the xmin horizon frozen. As long as that session is alive,
+autovacuum cannot reclaim anything generated after it started — across the entire
+database, not just the tables that session touched.
+The tweet’s scenario: a 6-minute idle-in-tx session on a high-throughput queue generates
+millions of unreclaimable dead tuples, leading to table bloat and degraded performance.
+Why TRUNCATE is different
+TRUNCATE deletes the underlying file physically.
It generates no dead tuples, is
+instantaneous regardless of row count, and doesn’t interact with the xmin horizon. DROP
+TABLE on a partition has the same property.
+Monitoring queries
+-- Dead tuples per table
+SELECT schemaname, relname, n_live_tup, n_dead_tup,
+round(n_dead_tup::numeric / nullif(n_live_tup, 0) * 100, 2) AS dead_pct
+FROM pg_stat_user_tables
+ORDER BY n_dead_tup DESC;
+-- Idle-in-transaction sessions
+SELECT pid, now() - xact_start AS duration, state, query
+FROM pg_stat_activity
+WHERE state = 'idle in transaction'
+ORDER BY xact_start;
+-- Current xmin horizon holders
+SELECT backend_xmin FROM pg_stat_activity
+WHERE backend_xmin IS NOT NULL
+ORDER BY age(backend_xmin) DESC LIMIT 5;
+Current State of Duron’s Postgres Adapter
+Schema (in src/adapters/postgres/schema.ts )
+Three tables:
+jobs — with mutable status , updated_at , timestamps, and ~15 indexes
+job_steps — with mutable status , retries_count, history_failed_attempts,
+cascade delete from jobs
+spans — OpenTelemetry spans with FKs to jobs and steps
+Write patterns (in src/adapters/postgres/base.ts )
+~26 UPDATE/DELETE operations
+Job lifecycle: INSERT (created) → UPDATE (active) → UPDATE (completed|failed) =
+minimum 2 dead tuples per job in jobs
+job_steps updated_at;
+worse: each retry updates status, retries_count, history_failed_attempts,
+Hot path uses UPDATE ... SET status = active with FOR UPDATE SKIP LOCKED via
+CTE
+What duron does RIGHT
+No explicit transactions wrapping the job handler. The adapter uses atomic single-
+query CTEs. The worker holds no Postgres transaction while running user code.
+Transactions are kept as short as possible — just the time needed to fetch/claim/update
+state.
+SKIP LOCKED is used correctly to avoid worker contention.
+What duron does NOT have
+No automatic retention. _deleteJob and _deleteJobs exist but must be called
+manually. Completed jobs accumulate indefinitely.
+All jobs (live, completed, failed) share the same table.
The hot path’s indexes must scan
+through all historical entries.
+Revised assessment of the tweet’s relevance to duron
+The tweet’s catastrophic scenario (6-min idle-in-tx blocking autovacuum) does NOT apply
+to duron’s adapter directly — the adapter is well-designed in this respect. It could still
+happen if the user shares the Postgres database with other parts of their application that
+misbehave.
+The baseline problem (UPDATE-heavy patterns creating constant pressure on
+autovacuum) DOES apply. At low throughput this is invisible. At high sustained throughput
+(thousands of jobs/sec), autovacuum runs constantly and index bloat accumulates because:
+1. Completed jobs are LIVE tuples, not dead — vacuum doesn’t remove them. The table
+   grows forever without retention.
+2. Each vacuum must scan the entire table, including millions of irrelevant completed jobs,
+   just to reclaim a small number of dead tuples generated by live job updates.
+3. Many indexes (jobs has ~15) all need maintenance on every update.
+ Design Options Considered
+ Option 1: Table per state
+ Split into jobs_created, jobs_active, jobs_completed, jobs_failed . Each state
+ transition is a DELETE ... RETURNING + INSERT.
+ Pros: Hot tables stay small. Cons:
+ A job with 3 retries moves between tables 7+ times
+ Foreign keys become impossible or ugly (which table does job_steps.job_id point to?)
+ Multi-table transactions needed for every state change
+ Large code complexity increase
+ Verdict: Elegant in concept, too expensive in practice.
+ Option 2: Partitioning alone (by time)
+ Keep one logical jobs table, partitioned by created_at (e.g., daily). Retention via DROP
+ TABLE jobs_2026_03_15.
+ Pros: + Retention by DROP (no dead tuples, instant) + Partition pruning on time-range queries + Code almost unchanged + Cons: + The current day’s partition still contains mixed live/completed jobs + Still generates UPDATE pressure on the active partition + Hot partition is still hot + Option 3: Active/Archive split (chosen direction) + Split the schema: + jobs_active + job_steps_active — ALL live jobs (created and active state) + jobs_archive + job_steps_archive — all terminated jobs (completed, failed, + cancelled) + Lifecycle: +3. INSERT into jobs_active on creation +4. All UPDATEs (status transitions, retries) happen in jobs_active +5. On terminal state: single transaction that DELETE ... RETURNING s from active and + INSERT s into archive +6. Retention runs on archive only + Why this beats the other options: + jobs_active size proportional to in-flight work, NOT to historical volume. Always small. + Vacuum on jobs_active is microseconds, not minutes + Hot path indexes stay small and fit in memory + Only ONE move per job (at termination), not one per state transition + Code changes minimal compared to the table-per-state design + Archive receives almost pure INSERTs — minimal dead tuple generation of its own + Relation to Dead Letter Queues + The active/archive split is a storage/performance concern. A DLQ is a + semantic/operational concern (what happens to messages that fail terminally, so a human + can inspect them). + They’re orthogonal. Duron’s current status = 'failed' effectively serves as a logical DLQ + — failed jobs remain visible and queryable. This can coexist with active/archive: jobs are + split by “alive vs terminated”, and within the archive, status still distinguishes success from + failure. + Partitioning Decision + The question + Should jobs_archive be partitioned by day (daily DROP TABLE for retention), or kept as a + single table? + The constraint + No scripts required for duron to function correctly. 
A retention cron that drops old partitions + is acceptable (if it fails one day, nothing breaks). A script required for the system to operate + correctly is NOT acceptable. + The problem with time-range partitioning + Postgres does NOT auto-create partitions. If an INSERT arrives with a finished_at + matching no existing partition, the INSERT fails. This means time-range partitioning requires + a critical script that creates future partitions ahead of time. This violates the constraint. + Mitigations evaluated + DEFAULT partition as safety net: Catches INSERTs that don’t match any explicit + partition. Downgrades the creation script from “critical” to “important”. Works, but the + DEFAULT partition accumulates and loses the partitioning benefit. + Create many partitions in advance: Run the creation script monthly, create 90 days of + future partitions. Tolerates long script failures but still requires the script. + pg_partman: Postgres extension that handles partition management. Requires + installation on the database server, not available on all managed Postgres providers. + Breaks the “works on vanilla Postgres” promise. + Hash partitioning: Creates partitions once at setup, never again. But loses the ability to + drop old partitions by time — defeats the main point. + No partitioning: Accept that retention = admin operation. + DELETE with dead tuples, accept it as an + Decision + Go with active/archive split WITHOUT partitioning the archive. + Rationale: + The archive tables receive almost exclusively INSERTs. Their natural bloat is minimal. + Retention is a periodic admin operation, not a hot-path concern. + DELETE ... WHERE finished_at < X LIMIT batch_size in a loop is manageable. + No critical scripts required. + The user’s cron can run grows until next run. 
+ pruneArchive() on any schedule; if it fails, the archive just + Users with extreme scale can partition jobs_archive themselves without duron’s + involvement, provided the schema is partitionable-friendly (no UNIQUE constraints that + exclude the partition key). + Proposed Implementation + Schema changes + Replace the current jobs and job_steps tables with: + jobs_active — Same schema as current jobs , but contains only non-terminal jobs (status + IN created , active ). Keeps all current indexes needed for hot-path queries. + job_steps_active — Same schema as current job_steps . FK to jobs_active.id with ON + DELETE CASCADE. + jobs_archive — Same schema as jobs_active plus no FK constraints from external + tables. Fewer indexes — optimize for lookup by id, group_key, action_name; skip indexes + that served hot-path queries. + job_steps_archive — Same schema as job_steps_active PLUS a denormalized + job_finished_at column (copied from parent job at archival time). No FK. Minimal indexes. + spans — Keep as single table OR split into spans_active / spans_archive parallel to jobs. + Simpler choice: keep single table, manage retention independently. + Design constraint: ensure the archive schema would permit hash or range partitioning if a + user wants to add it without modifying duron. Any UNIQUE constraint on the archive should + include a column that could serve as a partition key. + Code changes in the adapter + Creation path — INSERT to jobs_active , unchanged except for table name. + Update path (retries, status to active) — UPDATE jobs_active , unchanged except for + table name. 
+ Termination path — New transaction: + WITH moved_job AS ( + DELETE FROM jobs_active WHERE id = $1 RETURNING * +), +moved_steps AS ( +DELETE FROM job_steps_active WHERE job_id = $1 RETURNING * +), +inserted_job AS ( +INSERT INTO jobs_archive +SELECT * FROM moved_job +RETURNING finished_at +) +INSERT INTO job_steps_archive +SELECT ms.*, ij.finished_at AS job_finished_at +FROM moved_steps ms, inserted_job ij; +getJob(id) — Query jobs_active first. On miss, query jobs_archive . Cache the “likely +location” if calling repeatedly on the same ID is common. +getJobs(filters) — Route based on filters: +If status IN ('created', 'active') only, query jobs_active only +If status IN ('completed', 'failed', 'cancelled') only, query jobs_archive only +If mixed or no status filter, UNION ALL between the two +Time-range filters on finished_at should bias to archive +Dashboard queries — May need two endpoints: “live jobs” and “historical jobs”. Avoid the +UNION ALL when possible. +New public method +await queue.pruneArchive({ +olderThan: '30d', batchSize: 10000, maxBatches: 100, // or Date, or ms +// optional, default reasonable +// optional safety limit +}) +Internally: loops DELETE FROM jobs_archive WHERE finished_at < $threshold LIMIT +$batchSize RETURNING id and then deletes corresponding steps. Returns count of deleted + jobs. 
+ Alternative nuclear option: + await queue.truncateArchive() // For users who want zero history + Documentation to add + A “Managing the archive” section that explains: + Why the split exists (brief version of the MVCC problem) + How to call pruneArchive from a cron + Example with setInterval in a long-running app + Note that if the user wants time-based partitioning, the archive schema supports it and + they can add it themselves + What NOT to implement + No internal cron or background worker inside duron + No automatic partition creation or management + No partition maintenance scripts shipped with the package + No dependency on pg_partman, pg_cron , or other extensions + No automatic retention — user must explicitly opt in by calling pruneArchive + Summary of Benefits + Hot path operates on a small table regardless of historical volume + Autovacuum on jobs_active completes in milliseconds + Hot-path indexes remain small and cacheable in memory + Archive grows linearly with throughput but doesn’t affect live operations + Retention is an explicit, bounded, admin operation the user controls + No operational overhead introduced (no critical scripts, no dependencies) + Users at extreme scale can add partitioning on top without duron changes + Significant improvement over current design at scale, minimal complexity cost at small + scale + Tradeoffs Accepted + Code complexity in the adapter increases (estimated 30-40% more LOC) + Queries spanning live and historical jobs need UNION ALL or dual queries + getJob(id) does up to 2 lookups instead of 1 (mitigated: active is tiny, miss is fast) + Retention via the hot path + DELETE generates dead tuples, but in a low-contention table that isn’t on + Migration path for existing duron users requires a one-off script (acceptable per user’s + decision) diff --git a/investigation/duron-postgres-storage-analysis.pdf b/investigation/duron-postgres-storage-analysis.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..ef2fdb8ef8f209bd0a5a5a733308cb512e194b4f GIT binary patch literal 114550 zcmb@tW0a)Pwx*l5?ToaIO50{7Ds9`gZQHhORJzi(ZFlY6XP@5Z_8EP9jC+4X#2jm_ zm}5o6i1p3qeZNVjARC@bCZ_#LTUnj2-^I zTIo9(ix?Z)8W{r^WQ=W0oy-6%teh+WK0X*nCkJDFYZy1+WX*90TuFrPWwjj%u{5V@ zEidKu2#tbG{pZ<*&|5sp2GaUYi3u%QPd-QTH{T;(APrSgQK=TZi3wU#l<=WDlwyWE z#U2Uvop0h;2@|a(%mCE$)u5tSKsYI#Tw2-8lfcOGuh`C;2vmulcb_ zIbLLVFj7y7s=#%;+WmylS>urITwF>VM=GK2a=6X>>gmKgH@H)S*rFTtUe3Bc8j`#2 z^QT>NRchhPQ_bO2FHp-(AKpVtak@IKC{9o<$_~mtL#T4LEn9Ks*MeI_cG}2xjoOMa zu(tlB;i;E!ZL^e9SFJ(P;`<(TIDNuPy@zF_(IYYHNn@%=cCf94m9V4bQe!tSEfMJR zxuzfK1!ujy>U>$FmfVlXZvi8-aZ~)eoHtfRe@;&5ndas`1NQ17TzjLnC7yJyR+8}L zR`_3Xjy}^{?sIni`tTTLGC*nAr{=mPmQOLHF3vLg{VGfT2e`VfyAB2GSxT4ZqRf8K zXoUs*am7J8c&fYZ99I>4?x5CWI`166iBhhs>Iu?j+L|9*uI}Q z;2gS1C1Gd#EH(QX*=r@~j7XObxDp<}lX%s+t2RI`t3^+h ze3s`PqrFyB-<5X2oS?yY*iUwU1n_3NQ1O{}$LlWLH`pq-8sMopzMA*GZFrL*Ua6fQ z26|3u+0^O4^#Ec3!50MeARt;AS4KKE$P1^jk(tA1a#zb@a0w%oIkO^sRr0q9`gE zA6BxNV^8k7LOMBL%hz{11?wm(GKfFx)_M|ZH^W$AP|6ZNm*l`9Z*S!nbQzmN*9M|8 z@yEL_T3K`Rnz9*&nS6W7C#reqT_n&j*+P+K@9GY~EiykWs16g^db^tOb=&u9-dpxK zy%J($y53%S2%KqSg^gQEj!9>%&&?l4#BT_=Lg?mqOOkPL2tJ@Vu=PjX`VQq#si5Q` zg<&38Pi3Hw^cbQNvIZOjH}OoC{21JxntXU`Juvx{m#c>FM7;{9ZFadIv3mkZ!fqD9 zD}RR2W(}J}_=!MQ3LmD+Elr<~N1u06UYO%3B*-f^DO{U-H%;6nCxZL2T#Kc`F%bfW znq`@$p@?%`^oLYI<71y~UJ25*`>)8XvEV&dr~(jPI+c7={{GV%H8$tWf9D{l=Qnl7 zPZ@^;JdPu=IZ}tGWk2y4rw!9OD0D$bPGS9+tnq15X9+Xp-#pnEek{6nv_`!9zXvrd zo_aJ$a{=XSF@5D_|8fl|3eoHG)nlB+e8Pc->n^+38AY-^rMKN3OO>PK=F6|oMzS$} zIVD>YOWA{k(x-S(GvNq(!xG9mMGuM2M6E*D>u)1sZVe)WvS8lR^SEJ%+d14CsW|Hl zD$H<{&xLZYw;r;ovIigI6}af}fsY*3LR{sqoUWl*p1F{>O_ybqiKHM-ysnjOAgm)C zz`aL#;ouFYN*;{UFVqQzbT)39?dnlr5Y4@w1DLpro5n-QfZoni1P`-H`ROp2<#iTM z7Q%lzM%wfNEfB;8L*KLZ0pSNZFOD(+>*Rgd!>tYUH^HCxAvX~oX?qf;U%9E(u?o}( zY+`A7r;go;Sb`)R{zj8-Vn;H@u5D4}^%Ew1WYIyENI=twh$o5@NEz*0z*-`ZLbhKc zghNua%8O{=I7Jk8FlL*I2!nCO`HRvw!qMeB&+0_EXF|$Y zmp1;S%~IJ-D}0*qycKLX%dKvvHq;z7=p^@8K}p`}lDe;4cf~Ythb(>T=NgLS^Ht>z 
zVBM8pa^e?#l1@5|&--s7`~vOb{y7gvYDkIql(iR=y020QM?%$M-z$M^TOGM31rtax zQpf3yh(QI==I2Axgg**s=FIH3v43tXb=M_7!?rP%PkYBmRfdo=aPO2xG0|atNGnb?mH8xf zpTb<{H|kxZqjp`IFxI+6ehpX8733&SG0~jli)SwOK0ofJ@p~oth^YBFv=NyDrV1UP z%_TJZCwTo`)Qj6cl0r|UHswYIfKO~-YnSg$Sa&}yozpgb=!fDb7M$#eEdh$+k^%HN z{2Ii!N6^kKod$aE>GuO+I$Ml|X#N8^G+Vj0a%L0uc)E=T2XVJzbQCF8zS-UZ!8V{g zMY&I!+ZG&R?3>Mk0`JI%9c-a0x(S--mFMDZ2EX>ak-sRbVeNDT=hm~|o+>qdzpm~c z#eTVMvcf+Jx1GvVdAU{!uH9%gD_>W(BB5Qxk|9)yq!5cHmzJ_NV`alaLHb5d+3)v~ z0JqW86ogripH^p&mgA^Nrxs+XH|t_$^Fm$LGv>W}d6=}n&gIsa$cMhHy@+Tr=+csc zYLkv&P#{dfGsGtm3qr7mZXLpC@tq)g%UFqXs@b>%6sMc_3is!AR7jcV>pqsG6@Vz{K6)fcMwur*JyG+>=IjU)>lR4 zgbq zvh%Jo3k3)@Ffm=yOnGSYguQk3fr?L%FXm4o3vyA-OMyEmroxD=Ra8O3|L0pXD3}Es z0bJ_a-~p1Qb*o}p2}|$A+6y7SNM=Ybi?DzV`$63l_$^$NMWR_uN`;EXSgc1(>2^q? zNWAAtu+#CRR?5|2z0%-HC{iyEToivgs*qcH+T-b^c-1zJpFz_kT8Bq+_|32^dHW4D za(D_B-_p3<-4kg`o|{JHUa$Z+il$wX?Mw8o*Uy|&{Ne(B)W5#$Bp8i7w&Wq7Cgz2K zA}ioH#Y$Rr$gs(&H zr#fdxv4)*t+N~i=)xbc~t69{f@h{XxkTLya)>G-?z55m2Q{o_rG4`y2^H!x;ww1t8}p@paOs_TRmFzaqq+Yd$o zL|kaBk&(7f)Gg(oBW9#2E;wS17E;~%TGA4aAfW*bQDMP-#AQclt;ND#14Fz&W6+%l z?tY;&f*c^~!Q=Q_iHI{&Uco-GsoGZGHdw;$vo(o z2&A63&EE>RcB9qOr5&Nr6fu+Z_=?g;j92v-Y_4J2 z)x;ik^<%R8sdzBfO95MKdM~7`{B_iFk%IqcMgT~3cgCuz_`7hjcX5ZZYaD2e#s|@E>KCsMkrij&=W9e$cTr{I12}HD}S5T!|^H^_gnV(@`kU*|* z$(#_QnixnKlxk)F*{Anm28+Ezv)Xm^`895pNf5uAUQBBiMixVK`leNdL7dd2jPi2T zqOJB)Xpul(Vd*o&S9pJW<}Fne3y*UuvWM#f;@1xEMJMe|c3!Lc<7}v{aIylHAstsu zHHO{MczGioQh`9*-uQIXbjcbE0`_gCen^b0Tbu5Vj0JZj-xncswa6!ov5nDx@Y>(# zAC$-RZ?vc4Zub}O6!cC1`Rrh9;{;&-JFNs@P%?J3b#^c`b_B5g&jDdu8>heXj(~sA zqS8Mw@IUhXWBy;cCE8Zgf-fexk^F6pNL5)wr2A_6XkhD`O-NTm;0Je1f_6b&{oz+f5r4oP98 zUr|K#a5Ksh`nsC!ekMJ8+<;t*m>35G$R8p*=VX*vN|uD@Wj5OM04mjE#r0BaEBTLFR*WLg1s3AC^S`s430 z3nmx9ZU>hm;`jI!j)LMIBg?FRZ}(K;nec0i_-ImIpZtV1@Vu_6|Z$16k0AUjb_ITdm{9 zgAyN@+!1j@&w|A4Ti=yx!{9?`9l+ehy#b;P)*VQM5*UL5ofA`s!o3v{ic=>AMkcb1 zCoTdh7tM%6D8g-zmWa1M0EGz6*H6R37{T5T)ri_P_^H2SAjP^(d?pEfKlW*fE|uvWz7 zs7}B0LHJ$rTemLiEwt+Z#D2#c123W;7+*YJg6&wskzR^8XiYH707Rk`0I3zJ3K9;~ 
zG1O-e@&K!%f0?`!aR`$8kd^^eeawnrmn0s!Tq2re2q~r#DOGxtzlx*j(iPiES(575SB#=j0J5AhCh4jU(wAO;xqURac{i=z^wI-_FmS&!Oh z_Gi}FYt41OyQq?1scopel8KW|l7W+BD`Zq{7tt28E37Nz|1esRTj*P?TD&jPS4Pj2 z&ba2xWb3kiAAE$K)pdk>w7pq84xaIzCE{e@^x$CQsN$ruj<8{8*kp`n9A+G|jy9QT z!Zw3x9%>#p>o6v#U;S1wA#04}%&)GwETx;{E848&tu`!cR5vY@Yn(>aPN4twL$Fn= zRoPWYUYbFjLAO@%I-gH5w>r0|SG@O^??m4$U`;PlFtfibKO(d{}*{(c(!DMWc0YK^2(nr6}^s`rWrWPBQtW=O193?g1QY2jViIF zJkCL$F|R`R%vTy%-bnSxY>Z6|UD``pWx8`(LJcYnaCPgZv%0szwV@14ZA0(T5Q}}o z#_5vHqvq;zx^0)Bck*{8NZwGl&~Ak16(<+U&LHnaiGlruJ2pQi z9vO`xjo~UbE zzoKW+N@=`Ub7(#+pPZ?Q)9u%=rzXH9#NS5x4o4j*Ka{C?&~0};1Y1t*41G^H9-%po zzSVv){)h)l4QdFQ6GTKHM@)h#Y&vs>rP#wHfqqW{H=c?V`vHRF_ z9%~J-mRP-+6q}4T#@)7w*!rWGr&!mcXV!RqG$Pb4q`2EnH~kC2YS;?h>T=VhvD#ui zl~`x)(O_~fVo#oIBjJ2o!&~ij^Y>HGWq0vY@w@Ae3zX}|=4r>hdb)vz&gyK(kadAm3ILU^46$Fu4l;azb`@tOP7 zGOHP(nZLBL6y8$AZtf}Lxqa%lkc~f$PsgA6?e!4#l%CNzVJbZnKaHN1$(ifwb0;(s zd>a{#8Oo*R#MjYuFrQshSF<(gpIWPP+wM@l-u?cenp$aE!>P^fo$cLy8S)vuM9`U& zqI=N$gC#T6`wzHEPqN{$coLGAeapKe(6btK~U)t}@rh^}XBFV0`Gta_&5xUuvtY zhx2Xe$@ph57B1&G(L*UVqDt9OQi_iR2a!f$rr)ij;I@J` zTVU~i-f8@t^J&qL{Kj;*s$^}cT=J=Nd_NfGsxNGoecASKa$S0~N$nr7)El4E7AelQ z=boZ&DqW*5;F6{RGFM4bHCdpIb?~!SvGg>mB$bMWld)bEaZgE!j!9vajkn8;i>1wQ zPrT*Zpe=~1>l`Mxm9i2DV~Win*;ED;$U!F{aEWYDVgN(Ek8NdG!fHyt)poQ7M-mU9 zaH;7evxeHBO}-F@@H-Ovd*cjUP|w&M%>CvgRM5>DipGz!O1Mwx@quJ{H)0&?i|KFv zti9xIJZD*#x2hd(t>+CUk9J)KF;O=nAr3N~lvWXj7WAQ? 
z4TnBNQ>9Fq9F2$b3*B4=cQbHJz5Jbpvr<4PWX&ff8 zuaEFNuMPm|1<#$W^O``26P2mBb~tbxo(AC9QSBIS_b1j%&O-v$t0mPC{wv ze)a+x3S!MzrLX7YvSn7)uKl7d{q)KHZ96R!m*0(=U{*>hqdhl$oilZ|8ijg!7%-mL zyAW$CbdfrKw1f=XfI=)k&D8vTL{sdKDYH229>6XPl5+T?6#<=QmlIbEE`EO$3Bu=Y|#ZIe>8DIBS>-f~_dhuyx7!t(dvtH(n4{-V3u z`&D~;+uhxW6470YA?g*>u)<^3%5O+RdVj1NCysf+&)<1pMigSgEifgeBl`ZsR{RJ1 zHkuUjp{T8KOUQe*9RgERB9}8$2spuj+Mr*pN@i?m9_VSw-psC@Y`QsC z<*Dr_NF8J@^7Udu)RdJlj*Lci&6l-jN?1g@u&Ym_Rc_Yooep>y#f;Mj6li>;eNcjrz3`myaP@r?vnd(7fvvHU6V=w+(-t%zeiP z2BM#r2+rTK#8I%zWU&2WwqgZ0X8%bBJDX~Y1y^>2538T*>k;)e0uh76L~n@CPx#=i zXRW_8cdILIDp&AUY8_Wgoula%jP#Ky7>@#qRBC*y*%2yRr{NSVMG{9`G=*rZcD#9t zan+R+ugjHdudDzou}UEu077-PLMn;RhTQ__eKfR0x^8-CB2-zr) z=S9(0g4ijLHkoYfAMOWAEE_wo=u-tdfCQDn4030Hz|Ly1a3Bp@n;yNicuPKb@&F0f z8HjNN0wgr#gz-c;rpxsTsqZ9_(CDv@Y&G4xkQEYKdkIDWgOy-589E3_M>cG*9oRen zsOUn){N8wDXzZLhs2}Af^()?WAH(EZXOa+KIXNz{GSqcVNrI9s0c% z#K*A423qobJi;aQfbkQL5$JWqA?kcUl}QguQ;knWH3%7QduF8ACTZ{(X&9o4UUGhc zS!fGho7|o+X$I3AWjJXBBkp?*ppA%M5aWqI+K*hiB&vy>uRnq9Bnchk#28%lWjB}v z2QxK4*sfYyNIX$0v0+c0qlm63)gfix4hSYHu)^nF1FN%FhClgKIi;Ei{HNTm3wf#nKN|gXq4!N78FyEJzIM_0gha3Jvo3&*w{A1D! 
zzA$u2YjNU}8FE!sG>?FeB|x>Pc~J2wX8_Z_{QMe{_FG7b$5onahy`!)aXN_5r*Uy)e zNNh^9l>P>cIG%GkRzpSj+Y;T~>nESX=aHD8odlaLP&A>9Pk1(l)X}eG78sXg#A*jl zsc6Fv@4xZNETQ4u%XG40OyQ_Acxk??F`!uZo(AOPbEjJyUD&W!DO`F)^SosX{Yzf< z(?4^5PqfR5maW;Q5cTR?@f*pU@f)w>i_CkTnuS%6`cs#oT1ZZCB40G(e9^ylV;~0 zi5sS)(b7I>s#ARRN=J<;fY^GvIxL1NCn$z#foylL*RGB`ZSSBKQgLvMq*4k(bx2ft zTAG7&(Fy%*IW7OA1ux#)bbm{x=HQw%$HE9T?tM%vjPoGCgbZNnieea^T{qgYj zXzwn0ALo+06|*>aZ{xP$bK%??oD&CLSF8JZW1$z>H`f_|E}V_Bp`ir6E>bU-;UEep z2|tQ+r3<6mwk=MKwb$@DnoJ-tQ zwqJjLR}(5`SOzJAA&tLS-&(T|B$l>B_a%fTVIobMkZnTE$)c1 z05r7#w?Gh(T_9H0acz$4?ONLiIA9+y2MP0O>Zhw%+hC*@B?1P#*2b#}DKj+HG zD2VE=lE*vY+&$WVHUjM469PpYQF)p$FKQTYIbzy7%%{(gn6Y_ezXsH)(mA{C9ZQW)lOBuV< zQkPd;Sskfnfl?gbD$m|6^0_bu`A;IbD7B2Veslwxs<}yi*TJ!yha;`5;Rjct*w5*G z^Q;>N#wt_G6QU&#iB(*^QS{i@cClXEolUF(4{`WGA>IY*#!U?%=-AEEs31lEQHjzg z-;4M?eWsEG^-#i4+HDIqAav*Kz0E%}6lU>%&6tEOc*=#RY9~r;V&-V?@ z1Vwiw7(sowu8e^JB}i~`Bj^eb(JGnW38WM5Z2axJtzh;d{28~jqIN2a=?-XqR_F(V zkBs7*ZTn@Rpon1Ebz_&qNL}fL!TN42;#7lQq<%s9HOGQ*!wgdMwuUDzoMIjCo zx$74p)iTiNnAgmYz_m)pE-dMa)ewqHCL9Z;?2n|Idk^0`(FzGiYK=@A$cqoRdTB~l zp{GPS1`L5`0nZ#N)?1GpWUnN593j8(V@XI3)CQw@If5@VJ=r(B!i*SD`&2_ zMJb{$m2w!Y>&PN5;i4HyR|_5_MG7|FpdRn3oD^K`-gl_mQXta7!>L+dYZZc4|CWoDn+N!xLAaw%c|&GO@scSsKrIq`M~ zOK%h>Hdf=<)e!0&pAF8id!E{0cV-V8-$RatYpS7)9_a>!pFmB4v2D0o>{amHQP93$_UY5H$-Or^UyrTL+5XGDkQe%US_UIS#UxWaB+ zc7GUU{?Jszy*C1(uv~b+T~lDs=0|Z1RumAsqSAl;y%4%IY|;MIWvC5xi!aqPSeQ;p zfZE2TVAeqYlp!uVx5tN{8;|{lT#35({ogtMuV3`P{I35S|BUJXX`ua+^8ejI`{#=P zzbv%>Fw6d7q5XGe*?)D`*a1xc>BIdmM~~?r&fCBC^MAS_l#QM3$A6U}wALL+ek6F; z*7jYYqR$yDe=;NA?^s*9cwE>$uViG0Ucy6)rqFswNMTT|p>4D2tHs?^qLI>hUOXvvX*zRmefRd((bu)ydeysW z)xM(JHW|f0<9+(L`@m$gu`)UsY`Ia@E5R}EvBjrj*RcBOB5=ch^=5l@hU0~Wk#W4p zQ+kfe;XfQ6IF2JDA7$bnpNhcq7R` zI-u)6Fxv7UmtRFvPNdD_0Ic=9Kt3SJD#USh!Y9N??II*pVd8@#vUCi_NG3@d#pZIR z{0eZxJs-zwf;apq;a!tu%oI9IQ%{_wW020CIY#q0S*+YxFN%w13|GEgs|{cnD3N`sGi!4GQYI1g|42 z(lDD0oMF2|&fY_cY%ER+A?rV+egIh=bxmi$&}}4vNBvSxYWCNILQ>V14!Px4X_P&V z^MsAE0>X-;QZU3E%QWka0^+Sct$|O)SdkFUtpk-61_=2AZ4D)M|UX 
z-rmmMCN}xaBZAswCRpD-t7AJ|NCd{9H+LBUb7 zPRa$y39R(;Dq%UkL^Q^hkx`5LDepmpqeJdR#g1mrkx4RHuwGl=aa%REbpT_2?wS$p zA5fICCNNfS{Jgdz4kzklVnNc7poI{ceo#Xu0rElSHAi%Xb(&q42;;7z5Sc`PDqz;? zs`U*VJ(LO;ow;Q5*eZ)7y1{N7K3PUFIlL+4Mgk#4k8u z2@v#ysC4l9szAe2Vsv{)a=0lt`2r-;NHnCtPw87vmp`_(y*)4aey7BK?#jiuZdq6`x3YX+5X6MYu1p+l8Ki^(&EsHu`d-=C(Q!VF;+G8-emg zLK?gU;8xN&8vf}6v!P{G03}g)1g9h0OgmLp415A<8KlvR`OH8U4auJFLmIF};z`DT z9Si9i@1DRNC<}(c5>OV{2D}xYG3TjxH&E%LvhpB^NP?>Av%+31qanRv=plV_V}Bgm zozP_+=V3|QB&0sfh@2IE`*wZb!KN)DHV)s_eYNxF`;eB?a@+hpHNNuRykVA)E4E8- zvTJ=*`@t(N2f33%s0*u02vq&OXtC;G$h%~eZFi7HB(nU+mT0u*j%IyUq^v1P`R?ah)vP zd%ag4>TJF}JJoIFa5(2TR3FtRg?w(IJ)e&;;gdqhs5ZXFGM7x<7A`$nxA^+eBM6-< zed--TDQM09o(JMaBxAg$H*M#euzbyiUpgeZzfTp=r8L(4=$N5oN)V^go0{4F&V z%mtp5+5hz*a@5oE5}kc=@B0^zRm)mxsYF&u;-mB90kH3 z2p8U8!9XBLD9Vfdn`r9QGag!vJ**%qfO*hUj~QvpMD4Q$c}z2EO(QVTSzDTB83gl) zg2NSL%IdJ=FpsDpZU3W9u8f7K(Q7??VxXol?_=sr9Q!@i3l1pE65 z;4oRj@w)ZGu`x*Q8TnKZvjUN@kz16hnl~i$6ip?a}4jUj*XpqCGW0(LX#cP6;O@alZVa<+`BO&ds2>{`upi~{SOL>63mCFxd zHJcH*NojyVqWP|j9@9)bzYEXI_rVe&k5f*j7{IK|H1SX-cIfI6#7%9JRL>}&uaFE~ zY2@B8J!kJ@9&Gr*Y8`xl%Qms8qaCp5-%&G-^hup)fvRS`ucsz3Ya6q2>jF z!L=2*AxKcmp@tV@X0#mpWdP#irb5ev8Y~0X;f<7M5ID-{$``XCzo2ds22z4|+nLlS z_Wcyu_>~hz`<>E{?n4gX5}IBB91a}Pl+nmUxvYglI`u~W;8!EykW>1L! zO!(bG>sk&W{7xhge-*x1&guUia0@GrY@QQ!3gA@+*}EJV*rg&5F^|CxYmiZGq-ROr z3(~~PU$AD>C-9<{ff8z`GH94y$EN~S;ixc+-^2oH#8q=avYWT9NKIH?kv{!#D;-QI z@lDEc>iLjZn?y2}IL{-O29b4#<4N^Nt^+elt(mDtxpFXmq?}^rg97c)Aq^O-S4{hS z{MsSFpuFU}L>2}gfzzp@#D-AQ!qdtNO?#i@#;|C94lrsJjqI^wf1%z2nwA$hVKYdp z5vHtxw-YlHi0^;y%e?uBZ`TB2EF-u#iTMp8^Sk6ynIn%nz?KZ+`f8U^$frd#2$a&IR4dgn z$#(PfpOA~TQ`P!KzyOIA647-IK%hCB-u}sB&JQ*~z?Yft5i}~F$5yDgu#!4~Fzy8# z&Y?8lL5VW)w=Q>E9*U&-(aq&uqL#-9tUOaJk%N((JoGaL!JgdwQ+LbzTxM(A`|+v! 
z{cSU|wnjnuk==~ga9(}q;Ro*lrfI{mwm>kSjCTN<^(0HxEC<*=!;hG!eInZ zbmD9VwKHh~>Y)v3U3$jQ#yZ<8{0;uf$zGUD0`~L6w;j8%RK}-a;60FF6ER!b`F+4l zz~1oSpvQ9{oPB8L7Lm83YviWILZNWv)qU99xQHZdL)Rd-|11v-f!l-I1PU44ERhIA zR(Cdz=D{b>%|lka)`p}`v0H|xf= z&g`C!=NFJ67$T`cGoxL8J2-$@M;PKfDk)Mv50yBWOI#s)r|y-)Kz!BERE`U`T}W1A zda~|C#_ylrw6f7oQdu|)rRM|_d5`!ig#iP6k(f2+~Y7XNBl=Zj@XpK6!p`B!0R{hs(!~`aXLdJCj|O%5Tq1&MXwmQ+^fohJJGFjhfE^*b*0v4GOYD(jF)>u;Ok4hRHG95+RUl1Q}=lHWH*nZiRO_#hQ~AU&++3qkN}G1`b; zA~+gxcY45_rH0Lviwq#Tx6wOAU@6#W;#lm zwie2(vtC&=Ij~T;I6I_lc-E4q#5W|Ru&5YI@|3?;z3x5G&-?=&pH^HHyhHG~A;jxr zv&4A9Z0zkgZ^TddxjPQa!N?7GkVIR9_D`NNU2(XyNDvkl?V-Eak+A(mHk|!ur+Hla zr9Q_T3x{1ubOv;s=IK3WLUu4Ho{{Wcy~iS*_PwEwVWJHfO3VUV$<2~Z;sll_foy)2=ZLn%c^$E9_?blzFJnK0G;ah`81Ou_8C}z_|fDxfC z>F+{#VXz&Hb0R?JfgRwxF(@b%X(rxmKiDbqS@cXipaMM6?_vgVInRDVQT%8#2#W** zSCCX|_f=Y8ZNTchCv+ANVR5f^4wCo+eZ@be`L~icKKVs_6zvj-UENFiq=sX~YW)Oi%y?A25 z#B3oiVJ^#QUi7KQh1t(gUVBewAzb0icLv$d8**HL?IOB@t=}5K3{faE@(HFtnDYCr z@nUzxX4ErTCykN|^)s4<>fNfRpCm;5I#XrMyry6l>m1!5eZ4MwQ3w}nBx8o{8iuRr zuWax!_hB?#GE2e}Si_sZDl4b|5l?eM8s$%y>q8wY4ds7%3wwK+<6U5lWh&m%lS!5N z)o`F*mktIUUm-g~oDVLZNl+$r!3b zc>{L{-T5|C@#aP5a7_ZPe_!blS4J6QpCKr)yg8U#)IA(=<@m)j6bO}JWm|eSCM3JS z&1msbl=&Wp8w`4y#ptJLR>?ruA~W_`MjFa;)&@?pdhilA&v^4-?UjjRFB;@C1IBw2 zGsGwo$5`Y%!KcyHvHANhh_TZjbh1&u_>L5JKfm(vLD1~kNXx;VV+~A$@xG4|Uiy22 z(^3Bk$ceH7H;HJ_U?~9(CW!OfWe>Ynkbx&b8E|N_AS!&`H9<;L&}zh<~+gMdSh5ED&U=LCY#9V*Q-Fz1Yl1Rtze zw^kq6y{cLB!)EA5gwG8?qZHCzP7I<4Cyd&NaFt7haAqv3T1TLTk8=_Ss#{@JMcC=Y z{j?Ih%mb@kWZ2B(tEtDV;FnNi6JdUh#K3S~VxLiK{BRYH-5ga1;p$6_S5Vsig_!}r z=J@CI_OlxTLmf%X&%~=gqJF3>=lUAsw54lqv}ajZ10PxiPLZ+v^DIJYY98Xo;Ql(> z1vaQ!5~72K*=jFd`j`s1{!*au38)~Eb|LVsZq)wA=N41ZK&xIyD%7Qu?l8H*bin}qmEr^a>O*+Jun zH(;S&Llr9wyV%(?pc(s$atWPyu+Z4|?N83>MJTph>?nVKJZG&)*BSAZTemFf&79?o zptHhsW1c9-tFq~i^RiQbjR-Fc0;HEEV|S4Xn0 zxY=P_0>%{;>T^4S#-`Y(JmJA7KlL83(wWNZ2lFY|$hAu*BMqLHzA^?s?dP9p?N(ND zEBj^WUZh6-z4aot^JFY*TQ_s$u;*^b@PW4{Bz-T-t1?t+QXdzc)$U&b?jQ0U$1M}4 zse)?3J?Bi;>;p$^D8S~gxu6()iTUP`GKNtcV-PudptRXBj0E`g{j7(pNgz*v2%70P 
z`?XMPB_@r+-dVFs11Yj3&#SF%3k0g_+UjOqpEkF{fELL`j(K)Fg9_`bIV=-Yw~xsa zQ(GfaD5MVc7=kc&dSTLJxFZb6jn~`8SDE-`ka8){>bBF=9If6E{_GXs7hSA_7w6(z zHfks0+@YCHq~arIwG66fe#*D!RGJ?oPMTqvCO0q6loQ%zzM^TL@{wg#3~0ece0}fg zx%2)cy@LahFx)CY#2Va1?`MUZ%eLR=F#IU9(@%+_A9@12YSW|P#^`hT>5 z{AU}=KSk>QqN4v}#lKSie{Dqhe^=4}Q#${vsB-|A|C?46=6`nz{!cPGD+@E*f05Cv zf2}91k05)Gs>!G(P&ZTs+e7aD5MLvevzkm<9v4nhQq-~#4c4kjNjmf%*FExekkj=8 zrc!D!Txjll`2E1z;rYg1*t+|E!=42HU0X@pB#LpBx=Xmr?)BxC8ahmWaJNnuBi@Y@ zh$7>K-r%%!)B6-?E3oieNSVzXy`p6ot0vU^PWbtkf4zJEv_>XIe)U9?1V-k~D2-4f zPvyWO@jmZc@8j%;E9njHZ-yGgiQ z)T?ADJv4mcjZd?0^s1Y8D(2Vk|d5?!B4U0A{d)E z$FV~uA-h+H9yeZiOruo3zQHco#Miy-mdpD3tLSFeg(4VKMwX0@g(U+YZjg1rFNlJO z5(!u#g}i93_9`*qYR507d`sbnF{oeAF-8sQxWUU@a=Qwp$PH5#-8$CR8ingf*1F@; z%tYA#gSK~$wJlx~Ew^p&vu)e9ZQHhWwr$(qXWO=I+qTi?)~kAT@9RpXI^F&6S~Dw| z$x426j*M@7=*pE09NHK#j=a$lY}k=k$JJy^doKZ=71==1%vhhMR=LK`+?<7RgvTYB z#VVb-_xtMeB}oImo(od6*y6Md=gp!BJ?KJFt3#gMiuG*owF*vNzn13HrH?O~`l1=C z`8Zd)7v*<0TUjLZfbj4eut{&bvtM9{NhSAO*U%g;xo z)`b(CUTqp)(rI(Nx|3znD*a4vB@!j`trH_nB$fLs>AcuZ5(g7EbD~tIaJV z?e4}d-TC42q95O#U`~*4n5nA1V1mr1wE#AA9M6vPOy(`=x==)93)5cE%Ub1>=Q@ivbH*^t<{!76lNMwd` zEC<1o(5f$EjxFtN!P~nXIoo}xTi1wNgU=pNFSP;XcEQv-M!b6EA?0CAj5qQxnZ1pPdyb-1 z4oc}u!fZ5E@h;}#Wj^+7yVgUO{y=3vPpl?;$2PqQi6U3sLD(Z9a;h1Qj5Z3aU%}%zshBmDm#|wQFw)WWFGvZ# z{vZujjrBttvw8}vQ|q+}w2Y;!hS~^TJi?6I3YZ8=zSYz4N zTWXFgpD-(_8W+Xm{eqW7oZ~{sNrIrws`2aO?-kH>2mR%Jr~O~*M!$V&6@UtGMFk9+ zprJ6dIqL7Xzf(OdJXQNK`*XSQ%Oj!Sa4qL!$;VO3y&%N}pcTPyL3~>$olY@dQkgAA z?i;U7Yw>~9$vi;c4x4~ylV}A>U}=ZKPx*rZ;DmSP(_e3WIax)3tz&Xzq_H3n03=+2 zK= z?fhE3L&R@`67elmm;xy%OwqcspP<7^uh}`~)DbX&@gXOa;0;>lrk^rQG7=5E0ApB8 zU|k+-nldmBbj%|`b>16iyMES3G^g9QqInxkeg`;aT|!v((!01M_ogpusI;7x7$h*f zw?o(`VJms!fh&UvPgReE$D0$|V+<}0KD5^E9a4$HT}b3A9wRrA)a3=Sf+L2auvIi; zkSK?-PLlwIqe1t;goy)N22JwGKQEvk;^=m)?bM`{5E>Yfh|HYhd^@)J@rfXW8k<A9*hI^;!q8An-2P7KI>xy{p7CCxvgzhbZaZxbw@fpN{R z`X`D__cx{(t61GOn=W_$^d8lA!Qa>}!;63J^yt#yhtqrD*tFSq4K+u$ZGQwqwJA)G z)>ot*iA(`)>XIP>ET`!L#ye?_K~FxxOkvKqVAIV9C0|Ph;oZ(d{Z1BTOdRMYgCdXj 
zmH1Sc>yFID#Me$D_ZE+)k=tPgz&GD_EXp?|Mx>#X0U_Yg08$q-hSq}(D(w@^`KKNZQ!2|OPWV}J92%ojr`=fjI*>0k)~WvkEv z1)4^X*};}H+V2BCVP%a9A-;(6f;=P&A=?C%Pl=!q`zJt{(Grz<++iR~d7aEPFSm?l zz8uP_SCeIu+&$`#;71)Z!uJ~%RoS1HN@$*J#K>O=uj%1iQwB)1oGAIB0o;;ahGVG{ zRwysJ2824FUk*#O=1Xv8m4rS zp+nEVr0azujQfdTE4wOwy!mBhNNVj1dD+O=4UhbD@I$DuK=S5D?c@o#RH!iuT6=&m zKnZPeo3!x<;{pMSLK0CWUxJiGesMMHd!hVb%f}P-*O-w^QdnSq)XB}R+C!Rx+#U`q zBBNpCD}dPAJ+*ai@C8Un+7~r!DNyFJAXXA_i;e*Wvw|5A>heDn^i{SD8%FZe9+K&f4+o0L7 z%vhKyg6auiERENtAx@C6bc%`11fnGFgJ=K_-Q-drgI}d!>%Z3Gt6yedkcP7k0s>Am zkPnzIBx#C+Q3H%bTBrzp^AR6qn($Y0E-AH7=`Lb@Q;E?pb`V5=*K03Y2o z$IvrAobx|bKo>kyAF^96ij_*c$&QxNFP9}wKXjhUN2B&cO)qa)ittSo={qg7FFp3x z63;^4LeD}?#>?nO9wgv7Rmrb1vk)6=r4$CJ2w)?yTrSe+?u{ZGaRE>WPB4@oo7c+w z4lh!j#%c6}1#qr9_ZIX=h=qoOzyU%kgA!U$CICu1^h_eu)UPMeLG>&^$l7|JTO`y2(=pCUYX4@+gD7bGZcajDU2{vwSO8!fY+nP`z&!3^|lAuC#`;sOC z7_1zbkX(i4okS=4l4@|XNH^I#>F8sJJ7U)@sI_$aayH^}K! zlb;+eNgsdD_~)m<1dz|d405(}HB|JM#yAH}0xefI>JTZtE;wDo#MfyTt z*ZU9!GYJw4J(VO<9wD86UWhi{7la$2S>6a)C&RZSafEItZ{1w&B;$EGPhDZ*8l<2G z*6GAMV~jZMn?7J^${qfgeQT^x1%4O_=CJ#4A+a2gMbyh%f)U{N=9w@&H8-odRV(hg8+sCFfzHGuh&JPEBmladP*U7aW*?Yps3=gPY0C6# zI2(IJn|RusL&A~x4{t?HZzT8;g$U~lc5M{q))}K_gc-iz7yGa1IT4#(Zxm@`9=(;G z6MG!Io5$|tHwn4Y7cSh=mA%;#0%cT^vAfh@WvjefCdEW-PQ(7C6>*iS*zp9Rc*&+|Wi;EM9Di8t2bqn!*1gva`i)e@jxS&<7gn>p zdQe2WiE5sp8+=?O`nVIW*4)*zf{Z2PU3E6K9P(OuEc%qozkyNJ(wnzC6 zk&wo9msbRY%PttTlWe%8QT!ogQ4xXC-*-F7#ON)b1@shtJusF$h0DK(uM-|K5egb4 zM(rIHDz&7a7(Fk2K}Q=(c>%Fe^#xQ!#PAO>p1Mbp ziA$E_effoZp~4BM+}3k>v#@S0EY5jsK0pTX^PvxHz-biiNWUgdK|>qYB9I*LEZ@&7 zMFiVt77_Fbdt{L%@a&>IxD^v?hjC(A#IuN%n;Ztq=v^8BBU+eS+g1?y=N6vtk|I0v z=xTo4dAMEY^{#`gwE<%3&GDD%Y$z2yj@r=?U4&=KjDMt~>ys=%9Heb($d^?b9Of$e z*L#G%Hp6e+(dfU@LQ;{@ zcgwI3H_&R^^9~<&i62&B)@kpF8k4Fgb~-4Rn#=ri>DK8iH++EPgtOD`*E$#Z0p00T zc%|*PLiOc>j=0vQn9KR)Uo?)kf2 zsU&*oBGip_P&9M35iJl-D|6{$E_gsZOBA8KIVTIOb;}LHg(=P`u|9=M??7$$X(;%k z6zgY7C-(0|z;T~vDJpyYHsT7GjHJd*|FV{!OV@*KS5LRzWQ(mAn-f;l#M(>>vd61( zXrys#dpNXOlSfutF%w~AVl%b2&WMR(5{quByN=p?LadTnszV5CTX*ES;PzXLqyniX 
zrNW?xqVkt-+^)$zB8^RFndMRT^rRciLsmN%hWQDog~kZvC6>}28%TX$P@zb!!mx>^ zLHIc3bR=4$9aNRIW{QIXYI?lV3bR9^v4>Suu+@31%mra&Bd|CttA(h8%dCoSlZ!UT zS%@o*>Gq)u8AulEbOAktgfr{{0vOqKDMi$+fo{Qxejj^Y^#s9)4RS@9#hSb}dP)-U zt!z|QI?0$_QHpXINp!bf&zd(|y6p*HZ^X#X4U6J*;~!*N^EkVbUk<;CN;n<;0)#yK zvQ--@!Y8;(Z4@hWg1}m5BV!OB<5XNW<-011s`|6;lfpzujF4QQSlD!@BFX&U6~qcN0}O8FUr#^ z#*>2F4rHSXPBJ645)d4>Fwbh)cvLMx$-yl01C3 z$YSJdHK?oshP$Asd9pxMkecg4+)Gw#ZV7$An|ruF5pYhRC&F8${EdO98_IL-uuBe* zgD3E3ZtV*3A~|BLoHA)My-#!UjO8Udtd90$+MHp6?4Q9l&gqA$#us&k^;&DUOMq1> z1}nnK6_eP)cNmxj3;i@S&iwdu7MQl~G{b6D!Jpnt4ca+yhJ`eR!OY>*_ zwkIA?%g9C=cnh-Tknt{t`ZoQjSdb1s+feCHKjtIAFogWXo8@nDHla-?z?bETkDO+94CT6qq08fXyxyz9lk?QzL0z4o9K%q)ZdkFf*-~U^lTx%ATC`_G5H`z9IxIuI#U^|pZ$=0N%3@_x{=p#iM$VM|_jAR1L zaqUL&(MY!yjHTz8=m+J&fM4h zm|?O?8#bVa)JTb0C)Dm1$t!E2!bB5a6`R?<#BoM~)0h%7Ax5u!?`ixBmg!`4 zqA>5i5Wf!3DWcy9ZZbIS;Um!WJw8ndMU39etmOO$wGuL4H4zCOJS?Qkl^mI?Psv($ zhaA`MiKBu#tRz>rmDH{9t_hV7q2i!iKdsBBM&?L{*>MBBCtiKyEyarR);3G(#OUK{ zz}s|k1$}y)6`5jOn5c4U(mUMb8qd`mshrZgUOXNkLQc}T`;r0SOxZqm7AS>6UP_jd z7qX08-h50Qf?Oc4Qx%^OF&Cr_AbeHDyS|DL!Ma#5Y4%V$C%yy8>FyupdtKdefYuDhk3h z6y3V?nr+KUOm5)}Ey9jDy%6q%k?JFokoE<1a?ubPhzutGb5Ok$#l;3Bt0fd6xdP@@ zAu!GZP}Bv~Nla)U0HIZ#p+1v!j`ShF@~gd(6eZHlN!RoUlXbb~_dG9r&q3snf8m`L zp%C{;+v$_TB<1GdxvFgjEC|{W_xBaLJwi>=N^-D3P<(d_5olG$+4@Ct+-36F;|-v> zueOg>#%GKtR?dur(ORe0+(jqU*kE))@p#UM%O zsNn7uY*{2Q9*&Yb7r9kPbIDLTg6Mv=TPhz$bxW6k8S3aPZ8P*i;v~IVa1|JhbMUhZ9^5}Df=`n=j&{t9~u>syBZX?LE{ z+eHnld#p3uUhgxZC2U351ki8rO{2o~8Veb}82UJ(c9la<3R`xcYJg*BDUaS*)wnc7 zd`Xmf$_+xZK0y)**uR?fL(2HG-p&p76AtA=hP&XRJl-h?DeU0$;}C4UMVh$r%?$Nj zcr$2(w4*?gEb+4jNZw>Hm~M!#$SdM<>Jap0e(}Mj(AZmnEc_jz>Wy1>)$AkRV4C9mnZh0kD=0HDmy=r5{;<}BX6mdn z)=r%##HdD%EmNvR({9f2$=3^!0lW7bhguMudIVDPM4v+g7<|Iu{aF8f)rsCqj1CGX z8L4IN0*;*PhFyIdp?Ik8vF=3wIffRM6N==|y> zEl@tl`~|rd5^w~jtZ4MOFM>Q+au!7FPfM#5qh8yQH6(lT23z2(a4PDMLyk74e;VGz zFMHxpi9JO~x%nps>PiOVom9c#Q=t?Uk_}#h+*OZ54^}?vK=}|~yxMLr)VQ_bI9||q zqOsj3g}q$bn`?Sz{3J5#HSTDEA?#2EWyy{$XbpdAe+*DUth}U^u~%> 
z5y}-CwuJ7pEK(`>aX%!N+zN)PiOL{C6HkjIT{vRULqZSRf@CU`Bl7|jpR&v5IN!_5 zCxtQ~%wZsI543#h;XM zR_|iAExatL&RP1g(;!$c#>W=Y?+fyB#tg@>ytLKKE^I)pYVgR+Br4JwRB>HwCEj;= z_#$18LJG`ZOj-R||MW;`O=ht{G6k&-5GQqRy)RGY%TFS|N>1dbA|WAQt%LUGE#PE{ zm84!7dvY+srQCwA(BPSPW}q7V08{E1;H^9+xlu?QsLdOQ){0In3QSCieM)jw1^Lmv zQNFUn$9*S^1eo@UdX({xv3GAV0bn%hYfDbtp6OPc2!RIb{*4Z0RM8iGxZbom@_5#k z-)JzIX8SyFmd4RPPmgif6C9cmo^dylD2=pxWG<}8?H*~S)NOO!$V99qwqUFa*})W{ zh(`EG&z|Lp7?b}|p{VvV?$`+vyvi@kmIqwbH`dNCR=o>65=#!J1lfzmKv8{%^v&~~ zR*mTbwCCT@)|30E z2nD388u7W0pTmx;!aY)*AxrjhQ?AdH%z;e@D~)VLm)xQB+x_4bDd z*ZtP4)Ob~8O==>WCPtzQo8}}m=tI^0ib3aF5q|VKRgd#&o+vC}z;lev_mhaX;WbM@ zwtU$Ju*h}^Vyy-JS|B0yUZ|cqYZ=xm_LUVe(#_P8X$u-(OdO1ZZ*3DOUCHg)!(R6Z z)~Dm1zS34osZFVl7C+Lm5j!w44jRR)a;nqGYRh2`=Cm~6uoTF}c$lIe?|M_Oj`&#H z8u1>c6=e&N)25%4)8bf?@Ho{{TS5!EhNhM^x@l z@fKMuF)Mf2PAgxBoGn==@$=XoBX4TgAgEu|oLhOKxm2oSgVt-uCn{Jz&awr%UEK;D zZ)sqKERuaA zAK(4!7~3-1>3BDyoLa{sy}#|p{cTFptV{3p%yK*6{Jxd#JEqF1}e1?teR*z3%8qS|OtzR%ZmBaEP$1R28@;jD17TK~(;p8_4|cg1(C>%xNG zA$}2mzq|rqZ1o^18KaL}9DFcPq_*JXV`&Zm2LHYXpI1};T}_6FC&UR>7P^&U4mlEy zSqt|rS&8lUya9r4MKIyR(V*W9-GMMesg%>=ZL+_cv&15x`7h08pr{0-``&FZM8%`5 zkOkldU$WYB%Un;pi}X=?C5>a9fK(lGpmw)eyZiK3`IM)bPWseFE%N%u4XF39!&C%r zM;0%Z7>>yqx7_LaSi)Z(rf@11ZuTf^u_G_MA@*&j6qi?jdv)KQ-Ym*@V;6k_JTsj~ zw$zGpz$fVgqQMQIYsv#W2441?Fmu*j85y13d<5f~_IteEjQc*FvRtr!*RXTWq3*bY z;z(g97`Ponv26* zen0vFn6ICw{4em|KX-)xNBHmGQo;Xd3;zfH`;SrPfBg7As2}4$OJ@IXjj;cKd;b`$ z{;z|?{}mPdSN!+ShT4CN|Cm_*&qX^Mwpe0FKDjlgsi5S&8I#Eakgj-_>K%9e;mD!> z=waw!B8o`Jy2kO6mtPYywLc9f$|`O%^2EixA?!Yt)z#G#XXoFi-EMd~9EU&cOj_~+1?IJ9s6=Pnc03eB*0eAh>2*D$ZVGcYjIGt zkd?-ZBSvLnq#6j4}8YqH`b4AK9QUxch{q7D$&7pENUGE)OT6$Iu$V@O+&*_sN+ znRdFg*l;YSbiZb*sHc`|oeCb)@l(0pl{@@?IClInaAfP!_~eZ3-%%n!^bFTYf=q}Z zR@u)7R*HJ7&7I_ZJTHSiQs#^G&NtTc>#VD#<41MoS2!7D6{#U+$qTFf4$=l^qyy*n~B?=kV*Zs+{6xCWnLh z3&_S`;cB|v(fSR`#YoO(?=9pitQnmL?>UEb&PhaK235eICGzP>vC;0@m2;suIhQ`0 zrkhDjsrZ)dv6*vWH0p*0w@H#>XvXwrsLw8R1%@F8RX{ zi&wyU$9B_|(y&8olTly4-)40t_6549i3z>M%T;c 
zuBddsgPKoBNs>)`%SoEF(R1ABzy_nVE&XzHGS#ldHgMF5>+W%h!)7KS5s4DxsXbze zJ1Ne_4O8$O2&%Gju4nde?8xzY8y-!~wysc}JYvicvIN0oW)I+Gj?FQ9Y&X3jW$N10 zAa~_@KGNhq(sTamgGsWefhhYA{+Y48y^t%!$1HZ)4L|@17x`Yz+5zR|{IRPqTiv=? zP_jUBM}I(zhkh`85X;pS`>Me2zcu!oMPTWvfP8q_4Z+!6q@JaDtWjeSaiS)K zTW8Bb+ommZ8QP}S#%1O-3BlH8cxTHr3o+0f>%10aB~Px=71%rgs+SEQ>O6a)J4c4f zHx4)E)ysFybFcU$W>@^+Yu0gUyl*OdUxb1s6+f|vU-keBc_-*!F(4=RIHS2r48p_B zp!V}=mst^xWQNGm5=DZ8(f5;_uXlC1aF1WcEwewc%MrPC(y6Dq1u!=x3p-dG)bH2} zUKJU7A|&>Z|7o zu!~XvobSQR!TjZ^_yGy9c9|b%tcFBf@G&gy6$t=$@}~_}h!H3wn2FG0h%bPA=jGBD z6+s|p5|i2>X6m2etppVyhks3T$jAcjGvXBwScieSENTPRy=LR$H=o`B-u**+a)%yz zIEmw_ENz@1#d{K;NT;j>wa#tEAD~&J@7?VfsyVMb(c=7#W#yt{_vFPS}bF}HbOFhwy0(r^+*P_;mM3T`v8+%&q$gMZIKXLGu^GtF!gRRNvd5 z9?dHBjm)=8;KKFzuh5a2@Y{}FYA%2n+}G*kU`g0dh|Lw-E(d!Nk4zX|8tUPCPrPt$ zVEuG1cEnc?g=t!8@vT>|GDDW!sutUkGJ?BZ+xl+Pvb+L`sw80aa;CE0l}Rpp7$>ow zhSD2tx|neu{Bij@@cfSn9O>Qh7>HW`2h1jfHQ@TtytcVNLJ%3-Z}My^)X>_tlYB69 z9G^(s1+q)gi?O&`$hcY^vyf>DtokcriB6SE4dZB|F5ujiNXtxif}Ou+IvouSvRdMq zTG|qP21Lg}HmCH^Gmmf$DteWhT^m?++7@UdpJ@A&0{glx+jph%)##p#Xz3;2um%*l zKDu&V>0%{GmIXwW^@BuT>#*Zp>O0M-2f(QzmYhJo%QRu(QB`EvnjJOE)}2?E*;!aE zvQ=&+R9FO^2BN~fNtIR>E&O?l?JA3=;FA(&h2>}tEizTuM?xL;6~&p#Cw~Ut8f7>h zfQfQ5+}9ey5ibEJFSUY?7vbK1$X8R*nZ?;&KQQOvZEvv+vD*7$tE9ndX0Q|3BRpYx z+x-ERn~bv@KSf{ldI*46@!i)j0;RzubHyxlc zHH75=c~6HLW>+E>8L_Gd68^ zVbuj&ly55p@raX-tSM17!-rV@x zhV=j^=Q%xXYP64Z_1w{@s~^K5WSFQJdT#49)&=rYN-Jt*y?lZ%=0fxAt);+$(5$vM zJln_y>B6wMG8k&&wU1Qn0ii}a<`iH`B=ymi2VE!TG`R`|% z?H@6bv>TJ=gq34*kB>+HO6iwX0G3cGPSsf9jyuAjJs?WZ;7LiPg;jZATKFYsWu7je>%}S*0?kdoBY$d9EYD-j2ckViAEA@6FjwUeJz%Frh!0Znql4zQ67|bLsE9ahOA-I z%K+|YOoBK`U2Gj>j%RHL<`8x*>a;>&_l1l`I)Wurv0EN&_WH->@xL1_7^B8GRq+!Qa)L?IU419TVicpisElsIY$;4Ay`St`$CRl?^=$cFf>k=&$I7GVEfj z^8g`vq(}ddUhoE;pTIu+T2Wad7n3if{~jcvm?7RNH(;FYy17j(PLq@-0&BXWMT_e=u5#xQr2D)8Cu^y7oN{`+c^+Ak}e&P?%xxe24W2URT_d7L z9J<*By7+;`Mkw0~7QO7&V_Zk*-YsB`<0+8vYpv~HGUST2>W5T<>FGUUp*>uJMrmE2 z_M{CbfI*)ZhI)s5*_$z179~RgpBJEzagqSN?wQDuq7;?Zq>_2NY$diTM7{^+v0|`r 
zHfMs`joRJ_6C!B75!DaXv(n$(cK&lp4j1RlT_o2MhwZ@@53I5@Rrb3zr#Eh<1t8(n z2$rliST+y69$t%sE)@l95;o^jU%10dSS7$rn;wwzo(dz$r9*W;(&bzi;2!v9T|o4L z4t-+ix}gW36VQH80A;|Jp4|Y%ApLH6n)XV#(g1S{2D+Ed1}O?3@A&(|)TtMFn^uT* z(B3{qI}5k(QW`SPwPQ`LxU3*m*)Xz8q+Akv)-_-+~kf?3+59%O6J9y6+e`Dj+^s1cjA z`NXvWl@X}icV}-cu7pC=BOroReP;;A{&sE9asK~0)P2FaDi6daAL@{1a^YRLwtH-)OG+9HOfy<`e21e zmT+0mcL9ZT0%X-=bvv_{7?Qauws+$9iO3c7gUCYmNN*S|V(RvypTVzi?ZnB+u_gg&IzC@ZudH&ui+RZ z*Xv`GwnJc`oHLOn9@jYc8IllDQz-4ZIYYS1h56z6p~seF|uHLWrQru6(4?-4)t*j|-1H2UaJDE0@pMY#Z} zm{i0){ZzAWZ_{#kVWYGfLfo%Q5Ok};?qPq;c`}Ox*cpEokJ`vSk{S1m#NC2i92659 z7+T*vF!ZAbcl)shU24C_5C{ZI@4{vMb4%6%7z`&UB`!QAyfBN8t~~U`t7#`#25P_7 zW!1vNsXkwL1Y7iU>5^)oRF`_$=?8@&wPj5%;> zcsr7h^tu+pzv}x|Dn6k@Wyy1R=HXc^&`?eDNGN%?4oJ**J>YBb){CyYH~@cFu4)$A zsRUTFkRFh@neN?|`A+G@heQeIqg+(a>du3YweL*nsa)Q`lVD%JrvuWeR%~iw>CMQ^ zL-~=g(^lTjS!{w|=|S7ZiRdKI7q`kVJV)eP)01`y{3cB`R^*^FE_yU~ z{XS2%9P>nWN?G={Ehy;UTgr|tl<(hw+73ADBD%(R&>li9VS^^CAqvXzc5&9#Dtquv zI`5n_dZ#W>Sorfi5WDG4a&8o{p%1yI7ur5lhJAN9*l5p2U< z(9pRREPa<5EqMcAOoS2vivBU4YywC~C!^$gYX#koPW!|m^rP}4)$8j#Q_aYYV@<}6 z*BbptVEC@fiQM(%FNtP`-;!o%s}RaI!tKR>`~pxzyHbz$Y^ z2ma%_a`L~RMgN?r`ybJwf8QJRpS0+I4HW)o^^Wl$SNwk^i~T>KMgM|`nEqLU`|l7D z3p?Y#ruT;aruVSK68;)uWn%P#)-PZC9`oz#5I2*S){zlD>Ne!~^%VVJfDr&A2X9h* zlRuXn=^E(20U*5Gj7E@!)m&6mRgZY6wDdONuOlnDzI$w-R@8#MvNru%lRwTgWzMSE zs*`?LS7yYvu3b&i+%F=$PhWe!caeLY11};NZnPKNu>XcWWxfY4d`B~@>^M7HQ$sE? 
zS#O4$9ei?IL>8V;EpFRy93RJsN}4=^8Jhf7M!B*f?B@xkqZUPTV?`Ed3Ct1pqo$OqHhHAPNmu&eBd^_YqYKn^X%ulG@MN=2;w%ZIVbA1{y3`r z9B*s;H*!m_q;lK%rIEC|!Z%#rX~HYv9vGN*btfW2BR7epebZMG-njkEZvsnubWZOZ zoOAV2OGmlub`*wc7>eMUkcx#btn*2<_$8>hp*M-y=bjzAW?s)D9ygs7&+ny8sAg$V$+ zHBvK_{t_rJ*f(aa(NS5BYc6Smo20As#;%x`<8KI{mQ*4b#;q1d@D*P|@GnY-7uq7j zrxHibXg6M#LIwa*obTgtFLDX6Z3!5JV&55DCuTk*q3YfE*jGxn*FU6AP>FV5EBejqi803uADfC&nB!tY0 zg6c)}Bw&66sj&;jLqQr1A^*^u|asQ5+cpAHYG~}QF2&>9@3yfF5XN*^tGQMdg(I{UNB)u(q8V^+Br)&?^zQE6g+JQeoY44?81f83| z-e2ICLq?&sPh%#aZofK%pKe<65skT9a;sk&^eJ+nYTuVc=bFcEKS4Trw`|J;0^v3L zcmP^}I$tZ^t?uQ^xqHFCa=Za#wfU;pb^+#;cgA+S-acxx9t|`Nkds^NE0vx#t{e3 z6Mi>t^dqo?5fJ+ZEBtc$b(aHj!j6>Cvv0S*GH`d}f_qsOU3lBOm%$^u3nPmXA_rh# z$USt*g;pG-R!u|28Jn*`kEUrVA(q*~40%bRb&E?##_p}6aHyiUv^?nFdz^yWSkkRm z08AjYkecqARC`G?C9j1sSQHwG6of}O2P6;#S7wtIGzkp4?d*F{Fi*_n*;oXuN|Hx| z7HGk4i3Adf=(rDPMlh>Xy$$)8 z!xevwCEuN+gw@;-{B^uW)g+`;4!dx%4!zqPd?{NpLgL-T1kq53uDjgW1Z2nFaWJeng&>8aR& zJ{W*Bkm>U3_b{4{>v1f%$Xu;vm%(Zx{!*l|FuX+e7J|@0N5#OJy|WX)&ca^3!s^6Q zxUROIv>P$wRM}%Yw_?oPx{_NRICrF zhT-P#!wM|`?xIX0&A z&XXpJli=w`g1S$)d!(j%+tSja1I6Y3%gb})Fx>>bG$I*txXom^j*HD{Iw{i@EH&@Z zy<wBXUE<=DO9?>sR1j-t#UWr}s<|4r5Ff0Or2Dzs& zH(E{o++L$P#fN7_LTO9km-PT+*c*+v&q=Z8!Xf}3xGt=}lk`FVyVe#(SIM@u2|WQR&|mgjUHzYMvFkdu@&@17oK z!gHv?^-Ul+tye1|KQUy$nUn9%t9^JqgAT{)$0~y~Fev@(-R8Vff{ziaSO~&YRM6$3 zJFwZS$%rrNK1m-umr_Q(ptLa9*h`~Jc(29R&ZS~(MNU24-dKF1HPO>S$GMY|8;3<7 z_QqtBK+vQ_SQyNWA`ZMj4MpNg|CHP*=1yb@nqg)wioyFDAWvmZxS(RP&TIy;ozV3% zjc`B48DLq*s(wFdjE;8*7CO)XrY6%*dM@Trq0JjkMVxmeu<_r7-IG5>-X~0kE8f9la_A^os>*m*2OTt zH2DQ9oUJWinR*@`^Jo-aTUaS!)hz~uLH;r8=xR=Pd;UOTW6CB5z8kczfU82r_M2m&a#9scuoMWbk$&vv+nfL+`xMSBTT zgrP41hb`TyjKSME{o(sF#pmzHy?4Ph+cn5z_5KSZv0b z<`?XBlg&@azY*FBFwL+?5$@7RTVyR|{I|v=MGL;&;{pIssJ_~>s^I@VsgqZlcM?W2 z-V6}fD6ef^Hb+w_-2i{eAbcsdqn2=;q;@}=+5vw{0ApQZhXSpkBr2NaE;Ch&e`Zke z2to7g@r*$OqUTt5oHtX{-(vTDe|#SOas1R_r|b7Ys%T6XMpPlBB?QlZwt++zIFI>B zEw^>LAK2ir1@0Gbzy)E3qY*7H0)Z}y->=rsl80ZP4fnVUmkNq?9Q4Z&~`uZ7^a(m4({Mk)1ygs9Tt40@TG*&S9H{kOA<_UMrItp+Sah3_|HVrcF)UeM+!dqU&5b 
z;x|y2RlJ;>wH0Qu&xE+L6mBz+)o)i8#oDV?H$m~DBz$$Ed0J6Z*2v~r)1|14dbmMZ z&!od{;6dg7kC^|9xwj0cW69QrakrpBvvGGPxJz&k?(XguoZtj^cPF^JySuvwC%ij3 z_spERGxxsp-tYeSXxQxT>RP4URnJqa))Fs+Q7FR3iYIGh5E)!FfX#%rWJ1xx-eNbe zyO$<>k$C=Gj{&BXKkDYz4?fQ-xtuWzM}To1A^5m)p~gp($!1+2L{F-!yLIj85OcME zC?6LMuRUm%-(BA4a`LQJiJ~Y8GpoV{`CXGxr%s1OKu?-(mN-{%|8!Z{=iZ_+`ffjv zac152jZ7tpfyPTXV(2h-A(gNhLb38gL-=zaA48bi6iI_-#R4B6PopeWeT%dFK)?J9$oHWoW#o(H)i*t%Zm}O#^{yI zYLlj;o-^9+$t7wQ?9V+K&0ju}ut!+l6rfm#YGdU3_)VriH1Ot ztBjw}6~X+nbt=yTD7D5Paa|?FyjFhCejcd~UxOK2(K{Nn-b61OTUdqAmqE9OUq~|U z#t1Qjotgmj+9U?FOR@P?<(8y`z)lN1TTmV`9x;_Aki}jX-^$}AlqAmC_NFK;KJ|m! zJ{L+0w|!4^w_lb?z<@jaR{}6Rbp}MOh)F&i5N4r#zi`!KLXj*+oaH>4n0` zh7=id+lJhvO$B@4Q=lcbD0zbn>o=JWmAuJv7uoeZNeAdB_xQW&9uB~f9Pi+w zP*y**oxH)9kZ+8Fou8L$x;mA`MJK;db)@fuytujnp^a*dn z{KP2bI~rfz?iaE2GP@xBDk`E2M%v7C)gtkL_Tc5^c-`jH6%ip-4EQ7PUIEB5yni)$b6I(}kAHU2>D8eSpwQA_{SIipM>WWuL3tJ3|-0kpBGlhbG4AA{R z%GVwsU6n9}pNVH0m-pQyx7~&koJ99anH{!S&9v$}P^%$8*^eI~l47f{DLKZm8Hr8V zf^ZmJ1nO9Lb{VN#^$cc&Jvka`-SoqKFKv-fE2vc0)GosCq&9I^0bH*W|ncbn} z4cVzp!QjX<`HRNq_t?y3J?@X>m8sjQ5n?BWZNhiAi4B{qZ=Zix>xH~$*c*o<+ofNN zWZv7Bx|cSgFEnZ#y%g1p%cS!t?`7tgk^O2^8Sa6;%$u^-tg*77JF|e)g|3BVo^GVDz@li0nA}J#nmT=L$MOZ0pm1E}1)X)-hI}5BM%S+J(GNvNO9$_sE+90)03? 
zg-(`^+sVulA)w~(OfRgh>;{J<=-@IPixCJmv)AenTn{CPTryInKI1teug1;Fwb#A2`=eKo5B7Vl8d~)fE4v#+U%a2r zBt7vC`S);EoMi!#3R2u%b07~u?0f}U&Vmp}jNz6+2 zQM1h$amQUfFl$W|#InkVP9$-CGQdcT0xd<$4_o~x%;C&t_3i;{7?# ztC;1ZYes>}VC2MZRoap6xkUTB$~0J@*TCf2iC%@*Fh%gx$^5QFTTorAIg-4*R4@|J+f32N4i8`H=kysb3-bD{{Eqgv!*PZ0 z)V9jc5I{$|x82Wg&h_$AQI+P&R>s_z9q|5`k6Rs}i&bgw|C!_m`ndf&BmeJ^{LH^B z?kW65CG(itJdp($M-XH zu>F%tXe{pccR-cU&VbThWr^IT2{{P@LkqM@sc*xmTHYZFRCF#y_(tLt-!w-gsXmxJ z2iwcA`(lq@H&7?S-hzCEdRp%^xruwUoc4d)Q#TwYVftSG*he!~dof>~TrN%XHedUk zyT6hJf_Bgun+%qOIXxW5^y!nuid#~?M5qkvzDRNk_t0c$x8!GL#s2pQx|1pTqt$(C z?dW*A731irMP;LGxfq;c&`rluOgo9dFh(qn>0#{(vJQZ%M|CM$R?*QwDQ?$q;Nl zb!auhyD7=j2D4P>q!c+*C2_y+&xIw|A(WRkkCN^x|5U^5}@VD!_tJ#=j4W@y4jN2m2?u$!kb(liPN%D$pm^w)Oa zKGXLmj6wXo2!$Cd?ur{_XG2a((D$+xAW|QMZ`vH50Txyt2qJHKD!{o1dvgG8g{Z`z zxV?SVy_FdJt&S|dojf_$l6tJco}*k61-?_Pv1=!XZDho%+(_?8-!!@IA6`E@#pyoi z(<`LIu#*9QMbTo~$M1b<33V&;Kc73U62y?djdlVnQY@snF<6k8!N9%0+lq;aCdU zF^S!c%i9+{BqQYOiAGQ#j^Qn2+v9EGd5FsZU038HX%PEhE`mI)#!{m{Rb7Ktkeg=S zVP}xCS!m@g1mhg(BpcrQ#{zsPT)j~2OJ*ii8FE|=l4&ygD>t`ZGt~55#!1kRtvTsw z1CHDNYN?lOIXTP3PTv5KgP1g!o>E|a6+c&-8a(VfS+5Tp7pfIh)fVom*I%ldJ^9Ql zkV05OeDSe~wGU}QqHy};2`1f)=iQDgQ|R$nyoULWn2nCMLgR zwa*NsNsN2N+L&%7CD<%Ch-K_@7xY$>59Ko8_h@j;_bKCiGm|0ns|>N-hMUqNa1+hN zE$o&ihs76GAHrtpWt;^C8<6?QB&U!CQomfu%) z^uXX{Vw&mG5FQ5DhEwb&h@8?$D}tToyTqnFpKtq{-t(bCpY!xoc4gc&b{WJWOM0cn z!#T>rat;}|LNNHq1i5;zLn!b%?Iwva&Y`^J$&oT>fPa>djb7OFx=8l1%Y`BYq9UzA zbm!3Z;6AzbcD-#dj5~%abw#=nvXd{9p6uybLRSXNC1d^Hf10NHWD=Tz*1f`cpm2CF zL`!{Er$~Zlblf_Q7M^bD+^s8Ja-1pq)_j{;kes?yXN3#@U`kOLJ^NH({7EjyY0jI~ z8ce(#F)z_k7OTF=)eJCNgUGBwi)pRdIPXo-0O@?M}XR}qYO zAlk~0Wcn-IXF(huE^Lk@it&ET$r2CBnASs>jQAc>D{0FmnnTd~cPh6Y+sTGhn6&;# ztSxHcb%)zvzJz_1sN}wpPheZ9j9lWKeKNL6*f+NU@;fr@_1=6b>}Lh-S|99HI8M`y zyUO2jcSgTjub1eSotLR4jZgxrEPr71_SnAG`d)9-tlzXF#2dOo`Dpu|H3YcoA~1j1 zMQfmi4n~85Cyiycf-1rXMD!=kdI@nr(^%)+_132|Ilv@l~M znVh;fReXv=q21Rl^-al3)z|kSr1FE-=+>H+%<`l2iLwArgD}>`s^vG+*Ia0EggE3jdrXaP2 zRe@89l3>hQZ}YfROm>vGoQ&K5_;kcwLOVV_UMlgPU@pB=%&nBviqkks=IXvW(GCVe 
z5VwC*1t*W(6TRPv(NJTXcgqq}D2Vhtj;@u3%EXQ;Ohn!@KY{Gp2oI#@7jx4^iw)Mxp_keZyyceEfaq}0%U7XB0?J79N`)5iJ8u?s9}jwI`Dahwx`l&2Xleb)KY! zyJhOW$=G%$g?b7vO`eL}s$mv#Rj#{VOCzLb7V%8N=8oiCYzohwJ}LyH^~={{oich* zzR>I<;f&I5>2d=S9Amfg@M9vkbl*V%ydioFI4lGK&6h?7x|8dpDOoUJ6q)IB;RBeP zR!L%u0pd%wCi_Gxv9I?uB~L2I6wDIlbH^euarq-zYG%{C?0OO^z8i63i@|d10ZoSC zG?PdZ9n9_-(9gSTi$uxGnNtl@8no8S%PDP-b!KxQ;{_w?Yc&o4Pe4633;0UZ(>ncd zlpCLMvxk*#74gBCeAXPt6kG^22j~%~5KZ!$r~_cNlX zeV#LyKizxO1PRr#(m;@0$&e3VNP>b-S_vqjHX#a}=ZTJ}+aWbX0i{q=!uMOka35P4xfZAi+A$A1jp~rz1OQ1^O=SG{L*5n9|nWS#)tx*JRN2g=x=t^ z&6Yc+O<~$GMdplV28B_$EbK`I#l{#2S;r=y+9o&Yor~d!hHqxhbHCJEw>sQ)x7Z8f zLZ-DO<}Y9@2S1jE^ZGbO=Dplfqn_}y%=#Ul{jh>VWx^TJy`Y+3h$br2vqzy;VxpOi zwW>foojQJY7fdymJxwp^tC|-zF9uJ%L&H@um@YyQI6?#r$}`R99yyx``gyx=K82H$Yhz^@w9RHzVIsz0iPLg&J> zG-9=gE`?$FcN9rKgIKFV!K+1hXR2M>5+rVRe-DXXaLlQeMTX8Eny92-Z)Ky`Zadx% z_QS>OmQz08;$>vWMrm_!lk6FNo#>Oe@1ANHx!ib^()hZoyu$5)b6U^JGVc$J<%7wM zuzvm4ad=VP5zL=FpW*7ovjR{(r7)L3UE{#78hinLfnUri9cjfd0zoJ!M;LU2&IARL z47wN&O=da!cNoeypD|FRPKC-{9`GPo2Im0A49KMNYJo7#73NC8OeV8Os)kWi#}cR@ z_qvqd+E?C9X(0aa==B$4K^Ji1Ea^AW6jjX6tbva7_ z+Gn@xD{9ilKC5l;KVC)nVZhwYN}?GwtbrwhIH)&>V6l1x5^nzN2)I{8qo-U^3nfz5 zrF?Cg{V+`0zC@S>YxLoX8i7R))bpu~$DIg-auY&>ESY&_fhJHs%R)NZ$$S;+`@IA} zUuvnz5egM`NkT!kNQg8k*>0c%SV1={_?POU-qdw;Smy&PkBo%JvWl6 z8xgYu+l6p0KVs^s|f^a|@C7kH_yq^#1`odbqTONXj<_r@f z19Qnlfsx|IS9iL9`szCE36{IfmG|vNo4}VZU9c8n!y7Xjf@RC4PtNu{TD5fysQ}M1IVvCRo;UJ9L16z$njZ^0Nj^MvN30p6QOQbbdb0YZOU50@sf826|eX3?nrFG zt0xg84YXEi?0w#B`C3h!@abG-^Q18t#@o#`vBvr!;2UtK{%)%a(X zQI)=3W_8{53LQc>y^ANyw4oYhcTD5fo3)$XJvf6RHVx1Guqi5@O-E0R-<5R8^huVH z=-Vp|;uCV*eRVv9KpgT)JScU?)m{BPZ<*i8xhx(*N+$`)gO}-xz{_++q4R65$_-gMW7~DKjVYKPn35e_r`b91N&# z#-vUwIDC*ABmL==+6he zZNv4v=(xY|(!SDBCutu??XY>p?YOG!;L7v1yDWa;p`kNbi)z8cTnCrhI`L#nf2?n) z1v^WqzcGt7VfD5Atl|{;I$eAk!MUvcz?E(+dE7_SjvkZd?<^ z0^dh~+0tAFL2Im>6q#zzSLaq?i|%XxFs(w(Uk3jclBy@&$5(%%Q9TjBzu5fAVYRS` zIx;%;{&Y-H(`;3$#sK_&MZzdvy~%cELzqgTF9y8BPszp``ik(wmnITX?N9Jw19BCp 
z6CHpRk8BJ-4LvDDxOq2`=E~PZm-o`KBtKl>GmUHZRXWbC&vZ7{Frp_~Jou9N##(ZH ze9UFfZX5ZmFmDW=`@F9fy-YV`*4JI%p6cIroG)a9oqaB4qcq-FHt=3MmS5-joP&3q z->&-CUQavP17ti^s4LP57AkPysxt7|C2L6HdW6Va2b4P89~6+^kog{aob3Vx_&ASD z$}{i!s~vJD?YvLx>gx$?z}b24^R&__f~OOo03Hsx(H@3H<{^HowQg6fK^cG8mFLsE6(^PAwT~fQym<% z47Np4hIw?T=e;53Sd|>%rejhuKd)6RSPjm?6wX|jBI1lLANEXKV4!=QsouC6OKEnH z^7`f*Xq*yS#$F7Xv@b&&XuZT%Ak%26ZpfkN-35F}Ad(KL<87&)7?0I7RbmE0m<}oWHbMqq!uYEnd?Kb=5H8FZv?gjHZy%A%#+x1sp);%-+6|#TlfTwYL?sG@ZI-r7f8@(=s7RVm!|W-OT3sY35*g|6#vW)^y4;rKeA9mk2|G zQu3)x@vN`gB{hq?^lX1iSnGQKXkDGE-5~l=r1$wDAfAvMfvEFBh zFED$}9-bPoov&y`&{&Z$cwc*^k$x_gYvdobJZ5V>;R_9D^urBAW)l{@NY39A<}-j? zwKdmbHbJ;IQc-=2E){-~TyhZS{lu-UYhcH^wr_A* zXwRE~&8|+@^pUI_`HTt=TV9T(#RTdDD3@ZPxPvKv71vRrZ9gd( zXdxVySsJaevUlU=)#w>(2)npg$VALDoj!S@}L zuF@4lD*qbJ?m20ZhN%eUU^=2#^ii(=OKXE%pqd?&er{D@o>#Q#fz$L(JehHj(r~{m zSsD!uy8Z}fCA^)s#4xX;U82exQ8HLl;#tKSjmBg`{L>*}h7eI73!JFTVGl*ss4aK1 z8HDUKGC7?aT7+T$C$dV^h|3I1LgTp2Br6C@gmlhS07zEN9>}CbtEQi`G(0i&q$mx! zghGerwMq|-RU_7#A|QhnPmF7~2=psh?^|*R(vPaVVg)<|^al5t`kZw)^V>_byhj+V z4ua{e5NZGAag?^^vx#rDWR;yI(Y8M#hS4PbtA@+HeB;$YncUGn8AnUo6UkmvhI?5O zi;MCrAK3CiNJzL&3%Zf=hef0q_+8W%|Dd?%(D@h<7=W}#td>vFrCuVpDyNP;VK5pI zoPg94Ry_lGd4n(p&~;mn7M3@vV7x7pZ1H$7t2SxIF7n=&-9=Y6TE%O+*@t|0K+Sb* zrQ^MtnSdPdZWiLa6^b>ip9FPSo;0H2YeHkSl|k0wY>esX`*6P|M~dv{K>Z^KlPaqs z`bf*v5&7>3cM2&&D+LDnV^4u_S8175Ry2iCRBaRJVHpmj6P%`{*mZtM+tF|3daq9q zMBVbe&1cp|k!>FD?8vc#3-LEv>r5ZCQY=|J=usno_0uF^h1b{cy54!JXojy0~<{AkTO zr3RC@ya5|e+h@%&6h7V#!pTBHBEfeD<*9J&g};0M6$%AOp2I7$IhG&G)pA&>X|vK> z$W5LUE>uD^mr+i}@I`5SB9DwOZQerqnCU@6N|c7O>0YNH^-f!x9S?rgb-ZzQ03o4+ zH+fvVJciepB&i=~Sr`Q~?1qgjs79wK8Z%m0nR)l)jv*Jdp!~3zLsk@NWm*E5M3Yt*b{`hekbb(E)H5abhWE z{45Ca52seA;xO|o7n|eIw1JxGv*Wr3hKV7Aq@->HSTP$oW~di~>B83UN!_1P0E)8i z&ZfFpSa3&>g~@{RAz}q6%K7W)=FG7K7^K)}`uFe{Pj!@|aynwY)4^_T$sL`Qo?WV( z#$H*^R>L`*?H~e7-g1g(8+oUquIas#(pN$ zjc>r=wn{A@F~u~IyGJlOh>#&4%FZeict%HVl2$Z|1o-p&n!!nlGbD6k)3PRmDp8w6 z^Q=LOpQVPI%0cy>lv)uf3vgw-!eEOX&U}#P+I>hSRNTB#p$+V#JdR}#fAP>8y}G$u 
z+bVqA4oTcBwSyjK6p4(9ynG8=&VqW=P*U;N5w17eS-{s^$*!LoyS8=6C@m<8xpPmr zK61@dH50PKY@0Tor8Y5`&HMefYLkpPi~K=lB#LKS!#SvI6_JAedq_@Z*(aGTRb%mx z2m#G4$o5tF+1u_UUa)vXR@9>?wG1ASvYT0S>Q`ibCN~Hl$VPf$Kz$dbWV4{~9MIo2 zCRYm;L{qMtDA=AtkUq#7X(uy0oQp9g>T<-hHDw(B;c`1lU3zz)#}8u#=l!xY69~PVX-c+ z$k2g>sPbu4Rdb1yJ=jw2zC(QI_TJL=8`k4K#oC8#wsNC8{cs0+uow&u;m?-~L3uJH zfEW#3y^5EqrkKa3Vm-evhvE3ia2+$n97bj|FRwtHr+;rB{Ys>hax7c&FsVqOLyj;F zjjy)p*nF29OrExG+K1KK`N8SEH(G#Xy689q8jQ059ZusVYHjd+C;Zs{`%eqkdD@V3 zli%aM{uJ%bR>L`-^YPh^nN%*K4He<{9%g~!3qoKW#g^jN z7s+GY42XXxQkD= zx%?!?fu};536Xh)Alnf#HO#eN>UpV*9X;=B!qvT1CAl|vskMXc^WozqDgE=-Ao5yI zK?^cvPAnWo+7G%HTf;IDPnp(9n(*ldBvI{Tf4lt}9uU4H$pX9y^*6Ywn*xnglJ}UQ zRZcTH%?7UKYyKKleIb}&B~=LX3^iBJZ{SDne(C>$g7~W$s((R20P*>6eE|g{2OB4Q z10x3j^MBr~$o!u-EB?OYPg>)Dk%Raf+3=4X#Gg71|3E>o{JVP<8QFmD5rO=PgQLBX zo)w%cXoBjP{R$gO`wr#ZG`2aJ{sVOMDXo*2X}>fXzLR^AM>OfOF;!@d`1i4ZeXpl8 zI)2G-X=Vcipe-QIU9XSMSHhWxq4ooB4dO+ks9Y?!xMzoF%9+z8g-So4<{m3Nj4_Nb zuL$IsH7&buZ>u4 zoeHWR3LaD4NT!wpPm70pw zh~ssJT{9US4l-3AQ^0)~6qXE4YU&B@xR?t+$V zOMyBuC^^=rLuHKWF^66n4b-25V1Z&QYw0KN=$6(cq+YV{u5V3^@H$Wi(|j5 z9vWW(2m(74)>A$HEQv$B?3=PkD=jc({}#VZiCB;z(5d8LQ+y0A_=k5W_^(e}g;5_4 z*ASWXuO5lWYWwuOk}TrOA8aP{I*Tr0p^83EJ}vd|@eo#_C=O((IDX<>1y?Evu^?HI zzbwStNwH)~kNuifA>sZQK!7h&K$dE!e$BL0;}}n=c#4;;ut3t*A0Sx$=^<8&G04>1 zFRBsRW)Unm-XHlLk)I?hlVWNwWXpVhEA?I;8<&o}@>U2cE?pDe2n_cUB}P&q>Sl&M z21kG2k`biUrzG4Mt&E^9iTYozeSYeYHWAXvne45)b4o`iFF zs`tZFxXVmT_I~~)8Ivxy*ovV9Ophbvr!(Va`E)+q>P4dDTrTif?YY@SM=Y{b~es(FAm?^Pcs{DfOqt)J1+O~$Q?NM|p zChzU)Y2V%M)xCKKF1h@&p}6EN^Bj4sf-}v~Qr04m^&ZCpC*wY~z(>es0-R~XgsF-j zH>HDB@xUKt`McxC%=?z?GiR16L$oz3oZE73_-prjrz|8;j{K`WOWU-Df$};GjkeL? 
z8D~pZ%fo8iL3aIBI$3jcz$FZfnmoW(rW(vhqc-K_{z0vC2ia2~^eRL_bL$1{DJKqd zZaZ|(D4m<`3;m&Q15h~tqhwFS0v-EucM)gLO^mUN^Ey~3H}>mwbWK&zksJnFLiUz7*`dDLb3&)S2(2e`j#5B|R6e{0nJi}v83!z*ym z{g3qk;BfhC#Qf8c<-c6RZ~)ND>De1uI|5k#<_X6Mc*X&smohVS0B8Vy4bTbz#y`r% z_=mmB-)?pk;F$hc)c_h1_;t%~O-tavKjdirCF$SQ^k_q!Ps z0Dl$vJGcK*q_mNtnVz7HD?sB{Qnxa zT_HV3Jxd#tUzKG7oLl`zUH!dsS^m15BLtkGI07{Yg!LSa02IPpObkqH3@nTsjGRmi zENq_{7|4OYlt5YbHik|HM)m*-Gg(D|ijlnoFcN@?jgA?hNg?QDW@!juVism(V%MYu z$UEuTJGukpghhWFpKx`ggrk?XG5o(I@%Ie=^Yl>^Xz~<5ujr)j=w@pKR4%o5GWw&e zf_e@{e*_Br`Vpk2QxsK}pj9w3akA92*Ksj)G}W=Ob#Q0oU;zH;h$#pN>$sShIx-0g z{ki~0|0}Yvk%NJ~nXRLZJ%HtpMx+<9Hn1@?vj$e8w4Q;2jg_AD->-<8**iE2nd;dC zfXcP>pY;AXfnx-AG=t- z&Q(HDsn!lTa>N!vo91Z5gq#@{xf;VqX5+}T!22}*h!X2I8QQMpJ@o~5`bO%Io*Ivx zs*T_q6JHy}MbPcz6Zy?fRKxTX2PDScwp?QO^w=UC9K5y{51|=8POOrZgvjEeNoRf z_eS5<_GF3|KHE

W0QsskY3bN=xNGEEnl0m6a9o^W;oC6kJ*DYLssS<_8%R?3A)! zOd0E<5;EU0@(QhWTLGC5Ll#l#p4>f4)I1x@2Lj-)tubx6+D(9^xF6kW>vO>>xVCNCg|pD%tj4`KdFr#q<6BDwY20 zeiyV}7f0WZMXlT9XI?E^p6fNvAgG1A!Je@B=jK46Sp1%rzL_eT*EbU)r?KWdWTzHD z^U@)ymDS>mHxObjvo@^In2!AT$|A0q(9tl;T-5z7rTw3R8tw+I#w;G0or_EK1ZZo$d8)DrP5>yNDi3nX3|sB~p_*UM!1aXUZNEA>=67kjq#e z%ldh*8crJh$URp`51z*?$9fvMwVj(TPK>^kUjsfgW#WW&3$U%GXiy3-6qw`MIU`rI z4&nMh>%YRVgj+)T#_@Tcl$6dP6T%P5>rFy{Ie&htHVO?q{K{<#i5j zdax*h_nVmdw*7eTaEI6W<|alM3EhHwM(+B&h{u1dw;=*qoNpK$hm--~JgjJu*a2Sid6fF=XsQ)x%7;L@K(3E^-xp$B0^7cf+Z4zi76?8^ zULml!V>VIMiG6QoeeT3uw%9F{={*xc`6i@c5VZmg5jE+=<^uKp*}9)a!+K{(k#WPd zPNr-Q-Xh)J^mS38G$ED5yVMrIW>LLiod2Y9V;fNU1IuNkeI@45gWAqu_1Igm05#2` zn@wsbnhgFVy-x}gN=%IvNz<&B#-!slV}G4=@e0YOpX6h==%Rijzkf?OtL`YQrpaS+!x?r*T_w-(De;~pa&%Mp z=oFsoimcwVNt)4B-Yf3o0l*vc1X|rKR`+eiCkF1pA1u(0Lqv~A~LkcO7Dv8=R&n%YiK$2#F?KupCYzLT^jK5~ZgzzH|c*9_VbupFO z-4}u;B|>d#iAj;v2q4fA`%Il0o~W;3DU*YXqtcIqtEeP$Mv=ztJGd~vep}h9xClk^ ze~M=arZ6)n5XCICXd&3!i0nmcvnI)TyofVQ3}g5U;YnMvh(=TRP8KGATpH@x-gSUdxv zjSFwfOP!0$)w;x}k zia02=r2@C&-eOAM;~W4A$BX61xY z1rvoyHqYxv&6Q+r!+7o9H#<7$C3e?2Hd7;as#AX16nIy&ZJx3in9|a!!_VJgeFbwo z=#`-_=wnt>EgeVwFiT@#e(ItjZ!&lcpGn7}U}EPp3*^8HL>D~OWW%LnEwMx$Vz|pI z3dVZ;K=6N%87M@v-3*^gPLq5x(Dt*npoHVyHe~%elAAp^Jv|cZ9Q9eP^#f~-4~mvf zZ9QeZrN-@t!HsWUcaFN6o+_?2n zD;6F#b&>_7tG>s6f1=V5(r%pghM^+Lf4jD~$3K44Q-X|oxUuKVXHWL+f37<=Ijk^s zNjtbWD058PS}}X_BmZ*FU3M>X?M`0H)uHx6d@`61|7ph;94201Hk=cu zgb)0V)DYby4DA4AUN2bB2s?#O2@%bSb_QgAo@2Qqj(gnTZVS59#ghxS~w z-zPRtwVTie_GLM058NWN-aui}vdI6LNcc6g{c|Gw2g>?);g`^V3BOqWP5AW(?f*Z4 zUrda^`T5`B7t1eP{Qr6||8l7NH_*fYUzheT$EUx??|!NJJ} zU!Op?~$Ii$I;N<)RVEqyL=O-)2AE1pD2(j3J%N?wo zK)3~z#s=gh*nm)yiT#f>m^hdKoQ!O6Y>YsuKp4pOTRjtq(1CCc2+V-8e}(*#$_hk7 ztQ^0f-G6)hrF{SGegD0bU!ni2l77|wKY+@=>U;jZ8~!(-@_*3#{{IFle=mdm6;%GZ z@aMDmC#YoO1hN8uf=b{v*?$96&Lz3ID$mt-F7V#3f0+W24bt4TL@IIBGwQ`)ACW8Ri~q-pch@>LFV|i>{N1-(hO%Q4zHboUmm_i5;J4ns8`m0rQpjXRHQ(K* z&TKmUa;C&PV39-Ah940FM;esYqUPb)N?{^6zVeca#{Ei6_7}CBcy&C-g!p1<~IRHACuZM 
zA%$g=9Dkz&nLs9L#Vo;~h^<5D@do{hnDiLg#frE|a1AS!hlx3;Ng|JcvZiM5uH>A_a8nBO6FB^S_oP;E$Nio4D;J0Aj}%HvyLB( zs(-7%y5d7E_MTiNZmn()Qw& zURwMV(N}|7-HMxBG(hY?+>tPDJ1*SWcrtskY==+d7~}k7V^*Hua>cZHWaZl53~2UH zbf;rLK)G4I(B3oI_@n>?QGtB~9h)%xs2+X7Q2~;ErE+>l_mmm?8ae3^`l8J_aC|bH zRrXEfdv)FjLerQ4mfDqVH_2HRJdLn7QC}=nfBrwry>oaa-MTN@VaK+Oj_ssl+wR!5J007$ZQHhOYsN{(H~p=> z_FC(%v+vpaJm-)5B!xMu>K#?1YUZi@@V=Pmcz|rua}G3_iVK`RXN}ckH{bhecKVd( z3^}fjNgrv^E*Y2hLKi{^o}i=^C3pa{l|9APUqj0Yr2 z>Am_7D;yntKKz%798Yr0UDd>*Q!=@|7wYvwwRvc)qUKQiBoc>NaJ@rA5Ne4W%a#_PH>= zuTmr55hfE52J<+P^-xTGHnzo>Zi>_UFdK)>)PG81b|E26nSmwKo#O3|5(bgq5I?2B z_}v%n6EKXh&^(l7=#z~|DiF~uf0Vz=G@anDJF;Eb_(lS?uf@8rjZGOLN9yM*=^&d7 zx57wBpc0eUY=L(0)l=Ok0}SyO_>QaYAD>&ihc6*cHvEddEppjR6JxqZnU*=hQ2-Q)o- zM-x6!qfipdl5^oI21MwR3VliB5{n0bqX9{U+o5qJGg(V__7M#2fb;f<(|g{zq$KOR zM7XsgGIcei)`1}#0vIYQ@bK>O@4@<1oa*5u;-(lHd)Iz}o6NIdK zrjzocB7e#(MZ*PLJ8sU|Y665IY`8tck9hib&WT{kK}|?6wdK-FihBE|Z&>W!yP+^j ztBjySYc5caZOa?`0U~L{x}z%nzjgg%3$HW+h%*C=?!|tzbYc;R(>3D?RZbPbJxU?5 z>*n-g-=W+>FTV(zuG8?QBo@k^eK)o^4wPSE|GFl=QVY|Xgp?m#Qcn%6rUpYgL4;dQ zwL*w&#WO3XxjoSGB-7wVb2X6}D%zSTE8mD=E;+5Nd8Vo2q7pQDw|LyQPFUW!YwQ7G zb-}ND;LhGCn3dd!Ca%;dxCFNeeUaW7kdvc$DZ~9+;tSmuuJWm~wGU`3>F=6LK4FcW z3*RydLX%`K76gv^kEvpOkd~LC8Ac9Vxoj`XS6@;K4j~0R!D=Ie9y^uH1&hmT3&?2= z8jkWhwg~QXWPHMecV>rKC*Aum7__nXWQ4w@h1D$RlW@pBx3p}@g;tUV*-Wo!=n<~x z+VplIb#v$0L~E%K6B?A+T;>g%JX9v?vmDi%Mod%A^TQORH0PeVHO&yLh+S-eJ?5qk zL2S)Ljj(q)J0yJPT-7-?;g%y~$oq~*PBu-SW8R@G@Oo~Fe{H~JGp}>&9|3&gE$(W^ zp4Qr|ss(UztR||iUX`J#iCk?}uY$k^alcxQlm7UklrNlrY3 zb-f4Qq^lLhtmHzLZD%g*Xb#uculofwP!sE~wj_Tzu`dxlpfkC!Y+t}zK=&yhrKUy$ zSaRb_4`%C;fjFu#gT0N5_5!}M*yPqDm%9qU(7NA^meQ!U;! 
zi!P{TYWe(0UouP>FqTuXUEA965d0?MhwFeCsE$!ag;9#vk z-=Js1!|OAdo;Yw+S03$R!j zpCNihvg_wOz>y|@zXaf*q$FpMA;5`qDDP&w64?6V@p36FWu} zZPM^M@|O#HDu^RGYXYWUXCxGyFf_1CpRldFIEeU=u(f*SUI`Po^qz{#Gyo^h2Nm&y z)Jx?P4$qY&vVJi z8Id6tLZU>s-JE*s_kL@2h*!a@)>e|VGD$TB^(3Mw5HB;%6_T%s#jpL2yvau!icR^E z+ckuOM{bF}9DN3rTi0J>rIgWjuT~uivykQWkP}(Eimg_na~A$hF%dD6=v-$)`-rqD zmq+}@oZ`5*)R5?~z#d&Uo-x}U2_GwvL!;5&V{QN28Uz&9_78Hg5+6ePECL+Q@2lRb zvUYwN0@JGD?%~!o;Vw%Kn_0wu8;^)o>8*$T`UQIh=J>GXF3i|AKXRFDG*MeTL6J*5 zWjjXA)%bc!CmCLn%O4ehspEnvIw7m`j-0B(xGbsgq*Tw?FyB{M;NaDEN9_|5zC? zrCTdZ7?ctG9izCW`4 zdRI(0cNcsu1@9BmIpNfi#Ro5?+1MV$nRjP3c}cYtDDORC3rhT~6zpMruhWq={7xl9 z4G()I5Yt*EcVM4nR)zU}#=;QQJI>S4xPn1nJK-M;rI|RG)azwV^0Iu+GPT-YDqR4Y-n6E4KwOBc%KjdQd zH9yYUj5m4DBk_%;lJCCrPsTIM4Wb`KP_|yar)+pQ=0#ve$JnnWo5kIP`pj2#ferhB z%zm^Q{yX&bzs4N?JIVv0{}TFQ_*dxb-y{bte^3|&g8vJc*C$f-cSMJS@o$(A(XU*3?@&6!NY=4%Y`*3_7$IA8}c-ep8OMk<({t_=RFn>n-JdZ!i z&qz$He*_JmJ+Ob4qWvV`|4b~OdKbpe$n0!?#0-BV4xc?T&_n&%kLh!SSUxd5rcVLH zU)%mVqt95M5{Ey>vHYiq;?KzZ7eD`3QLq1^kN>Lge{26UuFNbPP@mK1wm*02pE0z* z9<9H_NB=Dh?SJwZ|Njg;WBE(`^-tj0Uk!i1`oDu`42*wtE`LYQSlRxd#{WA>#zd-% zr?PV6&n1pe^h`$T`wSwOqv1SI4{&iY=n3#FSbo5!ASB71Ts|8mk~|>_Vm_sQaCWgQ z7#JoZs;KBpQ1NiEUof}-Cur96$iW6?m;RXcw0E(;S$DQtcG0k?b5t0Cu^<{MS?Eua zvlJb#rytfP)St3wbCulb&6Q4NubA-erQrTzHL|B(#)s>@){=2_{}z;Q1bPu!Ud6@U zph}Nd`Q7IsIzIeMr>98U>~uV#*XvUg_ekAl^?Zz(cUvo_&qUP+_7$of*C3$!`S;ToD{vFQTb*5Xma zJoa>3@M6Rz);)vJFql3he*E2;;9Ug)iCRH3 zN@62Ye%d%8@mPBW9oi(S{9jrpbWoj|SM3t-1get4Eh3d*d7}!#m2qbWQTBdH349|F z_HpafK8fZE+SV~x3X0YVYrhsI!n&V7R>cm>nd6`ByM(JD*Cms(GVmrfj&P5-n$oxy zw0H5KIC7{2;Qd7cc;Qr`rl9dd!b7M-a6;3=+c+z|)`za8cbib!8kb}Bv(WI>Jw2`s zNUdpN6!e8cY$V@n_QPY3Bw#5TS{*ZH_awS>p6eJ%$JXV^^2Qa7>d%#pn2^7b-AVtx zsTHLild()@XRs69T?@$~$rNjkR{C}_nvWf8PVOW=D#Mw4=hdQrfJd=Z@M<(%-IRp! 
zVfYGVh!ur1M$|?n6q_N5-eW(ogvjyu(DiVKlmUu(8DB@^+Icm#_G2b`&fCfm_E{Mx zq7?(#>^L!Qg{XDuEVy*h?kKmIr5HLWA4hVD9vlcAi$iu~Z~n!tgFSyMB>mEynN5}C zl`zg)pbf@y5(2+dU%3nItU<7+dGAMQ`bMotIA-24>qXved*fYXLc@*YG?&;AmS=j` zF!4?49kjddOh9X#JDqwkLxb?qG3+Xv`_Dn=wVGLP*_dmBRh2;g6iV6UOJ+>|OF2vf zP3OYOB|xhfkC*(=8Rwh0>yss%z}bW0*zACnde2R?5SyoAbQPge9q^~p%{KLBMjBH zzLUQjPkla~9`lkJ@zBXzB>Bw`%S07}4$*~#N;O&wGsk3?NL+5K$+<_@k?5^N4uMP8 z4$&mJuLAj~LWwYUXao!~Xi5{*NBH*3C6I@Wrfiqlo5vjo>qKSy$%ddEOn2rNI~|u) zNH6t|&KAPXWyc-47b$yHnPf*sR33cf=m@oxkqhM-Xi-F*7SDpn!nq9uaHt`?;|^Ks z`k;9m?6@p$58ET+ITwPhnKQcq4O_jKG51-@Y3hZiy1BVUW z1iRrm!F_YC(Q~ou({v1sc;z10lg5l)oyX0n+Rl%GJGAHSsqc{(RTGyxl?*q4y-jSH zpHI-{h7d;+YJNk+pmtjp>|vZ0qDBF0Q_-)-k>vu%{IFY>aGM6C->`g4P{ zBWG{_sdXSug!R1iCXNp=8~JlXxQhv=Xa+rK7jVv=qs854?kT&iI7m18qVHQo3|?XK zmsqrFnHj3KFpf->hy=7@6D<8hq1um~w|*p7J1}OVyk|zbj!6$uoz$0d@Y~woiS$@M z-^Hwvi&otxjXEV#8`%<;zALvs>Rg0j z{Y&5Khr?n0-MeTI<1eA!sqaQ^$f~5+z^_rVP|~Y~7^(>YXReFifn>g3?A;gHM2*M$ z^%HwvQdi_)&d`|=K(nDtH%J`T&Hc(wuqr&&GA{HJZB``flr6gu5@-HbO<+pcAs|)zP)iV1(MRV z-nH@!JRT611G`>MOnfiqii_o?DwXBMLeB-3sz>Du5efqiBEk;HTU8t*Ud;ljL#3lk zetl0a8VVq`gaSAtzp&Ox8YA$oBCY|7MdE)B{DmovBTa=9JRgjd_B6;G#n9<6D?ATf5 zN!~Fp%xY=5k7#exKC{v{BFCQ(Bm)+apHsYZqISe;kFoQdoJ#m{vamNJwe~8MDhM_- zuQWZI>sHA18Pa>!u;C-_9Z0^O$@gWpcRQo#7ge;H)CUxCu4W&-!0D$P83OxgR#ZT_ z>(#667LD=kmrO(dE|hsL=FMMmHiBOvC?T9Dtpd{lFyfAF!uAaH_XjWlP~LQO>=q3y zmd2>&D>6E#A+R8rDuzy_!Z*{Dji8U~`%|l`+8y0iT2v%B9wp(j6y|jwW*hvPw5MFv zKo&;=_`mxZ)f6=>5}Qqd6v>Q>>#NL?X4m;T+E-0SZ67M%gfI!O9#bE=act{N-4nBk zf9xCWZp?M45arl&N@%1cQ%GoKSG7dRl@(5nq_=0uPrBP4@aE!*x}T0r>-I_)h)Atr zGSq{vT5F1Q@)wn?lzIXB7+e;V9aClpAO=&Q-RtfUwYOc6TwMIhSDvn)U@~&3oKoL$ zFU&xi85wbUBKJ`M(EwxE4uN?ufAzV05?mS%cpi>C9z(Qy*OKf!2u~kE1)s>d8exvh z_(6XrnsgPdLh$55!0Zx>1Zm_gLx}v`BemXQ?=g##>g?*+bCh17dV4boCu%o#tJ@er&XO{ChwqFpep59c0tJMLzsc6Xr4CXnLhr_yEo;nwTZ2{O8s! 
zQP4A`b`CMhM0G*ScoV4V*MrvYmO-pI!q$UZ>!cUWEsDV>d(#%OT;~1`HR+WoaIc`p z$q>Yf=jo^zktO3PBIRAc^IrvbSj3*97@m_4kPS(#G#XFF5({6d;ikWFZhX%R?;)V^ zE8Ixl1szw=^nc_@o)Q)#j-NA@l=Res&s|)x?Y3R$Q=^_IODtU3KVY40)0yl+53M}N zO{zF53CvHJB5$4n&cSX^JB>p#W`j||;d&pJ0l8Brz|a8De4{ZQEPj%%X>m2u-wQx1 zt={s!G6k3R8c8LY4m$^4cmYl^K0Tt?@FXl`1Os6QK36#w^P0KjkGusQrTv}|7Qg|2 zU<`P{uneyZ+u*W^w?g)^IThhCELcD`OBp)`&0mjR~Y+r8?^3;S)8JzlF1wvOb^bGHZu#ZMtLrWahDVB@jmc5 zLAX<$5<_cUcbTo88Oz1TpDNh2G+~~yB2>*=49Mc(5@v?t_n=`K^xI|X29a{Q-#5g2 z$Cg9-o--diijI#le91#^P@I>c0*8U}k8$+4`os+coz#Ovt$0V{&raZ?y@ z8E~cyq&LPj(;D9hnUaT<#7G5F&fSCgJj%hy9*ollfG-g~f$g&7O5@ z`sm_VsSGUFCMV`M>?m=y17qJF zL0u3R76t849qI+z*1(MEGh1Pq-2>k{{d0(Zlm6lOZi>y|n<4|T#g~~VmZV=<$^Ocj zx$KmRSO&bQ z_$)sKM2Yt_AZVyq7e=a6-^Sd=4426xInLy~P8Ttf4i8pUVC(T+3p_)(8qOA49iSSd zM>eEJ=Ijk)+}O{4`MS$Pol+@8_qFA8lJKb;ndjVL6@#3OUm-6p!|aBwxCh%kWRP5= zha-c=(Sy-o`v#qpEHdm2O^vdy;6S7kKS#f{94A!Cs_|>d4~Hu9d0dFB*()p6*~(y^ z^l)iBiQx57dSbcQ$LYt5->U)7x&iK6JCh%^fO)Xz(v$mw676DcfArk!73_PfZ!WvC zURx~z$2Rn>gp!52iL0KWA-`;uGo0k9g%1 zg}Cy9Q;(vX7^0=6uBb@aG)#!LJxQxd?!5&Eylj?YJ3bCx8rD$5wAD?LGbj!Ap_^N9 zK6nN%yJ=NtI+LfL0*GP|LG8z`ZRwsJFs_MXFn`xer`Jtzb69NiBp&C{ux8^z;HdBGS-;jbuu#-n zXoAI|_7}Ad-PEli2OmxsyLYF^meiPeIfBoa$U~ZMJ!K{J==5Xl#LT&;GSMCe7$9qH z`2u^FC;DfVkEhXxPc`h>1lgxpT7*KqE52siFg+vurYd{mS?_YbxCgkqOp(O_h`<^7g}`be?I= zciTOA3lcT;7#qD6JX!;vf4UxZ>Fb_!sKy7sTC0Gr0RXQ}%C%-OG=7;AZkAYK>yp^a zCki$()jsV6<*oyXM9W6R%c6I8wfP3hUl7$5?#RG;|8{^4h0ZrX`lzN+=Lay~A)F5a zn%5xDEB)w=Du8#5K7ejINm>C$@l;Hk=SU{1zIw%7(1(8{DNYm9hjEyo0QWbe9l=e~ zrCHJ*WxKq^(DNu6+MLZb<6x3>(gDCT(_aBjW$q=C0l4rON+-yYHH=`Glr2dV=TZzU z#?5XY+TSqHGl8u(SkLe<$v-Z`Z(v&nGcuM<#3(i83^HJo0O2DreMsr^50#KloR*fe zV{+js1lJ`QAil4lhHHl1&RwNuviMC;&;i8KnW!11b%P7vwa-mfAz{r+d8EMl&1X}p zcK`;uw=(p#yHt%?zFTw%a|ai=0sdN%??&TPG!uW+_%gbYtjk9N8Q3)4522BV&K0X) zpjdR%+SXj)4Ro3VeI$U8Yjzz|CiLk`yF_$x`wor0h(0vVwm{O$*QDb<$$)ZEz3#^u z;`ziGrtawzY4{`%vRU7w9tH}~b=y%vFZeU~c_g_tA&d=NT}?Q+bH`L<_l6nmNs!t; zk7PDPD#u?Y)Ly||VC`vFFkgt~Di!FousJJ5i}|O@?8zJvf$Zf$s`!5_dD@sdG|ZUOs^Hz*wd7KUkf9kY=6B5C^C*d=zr3bjDh~ 
zN9s*k687(3AVXFTJW&Q8aj>ed^9L3)Ko-`=53qW~k9nZnqE*=Px`nB17b_EFtqW!9 z1Bi-HAem|hWyEvcLkr#c3=Fxgb~6aG&TWfO*vJ)_>-HHhqN#VA%rg;efb7D?f~=0!sO@5wQbYBIBG-rqWb3#ipdm58cq1V#mBglODQFs9Ww< z48K0apxqr^kS4GLMU)Me!*6u0PF`CB>o|WyT=9}yN-t9+o}0q8Wu%A2*>oP&iA+Dy z9!x9am~Iz-u`s~hla6h~Ikmtz8EkiR=jP_99ebSxs(_&edgGNB;qfz{c46=Wx&~+= zowF)+wfk$6#9b-751s;X8FMp2EZvyk-vjrnoA$ZgYsRb)OOHp0{+`}P zvUml!FR-_o1t%P~P;hJajI*)`1aeUE$J~x*9~)4gi~r2w4fW)UwwnFo$+rh0sV~XB zEbrtZk~angK~m4?YH!R!u@~gO8t2+2)dKW+t)4@SYHh#pN`?d`w?OW-thVFQU-HZM zb_Ocn$?mY+P+)LKQJs*8Nb@p!I>~ENWVQWXZ|Bahf##4k9K>|+Yk+H?3%K%0{4i)n zX^-3i7+-<(U2DK3@%V{Z5$Vaq|74R=TE-GK4;5O}S0N(!VeH*+0^(Rn3Wb@DpVT1e z)&@*NhA5TpUAfgg;mtn9(4L=etfE*rx>Ou3-sp7I*_Yv=(W{xqiVQ1S7o%n-;zP#L ztOLJ_8Wk3FlgOW1aDXCW%@JEfPyuAS*6-o>i|zXbz2D$;=vQp1T=}w$ z@)qB*c+9)J8uxa#+R5hljSDKI?UD2}J|IEycceZDR;_;QW6NVK>#T@QrAvQwe`Luy z#xD`e=(N*?8JWqon7kusvI$uf$ej2k6GX-B$gfMHUYarPAah|UEn)^M@VqrFx#B4Zavap(a9 zsm>|aL{IkB@LSLDn=xo{UWaG~>2JUfUCax?d%XJ`MuV_Sr$#%$AtbUi)S3^hR=(kh zrs^&}=Bsed)G@i4OfTocGr?VUHU64Mtp4geF8qlu-k05z-dhLe+4)2Pc_YHk-g4v|BDWz$>Q2`!W z^|TRB%F$O)4}w|x8V3tRM69aFI%k|`eRb=|{PBRgxgj*?Se24qF1?RUt$Piz7&+q9}3@j z-(;O1Wx(6)cSuOZVZhlicK~^PmiUgd(-pBFX4MW;Y!RFuFf>NR(fO%%aTCM#ws`xo znNEW-bIlL8xX)CC*x*bUXg)3gb-lpuZ801EyW7TwtSSy!D?LDsP_!)oZp^&0`vVQf z1$nz|&wGxL$Zsv(2`YJ!wO@CX?oi{Fktu@%df1f1^AOu{xK_&8u@-LXCH?gXYu5?c z$l1hekaTApbdxWj&wS~7;Ny$Fe8kq|hj+AwnD-@mJNifE@~cQAq83XRjrC`;Cl2x& z(#K2{_GVVwCS3K;3X4&Z?Aep-Nwmv5xm{G9p<71Dx_IPB9vuW6?eQCtVbozF$y;cu zy&MJE4(Uh~vik-S>2Dh*j0HUt3u~x{vKV9*EV$R>;E15fAIPLGuFTGv44p8zcZ5mcmV!!8~=7&|H&x-zqs#z zqh0^QjQ+<652Z=-$!ZeNu>NtBe6BFiGcx>_MtJs5d&%F8@B}nWpF{RfMtC;n&x{EF zW`zHfJm4Qj_&?+Fmk0lEKKM_j{f~O#A3pfM8vcHbe`j5p*qHvxmGB=%_|F6cp9cK@ z9qk(54*3H`xRXz3VZ&j{&>e8naw%?1=0XMvkuL}V5e#|&g#bYSMGFCe=jz+4^Rw5z zA{43(uWj@jArMS7_62X6~_|iYnvnhtd>ia9ii&i;04ZlZ(^B<4eV* za>s6N*by?Zy0k0NlqR@7O_Xk#0sJPoJM%;+tY z(WRHa7PnE)E5{Mq=sxJ|GgvW2yczE7@~;6ZTB>+bUxZZkdPAL>jWEU*kL&cY&?)lE zJe*eB$=#=b_pWD<+1Ucg#43t-HXOm#w!7S?fpu>3QWbmu>@>@P;kk8~A_Ar-}GQR==OampspO 
zdO#rRj9A?GAbWGKD3}f@y2Ob;dA)W5MjGttVhJT?6_Lu+BlM$s^7A)ZR|^j%yz$jT zt5J8p{Q7Q8tfO4MBaYBQ2CCAA$4`*<#FG!Lhu>QUQEtp;?wt46tT5z~G~Oq6YXuit zA9T$lUu|M(0NZq-D5=YR`B!Zta#prM5#r&x!ko( z|FF}#u1?KQsxH&!s3;n1U0;JCV^sE}5)xnq*eBmx)J$+?;)8s;m*Cb6A*Zs9IL;Ke*r{U%f-gGvX+%d?{XW`UQquPh%y9LLQCQ4Sh9Z8 z7+5XjrGD9MvMb>1w(_Ey8WC}eGw6~jU9pHvEQecRPt3xSa7%%0hJ8ku7MQA4T_~a< zAjeokx?UiEKM2d-xa4pWGU5<)som%NS;>mK7pwwqVA({1&uewZXJrv@w?~@dbV66r z3{6#7uR~di-}Np?9`p+?u?MrT5mbw+M)z$Ho^I3$NkfqRzHJ)?vHA00JlM}OKW#3> zgQ3eYxWx_O&r?lY*7Rf3S`XpZ3W#N1LCLZK&uj(6H*a)rqu-?0R0DLsbTPUR zy!l^&)jD8*R}5XMyju6z;CfrsU_DfvbeLH?JBvFjJ9{}RI$Jo~sMH0e`RS4`QJ#g& zLH3$9nDR^zkw>TW3w;#yM^Rl1OHfnQNIr}+<7S$<+ZCQ-w)|*MJDrK=6%@wZJmrz@oH%>cqCL|9tNpK5OujT z1tQ6LOFEKYvFIF$qW;OYN@FpfZj!;My}=BoUy*04Y*FE9*r%M{GWAKP}x4((mV$fp^Wc+H2uvYZM&~xCOL;LLCV*>9Ybi6{n7-u~kMZ^c-e~ zH=0fT)y>Jult$_7ruA8w^d#n%wDLY0KhcO+c=;lQ6%IzP(x5231tDyuXEaKV#w!A(HBfr`+7~{FCTdpOg#a zThm#hA|lsCXl*i>t=0~1USl>44r(f9IEdFgM5e@fc(?7?9{H>_%_qC~$}Qm9)I)hJ z>g_jVGw<0g@FqUz9*cYD>Ujq)ZikcmpfhPp} z0A(F*U;;j}FeeYR2982WkiJ0fZVqlt7Ql%$g-A+iVbXxIpumo{lglt^qEea}#Qu#| zTax!AIyz&hfe$8~PS2-M^)VAd^-h5&-OgS8e9MVpd*De_n`uj>hbDqzhP(+2#6?Qx zRNc;YFEIHOcG{%gVLW>|0(~a&uqIE6WgFRlWh;h_`#Q2jA_yJ__JmUt ztxO_XMFg)9Ie}tEP!;!N2(mO>6hQYKN3lM1OQ4GMpm~hM9^Ebmq}kwGm1s-QtwTh! 
zInv71)S~f?@{`R8?@qz1){W-sa0e+jzYdwGsSrWS*^R|5_U&T~l70B&;~S-JhHiKn zS)7bLLkkkInlv6k)wHuUMiZM4(;zdfftqk8$%`4*mj%d3faTpgX-nEU(Nr|ZKJE=- z!Mv2@iTN|9m!R--<`_>Beq2_Xu7JjfdVWN?NLkjP853JZ)+FC#*T!SidXDFp$4ntw z!lW3gVfR{JOG2BtuDC~vsgoA*b;EbNUDCc`M@IBIH?F}u3bzoPd<_lJAi0#y?;qWt zCAPe#^n%cXL6}^@0nW6~*8S$lkBl(MAM?}nUlI>J>&O|x9beRuG8 zkk4R6yPG?1?^ruiz344XH_xvfPomdabk{Kmt{X6m+fwC6RQnF?x}nCV$Zyz5hm72) z;a)9TR^E=@%I{CFYa|k}$6PR;R#cd>h5GYUBTk0!PfV3w0j~+3Qtu(^yCs<#4CsVd z1txP2#{(Dxdv_MALmV~Uus8j^`#b1Z^=%okD}MH(dWfI z4jMIIxLOp7K^4odnYY{=QoO`lJDBEJ{c2O$dsm$Y(gW!djZC z*irnfMN?|>MF6@B_)Sj#-3SoXYwL};0(0{7i0|lqkY%>48UN&xo$rk}^YCcxH)~}8 zHq@pme6}Y;jRk9I-zmKbd2V6chJRZS)lm@?)9BMXVV`!RfOn+s*v-f$9N8BEPPH&pN|QFI@1ua?*&A}DFb>6cksajE zj8?FYwipM$yQWX>AguP%lPhx8D@a0X#i9ssL113F2z=0&C&70Rx+1a1I)Yk-_nn_b z+xPHlgVzLo!ko;$@ahPy9ea7Qw4~DZg~O1c+qy(UG{uK~$s~{a$XQbOO8+QYE*;Jl z1<@XoCc3%3T60>ZK&8rG1)fezW^sZQo8#q(@0Rj{T2gfTWFjNxRe-}|=Wo3?MX+`p z)hpR6`J>aPU+NjCMdKTY03K*WGR!#4E#!A;5P7@ky`Qof-A>StxE-;YLTIyXPgBQz zT1+Y54{yg>Kwp}*EF_gG557k^Xra<2&W>DZ=NU(p_`Ol`7U!su*j+oeDzvz z>thCQ2wSObvXOyy>XHG%YE)WuTGU!0ZzrO16WGI6`#Lw&&T&E2Hlz$uCi<)O;I_S^ew;Uzh(^=R)8xlXWe{F>a(uX5ON3^F`YiE$7+z+ z=8#~ln32!OsT49viVn`S6XBo|J<8UfG(*kMCRB>RS}vmMuT1kR1mZEe(s%>h;>>pM=m4yo)|OpAh(1_%BM48BX>i#->0XAVD3bRfq9yn zk+WDSpG2)Tx5`n=E!c#4M>CW7^n3%eQpiSN@x5{kRl7B!HBgbXuSFxsaxZw%U-kS( zNVL&eTwW`5?dp1w;g|4e` zp@Y7kvr65;pxENJ->gXc47QG|zG)AyIWa8iREr5JN+&1*Yl!6YHSYPrO#9BA8;RRN z%dM>U1(p5T^$0A7R@+g?u_qOpvxYg7MSK4|VaGa3bTffZh`S<(C9Y7oPg{D^^d&u} z)5^K_@H~Mh7AF?GG2WyxB(b;Onu-CPNo4LMSXvtu;jw5Wn&1PC## z;}_`91^IGMZ<<2({;*x|pZv>Z7>GIjwi^%yJf4GK7n9+04F|Oiz-b+y*x4 zSB@<*Mm*XV8*ogHQB`~yO_`H*=GRNDkqq9`t@v`^(t$;jtx%GaBd@K0gc;DRjSq)z zclgm-%;poXT|iPqOVuf|6zyo29iZm|iz|mm&%KA`y-pI1W4)e#VzGGqwLSuvL9@;kKy61S@MCmhhayE>5FV(&$sOjy9-VW-s?-kUNZ~TH!a5c~q^X!J0 zF8?z_uPDT^WFx^V*_~7dAM{>#e+PtJ2rGzNgk@I-*51JpfE^2P+t3<>Dxs8!E0!KtuZ}+t1f0Jx zM3=yirJDEiRS<|K!tw7Iq1~Il5g#)C#IAJtTEw$lUzQOz5s=j)=C^U^QcK~ILi0& z_|^G6!t?SPLK`D{#@lv=P80`*uNE%eDeO60UVQU4a?MpOi(VPIO&2ZFbK;_msYkA~ 
zEt*1#Vu`%8;FjdxWY;L3oDm4v3muuG|O<;>knVa0Kr+ z1);4`EAN<}k1OZkTNrzDI+|wai_N5Tns9eC<;Mv1NRRLsC(r<-0=BE`vJMk}D!;%g zBeU^5KG(apUoInFFUJtO9$bs|^X_905bq)?xRvK!2fJcOCzKy9BZe)oB!6CKVuJnN zvG{wftEW~kA0pT%yd6QDv=fPAlcv;Ga8S>D`r8Sp+X`O z_MHA+2?QtV!DXsNb{R`LQzaD~W2cwnkMxdvOv~Y&*QEX^gkhhR?AL}kBi!j(!?RNh zi*&f4xsy`5{qs{(l9+xvEP1YT+M;dyUqgx(2-DFMWU>N5()CmjFIWyH6LV^5Aog4 zF|pQ>U&(b_%!1WPTASi?j#$JXMMO}c@dO^^;&x?sEMcd)`c z{Z?FeMF_^>32UzTi5v6+7gtEUpv*i@b(xfpFZV{_v`iB)-%IZG5eF5d*bKB3!v%D5 zjw~0-%Asy76_P%xtUsrjVcsC&$YB+^!@A&_G1=HcG6(@DO0uQvtcdzWxHVHxWau|9 z82c47LBIf~E4>jS-a7wT`6B=5w`CTD^$V3jZG*}l9i7+K@J^snD=NS2ss80$-X!1kk-m}^+udBM=|Up#MOvr z<>pC>N+k>$9e93&t`h_vIM=`NSEYv@qaN9@FZH@B8P|dG8XVF^eEnPb?-wm`i>P@) zAj{%3@9{-nCT_HNA6@$?^y=wmnDE?_Gq2IZ7p;Q#o%QSY1!ej^K=NpcoNk<)voI38 zw%j_SN;LAZt;#Mur?JB`F!vN|Wae5L2WpbEKMZ&dd9db;6%aGyPcKL{JeE_%NhjPB z5XcV>YFx+hqK5Rd(PomTEFPcQb)uGzLRQM1(>CveMdlCoP@`R}b=I;p z4NLbKtLyzvZ>4x@nW4NWP8^ky8&d$)$Sy0k<*PfEVRPb3wlb^vI5K=0^flXmtB}07mZt(44E^JtnukqV>N03 zW^Qo-6EoJui?WD$yufOs#k!e-IO?6;A@r9a3JO~!Avl!!OL}5wqCQzkUj+5uY!TNO z+YiYg{-Npqx6lq1Ywhkm$Z6D3s>&a3w8?2D^E~~<@NYC@N5(bWc_=6A*y|Scz@RxQ z&U}*ZB5*Nz%k<7rcRtO0v0W*83VY15Gi^O`)P?VgD0#6F#eRWih=|$Uwfmb7Wcd`b zRL`3wArkJ*RdTgya}cc7s1FKJmLxAfjlRhs9a`z79q3&fC~#zM3?oiLH6nNIsc1W6 z4&F(;3uX!g858lU%eFOu@8J3=)w-g?Za*cLUU!RYp_+i=&`lOI1z+x~39j|?Q$HIE z^hMXHb>H$$${z5k(tz`8P{Yrhq$|x{Z_7F69b;_3Y|uF-O^-RUS=cOY2w%?dNZ~JB zkZ_$q-;r);-Wfcn0NCGjju?-NbCNon{n%j0nH)RnvHenWyjo+6Ue^+JT20mzv-5^H zpR31k$wu3$&yIiMDPb-VXe{fTzvgtNpdpy?JZAUk=*KA4_(+FI8s`b#d#HvOvi|(y z<^TAuG;o)laCePIRTKr7^pX7iS_81RQm=!1ntf%69sh`9N5pyZ0o)}VOVpFD17076 z#(mZ<=qOE8^KZ6BXBZQzNYs#OXcxgfRTTK+zA2JuAmJHLz$mIKe~UB0I62VSRL8|u zAyR%bZmP{QK^Y$q@y20V?`O52<{>Bv5DuvD;8Z0fTaT5rmw+3gEt%)7+QPm9@A$=; zLLnVbssPDvMSmK>nYUQXP&)5Pq#rUuVt6~uToF08{gcWFizUeDGy#D$9P|GHFF?@0 znN)Cy*fw6+%v1PFUqcETc0&X0&nPK`DcNBO*rX-mAH99=LL7HhkN#Da&7_BFjf^ zYn-y`(pQE>rD*QZzFn98by!4-(eOToZHK|Meky#AJ$hTWRqO4|e=nOYYNFM7WpA6c z%`524ySYb+k#3ZAqS0s;pV4VKR-;0Vm8w)6eMVbUZfUGu`)*jBEZq4|5|vJClsNky 
zaF-V1;{Gb@$@vxwJ)ekFe41MdbH#69Y;wcA%~%SMFvbchTj!A@3W+CT-!SXoN;eh1 zJNDdwLUk4CU<*qvMM8FxzNz{**+JX}8e(*-Z;?XDLbS}fRO;zs;#NXT86~N#Ky*3 z8ZIL0RpA>N?i6m%-qrAM_K}8Nw%xA%w*B^3T<_Wb%k`!0U#@(mgKy{da;5& zWXiN=L%P($S7IP;qKf*66bO|jG7^@)Cls<~dg(1aiD9+iXKvYH9j1;BtA`LI>m9>* zpKmbl8B)0$OW@3&0Js$TsWohYz16ehhdX&tw~b^I`m>2r*|c zBbN%uGsdPsG<|j1Slnh8RPnl~Ol>L$1e$C@rgp(96A%zA%{BN#($KN2fix_mis*sq zKgn7aL=7yAviG|b!9!W4ejF)mhP-XX<=^!#`n1CA5=HyQ$8URn&d!d2N0QE1cKf|o zPQ5K7S_~HV)Rp&cdU-ClrEbUEUw<;QO0NH-5~hb`oqw1h=t&}O>!tsYb*^E0&oEI#HM_a%Ng20j z2YQ|#kM+|3dQz0ix>UOb`afGDHw=%gql4|9i)a-)G~4!cMDXz2&9f-THW| z=yr1D44Xg%te9O7a3M;n=wq_6B4Ur#MFQzSIFQ(#KMr%thjK|)7^-a%#%U)D)3xe2 zCb$Rl8!D#Qwos1N#d`B^$a;nYxH_ga_2SA^f*@$^f?b;s655n)h;5v0hV25|O507g z>tlOtJ7Vt|-nD*VvKgpKs|l$Sk~tQNhv$dpg;$1GmM*VcTGbQTlX}beq5fl|b-ET? zw`j43Z1$id;PksB(QS&5n8_G7B=oeZlB>vo7M5w!DV0lIVv1FR&3Jf6`7oZ>d3))9 z%g*31yDB+MXL5g_9!64PDwL{938|;KSD;0T(HJpuk31O}Rt0fGs@_8z=rsk*Q&?n1 z0sA@>ffZzst@UBH%Kb7#fdpCo zqU;)t5T-{s4Rq+DvmTy*!>iByZt>IgElpLM-@I~CgWG8_S<8m~DYsWjJh)`(rp@!` zOmF0D%P&0m&|N>=u=er4J#f>)P4gpW$?DSEbK5=%zqa$ft?RD)!#RThHwvfB&jqLK zCDpRd-0d`IyNTMmhdRMU3e>s7V2~u=Db^W!slpzjNc;mBXxo6&Y3#JZQ!5caw{6hS zvsma`f0D=MXhS{)%Q_o{n@p1}`$}Fdc}F}{@|k$F7_yp$Tqg&%si{_{ixFxkIz6F!mbW0U zB^K=r%7#t!Gc3(I`WVr}mTlzj0yngUXz5ek>Rq&#+al{LaZkxgCR_u-Hwv;ORZ_PC zs)vsp!9S6XxsOO9q8qhE`Bn@0mI=B{ezMWR=BlBz2sLy0bxwmtikvvk>mriH;N(Wr ze?e~`*>Y{UHXAZVNDA}m8et)zOyWsT5GLgeZ@&V;UhK1-3J_yHxM`h>!1@pmu^c3Pf$l2I%ca<4NHQV}CLrd<~H z!iy;dL1U+!uD4(p^bXGqDj#hf<2f@ksvOLxbGB!8rmg(@lsQdOsZ0gsxN^PN%RMAJ zP3ujYOx!^eHI-RRCeh;8TddJC)>M*6)TR=NvXnoX%IJ8dO0BNt)M}pd>$q~src^nd zQ>;#>%?i=n67jLBGlC?zI=CUozZ#^$GH);lIVIxtcrvL}!0WL?A7Zsw0$e$CIpwiv z6c$`U18H+b5N;P0I;lL9@YoVi8hLiZ#VJFF%l1^lE1Pvq#6rywcMf@vdIb=)%+4w< zVJS~ochlj-lHb>3(Kmq+*(ZzWYqn5gnPB*MtDbWc?MXKJn~r6b;G|Eh`2ST@uGWZ+jVu&I zdA^8YMM$WnMP?&Rf6~D7QJ%k|@1148VbGELGmD$)^6yzne@E|aR;piM+LO(9p@yC(KfN`zCH-gp^M<$es`aTg z>H9+)ha6FD_rNUvFO7Uzqr^h?F1mZjFEHqF@3`0$w6xbzJr zsv7gq(2($ucvEU-I3XO?h61pL@rs~en 
z!=W7^ZAR;JO7Ix=J2|Z?O{rg76)NGQr6o0?q`xfTOeVAef1tXi#;N6;+GyNt3>qtq zO~whv*~TRhe0%A2a#?wtSVRkFwrsHMv%G3KWI1Y4TRe5i(f~|1P>~*G#G`iX$^uL< zvazgaKj>HnBn4B`)Pzk=VR00erl>CiS%e(*Mq_`f?0bP&#%k9aY>9L{WiKzMaecI$ zW~^m69bib5(*m2RFq%wbwrm-c6dfILOaCG>Dxgu=PIa^h0k&|>pn8Svf#sq`$t9v; zN|v$p<;44+fB3q~+a8|Nr>LBsr)9Gz3>$gJrMVvZ=)}v0w?D8p_qR!f<=%1my|XLt zn>A_eTqdV+^-`rkHdC!P`<3(PAg$Inm~A!)8U-r znr}bXxv*lXeYta~d%H_N$X8!A);YF*hHFOl0@s4<^}c&5^|jUJkT(K1GHr>|HK-;O z4VX=Yw;H0`)7JPP!&)H_PY>b+F0Cs`XgkA+glC91VXh8VS5`Mw3)Rx#8&6w=@rpU4 zua8Z?q6J}lW`W006d?uf9-0wsOr&6Rh{5Qvi}>lr7O`L66%zskuEJEI(`ZsA;!9mHT4W zY=_h0s+`#o^`5ciS5voKN!h_7csCN^65yU6q|PXZZ`I#yxXF66?PmKr$NJ#<(3Pw8V7G~|d@(6~8WK_k{+MNtp~ zvj=WZ;qbumosJ(&d4ok_a&bS2RInmj$Ci;{hJUm|0-w8JYsG{z|3%l&yEfOzYBtj9 z3wCu#u@SL#6LSZONHMKp_66e?uDIsg=_43ctoh@tyU!Zd-j*2ye3*vysKk0?sa5vO z4lYrzQu7u=N!n`l8zR17HX8N&c%2&iOpiIx#8Nq9mYUR4IP8jck1K7n`8{VvhocEYG?wsWA%O)5F?cwGHj*&==-0lykMlLdJxxd3 zbenar>JI6S>QuUHB2hug#d5B^7dxU8CI)Ppy0dMS)~~Hc8LLr0c8Q{)?Pwf-q_45D z5fqIPua0G4m5q zx~|S&j%}o#Z6j96k+xX;QX)gj#4@fd9gq7nk!V7ygG7=bM8c}*Bu=NTi7nUarl%+54)(hp4<~TS6iw;%ae&cvKWbDSLbu!z0D)#Um3VMr!4|B|NSCxKPnWK0Msk|zpTN11vOsI+rI{8 zLx8`ZO?=-~0~ev6KTuY{r2o%LfHhc+o#T4!9E~&~H>?ZY8{#U(CUJtuk1>vlO)+#B zr^FsHJQ90Kz1yf0qONGdn2aW3^)Yol8N7iE9!&i8*-A#RYRxsYroN)4rn17H)oX)E zvD`)jE;qKP^7>TJFY;k;W1=4R*z>Y5R)pgvJlMa5a>#DCajCeV3!GO`Q68X_c!nnv zW^E9bR%zIcOHS?W%C@3728hh!3MC%s8BLGynVcG| z=r4d(OMq1^^jXF#enntwv8w+xTL7gs&5d|T_;4CZ35b3ybA9E=!O~*=0 z<1v4FXqHvAKu6S&u4`z>*7=7m1nce=9L% zGbjKIF}p}5wjQ0?-sF@KRbnH0vc!f0BBm&5r(w%agol1o-7n6=F{a3Zo~-%F75sI5 zuPfv__r5}T>zH8igF->L@}*F227h2Rbp`5-K2mTgSDy8lJ3SJfJ;>{X5?UsK>LRf|!!%Z_or3H}KI z2Rr=<-p5$47*qT@DX5fv^~C2362Da7g$$tl%(UXyx~;4|>8Q8B*lh8;g9*u+klY-X z(3%sl6f+^zG66OfDYWij|4ynn60yZ*s}4H;Gj^2ImhvZytgO~g=ruOTq8YhhMCE?< z6AQ~BHo=@C{|m!UO;%w?WRu9WDO*5&w_@HB-hDyHTLhFS{-Mw z8yx(--2IxzG@pnc**|is1@Sy_r?}HD)Myi-XchuiNDBJibG!XPjoE522%#Wn1vPcM z7%7!a7C4Z>CF&4?1|Fof)O=n^K&#cT1`TUi4Rx`Uynh#cv$)+ss%A76`gujJRtnhg zFnPcSSTX;T=oAs1oZ8{EBetl_U@pySjg5DeQ&9~X?BuNQbz@q3hI(nn9Wnt5TVR2Q 
zFWTXGrs-qy(obfV-+K6g*-tKFn;aeg>#=8#88jpJBm-FPJT}F`@IKEyho;V>^{kZp zcJ%1dz^xOxkNe>iZ`}@xcn@ZI1b437;VyUAdzxdl+1f$%qr)?r&krwZzBGJg^BQ?g z^F8v$=B>?pn)lb*%%r}ywRUQqV2-5gN7XfFr&K@J^e1^=v(_8&RxgY!tiGdeYx$$~ zpGJNt|Dj%AGlG!nerQe)faVf-kNIjt0O*ikN>wocc81C~lyg<(w7h&nb$NMJwLeu2 z4>8tM0#l-@b^y2=!0;5ny)4SQv?a`mV4?~xr0EFQ=7_&65^k=O>xHHfk!oTkfpEkg z4o671IwFKPRF6d;!9^xrRZB;Z03it(bs95enwZw}-=ST}>~Y_GD

<(Q|>4LZEc!%RMm=z1iD_QILP!?-GuDQy&LtrSFttxRf! zV+*rcOGFMd0-Ak;5J@$NT;+)DrA>;eIo*Dv>Z*c^i~BUCP6yNK{0z=B8hu*V%8q3N z`VJ)F$DY%aK-E()hVCMf{2_duqK-*&-AFNnc19)?5%uRtcF{f?we)L-`YATdRS=H) zOI#vJr)*>pp4+TYX@z*xPFAy#8#$Au|6~o3T+R76HN`p?vYOxia{G<+FS#}SRQVL4 z>H86$W*|5$yStcYg796z23nA&fqD+)! zG7*+Wfc!+;Krou1FjT4}6mK?dOa!SJ=3UUPl92 zU~s?%l1ks?DimvJCwti!Ut}@;h{cSc(zDn`m78@+%ut9bg$|tv`|v&uTv8tLq5tCm zH;RmH*oV`EuhVyYulbLYY;B~OFZIhdt-9b0!F0;Gc4!hr5QCh(GbQ@0El_k^Z(xktlH@{Z)`6&tc{Qdyt z?3BfU211}Nv!e>w>X!|Hcp?}K1bqGkRM&oaEIia2264P2;pHkSll6(pN`t{D*b+u< zBH7U34+Q*!>I1OK9H7DE?BtT<*5ux#DhWTtx} zly#$6oZAnTd2pkZ&gKsOloRO7aBnccJ-wb`CDbXxHGtv_T_Qt8N_9TT$GyN&166yy z)aexr76r&k%B&?N7GP{VT>vI9-DR25(safj*9%G)O)am{2)tkKu`6Y0H(V||kU17$ zEn}Jv z00!eW7_0Gdfmvgs9V7op@vj)7i@SiCYC-X--Qq(edYizmfbuid{?8Ek0WdJ2Fc2UZ zQ?8KjaFqT$F!b}<{iIyr7WLg(pw;`?Qjbz-_50i+0EHAR@Fb%J*NI^R$?}h%@Xz;? zWCG3I0`P$gAcdQVL{G^1m(0&gob_Yp$L=4*AFRim$0YR&&cBO)x4z|k+x?mNnN{Nv zJy!T$yM-65|1ux9@%QWQG(N;Vs(MuSknu0-zi74BaqCpKXjdDdgmm1&ZB%IosRwCm zb&bX$Vy(5-ImE3^acN_v7`MiqmF}VNQ8e!ryRF@}Zbz4MuY0$oecb$n_^|c2wg(*# zIk&nWleAN96P+FIP2!!l+nx8h?~$~vwpK^0bFBMJX}Wp3c(zqr<{o0Mw+(U(cAsS) zE4Es-2DM)6ReQB%=A5{&8aA?7P8W9I$N6z@gdJM&!a0B5knsfWLswKspf2dN0Btsbkp2`&`h%O!Si zlhf_=H@R7)!)DzBH6NNjXQ`?;|JKQ}!OF_d_8>(+CAF~>jIx7WS4fY^C19Ez@J<+R z3m~Gs`A>V`N(V;)CFNKuI+~2d)XltdOcvX4T7t+JVL2w)4g4!GFzI&a=Ik7_3$S`X zmF=`@>Nw{3#`M}&VZlvb-@HG!pJw;p{MF5qzj}Jxk5u#U(_e9;exLhbGtNYp;GE{P z&AAUBeT9z7z4ZR)xwmO6gRLG=k{O^RQBqEh%5K3Ucr`euV)I&qac|b!>fM#rr>x1| z{8zGgvFDc_E~!mvZ};38RKCr_q_cj0_CHHw)hF50j3SFgNg!@D$C_eXEarwtRu+dw z)aR)z2a7056~|8*3i{R4%pa9W7ai$v z=9>kVIB07II>nvRbSgZC;-nQX+Ge1D?)s^I4IIa(kH0s%W?b8lDY+l0vE#vUk6oL4 zlOD>g7>Lm?-#qc!c!S3_>72`k&x1LDQ*_YJVBX7dO6NEDB24}{`K(ZQc-jt~*o^FX z)fZRun)HzBvDMQ()2dg5R%9;A-jaPd^;q?R#GAp_LkAOYmVceFK&-894UP(5mbpH- zCiAP{Z-ZMhFNR(Uf1EZ2_P}jbhc9vbr!^WVLJU2%M!`@j9acxmGf}AQ>x=S0ISEu& zuxUqRZmOuzy2~Imq?kk<+|6B1%DK(52?vg7K&*}XNP;GMX~&LLzV%RZ_tH|dMQB@O zbL7>?(TEUX9EsT?i?kBjR8Eu{#ulx%{o+8!vcnyRJD4nveV_TtD%-wdxdkfpivfxj 
zPeJ_uN7{~sZ-7?MibE{Sp41&QhIT`x7tSBso;GHkK~RY9x==N+zi3k;MWr54@@Hv& z0e)cFJDop=>vYkuM<@R~r+(u8x39l%`s!O`#tbaIZ_AS0cOPBYGx5=tx&53zH+CT4 zyl~a@P1)i1eamX8>zVANwnYt-?gj_23wwecd%{Tao}3z1d!~0nZAa~;&KsOJdT#cv z8+^|Q{g}|GW{!cxqs_k`@uurz*LN-rd{&Ql+uZf=XOi2~a@nw<9=BO#Cv;FrO;wby zsAG0Li-DbWsk6jeVA-o!o2-k6TVUKvYQu_oZ%}-8U`c=rct+Xda&;n_keip3ttwkz zwyA7unW{``y?-|io)YlJ!%stGCO@3`DA@e^jedRL_Y@J1iE+v^zTwlq&| z|M*YuUB{*s0WEtTc;LxVtyOowHgo3dk9P^feQb5!f`_Ab(hck9)SO)#wD|nVHM2JC zzqyJveaftVGk^WSq7ml@93GrSzy6ocFwSf}=xL)uPj8h|Fj-`o$pS{DdS+CDl_JZm z6gJ6O4>?MTTEY6k7@>vlF0SoxTG;!R%}DP4Lb3Ed@4h^&RwS zeV@e{OU?8zIMM({xp3mAuV~~sDN7BMtN{G~#hHYY2Y(Fd44j~*UPJJD??$!S>SpYwNF1VrbJR)>hgdW=rg7PTGFXh5Y=R_0Dv^$Dwm^MjL!grV+Qg0Vm9bhC9!bgOjhVSbaUPvbnncozB2LQYX}on$%xUzo{({3A+?J7GR< z0Pdo0E0HZQjLfW45Ne|qJJeiE%XPZrfI7JzKWE5o@14^MQFvjUwS=rNB`M4`HQBxo-;U3fb*&b555^ z4Hr~xsmHEPgp8an#yQFgl8aJ zoz_m9aDbztQ*_Br*(JC+uPY!0{ejXlS3Otntagp!Mmd{Z?R187n(HRl?_Do(FFN0K z*-eU{kwt8Sf+%YjMVsAkvOB_%HXo7$u~2Mjj3Y5I))w0rdo`wtZ77Y!lBND|DKV;* zE;_R=sN>AKy}E# zHkk-}*ncQwOPJINlM#;ol%3{vgNjMO=?=N(fJB25mE=@blI!a0q}7$kL}PW5oJiJ5 z3E5m4ES+7ts&swnrqTnYUzci2_i!u0D7YX)xtzGnDOV!kQm5?6nw?)eIVUu7+c`Or zg$TB?TNQFVjmO#XDXI*d6U${s&`$T+sXZa8aJ+VcYQ5@!N`TiqOrxY#(ZMNaOko27 zDfvng4?_k|_bp@0r~6|`>|5q>A5l2bWgUmn>K4DkR$RmZBn2x;Bm9AxJg%H6Q}rJ% zW0_NqRk+{jnlwe}cA94G-(0Q!y#i`FpwGCj1UBy!Ht#37)ttxWae5SdYDbHwxUKKP zhg`qx=Cm#x-QVUEi|vZyt)rto4A-hCKM=Q~He0RD_LDmP?O%TN*)OjQGIB^87?pc& z$v>|7Z1M9-MLnws@=Yi3ZJ)yC0uepJSDtu{|L^`XFs%)kwi=k$!qv)mTC;RWxFMQt zk&P3a6Dpb;CK_iuJ1QnN%rRc*ys%v~&Ww7wXAvF^o|cgUA@ zUpF0T`6u~?ej`TQxNQx!wpgq!Q85w~!}yE}4cFDRSgkO6)j_jcR}+QDd4Sf~DXrna zi-}MO>um{ZBAkF7ZA79aQJ2UjhE^wP5-{>zCi}q4>9rp1P%g!NT}SH@EiFwA4NcK# zMMbiOv2WIxO|VCm$`-S#Qq=^wnypf)Jgqeee8I7FNAf&Bp2XVq z6M%C|{JdWpxrZhc#}h~4?4mRt9-HDZ=Df-H0%`nVH~9O4_D4caX;kVY6i{ENWU52v zF{CgQM;KSpA29%A!a+b}s$Y?*enqDG72%oMB$k+*h>Uhc8^GiRraJj|jtKt@e;|Z!89n@c5@HG)`UJMU0G8xcK6z3I+{$z zqW)Ag5}{(mLU>766SY+*(y>H36-uN$VXIZ~Rt2LLO|-$0p_kjc15`a>frIT{*&I#~ 
z>jdk1=Au)3Iu9}ah;n}77}HV|BveQ;&|(5|VJqn8dQT&b?C{np!uZppT&FiN?&9Zr zDqx9EJ&o&a0n_FMQVB`}Por7=o)t-G4 zZfZC&H}WLhJjGS?gGVk&|K6WxyfW#-H#%}r=(SASH8ZF|sH`lU@El+~74H1;LG~1( zBPdCI2TtCABOAbTPT^PM3mWxwt~~KE%|pS*D)@vZ9vmvHuwCl8#Jk#lz2`Rjot`b4 z&Gv^pPgL&EJXNyIzTLAcu)pM3wL_0jeWfUWujLL8cSXgTij5VIm29bazWUATkE*q$ zkW`dPo!Yt&_Ms_BqCo!(SR^tEB$pbIl^qNVUADjRAv zmfG(V!+s5GFp*FwEQ5xa!?ZHo6rK>C9o`h)8r~Z|7}kb84X*W7VKr-CqTZz5t3Id} z)Y72To|8(@4w@d{_i@qS%9J9C%1b$(QP~0IS5qVV{=?vtG89}}21vw;FAIYMCiU(m z8ZhK_`J*I@AmxwsShW>e?2`C{f{B3)`^tkIUH9Ot4o8VCzmK)R9@`PlLitoUvEy`| zc3cPN^ufV`f$f(tcxHEmI#1zc2l3OMeD&9l9eR7nO%qnHp1Uoi6J7d}dH1z#>R!q~ z=J}z&9P{M)XI*;H;yv?L-h0oI%XgZ^n_ABstcUN5-t0-;Kd%oea7e$uwTKgjo_*$m zsk52>q#SEM6(^>Aq?E?CF$VGp*&tRb%wxplbF#8cs&q(_(-H9nG(0tg62=Z3HJ-O4 z5!Qvm*u&<@Dc%R)1dYz%51TRboZ6F$P9nyT!_J)1&GsesgLd97mCd^K^yOfSpjg0} zY6|8o#vB2srNi#SSP16C`m+LpvKYqVV#t#&(9tSGRcv(Wl+trcAB{X5dy?)lJQdiP zd``7r`-bqo_OR-6t;H!+(;C$noBq6Fz9_?zelX zI`_{V@87PlJ4J842j0Eyws%>;XDeR+bMCumpUWNn^TTX!5QZy;xEDA5*MDt70A@4K zg>k^l6gj#htcTJFO48$U2ALNe@5le1JQO?>{v!T)QWJ9|og+iz28zBx!Y1os8GpeJJwgXz{aZB9^TFy8=! 
zxs-nHv~|X@0mY{b&f@I!0M3QthXrOIRAA}~7d*&-Yban*nPkVD7C>41RThQMeK1t$ z#{>mQ@C^>AaI+dkb(OfcgGl=a|%j6g~*b5I}Jk zs8qJVh&BKL`w3a}R=T`iw<{9RJ0qpK4n5GIr!l zAemD4e?_XM6)nJ;$k_C3cxXMM@{Qb4VJ-r@;`AVgaHVV6b- zTMWiYo}rrwT?!29r60+zNN6Y>y4g;b*!S8GfD{QhL!bJ?DN?j-Jh)&eg79So2Ap7> znHn6Yk4~G+Zl@g{%tyC-?ZE)-4~6gaFj5b91R7>uH3OXD3y?`P3fyXh@)N&ze)Q<9 zzc$-SM7LP=-8Jvz4pQ@rf2I1V(wn#6{)UI%|KJP5Yt53yBGyc$-j|*P<@ld#);{rv zTi6bduPO`E0hx7VzZ{p1ZK~C(>x|b_Z#H%tx2K;?zme9v;4EN#Q4}M(x(ZTF!OjWz zpbk7JwW?k!%N|N2+E{6X#5>BuenPAvsiNGi)@k)PY`9L=*TV!F@*Ge=^G?}R>5v^u z9j`hBhm^f|7k#uYNj+=mjfpqf-r-%(WUaFtZC{`D_t1h_DsdZYdF5b1Mbrq{&SbtUSYT;@?W|Gv>fLz^b zUux&x55FG&GJH7xWB6E1{fn~21uaKbcL{%C4t+k!y$b-Om0tTAR4Oi;o(w$h=%lf5iaXpI^L{f z%CoJy1JJg3vK8Ug(GAhf(XOblH+mp?D0(z1M5VgaoB_b9Ffq*g0|W!);z-kBMpBup zuHt8T8pTkE#A#5x2TH_a#DkA@q_mKkzV8meF$txNwu&WX##&Zc?u3tIzxuw&(=g73 zX8R>1i(ON~MT^@X&Up{2&oU~4Q~B1~n$t-Ke@$WUi@BH19IyDKe>3*dr1Qo%AOG>k z$KSkh|1G!tK9?gk=gM7>rQk`B5l_K62&!w?bzowd_;}XuCk5uWY*xUoXuOHhrOR)>=wdh@>EcJU7hinywHHmzUpYEmfonGG zqE8pioN?FOp+61lue9{~CiUw>@0Tx_rV_OG8O(;T70xCs*iA;g7Hf?evH7vfVjE-M zj@g4A|6)baJhlqwVyjiF2q$CPL$G6|7sUPSo+pzwPX#h+4>5wWMYtheiv{rCT!4MC z%JVmkP+NgvbIL@|8EDv;q_&ZD4(bugwg<n^#`w^`WCHjA63 z?LO}u$6byeI(8=yCapWE;@@!vwEQsS zx(!+mO!fd$1&$1&98Mi&5-AmYH0Gif&K1E&g5vW9RDq7&pa{+HUf=?AC`CeqHH>i%8f~c`rG(w(bCdaXlZFHXibIofSdnA-n`4} zb-T0xvC`l{nUu8XPR>cw5=yYKny$~RtGg&yS67|W8gnRzR;9KV6olZiDB2uUg)Hj8 zWQAaRm14~@O;096dcsz%H@4|bO^uDBTbBcZLuaB(As5=Y4g{+8|HLC1Ddby0<=W6F>r|5U8&K8`|K4f#oCLUrBfTtLxl8NOQGG~lY zRupo8P5h;dSJPfoPILo%)WVU&1MqW?4T3-lQpd4Da7g(GT#%2z1!)`@A(Jv z#6OFUf?_@%pC|me1#bkl8GkRlhy3{4$IwcHV4B3M3e<0B&=)A+L!B7FNO&REN1)&% z7IYRCwBpE7!K%X9Gu*u|WvBoMfayIC9{vF#3pPwHR#3_N=k!#9AX(Xz9;QRI4Rcb< zajJ1xB&x=-r$;%9FR+yjTvt4_aD276pZ)NwbXGh~SM}n(>}$pEjB*|S!WwD$AcZP^ zSX@$GEz!+`q5FY=NcZo+++1c9mWQqh-5i4PQL&h8Mp%i9$wHK)q3wYH&~X@s0#gHi zoD>q$leX%vmS;`)+g2*UA_)qr+1QX zvaicG#b@|>eKVYLb;ZPry@`QFOAVXI#D&S__T|Zq_KivFM0;y;hJ8kIq21cz=$OLw z^Y~;oxvzWj+!rrOG+joIrv z3|;86)n}x5#_5#opxx(kA(zLiA`q2sZwYTk?CG=W%9L7DO|Pprqmtxj-9ldN=_Vjc 
z=sw(i6fN0eGwXkB?nMmjJ#I7*eFF+f1EfKOPSg5BFsY1(A}3%t31h4_eCexkOi$cf zHYRMe3Ugy(yirlID%9G#C<^LXtnOT$r6$d+wwNn|HXGLZG>cXs&`p3T(i6-&LIR2+ z&UzwIa$k`g<~sYKV8O5b?5|;a{2E>VepR+T*?Jxvu+t}qck_%=j4*4C3}EDXi!v1< z0tO=7iybjgvfMHmF-*+Yqb!y27|wfZubjF((|OJ0r4wiJOxgSAv^8HowU0FJ{<#yH z8mIK~%O~lAd55%o;Wab*`ex|oT{yIlluo?YSTJMd5QJiD{35$nNhDDn2o2Rl8;U5US;7I1HMyIV4l^>@1`yE&b>}Aug%)gq zn2umPqR;>fnX_lq^f8gzwsEuQbLQ=$#dj{bbZ+NTJ~iH^HM;NbuDam*^`os@{+_ga z(~{|RsV!d_`l!4-i_(%CdjEdtnkuw?% zPpJI~f%(A*y%fgpe(_c#Dg!^qZ8ihKx%Ec!$dJw z=zy9eXJ9O)dfW;Q4i^q~I^g>IMkOphy8`+g^0nliuo8%LD|84d6!_Z@=AVocmkY+4 zgAStPOLtMTtFb>z7-6?%d}cJt)m8ieVC;uXe*Re&T=@2`U2oGbd;Cc8Gz%O)g3_U< z|M?%@I{v^9P8^49nt&US_T~V$G#Nb;T)s&)_-$`GA4%NHJS!MvVq5Fk$Zn|L(CmKB zcEt6j{Y~euYu=pjuI(KZ1w?b48|-&TJEcA7<^#qz+|XFQ5_OO&wXnHMx#gF~Y)MnK zd}%jA37Wi54<&U#B;I_+?W)VLZ5F$b)^oP%jL*Rw@wP_6tEPOFb9waGQWWpf#xPs?!GIS}GCO;KGYL*CnzNg^8;2vH5NA`QioV$PVl z*~NUW6Ry)P$rYbapP^DgyV6gjEl1NQ(x=gSGuBcDk-5QM#@Q)F1a<*XA&8jeMD2$WqSDFmN@>FNRpa2HJulE& zc{RtAi%63Nq_^ed)5Res{q)3^V>@;nqhIlrog~3e!R9w5%Kq{cfT1VP7H4fedUWg7 z*IvVJx*NOch0@ot+J_9~MlWl0&ULMkzaif)f8X{%(9S(N!<~M))`dpZzq7567DX zMXmB>=VeU*?{wCdjWrJ4IkF+56eE}ygxZng1gY>MN+cK4Vndey7XUhAXfr`QBzis^t{=ai;pPqaunAJvd|5O|hWM8_X7B9rqt*8%a(> zSCJS0bDnk~EkAb8!jeUG}Q;~zU;4Zbq%t>7EsH@bf_?Z1Mj3hzz(Eci*`&(mag&=$6KJEx_Cp>U`> zF>Q;sy>Q6qUKG5rb9Lt${rb*t>9=>@u0N~{D6Tv8eQEK0M+2DD6O1Wc1@Lga_E5zn zp#UPTrFo*yTOql?#f|AxrXZWR7jc^Qy^=~q2ljmJo3Tt{=aP&$^BSa^ zQc!u=Ea!1A#Jo5V*V!>mR+G74F47%J3wmNwnsul+$kXsL+!IX;;ImAbT-gO%Ca!mN zrYEKaWom$%(q#3U$-d-m;)mNfW$EwLm4Ykz^x5AD5fojR4z=x8I-tFSTDXhmg!#k0 z)%BF$aX{$A9_xhLp({9a(vBMi^F~*2m`+>lbOoF0bOjJdlKve$8M^5yGAtB2m>+_U zlwx6K9hM6=)Npi=3Js2;9FOi)fO!eLA3+qVQ&Uwcf-c=?db1SQ>zse9+HZZR?jBDlt>;x~1p(>2vaVwXmw|%1z{Teh=1`4$e)Z@QP9OX@YrBj$VAo zwa@je>sPK*F6-C4w|KXEAMw8Ee#32xIH&?>KNx}=jF7`(u{*$eQJkTG4|OLMYpkMf zKYP#!r1YvPyUx(dw;~gBE4QJb;ExQYu@M+?b>>AO=})TQBp(KDw_ki`u!$T!Aj7Ay zIKaOv7vP|zJu^^B^zJdsgc)CnCtPlK+?f_!iHbC`z$w1M!_}yLZZcxQO&%YjD${1m z@s@B1=04B=EOuVIe&LH9iU(CQ)jzG@*3ap&_meQB<)qw0zx>MbHq}FQj5D*pbFG-C 
zi@YKYJ;$YZj!UHFLJd~p3YTSHSgZ}RI097M$f?LXDk_`~&1VKu-HAD+Kx$2f-bW!Y zR@J9gRb^DIhJ}5Kn$d+CS0t(<6VvB#>PUN6KnFp@Pl$k?@Sx$W^9NMNQKZ-tz+O8Z zV8?@`urtqWfyS6QF8}B4u)N$N9nT;nT^$3X%R!6HnhRP2X+gFr=94$oFj!iIpFsum z_hAp81Vip5bdDm1oB#q={RlopvgsIkS=u_KTlTzsgK+@|#a~!8=U;YnF?__`h#P*; zyZBl$#f9mwPfZ+|WT0a={2pYG1;jgHH`^`m2}-Ic6`q^UI_1g1 z%f%}KtCiK+$8pnRLD@(%Dq_-agg__qd-5L1Glv&5(NONt+(hwh0gR5opj4>@K@*$p zB3`Nla1t|N&z`7{r{c~ThCKtwbXCo+4oX%KP@fg89)NZ-!Bqk{6Mo$#=`N=&s{__t zW4ME&o)7c{9uG)?gKUiuWKP3_D&Q^hI(Zi$9EKmvrz{Ih^M<=aELlmKzmakMZ zE(JW(anfYw+EX6{V5nnIh6{xogqzbDo70pFstQ+yXe+q;@3l@vT|N*;hqTdrpe>{S zA;+&hMdU$t{dWKTjA|>Hs0;dQ_$#l+x0y`@@?PdHO{>^<*Xuer!eb9becYsrA zq{G=PPO8(&8J1iXwOv?Fs`IS(10T5&x$X!I-y4eGZNaZoOj_fI?fhYCa}|VRv`qDI zN!gH+o@TK4hCM^S;yHa;pd~{-xX0}>#O1X8W zXb1}E&?2GZVTjhqXFT(^5Ydn_+~zLT=B0$FZ!`4&6dj7EaWhWM$oBm>C;;*P^P{u> zBS?3!t6S`q=j&DUw8`+6c#m1YGMjHREsD1`nDbkKJe#D!1hvq;%h3Vob_pFZ43}3B zyRWBuYWMmj^y6u960`!U9SK#iW}SEruqOD{;RjuyXZsAWpSs*0p{S`2{vxmUuYLnM zVuTknJci&vwPRBMAj{z;($XSn1v)O$e9GfJ^BK!>gj3ylT`{*OF3yTI-e&y|ZF8th zujGhz4KgwTd#u9Tmhy?8#OHB~UPjcD|I3Y6zMp8{PfRG}6xB9?+sSegWrp1KM}2Yxm~@KZB!k zq)#b?9_MUGw-h37jf8oI=vcLFW*GdpzY4z<9};O(MA`zcIsX>Ejb$oNjgmb$0P?@+fa;X!h2YdauU&2k&3#x9#p*d zAbU8sQ~@4*)-E=2XT5|%!EuQ4Gy1$K7#!F;74y3@37J>6dgOWC#Ao4ZY(|w>V!mQK zS=y&XW@w^Pm>>L-zY1M)TtcjW3`8s1>ei&KefOwEw} zctyJ|9|2^;TKX6l>X?^=^JUil%bZyRbLk^8>=YKfN`Qiq|jq;lPqF z^Elk8;#}s4V|Q4wa{eEcEtw@~-c^lZl`vYE?Xqn&F6mALm(rIx_j&i$X_&YrtKT3s zrbR+65)y{uzq0;B(J-?QD|9lW%rS$ek^1uZ-Z4(I$`+9zeyJg3K4lsQRJo8z20j&v zB1duULqk4;q8H0ecbhZdU)djU!B$mPI%5B=9palu4Z4vI3;6d`AL{b&eVPa`73kK( z6m6&C>P8L%bGO2R(>t~%_2NZxi8_1&Vgk<9F=@#t)v}r5nPijo^pjy=Bwfl4I+i?o zJeCdW29|e_*nfDY1RcI08kZy9WZ(}!oN9=-! 
z|M?!Byf^)5c3T!H$cx~e>>tKjt(!!tF0RYi@%xTXl8KpEN4=dlDa>6^FH7hbKK#gm zy$}{QhF78M;aj?CDed*G;?JRFYIi}|G)^J=f$S6|sCaWQ1|q3rP@4q4ev2@uE;cP4 zzmU|v6O?R$3lK!CINBbNqn0yQonaySz^8;%{IM6mlG*CO4(SAu`5k3${CXvf4(d~6 zs+7fI805Jpm1)@!nz}C;9+gW>D%IV?H+wWu{fav7lu;reEnd z?oFL#UG>{=&H$v$BIHW#L>HmP8sP?xV=drdFH2D0iaI-30435Jsjmi(xEMYj5wavs zKg>kLudK<&7FwGk7GedtR-i}I;|W0$jO7ZdCg_Y6_q;w7Rda8+9@;dS96x=9*Iq-D z@rx`*kxml<&I9}WLTSS(;=G2Ev+Y4awHm20cv9s8cJeP}ygF({HRvGH(F%md<85nE zqn2J$^~p1e^TX80cYo~xLR+e=M6uy7-5zE_H$cAV0pHq)_qCshqaZ-s$%pU(<_u50 zuXVYNnJp9$`AP3xtCENt&+ya@DJ`e?4lEh{q?fJ86^l!E%aipVa|J`pNfu))=<*-i zDO&>)g+W4T2*WE}j3LMYS4}2hh_5un{_$vUzJV}s_P$RQrGK{m_?ZQd$p?)jsP`%Q z7Z`s}W2A!q#!JmvagFq6b~`>t8eg9;c+N*Xp}+_s8xvc2D4lxRgZvk~X2zzJ#g7ET zi9KaWRywKp0Cf~B=UNI8KUk&JX$+so@5SU8>8dGzR+2)5Q36dDwUNS=n}2VcpZ+Z8 zoNN#`i6jwQDXOM8-`s}%rsXk!homcCWh1FFd>xj>0pcah2?bJXit*#e>%d_jb$d=! zc|O;o;>NPw&@z2R3)Z*q={#noT$U|tm48wjp;>}OTc?xUr_(Y`NcR!FxY|HoiSj|95j8hEJcDe;bed$6V$A#9c>}idIQfS%OB<$k@?B z&ra9b(7{C4+Q#0Efq@?Irz@r?Agt?bY~sKuDD+9d{L294AGR$l%>Qyhp%bw(ur@Tc zGXC^SF;KL&)U)~zBWh}A?;vENXGicaD-^xI5-^Mm07I7l&wYoHg&mO7KmVB-IS4+F zPmYP1oq(C0`Cn%yrq4227#ImynVAS!Svmf8+WG6=&&Yq~!Uialo`8*+0dTPY%O!?| zgPnkl6~M7G5il|Q70trILcl@K2E)b*xW>*#z{tT1rcb{Fa!FRh7r)AjLd+h zvq9|Mm;wVB`Q~{Z|Sw##!0^|J{P-(_CG?i!^r2t)0vAg9z})D5x9S3V;cd z9uAtE>)&LjSMNKnEQ#V!ZoAfN3Q1{-(F zbD`>J?TpfYk^a!}bb#)#(ON94ocWefHgTCt{46Qq`khFz(OGBWhwZpItv=ZU?&T$D zkNd4kY}uXt%l<9&dZEt&d<&4DvpvsbT8nF+Sn!3mix<-m`3oEF+USRASC0I?yFI1# za`>^46Wq;3>K-QZ%N)Hv{|6k5i=&(m??=|L4h5??w5A|Ws&(psNxqpuDW_){K8gM~ z!t1BH#AwLLWjaj5!3~-&CP$VF z!vWdfIcl925m|e6a;!yKB>swNqoURrfZd%8%*bjqJT&|l$vq~LB7b=?(kGGfbZ;QJ zYt~98B-2jg&2TvRQ%70F;&ZzTBT7b#+nb`bX|xW`82xfimcBlp+0j8%&)^i?AJAu{ zdq!<`Jkq6l5E;DWm2neY7DpW{648wdqP!5VJA`Q=eU37uzqh&7Pem#xMKxG6ls3%q z7%Wb2!Uz76xVO%$K5mSbU3uS!)0ip`Vj8C}daWbV_$ErWC?OQQqJ|?8q!wOEepl%0 zPuiJA!g+4GhZkGYKlP(5pTkdl0_&`)c35T+|_rjjCYSHG0n zJ4R|(u|>K}TsddHpjC^OrJ!goZrLtSi&AxDUQw(nw$;vZ3*;l(jg4KSjzmgygh6Sg 
zp7m{9fo@;lYVH*Zl5>UidA^O!YXto@y4NNJefY)~Ujwa;8%9fyOgO+A7t^h?nwa!w6i(-t2tg?}k@5YTyIlJz9;|s9|b0+<95~8>ySThU=sx)!VRc7?f&ycItCp zi1&9A?~E3?C|}h9Z^Gx4QZl>7;)WXmHxgEsM#Q#Q>HVd`j6zMP_%*|!9wPMmtdGun zAhQ)aB{_sq4g1`R#@i>c?|1InRm8WXnfYrD%06V;Mul*R6O7ue#LAO%K`9sY=^y_1 zzy`rb6uPLfo%=0o6H{XKe*4F!t)+I+5y<-NQ>7ZpFJ;w;Cg_>^gm&R@@vIBUw_=|1 zbo0;^waA0UJ#MA)&N<+x0UJGBYC1u$<62Zwz*Edad=;wCe52%yB2JRBP@b|EQ?@W2 zDk(o4?;dK}suN!`-ROLD@z(jD#4&z72y&3PBIr}&O}|G)>ATfk4xbQR<;q_YO!8f4 zSRO3lF{4w`9=I}0KArQZm15>!rlvBBG)<80M;myVXH}zVf|lq&Ip~%gU{2d-@=qRo zTRvL$B2By?A&tdJtIV@>D8J^<8n`tnd8|I zFVl3N+M21JU9xmv6m729`8=0n$h(01G0t#PtFDVzYQE{!&u(J55Bj)Yzt%>$J0oLN ze3e`}u+(uX4BvYsWBK=;jJTLmd|$vcMj-qvkWEN^cVr|t_ng1Y{rJsoGcBGAfm-Hp z)0)bmI5Eb$mV?l?_+F&(ny`-lG4Xx=edI1mUuhmhlGr;i58e1{a2|t~bF5#v(7RZz z+>!mmFL!ZYXTg)-N@L{{2$WZn*SY$>z;aKWHF99)^*w}Xd9bNdm~TrISCk9(gs%Im z>^-XmcO+b~%We%_a$GBkrsgt19u!|w2d@=@rY0g5VgXO{wL-&;meE1R*B#70^tJ1M zM#}pGs&lr@D-472W~se1RqXJaWi^*&jHr9gOl>em_(IV!4s3}68`88q92tpizD5!! zhY#ndfX1BhiU|2Bi@`z!{U&jpsp{qQcW95cPFqy}uunx2Bu6o|+xd>XKs3g69qG?E zv5ucTa(ivjrV%SWnt6R5lKo%Q0Q z*zlBZk0;=O8-18_0gHE3p%|=|mdJu?x8Sx?qrIwDOLn+V)+8_Ap|_+0Aa zEqcpT>w7pItv*+u3I~1+nRO~EX)5*cLNEx9i~&&;=WYKu_*KBcR`sU4cD@(wNj6Dj z*aIB#C>(1{C0s~0WkSMdNo5M5clHkXzGi;*(Vn97ceNR3ie^XyTY3~P3-eA~G@Te1 z`y}&BVBU^n&QYL8&?(N1s&|~JWCEqRqKFL6x5qKVN7OgESEc97520q9S23E8t=YlZ z;Wl|E9%jdd_-XSzPX(1&wcpPw_#T@i9$CbcY}e$TX~N0k^&NXMsa9D6w>P(eB=wki zCQ)arr~O2&(yDoD2e%2&NG9x75rmVXd;q2T(V*JkU8jM$%uoi`W|7 z5CK0FSyt(q^RU>Vv2bi^rKe^Smoqi1MYh9j3&WrC*WfQE2}`GP=34r8R>rimMA;VF zS-f&*!2KPA*q`{<*OJL9NpZp$`VeU7jAGQ5?`$Xe^bV_WN8T`YGC!Bt1>>RkIWkKQ7U##)Q3%)rnEu5z8@WFHd^aFlG{jI&V)EIXJS? z^SH(>7FH>-(i=4%;W>h;#>d2l43PzF{0m`jJZ4>}D`Gu5$sDi{k8-o3B#DONdmUTY zkGQHTJ0NH7Ej_%pFJuX7UtiYe=Wi5lezNCh*;PWXHYQqmBx6RGjV;V6)pvT095S;m zBSfzB4@H|yEx>>e9l4c|5B0DjXJiGOlb4q3HX@NsE0p}XRn3fA&W3D)Ty`sJZtRw_ zQ5utux8YnTef()q801*V*P2RNIX(OK-D(eHO&yPORd=zCedb5hkv!8zU zTrKElEJe$aDYaAf8oFY~XBqE5f9;XoubFVK#uO1ZfW1`av}HCS%*l0>vRFAAR3Bk! 
zmoeFF2}f$BoZQpP~>V=k6(JgtKUr%1=pkxRRPo_(?h{hW)zZN} zqM!WO@M&U4FsJ&YlQcGsDea8taurx+h_wQ+-1G?;6ngR%kql8J3)S08GIdphag*@^ z>-+?erXoqnv6w=szT18OoUq%@7J!LnKlEOiy?3JAHun z?DzMwg?wpTi}qv{BV*S5BPN0Vq?yNelXfPm^(H6moryi#KfCkpP?vo-dv-({zvq`2 zPaRY}W}UNftfxAhK#-O|RO^F(I;kU5F#TkDS?s?na8@k*y@9Ewb6>%1@s%S(kE?JO znB%LZ?DJ2|nP{+?re&y#!F{;xs9zGadn@!&Boee%eH93Zxf8urnV8=yA)FY2bnppZ zzWY`G7PnzcVvtWlQ`#9q^K=ijHNW*#k)n7Y&bhd>j(i^w1a&nRte|u&JkOhr zxUm<4R4-j~bmE53tq>vB?)9_bb7r?5XaimI?Qq`ZjUD_wd35e#)otqVC1bgacEq&S zK%I7%lD;1=;aB}ldTs;kS-(v$T=h&*mq7sQS5IuuppK8Tn-VsN@gmZlTf^F3v-eB{ zIkUW-2IjUVk?Ra7i$3pzc+bYmw*w%*#NC!=ZqW6{9|GwLJJP{iGpdY*)s`+%3Esfw zbdxnnL&mWVHtZ*%3b5U&lWX&$qWe!b`N1Kc0^n#+w0iIZH2vzZ=4x^_M&C^+ll2Ot z1#jT4%No>ixD0KOmL;Xt1!H)j58<#5RQNTAA>7x%(gTAYZw$DEjx=Q!9jPMcpzfmN zXs`X?L_3y4b-$8$t=5(1)_|mmWs7RZmH1qj_;9x%B4ktb60%1n3VNYvVG4GKa*t_{ z-|5=WXO(clM1_2-x=ViRKV7g{OV5`%>Neia-oX=!8#uQ@kXNy`?{%@D@vPz7=#!m3 za(nBpyj_9m9o$`u=tuH35eu>C52t$qlcERXaMyidkz zcVaAD^x>C8`CRP7VEpzla{*z@jL>O3PvS+~J$Z1v|2P{`O`fC+IZDoL?58@m0Rn2P zvJLk8M;uD{@#WKXEqmlb;n-uUlDRS#U_XIOwc321xFyjn9Wji@e1yeC5UAt|NDiHG z5n$KG0-L?VXtXc5>@-#gf#2wtzF;(=R_ud8vMr>IC1%lxaS;u^kiZo9*zB*guM_3i8u4)fHxnpq>hO@0lHG$D! 
zkuXblNhLLiT01wo{Y1G`8nUfAXts^{^kp5?_G$&^PP{YAH_X(;0_(Zq4kMl#*y<(c zWW50+pyJgj0Zi8Mh@pJivG$^?>y7qaTN|PbJIgU48sV&rkd0ARg}Sn z9ZIH@#NmgY_RY4Y4`>w4iPN>Bywk0g)$b3{k1uzk;zmUcry#r7zLEX2C>Xwt&4ou8 zHuylW))UmNwXrMGs@^StU2@bNt6To)>bX5z+x)Rt-0CYokBvQjYaTHu%)dfBga@ki zVPSgxHO*0xI@P{5B*DAt(=jdrW!JT=mN|i455iCZ*#Y}%OEz0@jGHmzoci6qgHL;d z9AfR1SYG9-KTTVng;{7$RV>sKywZ?eKMVa&P=BLc-eJK?4{4OhsGvycc7}TMB2Zc> z0`YgHE2VZ-Jx8m>jOm1XLD$(~_t%22nHTr!~wvbl~ z_O6OyC%xHFp7#YYlKj}A=PO8?HNq>*(Yo%fv+APoyzHr}2sSDniQcfAcNJwHEOk=0 zRquZ8TgD?#JFA9-aFGiQR`*#*jMDjT310X~+|$b(Z5Mr{h^<$m!-d#RiBePb@Z~w$ zL)GLmzU#u{s*@l!y~^y@bPls6JsZ1>vx3PckY2984*aKgtK;SGc@o9(V{Dhd9|-Ii zzrTuuAJ1Or?xySs_r#pJk#3q-oY&GV-54I3&*WH9Qf?YRw#A)f>bT5#U>j7?pY1ui z&aN7mX;KZ+9s`y=kANQ#d$_lE{H|RaZnW>k^Ordl48%Das>a36nAEt|WRc+$Et7J{ zrj)^PgTtY=NRF!Ogr9)iPPbcql-c8&(1aB&e$IZxWl>6h!F}!06kW3A;nIIfRSRlb zzu3!Wl``PVP#=`hVZ|C9Ul(Vx_6G3}Azdd5S;iH$AucDz&bs?$17FrRv2WLbYU1tT zKxiIuJ(P0MTq64;B96AGwefn$r9ZPdl1uY0K(%LepF3+H{^JF(&5veh^Y=Db^R^8q zj*v{77?s_Wb%g6L*oL?%IyH**L{xzc)GaF06Bm`{7tQaOFRowT5%5%MVGS4-BroCe z-f*G4^(fdTXU1RNH*>Js(Vqr&Q#Efgx}>4r6?@;o2ku-R;a9y4Zy|MkQqJXlH6w z9Uub-BRjy^LeIfS*a$GcWb~Pjy@Q>Ro+XS6aHR*di{^adN5=)vS==4TXl;_3TP?L~ ztmTL%3SDSc6T6f6sYO2m7FT%Pluss7Z&pezqzMQbMSW&KqeUQLtxzdyjzlm*VLdh@ zzxpzzY%!cvX?lW$3E{8Xk2NX|+nhR&59=(C^Y@9Ci*@@6o`=wM)aT2Mm~7WSW#vq_BVB~mTAjzWz78)F4E=>d z{;C$^yD!Mjh?dyH^~GxeyGyf<0FApoc#Ir`4mCtNuK-f6R^kSNPVWv!Fu?ouhii7j zR>FGD!N7(t@6s^JhXrR18DL+eaFz|0SEk&4AaUK0202+?;SdKBSLJU-Pv?Kqq(Qf_ z)wirOh!s%4`+QjhPd7yhQ7l2e?S)W`oJ|-k?}TOp@1=?7K}&==4jUywuxB)G2*__;?k?}$5aZJ?g|LZU&`%uz3EFyaX~ zB<_K+vPxAd2B3X_clo|ndgojN$*Eaz+nRZ63++vd2D`vaHl_yy7KiYEnSx#dUxJ+S z8yivZPu*~~t8c8Tjpdnm?NO$y zAZZUhGTDb~B}TUR?D)_>O7xtGsm(;F!Hl+EWGalmvWL8}wKW>reyP2}23706Vlh22 z;6E7DC}8#;k+#Kdz={JQfJTA988(q1r*TFgy`g-3s~{K+{rbkQx(wJ+HOl!(zjS98)@m~J{1C|RX55(XDGnJ8yI*=T|TZ?i144UigP{{|w ziUp1v;Ri1|{a~=2-hD?lbVp{>5!$u^R>kIfA`^9@C5xsS6=wcsu+10AT6b9d0&4!Y zFbMMt`!KynV%#O?CwBl5t6mHOF*gKKw}MQGCCG2GdXrvaHsr`9GEs3XXI3c8IGp~w 
z;kss7>OEUavMd@=QFDHB-3VYQxOQv{49u*M~f~mr;MtkA<{A#ktuJPUP3yr_uMdR5)-!gpHF~fAOS^2x))~CZH zz0#c>bTr9;-=@~0xz2_ki-Bc@pmO2)7( znSur*Tv-iV#t6a-5o>U3{%-K`m%39YN9o4_6M^|Ge}GkG^JQ48mOO|<^nSDG5;0zK z0ME9q!V>Wp%mATfA{SSe!%)n zV}U6l`VA78z}eyW_d*n~g3yA~^mj4qg4l0DEWBvlGp;6!GUEfF1MaM4^V* zfhZt_`0=BGw)Jc!H9B>wJ`5Dpz&;keq&feoDHXc4inTT3?ISyO%NP@LNODC zR>0hVj}A~iQ^=kJk^_W*am*|>S`twMT+Ns6!&_h3>`M}LXbmDob0BkIP@sEm&zxCE zP5=xY0WN<1jQCpo7XzR(J{jmy9K8}h#jnM4(2|)sb3K%fev0H2pYS&WpfmnvC>-rR zWBwI5@~^y9Hj!gE$Io}{4@EV9340@m+a;T7z>?ZvJ#1p0fK)|t`3bFqLf6*_@ z+ZmsbgA9l$Mh1^WktK$~QJ4zx6HS-}02*^%4)We7fJai82y=Xbz-Poi!muDCH*9cr zau^UkVZt=Re~{he6N>+00CdLRj7fz3e*#UQ7H5Y7PD&sZ?QmwU!3*}0hX-|rhw+Dh zYYdOUh>PNq6kx~A0ELDM(EcX)S{M^AI4lLF7>jrW#V?5_9xkVbm~rA`?~`E#m$#^! z^AMdA?LNK8b9ke1>k00*6qLo9KLmg13in1O#3wbqA$ZFbu)dd{41BACoK$AD0qU)J z(gj)rUV|+UOaU385Ul|^ddU2PE(d-JC=QAr|Yl zz4x;u%_dFSb*rN#E7N5w(^ad|HLKK?H53tzT<<)1lSdwW-tInkHF4RSOKWcK!9FRM zrEa0XVBTeVGr}MSH#25>$VpE-=BZEG*tKvk%x?29)loX*q9toMy1Xjj!<3 z0`*?Z^8O}kCOIGqvnDy%3OjYaDIek8DZp(H`H*wp*3Pp}oE}b`h8)_6Th~Zi z*YwuTLDsCgtemqZa+^+!Jd@Td*pBbqDs^UH-SV6x3ZqI3!K5;TsOrRFSA0=Ii;Yq? z(e%d!iR3%@ImJH&^bp1ZIEZ^Igd3ar)oSWPX~ntLlLTpEP&A{QlXAN^7m_3Ik1$BaRE-rV7o0F2=@qOn zn$}-PjD=BS%@!t+{1n3=fz!rxjFx|n#NS2fJ?pNmaE~Zbo2FS0=xBKX%ANBbUDLrR zwrXiB4rXr}(6)Wi(mZQV>O#G}>3vTcZW(o?9Wim%&QE)rJbbO>^iG`kI`?ZCTuPz& z1$*sWWz9-jxOFNq+2u#i%(?J4M8qTs>TYgYW!iIIg8o2`N*c(37V6_8-pzHYe$C_> zy9XWa-9Ju96YR?ZtjJeRB>l^=Q_0JFu=eV0f*6G>dpAx`2MxHUu=R}*U`Pck^;fKx z2Tgz6z0y0{$JeECV;{2fQ+nfh(!pHP`R^^(AC4@qXY1crSOdMA##{706wPvuq`P;WJ5>1HntmL-E}iVim}7_AYn%=sowlOD${Y?dWuy=9*4q)!k#523ZhK0a5Y zNL3gQZ%3$*cU?%;v;I1k6*WiAZwQ?}{toboYGxs03n%{z;;VEh%UBc9*@3a)AK`h>y!RTO+Xb8ARX* z6gvIv=CnsBiAdwkjrbK;Pgy&Z<2TEaCt_^=0CPMJL0E-BFT|LyM5n;3qE`7$ifnWp z(cGbr8g@)iWbQ7t{+1A1m|kgU`LEh2gqU%VS1(^UxLmRC#1y>=Wo$}mcFw}(?+LE? 
z^4XwV{k}?#BU1{m=TIJ@nqht=T{%Q}&es2W$p-}|u_a%YNln-kb8#r8G#Nv$Fk-ap zW{3Dh+c5Jww^!=CC2r^EWPUt7M`eg^CN_-qCiY9J74VJ4nU*v;-hHI5Uh6p7i{UAF zSEjUQ?&Pw6LjXUY3i8{CD`* za=R2yu$H;j;(G8BGYNcmqjT0F2vwcoBX8gP<%*sh@I57m(RKZkRfMz(;GK1)A%@); z#T#RWuH11txMj;t;VqFh0(7>Vw!F4TuXv9jOu2skgmle9`8JJGHUsWJ=^553t2<<( zO}rixAeBMkvi@^Ml9r&uWnDX^RMn+=gET%N5Ax)Ypu^WI{k%}@@FFlfbhO}UhP>3V zH2tau2ku4B$p8*nr65TcWz0a$puhoF}z5E2FAA=wjHS+F?x6`>2pi61{;%{U<6F-}gS~0?sjWgOo|ACsoB3c=qKfJlax~a}TXu8~3bMf;A z8~+-Az(6}RoIOlwmV=Qda$^G5^~ZrWt1R8B(0Mxe5b>~U7V$_IJS`|f(8Rq*XgQw2=S~DJzJkZd#maS0A zRf$Q_vedIQS@ELi9d1Nfobu5Q+3?t{7FUZYDD|0?{gQ&@jBhx2lMw!KTol$Mg|+~5 z7MEeBNxz~?s4IFSelx?<0j>-4l1us{6@TEcOD1~yBd3@$MHLEG;r3BqFViOTGT<`r zGA;&9CQlVkpBuLyIN}I?+j85$C1`P=E5tjow!ngtdR_{2rej8FMy}Aga5jx~TyR`j zBmJKCjCLYW+N(&rUbzIq%lDrDeG7(!4@C##HvzKN#Cvof*P-qRRek6yWF}bhhVMq? zF7fa*Om3q+}Qx5zE z@_js?4^tGs&O1>(p(hyA8If2&u|vpfq?hCc+0YgBn;?R^oGy9Y(OG?<1xlDij76bX zw_Y=InJ^2cz($pEt7@z6W4;X^PgKVr{P$buX=Ot?8i+2uT+CR{-ezVcH)JhNM8#u) z6H>n<9nkgHD1z9bz~r7b;+Q&-m@r7f4tGNG$U{fuKay!j`b%xzg%JAr&OR%2zJ}|* z@7y9;dGYMdfMt4x2zHO+A-=O0k>3%)q(~i+wSZqFEkyfZU($h@Q!iT#jJ&8J&|IHJckG4hM?+IrgM)u zk*azR&q`}LXYR`omVrrTRr3||)YH^c%;rB$zcCM_`3zS6GK;vpewXF^_9p!7d+RuC z)sjG*I`ELgC)OX_&ov-mn~Q}x+7s`ZN8&;Is{V+Du<9Jp*Bjr9kj78vo$f|@CtpUz zLe4^!m?=#rN2)8y8~^Hb{l3c<*Ef=D_^~@V@p?XnYb-j!Ey_Z`R; zgxd-Ih#b>w>uZlkFkvd2QL`IR>%Hx+@K_=q75^7vFW~)M{nH9zjgZ89&+;d3#2Mv2ojZy*4ETDPQGZJKM2!qE`}6#%v8&ENfXt^;xmu-7-~(LqSLjD|=ku zB#BnDV|sg@ZbsF3n_38txmdQM;kj>9n5 zYG#>dZtH9w&W3bINhLG2&(bpw-3xaFWi6P-ks8p*k-KA<5DCqV(4vG*$ecW8Q9mU@578v3+j( z;06jOB!_6TsYA$I?R~%<(+R62qOIT%<(SPQ`J6kWnJfQMDA|i-0b{k~YRX5}U&kbn zq=kxg=Q0OcYS2GhcB-|22S>8K{$sK%37Z(9)_#QLWvy(Rx2veaLAbF{%${U4O(KP6 z78Mlh$N^98YM;8{$H@rj?62^@q94RpzvrXzjahU(k>8mX$0lfpO z%|wRcnK639VpO;?7(m=Zs4ftYNpf^NXvyQK5aCMD{qt^uA+bV~jG?`mD^qpY?1Dle zf5{s9tz!T3yUc{03KO@o{r&wr!P0|zMViYIQ7&TkTix%8(*<}(=;K%GGRT=b*dz$d zZs`2_^V7+iV?U#Vy_q`;eg>8Nh)t`d#y_F(+~MoNrIJtT0B$mV^VgVm)`tdvAYrU1PdH z!=(1lL&l)iY_TuCYH#h&6Yu>Zi6yuhA3&NgynS>!Rl;QMDlB1m7o4MBVJwtXaD9Ew 
z_Nz;!HL<@I#Uu%0G4wDrNEox9M-RqVR~^cs+h~vQ(vM%3A{N(9p=C>2=q#h;WC?m< z3`>7^8ZMVNAw~!qSB^_4-wOA{H*&Ty8y;0WuKL!;k>Bbu`vpkwaTK0(l@K!)_tsdv zh)#=-#Kdi|b(2pGTsw@sC!4Ikohx^*4>nb>@787OXv*6G%iFYml%Oee0_vThkcW*C zdKWXuzJ(BroFQHs7Mc{3silg_ei?^A%GE~4lTD!OEfLjI>;;0HtR`NTk0@{i)g&To3AowB?`viH$`N@%3< zw{iP9_oUi_AE_sp}$Z;`bS*hdr%+)C5SYurJ3_<(r1@erx3G09*Cngu5(m~GesCQc!Q)e<(QSP;wF%A3@oS7=W{KHEx zN>MBA!qWW+u)5)oLNM&jR zUX9lRGDt(3O@;`|x7AUhK-0pSxu(Z$pj`ZH9MxNMN*ql8hMyu*N$gzL>B-t*eHbkp zo)us|CFU;EARmI}mUd#uw?+QpIGNS)6kExJE9ntP?6AMiLX2(lLj9w93OGsS?ZXMsBfT=g7Cddjn8#mx;#x2=v&6&ky!gf7 zb}u$q3;D@k^NfbVG=}$O`xu9#nDDf6VKqqDb19rxC+pey$l!DmW`v43@Hx8xp)PrS z$=$g>fwLlQJPu_T5hI{CFaRG7AET4TL#*L(d4!|Qt!(bxJo(P4P93*I1?K{;p)pLr@JmL|HoAb017+`o3DZkUR}7U)L_TEiPD;GMVsRgbZ;3qyw~QIj@`F z3)R}*T-sx`T_#l~&`X*DMh$y=4Gydz?E#I}*Mu_cEeL7({0)b{OFAP^;)lMq)D4;p z3?pLibWRpk>um+VCFS}u_kp=U7I?G2(f3hT_Tm4$CJc(Z3pfhHH<`2!jqbB%4xdmk zmtHn?Tp zWHcoCa}U+coPmZmRoSu;Hy3HHM6NU`Qd?Aj1pyb_Ag4OPDQRYAMn4#^<>eVx%_wko z)e^gwOgU)Wh~I;w3ODZIXz|nOPyaQv$%nc9bqrk8q;jXsv~=``6EtmU^5fOIoyo#G z3B#>juze;!Xd_MweS2=+o-b1>>NJ|z$;#!?;8E<{eLSehV|V*BsOe3@Q@cPrJ#Qxc zt*u|rT&UfdY!RZxW}CM6HQX&LD^=c64GWSAd*}+BnbuZuOGYTdKuUtsj&11qh^Zoo z+1;0#yM6Fnw4;cVe~nU4cU^9RN58|)d(OFA9&Cy{jNPIya>AI=Jwo11;R>*tlUJ?S z^!@&`{u>={vS}zR{6X7o=i&?co+EhDvz;8f>Vz#{D^*L}I60ofI3@M(I>_U1S3%3< zrM5@Fk?=UJ0p}`I%ziyTn8PQJbQio!K6(kB!2g`m#q=o%Fo?k>T#|3eNlutdQj~&7 z`I^-X_IaGfQpiUZnPu4%ZYdXfRBIT*ok%1pH~y)^X_X!ouTR`Ax<7+Nnl`R|9b|* zf%$~(fvxfH(v_WpiPE34&W7W68Hhyb9H8{mQMXp-+U&q8K;-?F4e1QJb;PmAG)xj+ zh6*xvECX-(A-0WEeTC~vT6tj1K|X^Q#RCvvVXPZ+Qnw;r!`xhX%~ylC5-+vp-#N5Q z7=y<}1WKpSU{LD7f0_m4aBAaHBlUw2@|8gAGeIN|{LyftotAPR`DtcAEnxQrEG>_o zJfF6#=oZ50G=JQd9wOHq)1&QXv7X|oCs?X;-YJi`su7YyH)K@I`V0P=`$b2ziN(lq zSlF(0)H}CpSwVB$m7fbb>?C(Pacr~K4bKDrhm~)^2DC(U%+7JIv!ylrI;~67rNIki z6e0{SE%zBa9`i9?P!9bp?+;Duufyu94$BoRsihrjoi#N>?_5J|;MwfFv{SCD7);Y9 zb!$4se?WLn3fCEMTv)TUG$TTvx79i1EBefV@XCP8m}0DFXs>^n#;vgkbsF&Ijylhg zKXM!W(Mk#KO$bP|q2@+Q>OG(FmXOoG*bj`^+f7;@?tS?4D$`HC5$u$HXha0LIf{zP 
zMYdV1)4;wX3Rrl%X%r8G-V@XcsV~9$ph)Y99S#C9FgNwHABB?}NGZQ4jh}a5NHDN) zDvH#Vqw0EK<>Jpnzf@ni0Vagh{_VN6m(y#Kv1t$a=mq*BX}NNMb8HS9-&y9&JmO&3~Xx}uq`*$PA>$vl(D#JE#MjNF<6X(*a?{Z+G=&+mN81cW3;imnXtW^6R zBW>hd;E9RJM5blEhGwPldzUX0oq-sYfh_MJG=)kn#Rzd($V)LLFsB;=7*?|w zT1~?*OiFqm1$DQ4=@e0MiRNG=8O3Iof+MEd2eg4>qOM_fmnz@fUyCaKc6Wcl5RAq> z%|pW8@9bOsX?t*zN6M=Y##`&j7PwZYpXpWWfjiF15oQXrGDfoe>}2$ejfhCu|0?dR zqoVxQJuV<22&i;O7@*{I0D>qfrF0_ioOsH08mOBVqEvFP0uFW2{n_>ZP*k2tdS#2u33 zRGQPj#QEY6iz`hR$&D7+1=zdvX)Wsx+7&e z$}VpCodZ)a^^PcG)A{n$mrx?i=K#Ks(zh8KnhrBeiWb|>(}_G$*>LfjV66s9oCwQ? zYw2;tTK4J>V}x26ik5AOo28K}X@N%Wd2YXYxOsVYsd^8n?(0ncP}|j_7jkK?SN;;r z;ql(xbl%6Zb>+TRE+?aXx+wU`mx1GM$E>pIoOv|Kki~cH;!g}H7a@Xu!43~9ttR9~&Q(&e@X@BIQL#Ncx@9piI0@`y z`%29jqWnTlbR}gkE*I2?!!e=-}}^u>904b*M^GdWkOKVN3adaoURe^l#{*lI_FA0cxMFI+lymSkF-Ks`-3>AhuJf-JhFjdpmcSJB>6?8WDlf3+s8~-qj>p5p-k}E%x1h6KUYDiUV;~u!-GQ*zvt5!m%=G zVy>L>oOK0*{S}!UZ;sP`+0FPj^@oNVWO7J* zOXsQpSV1f=5Z6V6CyGpjo6-sVYfT`$G1R*P&+uRL8QahJWFM0pjoq%)t!`fZu!NM# zZ+-ibtnf_U91>!LHuNt1WFg(26}qiNP}-Ke8ODOl;m7%{F&r(hIlz>u-@X^Sf6~H>w=E4 zg&O+x9xB*;YIh~ojD7R%fDmbRvdqu4s&-PvTzeCevOD+29;AI))Qs+aeIuD&+T6ysFzN9t~NA5vwcgvsg@etFQYIMpFY?QDLX^uopvGi*>;q_qFqAEckud zI6}D!ENQFWsQqk!k~6Xpt(l(>a`HC4sS~I>&qL(9*WxmSxM zhew;L6FSg4`B7IMec;BVWB9NP476krqHIDNFJcpWByDKszBe(hoKw=I05SKHrcJTt ztQ|ag6B#&C0)wg;l$UYPWa$omeB@e+M44P^h~Jbyjc#v9pu@xRSz7nM{d{dj9{erv z^nS_x<&S`5RV)L-P$B^4;3jauImo5FQAdC0p}h23(>U|fFlC$N+bz|!wRG%cVgwlE z+sLPaqy9Kj!O;StQ(>r@BHs281G(cTb$JvmaSFlrcy-_1zb+f@0W%EmE&9gS@2)%% zYc@^fUUdbvbZ1~x#c8Fw_WDe>2_bEZysyE%OzkxK zd1b7*F0lAAvwQ0txbj+H+oAQbs*@8R8t+Havx(!Wk0rOkHm1s`p?$tNFD(?_2o9(y zHa{+V&%S z^pWmM`uhfppY2P93ykpAh=pMkjrB*s$-Ho)z*!6K50edOFvFt&qYp>owr^mPHsvA2kW||}Y`4jR|!l8C( zcI@9mg@_B{8m$Z9wCLkY$q(Q%LK?2|#ftKere>Z8rEGrNp8)lQWHAz!V{M4XcG4=& zU_r($By-SQSHFg#7lyU4FB3^uNKS|)5!Op^q%}(8`Z%G9w_6t~heCVd$ zo#|es;_6VzKd7YQdWm`Dhv>JQ=!Mhg;Tg9_vgr|FTu*OnCozj+)KR@}(}BLOziiar zmwk&;o)*>^d-~oRSgXBTb6^JBs4Gjqt1e&pIU}z+Z3#WbZ62LvyX;UC!Hnr88Qj(f 
zr~(R~cvJI#y_q~df}kN=tX|`IlAfL(iqz7P8ybj=93-v~C0XAbT8OjD5KtUSkjH*8j25rgtCz0DMSb;1 z=Q!^Rm^(bpm~YY!s8^%l?p=}WcnB+$%YnIckx*oa`QMJ?(g>`SWs7scgVhkQ-EsIF z-PaZ{_?R?FO|VH{?h6MmbCA48gxDmw7j7Kmdjl7cuF`*1mm~GKaT$MBpY=srpz?~`L zDFk`p&{KcIANMTLCu@BK!IAD=na@~gnx?I88+H+oFC`C1xpM`)ng-Ry|6n*Rw4Jg! zA3JB*;s*<2PI9*^hj!7Y28Az^JKTkq@ebLwSK$Q}hHhMLS?3K$l0)qdlj_6iV5UI< z3n$F}({2joNip3-hlZoFJgE)SM0N3oyuAk2;F5^NoY0vX`9}W4Th2_=z*C8WqO*4Q zgo5j~2=eFAR+eeHh<5 zTkwHO`HB^;dz@J#aDq_54nl40(mAXACU$)uAP=+?dR>2AzfAc%)W>@nmzQwrp?mdu z>Ku7jsXH&@toD7b(e4p#uC@a`%s8pWI1T$tCeSd-)>k%}OOZTW09k?4N_@ zwS;0}{uAmqt}QsrRu=a3SC&S;#jUv?rPLvYq*YK%m%91k&U{HS$`4Dfz<~Q*^V{bg z!nZS>Rs$^8CSZ5KPiM;lTxBbKE0;|`+S>sXHM4l-rctr;_!{sf65mNOD2YnW8GFGm z5CaTxcfo{Q!M4A4wk~Jw(=DAML;`agFe6zR|0d<}UH61OMOpLljN=v*6|0jC=R0}P z2lOos$rZLG(NV5O5#C+3p&v8s2-Y$k95AnvzGVT86;ru`z6s?m>FH=@+)?TA1eNRX z7~w?AaWP>&Ofj4ku7Bhxu$|Rj76)u+7|CkRoWx4PHQS}@?rGvEB<)k|F>9akagRA} z1@kjgGjcl~J}R~8sbi@BiCNP3?7jnYn-eM;sPR+XB}diQ9Piqk1a@#gQn4%NJw9mu z8h^9}P8j!F9TZZ2a2^#kHd^)3M*u2T`IP#g^VF<}B#kRWJl0aY%y@!nD?or*@Vxt1 zm?NPDgC0TJA#-0PoH)}=+#RzU*$-zdiQ#ssmU-x}JU-9{yZZKd8FzQHN|x$<9gf4( zCIR=J*@BE#wG3cfCINb6>G^kShW_?+trK^mG6#5#UlLI(C$U4uGxKAFpLcU6=T~N0 zVq?6`uvb%z7O*0`LGtpM4tlBMrfW+y4f^R%-Bo=ioc*Bu`Krg|(-x8VFR0O;^tr|) zw?^B@Wik@#toDJTb0M)%T@+45ggGXyBkP9hNi2R8a}=0&%43oJ7QIDHz4gZZwFC(# zHJ06hZRdg%vNnb}t>#)=evVwJWzC{j3cJxpKY)is#rC_x#?u7G(XiP}lfcG;eCzQ< z=y$z}Hm3LIAJ0a2qr0X-HLu)q=kS_+so1|Hi<*n&^sij6iWM!6{Z>Bb{Ngpvd=P6m z)lb!{VtVN(GhCnwC#G*=l0_f0*!XzI95h+PIF%#g14aB+h;e00_}L;jV@_#FFx#SAyoGr% zL%jv^;Gsd5?mOE2B`_`jn8D30^{!okr60EwS6y1pHen8jjW3vPE699-0&heUOPXr= z5cn&-%PguY5QZ=WqMF#spvs%?Tf15txZEf|r~YutH8$TcqdLnkRU-At(HM+{=39P= z?f=;7Q^2@z-NzA>la=RWyV7UxVEAwc6EFRED!g#l@dRreaPj(3lejQ9^)|WQh zd@Y03I$=$p-Jvi-oNo+C!*zm`VLE{hbl9x;* z>S|w`a2;}LnSt{-)SR)1hErcpT5!r}Q7!>(&o{SR{vgyJqc5&G}j`29~71B&cnqR_e}hu2P$#itNrVxa#Tj5m{;2G zlw!|Ex&c;Ct-`4-AEjB|(BR;;{A`j`FG6>wG~C9WyXjWA-1r$f&Ge3&A_wC)wekl1%TMl9(gNqw~RrUYlBO&h5izf>O#gsbzPy 
zHPsX~*X*dA1svqWPltIdb-pKtdgpW*syHLVTP>eZlRx|Ax%DWC$^JVY%1-fz?{;J8OSFnA;M>8m?p^(6znQ_=LYZx{NJ zMu2sRlWzI(MMHWY&)c z8}mi#5*lqEeT|?nK7fe$?S4uO)|S!Hiv+-RZPAos#L@evJ3L`>$?iQI;bx6J38gvB z5lXKko|}!XBJVCs5mdj;P|1DlE*J|M$w%b8DgM&Sno`i|zbex>z%+ej84DQ+SF4-} z6X6{((L;BVx&71+-|KhgpRjb;XN@1JWg=%xQpYX%rB;1AhvLKX0MD@(SHq3LY_V2{ z@oW5L(_*`76lJ{ea{#r>u4goY=bpa#Bti98zrE?1I&Zvv!w&9H9{(u3mWf8B*0E`R zNwR#yi#%TK=fT+c{^CtiFUDej_9JnOLN`?`I$%WbO4IpWw}{Pon><5SVWsjd1>0_r zs6+=3mrMjcOky)QbA7r5^gfYwT02}$$`J`{^IZP-qNi7nIA)4zi z3OJ02eyr(DTCWkNLAUkM0ycfz$v*u|Ft$Am!oZ_6+}FF>t>#|m`-Z&w1tLQGo5^eC zBCm!c&k+3^)7k*%lU@8^nC=!A8cx-{eXdJV=N|9NySEhX;6`44W!iwNMOcqUEuhdj zYA*K!Nhfh%bLjJl(6&qi_Z=~ZGSN7~!U?qX%kEEabE~4FprjV+)mEdw@DjK@cmFi@ zVYzbuc3S=0Que>W0!sdGv4DT_055oYShJs^rIN*~zbsqO3s@dj`EL|Jti|kaiy!!J zhaa&97QuHx%EP+j{`rDlaPYAI|K^mt81Huj8J6aT{Lg+dt&m=m7M2@rTY{~O6~3+a zWJvIDMsbrM_sL~WuZXb=8!HAN7dH6gHmF^W$m(N{+j?i0H!E#nI3B^;hcPcpdqgzP z=%Gz-!tL`c8dMVt6ir-|-&%tX$HS~PCsu@s2pWVs&}p9=XZ6qCI947|lZt(3L%1cL z%C8RJsIwM}{-`JK*%&%ho3))|cvU;Ez32IK#)d>e{fAy|mgjeD23+rLkn#w>3h0vQ z5K)q}u9WcUIWsM*)}<{Bm*H+L$P@#8gLvhq;UC(!5k9#!8?zG{A~F)SSZ}(^f6P>I z<`NkCDLOS$XR&l>cB7!tH=$sKketEU{?Eeri?sN!9sGA;{10Bk|4Tjmf1Ls*;pWzpOnZAMrEMMjpsKT)yE&c)AA_?Q*!9q;s?^+M?>#DxMuNEK@peM?+tpD zoRWg{<304ba3C>94`$iXk~v-&HHX|=N;*%n-K>PGCHtk^UAjwnya+V_&otaCqm;=_ z(xTh8#GU=HqpBv#A6z=5F@NjoNqiY~t7%vXu6x*!#o&kCy_3bcJ<~2j{2gmmx8N~! 
zoN9F~^RUZufXKT^l@QZ;75?2{XLQI@T2D=y7MtjIkt_URaGLZ)-vnVNLpPml+3&-Rkw9j+Q#47X$T}!6YV%Ong@5g3QJWB)x%L)@5 zK?xEZHh0Np)G&=akgE=^F$GoD_gL;{^r>Cd+YC(IM-(l8zoHGZ6zlQ3al0W8`a)a|h@^=!y!ze)G zlxD}YV!KQ058zE+T|X<}roGxz=+LhFx9@53zSX#^WIXai42b0Xv!{tMEL#E)UFM`V z+qhDZmxqG!M^P(OBh(VqMKp3B3Pd>97V(l64S5!2Gns=TT;^x#xU6ex>&rX%Hq5FI zu0k|+N52ez{$M032B6?}nvfjc)~)+gkUuY7u=pzDm_)o-K5QA8P{`p~#h{)tx*EW< z(T_UU#G8z*7QT_(Ptg$&ZWtnaD5*<95MSgZ1C113VzreWC=_TgELh+uSfa7{&goy) zWl*@SU^cwYl0T}OZMef(qGPLZ$AY38=^cJdq!IBA@iS!4!U@l!K?Zg=KJc99R&)qW zM4$EZI75Lx3=O`@8A^e{wJ0l0c6fLh@$?<9XpuH*M{ib8xGBkNGN0%i;aC zf@dN8qic4Dvt%jpS1lmp5Sr#i?61zH5Tyc`DjxhwapFRRKhi9q#g)vpW3y&>XNrjA zEOpIYs)?Q6z9Dq0Hrad>M}GXa&)T$lxsC7ms6uqS4MX#5JFzwG(%PYeYzL*>yH5@y z)TUq^`6(7>K{{iqE}49~#o3sGfqMzFbnM@QlbQ;&i6*&vQLzxYH1Zt)Ip1p`%Ei){ z_oc(g^P5b=Bz^a)lJ6Z?y}ro>yo(|FVkn|nmv)}8*eUJ9ew4rMl@*?(%|@9TT{N;P z2X0Ok;1S?hD?OQKsg^5jhtXmIt@Nf_&&wYjuHR8tSTbwm)zCPBsRHSkF5QNPI!^{YS%cYN1$a zsUgchHLUX*`vrc{SStI z?-~+@xM2SN9*12U@UjMgV;R7g<6ruoHI5K@rMTe3l@FR`R~1gz+o4}(2*{snL%{#2No@69z_%~j z-|K~9ImMUrgTevW^US3_+7(R{U77OAPB_e{E$Ep7Hs@`UnCIvM_(iyh`hXi zNI2vI$9>WMl{2>NFEHMhHSp!yz{Vl4Xym`+>}+&jnHks+3kYBzXkNJ(VDD=zyz-Wo zb}SI=u3S8DNLmMXs`&N3=wdBkO2g!ZvfW?!wj%{V*o statement-breakpoint +CREATE TABLE "duron"."jobs_active" ( + "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), + "action_name" text NOT NULL, + "group_key" text NOT NULL, + "description" text, + "status" text DEFAULT 'created' NOT NULL, + "checksum" text NOT NULL, + "input" jsonb DEFAULT '{}' NOT NULL, + "output" jsonb, + "error" jsonb, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "started_at" timestamp with time zone, + "finished_at" timestamp with time zone, + "client_id" text, + "concurrency_limit" integer NOT NULL, + "concurrency_step_limit" integer NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + CONSTRAINT "jobs_active_status_check" CHECK ("status" IN 
('created','active','completed','failed','cancelled')) +); +--> statement-breakpoint +CREATE TABLE "duron"."jobs_archive" ( + "id" uuid PRIMARY KEY, + "action_name" text NOT NULL, + "group_key" text NOT NULL, + "description" text, + "status" text NOT NULL, + "checksum" text NOT NULL, + "input" jsonb DEFAULT '{}' NOT NULL, + "output" jsonb, + "error" jsonb, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "started_at" timestamp with time zone, + "finished_at" timestamp with time zone, + "client_id" text, + "concurrency_limit" integer NOT NULL, + "concurrency_step_limit" integer NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + CONSTRAINT "jobs_archive_status_check" CHECK ("status" IN ('created','active','completed','failed','cancelled')) +); +--> statement-breakpoint +CREATE TABLE "duron"."job_steps_active" ( + "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), + "job_id" uuid NOT NULL, + "parent_step_id" uuid, + "branch" boolean DEFAULT false NOT NULL, + "name" text NOT NULL, + "status" text DEFAULT 'active' NOT NULL, + "output" jsonb, + "error" jsonb, + "started_at" timestamp with time zone DEFAULT now() NOT NULL, + "finished_at" timestamp with time zone, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "retries_limit" integer DEFAULT 0 NOT NULL, + "retries_count" integer DEFAULT 0 NOT NULL, + "delayed_ms" integer, + "history_failed_attempts" jsonb DEFAULT '{}' NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + CONSTRAINT "unique_job_step_active_name_parent" UNIQUE NULLS NOT DISTINCT("job_id","name","parent_step_id"), + CONSTRAINT "job_steps_active_status_check" CHECK ("status" IN ('active','completed','failed','cancelled')) +); +--> statement-breakpoint +CREATE TABLE "duron"."job_steps_archive" ( + "id" uuid PRIMARY KEY, + "job_id" uuid 
NOT NULL, + "parent_step_id" uuid, + "branch" boolean DEFAULT false NOT NULL, + "name" text NOT NULL, + "status" text DEFAULT 'active' NOT NULL, + "output" jsonb, + "error" jsonb, + "started_at" timestamp with time zone DEFAULT now() NOT NULL, + "finished_at" timestamp with time zone, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "retries_limit" integer DEFAULT 0 NOT NULL, + "retries_count" integer DEFAULT 0 NOT NULL, + "delayed_ms" integer, + "history_failed_attempts" jsonb DEFAULT '{}' NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + "job_finished_at" timestamp with time zone, + CONSTRAINT "job_steps_archive_status_check" CHECK ("status" IN ('active','completed','failed','cancelled')) +); +--> statement-breakpoint +CREATE TABLE "duron"."spans_active" ( + "id" bigserial PRIMARY KEY, + "trace_id" text NOT NULL, + "span_id" text NOT NULL, + "parent_span_id" text, + "job_id" uuid, + "step_id" uuid, + "name" text NOT NULL, + "kind" integer DEFAULT 0 NOT NULL, + "start_time_unix_nano" bigint NOT NULL, + "end_time_unix_nano" bigint, + "status_code" integer DEFAULT 0 NOT NULL, + "status_message" text, + "attributes" jsonb DEFAULT '{}' NOT NULL, + "events" jsonb DEFAULT '[]' NOT NULL, + CONSTRAINT "spans_active_kind_check" CHECK ("kind" IN (0, 1, 2, 3, 4)), + CONSTRAINT "spans_active_status_code_check" CHECK ("status_code" IN (0, 1, 2)) +); +--> statement-breakpoint +CREATE TABLE "duron"."spans_archive" ( + "id" bigserial PRIMARY KEY, + "trace_id" text NOT NULL, + "span_id" text NOT NULL, + "parent_span_id" text, + "job_id" uuid, + "step_id" uuid, + "name" text NOT NULL, + "kind" integer DEFAULT 0 NOT NULL, + "start_time_unix_nano" bigint NOT NULL, + "end_time_unix_nano" bigint, + "status_code" integer DEFAULT 0 NOT NULL, + "status_message" text, + "attributes" jsonb DEFAULT '{}' NOT NULL, + "events" jsonb DEFAULT '[]' NOT NULL, + CONSTRAINT 
"spans_archive_kind_check" CHECK ("kind" IN (0, 1, 2, 3, 4)), + CONSTRAINT "spans_archive_status_code_check" CHECK ("status_code" IN (0, 1, 2)) +); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_name" ON "duron"."jobs_active" ("action_name"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_status" ON "duron"."jobs_active" ("status"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_group_key" ON "duron"."jobs_active" ("group_key"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_description" ON "duron"."jobs_active" ("description"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_started_at" ON "duron"."jobs_active" ("started_at"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_expires_at" ON "duron"."jobs_active" ("expires_at"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_client_id" ON "duron"."jobs_active" ("client_id"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_checksum" ON "duron"."jobs_active" ("checksum"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_concurrency_limit" ON "duron"."jobs_active" ("concurrency_limit"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_concurrency_step_limit" ON "duron"."jobs_active" ("concurrency_step_limit"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_status" ON "duron"."jobs_active" ("action_name","status"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_group" ON "duron"."jobs_active" ("action_name","group_key"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_input_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "input"::text)); +--> statement-breakpoint +CREATE INDEX "idx_jobs_active_output_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "output"::text)); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_group_key" ON "duron"."jobs_archive" ("group_key"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_action_name" ON 
"duron"."jobs_archive" ("action_name"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_finished_at" ON "duron"."jobs_archive" ("finished_at"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_action_group" ON "duron"."jobs_archive" ("action_name","group_key"); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_input_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "input"::text)); +--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_output_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "output"::text)); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_job_id" ON "duron"."job_steps_active" ("job_id"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_status" ON "duron"."job_steps_active" ("status"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_name" ON "duron"."job_steps_active" ("name"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_expires_at" ON "duron"."job_steps_active" ("expires_at"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_parent_step_id" ON "duron"."job_steps_active" ("parent_step_id"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_job_status" ON "duron"."job_steps_active" ("job_id","status"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_job_name" ON "duron"."job_steps_active" ("job_id","name"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_job_id" ON "duron"."job_steps_archive" ("job_id"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_job_finished_at" ON "duron"."job_steps_archive" ("job_finished_at"); +--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_name" ON "duron"."job_steps_archive" ("name"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_trace_id" ON "duron"."spans_active" ("trace_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_span_id" ON "duron"."spans_active" ("span_id"); +--> statement-breakpoint 
+CREATE INDEX "idx_spans_active_job_id" ON "duron"."spans_active" ("job_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_step_id" ON "duron"."spans_active" ("step_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_name" ON "duron"."spans_active" ("name"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_kind" ON "duron"."spans_active" ("kind"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_status_code" ON "duron"."spans_active" ("status_code"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_job_step" ON "duron"."spans_active" ("job_id","step_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_trace_parent" ON "duron"."spans_active" ("trace_id","parent_span_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_attributes" ON "duron"."spans_active" USING gin ("attributes"); +--> statement-breakpoint +CREATE INDEX "idx_spans_active_events" ON "duron"."spans_active" USING gin ("events"); +--> statement-breakpoint +CREATE INDEX "idx_spans_archive_trace_id" ON "duron"."spans_archive" ("trace_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_archive_job_id" ON "duron"."spans_archive" ("job_id"); +--> statement-breakpoint +CREATE INDEX "idx_spans_archive_step_id" ON "duron"."spans_archive" ("step_id"); +--> statement-breakpoint +ALTER TABLE "duron"."job_steps_active" ADD CONSTRAINT "job_steps_active_job_id_jobs_active_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_active"("id") ON DELETE CASCADE; +--> statement-breakpoint +ALTER TABLE "duron"."spans_active" ADD CONSTRAINT "spans_active_job_id_jobs_active_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_active"("id") ON DELETE CASCADE; +--> statement-breakpoint +ALTER TABLE "duron"."spans_active" ADD CONSTRAINT "spans_active_step_id_job_steps_active_id_fkey" FOREIGN KEY ("step_id") REFERENCES "duron"."job_steps_active"("id") ON DELETE CASCADE; diff --git a/packages/duron/src/adapters/adapter.ts 
b/packages/duron/src/adapters/adapter.ts index 9217a48..528e97b 100644 --- a/packages/duron/src/adapters/adapter.ts +++ b/packages/duron/src/adapters/adapter.ts @@ -41,6 +41,7 @@ import type { JobStep, JobStepStatusResult, PruneArchiveOptions, + ArchiveStats, RecoverJobsOptions, RetryJobOptions, TimeTravelJobOptions, @@ -73,6 +74,7 @@ import { JobSchema, JobStatusResultSchema, JobStepSchema, + ArchiveStatsSchema, JobStepStatusResultSchema, JobsArrayResultSchema, NumberResultSchema, diff --git a/packages/duron/src/adapters/postgres/base.ts b/packages/duron/src/adapters/postgres/base.ts index 7e0c905..0c4c094 100644 --- a/packages/duron/src/adapters/postgres/base.ts +++ b/packages/duron/src/adapters/postgres/base.ts @@ -152,7 +152,7 @@ export class PostgresBaseAdapter e description, }: CreateJobOptions) { const [result] = await this.db - .insert(this.tables.jobsTable) + .insert(this.tables.jobsActiveTable) .values({ action_name: queue, group_key: groupKey, @@ -164,7 +164,7 @@ export class PostgresBaseAdapter e concurrency_limit: concurrencyLimit, concurrency_step_limit: concurrencyStepLimit, }) - .returning({ id: this.tables.jobsTable.id }) + .returning({ id: this.tables.jobsActiveTable.id }) if (!result) { return null @@ -180,7 +180,7 @@ export class PostgresBaseAdapter e */ protected async _completeJob({ jobId, output }: CompleteJobOptions) { const result = await this.db - .update(this.tables.jobsTable) + .update(this.tables.jobsActiveTable) .set({ status: JOB_STATUS_COMPLETED, output, @@ -189,13 +189,13 @@ export class PostgresBaseAdapter e }) .where( and( - eq(this.tables.jobsTable.id, jobId), - eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), - eq(this.tables.jobsTable.client_id, this.id), - gt(this.tables.jobsTable.expires_at, sql`now()`), + eq(this.tables.jobsActiveTable.id, jobId), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.client_id, this.id), + gt(this.tables.jobsActiveTable.expires_at, sql`now()`), ), 
) - .returning({ id: this.tables.jobsTable.id }) + .returning({ id: this.tables.jobsActiveTable.id }) return result.length > 0 } @@ -207,7 +207,7 @@ export class PostgresBaseAdapter e */ protected async _failJob({ jobId, error }: FailJobOptions) { const result = await this.db - .update(this.tables.jobsTable) + .update(this.tables.jobsActiveTable) .set({ status: JOB_STATUS_FAILED, error, @@ -216,12 +216,12 @@ export class PostgresBaseAdapter e }) .where( and( - eq(this.tables.jobsTable.id, jobId), - eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), - eq(this.tables.jobsTable.client_id, this.id), + eq(this.tables.jobsActiveTable.id, jobId), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.client_id, this.id), ), ) - .returning({ id: this.tables.jobsTable.id }) + .returning({ id: this.tables.jobsActiveTable.id }) return result.length > 0 } @@ -233,7 +233,7 @@ export class PostgresBaseAdapter e */ protected async _cancelJob({ jobId }: CancelJobOptions) { const result = await this.db - .update(this.tables.jobsTable) + .update(this.tables.jobsActiveTable) .set({ status: JOB_STATUS_CANCELLED, finished_at: sql`now()`, @@ -241,11 +241,11 @@ export class PostgresBaseAdapter e }) .where( and( - eq(this.tables.jobsTable.id, jobId), - or(eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsTable.status, JOB_STATUS_CREATED)), + eq(this.tables.jobsActiveTable.id, jobId), + or(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsActiveTable.status, JOB_STATUS_CREATED)), ), ) - .returning({ id: this.tables.jobsTable.id }) + .returning({ id: this.tables.jobsActiveTable.id }) return result.length > 0 } @@ -272,7 +272,7 @@ export class PostgresBaseAdapter e j.created_at, j.concurrency_limit, j.concurrency_step_limit - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.id = ${jobId} AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_CANCELLED}, 
${JOB_STATUS_FAILED}) FOR UPDATE OF j SKIP LOCKED @@ -280,7 +280,7 @@ export class PostgresBaseAdapter e existing_retry AS ( -- Check if a retry already exists (a newer job with same checksum, group_key, and input) SELECT j.id - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j INNER JOIN locked_source ls ON j.action_name = ls.action_name AND j.group_key = ls.group_key @@ -293,7 +293,7 @@ export class PostgresBaseAdapter e inserted_retry AS ( -- Insert the retry only if no existing retry was found -- Get concurrency_limit from the latest job at insertion time to avoid stale values - INSERT INTO ${this.tables.jobsTable} ( + INSERT INTO ${this.tables.jobsActiveTable} ( action_name, group_key, description, @@ -315,7 +315,7 @@ export class PostgresBaseAdapter e COALESCE( ( SELECT j.concurrency_limit - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.action_name = ls.action_name AND j.group_key = ls.group_key AND (j.expires_at IS NULL OR j.expires_at > now()) @@ -368,7 +368,7 @@ export class PostgresBaseAdapter e -- Lock and validate the job locked_job AS ( SELECT j.id - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.id = ${jobId} AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_FAILED}, ${JOB_STATUS_CANCELLED}) FOR UPDATE OF j @@ -376,7 +376,7 @@ export class PostgresBaseAdapter e -- Validate target step exists and belongs to job target_step AS ( SELECT s.id, s.parent_step_id, s.created_at - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s WHERE s.id = ${stepId} AND s.job_id = ${jobId} AND EXISTS (SELECT 1 FROM locked_job) @@ -384,19 +384,19 @@ export class PostgresBaseAdapter e -- Find all ancestor steps recursively (from target up to root) ancestors AS ( SELECT s.id, s.parent_step_id, 0 AS depth - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s WHERE s.id = (SELECT parent_step_id FROM target_step) AND EXISTS 
(SELECT 1 FROM target_step) UNION ALL SELECT s.id, s.parent_step_id, a.depth + 1 - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s INNER JOIN ancestors a ON s.id = a.parent_step_id ), -- Steps to keep: completed steps created before target + completed parallel siblings of target and ancestors + their descendants parallel_siblings AS ( -- Completed parallel siblings of target step SELECT s.id - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s CROSS JOIN target_step ts WHERE s.job_id = ${jobId} AND s.id != ts.id @@ -409,7 +409,7 @@ export class PostgresBaseAdapter e UNION -- Completed parallel siblings of each ancestor SELECT s.id - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s INNER JOIN ancestors a ON ( (s.parent_step_id IS NULL AND a.parent_step_id IS NULL) OR s.parent_step_id = a.parent_step_id @@ -422,18 +422,18 @@ export class PostgresBaseAdapter e -- Find all descendants of parallel siblings (to keep their children too) parallel_descendants AS ( SELECT s.id - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s WHERE s.id IN (SELECT id FROM parallel_siblings) UNION ALL SELECT s.id - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s INNER JOIN parallel_descendants pd ON s.parent_step_id = pd.id WHERE s.job_id = ${jobId} ), steps_to_keep AS ( -- Steps created before target that are completed (non-ancestor, non-target) SELECT s.id - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s CROSS JOIN target_step ts WHERE s.job_id = ${jobId} AND s.created_at < ts.created_at @@ -448,12 +448,12 @@ export class PostgresBaseAdapter e time_offset AS ( SELECT now() - MIN(s.started_at) AS offset_interval - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s WHERE s.id IN (SELECT id FROM steps_to_keep) ), -- Shift times of preserved steps to align with current time (only 
started_at/finished_at, NOT created_at to preserve ordering) shift_preserved_times AS ( - UPDATE ${this.tables.jobStepsTable} + UPDATE ${this.tables.jobStepsActiveTable} SET started_at = started_at + (SELECT offset_interval FROM time_offset), finished_at = CASE @@ -468,7 +468,7 @@ export class PostgresBaseAdapter e ), -- Delete steps that are not in the keep list and are not ancestors/target deleted_steps AS ( - DELETE FROM ${this.tables.jobStepsTable} + DELETE FROM ${this.tables.jobStepsActiveTable} WHERE job_id = ${jobId} AND id NOT IN (SELECT id FROM steps_to_keep) AND id NOT IN (SELECT id FROM ancestors) @@ -477,7 +477,7 @@ export class PostgresBaseAdapter e ), -- Reset ancestor steps to active reset_ancestors AS ( - UPDATE ${this.tables.jobStepsTable} + UPDATE ${this.tables.jobStepsActiveTable} SET status = ${STEP_STATUS_ACTIVE}, output = NULL, @@ -494,7 +494,7 @@ export class PostgresBaseAdapter e ), -- Reset target step to active reset_target AS ( - UPDATE ${this.tables.jobStepsTable} + UPDATE ${this.tables.jobStepsActiveTable} SET status = ${STEP_STATUS_ACTIVE}, output = NULL, @@ -511,7 +511,7 @@ export class PostgresBaseAdapter e ), -- Reset job to created status reset_job AS ( - UPDATE ${this.tables.jobsTable} + UPDATE ${this.tables.jobsActiveTable} SET status = ${JOB_STATUS_CREATED}, output = NULL, @@ -540,13 +540,13 @@ export class PostgresBaseAdapter e */ protected async _deleteJob({ jobId }: DeleteJobOptions): Promise { const result = await this.db - .delete(this.tables.jobsTable) - .where(and(eq(this.tables.jobsTable.id, jobId), ne(this.tables.jobsTable.status, JOB_STATUS_ACTIVE))) - .returning({ id: this.tables.jobsTable.id }) + .delete(this.tables.jobsActiveTable) + .where(and(eq(this.tables.jobsActiveTable.id, jobId), ne(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE))) + .returning({ id: this.tables.jobsActiveTable.id }) // Also delete associated steps if (result.length > 0) { - await 
this.db.delete(this.tables.jobStepsTable).where(eq(this.tables.jobStepsTable.job_id, jobId)) + await this.db.delete(this.tables.jobStepsActiveTable).where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) } return result.length > 0 @@ -559,7 +559,7 @@ export class PostgresBaseAdapter e * @returns Promise resolving to the number of jobs deleted */ protected async _deleteJobs(options?: DeleteJobsOptions): Promise { - const jobsTable = this.tables.jobsTable + const jobsTable = this.tables.jobsActiveTable const filters = options?.filters ?? {} const where = this._buildJobsWhereClause(filters) @@ -585,7 +585,7 @@ export class PostgresBaseAdapter e j.group_key as group_key, j.action_name as action_name, j.concurrency_limit as concurrency_limit - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.group_key IS NOT NULL AND (j.expires_at IS NULL OR j.expires_at > now()) ORDER BY j.group_key, j.action_name, j.created_at DESC, j.id DESC @@ -598,7 +598,7 @@ export class PostgresBaseAdapter e gc.concurrency_limit, COUNT(*) FILTER (WHERE j.status = ${JOB_STATUS_ACTIVE}) as active_count FROM group_concurrency gc - LEFT JOIN ${this.tables.jobsTable} j + LEFT JOIN ${this.tables.jobsActiveTable} j ON j.group_key = gc.group_key AND j.action_name = gc.action_name AND (j.expires_at IS NULL OR j.expires_at > now()) @@ -612,7 +612,7 @@ export class PostgresBaseAdapter e j.action_name, j.group_key as job_group_key, j.created_at - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j INNER JOIN eligible_groups eg ON j.group_key = eg.group_key AND j.action_name = eg.action_name @@ -652,7 +652,7 @@ export class PostgresBaseAdapter e nj.job_group_key, eg.concurrency_limit, (SELECT COUNT(*) - FROM ${this.tables.jobsTable} + FROM ${this.tables.jobsActiveTable} WHERE action_name = nj.action_name AND group_key = nj.job_group_key AND status = ${JOB_STATUS_ACTIVE}) as current_active @@ -661,7 +661,7 @@ export class PostgresBaseAdapter e ON 
nj.job_group_key = eg.group_key AND nj.action_name = eg.action_name ) - UPDATE ${this.tables.jobsTable} j + UPDATE ${this.tables.jobsActiveTable} j SET status = ${JOB_STATUS_ACTIVE}, started_at = now(), expires_at = now() + (timeout_ms || ' milliseconds')::interval, @@ -707,11 +707,11 @@ export class PostgresBaseAdapter e if (multiProcessMode) { const result = (await this.db .selectDistinct({ - clientId: this.tables.jobsTable.client_id, + clientId: this.tables.jobsActiveTable.client_id, }) - .from(this.tables.jobsTable) + .from(this.tables.jobsActiveTable) .where( - and(eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), ne(this.tables.jobsTable.client_id, this.id)), + and(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), ne(this.tables.jobsActiveTable.client_id, this.id)), )) as unknown as { clientId: string }[] if (result.length > 0) { @@ -741,13 +741,13 @@ export class PostgresBaseAdapter e await this.db.execute<{ id: string }>(sql` WITH locked_jobs AS ( SELECT j.id - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.status = ${JOB_STATUS_ACTIVE} AND j.client_id IN ${unresponsiveClientIds} FOR UPDATE OF j SKIP LOCKED ), updated_jobs AS ( - UPDATE ${this.tables.jobsTable} j + UPDATE ${this.tables.jobsActiveTable} j SET status = ${JOB_STATUS_CREATED}, started_at = NULL, expires_at = NULL, @@ -759,7 +759,7 @@ export class PostgresBaseAdapter e RETURNING id, checksum ), deleted_steps AS ( - DELETE FROM ${this.tables.jobStepsTable} s + DELETE FROM ${this.tables.jobStepsActiveTable} s WHERE EXISTS ( SELECT 1 FROM updated_jobs uj WHERE uj.id = s.job_id @@ -799,21 +799,21 @@ export class PostgresBaseAdapter e await this.db.execute(sql` WITH job_check AS ( SELECT j.id - FROM ${this.tables.jobsTable} j + FROM ${this.tables.jobsActiveTable} j WHERE j.id = ${jobId} AND j.status = ${JOB_STATUS_ACTIVE} AND (j.expires_at IS NULL OR j.expires_at > now()) ), step_existed AS ( SELECT EXISTS( - SELECT 1 FROM ${this.tables.jobStepsTable} s + 
SELECT 1 FROM ${this.tables.jobStepsActiveTable} s WHERE s.job_id = ${jobId} AND s.name = ${name} AND s.parent_step_id IS NOT DISTINCT FROM ${parentStepId} ) AS existed ), upserted_step AS ( - INSERT INTO ${this.tables.jobStepsTable} ( + INSERT INTO ${this.tables.jobStepsActiveTable} ( job_id, parent_step_id, branch, @@ -848,7 +848,7 @@ export class PostgresBaseAdapter e delayed_ms = NULL, started_at = now(), history_failed_attempts = '{}'::jsonb - WHERE ${this.tables.jobStepsTable}.status = ${STEP_STATUS_ACTIVE} + WHERE ${this.tables.jobStepsActiveTable}.status = ${STEP_STATUS_ACTIVE} RETURNING id, status, @@ -875,7 +875,7 @@ export class PostgresBaseAdapter e s.error, s.output, false AS "isNew" - FROM ${this.tables.jobStepsTable} s + FROM ${this.tables.jobStepsActiveTable} s INNER JOIN job_check jc ON s.job_id = jc.id WHERE s.job_id = ${jobId} AND s.name = ${name} @@ -903,24 +903,24 @@ export class PostgresBaseAdapter e */ protected async _completeJobStep({ stepId, output }: CompleteJobStepOptions) { const result = await this.db - .update(this.tables.jobStepsTable) + .update(this.tables.jobStepsActiveTable) .set({ status: STEP_STATUS_COMPLETED, output, finished_at: sql`now()`, updated_at: sql`now()`, }) - .from(this.tables.jobsTable) + .from(this.tables.jobsActiveTable) .where( and( - eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), - eq(this.tables.jobStepsTable.id, stepId), - eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), - eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), - or(isNull(this.tables.jobsTable.expires_at), gt(this.tables.jobsTable.expires_at, sql`now()`)), + eq(this.tables.jobStepsActiveTable.job_id, this.tables.jobsActiveTable.id), + eq(this.tables.jobStepsActiveTable.id, stepId), + eq(this.tables.jobStepsActiveTable.status, STEP_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + or(isNull(this.tables.jobsActiveTable.expires_at), gt(this.tables.jobsActiveTable.expires_at, sql`now()`)), ), ) 
- .returning({ id: this.tables.jobStepsTable.id }) + .returning({ id: this.tables.jobStepsActiveTable.id }) return result.length > 0 } @@ -932,23 +932,23 @@ export class PostgresBaseAdapter e */ protected async _failJobStep({ stepId, error }: FailJobStepOptions) { const result = await this.db - .update(this.tables.jobStepsTable) + .update(this.tables.jobStepsActiveTable) .set({ status: STEP_STATUS_FAILED, error, finished_at: sql`now()`, updated_at: sql`now()`, }) - .from(this.tables.jobsTable) + .from(this.tables.jobsActiveTable) .where( and( - eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), - eq(this.tables.jobStepsTable.id, stepId), - eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), - eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobStepsActiveTable.job_id, this.tables.jobsActiveTable.id), + eq(this.tables.jobStepsActiveTable.id, stepId), + eq(this.tables.jobStepsActiveTable.status, STEP_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), ), ) - .returning({ id: this.tables.jobStepsTable.id }) + .returning({ id: this.tables.jobStepsActiveTable.id }) return result.length > 0 } @@ -959,8 +959,8 @@ export class PostgresBaseAdapter e * @returns Promise resolving to `true` if delayed, `false` otherwise */ protected async _delayJobStep({ stepId, delayMs, error }: DelayJobStepOptions) { - const jobStepsTable = this.tables.jobStepsTable - const jobsTable = this.tables.jobsTable + const jobStepsTable = this.tables.jobStepsActiveTable + const jobsTable = this.tables.jobsActiveTable const result = await this.db .update(jobStepsTable) @@ -999,25 +999,25 @@ export class PostgresBaseAdapter e */ protected async _cancelJobStep({ stepId }: CancelJobStepOptions) { const result = await this.db - .update(this.tables.jobStepsTable) + .update(this.tables.jobStepsActiveTable) .set({ status: STEP_STATUS_CANCELLED, finished_at: sql`now()`, updated_at: sql`now()`, }) - .from(this.tables.jobsTable) + 
.from(this.tables.jobsActiveTable) .where( and( - eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), - eq(this.tables.jobStepsTable.id, stepId), - eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), + eq(this.tables.jobStepsActiveTable.job_id, this.tables.jobsActiveTable.id), + eq(this.tables.jobStepsActiveTable.id, stepId), + eq(this.tables.jobStepsActiveTable.status, STEP_STATUS_ACTIVE), or( - eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), - eq(this.tables.jobsTable.status, JOB_STATUS_CANCELLED), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_CANCELLED), ), ), ) - .returning({ id: this.tables.jobStepsTable.id }) + .returning({ id: this.tables.jobStepsActiveTable.id }) return result.length > 0 } @@ -1030,7 +1030,7 @@ export class PostgresBaseAdapter e * Internal method to get a job by its ID. Does not include step information. */ protected async _getJobById(jobId: string): Promise { - const jobsTable = this.tables.jobsTable + const jobsTable = this.tables.jobsActiveTable // Calculate duration as a SQL expression (finishedAt - startedAt in milliseconds) const durationMs = sql` @@ -1077,7 +1077,7 @@ export class PostgresBaseAdapter e protected async _getJobSteps(options: GetJobStepsOptions): Promise { const { jobId, search } = options - const jobStepsTable = this.tables.jobStepsTable + const jobStepsTable = this.tables.jobStepsActiveTable const fuzzySearch = search?.trim() @@ -1129,7 +1129,7 @@ export class PostgresBaseAdapter e return undefined } - const jobsTable = this.tables.jobsTable + const jobsTable = this.tables.jobsActiveTable const fuzzySearch = filters.search?.trim() @@ -1208,7 +1208,7 @@ export class PostgresBaseAdapter e * Does not include step information or job output. */ protected async _getJobs(options?: GetJobsOptions): Promise { - const jobsTable = this.tables.jobsTable + const jobsTable = this.tables.jobsActiveTable const page = options?.page ?? 
1 const pageSize = options?.pageSize ?? 10 const filters = options?.filters ?? {} @@ -1301,27 +1301,27 @@ export class PostgresBaseAdapter e protected async _getJobStepById(stepId: string): Promise { const [step] = await this.db .select({ - id: this.tables.jobStepsTable.id, - jobId: this.tables.jobStepsTable.job_id, - parentStepId: this.tables.jobStepsTable.parent_step_id, - parallel: this.tables.jobStepsTable.parallel, - name: this.tables.jobStepsTable.name, - output: this.tables.jobStepsTable.output, - status: this.tables.jobStepsTable.status, - error: this.tables.jobStepsTable.error, - startedAt: this.tables.jobStepsTable.started_at, - finishedAt: this.tables.jobStepsTable.finished_at, - timeoutMs: this.tables.jobStepsTable.timeout_ms, - expiresAt: this.tables.jobStepsTable.expires_at, - retriesLimit: this.tables.jobStepsTable.retries_limit, - retriesCount: this.tables.jobStepsTable.retries_count, - delayedMs: this.tables.jobStepsTable.delayed_ms, - historyFailedAttempts: this.tables.jobStepsTable.history_failed_attempts, - createdAt: this.tables.jobStepsTable.created_at, - updatedAt: this.tables.jobStepsTable.updated_at, + id: this.tables.jobStepsActiveTable.id, + jobId: this.tables.jobStepsActiveTable.job_id, + parentStepId: this.tables.jobStepsActiveTable.parent_step_id, + parallel: this.tables.jobStepsActiveTable.parallel, + name: this.tables.jobStepsActiveTable.name, + output: this.tables.jobStepsActiveTable.output, + status: this.tables.jobStepsActiveTable.status, + error: this.tables.jobStepsActiveTable.error, + startedAt: this.tables.jobStepsActiveTable.started_at, + finishedAt: this.tables.jobStepsActiveTable.finished_at, + timeoutMs: this.tables.jobStepsActiveTable.timeout_ms, + expiresAt: this.tables.jobStepsActiveTable.expires_at, + retriesLimit: this.tables.jobStepsActiveTable.retries_limit, + retriesCount: this.tables.jobStepsActiveTable.retries_count, + delayedMs: this.tables.jobStepsActiveTable.delayed_ms, + historyFailedAttempts: 
this.tables.jobStepsActiveTable.history_failed_attempts, + createdAt: this.tables.jobStepsActiveTable.created_at, + updatedAt: this.tables.jobStepsActiveTable.updated_at, }) - .from(this.tables.jobStepsTable) - .where(eq(this.tables.jobStepsTable.id, stepId)) + .from(this.tables.jobStepsActiveTable) + .where(eq(this.tables.jobStepsActiveTable.id, stepId)) .limit(1) return step ?? null @@ -1333,11 +1333,11 @@ export class PostgresBaseAdapter e protected async _getJobStatus(jobId: string): Promise { const [job] = await this.db .select({ - status: this.tables.jobsTable.status, - updatedAt: this.tables.jobsTable.updated_at, + status: this.tables.jobsActiveTable.status, + updatedAt: this.tables.jobsActiveTable.updated_at, }) - .from(this.tables.jobsTable) - .where(eq(this.tables.jobsTable.id, jobId)) + .from(this.tables.jobsActiveTable) + .where(eq(this.tables.jobsActiveTable.id, jobId)) .limit(1) return job ?? null @@ -1349,11 +1349,11 @@ export class PostgresBaseAdapter e protected async _getJobStepStatus(stepId: string): Promise { const [step] = await this.db .select({ - status: this.tables.jobStepsTable.status, - updatedAt: this.tables.jobStepsTable.updated_at, + status: this.tables.jobStepsActiveTable.status, + updatedAt: this.tables.jobStepsActiveTable.updated_at, }) - .from(this.tables.jobStepsTable) - .where(eq(this.tables.jobStepsTable.id, stepId)) + .from(this.tables.jobStepsActiveTable) + .where(eq(this.tables.jobStepsActiveTable.id, stepId)) .limit(1) return step ?? 
null @@ -1366,23 +1366,23 @@ export class PostgresBaseAdapter e const actionStats = this.db.$with('action_stats').as( this.db .select({ - name: this.tables.jobsTable.action_name, - last_job_created: sql`MAX(${this.tables.jobsTable.created_at})`.as('last_job_created'), - active: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_ACTIVE})`.as( + name: this.tables.jobsActiveTable.action_name, + last_job_created: sql`MAX(${this.tables.jobsActiveTable.created_at})`.as('last_job_created'), + active: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_ACTIVE})`.as( 'active', ), - completed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_COMPLETED})`.as( + completed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_COMPLETED})`.as( 'completed', ), - failed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_FAILED})`.as( + failed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_FAILED})`.as( 'failed', ), - cancelled: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_CANCELLED})`.as( + cancelled: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_CANCELLED})`.as( 'cancelled', ), }) - .from(this.tables.jobsTable) - .groupBy(this.tables.jobsTable.action_name), + .from(this.tables.jobsActiveTable) + .groupBy(this.tables.jobsActiveTable.action_name), ) const actions = await this.db @@ -1435,9 +1435,9 @@ export class PostgresBaseAdapter e })) const result = await this.db - .insert(this.tables.spansTable) + .insert(this.tables.spansActiveTable) .values(values) - .returning({ id: this.tables.spansTable.id }) + .returning({ id: this.tables.spansActiveTable.id }) return result.length } @@ -1447,7 +1447,7 @@ export class PostgresBaseAdapter e * For step queries, uses a recursive CTE to find all descendant spans. 
*/ protected async _getSpans(options: GetSpansOptions): Promise { - const spansTable = this.tables.spansTable + const spansTable = this.tables.spansActiveTable const filters = options.filters ?? {} // Build sort @@ -1608,9 +1608,9 @@ export class PostgresBaseAdapter e */ protected async _deleteSpans(options: DeleteSpansOptions): Promise { const result = await this.db - .delete(this.tables.spansTable) - .where(eq(this.tables.spansTable.job_id, options.jobId)) - .returning({ id: this.tables.spansTable.id }) + .delete(this.tables.spansActiveTable) + .where(eq(this.tables.spansActiveTable.job_id, options.jobId)) + .returning({ id: this.tables.spansActiveTable.id }) return result.length } @@ -1625,7 +1625,7 @@ export class PostgresBaseAdapter e * a recursive CTE to traverse the span hierarchy. */ protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters']) { - const spansTable = this.tables.spansTable + const spansTable = this.tables.spansActiveTable // Build condition for finding spans by trace_id (includes external spans) let traceCondition: ReturnType | undefined @@ -1798,4 +1798,27 @@ export class PostgresBaseAdapter e protected _map(result: any) { return result } + + // ============================================================================ + // Archive Methods (Stub implementations - to be filled in) + // ============================================================================ + + protected async _pruneArchive(_options: any): Promise { + return 0 + } + + protected async _truncateArchive(): Promise { + // TODO: Implement + } + + protected async _getArchiveStats(): Promise { + return { + jobsCount: 0, + stepsCount: 0, + spansCount: 0, + oldestJobDate: null, + totalSizeBytes: null, + lastPrunedAt: null, + } + } } diff --git a/packages/duron/src/adapters/postgres/base.ts.backup b/packages/duron/src/adapters/postgres/base.ts.backup new file mode 100644 index 0000000..7e0c905 --- /dev/null +++ 
b/packages/duron/src/adapters/postgres/base.ts.backup @@ -0,0 +1,1801 @@ +import { and, asc, between, desc, eq, gt, gte, ilike, inArray, isNull, ne, or, sql } from 'drizzle-orm' +import type { PgAsyncDatabase, PgColumn } from 'drizzle-orm/pg-core' + +import { + JOB_STATUS_ACTIVE, + JOB_STATUS_CANCELLED, + JOB_STATUS_COMPLETED, + JOB_STATUS_CREATED, + JOB_STATUS_FAILED, + STEP_STATUS_ACTIVE, + STEP_STATUS_CANCELLED, + STEP_STATUS_COMPLETED, + STEP_STATUS_FAILED, +} from '../../constants.js' +import { + Adapter, + type CancelJobOptions, + type CancelJobStepOptions, + type CompleteJobOptions, + type CompleteJobStepOptions, + type CreateJobOptions, + type CreateOrRecoverJobStepOptions, + type CreateOrRecoverJobStepResult, + type DelayJobStepOptions, + type DeleteJobOptions, + type DeleteJobsOptions, + type DeleteSpansOptions, + type FailJobOptions, + type FailJobStepOptions, + type FetchOptions, + type GetActionsResult, + type GetJobStepsOptions, + type GetJobStepsResult, + type GetJobsOptions, + type GetJobsResult, + type GetSpansOptions, + type GetSpansResult, + type InsertSpanOptions, + type Job, + type JobSort, + type JobStatusResult, + type JobStep, + type JobStepStatusResult, + type RecoverJobsOptions, + type RetryJobOptions, + type SpanSort, + type TimeTravelJobOptions, +} from '../adapter.js' +import createSchema from './schema.js' + +type Schema = ReturnType + +// Re-export types for backward compatibility +export type { Job, JobStep } from '../adapter.js' + +type DrizzleDatabase = PgAsyncDatabase + +export interface AdapterOptions { + connection: Connection + schema?: string + migrateOnStart?: boolean + migrationsFolder?: string +} + +export class PostgresBaseAdapter extends Adapter { + protected connection: Connection + protected db!: Database + protected tables: Schema + protected schema: string = 'duron' + protected migrateOnStart: boolean = true + + // ============================================================================ + // Constructor + // 
============================================================================ + + /** + * Create a new PostgresAdapter instance. + * + * @param options - Configuration options for the PostgreSQL adapter + */ + constructor(options: AdapterOptions) { + super() + + this.connection = options.connection + this.schema = options.schema ?? 'duron' + this.migrateOnStart = options.migrateOnStart ?? true + + this.tables = createSchema(this.schema) + + this._initDb() + } + + /** + * Initialize the database connection and Drizzle instance. + */ + protected _initDb() { + throw new Error('Not implemented') + } + + // ============================================================================ + // Lifecycle Methods + // ============================================================================ + + /** + * Start the adapter. + * Runs migrations if enabled and sets up database listeners. + * + * @returns Promise resolving to `true` if started successfully, `false` otherwise + */ + protected async _start() { + await this._listen(`ping-${this.id}`, async (payload: string) => { + const fromClientId = JSON.parse(payload).fromClientId + await this._notify(`pong-${fromClientId}`, { toClientId: this.id }) + }) + + await this._listen(`job-status-changed`, (payload: string) => { + if (this.listenerCount('job-status-changed') > 0) { + const { jobId, status, clientId } = JSON.parse(payload) + this.emit('job-status-changed', { jobId, status, clientId }) + } + }) + + await this._listen(`job-available`, (payload: string) => { + if (this.listenerCount('job-available') > 0) { + const { jobId } = JSON.parse(payload) + this.emit('job-available', { jobId }) + } + }) + } + + protected async _stop() { + // do nothing + } + + // ============================================================================ + // Job Methods + // ============================================================================ + + /** + * Internal method to create a new job in the database. 
+ * + * @returns Promise resolving to the job ID, or `null` if creation failed + */ + protected async _createJob({ + queue, + groupKey, + input, + timeoutMs, + checksum, + concurrencyLimit, + concurrencyStepLimit, + description, + }: CreateJobOptions) { + const [result] = await this.db + .insert(this.tables.jobsTable) + .values({ + action_name: queue, + group_key: groupKey, + description: description ?? null, + checksum, + input, + status: JOB_STATUS_CREATED, + timeout_ms: timeoutMs, + concurrency_limit: concurrencyLimit, + concurrency_step_limit: concurrencyStepLimit, + }) + .returning({ id: this.tables.jobsTable.id }) + + if (!result) { + return null + } + + return result.id + } + + /** + * Internal method to mark a job as completed. + * + * @returns Promise resolving to `true` if completed, `false` otherwise + */ + protected async _completeJob({ jobId, output }: CompleteJobOptions) { + const result = await this.db + .update(this.tables.jobsTable) + .set({ + status: JOB_STATUS_COMPLETED, + output, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .where( + and( + eq(this.tables.jobsTable.id, jobId), + eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsTable.client_id, this.id), + gt(this.tables.jobsTable.expires_at, sql`now()`), + ), + ) + .returning({ id: this.tables.jobsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to mark a job as failed. 
+ * + * @returns Promise resolving to `true` if failed, `false` otherwise + */ + protected async _failJob({ jobId, error }: FailJobOptions) { + const result = await this.db + .update(this.tables.jobsTable) + .set({ + status: JOB_STATUS_FAILED, + error, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .where( + and( + eq(this.tables.jobsTable.id, jobId), + eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsTable.client_id, this.id), + ), + ) + .returning({ id: this.tables.jobsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to cancel a job. + * + * @returns Promise resolving to `true` if cancelled, `false` otherwise + */ + protected async _cancelJob({ jobId }: CancelJobOptions) { + const result = await this.db + .update(this.tables.jobsTable) + .set({ + status: JOB_STATUS_CANCELLED, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .where( + and( + eq(this.tables.jobsTable.id, jobId), + or(eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsTable.status, JOB_STATUS_CREATED)), + ), + ) + .returning({ id: this.tables.jobsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to retry a completed, cancelled, or failed job by creating a copy of it with status 'created' and cleared output/error. + * Uses SELECT FOR UPDATE to prevent concurrent retries from creating duplicate jobs. 
+ * + * @returns Promise resolving to the job ID, or `null` if creation failed + */ + protected async _retryJob({ jobId }: RetryJobOptions): Promise { + // Use a single atomic query with FOR UPDATE lock to prevent race conditions + const result = this._map( + await this.db.execute<{ id: string }>(sql` + WITH locked_source AS ( + -- Lock the source job row to prevent concurrent retries + SELECT + j.action_name, + j.group_key, + j.description, + j.checksum, + j.input, + j.timeout_ms, + j.created_at, + j.concurrency_limit, + j.concurrency_step_limit + FROM ${this.tables.jobsTable} j + WHERE j.id = ${jobId} + AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_CANCELLED}, ${JOB_STATUS_FAILED}) + FOR UPDATE OF j SKIP LOCKED + ), + existing_retry AS ( + -- Check if a retry already exists (a newer job with same checksum, group_key, and input) + SELECT j.id + FROM ${this.tables.jobsTable} j + INNER JOIN locked_source ls + ON j.action_name = ls.action_name + AND j.group_key = ls.group_key + AND j.checksum = ls.checksum + AND j.input = ls.input + AND j.created_at > ls.created_at + WHERE j.status IN (${JOB_STATUS_CREATED}, ${JOB_STATUS_ACTIVE}) + LIMIT 1 + ), + inserted_retry AS ( + -- Insert the retry only if no existing retry was found + -- Get concurrency_limit from the latest job at insertion time to avoid stale values + INSERT INTO ${this.tables.jobsTable} ( + action_name, + group_key, + description, + checksum, + input, + status, + timeout_ms, + concurrency_limit, + concurrency_step_limit + ) + SELECT + ls.action_name, + ls.group_key, + ls.description, + ls.checksum, + ls.input, + ${JOB_STATUS_CREATED}, + ls.timeout_ms, + COALESCE( + ( + SELECT j.concurrency_limit + FROM ${this.tables.jobsTable} j + WHERE j.action_name = ls.action_name + AND j.group_key = ls.group_key + AND (j.expires_at IS NULL OR j.expires_at > now()) + ORDER BY j.created_at DESC, j.id DESC + LIMIT 1 + ), + ls.concurrency_limit + ), + ls.concurrency_step_limit + FROM locked_source ls + WHERE NOT 
EXISTS (SELECT 1 FROM existing_retry) + RETURNING id + ) + -- Return only the newly inserted retry ID (not existing retries) + SELECT id FROM inserted_retry + LIMIT 1 + `), + ) + + if (result.length === 0) { + return null + } + + return result[0]!.id + } + + /** + * Internal method to time travel a job to restart from a specific step. + * The job must be in completed, failed, or cancelled status. + * Resets the job and ancestor steps to active status, deletes subsequent steps, + * and preserves completed parallel siblings. + * + * Algorithm: + * 1. Validate job is in terminal state (completed/failed/cancelled) + * 2. Find the target step and all its ancestors (using parent_step_id) + * 3. Determine which steps to keep: + * - Steps completed BEFORE the target step (by created_at) + * - Branch siblings that are completed (independent) + * 4. Delete steps that should not be kept + * 5. Reset ancestor steps to active status (they need to re-run) + * 6. Reset the target step to active status + * 7. 
Reset job to created status + * + * @returns Promise resolving to `true` if time travel succeeded, `false` otherwise + */ + protected async _timeTravelJob({ jobId, stepId }: TimeTravelJobOptions): Promise { + const result = this._map( + await this.db.execute<{ success: boolean }>(sql` + WITH RECURSIVE + -- Lock and validate the job + locked_job AS ( + SELECT j.id + FROM ${this.tables.jobsTable} j + WHERE j.id = ${jobId} + AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_FAILED}, ${JOB_STATUS_CANCELLED}) + FOR UPDATE OF j + ), + -- Validate target step exists and belongs to job + target_step AS ( + SELECT s.id, s.parent_step_id, s.created_at + FROM ${this.tables.jobStepsTable} s + WHERE s.id = ${stepId} + AND s.job_id = ${jobId} + AND EXISTS (SELECT 1 FROM locked_job) + ), + -- Find all ancestor steps recursively (from target up to root) + ancestors AS ( + SELECT s.id, s.parent_step_id, 0 AS depth + FROM ${this.tables.jobStepsTable} s + WHERE s.id = (SELECT parent_step_id FROM target_step) + AND EXISTS (SELECT 1 FROM target_step) + UNION ALL + SELECT s.id, s.parent_step_id, a.depth + 1 + FROM ${this.tables.jobStepsTable} s + INNER JOIN ancestors a ON s.id = a.parent_step_id + ), + -- Steps to keep: completed steps created before target + completed parallel siblings of target and ancestors + their descendants + parallel_siblings AS ( + -- Completed parallel siblings of target step + SELECT s.id + FROM ${this.tables.jobStepsTable} s + CROSS JOIN target_step ts + WHERE s.job_id = ${jobId} + AND s.id != ts.id + AND s.branch = true + AND s.status = ${STEP_STATUS_COMPLETED} + AND ( + (s.parent_step_id IS NULL AND ts.parent_step_id IS NULL) + OR s.parent_step_id = ts.parent_step_id + ) + UNION + -- Completed parallel siblings of each ancestor + SELECT s.id + FROM ${this.tables.jobStepsTable} s + INNER JOIN ancestors a ON ( + (s.parent_step_id IS NULL AND a.parent_step_id IS NULL) + OR s.parent_step_id = a.parent_step_id + ) + WHERE s.job_id = ${jobId} + AND s.id NOT 
IN (SELECT id FROM ancestors) + AND s.branch = true + AND s.status = ${STEP_STATUS_COMPLETED} + ), + -- Find all descendants of parallel siblings (to keep their children too) + parallel_descendants AS ( + SELECT s.id + FROM ${this.tables.jobStepsTable} s + WHERE s.id IN (SELECT id FROM parallel_siblings) + UNION ALL + SELECT s.id + FROM ${this.tables.jobStepsTable} s + INNER JOIN parallel_descendants pd ON s.parent_step_id = pd.id + WHERE s.job_id = ${jobId} + ), + steps_to_keep AS ( + -- Steps created before target that are completed (non-ancestor, non-target) + SELECT s.id + FROM ${this.tables.jobStepsTable} s + CROSS JOIN target_step ts + WHERE s.job_id = ${jobId} + AND s.created_at < ts.created_at + AND s.status = ${STEP_STATUS_COMPLETED} + AND s.id NOT IN (SELECT id FROM ancestors) + AND s.id != ts.id + UNION + -- All parallel siblings and their descendants + SELECT id FROM parallel_descendants + ), + -- Calculate time offset: shift preserved steps to start from "now" + time_offset AS ( + SELECT + now() - MIN(s.started_at) AS offset_interval + FROM ${this.tables.jobStepsTable} s + WHERE s.id IN (SELECT id FROM steps_to_keep) + ), + -- Shift times of preserved steps to align with current time (only started_at/finished_at, NOT created_at to preserve ordering) + shift_preserved_times AS ( + UPDATE ${this.tables.jobStepsTable} + SET + started_at = started_at + (SELECT offset_interval FROM time_offset), + finished_at = CASE + WHEN finished_at IS NOT NULL + THEN finished_at + (SELECT offset_interval FROM time_offset) + ELSE NULL + END, + updated_at = now() + WHERE id IN (SELECT id FROM steps_to_keep) + AND (SELECT offset_interval FROM time_offset) IS NOT NULL + RETURNING id + ), + -- Delete steps that are not in the keep list and are not ancestors/target + deleted_steps AS ( + DELETE FROM ${this.tables.jobStepsTable} + WHERE job_id = ${jobId} + AND id NOT IN (SELECT id FROM steps_to_keep) + AND id NOT IN (SELECT id FROM ancestors) + AND id != (SELECT id FROM 
target_step) + RETURNING id + ), + -- Reset ancestor steps to active + reset_ancestors AS ( + UPDATE ${this.tables.jobStepsTable} + SET + status = ${STEP_STATUS_ACTIVE}, + output = NULL, + error = NULL, + finished_at = NULL, + started_at = now(), + expires_at = now() + (timeout_ms || ' milliseconds')::interval, + retries_count = 0, + delayed_ms = NULL, + history_failed_attempts = '{}'::jsonb, + updated_at = now() + WHERE id IN (SELECT id FROM ancestors) + RETURNING id + ), + -- Reset target step to active + reset_target AS ( + UPDATE ${this.tables.jobStepsTable} + SET + status = ${STEP_STATUS_ACTIVE}, + output = NULL, + error = NULL, + finished_at = NULL, + started_at = now(), + expires_at = now() + (timeout_ms || ' milliseconds')::interval, + retries_count = 0, + delayed_ms = NULL, + history_failed_attempts = '{}'::jsonb, + updated_at = now() + WHERE id = (SELECT id FROM target_step) + RETURNING id + ), + -- Reset job to created status + reset_job AS ( + UPDATE ${this.tables.jobsTable} + SET + status = ${JOB_STATUS_CREATED}, + output = NULL, + error = NULL, + started_at = NULL, + finished_at = NULL, + client_id = NULL, + expires_at = NULL, + updated_at = now() + WHERE id = ${jobId} + AND EXISTS (SELECT 1 FROM target_step) + RETURNING id + ) + SELECT EXISTS(SELECT 1 FROM reset_job) AS success + `), + ) + + return result.length > 0 && result[0]!.success === true + } + + /** + * Internal method to delete a job by its ID. + * Active jobs cannot be deleted. 
+ * + * @returns Promise resolving to `true` if deleted, `false` otherwise + */ + protected async _deleteJob({ jobId }: DeleteJobOptions): Promise { + const result = await this.db + .delete(this.tables.jobsTable) + .where(and(eq(this.tables.jobsTable.id, jobId), ne(this.tables.jobsTable.status, JOB_STATUS_ACTIVE))) + .returning({ id: this.tables.jobsTable.id }) + + // Also delete associated steps + if (result.length > 0) { + await this.db.delete(this.tables.jobStepsTable).where(eq(this.tables.jobStepsTable.job_id, jobId)) + } + + return result.length > 0 + } + + /** + * Internal method to delete multiple jobs using the same filters as getJobs. + * Active jobs cannot be deleted and will be excluded from deletion. + * + * @returns Promise resolving to the number of jobs deleted + */ + protected async _deleteJobs(options?: DeleteJobsOptions): Promise { + const jobsTable = this.tables.jobsTable + const filters = options?.filters ?? {} + + const where = this._buildJobsWhereClause(filters) + + const result = await this.db.delete(jobsTable).where(where).returning({ id: jobsTable.id }) + + return result.length + } + + /** + * Internal method to fetch jobs from the database respecting concurrency limits per group. + * Uses the concurrency limit from the latest job created for each groupKey. + * Uses advisory locks to ensure thread-safe job fetching. 
+ * + * @returns Promise resolving to an array of fetched jobs + */ + protected async _fetch({ batch }: FetchOptions) { + const result = this._map( + await this.db.execute(sql` + WITH group_concurrency AS ( + -- Get the concurrency limit from the latest job for each group + SELECT DISTINCT ON (j.group_key, j.action_name) + j.group_key as group_key, + j.action_name as action_name, + j.concurrency_limit as concurrency_limit + FROM ${this.tables.jobsTable} j + WHERE j.group_key IS NOT NULL + AND (j.expires_at IS NULL OR j.expires_at > now()) + ORDER BY j.group_key, j.action_name, j.created_at DESC, j.id DESC + ), + eligible_groups AS ( + -- Find all groups with their active counts that are below their concurrency limit + SELECT + gc.group_key, + gc.action_name, + gc.concurrency_limit, + COUNT(*) FILTER (WHERE j.status = ${JOB_STATUS_ACTIVE}) as active_count + FROM group_concurrency gc + LEFT JOIN ${this.tables.jobsTable} j + ON j.group_key = gc.group_key + AND j.action_name = gc.action_name + AND (j.expires_at IS NULL OR j.expires_at > now()) + GROUP BY gc.group_key, gc.action_name, gc.concurrency_limit + HAVING COUNT(*) FILTER (WHERE j.status = ${JOB_STATUS_ACTIVE}) < gc.concurrency_limit + ), + candidate_jobs AS ( + -- Lock candidate jobs first (before applying window functions) + SELECT + j.id, + j.action_name, + j.group_key as job_group_key, + j.created_at + FROM ${this.tables.jobsTable} j + INNER JOIN eligible_groups eg + ON j.group_key = eg.group_key + AND j.action_name = eg.action_name + WHERE j.status = ${JOB_STATUS_CREATED} + FOR UPDATE OF j SKIP LOCKED + ), + ranked_jobs AS ( + -- Rank jobs within each group after locking + SELECT + cj.id, + cj.action_name, + cj.job_group_key, + cj.created_at, + ROW_NUMBER() OVER ( + PARTITION BY cj.job_group_key, cj.action_name + ORDER BY cj.created_at ASC, cj.id ASC + ) as job_rank + FROM candidate_jobs cj + ), + next_job AS ( + -- Select only jobs that fit within the concurrency limit per group + -- Ordered globally by 
created_at to respect job creation order + SELECT rj.id, rj.action_name, rj.job_group_key + FROM ranked_jobs rj + INNER JOIN eligible_groups eg + ON rj.job_group_key = eg.group_key + AND rj.action_name = eg.action_name + WHERE rj.job_rank <= (eg.concurrency_limit - eg.active_count) + ORDER BY rj.created_at ASC, rj.id ASC + LIMIT ${batch} + ), + verify_concurrency AS ( + -- Double-check concurrency limit after acquiring lock + SELECT + nj.id, + nj.action_name, + nj.job_group_key, + eg.concurrency_limit, + (SELECT COUNT(*) + FROM ${this.tables.jobsTable} + WHERE action_name = nj.action_name + AND group_key = nj.job_group_key + AND status = ${JOB_STATUS_ACTIVE}) as current_active + FROM next_job nj + INNER JOIN eligible_groups eg + ON nj.job_group_key = eg.group_key + AND nj.action_name = eg.action_name + ) + UPDATE ${this.tables.jobsTable} j + SET status = ${JOB_STATUS_ACTIVE}, + started_at = now(), + expires_at = now() + (timeout_ms || ' milliseconds')::interval, + client_id = ${this.id}, + updated_at = now() + FROM verify_concurrency vc + WHERE j.id = vc.id + AND vc.current_active < vc.concurrency_limit -- Final concurrency check using job's concurrency limit + RETURNING + j.id, + j.action_name as "actionName", + j.group_key as "groupKey", + j.description, + j.input, + j.output, + j.error, + j.status, + j.timeout_ms as "timeoutMs", + j.expires_at as "expiresAt", + j.started_at as "startedAt", + j.finished_at as "finishedAt", + j.created_at as "createdAt", + j.updated_at as "updatedAt", + j.concurrency_limit as "concurrencyLimit", + j.concurrency_step_limit as "concurrencyStepLimit" + `), + ) + + return result + } + + /** + * Internal method to recover stuck jobs (jobs that were active but the process that owned them is no longer running). + * In multi-process mode, pings other processes to check if they're alive before recovering their jobs. 
+ * + * @returns Promise resolving to the number of jobs recovered + */ + protected async _recoverJobs(options: RecoverJobsOptions): Promise { + const { checksums, multiProcessMode = false, processTimeout = 5_000 } = options + + const unresponsiveClientIds: string[] = [this.id] + + if (multiProcessMode) { + const result = (await this.db + .selectDistinct({ + clientId: this.tables.jobsTable.client_id, + }) + .from(this.tables.jobsTable) + .where( + and(eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), ne(this.tables.jobsTable.client_id, this.id)), + )) as unknown as { clientId: string }[] + + if (result.length > 0) { + const pongCount = new Set() + const { unlisten } = await this._listen(`pong-${this.id}`, (payload: string) => { + const toClientId = JSON.parse(payload).toClientId + pongCount.add(toClientId) + if (pongCount.size >= result.length) { + unlisten() + } + }) + + await Promise.all(result.map((row) => this._notify(`ping-${row.clientId}`, { fromClientId: this.id }))) + + let waitForSeconds = processTimeout / 1_000 + while (pongCount.size < result.length && waitForSeconds > 0) { + await new Promise((resolve) => setTimeout(resolve, 1000).unref?.()) + waitForSeconds-- + } + + unresponsiveClientIds.push(...result.filter((row) => !pongCount.has(row.clientId)).map((row) => row.clientId)) + } + } + + if (unresponsiveClientIds.length > 0) { + const result = this._map( + await this.db.execute<{ id: string }>(sql` + WITH locked_jobs AS ( + SELECT j.id + FROM ${this.tables.jobsTable} j + WHERE j.status = ${JOB_STATUS_ACTIVE} + AND j.client_id IN ${unresponsiveClientIds} + FOR UPDATE OF j SKIP LOCKED + ), + updated_jobs AS ( + UPDATE ${this.tables.jobsTable} j + SET status = ${JOB_STATUS_CREATED}, + started_at = NULL, + expires_at = NULL, + finished_at = NULL, + output = NULL, + error = NULL, + updated_at = now() + WHERE EXISTS (SELECT 1 FROM locked_jobs lj WHERE lj.id = j.id) + RETURNING id, checksum + ), + deleted_steps AS ( + DELETE FROM 
${this.tables.jobStepsTable} s + WHERE EXISTS ( + SELECT 1 FROM updated_jobs uj + WHERE uj.id = s.job_id + AND uj.checksum NOT IN ${checksums} + ) + ) + SELECT id FROM updated_jobs + `), + ) + + return result.length + } + + return 0 + } + + // ============================================================================ + // Step Methods + // ============================================================================ + + /** + * Internal method to create or recover a job step by creating or resetting a step record in the database. + * + * @returns Promise resolving to the step, or `null` if creation failed + */ + protected async _createOrRecoverJobStep({ + jobId, + name, + timeoutMs, + retriesLimit, + parentStepId, + parallel = false, + }: CreateOrRecoverJobStepOptions): Promise { + type StepResult = CreateOrRecoverJobStepResult + + const [result] = this._map( + await this.db.execute(sql` + WITH job_check AS ( + SELECT j.id + FROM ${this.tables.jobsTable} j + WHERE j.id = ${jobId} + AND j.status = ${JOB_STATUS_ACTIVE} + AND (j.expires_at IS NULL OR j.expires_at > now()) + ), + step_existed AS ( + SELECT EXISTS( + SELECT 1 FROM ${this.tables.jobStepsTable} s + WHERE s.job_id = ${jobId} + AND s.name = ${name} + AND s.parent_step_id IS NOT DISTINCT FROM ${parentStepId} + ) AS existed + ), + upserted_step AS ( + INSERT INTO ${this.tables.jobStepsTable} ( + job_id, + parent_step_id, + branch, + name, + timeout_ms, + retries_limit, + status, + started_at, + expires_at, + retries_count, + delayed_ms + ) + SELECT + ${jobId}, + ${parentStepId}, + ${parallel}, + ${name}, + ${timeoutMs}, + ${retriesLimit}, + ${STEP_STATUS_ACTIVE}, + now(), + now() + interval '${sql.raw(timeoutMs.toString())} milliseconds', + 0, + NULL + WHERE EXISTS (SELECT 1 FROM job_check) + ON CONFLICT (job_id, name, parent_step_id) DO UPDATE + SET + timeout_ms = ${timeoutMs}, + expires_at = now() + interval '${sql.raw(timeoutMs.toString())} milliseconds', + retries_count = 0, + retries_limit = 
${retriesLimit}, + delayed_ms = NULL, + started_at = now(), + history_failed_attempts = '{}'::jsonb + WHERE ${this.tables.jobStepsTable}.status = ${STEP_STATUS_ACTIVE} + RETURNING + id, + status, + retries_limit AS "retriesLimit", + retries_count AS "retriesCount", + timeout_ms AS "timeoutMs", + error, + output + ), + final_upserted AS ( + SELECT + us.*, + CASE WHEN se.existed THEN false ELSE true END AS "isNew" + FROM upserted_step us + CROSS JOIN step_existed se + ), + existing_step AS ( + SELECT + s.id, + s.status, + s.retries_limit AS "retriesLimit", + s.retries_count AS "retriesCount", + s.timeout_ms AS "timeoutMs", + s.error, + s.output, + false AS "isNew" + FROM ${this.tables.jobStepsTable} s + INNER JOIN job_check jc ON s.job_id = jc.id + WHERE s.job_id = ${jobId} + AND s.name = ${name} + AND s.parent_step_id IS NOT DISTINCT FROM ${parentStepId} + AND NOT EXISTS (SELECT 1 FROM final_upserted) + ) + SELECT * FROM final_upserted + UNION ALL + SELECT * FROM existing_step + `), + ) + + if (!result) { + this.logger?.error({ jobId }, `[PostgresAdapter] Job ${jobId} is not active or has expired`) + return null + } + + return result + } + + /** + * Internal method to mark a job step as completed. 
+ * + * @returns Promise resolving to `true` if completed, `false` otherwise + */ + protected async _completeJobStep({ stepId, output }: CompleteJobStepOptions) { + const result = await this.db + .update(this.tables.jobStepsTable) + .set({ + status: STEP_STATUS_COMPLETED, + output, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .from(this.tables.jobsTable) + .where( + and( + eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), + eq(this.tables.jobStepsTable.id, stepId), + eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), + eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + or(isNull(this.tables.jobsTable.expires_at), gt(this.tables.jobsTable.expires_at, sql`now()`)), + ), + ) + .returning({ id: this.tables.jobStepsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to mark a job step as failed. + * + * @returns Promise resolving to `true` if failed, `false` otherwise + */ + protected async _failJobStep({ stepId, error }: FailJobStepOptions) { + const result = await this.db + .update(this.tables.jobStepsTable) + .set({ + status: STEP_STATUS_FAILED, + error, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .from(this.tables.jobsTable) + .where( + and( + eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), + eq(this.tables.jobStepsTable.id, stepId), + eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), + eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + ), + ) + .returning({ id: this.tables.jobStepsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to delay a job step. 
+ * + * @returns Promise resolving to `true` if delayed, `false` otherwise + */ + protected async _delayJobStep({ stepId, delayMs, error }: DelayJobStepOptions) { + const jobStepsTable = this.tables.jobStepsTable + const jobsTable = this.tables.jobsTable + + const result = await this.db + .update(jobStepsTable) + .set({ + delayed_ms: delayMs, + retries_count: sql`${jobStepsTable.retries_count} + 1`, + expires_at: sql`now() + (${jobStepsTable.timeout_ms} || ' milliseconds')::interval + (${delayMs} || ' milliseconds')::interval`, + history_failed_attempts: sql`COALESCE(${jobStepsTable.history_failed_attempts}, '{}'::jsonb) || jsonb_build_object( + extract(epoch from now())::text, + jsonb_build_object( + 'failedAt', now(), + 'error', ${JSON.stringify(error)}::jsonb, + 'delayedMs', ${delayMs}::integer + ) + )`, + updated_at: sql`now()`, + }) + .from(jobsTable) + .where( + and( + eq(jobStepsTable.job_id, jobsTable.id), + eq(jobStepsTable.id, stepId), + eq(jobStepsTable.status, STEP_STATUS_ACTIVE), + eq(jobsTable.status, JOB_STATUS_ACTIVE), + ), + ) + .returning({ id: jobStepsTable.id }) + + return result.length > 0 + } + + /** + * Internal method to cancel a job step. 
+ * + * @returns Promise resolving to `true` if cancelled, `false` otherwise + */ + protected async _cancelJobStep({ stepId }: CancelJobStepOptions) { + const result = await this.db + .update(this.tables.jobStepsTable) + .set({ + status: STEP_STATUS_CANCELLED, + finished_at: sql`now()`, + updated_at: sql`now()`, + }) + .from(this.tables.jobsTable) + .where( + and( + eq(this.tables.jobStepsTable.job_id, this.tables.jobsTable.id), + eq(this.tables.jobStepsTable.id, stepId), + eq(this.tables.jobStepsTable.status, STEP_STATUS_ACTIVE), + or( + eq(this.tables.jobsTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsTable.status, JOB_STATUS_CANCELLED), + ), + ), + ) + .returning({ id: this.tables.jobStepsTable.id }) + + return result.length > 0 + } + + // ============================================================================ + // Query Methods + // ============================================================================ + + /** + * Internal method to get a job by its ID. Does not include step information. 
+ */ + protected async _getJobById(jobId: string): Promise { + const jobsTable = this.tables.jobsTable + + // Calculate duration as a SQL expression (finishedAt - startedAt in milliseconds) + const durationMs = sql` + CASE + WHEN ${jobsTable.started_at} IS NOT NULL AND ${jobsTable.finished_at} IS NOT NULL + THEN EXTRACT(EPOCH FROM (${jobsTable.finished_at} - ${jobsTable.started_at})) * 1000 + ELSE NULL + END + `.as('duration_ms') + + const [job] = await this.db + .select({ + id: jobsTable.id, + actionName: jobsTable.action_name, + groupKey: jobsTable.group_key, + description: jobsTable.description, + input: jobsTable.input, + output: jobsTable.output, + error: jobsTable.error, + status: jobsTable.status, + timeoutMs: jobsTable.timeout_ms, + expiresAt: jobsTable.expires_at, + startedAt: jobsTable.started_at, + finishedAt: jobsTable.finished_at, + createdAt: jobsTable.created_at, + updatedAt: jobsTable.updated_at, + concurrencyLimit: jobsTable.concurrency_limit, + concurrencyStepLimit: jobsTable.concurrency_step_limit, + clientId: jobsTable.client_id, + durationMs, + }) + .from(jobsTable) + .where(eq(jobsTable.id, jobId)) + .limit(1) + + return job ?? null + } + + /** + * Internal method to get all steps for a job with optional fuzzy search. + * Steps are always ordered by created_at ASC. + * Steps do not include output data. + */ + protected async _getJobSteps(options: GetJobStepsOptions): Promise { + const { jobId, search } = options + + const jobStepsTable = this.tables.jobStepsTable + + const fuzzySearch = search?.trim() + + const where = and( + eq(jobStepsTable.job_id, jobId), + fuzzySearch && fuzzySearch.length > 0 + ? or( + ilike(jobStepsTable.name, `%${fuzzySearch}%`), + sql`to_tsvector('english', ${jobStepsTable.output}::text) @@ plainto_tsquery('english', ${fuzzySearch})`, + ) + : undefined, + options.updatedAfter + ? 
sql`date_trunc('milliseconds', ${jobStepsTable.updated_at}) > ${options.updatedAfter.toISOString()}::timestamptz` + : undefined, + ) + + const steps = await this.db + .select({ + id: jobStepsTable.id, + jobId: jobStepsTable.job_id, + parentStepId: jobStepsTable.parent_step_id, + parallel: jobStepsTable.parallel, + name: jobStepsTable.name, + status: jobStepsTable.status, + error: jobStepsTable.error, + startedAt: jobStepsTable.started_at, + finishedAt: jobStepsTable.finished_at, + timeoutMs: jobStepsTable.timeout_ms, + expiresAt: jobStepsTable.expires_at, + retriesLimit: jobStepsTable.retries_limit, + retriesCount: jobStepsTable.retries_count, + delayedMs: jobStepsTable.delayed_ms, + historyFailedAttempts: jobStepsTable.history_failed_attempts, + createdAt: jobStepsTable.created_at, + updatedAt: jobStepsTable.updated_at, + }) + .from(jobStepsTable) + .where(where) + .orderBy(asc(jobStepsTable.created_at)) + + return { + steps, + total: steps.length, + } + } + + protected _buildJobsWhereClause(filters: GetJobsOptions['filters']) { + if (!filters) { + return undefined + } + + const jobsTable = this.tables.jobsTable + + const fuzzySearch = filters.search?.trim() + + // Build WHERE clause parts using postgres template literals + return and( + filters.status + ? inArray(jobsTable.status, Array.isArray(filters.status) ? filters.status : [filters.status]) + : undefined, + filters.actionName + ? inArray(jobsTable.action_name, Array.isArray(filters.actionName) ? filters.actionName : [filters.actionName]) + : undefined, + filters.groupKey && Array.isArray(filters.groupKey) + ? sql`j.group_key LIKE ANY(ARRAY[${sql.raw(filters.groupKey.map((key) => `'${key}'`).join(','))}]::text[])` + : undefined, + filters.groupKey && !Array.isArray(filters.groupKey) + ? ilike(jobsTable.group_key, `%${filters.groupKey}%`) + : undefined, + filters.clientId + ? inArray(jobsTable.client_id, Array.isArray(filters.clientId) ? 
filters.clientId : [filters.clientId]) + : undefined, + filters.description ? ilike(jobsTable.description, `%${filters.description}%`) : undefined, + filters.createdAt && Array.isArray(filters.createdAt) + ? between( + sql`date_trunc('second', ${jobsTable.created_at})`, + filters.createdAt[0]!.toISOString(), + filters.createdAt[1]!.toISOString(), + ) + : undefined, + filters.createdAt && !Array.isArray(filters.createdAt) + ? gte(sql`date_trunc('second', ${jobsTable.created_at})`, filters.createdAt.toISOString()) + : undefined, + filters.startedAt && Array.isArray(filters.startedAt) + ? between( + sql`date_trunc('second', ${jobsTable.started_at})`, + filters.startedAt[0]!.toISOString(), + filters.startedAt[1]!.toISOString(), + ) + : undefined, + filters.startedAt && !Array.isArray(filters.startedAt) + ? gte(sql`date_trunc('second', ${jobsTable.started_at})`, filters.startedAt.toISOString()) + : undefined, + filters.finishedAt && Array.isArray(filters.finishedAt) + ? between( + sql`date_trunc('second', ${jobsTable.finished_at})`, + filters.finishedAt[0]!.toISOString(), + filters.finishedAt[1]!.toISOString(), + ) + : undefined, + filters.finishedAt && !Array.isArray(filters.finishedAt) + ? gte(sql`date_trunc('second', ${jobsTable.finished_at})`, filters.finishedAt.toISOString()) + : undefined, + filters.updatedAfter + ? sql`date_trunc('milliseconds', ${jobsTable.updated_at}) > ${filters.updatedAfter.toISOString()}::timestamptz` + : undefined, + fuzzySearch && fuzzySearch.length > 0 + ? 
or( + ilike(jobsTable.action_name, `%${fuzzySearch}%`), + ilike(jobsTable.group_key, `%${fuzzySearch}%`), + ilike(jobsTable.description, `%${fuzzySearch}%`), + ilike(jobsTable.client_id, `%${fuzzySearch}%`), + sql`${jobsTable.id}::text ilike ${`%${fuzzySearch}%`}`, + sql`to_tsvector('english', ${jobsTable.input}::text) @@ plainto_tsquery('english', ${fuzzySearch})`, + sql`to_tsvector('english', ${jobsTable.output}::text) @@ plainto_tsquery('english', ${fuzzySearch})`, + ) + : undefined, + ...(filters.inputFilter && Object.keys(filters.inputFilter).length > 0 + ? this.#buildJsonbWhereConditions(filters.inputFilter, jobsTable.input) + : []), + ...(filters.outputFilter && Object.keys(filters.outputFilter).length > 0 + ? this.#buildJsonbWhereConditions(filters.outputFilter, jobsTable.output) + : []), + ) + } + /** + * Internal method to get jobs with pagination, filtering, and sorting. + * Does not include step information or job output. + */ + protected async _getJobs(options?: GetJobsOptions): Promise { + const jobsTable = this.tables.jobsTable + const page = options?.page ?? 1 + const pageSize = options?.pageSize ?? 10 + const filters = options?.filters ?? {} + + const sortInput = options?.sort ?? { field: 'startedAt', order: 'desc' } + const sorts = Array.isArray(sortInput) ? 
sortInput : [sortInput] + + const where = this._buildJobsWhereClause(filters) + + // Get total count + const total = await this.db.$count(jobsTable, where) + if (!total) { + return { + jobs: [], + total: 0, + page, + pageSize, + } + } + + // Calculate duration as a SQL expression (finishedAt - startedAt in milliseconds) + const durationMs = sql` + CASE + WHEN ${jobsTable.started_at} IS NOT NULL AND ${jobsTable.finished_at} IS NOT NULL + THEN EXTRACT(EPOCH FROM (${jobsTable.finished_at} - ${jobsTable.started_at})) * 1000 + ELSE NULL + END + `.as('duration_ms') + + const sortFieldMap: Record = { + createdAt: jobsTable.created_at, + startedAt: jobsTable.started_at, + finishedAt: jobsTable.finished_at, + status: jobsTable.status, + actionName: jobsTable.action_name, + expiresAt: jobsTable.expires_at, + duration: durationMs, + description: jobsTable.description, + } + + const jobs = await this.db + .select({ + id: jobsTable.id, + actionName: jobsTable.action_name, + groupKey: jobsTable.group_key, + description: jobsTable.description, + input: jobsTable.input, + output: jobsTable.output, + error: jobsTable.error, + status: jobsTable.status, + timeoutMs: jobsTable.timeout_ms, + expiresAt: jobsTable.expires_at, + startedAt: jobsTable.started_at, + finishedAt: jobsTable.finished_at, + createdAt: jobsTable.created_at, + updatedAt: jobsTable.updated_at, + concurrencyLimit: jobsTable.concurrency_limit, + concurrencyStepLimit: jobsTable.concurrency_step_limit, + clientId: jobsTable.client_id, + durationMs, + }) + .from(jobsTable) + .where(where) + .orderBy( + ...sorts + .filter((sortItem) => sortItem.field in sortFieldMap) + .map((sortItem) => { + const sortField = sortFieldMap[sortItem.field] + if (sortItem.order.toUpperCase() === 'ASC') { + return asc(sortField) + } else { + return desc(sortField) + } + }), + ) + .limit(pageSize) + .offset((page - 1) * pageSize) + + return { + jobs, + total, + page, + pageSize, + } + } + + /** + * Internal method to get a step by its ID with 
all information. + */ + protected async _getJobStepById(stepId: string): Promise { + const [step] = await this.db + .select({ + id: this.tables.jobStepsTable.id, + jobId: this.tables.jobStepsTable.job_id, + parentStepId: this.tables.jobStepsTable.parent_step_id, + parallel: this.tables.jobStepsTable.parallel, + name: this.tables.jobStepsTable.name, + output: this.tables.jobStepsTable.output, + status: this.tables.jobStepsTable.status, + error: this.tables.jobStepsTable.error, + startedAt: this.tables.jobStepsTable.started_at, + finishedAt: this.tables.jobStepsTable.finished_at, + timeoutMs: this.tables.jobStepsTable.timeout_ms, + expiresAt: this.tables.jobStepsTable.expires_at, + retriesLimit: this.tables.jobStepsTable.retries_limit, + retriesCount: this.tables.jobStepsTable.retries_count, + delayedMs: this.tables.jobStepsTable.delayed_ms, + historyFailedAttempts: this.tables.jobStepsTable.history_failed_attempts, + createdAt: this.tables.jobStepsTable.created_at, + updatedAt: this.tables.jobStepsTable.updated_at, + }) + .from(this.tables.jobStepsTable) + .where(eq(this.tables.jobStepsTable.id, stepId)) + .limit(1) + + return step ?? null + } + + /** + * Internal method to get job status and updatedAt timestamp. + */ + protected async _getJobStatus(jobId: string): Promise { + const [job] = await this.db + .select({ + status: this.tables.jobsTable.status, + updatedAt: this.tables.jobsTable.updated_at, + }) + .from(this.tables.jobsTable) + .where(eq(this.tables.jobsTable.id, jobId)) + .limit(1) + + return job ?? null + } + + /** + * Internal method to get job step status and updatedAt timestamp. + */ + protected async _getJobStepStatus(stepId: string): Promise { + const [step] = await this.db + .select({ + status: this.tables.jobStepsTable.status, + updatedAt: this.tables.jobStepsTable.updated_at, + }) + .from(this.tables.jobStepsTable) + .where(eq(this.tables.jobStepsTable.id, stepId)) + .limit(1) + + return step ?? 
null + } + + /** + * Internal method to get action statistics including counts and last job created date. + */ + protected async _getActions(): Promise { + const actionStats = this.db.$with('action_stats').as( + this.db + .select({ + name: this.tables.jobsTable.action_name, + last_job_created: sql`MAX(${this.tables.jobsTable.created_at})`.as('last_job_created'), + active: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_ACTIVE})`.as( + 'active', + ), + completed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_COMPLETED})`.as( + 'completed', + ), + failed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_FAILED})`.as( + 'failed', + ), + cancelled: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsTable.status} = ${JOB_STATUS_CANCELLED})`.as( + 'cancelled', + ), + }) + .from(this.tables.jobsTable) + .groupBy(this.tables.jobsTable.action_name), + ) + + const actions = await this.db + .with(actionStats) + .select({ + name: actionStats.name, + lastJobCreated: actionStats.last_job_created, + active: sql`${actionStats.active}::int`, + completed: sql`${actionStats.completed}::int`, + failed: sql`${actionStats.failed}::int`, + cancelled: sql`${actionStats.cancelled}::int`, + }) + .from(actionStats) + .orderBy(actionStats.name) + + return { + actions: actions.map((action) => ({ + ...action, + lastJobCreated: action.lastJobCreated ?? null, + })), + } + } + + // ============================================================================ + // Metrics Methods + // ============================================================================ + + /** + * Internal method to insert multiple span records in a single batch. 
+ */ + protected async _insertSpans(spans: InsertSpanOptions[]): Promise { + if (spans.length === 0) { + return 0 + } + + const values = spans.map((s) => ({ + trace_id: s.traceId, + span_id: s.spanId, + parent_span_id: s.parentSpanId, + job_id: s.jobId, + step_id: s.stepId, + name: s.name, + kind: s.kind, + start_time_unix_nano: s.startTimeUnixNano, + end_time_unix_nano: s.endTimeUnixNano, + status_code: s.statusCode, + status_message: s.statusMessage, + attributes: s.attributes ?? {}, + events: s.events ?? [], + })) + + const result = await this.db + .insert(this.tables.spansTable) + .values(values) + .returning({ id: this.tables.spansTable.id }) + + return result.length + } + + /** + * Internal method to get spans for a job or step. + * For step queries, uses a recursive CTE to find all descendant spans. + */ + protected async _getSpans(options: GetSpansOptions): Promise { + const spansTable = this.tables.spansTable + const filters = options.filters ?? {} + + // Build sort + const sortInput = options.sort ?? { field: 'startTimeUnixNano', order: 'asc' } + const sortFieldMap: Record = { + name: 'name', + startTimeUnixNano: 'start_time_unix_nano', + endTimeUnixNano: 'end_time_unix_nano', + } + const sortField = sortFieldMap[sortInput.field] + const sortOrder = sortInput.order === 'asc' ? 'ASC' : 'DESC' + + // For step queries, use a recursive CTE to get descendant spans + if (options.stepId) { + return this._getStepSpansRecursive(options.stepId, sortField, sortOrder, filters) + } + + // Build WHERE clause for job queries + const where = this._buildSpansWhereClause(options.jobId, undefined, filters) + + // Get total count + const total = await this.db.$count(spansTable, where) + if (!total) { + return { + spans: [], + total: 0, + } + } + + const sortFieldColumn = sortFieldMap[sortInput.field] + const orderByClause = + sortInput.order === 'asc' + ? 
asc(spansTable[sortFieldColumn as keyof typeof spansTable] as any) + : desc(spansTable[sortFieldColumn as keyof typeof spansTable] as any) + + const rows = await this.db + .select({ + id: spansTable.id, + traceId: spansTable.trace_id, + spanId: spansTable.span_id, + parentSpanId: spansTable.parent_span_id, + jobId: spansTable.job_id, + stepId: spansTable.step_id, + name: spansTable.name, + kind: spansTable.kind, + startTimeUnixNano: spansTable.start_time_unix_nano, + endTimeUnixNano: spansTable.end_time_unix_nano, + statusCode: spansTable.status_code, + statusMessage: spansTable.status_message, + attributes: spansTable.attributes, + events: spansTable.events, + }) + .from(spansTable) + .where(where) + .orderBy(orderByClause) + + // Cast kind and statusCode to proper types, convert BigInt to string for JSON serialization + const spans = rows.map((row) => ({ + ...row, + kind: row.kind as 0 | 1 | 2 | 3 | 4, + statusCode: row.statusCode as 0 | 1 | 2, + // Convert BigInt to string for JSON serialization + startTimeUnixNano: row.startTimeUnixNano?.toString() ?? null, + endTimeUnixNano: row.endTimeUnixNano?.toString() ?? null, + })) + + return { + spans, + total, + } + } + + /** + * Get spans for a step using a recursive CTE to traverse the span hierarchy. + * This returns the step's span and all its descendant spans (children, grandchildren, etc.) + */ + protected async _getStepSpansRecursive( + stepId: string, + sortField: string, + sortOrder: string, + _filters?: GetSpansOptions['filters'], + ): Promise { + const schemaName = this.schema + + // Use a recursive CTE to find all descendant spans + // 1. Base case: find the span with step_id = stepId + // 2. 
Recursive case: find all spans where parent_span_id = span_id of a span we've already found + const query = sql` + WITH RECURSIVE span_tree AS ( + -- Base case: the span(s) for the step + SELECT * FROM ${sql.identifier(schemaName)}.spans WHERE step_id = ${stepId}::uuid + UNION ALL + -- Recursive case: children of spans we've found + SELECT s.* FROM ${sql.identifier(schemaName)}.spans s + INNER JOIN span_tree st ON s.parent_span_id = st.span_id + ) + SELECT + id, + trace_id as "traceId", + span_id as "spanId", + parent_span_id as "parentSpanId", + job_id as "jobId", + step_id as "stepId", + name, + kind, + start_time_unix_nano as "startTimeUnixNano", + end_time_unix_nano as "endTimeUnixNano", + status_code as "statusCode", + status_message as "statusMessage", + attributes, + events + FROM span_tree + ORDER BY ${sql.identifier(sortField)} ${sql.raw(sortOrder)} + ` + + // Raw SQL returns numeric types as strings, so we type them as such + const rows = (await this.db.execute(query)) as unknown as Array<{ + id: string | number + traceId: string + spanId: string + parentSpanId: string | null + jobId: string | null + stepId: string | null + name: string + kind: string | number + startTimeUnixNano: string | bigint | null + endTimeUnixNano: string | bigint | null + statusCode: string | number + statusMessage: string | null + attributes: Record + events: Array<{ name: string; timeUnixNano: string; attributes?: Record }> + }> + + // Convert types: raw SQL returns numeric types as strings + const spans = rows.map((row) => ({ + ...row, + // Convert id to number (bigserial comes as string from raw SQL) + id: typeof row.id === 'string' ? Number.parseInt(row.id, 10) : row.id, + // Convert kind and statusCode to proper types + kind: (typeof row.kind === 'string' ? Number.parseInt(row.kind, 10) : row.kind) as 0 | 1 | 2 | 3 | 4, + statusCode: (typeof row.statusCode === 'string' ? 
Number.parseInt(row.statusCode, 10) : row.statusCode) as + | 0 + | 1 + | 2, + // Convert BigInt to string for JSON serialization + startTimeUnixNano: row.startTimeUnixNano?.toString() ?? null, + endTimeUnixNano: row.endTimeUnixNano?.toString() ?? null, + })) + + return { + spans, + total: spans.length, + } + } + + /** + * Internal method to delete all spans for a job. + */ + protected async _deleteSpans(options: DeleteSpansOptions): Promise { + const result = await this.db + .delete(this.tables.spansTable) + .where(eq(this.tables.spansTable.job_id, options.jobId)) + .returning({ id: this.tables.spansTable.id }) + + return result.length + } + + /** + * Build WHERE clause for spans queries (used for job queries only). + * When querying by jobId, we find all spans that share the same trace_id + * as spans with that job. This includes spans from external libraries that + * don't have the duron.job.id attribute but are part of the same trace. + * + * Note: Step queries are handled separately by _getStepSpansRecursive using + * a recursive CTE to traverse the span hierarchy. + */ + protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters']) { + const spansTable = this.tables.spansTable + + // Build condition for finding spans by trace_id (includes external spans) + let traceCondition: ReturnType | undefined + + if (jobId) { + // Find all spans that share a trace_id with any span that has this job_id + // This includes external spans (like from AI SDK) that don't have duron.job.id + traceCondition = inArray( + spansTable.trace_id, + this.db.select({ traceId: spansTable.trace_id }).from(spansTable).where(eq(spansTable.job_id, jobId)), + ) + } + + return and( + traceCondition, + filters?.name + ? Array.isArray(filters.name) + ? or(...filters.name.map((n) => ilike(spansTable.name, `%${n}%`))) + : ilike(spansTable.name, `%${filters.name}%`) + : undefined, + filters?.kind ? inArray(spansTable.kind, Array.isArray(filters.kind) ? 
filters.kind : [filters.kind]) : undefined, + filters?.statusCode + ? inArray(spansTable.status_code, Array.isArray(filters.statusCode) ? filters.statusCode : [filters.statusCode]) + : undefined, + filters?.traceId ? eq(spansTable.trace_id, filters.traceId) : undefined, + ...(filters?.attributesFilter && Object.keys(filters.attributesFilter).length > 0 + ? this.#buildJsonbWhereConditions(filters.attributesFilter, spansTable.attributes) + : []), + ) + } + + // ============================================================================ + // Private Methods + // ============================================================================ + + /** + * Build WHERE conditions for JSONB filter using individual property checks. + * Each property becomes a separate condition using ->> operator and ILIKE for case-insensitive matching. + * Supports nested properties via dot notation and arrays. + * + * Example: + * { "email": "tincho@gmail", "address.name": "nicolas", "products": ["chicle"] } + * Generates: + * input ->> 'email' ILIKE '%tincho@gmail%' + * AND input ->> 'address' ->> 'name' ILIKE '%nicolas%' + * AND EXISTS (SELECT 1 FROM jsonb_array_elements_text(input -> 'products') AS elem WHERE LOWER(elem) ILIKE LOWER('%chicle%')) + * + * @param filter - Flat record with dot-notation keys (e.g., { "email": "test", "address.name": "value", "products": ["chicle"] }) + * @param jsonbColumn - The JSONB column name + * @returns Array of SQL conditions + */ + #buildJsonbWhereConditions(filter: Record, jsonbColumn: PgColumn): any[] { + const conditions: any[] = [] + + for (const [key, value] of Object.entries(filter)) { + const parts = key.split('.').filter((p) => p.length > 0) + if (parts.length === 0) { + continue + } + + // Build the JSONB path expression step by step + // For "address.name": input -> 'address' ->> 'name' (-> for intermediate, ->> for final) + // For "email": input ->> 'email' (->> for single level) + let jsonbPath = sql`${jsonbColumn}` + if (parts.length === 
1) { + // Single level: use ->> directly + jsonbPath = sql`${jsonbPath} ->> ${parts[0]!}` + } else { + // Nested: use -> for intermediate steps, ->> for final step + for (let i = 0; i < parts.length - 1; i++) { + const part = parts[i] + if (part) { + jsonbPath = sql`${jsonbPath} -> ${part}` + } + } + const lastPart = parts[parts.length - 1] + if (lastPart) { + jsonbPath = sql`${jsonbPath} ->> ${lastPart}` + } + } + + // Handle array values - check if JSONB array contains at least one of the values + if (Array.isArray(value)) { + // Build condition: check if any element in the JSONB array matches any value in the filter array + const arrayValueConditions = value.map((arrayValue) => { + const arrayValueStr = String(arrayValue) + // Get the array from JSONB: input -> 'products' + let arrayPath = sql`${jsonbColumn}` + for (let i = 0; i < parts.length - 1; i++) { + const part = parts[i] + if (part) { + arrayPath = sql`${arrayPath} -> ${part}` + } + } + const lastPart = parts[parts.length - 1] + if (lastPart) { + arrayPath = sql`${arrayPath} -> ${lastPart}` + } + + // Check if the JSONB array contains the value (case-insensitive for strings) + if (typeof arrayValue === 'string') { + return sql`EXISTS ( + SELECT 1 + FROM jsonb_array_elements_text(${arrayPath}) AS elem + WHERE LOWER(elem) ILIKE LOWER(${`%${arrayValueStr}%`}) + )` + } else { + // For non-string values, use exact containment + return sql`${arrayPath} @> ${sql.raw(JSON.stringify([arrayValue]))}::jsonb` + } + }) + + // Combine array conditions with OR (at least one must match) + if (arrayValueConditions.length > 0) { + conditions.push( + arrayValueConditions.reduce((acc, condition, idx) => (idx === 0 ? 
condition : sql`${acc} OR ${condition}`)), + ) + } + } else if (typeof value === 'string') { + // String values: use ILIKE for case-insensitive partial matching + conditions.push(sql`COALESCE(${jsonbPath}, '') ILIKE ${`%${value}%`}`) + } else { + // Non-string, non-array values: use exact match + // Convert JSONB value to text for comparison + conditions.push(sql`${jsonbPath}::text = ${String(value)}`) + } + } + + return conditions + } + + // ============================================================================ + // Protected Methods + // ============================================================================ + + /** + * Send a PostgreSQL notification. + * + * @param event - The event name + * @param data - The data to send + * @returns Promise resolving to `void` + */ + protected async _notify(_event: string, _data: any): Promise { + // do nothing + } + + /** + * Listen for PostgreSQL notifications. + * + * @param event - The event name to listen for + * @param callback - Callback function to handle notifications + * @returns Promise resolving to an object with an `unlisten` function + */ + protected async _listen(_event: string, _callback: (payload: string) => void): Promise<{ unlisten: () => void }> { + // do nothing + return { + unlisten: () => { + // do nothing + }, + } + } + + /** + * Map database query results to the expected format. + * Can be overridden by subclasses to handle different result formats. 
+ * + * @param result - The raw database query result + * @returns The mapped result + */ + protected _map(result: any) { + return result + } +} From 52e8beee0ed40b6878597326397b630aba8dd170 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Sat, 18 Apr 2026 17:55:38 -0300 Subject: [PATCH 03/10] feat: implement archive move logic for complete/fail/cancel, add query routing for getJobs/getJobById/retry --- packages/duron/src/adapters/postgres/base.ts | 479 ++++++++++++++----- 1 file changed, 364 insertions(+), 115 deletions(-) diff --git a/packages/duron/src/adapters/postgres/base.ts b/packages/duron/src/adapters/postgres/base.ts index 0c4c094..be9d8da 100644 --- a/packages/duron/src/adapters/postgres/base.ts +++ b/packages/duron/src/adapters/postgres/base.ts @@ -179,25 +179,64 @@ export class PostgresBaseAdapter e * @returns Promise resolving to `true` if completed, `false` otherwise */ protected async _completeJob({ jobId, output }: CompleteJobOptions) { - const result = await this.db - .update(this.tables.jobsActiveTable) - .set({ + return this.db.transaction(async (tx) => { + // 1. Delete job from active and get its data + const movedJob = await tx + .delete(this.tables.jobsActiveTable) + .where( + and( + eq(this.tables.jobsActiveTable.id, jobId), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.client_id, this.id), + gt(this.tables.jobsActiveTable.expires_at, sql`now()`), + ), + ) + .returning() + + if (movedJob.length === 0) { + return false + } + + const job = movedJob[0]! + + // 2. Delete steps from active + const movedSteps = await tx + .delete(this.tables.jobStepsActiveTable) + .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) + .returning() + + // 3. Delete spans from active + const movedSpans = await tx + .delete(this.tables.spansActiveTable) + .where(eq(this.tables.spansActiveTable.job_id, jobId)) + .returning() + + // 4. 
Insert job into archive + await tx.insert(this.tables.jobsArchiveTable).values({ + ...job, status: JOB_STATUS_COMPLETED, output, - finished_at: sql`now()`, - updated_at: sql`now()`, + finished_at: new Date(), + updated_at: new Date(), }) - .where( - and( - eq(this.tables.jobsActiveTable.id, jobId), - eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), - eq(this.tables.jobsActiveTable.client_id, this.id), - gt(this.tables.jobsActiveTable.expires_at, sql`now()`), - ), - ) - .returning({ id: this.tables.jobsActiveTable.id }) - return result.length > 0 + // 5. Insert steps into archive + if (movedSteps.length > 0) { + await tx.insert(this.tables.jobStepsArchiveTable).values( + movedSteps.map((step) => ({ + ...step, + job_finished_at: job.finished_at, + })), + ) + } + + // 6. Insert spans into archive + if (movedSpans.length > 0) { + await tx.insert(this.tables.spansArchiveTable).values(movedSpans) + } + + return true + }) } /** @@ -206,24 +245,57 @@ export class PostgresBaseAdapter e * @returns Promise resolving to `true` if failed, `false` otherwise */ protected async _failJob({ jobId, error }: FailJobOptions) { - const result = await this.db - .update(this.tables.jobsActiveTable) - .set({ + return this.db.transaction(async (tx) => { + const movedJob = await tx + .delete(this.tables.jobsActiveTable) + .where( + and( + eq(this.tables.jobsActiveTable.id, jobId), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.client_id, this.id), + ), + ) + .returning() + + if (movedJob.length === 0) { + return false + } + + const job = movedJob[0]! 
+ + const movedSteps = await tx + .delete(this.tables.jobStepsActiveTable) + .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) + .returning() + + const movedSpans = await tx + .delete(this.tables.spansActiveTable) + .where(eq(this.tables.spansActiveTable.job_id, jobId)) + .returning() + + await tx.insert(this.tables.jobsArchiveTable).values({ + ...job, status: JOB_STATUS_FAILED, error, - finished_at: sql`now()`, - updated_at: sql`now()`, + finished_at: new Date(), + updated_at: new Date(), }) - .where( - and( - eq(this.tables.jobsActiveTable.id, jobId), - eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), - eq(this.tables.jobsActiveTable.client_id, this.id), - ), - ) - .returning({ id: this.tables.jobsActiveTable.id }) - return result.length > 0 + if (movedSteps.length > 0) { + await tx.insert(this.tables.jobStepsArchiveTable).values( + movedSteps.map((step) => ({ + ...step, + job_finished_at: job.finished_at, + })), + ) + } + + if (movedSpans.length > 0) { + await tx.insert(this.tables.spansArchiveTable).values(movedSpans) + } + + return true + }) } /** @@ -232,22 +304,55 @@ export class PostgresBaseAdapter e * @returns Promise resolving to `true` if cancelled, `false` otherwise */ protected async _cancelJob({ jobId }: CancelJobOptions) { - const result = await this.db - .update(this.tables.jobsActiveTable) - .set({ + return this.db.transaction(async (tx) => { + const movedJob = await tx + .delete(this.tables.jobsActiveTable) + .where( + and( + eq(this.tables.jobsActiveTable.id, jobId), + or(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsActiveTable.status, JOB_STATUS_CREATED)), + ), + ) + .returning() + + if (movedJob.length === 0) { + return false + } + + const job = movedJob[0]! 
+ + const movedSteps = await tx + .delete(this.tables.jobStepsActiveTable) + .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) + .returning() + + const movedSpans = await tx + .delete(this.tables.spansActiveTable) + .where(eq(this.tables.spansActiveTable.job_id, jobId)) + .returning() + + await tx.insert(this.tables.jobsArchiveTable).values({ + ...job, status: JOB_STATUS_CANCELLED, - finished_at: sql`now()`, - updated_at: sql`now()`, + finished_at: new Date(), + updated_at: new Date(), }) - .where( - and( - eq(this.tables.jobsActiveTable.id, jobId), - or(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsActiveTable.status, JOB_STATUS_CREATED)), - ), - ) - .returning({ id: this.tables.jobsActiveTable.id }) - return result.length > 0 + if (movedSteps.length > 0) { + await tx.insert(this.tables.jobStepsArchiveTable).values( + movedSteps.map((step) => ({ + ...step, + job_finished_at: job.finished_at, + })), + ) + } + + if (movedSpans.length > 0) { + await tx.insert(this.tables.spansArchiveTable).values(movedSpans) + } + + return true + }) } /** @@ -272,10 +377,9 @@ export class PostgresBaseAdapter e j.created_at, j.concurrency_limit, j.concurrency_step_limit - FROM ${this.tables.jobsActiveTable} j + FROM ${this.tables.jobsArchiveTable} j WHERE j.id = ${jobId} AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_CANCELLED}, ${JOB_STATUS_FAILED}) - FOR UPDATE OF j SKIP LOCKED ), existing_retry AS ( -- Check if a retry already exists (a newer job with same checksum, group_key, and input) @@ -1030,9 +1134,17 @@ export class PostgresBaseAdapter e * Internal method to get a job by its ID. Does not include step information. 
*/ protected async _getJobById(jobId: string): Promise { - const jobsTable = this.tables.jobsActiveTable + // Try active table first + const activeJob = await this._getJobFromTable(jobId, this.tables.jobsActiveTable) + if (activeJob) { + return activeJob + } - // Calculate duration as a SQL expression (finishedAt - startedAt in milliseconds) + // Then try archive table + return this._getJobFromTable(jobId, this.tables.jobsArchiveTable) + } + + private async _getJobFromTable(jobId: string, jobsTable: any): Promise { const durationMs = sql` CASE WHEN ${jobsTable.started_at} IS NOT NULL AND ${jobsTable.finished_at} IS NOT NULL @@ -1203,92 +1315,229 @@ export class PostgresBaseAdapter e : []), ) } + /** + * Build WHERE clause for archive jobs (same logic as active but for archive table). + */ + protected _buildArchiveJobsWhereClause(filters: GetJobsOptions['filters']) { + if (!filters) { + return undefined + } + + const archiveTable = this.tables.jobsArchiveTable + + const fuzzySearch = filters.search?.trim() + + return and( + filters.status + ? inArray(archiveTable.status, Array.isArray(filters.status) ? filters.status : [filters.status]) + : undefined, + filters.actionName + ? inArray(archiveTable.action_name, Array.isArray(filters.actionName) ? filters.actionName : [filters.actionName]) + : undefined, + filters.groupKey && Array.isArray(filters.groupKey) + ? sql`j.group_key LIKE ANY(ARRAY[${sql.raw(filters.groupKey.map((key) => `'${key}'`).join(','))}]::text[])` + : undefined, + filters.groupKey && !Array.isArray(filters.groupKey) + ? ilike(archiveTable.group_key, `%${filters.groupKey}%`) + : undefined, + filters.clientId + ? inArray(archiveTable.client_id, Array.isArray(filters.clientId) ? filters.clientId : [filters.clientId]) + : undefined, + filters.description ? ilike(archiveTable.description, `%${filters.description}%`) : undefined, + filters.createdAt && Array.isArray(filters.createdAt) + ? 
between( + sql`date_trunc('second', ${archiveTable.created_at})`, + filters.createdAt[0]!.toISOString(), + filters.createdAt[1]!.toISOString(), + ) + : undefined, + filters.createdAt && !Array.isArray(filters.createdAt) + ? gte(sql`date_trunc('second', ${archiveTable.created_at})`, filters.createdAt.toISOString()) + : undefined, + filters.startedAt && Array.isArray(filters.startedAt) + ? between( + sql`date_trunc('second', ${archiveTable.started_at})`, + filters.startedAt[0]!.toISOString(), + filters.startedAt[1]!.toISOString(), + ) + : undefined, + filters.startedAt && !Array.isArray(filters.startedAt) + ? gte(sql`date_trunc('second', ${archiveTable.started_at})`, filters.startedAt.toISOString()) + : undefined, + filters.finishedAt && Array.isArray(filters.finishedAt) + ? between( + sql`date_trunc('second', ${archiveTable.finished_at})`, + filters.finishedAt[0]!.toISOString(), + filters.finishedAt[1]!.toISOString(), + ) + : undefined, + filters.finishedAt && !Array.isArray(filters.finishedAt) + ? gte(sql`date_trunc('second', ${archiveTable.finished_at})`, filters.finishedAt.toISOString()) + : undefined, + filters.updatedAfter + ? sql`date_trunc('milliseconds', ${archiveTable.updated_at}) > ${filters.updatedAfter.toISOString()}::timestamptz` + : undefined, + fuzzySearch + ? or( + ilike(archiveTable.action_name, `%${fuzzySearch}%`), + ilike(archiveTable.group_key, `%${fuzzySearch}%`), + ilike(archiveTable.description, `%${fuzzySearch}%`), + ilike(archiveTable.client_id, `%${fuzzySearch}%`), + sql`${archiveTable.id}::text ilike ${`%${fuzzySearch}%`}`, + sql`to_tsvector('english', ${archiveTable.input}::text) @@ plainto_tsquery('english', ${fuzzySearch})`, + sql`to_tsvector('english', ${archiveTable.output}::text) @@ plainto_tsquery('english', ${fuzzySearch})`, + ) + : undefined, + ...(filters.inputFilter && Object.keys(filters.inputFilter).length > 0 + ? 
this.#buildJsonbWhereConditions(filters.inputFilter, archiveTable.input) + : []), + ...(filters.outputFilter && Object.keys(filters.outputFilter).length > 0 + ? this.#buildJsonbWhereConditions(filters.outputFilter, archiveTable.output) + : []), + ) + } + /** * Internal method to get jobs with pagination, filtering, and sorting. * Does not include step information or job output. */ protected async _getJobs(options?: GetJobsOptions): Promise { - const jobsTable = this.tables.jobsActiveTable const page = options?.page ?? 1 const pageSize = options?.pageSize ?? 10 const filters = options?.filters ?? {} - const sortInput = options?.sort ?? { field: 'startedAt', order: 'desc' } - const sorts = Array.isArray(sortInput) ? sortInput : [sortInput] - - const where = this._buildJobsWhereClause(filters) + // Determine which table(s) to query based on status filter + const activeStatuses = [JOB_STATUS_CREATED, JOB_STATUS_ACTIVE] + const archiveStatuses = [JOB_STATUS_COMPLETED, JOB_STATUS_FAILED, JOB_STATUS_CANCELLED] + const statusFilter = filters.status + const statuses = Array.isArray(statusFilter) ? statusFilter : statusFilter ? 
[statusFilter] : [] + + const queryActive = statuses.length === 0 || statuses.some(s => (activeStatuses as string[]).includes(s)) + const queryArchive = statuses.length === 0 || statuses.some(s => (archiveStatuses as string[]).includes(s)) + + // Query active table + let activeJobs: any[] = [] + let activeTotal = 0 + if (queryActive) { + const jobsTable = this.tables.jobsActiveTable + const where = this._buildJobsWhereClause(filters) + activeTotal = await this.db.$count(jobsTable, where) + + if (activeTotal > 0) { + const durationMs = sql` + CASE + WHEN ${jobsTable.started_at} IS NOT NULL AND ${jobsTable.finished_at} IS NOT NULL + THEN EXTRACT(EPOCH FROM (${jobsTable.finished_at} - ${jobsTable.started_at})) * 1000 + ELSE NULL + END + `.as('duration_ms') + + activeJobs = await this.db + .select({ + id: jobsTable.id, + actionName: jobsTable.action_name, + groupKey: jobsTable.group_key, + description: jobsTable.description, + input: jobsTable.input, + output: jobsTable.output, + error: jobsTable.error, + status: jobsTable.status, + timeoutMs: jobsTable.timeout_ms, + expiresAt: jobsTable.expires_at, + startedAt: jobsTable.started_at, + finishedAt: jobsTable.finished_at, + createdAt: jobsTable.created_at, + updatedAt: jobsTable.updated_at, + concurrencyLimit: jobsTable.concurrency_limit, + concurrencyStepLimit: jobsTable.concurrency_step_limit, + clientId: jobsTable.client_id, + durationMs, + }) + .from(jobsTable) + .where(where) + .orderBy(desc(jobsTable.created_at)) + .limit(pageSize) + .offset((page - 1) * pageSize) + } + } - // Get total count - const total = await this.db.$count(jobsTable, where) - if (!total) { - return { - jobs: [], - total: 0, - page, - pageSize, + // Query archive table + let archiveJobs: any[] = [] + let archiveTotal = 0 + if (queryArchive) { + const archiveTable = this.tables.jobsArchiveTable + // Build where clause for archive (similar to active but using archive table) + const archiveWhere = this._buildArchiveJobsWhereClause(filters) + 
archiveTotal = await this.db.$count(archiveTable, archiveWhere) + + if (archiveTotal > 0) { + const durationMs = sql` + CASE + WHEN ${archiveTable.started_at} IS NOT NULL AND ${archiveTable.finished_at} IS NOT NULL + THEN EXTRACT(EPOCH FROM (${archiveTable.finished_at} - ${archiveTable.started_at})) * 1000 + ELSE NULL + END + `.as('duration_ms') + + archiveJobs = await this.db + .select({ + id: archiveTable.id, + actionName: archiveTable.action_name, + groupKey: archiveTable.group_key, + description: archiveTable.description, + input: archiveTable.input, + output: archiveTable.output, + error: archiveTable.error, + status: archiveTable.status, + timeoutMs: archiveTable.timeout_ms, + expiresAt: archiveTable.expires_at, + startedAt: archiveTable.started_at, + finishedAt: archiveTable.finished_at, + createdAt: archiveTable.created_at, + updatedAt: archiveTable.updated_at, + concurrencyLimit: archiveTable.concurrency_limit, + concurrencyStepLimit: archiveTable.concurrency_step_limit, + clientId: archiveTable.client_id, + durationMs, + }) + .from(archiveTable) + .where(archiveWhere) + .orderBy(desc(archiveTable.created_at)) + .limit(pageSize) + .offset((page - 1) * pageSize) } } - // Calculate duration as a SQL expression (finishedAt - startedAt in milliseconds) - const durationMs = sql` - CASE - WHEN ${jobsTable.started_at} IS NOT NULL AND ${jobsTable.finished_at} IS NOT NULL - THEN EXTRACT(EPOCH FROM (${jobsTable.finished_at} - ${jobsTable.started_at})) * 1000 - ELSE NULL - END - `.as('duration_ms') + // Combine results + const allJobs = [...activeJobs, ...archiveJobs] + const total = activeTotal + archiveTotal - const sortFieldMap: Record = { - createdAt: jobsTable.created_at, - startedAt: jobsTable.started_at, - finishedAt: jobsTable.finished_at, - status: jobsTable.status, - actionName: jobsTable.action_name, - expiresAt: jobsTable.expires_at, - duration: durationMs, - description: jobsTable.description, - } + // Sort combined results + const sortInput = 
options?.sort ?? { field: 'startedAt', order: 'desc' } + const sorts = Array.isArray(sortInput) ? sortInput : [sortInput] - const jobs = await this.db - .select({ - id: jobsTable.id, - actionName: jobsTable.action_name, - groupKey: jobsTable.group_key, - description: jobsTable.description, - input: jobsTable.input, - output: jobsTable.output, - error: jobsTable.error, - status: jobsTable.status, - timeoutMs: jobsTable.timeout_ms, - expiresAt: jobsTable.expires_at, - startedAt: jobsTable.started_at, - finishedAt: jobsTable.finished_at, - createdAt: jobsTable.created_at, - updatedAt: jobsTable.updated_at, - concurrencyLimit: jobsTable.concurrency_limit, - concurrencyStepLimit: jobsTable.concurrency_step_limit, - clientId: jobsTable.client_id, - durationMs, - }) - .from(jobsTable) - .where(where) - .orderBy( - ...sorts - .filter((sortItem) => sortItem.field in sortFieldMap) - .map((sortItem) => { - const sortField = sortFieldMap[sortItem.field] - if (sortItem.order.toUpperCase() === 'ASC') { - return asc(sortField) - } else { - return desc(sortField) - } - }), - ) - .limit(pageSize) - .offset((page - 1) * pageSize) + allJobs.sort((a, b) => { + for (const sort of sorts) { + const field = sort.field + const order = sort.order.toUpperCase() === 'ASC' ? 
1 : -1 + const aVal = a[field] + const bVal = b[field] + + if (aVal === null && bVal === null) continue + if (aVal === null) return order + if (bVal === null) return -order + + if (aVal < bVal) return -order + if (aVal > bVal) return order + } + return 0 + }) + + // Apply pagination + const paginatedJobs = allJobs.slice(0, pageSize) return { - jobs, + jobs: paginatedJobs, total, page, pageSize, From 365acfa28eeb9e5485ad79e8ddb9354637ef7cc9 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Sat, 18 Apr 2026 18:40:25 -0300 Subject: [PATCH 04/10] feat: implement step and span query routing for active/archive tables --- packages/duron/src/adapters/postgres/base.ts | 130 ++++++++++++++++--- 1 file changed, 110 insertions(+), 20 deletions(-) diff --git a/packages/duron/src/adapters/postgres/base.ts b/packages/duron/src/adapters/postgres/base.ts index be9d8da..8371cb6 100644 --- a/packages/duron/src/adapters/postgres/base.ts +++ b/packages/duron/src/adapters/postgres/base.ts @@ -1189,7 +1189,15 @@ export class PostgresBaseAdapter e protected async _getJobSteps(options: GetJobStepsOptions): Promise { const { jobId, search } = options - const jobStepsTable = this.tables.jobStepsActiveTable + // Determine if job is in active or archive table + const jobInActive = await this.db + .select({ id: this.tables.jobsActiveTable.id }) + .from(this.tables.jobsActiveTable) + .where(eq(this.tables.jobsActiveTable.id, jobId)) + .limit(1) + + const isActive = jobInActive.length > 0 + const jobStepsTable = isActive ? this.tables.jobStepsActiveTable : this.tables.jobStepsArchiveTable const fuzzySearch = search?.trim() @@ -1548,7 +1556,8 @@ export class PostgresBaseAdapter e * Internal method to get a step by its ID with all information. 
*/ protected async _getJobStepById(stepId: string): Promise { - const [step] = await this.db + // Try active table first + const [activeStep] = await this.db .select({ id: this.tables.jobStepsActiveTable.id, jobId: this.tables.jobStepsActiveTable.job_id, @@ -1573,14 +1582,45 @@ export class PostgresBaseAdapter e .where(eq(this.tables.jobStepsActiveTable.id, stepId)) .limit(1) - return step ?? null + if (activeStep) { + return activeStep + } + + // Try archive table + const [archiveStep] = await this.db + .select({ + id: this.tables.jobStepsArchiveTable.id, + jobId: this.tables.jobStepsArchiveTable.job_id, + parentStepId: this.tables.jobStepsArchiveTable.parent_step_id, + parallel: this.tables.jobStepsArchiveTable.parallel, + name: this.tables.jobStepsArchiveTable.name, + output: this.tables.jobStepsArchiveTable.output, + status: this.tables.jobStepsArchiveTable.status, + error: this.tables.jobStepsArchiveTable.error, + startedAt: this.tables.jobStepsArchiveTable.started_at, + finishedAt: this.tables.jobStepsArchiveTable.finished_at, + timeoutMs: this.tables.jobStepsArchiveTable.timeout_ms, + expiresAt: this.tables.jobStepsArchiveTable.expires_at, + retriesLimit: this.tables.jobStepsArchiveTable.retries_limit, + retriesCount: this.tables.jobStepsArchiveTable.retries_count, + delayedMs: this.tables.jobStepsArchiveTable.delayed_ms, + historyFailedAttempts: this.tables.jobStepsArchiveTable.history_failed_attempts, + createdAt: this.tables.jobStepsArchiveTable.created_at, + updatedAt: this.tables.jobStepsArchiveTable.updated_at, + }) + .from(this.tables.jobStepsArchiveTable) + .where(eq(this.tables.jobStepsArchiveTable.id, stepId)) + .limit(1) + + return archiveStep ?? null } /** * Internal method to get job status and updatedAt timestamp. 
*/ protected async _getJobStatus(jobId: string): Promise { - const [job] = await this.db + // Try active table first + const [activeJob] = await this.db .select({ status: this.tables.jobsActiveTable.status, updatedAt: this.tables.jobsActiveTable.updated_at, @@ -1589,14 +1629,29 @@ export class PostgresBaseAdapter e .where(eq(this.tables.jobsActiveTable.id, jobId)) .limit(1) - return job ?? null + if (activeJob) { + return activeJob + } + + // Try archive table + const [archiveJob] = await this.db + .select({ + status: this.tables.jobsArchiveTable.status, + updatedAt: this.tables.jobsArchiveTable.updated_at, + }) + .from(this.tables.jobsArchiveTable) + .where(eq(this.tables.jobsArchiveTable.id, jobId)) + .limit(1) + + return archiveJob ?? null } /** * Internal method to get job step status and updatedAt timestamp. */ protected async _getJobStepStatus(stepId: string): Promise { - const [step] = await this.db + // Try active table first + const [activeStep] = await this.db .select({ status: this.tables.jobStepsActiveTable.status, updatedAt: this.tables.jobStepsActiveTable.updated_at, @@ -1605,7 +1660,21 @@ export class PostgresBaseAdapter e .where(eq(this.tables.jobStepsActiveTable.id, stepId)) .limit(1) - return step ?? null + if (activeStep) { + return activeStep + } + + // Try archive table + const [archiveStep] = await this.db + .select({ + status: this.tables.jobStepsArchiveTable.status, + updatedAt: this.tables.jobStepsArchiveTable.updated_at, + }) + .from(this.tables.jobStepsArchiveTable) + .where(eq(this.tables.jobStepsArchiveTable.id, stepId)) + .limit(1) + + return archiveStep ?? null } /** @@ -1696,7 +1765,6 @@ export class PostgresBaseAdapter e * For step queries, uses a recursive CTE to find all descendant spans. */ protected async _getSpans(options: GetSpansOptions): Promise { - const spansTable = this.tables.spansActiveTable const filters = options.filters ?? 
{} // Build sort @@ -1714,8 +1782,21 @@ export class PostgresBaseAdapter e return this._getStepSpansRecursive(options.stepId, sortField, sortOrder, filters) } + // Determine if job is active or archived + let isActive = true + if (options.jobId) { + const jobInActive = await this.db + .select({ id: this.tables.jobsActiveTable.id }) + .from(this.tables.jobsActiveTable) + .where(eq(this.tables.jobsActiveTable.id, options.jobId)) + .limit(1) + isActive = jobInActive.length > 0 + } + + const spansTable = isActive ? this.tables.spansActiveTable : this.tables.spansArchiveTable + // Build WHERE clause for job queries - const where = this._buildSpansWhereClause(options.jobId, undefined, filters) + const where = this._buildSpansWhereClause(options.jobId, undefined, filters, isActive) // Get total count const total = await this.db.$count(spansTable, where) @@ -1781,16 +1862,19 @@ export class PostgresBaseAdapter e ): Promise { const schemaName = this.schema - // Use a recursive CTE to find all descendant spans - // 1. Base case: find the span with step_id = stepId - // 2. 
Recursive case: find all spans where parent_span_id = span_id of a span we've already found + // Query both active and archive spans tables const query = sql` WITH RECURSIVE span_tree AS ( - -- Base case: the span(s) for the step - SELECT * FROM ${sql.identifier(schemaName)}.spans WHERE step_id = ${stepId}::uuid + -- Base case: the span(s) for the step (check both tables) + SELECT * FROM ${sql.identifier(schemaName)}.spans_active WHERE step_id = ${stepId}::uuid + UNION + SELECT * FROM ${sql.identifier(schemaName)}.spans_archive WHERE step_id = ${stepId}::uuid UNION ALL - -- Recursive case: children of spans we've found - SELECT s.* FROM ${sql.identifier(schemaName)}.spans s + -- Recursive case: children of spans we've found (check both tables) + SELECT s.* FROM ${sql.identifier(schemaName)}.spans_active s + INNER JOIN span_tree st ON s.parent_span_id = st.span_id + UNION + SELECT s.* FROM ${sql.identifier(schemaName)}.spans_archive s INNER JOIN span_tree st ON s.parent_span_id = st.span_id ) SELECT @@ -1856,12 +1940,18 @@ export class PostgresBaseAdapter e * Internal method to delete all spans for a job. */ protected async _deleteSpans(options: DeleteSpansOptions): Promise { - const result = await this.db + // Delete from both tables to be safe + const activeResult = await this.db .delete(this.tables.spansActiveTable) .where(eq(this.tables.spansActiveTable.job_id, options.jobId)) .returning({ id: this.tables.spansActiveTable.id }) - return result.length + const archiveResult = await this.db + .delete(this.tables.spansArchiveTable) + .where(eq(this.tables.spansArchiveTable.job_id, options.jobId)) + .returning({ id: this.tables.spansArchiveTable.id }) + + return activeResult.length + archiveResult.length } /** @@ -1873,8 +1963,8 @@ export class PostgresBaseAdapter e * Note: Step queries are handled separately by _getStepSpansRecursive using * a recursive CTE to traverse the span hierarchy. 
*/ - protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters']) { - const spansTable = this.tables.spansActiveTable + protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters'], isActive: boolean = true) { + const spansTable = isActive ? this.tables.spansActiveTable : this.tables.spansArchiveTable // Build condition for finding spans by trace_id (includes external spans) let traceCondition: ReturnType | undefined From e65dc27c59c37cb5a112cbd43c89bf978ebd4d1f Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Fri, 24 Apr 2026 17:20:29 -0300 Subject: [PATCH 05/10] feat: active/archive split with single spans table - Split jobs/steps into active and archive tables - Jobs move from active to archive on complete/fail/cancel - Single spans table without FK constraints - SQL-native archive operations with USING joins - Prune with orphan span cleanup - Time travel restores archived jobs to active - Dashboard archive page and filter toggles - Add archive-specific tests --- .gitignore | 3 + .../2026-04-18-active-archive-split-design.md | 198 ++++ bun.lock | 9 +- .../src/components/ui/card.tsx | 43 + .../src/components/ui/tabs.tsx | 80 ++ .../src/hooks/use-data-table.ts | 17 + .../src/hooks/use-job-filter.ts | 41 + packages/duron-dashboard/src/lib/api.ts | 58 +- .../src/views/archive-page.tsx | 133 +++ .../duron-dashboard/src/views/dashboard.tsx | 61 +- .../migration.sql | 103 -- .../migration.sql | 246 ++--- .../snapshot.json | 989 +++++++++++++++--- packages/duron/src/adapters/adapter.ts | 4 +- packages/duron/src/adapters/postgres/base.ts | 672 ++++++++---- .../src/adapters/postgres/schema.default.ts | 21 +- .../duron/src/adapters/postgres/schema.ts | 81 +- packages/duron/src/client.ts | 38 + packages/duron/src/server.ts | 70 ++ packages/duron/src/step-manager.ts | 24 +- packages/duron/test/archive.test.ts | 321 ++++++ packages/shared-actions/package.json | 3 +- 
.../shared-actions/test/process-order.test.ts | 385 +++++++ 23 files changed, 2895 insertions(+), 705 deletions(-) create mode 100644 packages/duron-dashboard/src/components/ui/card.tsx create mode 100644 packages/duron-dashboard/src/components/ui/tabs.tsx create mode 100644 packages/duron-dashboard/src/hooks/use-job-filter.ts create mode 100644 packages/duron-dashboard/src/views/archive-page.tsx delete mode 100644 packages/duron/migrations/postgres/20260121160012_normal_bloodstrike/migration.sql rename packages/duron/migrations/postgres/{20260418120000_active_archive_split => 20260421153337_large_nitro}/migration.sql (58%) rename packages/duron/migrations/postgres/{20260121160012_normal_bloodstrike => 20260421153337_large_nitro}/snapshot.json (61%) create mode 100644 packages/duron/test/archive.test.ts create mode 100644 packages/shared-actions/test/process-order.test.ts diff --git a/.gitignore b/.gitignore index bb3a6ba..6d91af3 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,6 @@ report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json # Finder (MacOS) folder config .DS_Store .claude/settings.local.json + +# git worktrees +.worktrees diff --git a/.opencode/plans/2026-04-18-active-archive-split-design.md b/.opencode/plans/2026-04-18-active-archive-split-design.md index 8123d39..653c32a 100644 --- a/.opencode/plans/2026-04-18-active-archive-split-design.md +++ b/.opencode/plans/2026-04-18-active-archive-split-design.md @@ -810,4 +810,202 @@ Use snapshot-based batching with TRUNCATE table rotation (like pgque/PgQ). --- +## 16. Job & Step State Transitions + +### 16.1 Overview + +Jobs and steps move between **active** and **archive** tables based on their lifecycle. The active table contains only non-terminal work (`created`, `active` status). The archive contains only terminal work (`completed`, `failed`, `cancelled`). 
+ +### 16.2 Job State Transitions + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ JOB LIFECYCLE │ +└─────────────────────────────────────────────────────────────────────────────┘ + +CREATE + └─► jobs_active (status: created) + │ + ▼ + ACTIVATE (worker picks up job) + └─► jobs_active (status: active, started_at=now, expires_at=now+timeout) + │ + ├──────────────────────────────────────────┬───────────────────────────┐ + │ │ │ + ▼ ▼ ▼ + COMPLETE FAIL CANCEL + (job handler (exception or (user or + returns) timeout) system) + │ │ │ + ▼ ▼ ▼ + jobs_archive jobs_archive jobs_archive + (status: completed) (status: failed) (status: cancelled) + │ │ │ + │ │ │ + └──────────────────┬───────────────────────┘ │ + │ │ + ▼ │ + TIME TRAVEL (restore from archive if needed) │ + │ │ + ▼ │ + jobs_active (status: created) │ + │ │ + ▼ │ + RE-EXECUTE from target step │ + │ │ + ▼ │ + jobs_active → jobs_archive (terminal again) │ + │ + ▼ │ + PRUNE (delete old archived jobs) ◄─────────────────────────┘ + │ + ▼ + PERMANENTLY DELETED (jobs_archive + steps + spans) +``` + +### 16.3 Step State Transitions + +Steps follow the same active/archive pattern but have additional complexity during time travel. 
+ +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ STEP LIFECYCLE │ +└─────────────────────────────────────────────────────────────────────────────┘ + +CREATE (with job) + └─► job_steps_active (status: active) + │ + ▼ + COMPLETE / FAIL / CANCEL + │ + ▼ + job_steps_archive (preserves final status) + │ + ▼ + TIME TRAVEL (if archived, restored to active first) + │ + ├─► Target step ───────┐ + │ (reset to active) │ + │ │ + ├─► Ancestor steps ────┤ + │ (reset to active) │ + │ │ + ├─► Parallel branches ─┤ + │ (keep completed, │ + │ shift timestamps) │ + │ │ + └─► Other steps ───────┘ + (DELETED permanently) +``` + +### 16.4 Time Travel Step Logic + +When `timeTravelJob(jobId, stepId)` is called: + +**Phase 1: Archive Restore (if job is in archive)** +1. INSERT job row into `jobs_active` (from `jobs_archive`) +2. INSERT all step rows into `job_steps_active` (from `job_steps_archive`) +3. DELETE job from `jobs_archive` (cascade deletes steps from `job_steps_archive`) +4. Spans remain in the single `spans` table (no FK, no movement) + +*Note: This is a MOVE, not a copy. 
The job is removed from archive and placed into active.* + +**Phase 2: CTE Transformation (single atomic query)** + +The CTE performs these operations in order: + +| Operation | Steps Affected | New Status | Table | Notes | +|-----------|---------------|------------|-------|-------| +| **Validate** | Job | — | active | Must be terminal (completed/failed/cancelled) | +| **Find ancestors** | Target's parent chain | — | active | Recursive CTE up to root | +| **Find parallel branches** | Sibling steps with `parallel=true` | — | active | Completed steps at same nesting level | +| **Shift timestamps** | Kept completed steps | completed | active | `started_at`/`finished_at` shifted to "now" | +| **Delete** | Non-parallel, non-ancestor, non-target | — | active | Permanently removed | +| **Reset** | Target step | active | active | Clear output, error, finished_at, set started_at=now | +| **Reset** | Ancestor steps | active | active | Same clearing as target | +| **Reset** | Job | created | active | Clear output, error, started_at, finished_at, client_id, expires_at | + +**Step categories after time travel:** + +| Category | Status | Preserved Data | Example | +|----------|--------|---------------|---------| +| **Target** | `active` | None (reset) | The step you're time-traveling to | +| **Ancestors** | `active` | None (reset) | Parent steps leading to target | +| **Parallel branches** | `completed` | Output, error, all data | Side branches that ran concurrently | +| **Pre-target linear** | `completed` | Output, error, all data | Steps before target in same branch | +| **Post-target** | — | — | **Deleted permanently** | + +### 16.5 Spans Lifecycle + +Spans use a **single table** (`spans`) with **no FK constraints**. This is intentional — spans are append-only telemetry data that should not block job operations. 
+ +**Spans are created during job execution:** +- OpenTelemetry spans are exported via `LocalSpanExporter` +- Each span has `duron.job.id` and `duron.step.id` attributes (extracted from OTel attributes) +- Spans are inserted into the single `spans` table +- External spans (e.g., from AI SDK) that share the same `trace_id` are also stored + +**Spans are NOT deleted when a job completes/fails/cancels:** +- Complete → spans stay in `spans` table +- Fail → spans stay in `spans` table +- Cancel → spans stay in `spans` table +- Time Travel → spans stay in `spans` table (no movement) +- Retry → spans stay in `spans` table (new job gets new spans) + +**Spans are deleted during:** +1. **Prune (batch)** — `DELETE FROM spans WHERE job_id IN (pruned jobs)` (explicit cleanup in prune CTE) +2. **Prune (orphan cleanup)** — `DELETE FROM spans WHERE job_id NOT IN (jobs_active) AND job_id NOT IN (jobs_archive)` (catches spans from deleted jobs) +3. **Manual deleteSpans API** — `DELETE FROM spans WHERE job_id = ?` (programmatic cleanup) + +**Spans are NOT deleted during truncate** because truncate only clears archive tables, and spans may belong to active jobs. + +**Querying spans:** +- By job: Query spans table directly (uses `job_id` index) +- By step: Recursive CTE traverses span hierarchy via `parent_span_id` +- By trace: Query spans table directly (uses `trace_id` index) + +**Important:** Because spans have no FK constraints, they can reference jobs/steps that no longer exist. Querying spans for a deleted job returns no results (the `job_id` lookup finds nothing), but the spans themselves remain until pruned. 
+
+### 16.6 Status Values by Table
+
+**Active Tables:**
+| Table | Possible Statuses |
+|-------|------------------|
+| `jobs_active` | `created`, `active` |
+| `job_steps_active` | `active`, `completed`, `failed`, `cancelled` |
+
+**Archive Tables:**
+| Table | Possible Statuses |
+|-------|------------------|
+| `jobs_archive` | `completed`, `failed`, `cancelled` |
+| `job_steps_archive` | `active`, `completed`, `failed`, `cancelled` |
+
+*Note: `job_steps_archive` can have `active` status because steps are archived as-is at job termination time. A job may have active steps if it was cancelled or failed mid-execution.*
+
+### 16.7 Movement Between Tables
+
+| Operation | Job Movement | Step Movement | Span Movement |
+|-----------|-------------|---------------|---------------|
+| **Create** | INSERT `jobs_active` | INSERT `job_steps_active` | INSERT `spans` |
+| **Activate** | UPDATE `jobs_active` | — | — |
+| **Complete** | MOVE active→archive | MOVE active→archive | DELETE from `spans` (where job_id=?) |
+| **Fail** | MOVE active→archive | MOVE active→archive | DELETE from `spans` (where job_id=?) |
+| **Cancel** | MOVE active→archive | MOVE active→archive (after setting status=cancelled) | DELETE from `spans` (where job_id=?) |
+| **Retry** | MOVE archive→active | MOVE archive→active | No movement (spans stay in `spans`) |
+| **Time Travel** | If in archive: MOVE archive→active, then TRANSFORM active | If in archive: MOVE archive→active, then TRANSFORM active | No movement (spans stay in `spans`) |
+| **Prune** | DELETE `jobs_archive` (cascade steps) | DELETE `job_steps_archive` (cascade) | DELETE `spans` (explicit in prune CTE) |
+| **Truncate** | TRUNCATE `jobs_archive` CASCADE | TRUNCATE `job_steps_archive` CASCADE | No operation (spans may belong to active jobs) |
+
+### 16.8 Critical Invariants
+
+1. **A job exists in exactly one table at a time** (active XOR archive, not both)
+2.
**Archive jobs are always terminal** (status IN completed, failed, cancelled) +3. **Active jobs are always non-terminal** (status IN created, active) +4. **Steps follow their parent job** — when a job moves, all its steps move with it +5. **Time travel is the only way to go from archive → active** +6. **Spans have no FK constraints** — they are cleaned up explicitly during prune/truncate +7. **Prune uses batching with `USING` joins** — single query per batch deletes jobs, steps (cascade), and spans + +--- + *End of Design Document* diff --git a/bun.lock b/bun.lock index d0fb9c7..802cd80 100644 --- a/bun.lock +++ b/bun.lock @@ -153,6 +153,7 @@ }, "devDependencies": { "@types/bun": "latest", + "duron": "workspace", }, "peerDependencies": { "typescript": "^5", @@ -792,7 +793,7 @@ "@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.2" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="], - "@types/bun": ["@types/bun@1.3.10", "", { "dependencies": { "bun-types": "1.3.10" } }, "sha512-0+rlrUrOrTSskibryHbvQkDOWRJwJZqZlxrUs1u4oOoTln8+WIXBPmAuCF35SWB2z4Zl3E84Nl/D0P7803nigQ=="], + "@types/bun": ["@types/bun@1.3.12", "", { "dependencies": { "bun-types": "1.3.12" } }, "sha512-DBv81elK+/VSwXHDlnH3Qduw+KxkTIWi7TXkAeh24zpi5l0B2kUg9Ga3tb4nJaPcOFswflgi/yAvMVBPrxMB+A=="], "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="], @@ -906,7 +907,7 @@ "bun-plugin-tailwind": ["bun-plugin-tailwind@0.1.2", "", { "peerDependencies": { "bun": ">=1.0.0" } }, "sha512-41jNC1tZRSK3s1o7pTNrLuQG8kL/0vR/JgiTmZAJ1eHwe0w5j6HFPKeqEk0WAD13jfrUC7+ULuewFBBCoADPpg=="], - "bun-types": ["bun-types@1.3.10", "", { "dependencies": { "@types/node": "*" } }, "sha512-tcpfCCl6XWo6nCVnpcVrxQ+9AYN1iqMIzgrSKYMB/fjLtV2eyAVEg7AxQJuCq/26R6HpKWykQXuSOq/21RYcbg=="], + "bun-types": 
["bun-types@1.3.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-HqOLj5PoFajAQciOMRiIZGNoKxDJSr6qigAttOX40vJuSp6DN/CxWp9s3C1Xwm4oH7ybueITwiaOcWXoYVoRkA=="], "bundle-name": ["bundle-name@4.1.0", "", { "dependencies": { "run-applescript": "^7.0.0" } }, "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q=="], @@ -1908,8 +1909,6 @@ "serve-handler/path-to-regexp": ["path-to-regexp@3.3.0", "", {}, "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw=="], - "shared-actions/@types/bun": ["@types/bun@1.3.6", "", { "dependencies": { "bun-types": "1.3.6" } }, "sha512-uWCv6FO/8LcpREhenN1d1b6fcspAB+cefwD7uti8C8VffIv0Um08TKMn98FynpTiU38+y2dUO55T11NgDt8VAA=="], - "solid-js/seroval": ["seroval@1.3.2", "", {}, "sha512-RbcPH1n5cfwKrru7v7+zrZvjLurgHhGyso3HTyGtRivGWgYjbOmGuivCQaORNELjNONoK35nj28EoWul9sb1zQ=="], "solid-js/seroval-plugins": ["seroval-plugins@1.3.3", "", { "peerDependencies": { "seroval": "^1.0" } }, "sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w=="], @@ -1992,8 +1991,6 @@ "examples/@types/bun/bun-types": ["bun-types@1.3.3", "", { "dependencies": { "@types/node": "*" } }, "sha512-z3Xwlg7j2l9JY27x5Qn3Wlyos8YAp0kKRlrePAOjgjMGS5IG6E7Jnlx736vH9UVI4wUICwwhC9anYL++XeOgTQ=="], - "shared-actions/@types/bun/bun-types": ["bun-types@1.3.6", "", { "dependencies": { "@types/node": "*" } }, "sha512-OlFwHcnNV99r//9v5IIOgQ9Uk37gZqrNMCcqEaExdkVq3Avwqok1bJFmvGMCkCE0FqzdY8VMOZpfpR3lwI+CsQ=="], - "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], diff --git 
a/packages/duron-dashboard/src/components/ui/card.tsx b/packages/duron-dashboard/src/components/ui/card.tsx new file mode 100644 index 0000000..7cd2a7e --- /dev/null +++ b/packages/duron-dashboard/src/components/ui/card.tsx @@ -0,0 +1,43 @@ +import * as React from 'react' + +import { cn } from '@/lib/utils' + +const Card = React.forwardRef>(({ className, ...props }, ref) => ( +

+)) +Card.displayName = 'Card' + +const CardHeader = React.forwardRef>( + ({ className, ...props }, ref) => ( +
+ ), +) +CardHeader.displayName = 'CardHeader' + +const CardTitle = React.forwardRef>( + ({ className, ...props }, ref) => ( +

+ ), +) +CardTitle.displayName = 'CardTitle' + +const CardDescription = React.forwardRef>( + ({ className, ...props }, ref) => ( +

+ ), +) +CardDescription.displayName = 'CardDescription' + +const CardContent = React.forwardRef>( + ({ className, ...props }, ref) =>

, +) +CardContent.displayName = 'CardContent' + +const CardFooter = React.forwardRef>( + ({ className, ...props }, ref) => ( +
+ ), +) +CardFooter.displayName = 'CardFooter' + +export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } diff --git a/packages/duron-dashboard/src/components/ui/tabs.tsx b/packages/duron-dashboard/src/components/ui/tabs.tsx new file mode 100644 index 0000000..14a3c73 --- /dev/null +++ b/packages/duron-dashboard/src/components/ui/tabs.tsx @@ -0,0 +1,80 @@ +'use client' + +import * as React from 'react' + +import { cn } from '@/lib/utils' + +interface TabsContextValue { + value: string + onValueChange?: (value: string) => void +} + +const TabsContext = React.createContext(null) + +function useTabs() { + const context = React.useContext(TabsContext) + if (!context) { + throw new Error('Tabs components must be used within a Tabs provider') + } + return context +} + +interface TabsProps { + value: string + onValueChange?: (value: string) => void + children: React.ReactNode + className?: string +} + +function Tabs({ value, onValueChange, children, className }: TabsProps) { + return ( + +
{children}
+
+ ) +} + +interface TabsListProps { + children: React.ReactNode + className?: string +} + +function TabsList({ children, className }: TabsListProps) { + return ( +
+ {children} +
+ ) +} + +interface TabsTriggerProps { + value: string + children: React.ReactNode + className?: string +} + +function TabsTrigger({ value, children, className }: TabsTriggerProps) { + const { value: selectedValue, onValueChange } = useTabs() + const isActive = selectedValue === value + + return ( + + ) +} + +export { Tabs, TabsList, TabsTrigger } diff --git a/packages/duron-dashboard/src/hooks/use-data-table.ts b/packages/duron-dashboard/src/hooks/use-data-table.ts index 0225f8f..8b05037 100644 --- a/packages/duron-dashboard/src/hooks/use-data-table.ts +++ b/packages/duron-dashboard/src/hooks/use-data-table.ts @@ -237,6 +237,23 @@ export function useDataTable(props: UseDataTableProps) { const [columnFilters, setColumnFilters] = React.useState(initialColumnFilters) + // Watch for external URL changes (e.g. from quick filter toggles) + const [externalStatusFilter] = useQueryState('status', parseAsArrayOf(parseAsString).withDefault([])) + + React.useEffect(() => { + if (enableAdvancedFilter) return + + setColumnFilters((prev) => { + const withoutStatus = prev.filter((f) => f.id !== 'status') + + if (externalStatusFilter.length > 0) { + return [...withoutStatus, { id: 'status', value: externalStatusFilter }] + } + + return withoutStatus + }) + }, [externalStatusFilter, enableAdvancedFilter]) + const onColumnFiltersChange = React.useCallback( (updaterOrValue: Updater) => { if (enableAdvancedFilter) return diff --git a/packages/duron-dashboard/src/hooks/use-job-filter.ts b/packages/duron-dashboard/src/hooks/use-job-filter.ts new file mode 100644 index 0000000..dc3bbcf --- /dev/null +++ b/packages/duron-dashboard/src/hooks/use-job-filter.ts @@ -0,0 +1,41 @@ +import { parseAsArrayOf, parseAsString, useQueryState } from 'nuqs' +import { useCallback, useMemo } from 'react' + +export type JobFilter = 'live' | 'archive' | 'all' + +export function useJobFilter() { + const [status, setStatus] = useQueryState('status', parseAsArrayOf(parseAsString).withDefault([])) + + const 
filter = useMemo((): JobFilter => { + if (status.length === 2 && status.includes('created') && status.includes('active')) { + return 'live' + } + if ( + status.length === 3 && + status.includes('completed') && + status.includes('failed') && + status.includes('cancelled') + ) { + return 'archive' + } + if (status.length === 0) { + return 'all' + } + return 'live' + }, [status]) + + const setFilter = useCallback( + (newFilter: JobFilter) => { + if (newFilter === 'live') { + setStatus(['created', 'active']) + } else if (newFilter === 'archive') { + setStatus(['completed', 'failed', 'cancelled']) + } else { + setStatus(null) + } + }, + [setStatus], + ) + + return { filter, setFilter } +} diff --git a/packages/duron-dashboard/src/lib/api.ts b/packages/duron-dashboard/src/lib/api.ts index 2d9e2c3..c6743a1 100644 --- a/packages/duron-dashboard/src/lib/api.ts +++ b/packages/duron-dashboard/src/lib/api.ts @@ -448,8 +448,9 @@ export function useActionsMetadata() { export function useRunAction() { const apiRequest = useApiRequest() const queryClient = useQueryClient() + return useMutation({ - mutationFn: async ({ actionName, input }: { actionName: string; input: any }) => { + mutationFn: async ({ actionName, input }: { actionName: string; input: Record }) => { return apiRequest<{ success: boolean; jobId: string }>(`/actions/${actionName}/run`, { method: 'POST', body: JSON.stringify(input), @@ -461,3 +462,58 @@ export function useRunAction() { }, }) } + +// Archive hooks +export interface ArchiveStatsResponse { + jobsCount: number + stepsCount: number + spansCount: number + oldestJobDate: string | null + totalSizeBytes: number | null + lastPrunedAt: string | null +} + +export function useArchiveStats() { + const apiRequest = useApiRequest() + + return useQuery({ + queryKey: ['archive', 'stats'], + queryFn: () => apiRequest('/archive/stats'), + }) +} + +export function usePruneArchive() { + const apiRequest = useApiRequest() + const queryClient = useQueryClient() + + return 
useMutation({ + mutationFn: async (options: { olderThan: string; batchSize?: number; maxBatches?: number }) => { + return apiRequest<{ deletedJobs: number }>('/archive/prune', { + method: 'POST', + body: JSON.stringify(options), + }) + }, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['archive', 'stats'] }) + queryClient.invalidateQueries({ queryKey: ['jobs'] }) + }, + }) +} + +export function useTruncateArchive() { + const apiRequest = useApiRequest() + const queryClient = useQueryClient() + + return useMutation({ + mutationFn: async () => { + return apiRequest<{ success: boolean }>('/archive/truncate', { + method: 'POST', + body: JSON.stringify({ confirm: true }), + }) + }, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['archive', 'stats'] }) + queryClient.invalidateQueries({ queryKey: ['jobs'] }) + }, + }) +} diff --git a/packages/duron-dashboard/src/views/archive-page.tsx b/packages/duron-dashboard/src/views/archive-page.tsx new file mode 100644 index 0000000..17549df --- /dev/null +++ b/packages/duron-dashboard/src/views/archive-page.tsx @@ -0,0 +1,133 @@ +'use client' + +import { Archive, Clock, Database, Trash2 } from 'lucide-react' + +import { Button } from '@/components/ui/button' +import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card' +import { useArchiveStats, usePruneArchive, useTruncateArchive } from '@/lib/api' + +export function ArchivePage() { + const { data: stats, isLoading } = useArchiveStats() + const pruneMutation = usePruneArchive() + const truncateMutation = useTruncateArchive() + + const handlePrune = async () => { + const olderThan = prompt('Prune jobs older than (e.g. 
"7d", "1h", "30m"):', '7d') + if (!olderThan) return + + try { + const result = await pruneMutation.mutateAsync({ olderThan }) + alert(`Pruned ${result.deletedJobs} job(s)`) + } catch (error: any) { + alert(error?.message || 'Failed to prune archive') + } + } + + const handleTruncate = async () => { + if ( + !confirm( + 'WARNING: This will permanently delete ALL archived jobs, steps, and spans. This action cannot be undone.\n\nAre you sure?', + ) + ) { + return + } + + try { + await truncateMutation.mutateAsync() + alert('Archive truncated successfully') + } catch (error: any) { + alert(error?.message || 'Failed to truncate archive') + } + } + + return ( +
+
+
+
+

+ + Archive Management +

+

Manage archived jobs, steps, and spans

+
+
+ + +
+
+ + {isLoading ? ( +
Loading stats...
+ ) : ( +
+ + + Archived Jobs + + + +
{stats?.jobsCount ?? 0}
+ Total jobs in archive +
+
+ + + + Archived Steps + + + +
{stats?.stepsCount ?? 0}
+ Total steps in archive +
+
+ + + + Archived Spans + + + +
{stats?.spansCount ?? 0}
+ Total spans in archive +
+
+ + + + Oldest Job + + + +
+ {stats?.oldestJobDate ? new Date(stats.oldestJobDate).toLocaleDateString() : '—'} +
+ Date of oldest archived job +
+
+ + + + Last Pruned + + + +
+ {stats?.lastPrunedAt ? new Date(stats.lastPrunedAt).toLocaleDateString() : '—'} +
+ When archive was last pruned +
+
+
+ )} +
+
+ ) +} diff --git a/packages/duron-dashboard/src/views/dashboard.tsx b/packages/duron-dashboard/src/views/dashboard.tsx index acb0892..e6ace1c 100644 --- a/packages/duron-dashboard/src/views/dashboard.tsx +++ b/packages/duron-dashboard/src/views/dashboard.tsx @@ -1,6 +1,6 @@ 'use client' -import { LogOut, MoreVertical, Plus, Trash2 } from 'lucide-react' +import { Activity, Archive, LogOut, MoreVertical, Plus, Trash2 } from 'lucide-react' import { useCallback, useEffect, useMemo, useState } from 'react' import { CreateJobDialog } from '@/components/create-job-dialog' @@ -19,9 +19,11 @@ import { ResizablePanel, ResizablePanelGroup } from '@/components/ui/resizable' import { useAuth } from '@/contexts/auth-context' import { useLayout } from '@/contexts/layout-context' import { useIsMobile } from '@/hooks/use-is-mobile' +import { useJobFilter } from '@/hooks/use-job-filter' import { useJobParams } from '@/hooks/use-job-params' import { useDeleteJobs } from '@/lib/api' import { cn } from '@/lib/utils' +import { ArchivePage } from './archive-page' import { JobDetails } from './job-details' import { JobsTable } from './jobs-table' import { StepList } from './step-list' @@ -38,6 +40,8 @@ export function Dashboard({ showLogo = true, enableLogin = true, showThemeToggle const [selectedStepId, setSelectedStepId] = useState(null) const [createJobDialogOpen, setCreateJobDialogOpen] = useState(false) const [jobDetailsVisible, setJobDetailsVisible] = useState(false) + const [archivePageVisible, setArchivePageVisible] = useState(false) + const { filter: jobFilter, setFilter: setJobFilter } = useJobFilter() const isMobile = useIsMobile() const { logout } = useAuth() const { config, setDesktopHorizontalSizes, setDesktopVerticalSizes, setMobileVerticalSizes } = useLayout() @@ -166,6 +170,10 @@ export function Dashboard({ showLogo = true, enableLogin = true, showThemeToggle Create Job + setArchivePageVisible(true)}> + + Archive +
{showThemeToggle && } +
+ + + +
+ +
+ )} + {/* Desktop Layout with Resizable Panels */} {/* Layout: [Jobs Table (top)] / [Job Details | Steps (bottom)] */} - {!isMobile && ( + {!archivePageVisible && !isMobile && ( statement-breakpoint -CREATE TABLE "duron"."job_steps" ( - "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), - "job_id" uuid NOT NULL, - "parent_step_id" uuid, - "branch" boolean DEFAULT false NOT NULL, - "name" text NOT NULL, - "status" text DEFAULT 'active' NOT NULL, - "output" jsonb, - "error" jsonb, - "started_at" timestamp with time zone DEFAULT now() NOT NULL, - "finished_at" timestamp with time zone, - "timeout_ms" integer NOT NULL, - "expires_at" timestamp with time zone, - "retries_limit" integer DEFAULT 0 NOT NULL, - "retries_count" integer DEFAULT 0 NOT NULL, - "delayed_ms" integer, - "history_failed_attempts" jsonb DEFAULT '{}' NOT NULL, - "created_at" timestamp with time zone DEFAULT now() NOT NULL, - "updated_at" timestamp with time zone DEFAULT now() NOT NULL, - CONSTRAINT "unique_job_step_name_parent" UNIQUE NULLS NOT DISTINCT("job_id","name","parent_step_id"), - CONSTRAINT "job_steps_status_check" CHECK ("status" IN ('active','completed','failed','cancelled')) -); ---> statement-breakpoint -CREATE TABLE "duron"."jobs" ( - "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), - "action_name" text NOT NULL, - "group_key" text NOT NULL, - "description" text, - "status" text DEFAULT 'created' NOT NULL, - "checksum" text NOT NULL, - "input" jsonb DEFAULT '{}' NOT NULL, - "output" jsonb, - "error" jsonb, - "timeout_ms" integer NOT NULL, - "expires_at" timestamp with time zone, - "started_at" timestamp with time zone, - "finished_at" timestamp with time zone, - "client_id" text, - "concurrency_limit" integer NOT NULL, - "concurrency_step_limit" integer NOT NULL, - "created_at" timestamp with time zone DEFAULT now() NOT NULL, - "updated_at" timestamp with time zone DEFAULT now() NOT NULL, - CONSTRAINT "jobs_status_check" CHECK ("status" IN 
('created','active','completed','failed','cancelled')) -); ---> statement-breakpoint -CREATE TABLE "duron"."spans" ( - "id" bigserial PRIMARY KEY, - "trace_id" text NOT NULL, - "span_id" text NOT NULL, - "parent_span_id" text, - "job_id" uuid, - "step_id" uuid, - "name" text NOT NULL, - "kind" integer DEFAULT 0 NOT NULL, - "start_time_unix_nano" bigint NOT NULL, - "end_time_unix_nano" bigint, - "status_code" integer DEFAULT 0 NOT NULL, - "status_message" text, - "attributes" jsonb DEFAULT '{}' NOT NULL, - "events" jsonb DEFAULT '[]' NOT NULL, - CONSTRAINT "spans_kind_check" CHECK ("kind" IN (0, 1, 2, 3, 4)), - CONSTRAINT "spans_status_code_check" CHECK ("status_code" IN (0, 1, 2)) -); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_job_id" ON "duron"."job_steps" ("job_id");--> statement-breakpoint -CREATE INDEX "idx_job_steps_status" ON "duron"."job_steps" ("status");--> statement-breakpoint -CREATE INDEX "idx_job_steps_name" ON "duron"."job_steps" ("name");--> statement-breakpoint -CREATE INDEX "idx_job_steps_expires_at" ON "duron"."job_steps" ("expires_at");--> statement-breakpoint -CREATE INDEX "idx_job_steps_parent_step_id" ON "duron"."job_steps" ("parent_step_id");--> statement-breakpoint -CREATE INDEX "idx_job_steps_job_status" ON "duron"."job_steps" ("job_id","status");--> statement-breakpoint -CREATE INDEX "idx_job_steps_job_name" ON "duron"."job_steps" ("job_id","name");--> statement-breakpoint -CREATE INDEX "idx_job_steps_output_fts" ON "duron"."job_steps" USING gin (to_tsvector('english', "output"::text));--> statement-breakpoint -CREATE INDEX "idx_jobs_action_name" ON "duron"."jobs" ("action_name");--> statement-breakpoint -CREATE INDEX "idx_jobs_status" ON "duron"."jobs" ("status");--> statement-breakpoint -CREATE INDEX "idx_jobs_group_key" ON "duron"."jobs" ("group_key");--> statement-breakpoint -CREATE INDEX "idx_jobs_description" ON "duron"."jobs" ("description");--> statement-breakpoint -CREATE INDEX "idx_jobs_started_at" ON "duron"."jobs" 
("started_at");--> statement-breakpoint -CREATE INDEX "idx_jobs_finished_at" ON "duron"."jobs" ("finished_at");--> statement-breakpoint -CREATE INDEX "idx_jobs_expires_at" ON "duron"."jobs" ("expires_at");--> statement-breakpoint -CREATE INDEX "idx_jobs_client_id" ON "duron"."jobs" ("client_id");--> statement-breakpoint -CREATE INDEX "idx_jobs_checksum" ON "duron"."jobs" ("checksum");--> statement-breakpoint -CREATE INDEX "idx_jobs_concurrency_limit" ON "duron"."jobs" ("concurrency_limit");--> statement-breakpoint -CREATE INDEX "idx_jobs_concurrency_step_limit" ON "duron"."jobs" ("concurrency_step_limit");--> statement-breakpoint -CREATE INDEX "idx_jobs_action_status" ON "duron"."jobs" ("action_name","status");--> statement-breakpoint -CREATE INDEX "idx_jobs_action_group" ON "duron"."jobs" ("action_name","group_key");--> statement-breakpoint -CREATE INDEX "idx_jobs_input_fts" ON "duron"."jobs" USING gin (to_tsvector('english', "input"::text));--> statement-breakpoint -CREATE INDEX "idx_jobs_output_fts" ON "duron"."jobs" USING gin (to_tsvector('english', "output"::text));--> statement-breakpoint -CREATE INDEX "idx_spans_trace_id" ON "duron"."spans" ("trace_id");--> statement-breakpoint -CREATE INDEX "idx_spans_span_id" ON "duron"."spans" ("span_id");--> statement-breakpoint -CREATE INDEX "idx_spans_job_id" ON "duron"."spans" ("job_id");--> statement-breakpoint -CREATE INDEX "idx_spans_step_id" ON "duron"."spans" ("step_id");--> statement-breakpoint -CREATE INDEX "idx_spans_name" ON "duron"."spans" ("name");--> statement-breakpoint -CREATE INDEX "idx_spans_kind" ON "duron"."spans" ("kind");--> statement-breakpoint -CREATE INDEX "idx_spans_status_code" ON "duron"."spans" ("status_code");--> statement-breakpoint -CREATE INDEX "idx_spans_job_step" ON "duron"."spans" ("job_id","step_id");--> statement-breakpoint -CREATE INDEX "idx_spans_trace_parent" ON "duron"."spans" ("trace_id","parent_span_id");--> statement-breakpoint -CREATE INDEX "idx_spans_attributes" ON 
"duron"."spans" USING gin ("attributes");--> statement-breakpoint -CREATE INDEX "idx_spans_events" ON "duron"."spans" USING gin ("events");--> statement-breakpoint -ALTER TABLE "duron"."job_steps" ADD CONSTRAINT "job_steps_job_id_jobs_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs"("id") ON DELETE CASCADE;--> statement-breakpoint -ALTER TABLE "duron"."spans" ADD CONSTRAINT "spans_job_id_jobs_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs"("id") ON DELETE CASCADE;--> statement-breakpoint -ALTER TABLE "duron"."spans" ADD CONSTRAINT "spans_step_id_job_steps_id_fkey" FOREIGN KEY ("step_id") REFERENCES "duron"."job_steps"("id") ON DELETE CASCADE; \ No newline at end of file diff --git a/packages/duron/migrations/postgres/20260418120000_active_archive_split/migration.sql b/packages/duron/migrations/postgres/20260421153337_large_nitro/migration.sql similarity index 58% rename from packages/duron/migrations/postgres/20260418120000_active_archive_split/migration.sql rename to packages/duron/migrations/postgres/20260421153337_large_nitro/migration.sql index 961ff65..485ae18 100644 --- a/packages/duron/migrations/postgres/20260418120000_active_archive_split/migration.sql +++ b/packages/duron/migrations/postgres/20260421153337_large_nitro/migration.sql @@ -1,49 +1,5 @@ CREATE SCHEMA IF NOT EXISTS "duron"; --> statement-breakpoint -CREATE TABLE "duron"."jobs_active" ( - "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), - "action_name" text NOT NULL, - "group_key" text NOT NULL, - "description" text, - "status" text DEFAULT 'created' NOT NULL, - "checksum" text NOT NULL, - "input" jsonb DEFAULT '{}' NOT NULL, - "output" jsonb, - "error" jsonb, - "timeout_ms" integer NOT NULL, - "expires_at" timestamp with time zone, - "started_at" timestamp with time zone, - "finished_at" timestamp with time zone, - "client_id" text, - "concurrency_limit" integer NOT NULL, - "concurrency_step_limit" integer NOT NULL, - "created_at" timestamp with time zone DEFAULT now() NOT 
NULL, - "updated_at" timestamp with time zone DEFAULT now() NOT NULL, - CONSTRAINT "jobs_active_status_check" CHECK ("status" IN ('created','active','completed','failed','cancelled')) -); ---> statement-breakpoint -CREATE TABLE "duron"."jobs_archive" ( - "id" uuid PRIMARY KEY, - "action_name" text NOT NULL, - "group_key" text NOT NULL, - "description" text, - "status" text NOT NULL, - "checksum" text NOT NULL, - "input" jsonb DEFAULT '{}' NOT NULL, - "output" jsonb, - "error" jsonb, - "timeout_ms" integer NOT NULL, - "expires_at" timestamp with time zone, - "started_at" timestamp with time zone, - "finished_at" timestamp with time zone, - "client_id" text, - "concurrency_limit" integer NOT NULL, - "concurrency_step_limit" integer NOT NULL, - "created_at" timestamp with time zone DEFAULT now() NOT NULL, - "updated_at" timestamp with time zone DEFAULT now() NOT NULL, - CONSTRAINT "jobs_archive_status_check" CHECK ("status" IN ('created','active','completed','failed','cancelled')) -); ---> statement-breakpoint CREATE TABLE "duron"."job_steps_active" ( "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), "job_id" uuid NOT NULL, @@ -90,26 +46,51 @@ CREATE TABLE "duron"."job_steps_archive" ( CONSTRAINT "job_steps_archive_status_check" CHECK ("status" IN ('active','completed','failed','cancelled')) ); --> statement-breakpoint -CREATE TABLE "duron"."spans_active" ( - "id" bigserial PRIMARY KEY, - "trace_id" text NOT NULL, - "span_id" text NOT NULL, - "parent_span_id" text, - "job_id" uuid, - "step_id" uuid, - "name" text NOT NULL, - "kind" integer DEFAULT 0 NOT NULL, - "start_time_unix_nano" bigint NOT NULL, - "end_time_unix_nano" bigint, - "status_code" integer DEFAULT 0 NOT NULL, - "status_message" text, - "attributes" jsonb DEFAULT '{}' NOT NULL, - "events" jsonb DEFAULT '[]' NOT NULL, - CONSTRAINT "spans_active_kind_check" CHECK ("kind" IN (0, 1, 2, 3, 4)), - CONSTRAINT "spans_active_status_code_check" CHECK ("status_code" IN (0, 1, 2)) +CREATE TABLE "duron"."jobs_active" 
( + "id" uuid PRIMARY KEY DEFAULT gen_random_uuid(), + "action_name" text NOT NULL, + "group_key" text NOT NULL, + "description" text, + "status" text DEFAULT 'created' NOT NULL, + "checksum" text NOT NULL, + "input" jsonb DEFAULT '{}' NOT NULL, + "output" jsonb, + "error" jsonb, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "started_at" timestamp with time zone, + "finished_at" timestamp with time zone, + "client_id" text, + "concurrency_limit" integer NOT NULL, + "concurrency_step_limit" integer NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + CONSTRAINT "jobs_active_status_check" CHECK ("status" IN ('created','active','completed','failed','cancelled')) ); --> statement-breakpoint -CREATE TABLE "duron"."spans_archive" ( +CREATE TABLE "duron"."jobs_archive" ( + "id" uuid PRIMARY KEY, + "action_name" text NOT NULL, + "group_key" text NOT NULL, + "description" text, + "status" text NOT NULL, + "checksum" text NOT NULL, + "input" jsonb DEFAULT '{}' NOT NULL, + "output" jsonb, + "error" jsonb, + "timeout_ms" integer NOT NULL, + "expires_at" timestamp with time zone, + "started_at" timestamp with time zone, + "finished_at" timestamp with time zone, + "client_id" text, + "concurrency_limit" integer NOT NULL, + "concurrency_step_limit" integer NOT NULL, + "created_at" timestamp with time zone DEFAULT now() NOT NULL, + "updated_at" timestamp with time zone DEFAULT now() NOT NULL, + CONSTRAINT "jobs_archive_status_check" CHECK ("status" IN ('created','active','completed','failed','cancelled')) +); +--> statement-breakpoint +CREATE TABLE "duron"."spans" ( "id" bigserial PRIMARY KEY, "trace_id" text NOT NULL, "span_id" text NOT NULL, @@ -124,100 +105,51 @@ CREATE TABLE "duron"."spans_archive" ( "status_message" text, "attributes" jsonb DEFAULT '{}' NOT NULL, "events" jsonb DEFAULT '[]' NOT NULL, - CONSTRAINT "spans_archive_kind_check" CHECK ("kind" IN (0, 
1, 2, 3, 4)), - CONSTRAINT "spans_archive_status_code_check" CHECK ("status_code" IN (0, 1, 2)) + CONSTRAINT "spans_kind_check" CHECK ("kind" IN (0, 1, 2, 3, 4)), + CONSTRAINT "spans_status_code_check" CHECK ("status_code" IN (0, 1, 2)) ); --> statement-breakpoint -CREATE INDEX "idx_jobs_active_action_name" ON "duron"."jobs_active" ("action_name"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_status" ON "duron"."jobs_active" ("status"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_group_key" ON "duron"."jobs_active" ("group_key"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_description" ON "duron"."jobs_active" ("description"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_started_at" ON "duron"."jobs_active" ("started_at"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_expires_at" ON "duron"."jobs_active" ("expires_at"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_client_id" ON "duron"."jobs_active" ("client_id"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_checksum" ON "duron"."jobs_active" ("checksum"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_concurrency_limit" ON "duron"."jobs_active" ("concurrency_limit"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_concurrency_step_limit" ON "duron"."jobs_active" ("concurrency_step_limit"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_action_status" ON "duron"."jobs_active" ("action_name","status"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_action_group" ON "duron"."jobs_active" ("action_name","group_key"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_input_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "input"::text)); ---> statement-breakpoint -CREATE INDEX "idx_jobs_active_output_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "output"::text)); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_group_key" ON "duron"."jobs_archive" 
("group_key"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_action_name" ON "duron"."jobs_archive" ("action_name"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_finished_at" ON "duron"."jobs_archive" ("finished_at"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_action_group" ON "duron"."jobs_archive" ("action_name","group_key"); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_input_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "input"::text)); ---> statement-breakpoint -CREATE INDEX "idx_jobs_archive_output_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "output"::text)); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_job_id" ON "duron"."job_steps_active" ("job_id"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_status" ON "duron"."job_steps_active" ("status"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_name" ON "duron"."job_steps_active" ("name"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_expires_at" ON "duron"."job_steps_active" ("expires_at"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_parent_step_id" ON "duron"."job_steps_active" ("parent_step_id"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_job_status" ON "duron"."job_steps_active" ("job_id","status"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_active_job_name" ON "duron"."job_steps_active" ("job_id","name"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_archive_job_id" ON "duron"."job_steps_archive" ("job_id"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_archive_job_finished_at" ON "duron"."job_steps_archive" ("job_finished_at"); ---> statement-breakpoint -CREATE INDEX "idx_job_steps_archive_name" ON "duron"."job_steps_archive" ("name"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_trace_id" ON "duron"."spans_active" ("trace_id"); ---> statement-breakpoint -CREATE INDEX 
"idx_spans_active_span_id" ON "duron"."spans_active" ("span_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_job_id" ON "duron"."spans_active" ("job_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_step_id" ON "duron"."spans_active" ("step_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_name" ON "duron"."spans_active" ("name"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_kind" ON "duron"."spans_active" ("kind"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_status_code" ON "duron"."spans_active" ("status_code"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_job_step" ON "duron"."spans_active" ("job_id","step_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_trace_parent" ON "duron"."spans_active" ("trace_id","parent_span_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_attributes" ON "duron"."spans_active" USING gin ("attributes"); ---> statement-breakpoint -CREATE INDEX "idx_spans_active_events" ON "duron"."spans_active" USING gin ("events"); ---> statement-breakpoint -CREATE INDEX "idx_spans_archive_trace_id" ON "duron"."spans_archive" ("trace_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_archive_job_id" ON "duron"."spans_archive" ("job_id"); ---> statement-breakpoint -CREATE INDEX "idx_spans_archive_step_id" ON "duron"."spans_archive" ("step_id"); ---> statement-breakpoint -ALTER TABLE "duron"."job_steps_active" ADD CONSTRAINT "job_steps_active_job_id_jobs_active_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_active"("id") ON DELETE CASCADE; ---> statement-breakpoint -ALTER TABLE "duron"."spans_active" ADD CONSTRAINT "spans_active_job_id_jobs_active_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_active"("id") ON DELETE CASCADE; ---> statement-breakpoint -ALTER TABLE "duron"."spans_active" ADD CONSTRAINT "spans_active_step_id_job_steps_active_id_fkey" FOREIGN KEY ("step_id") REFERENCES "duron"."job_steps_active"("id") ON 
DELETE CASCADE; +CREATE INDEX "idx_job_steps_active_job_id" ON "duron"."job_steps_active" ("job_id");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_status" ON "duron"."job_steps_active" ("status");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_name" ON "duron"."job_steps_active" ("name");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_expires_at" ON "duron"."job_steps_active" ("expires_at");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_parent_step_id" ON "duron"."job_steps_active" ("parent_step_id");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_job_status" ON "duron"."job_steps_active" ("job_id","status");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_job_name" ON "duron"."job_steps_active" ("job_id","name");--> statement-breakpoint +CREATE INDEX "idx_job_steps_active_output_fts" ON "duron"."job_steps_active" USING gin (to_tsvector('english', "output"::text));--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_job_id" ON "duron"."job_steps_archive" ("job_id");--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_job_finished_at" ON "duron"."job_steps_archive" ("job_finished_at");--> statement-breakpoint +CREATE INDEX "idx_job_steps_archive_name" ON "duron"."job_steps_archive" ("name");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_name" ON "duron"."jobs_active" ("action_name");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_status" ON "duron"."jobs_active" ("status");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_group_key" ON "duron"."jobs_active" ("group_key");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_description" ON "duron"."jobs_active" ("description");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_started_at" ON "duron"."jobs_active" ("started_at");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_expires_at" ON "duron"."jobs_active" ("expires_at");--> statement-breakpoint +CREATE INDEX 
"idx_jobs_active_client_id" ON "duron"."jobs_active" ("client_id");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_checksum" ON "duron"."jobs_active" ("checksum");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_concurrency_limit" ON "duron"."jobs_active" ("concurrency_limit");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_concurrency_step_limit" ON "duron"."jobs_active" ("concurrency_step_limit");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_status" ON "duron"."jobs_active" ("action_name","status");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_action_group" ON "duron"."jobs_active" ("action_name","group_key");--> statement-breakpoint +CREATE INDEX "idx_jobs_active_input_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "input"::text));--> statement-breakpoint +CREATE INDEX "idx_jobs_active_output_fts" ON "duron"."jobs_active" USING gin (to_tsvector('english', "output"::text));--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_group_key" ON "duron"."jobs_archive" ("group_key");--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_action_name" ON "duron"."jobs_archive" ("action_name");--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_finished_at" ON "duron"."jobs_archive" ("finished_at");--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_action_group" ON "duron"."jobs_archive" ("action_name","group_key");--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_input_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "input"::text));--> statement-breakpoint +CREATE INDEX "idx_jobs_archive_output_fts" ON "duron"."jobs_archive" USING gin (to_tsvector('english', "output"::text));--> statement-breakpoint +CREATE INDEX "idx_spans_trace_id" ON "duron"."spans" ("trace_id");--> statement-breakpoint +CREATE INDEX "idx_spans_span_id" ON "duron"."spans" ("span_id");--> statement-breakpoint +CREATE INDEX "idx_spans_job_id" ON "duron"."spans" ("job_id");--> statement-breakpoint 
+CREATE INDEX "idx_spans_step_id" ON "duron"."spans" ("step_id");--> statement-breakpoint +CREATE INDEX "idx_spans_name" ON "duron"."spans" ("name");--> statement-breakpoint +CREATE INDEX "idx_spans_kind" ON "duron"."spans" ("kind");--> statement-breakpoint +CREATE INDEX "idx_spans_status_code" ON "duron"."spans" ("status_code");--> statement-breakpoint +CREATE INDEX "idx_spans_job_step" ON "duron"."spans" ("job_id","step_id");--> statement-breakpoint +CREATE INDEX "idx_spans_trace_parent" ON "duron"."spans" ("trace_id","parent_span_id");--> statement-breakpoint +CREATE INDEX "idx_spans_attributes" ON "duron"."spans" USING gin ("attributes");--> statement-breakpoint +CREATE INDEX "idx_spans_events" ON "duron"."spans" USING gin ("events");--> statement-breakpoint +ALTER TABLE "duron"."job_steps_active" ADD CONSTRAINT "job_steps_active_job_id_jobs_active_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_active"("id") ON DELETE CASCADE;--> statement-breakpoint +ALTER TABLE "duron"."job_steps_archive" ADD CONSTRAINT "job_steps_archive_job_id_jobs_archive_id_fkey" FOREIGN KEY ("job_id") REFERENCES "duron"."jobs_archive"("id") ON DELETE CASCADE; \ No newline at end of file diff --git a/packages/duron/migrations/postgres/20260121160012_normal_bloodstrike/snapshot.json b/packages/duron/migrations/postgres/20260421153337_large_nitro/snapshot.json similarity index 61% rename from packages/duron/migrations/postgres/20260121160012_normal_bloodstrike/snapshot.json rename to packages/duron/migrations/postgres/20260421153337_large_nitro/snapshot.json index fe98349..9e9cacd 100644 --- a/packages/duron/migrations/postgres/20260121160012_normal_bloodstrike/snapshot.json +++ b/packages/duron/migrations/postgres/20260421153337_large_nitro/snapshot.json @@ -1,7 +1,7 @@ { "version": "8", "dialect": "postgres", - "id": "47ec47b1-f323-4e7e-a4bc-605f703aa384", + "id": "dd03307a-a6a5-4223-aa91-fbf5ad99469c", "prevIds": ["00000000-0000-0000-0000-000000000000"], "ddl": [ { @@ -10,13 
+10,25 @@ }, { "isRlsEnabled": false, - "name": "job_steps", + "name": "job_steps_active", "entityType": "tables", "schema": "duron" }, { "isRlsEnabled": false, - "name": "jobs", + "name": "job_steps_archive", + "entityType": "tables", + "schema": "duron" + }, + { + "isRlsEnabled": false, + "name": "jobs_active", + "entityType": "tables", + "schema": "duron" + }, + { + "isRlsEnabled": false, + "name": "jobs_archive", "entityType": "tables", "schema": "duron" }, @@ -37,7 +49,241 @@ "name": "id", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" + }, + { + "type": "uuid", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "job_id", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "uuid", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "parent_step_id", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "boolean", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "false", + "generated": null, + "identity": null, + "name": "branch", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "name", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "'active'", + "generated": null, + "identity": null, + "name": "status", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": 
"output", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "error", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "started_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "finished_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "timeout_ms", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "expires_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "0", + "generated": null, + "identity": null, + "name": "retries_limit", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "0", + "generated": null, + "identity": null, + "name": "retries_count", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + 
"default": null, + "generated": null, + "identity": null, + "name": "delayed_ms", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "'{}'", + "generated": null, + "identity": null, + "name": "history_failed_attempts", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "created_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "updated_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_active" + }, + { + "type": "uuid", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "id", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" }, { "type": "uuid", @@ -50,7 +296,7 @@ "name": "job_id", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "job_steps_archive" }, { "type": "uuid", @@ -63,20 +309,280 @@ "name": "parent_step_id", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "job_steps_archive" }, { "type": "boolean", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "false", + "default": "false", + "generated": null, + "identity": null, + "name": "branch", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "name", + "entityType": "columns", + "schema": 
"duron", + "table": "job_steps_archive" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "'active'", + "generated": null, + "identity": null, + "name": "status", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "output", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "error", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "started_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "finished_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "timeout_ms", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "expires_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "0", + "generated": null, + "identity": 
null, + "name": "retries_limit", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "0", + "generated": null, + "identity": null, + "name": "retries_count", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "integer", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "delayed_ms", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "jsonb", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "'{}'", + "generated": null, + "identity": null, + "name": "history_failed_attempts", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "created_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "now()", + "generated": null, + "identity": null, + "name": "updated_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "timestamp with time zone", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "job_finished_at", + "entityType": "columns", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "type": "uuid", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "gen_random_uuid()", + "generated": null, + "identity": null, + "name": "id", + "entityType": "columns", + "schema": "duron", + "table": "jobs_active" + }, + { + "type": "text", + 
"typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "action_name", + "entityType": "columns", + "schema": "duron", + "table": "jobs_active" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "group_key", + "entityType": "columns", + "schema": "duron", + "table": "jobs_active" + }, + { + "type": "text", + "typeSchema": null, + "notNull": false, + "dimensions": 0, + "default": null, + "generated": null, + "identity": null, + "name": "description", + "entityType": "columns", + "schema": "duron", + "table": "jobs_active" + }, + { + "type": "text", + "typeSchema": null, + "notNull": true, + "dimensions": 0, + "default": "'created'", "generated": null, "identity": null, - "name": "branch", + "name": "status", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "text", @@ -86,23 +592,23 @@ "default": null, "generated": null, "identity": null, - "name": "name", + "name": "checksum", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { - "type": "text", + "type": "jsonb", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "'active'", + "default": "'{}'", "generated": null, "identity": null, - "name": "status", + "name": "input", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "jsonb", @@ -115,7 +621,7 @@ "name": "output", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "jsonb", @@ -128,20 +634,20 @@ "name": "error", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { - "type": "timestamp with time zone", + "type": "integer", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "now()", + "default": null, 
"generated": null, "identity": null, - "name": "started_at", + "name": "timeout_ms", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "timestamp with time zone", @@ -151,23 +657,23 @@ "default": null, "generated": null, "identity": null, - "name": "finished_at", + "name": "expires_at", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { - "type": "integer", + "type": "timestamp with time zone", "typeSchema": null, - "notNull": true, + "notNull": false, "dimensions": 0, "default": null, "generated": null, "identity": null, - "name": "timeout_ms", + "name": "started_at", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "timestamp with time zone", @@ -177,62 +683,49 @@ "default": null, "generated": null, "identity": null, - "name": "expires_at", + "name": "finished_at", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { - "type": "integer", + "type": "text", "typeSchema": null, - "notNull": true, + "notNull": false, "dimensions": 0, - "default": "0", + "default": null, "generated": null, "identity": null, - "name": "retries_limit", + "name": "client_id", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "integer", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "0", - "generated": null, - "identity": null, - "name": "retries_count", - "entityType": "columns", - "schema": "duron", - "table": "job_steps" - }, - { - "type": "integer", - "typeSchema": null, - "notNull": false, - "dimensions": 0, "default": null, "generated": null, "identity": null, - "name": "delayed_ms", + "name": "concurrency_limit", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { - "type": "jsonb", + "type": "integer", "typeSchema": null, "notNull": true, "dimensions": 0, - 
"default": "'{}'", + "default": null, "generated": null, "identity": null, - "name": "history_failed_attempts", + "name": "concurrency_step_limit", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "timestamp with time zone", @@ -245,7 +738,7 @@ "name": "created_at", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "timestamp with time zone", @@ -258,20 +751,20 @@ "name": "updated_at", "entityType": "columns", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "type": "uuid", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "gen_random_uuid()", + "default": null, "generated": null, "identity": null, "name": "id", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", @@ -284,7 +777,7 @@ "name": "action_name", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", @@ -297,7 +790,7 @@ "name": "group_key", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", @@ -310,20 +803,20 @@ "name": "description", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", "typeSchema": null, "notNull": true, "dimensions": 0, - "default": "'created'", + "default": null, "generated": null, "identity": null, "name": "status", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", @@ -336,7 +829,7 @@ "name": "checksum", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "jsonb", @@ -349,7 +842,7 @@ "name": "input", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "jsonb", @@ -362,7 +855,7 @@ "name": "output", "entityType": "columns", "schema": "duron", - "table": "jobs" + 
"table": "jobs_archive" }, { "type": "jsonb", @@ -375,7 +868,7 @@ "name": "error", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "integer", @@ -388,7 +881,7 @@ "name": "timeout_ms", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "timestamp with time zone", @@ -401,7 +894,7 @@ "name": "expires_at", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "timestamp with time zone", @@ -414,7 +907,7 @@ "name": "started_at", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "timestamp with time zone", @@ -427,7 +920,7 @@ "name": "finished_at", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "text", @@ -440,7 +933,7 @@ "name": "client_id", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "integer", @@ -453,7 +946,7 @@ "name": "concurrency_limit", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "integer", @@ -466,7 +959,7 @@ "name": "concurrency_step_limit", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "timestamp with time zone", @@ -479,7 +972,7 @@ "name": "created_at", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "timestamp with time zone", @@ -492,7 +985,7 @@ "name": "updated_at", "entityType": "columns", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "type": "bigserial", @@ -692,10 +1185,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_job_id", + "name": "idx_job_steps_active_job_id", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -713,10 +1206,10 @@ "with": "", 
"method": "btree", "concurrently": false, - "name": "idx_job_steps_status", + "name": "idx_job_steps_active_status", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -734,10 +1227,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_name", + "name": "idx_job_steps_active_name", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -755,10 +1248,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_expires_at", + "name": "idx_job_steps_active_expires_at", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -776,10 +1269,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_parent_step_id", + "name": "idx_job_steps_active_parent_step_id", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -804,10 +1297,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_job_status", + "name": "idx_job_steps_active_job_status", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -832,10 +1325,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_job_steps_job_name", + "name": "idx_job_steps_active_job_name", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, @@ -853,16 +1346,16 @@ "with": "", "method": "gin", "concurrently": false, - "name": "idx_job_steps_output_fts", + "name": "idx_job_steps_active_output_fts", "entityType": "indexes", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": true, "columns": [ { - "value": "action_name", + "value": "job_id", 
"isExpression": false, "asc": true, "nullsFirst": false, @@ -874,16 +1367,16 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_action_name", + "name": "idx_job_steps_archive_job_id", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "job_steps_archive" }, { "nameExplicit": true, "columns": [ { - "value": "status", + "value": "job_finished_at", "isExpression": false, "asc": true, "nullsFirst": false, @@ -895,16 +1388,16 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_status", + "name": "idx_job_steps_archive_job_finished_at", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "job_steps_archive" }, { "nameExplicit": true, "columns": [ { - "value": "group_key", + "value": "name", "isExpression": false, "asc": true, "nullsFirst": false, @@ -916,16 +1409,16 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_group_key", + "name": "idx_job_steps_archive_name", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "job_steps_archive" }, { "nameExplicit": true, "columns": [ { - "value": "description", + "value": "action_name", "isExpression": false, "asc": true, "nullsFirst": false, @@ -937,16 +1430,16 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_description", + "name": "idx_jobs_active_action_name", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, "columns": [ { - "value": "started_at", + "value": "status", "isExpression": false, "asc": true, "nullsFirst": false, @@ -958,16 +1451,58 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_started_at", + "name": "idx_jobs_active_status", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, "columns": [ { - "value": "finished_at", + "value": "group_key", + "isExpression": false, + "asc": true, + 
"nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_active_group_key", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "description", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_active_description", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "started_at", "isExpression": false, "asc": true, "nullsFirst": false, @@ -979,10 +1514,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_finished_at", + "name": "idx_jobs_active_started_at", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1000,10 +1535,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_expires_at", + "name": "idx_jobs_active_expires_at", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1021,10 +1556,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_client_id", + "name": "idx_jobs_active_client_id", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1042,10 +1577,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_checksum", + "name": "idx_jobs_active_checksum", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1063,10 +1598,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_concurrency_limit", + "name": "idx_jobs_active_concurrency_limit", 
"entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1084,10 +1619,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_concurrency_step_limit", + "name": "idx_jobs_active_concurrency_step_limit", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_active" }, { "nameExplicit": true, @@ -1112,10 +1647,143 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_action_status", + "name": "idx_jobs_active_action_status", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "action_name", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + }, + { + "value": "group_key", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_active_action_group", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "to_tsvector('english', \"input\"::text)", + "isExpression": true, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "gin", + "concurrently": false, + "name": "idx_jobs_active_input_fts", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "to_tsvector('english', \"output\"::text)", + "isExpression": true, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "gin", + "concurrently": false, + "name": "idx_jobs_active_output_fts", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_active" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": 
"group_key", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_archive_group_key", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_archive" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "action_name", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_archive_action_name", + "entityType": "indexes", + "schema": "duron", + "table": "jobs_archive" + }, + { + "nameExplicit": true, + "columns": [ + { + "value": "finished_at", + "isExpression": false, + "asc": true, + "nullsFirst": false, + "opclass": null + } + ], + "isUnique": false, + "where": null, + "with": "", + "method": "btree", + "concurrently": false, + "name": "idx_jobs_archive_finished_at", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "nameExplicit": true, @@ -1140,10 +1808,10 @@ "with": "", "method": "btree", "concurrently": false, - "name": "idx_jobs_action_group", + "name": "idx_jobs_archive_action_group", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "nameExplicit": true, @@ -1161,10 +1829,10 @@ "with": "", "method": "gin", "concurrently": false, - "name": "idx_jobs_input_fts", + "name": "idx_jobs_archive_input_fts", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "nameExplicit": true, @@ -1182,10 +1850,10 @@ "with": "", "method": "gin", "concurrently": false, - "name": "idx_jobs_output_fts", + "name": "idx_jobs_archive_output_fts", "entityType": "indexes", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "nameExplicit": true, @@ -1436,55 +2104,58 @@ "nameExplicit": false, "columns": ["job_id"], 
"schemaTo": "duron", - "tableTo": "jobs", + "tableTo": "jobs_active", "columnsTo": ["id"], "onUpdate": "NO ACTION", "onDelete": "CASCADE", - "name": "job_steps_job_id_jobs_id_fkey", + "name": "job_steps_active_job_id_jobs_active_id_fkey", "entityType": "fks", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" }, { "nameExplicit": false, "columns": ["job_id"], "schemaTo": "duron", - "tableTo": "jobs", + "tableTo": "jobs_archive", "columnsTo": ["id"], "onUpdate": "NO ACTION", "onDelete": "CASCADE", - "name": "spans_job_id_jobs_id_fkey", + "name": "job_steps_archive_job_id_jobs_archive_id_fkey", "entityType": "fks", "schema": "duron", - "table": "spans" + "table": "job_steps_archive" }, { + "columns": ["id"], "nameExplicit": false, - "columns": ["step_id"], - "schemaTo": "duron", - "tableTo": "job_steps", - "columnsTo": ["id"], - "onUpdate": "NO ACTION", - "onDelete": "CASCADE", - "name": "spans_step_id_job_steps_id_fkey", - "entityType": "fks", + "name": "job_steps_active_pkey", "schema": "duron", - "table": "spans" + "table": "job_steps_active", + "entityType": "pks" }, { "columns": ["id"], "nameExplicit": false, - "name": "job_steps_pkey", + "name": "job_steps_archive_pkey", "schema": "duron", - "table": "job_steps", + "table": "job_steps_archive", "entityType": "pks" }, { "columns": ["id"], "nameExplicit": false, - "name": "jobs_pkey", + "name": "jobs_active_pkey", "schema": "duron", - "table": "jobs", + "table": "jobs_active", + "entityType": "pks" + }, + { + "columns": ["id"], + "nameExplicit": false, + "name": "jobs_archive_pkey", + "schema": "duron", + "table": "jobs_archive", "entityType": "pks" }, { @@ -1499,24 +2170,38 @@ "nameExplicit": true, "columns": ["job_id", "name", "parent_step_id"], "nullsNotDistinct": true, - "name": "unique_job_step_name_parent", + "name": "unique_job_step_active_name_parent", "entityType": "uniques", "schema": "duron", - "table": "job_steps" + "table": "job_steps_active" + }, + { + "value": "\"status\" IN 
('active','completed','failed','cancelled')", + "name": "job_steps_active_status_check", + "entityType": "checks", + "schema": "duron", + "table": "job_steps_active" }, { "value": "\"status\" IN ('active','completed','failed','cancelled')", - "name": "job_steps_status_check", + "name": "job_steps_archive_status_check", + "entityType": "checks", + "schema": "duron", + "table": "job_steps_archive" + }, + { + "value": "\"status\" IN ('created','active','completed','failed','cancelled')", + "name": "jobs_active_status_check", "entityType": "checks", "schema": "duron", - "table": "job_steps" + "table": "jobs_active" }, { "value": "\"status\" IN ('created','active','completed','failed','cancelled')", - "name": "jobs_status_check", + "name": "jobs_archive_status_check", "entityType": "checks", "schema": "duron", - "table": "jobs" + "table": "jobs_archive" }, { "value": "\"kind\" IN (0, 1, 2, 3, 4)", diff --git a/packages/duron/src/adapters/adapter.ts b/packages/duron/src/adapters/adapter.ts index 528e97b..d2b0c52 100644 --- a/packages/duron/src/adapters/adapter.ts +++ b/packages/duron/src/adapters/adapter.ts @@ -14,6 +14,7 @@ import { type StepStatus, } from '../constants.js' import type { + ArchiveStats, CancelJobOptions, CancelJobStepOptions, CompleteJobOptions, @@ -41,12 +42,12 @@ import type { JobStep, JobStepStatusResult, PruneArchiveOptions, - ArchiveStats, RecoverJobsOptions, RetryJobOptions, TimeTravelJobOptions, } from './schemas.js' import { + ArchiveStatsSchema, BooleanResultSchema, CancelJobOptionsSchema, CancelJobStepOptionsSchema, @@ -74,7 +75,6 @@ import { JobSchema, JobStatusResultSchema, JobStepSchema, - ArchiveStatsSchema, JobStepStatusResultSchema, JobsArrayResultSchema, NumberResultSchema, diff --git a/packages/duron/src/adapters/postgres/base.ts b/packages/duron/src/adapters/postgres/base.ts index 8371cb6..2d7db54 100644 --- a/packages/duron/src/adapters/postgres/base.ts +++ b/packages/duron/src/adapters/postgres/base.ts @@ -14,6 +14,7 @@ import { } 
from '../../constants.js' import { Adapter, + type ArchiveStats, type CancelJobOptions, type CancelJobStepOptions, type CompleteJobOptions, @@ -37,10 +38,10 @@ import { type GetSpansResult, type InsertSpanOptions, type Job, - type JobSort, type JobStatusResult, type JobStep, type JobStepStatusResult, + type PruneArchiveOptions, type RecoverJobsOptions, type RetryJobOptions, type SpanSort, @@ -55,11 +56,19 @@ export type { Job, JobStep } from '../adapter.js' type DrizzleDatabase = PgAsyncDatabase +export interface PruneSchedulerConfig { + olderThan: string | Date | number + intervalMs: number + batchSize?: number + maxBatches?: number +} + export interface AdapterOptions { connection: Connection schema?: string migrateOnStart?: boolean migrationsFolder?: string + pruneArchive?: PruneSchedulerConfig } export class PostgresBaseAdapter extends Adapter { @@ -69,6 +78,11 @@ export class PostgresBaseAdapter e protected schema: string = 'duron' protected migrateOnStart: boolean = true + // Scheduler state + private pruneTimer: ReturnType | null = null + private pruneConfig: PruneSchedulerConfig | null = null + private lastPrunedAt: Date | null = null + // ============================================================================ // Constructor // ============================================================================ @@ -84,6 +98,7 @@ export class PostgresBaseAdapter e this.connection = options.connection this.schema = options.schema ?? 'duron' this.migrateOnStart = options.migrateOnStart ?? true + this.pruneConfig = options.pruneArchive ?? 
null this.tables = createSchema(this.schema) @@ -126,10 +141,80 @@ export class PostgresBaseAdapter e this.emit('job-available', { jobId }) } }) + + // Start archive prune scheduler if configured + this._startScheduler() } protected async _stop() { - // do nothing + this._stopScheduler() + } + + // ============================================================================ + // Scheduler Methods + // ============================================================================ + + /** + * Generate a consistent advisory lock key from the schema name. + */ + private _advisoryLockKey(): number { + let hash = 0 + for (let i = 0; i < this.schema.length; i++) { + hash = (hash << 5) - hash + this.schema.charCodeAt(i) + hash |= 0 + } + return Math.abs(hash) + } + + /** + * Start the archive prune scheduler. + */ + private _startScheduler(): void { + const config = this.pruneConfig + if (!config) return + + const run = async () => { + try { + // Try to acquire advisory lock + const lockResult = await this.db.execute<{ pg_try_advisory_lock: boolean }>( + sql`SELECT pg_try_advisory_lock(${this._advisoryLockKey()})`, + ) + + if (!lockResult[0]?.pg_try_advisory_lock) { + this.logger?.debug('Another process holds the prune lock, skipping') + return + } + + try { + this.logger?.info('Running scheduled archive prune') + const deleted = await this._pruneArchive({ + olderThan: config.olderThan, + batchSize: config.batchSize, + maxBatches: config.maxBatches, + }) + this.lastPrunedAt = new Date() + this.logger?.info({ deletedJobs: deleted }, 'Archive prune completed') + } finally { + await this.db.execute(sql`SELECT pg_advisory_unlock(${this._advisoryLockKey()})`) + } + } catch (error) { + this.logger?.error(error, 'Error in prune scheduler') + } + } + + // Run immediately on start, then on interval + run().catch((err) => this.logger?.error(err, 'Initial prune run failed')) + this.pruneTimer = setInterval(run, config.intervalMs) + } + + /** + * Stop the archive prune scheduler. 
+ */ + private _stopScheduler(): void { + if (this.pruneTimer) { + clearInterval(this.pruneTimer) + this.pruneTimer = null + } } // ============================================================================ @@ -180,9 +265,12 @@ export class PostgresBaseAdapter e */ protected async _completeJob({ jobId, output }: CompleteJobOptions) { return this.db.transaction(async (tx) => { - // 1. Delete job from active and get its data - const movedJob = await tx - .delete(this.tables.jobsActiveTable) + const finishedAt = new Date() + + // 1. Check job exists and meets conditions before archiving + const [job] = await tx + .select() + .from(this.tables.jobsActiveTable) .where( and( eq(this.tables.jobsActiveTable.id, jobId), @@ -191,49 +279,39 @@ export class PostgresBaseAdapter e gt(this.tables.jobsActiveTable.expires_at, sql`now()`), ), ) - .returning() - if (movedJob.length === 0) { + if (!job) { return false } - const job = movedJob[0]! - - // 2. Delete steps from active - const movedSteps = await tx - .delete(this.tables.jobStepsActiveTable) - .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) - .returning() - - // 3. Delete spans from active - const movedSpans = await tx - .delete(this.tables.spansActiveTable) - .where(eq(this.tables.spansActiveTable.job_id, jobId)) - .returning() - - // 4. Insert job into archive + // 2. Insert job into archive FIRST (required for FK constraints) await tx.insert(this.tables.jobsArchiveTable).values({ ...job, status: JOB_STATUS_COMPLETED, output, - finished_at: new Date(), - updated_at: new Date(), + finished_at: finishedAt, + updated_at: finishedAt, }) - // 5. Insert steps into archive - if (movedSteps.length > 0) { - await tx.insert(this.tables.jobStepsArchiveTable).values( - movedSteps.map((step) => ({ - ...step, - job_finished_at: job.finished_at, - })), + // 3. Archive steps using INSERT ... 
SELECT (SQL-native, no JS round-trip) + await tx.execute(sql` + INSERT INTO ${this.tables.jobStepsArchiveTable} ( + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, job_finished_at ) - } + SELECT + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, ${finishedAt.toISOString()} + FROM ${this.tables.jobStepsActiveTable} + WHERE job_id = ${jobId} + `) - // 6. Insert spans into archive - if (movedSpans.length > 0) { - await tx.insert(this.tables.spansArchiveTable).values(movedSpans) - } + // 4. Delete job from active (cascade deletes steps) + await tx.delete(this.tables.jobsActiveTable).where(eq(this.tables.jobsActiveTable.id, jobId)) return true }) @@ -246,8 +324,12 @@ export class PostgresBaseAdapter e */ protected async _failJob({ jobId, error }: FailJobOptions) { return this.db.transaction(async (tx) => { - const movedJob = await tx - .delete(this.tables.jobsActiveTable) + const finishedAt = new Date() + + // 1. Check job exists before archiving + const [job] = await tx + .select() + .from(this.tables.jobsActiveTable) .where( and( eq(this.tables.jobsActiveTable.id, jobId), @@ -255,44 +337,39 @@ export class PostgresBaseAdapter e eq(this.tables.jobsActiveTable.client_id, this.id), ), ) - .returning() - if (movedJob.length === 0) { + if (!job) { return false } - const job = movedJob[0]! - - const movedSteps = await tx - .delete(this.tables.jobStepsActiveTable) - .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) - .returning() - - const movedSpans = await tx - .delete(this.tables.spansActiveTable) - .where(eq(this.tables.spansActiveTable.job_id, jobId)) - .returning() - + // 2. 
Insert job into archive FIRST (required for FK constraints) await tx.insert(this.tables.jobsArchiveTable).values({ ...job, status: JOB_STATUS_FAILED, error, - finished_at: new Date(), - updated_at: new Date(), + finished_at: finishedAt, + updated_at: finishedAt, }) - if (movedSteps.length > 0) { - await tx.insert(this.tables.jobStepsArchiveTable).values( - movedSteps.map((step) => ({ - ...step, - job_finished_at: job.finished_at, - })), + // 3. Archive steps using INSERT ... SELECT + await tx.execute(sql` + INSERT INTO ${this.tables.jobStepsArchiveTable} ( + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, job_finished_at ) - } + SELECT + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, ${finishedAt.toISOString()} + FROM ${this.tables.jobStepsActiveTable} + WHERE job_id = ${jobId} + `) - if (movedSpans.length > 0) { - await tx.insert(this.tables.spansArchiveTable).values(movedSpans) - } + // 4. Delete job from active (cascade deletes steps) + await tx.delete(this.tables.jobsActiveTable).where(eq(this.tables.jobsActiveTable.id, jobId)) return true }) @@ -305,51 +382,63 @@ export class PostgresBaseAdapter e */ protected async _cancelJob({ jobId }: CancelJobOptions) { return this.db.transaction(async (tx) => { - const movedJob = await tx - .delete(this.tables.jobsActiveTable) + const finishedAt = new Date() + + // 1. Update all steps to cancelled status + await tx + .update(this.tables.jobStepsActiveTable) + .set({ + status: STEP_STATUS_CANCELLED, + finished_at: finishedAt, + updated_at: finishedAt, + }) + .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) + + // 2. 
Check job exists before archiving + const [job] = await tx + .select() + .from(this.tables.jobsActiveTable) .where( and( eq(this.tables.jobsActiveTable.id, jobId), - or(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), eq(this.tables.jobsActiveTable.status, JOB_STATUS_CREATED)), + or( + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + eq(this.tables.jobsActiveTable.status, JOB_STATUS_CREATED), + ), ), ) - .returning() - if (movedJob.length === 0) { + if (!job) { return false } - const job = movedJob[0]! - - const movedSteps = await tx - .delete(this.tables.jobStepsActiveTable) - .where(eq(this.tables.jobStepsActiveTable.job_id, jobId)) - .returning() - - const movedSpans = await tx - .delete(this.tables.spansActiveTable) - .where(eq(this.tables.spansActiveTable.job_id, jobId)) - .returning() - + // 3. Insert job into archive FIRST (required for FK constraints) await tx.insert(this.tables.jobsArchiveTable).values({ ...job, status: JOB_STATUS_CANCELLED, - finished_at: new Date(), - updated_at: new Date(), + finished_at: finishedAt, + updated_at: finishedAt, }) - if (movedSteps.length > 0) { - await tx.insert(this.tables.jobStepsArchiveTable).values( - movedSteps.map((step) => ({ - ...step, - job_finished_at: job.finished_at, - })), + // 4. Archive steps using INSERT ... 
SELECT + await tx.execute(sql` + INSERT INTO ${this.tables.jobStepsArchiveTable} ( + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, job_finished_at ) - } + SELECT + id, job_id, parent_step_id, branch, name, status, output, error, + started_at, finished_at, timeout_ms, expires_at, retries_limit, + retries_count, delayed_ms, history_failed_attempts, created_at, + updated_at, ${finishedAt.toISOString()} + FROM ${this.tables.jobStepsActiveTable} + WHERE job_id = ${jobId} + `) - if (movedSpans.length > 0) { - await tx.insert(this.tables.spansArchiveTable).values(movedSpans) - } + // 5. Delete job from active (cascade deletes steps) + await tx.delete(this.tables.jobsActiveTable).where(eq(this.tables.jobsActiveTable.id, jobId)) return true }) @@ -466,25 +555,92 @@ export class PostgresBaseAdapter e * @returns Promise resolving to `true` if time travel succeeded, `false` otherwise */ protected async _timeTravelJob({ jobId, stepId }: TimeTravelJobOptions): Promise { - const result = this._map( - await this.db.execute<{ success: boolean }>(sql` - WITH RECURSIVE - -- Lock and validate the job - locked_job AS ( - SELECT j.id - FROM ${this.tables.jobsActiveTable} j - WHERE j.id = ${jobId} - AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_FAILED}, ${JOB_STATUS_CANCELLED}) - FOR UPDATE OF j - ), - -- Validate target step exists and belongs to job - target_step AS ( - SELECT s.id, s.parent_step_id, s.created_at - FROM ${this.tables.jobStepsActiveTable} s - WHERE s.id = ${stepId} - AND s.job_id = ${jobId} - AND EXISTS (SELECT 1 FROM locked_job) - ), + return this.db.transaction(async (tx) => { + // First, check if the job is in the archive and restore it if needed + const archivedJob = await tx + .select() + .from(this.tables.jobsArchiveTable) + .where(eq(this.tables.jobsArchiveTable.id, jobId)) + .limit(1) + + if 
(archivedJob.length > 0) { + // Restore job from archive to active + const job = archivedJob[0]! + await tx.insert(this.tables.jobsActiveTable).values({ + id: job.id, + action_name: job.action_name, + group_key: job.group_key, + description: job.description, + status: job.status, + checksum: job.checksum, + input: job.input, + output: job.output, + error: job.error, + timeout_ms: job.timeout_ms, + expires_at: job.expires_at, + started_at: job.started_at, + finished_at: job.finished_at, + client_id: job.client_id, + concurrency_limit: job.concurrency_limit, + concurrency_step_limit: job.concurrency_step_limit, + created_at: job.created_at, + updated_at: job.updated_at, + }) + + // Restore steps from archive to active + const archivedSteps = await tx + .select() + .from(this.tables.jobStepsArchiveTable) + .where(eq(this.tables.jobStepsArchiveTable.job_id, jobId)) + + if (archivedSteps.length > 0) { + await tx.insert(this.tables.jobStepsActiveTable).values( + archivedSteps.map((s) => ({ + id: s.id, + job_id: s.job_id, + parent_step_id: s.parent_step_id, + parallel: s.parallel, + name: s.name, + status: s.status, + output: s.output, + error: s.error, + started_at: s.started_at, + finished_at: s.finished_at, + timeout_ms: s.timeout_ms, + expires_at: s.expires_at, + retries_limit: s.retries_limit, + retries_count: s.retries_count, + delayed_ms: s.delayed_ms, + history_failed_attempts: s.history_failed_attempts, + created_at: s.created_at, + updated_at: s.updated_at, + })), + ) + } + + // Delete archived job and steps (cascade via FK on steps) + await tx.delete(this.tables.jobsArchiveTable).where(eq(this.tables.jobsArchiveTable.id, jobId)) + } + + const result = this._map( + await tx.execute<{ success: boolean }>(sql` + WITH RECURSIVE + -- Lock and validate the job + locked_job AS ( + SELECT j.id + FROM ${this.tables.jobsActiveTable} j + WHERE j.id = ${jobId} + AND j.status IN (${JOB_STATUS_COMPLETED}, ${JOB_STATUS_FAILED}, ${JOB_STATUS_CANCELLED}) + FOR UPDATE OF j + ), 
+ -- Validate target step exists and belongs to job + target_step AS ( + SELECT s.id, s.parent_step_id, s.created_at + FROM ${this.tables.jobStepsActiveTable} s + WHERE s.id = ${stepId} + AND s.job_id = ${jobId} + AND EXISTS (SELECT 1 FROM locked_job) + ), -- Find all ancestor steps recursively (from target up to root) ancestors AS ( SELECT s.id, s.parent_step_id, 0 AS depth @@ -631,9 +787,10 @@ export class PostgresBaseAdapter e ) SELECT EXISTS(SELECT 1 FROM reset_job) AS success `), - ) + ) - return result.length > 0 && result[0]!.success === true + return result.length > 0 && result[0]!.success === true + }) } /** @@ -815,7 +972,10 @@ export class PostgresBaseAdapter e }) .from(this.tables.jobsActiveTable) .where( - and(eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), ne(this.tables.jobsActiveTable.client_id, this.id)), + and( + eq(this.tables.jobsActiveTable.status, JOB_STATUS_ACTIVE), + ne(this.tables.jobsActiveTable.client_id, this.id), + ), )) as unknown as { clientId: string }[] if (result.length > 0) { @@ -1340,7 +1500,10 @@ export class PostgresBaseAdapter e ? inArray(archiveTable.status, Array.isArray(filters.status) ? filters.status : [filters.status]) : undefined, filters.actionName - ? inArray(archiveTable.action_name, Array.isArray(filters.actionName) ? filters.actionName : [filters.actionName]) + ? inArray( + archiveTable.action_name, + Array.isArray(filters.actionName) ? filters.actionName : [filters.actionName], + ) : undefined, filters.groupKey && Array.isArray(filters.groupKey) ? sql`j.group_key LIKE ANY(ARRAY[${sql.raw(filters.groupKey.map((key) => `'${key}'`).join(','))}]::text[])` @@ -1420,8 +1583,8 @@ export class PostgresBaseAdapter e const statusFilter = filters.status const statuses = Array.isArray(statusFilter) ? statusFilter : statusFilter ? 
[statusFilter] : [] - const queryActive = statuses.length === 0 || statuses.some(s => (activeStatuses as string[]).includes(s)) - const queryArchive = statuses.length === 0 || statuses.some(s => (archiveStatuses as string[]).includes(s)) + const queryActive = statuses.length === 0 || statuses.some((s) => (activeStatuses as string[]).includes(s)) + const queryArchive = statuses.length === 0 || statuses.some((s) => (archiveStatuses as string[]).includes(s)) // Query active table let activeJobs: any[] = [] @@ -1681,45 +1844,53 @@ export class PostgresBaseAdapter e * Internal method to get action statistics including counts and last job created date. */ protected async _getActions(): Promise { - const actionStats = this.db.$with('action_stats').as( - this.db - .select({ - name: this.tables.jobsActiveTable.action_name, - last_job_created: sql`MAX(${this.tables.jobsActiveTable.created_at})`.as('last_job_created'), - active: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_ACTIVE})`.as( - 'active', - ), - completed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_COMPLETED})`.as( - 'completed', - ), - failed: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_FAILED})`.as( - 'failed', - ), - cancelled: sql`COUNT(*) FILTER (WHERE ${this.tables.jobsActiveTable.status} = ${JOB_STATUS_CANCELLED})`.as( - 'cancelled', - ), - }) - .from(this.tables.jobsActiveTable) - .groupBy(this.tables.jobsActiveTable.action_name), + const schemaName = this.schema + const result = this._map( + await this.db.execute<{ + name: string + last_job_created: Date | null + active: number + completed: number + failed: number + cancelled: number + }>(sql` + WITH combined_jobs AS ( + SELECT action_name, status, created_at + FROM ${sql.identifier(schemaName)}.jobs_active + UNION ALL + SELECT action_name, status, created_at + FROM ${sql.identifier(schemaName)}.jobs_archive + ) + SELECT + action_name AS name, + 
MAX(created_at) AS last_job_created, + COUNT(*) FILTER (WHERE status = ${JOB_STATUS_ACTIVE})::int AS active, + COUNT(*) FILTER (WHERE status = ${JOB_STATUS_COMPLETED})::int AS completed, + COUNT(*) FILTER (WHERE status = ${JOB_STATUS_FAILED})::int AS failed, + COUNT(*) FILTER (WHERE status = ${JOB_STATUS_CANCELLED})::int AS cancelled + FROM combined_jobs + GROUP BY action_name + ORDER BY action_name + `), ) - const actions = await this.db - .with(actionStats) - .select({ - name: actionStats.name, - lastJobCreated: actionStats.last_job_created, - active: sql`${actionStats.active}::int`, - completed: sql`${actionStats.completed}::int`, - failed: sql`${actionStats.failed}::int`, - cancelled: sql`${actionStats.cancelled}::int`, - }) - .from(actionStats) - .orderBy(actionStats.name) - return { - actions: actions.map((action) => ({ - ...action, - lastJobCreated: action.lastJobCreated ?? null, + actions: ( + result as Array<{ + name: string + last_job_created: Date | null + active: number + completed: number + failed: number + cancelled: number + }> + ).map((action) => ({ + name: action.name, + lastJobCreated: action.last_job_created ?? null, + active: action.active, + completed: action.completed, + failed: action.failed, + cancelled: action.cancelled, })), } } @@ -1730,6 +1901,7 @@ export class PostgresBaseAdapter e /** * Internal method to insert multiple span records in a single batch. + * Routes spans to active or archive table based on job location. 
*/ protected async _insertSpans(spans: InsertSpanOptions[]): Promise { if (spans.length === 0) { @@ -1753,9 +1925,9 @@ export class PostgresBaseAdapter e })) const result = await this.db - .insert(this.tables.spansActiveTable) + .insert(this.tables.spansTable) .values(values) - .returning({ id: this.tables.spansActiveTable.id }) + .returning({ id: this.tables.spansTable.id }) return result.length } @@ -1782,21 +1954,10 @@ export class PostgresBaseAdapter e return this._getStepSpansRecursive(options.stepId, sortField, sortOrder, filters) } - // Determine if job is active or archived - let isActive = true - if (options.jobId) { - const jobInActive = await this.db - .select({ id: this.tables.jobsActiveTable.id }) - .from(this.tables.jobsActiveTable) - .where(eq(this.tables.jobsActiveTable.id, options.jobId)) - .limit(1) - isActive = jobInActive.length > 0 - } - - const spansTable = isActive ? this.tables.spansActiveTable : this.tables.spansArchiveTable + const spansTable = this.tables.spansTable // Build WHERE clause for job queries - const where = this._buildSpansWhereClause(options.jobId, undefined, filters, isActive) + const where = this._buildSpansWhereClause(options.jobId, undefined, filters) // Get total count const total = await this.db.$count(spansTable, where) @@ -1862,19 +2023,13 @@ export class PostgresBaseAdapter e ): Promise { const schemaName = this.schema - // Query both active and archive spans tables const query = sql` WITH RECURSIVE span_tree AS ( - -- Base case: the span(s) for the step (check both tables) - SELECT * FROM ${sql.identifier(schemaName)}.spans_active WHERE step_id = ${stepId}::uuid - UNION - SELECT * FROM ${sql.identifier(schemaName)}.spans_archive WHERE step_id = ${stepId}::uuid + -- Base case: the span(s) for the step + SELECT * FROM ${sql.identifier(schemaName)}.spans WHERE step_id = ${stepId}::uuid UNION ALL - -- Recursive case: children of spans we've found (check both tables) - SELECT s.* FROM 
${sql.identifier(schemaName)}.spans_active s - INNER JOIN span_tree st ON s.parent_span_id = st.span_id - UNION - SELECT s.* FROM ${sql.identifier(schemaName)}.spans_archive s + -- Recursive case: children of spans we've found + SELECT s.* FROM ${sql.identifier(schemaName)}.spans s INNER JOIN span_tree st ON s.parent_span_id = st.span_id ) SELECT @@ -1940,18 +2095,12 @@ export class PostgresBaseAdapter e * Internal method to delete all spans for a job. */ protected async _deleteSpans(options: DeleteSpansOptions): Promise { - // Delete from both tables to be safe - const activeResult = await this.db - .delete(this.tables.spansActiveTable) - .where(eq(this.tables.spansActiveTable.job_id, options.jobId)) - .returning({ id: this.tables.spansActiveTable.id }) - - const archiveResult = await this.db - .delete(this.tables.spansArchiveTable) - .where(eq(this.tables.spansArchiveTable.job_id, options.jobId)) - .returning({ id: this.tables.spansArchiveTable.id }) - - return activeResult.length + archiveResult.length + const result = await this.db + .delete(this.tables.spansTable) + .where(eq(this.tables.spansTable.job_id, options.jobId)) + .returning({ id: this.tables.spansTable.id }) + + return result.length } /** @@ -1963,8 +2112,8 @@ export class PostgresBaseAdapter e * Note: Step queries are handled separately by _getStepSpansRecursive using * a recursive CTE to traverse the span hierarchy. */ - protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters'], isActive: boolean = true) { - const spansTable = isActive ? 
this.tables.spansActiveTable : this.tables.spansArchiveTable + protected _buildSpansWhereClause(jobId?: string, _stepId?: string, filters?: GetSpansOptions['filters']) { + const spansTable = this.tables.spansTable // Build condition for finding spans by trace_id (includes external spans) let traceCondition: ReturnType | undefined @@ -2141,23 +2290,142 @@ export class PostgresBaseAdapter e // ============================================================================ // Archive Methods (Stub implementations - to be filled in) // ============================================================================ + // Archive Methods + // ============================================================================ - protected async _pruneArchive(_options: any): Promise { - return 0 + /** + * Parse olderThan option into a Date threshold. + * Supports: string (e.g. "7d", "1h"), Date, or number (timestamp ms). + */ + private _parseOlderThan(olderThan: string | Date | number): Date { + if (olderThan instanceof Date) { + return olderThan + } + + if (typeof olderThan === 'number') { + return new Date(olderThan) + } + + // Parse duration string like "7d", "1h", "30m", "10s", "500ms" + const match = olderThan.match(/^(\d+)\s*(ms|d|h|m|s)$/i) + if (!match) { + throw new Error( + `Invalid olderThan format: ${olderThan}. Expected: "7d", "1h", "30m", "10s", "500ms", Date, or number`, + ) + } + + const value = parseInt(match[1]!, 10) + const unit = match[2]!.toLowerCase() + const now = Date.now() + + const multipliers: Record = { + d: 24 * 60 * 60 * 1000, + h: 60 * 60 * 1000, + m: 60 * 1000, + s: 1000, + ms: 1, + } + + const ms = value * (multipliers[unit] ?? 0) + return new Date(now - ms) + } + + protected async _pruneArchive(options: PruneArchiveOptions): Promise { + const threshold = this._parseOlderThan(options.olderThan) + const batchSize = options.batchSize ?? 1000 + const maxBatches = options.maxBatches ?? 
100 + const schemaName = this.schema + + let totalDeleted = 0 + + for (let batch = 0; batch < maxBatches; batch++) { + const result = this._map( + await this.db.execute<{ id: string }>(sql` + WITH ids_to_delete AS ( + SELECT id FROM ${sql.identifier(schemaName)}.jobs_archive + WHERE finished_at < ${threshold.toISOString()} + LIMIT ${batchSize} + ), + deleted_spans AS ( + DELETE FROM ${sql.identifier(schemaName)}.spans s + USING ids_to_delete d + WHERE s.job_id = d.id + ) + DELETE FROM ${sql.identifier(schemaName)}.jobs_archive j + USING ids_to_delete d + WHERE j.id = d.id + RETURNING j.id + `), + ) + + if (!result || result.length === 0) { + break + } + + totalDeleted += result.length + } + + // Clean up orphan spans (spans whose job no longer exists in active or archive) + if (totalDeleted > 0) { + await this.db.execute(sql` + DELETE FROM ${sql.identifier(schemaName)}.spans s + WHERE s.job_id IS NOT NULL + AND NOT EXISTS ( + SELECT 1 FROM ${sql.identifier(schemaName)}.jobs_active ja WHERE ja.id = s.job_id + ) + AND NOT EXISTS ( + SELECT 1 FROM ${sql.identifier(schemaName)}.jobs_archive ja WHERE ja.id = s.job_id + ) + `) + } + + return totalDeleted } protected async _truncateArchive(): Promise { - // TODO: Implement + const schemaName = this.schema + await this.db.execute(sql`TRUNCATE TABLE ${sql.identifier(schemaName)}.jobs_archive CASCADE`) + // Note: We do NOT truncate spans here because spans may belong to active jobs. + // Spans for archived jobs become orphans until cleaned up by prune operations. 
} - protected async _getArchiveStats(): Promise { + protected async _getArchiveStats(): Promise { + const schemaName = this.schema + + const [jobsResult, stepsResult, spansResult, oldestResult] = await Promise.all([ + this.db + .execute<{ count: number }>(sql` + SELECT COUNT(*)::int as count FROM ${sql.identifier(schemaName)}.jobs_archive + `) + .then((r) => this._map(r)), + this.db + .execute<{ count: number }>(sql` + SELECT COUNT(*)::int as count FROM ${sql.identifier(schemaName)}.job_steps_archive + `) + .then((r) => this._map(r)), + this.db + .execute<{ count: number }>(sql` + SELECT COUNT(*)::int as count FROM ${sql.identifier(schemaName)}.spans + `) + .then((r) => this._map(r)), + this.db + .execute<{ finished_at: Date | null }>(sql` + SELECT finished_at FROM ${sql.identifier(schemaName)}.jobs_archive + ORDER BY finished_at ASC + LIMIT 1 + `) + .then((r) => this._map(r)), + ]) + + const oldestDate = oldestResult[0]?.finished_at ? new Date(oldestResult[0].finished_at) : null + return { - jobsCount: 0, - stepsCount: 0, - spansCount: 0, - oldestJobDate: null, + jobsCount: Number(jobsResult[0]?.count ?? 0), + stepsCount: Number(stepsResult[0]?.count ?? 0), + spansCount: Number(spansResult[0]?.count ?? 
0), + oldestJobDate: oldestDate, totalSizeBytes: null, - lastPrunedAt: null, + lastPrunedAt: this.lastPrunedAt, } } } diff --git a/packages/duron/src/adapters/postgres/schema.default.ts b/packages/duron/src/adapters/postgres/schema.default.ts index 4bfcb1d..eb094cd 100644 --- a/packages/duron/src/adapters/postgres/schema.default.ts +++ b/packages/duron/src/adapters/postgres/schema.default.ts @@ -1,21 +1,6 @@ import createSchema from './schema.js' -const { - schema, - jobsActiveTable, - jobsArchiveTable, - jobStepsActiveTable, - jobStepsArchiveTable, - spansActiveTable, - spansArchiveTable, -} = createSchema('duron') +const { schema, jobsActiveTable, jobsArchiveTable, jobStepsActiveTable, jobStepsArchiveTable, spansTable } = + createSchema('duron') -export { - schema, - jobsActiveTable, - jobsArchiveTable, - jobStepsActiveTable, - jobStepsArchiveTable, - spansActiveTable, - spansArchiveTable, -} +export { schema, jobsActiveTable, jobsArchiveTable, jobStepsActiveTable, jobStepsArchiveTable, spansTable } diff --git a/packages/duron/src/adapters/postgres/schema.ts b/packages/duron/src/adapters/postgres/schema.ts index 8f88214..2c6a623 100644 --- a/packages/duron/src/adapters/postgres/schema.ts +++ b/packages/duron/src/adapters/postgres/schema.ts @@ -136,15 +136,15 @@ export default function createSchema(schemaName: string) { ], ) - const spansActiveTable = schema.table( - 'spans_active', + const spansTable = schema.table( + 'spans', { id: bigserial('id', { mode: 'number' }).primaryKey(), trace_id: text('trace_id').notNull(), span_id: text('span_id').notNull(), parent_span_id: text('parent_span_id'), - job_id: uuid('job_id').references(() => jobsActiveTable.id, { onDelete: 'cascade' }), - step_id: uuid('step_id').references(() => jobStepsActiveTable.id, { onDelete: 'cascade' }), + job_id: uuid('job_id'), + step_id: uuid('step_id'), name: text('name').notNull(), kind: integer('kind').notNull().default(0), start_time_unix_nano: bigint('start_time_unix_nano', { mode: 
'bigint' }).notNull(), @@ -159,22 +159,22 @@ export default function createSchema(schemaName: string) { }, (table) => [ // Single column indexes - index('idx_spans_active_trace_id').on(table.trace_id), - index('idx_spans_active_span_id').on(table.span_id), - index('idx_spans_active_job_id').on(table.job_id), - index('idx_spans_active_step_id').on(table.step_id), - index('idx_spans_active_name').on(table.name), - index('idx_spans_active_kind').on(table.kind), - index('idx_spans_active_status_code').on(table.status_code), + index('idx_spans_trace_id').on(table.trace_id), + index('idx_spans_span_id').on(table.span_id), + index('idx_spans_job_id').on(table.job_id), + index('idx_spans_step_id').on(table.step_id), + index('idx_spans_name').on(table.name), + index('idx_spans_kind').on(table.kind), + index('idx_spans_status_code').on(table.status_code), // Composite indexes - index('idx_spans_active_job_step').on(table.job_id, table.step_id), - index('idx_spans_active_trace_parent').on(table.trace_id, table.parent_span_id), + index('idx_spans_job_step').on(table.job_id, table.step_id), + index('idx_spans_trace_parent').on(table.trace_id, table.parent_span_id), // GIN indexes - index('idx_spans_active_attributes').using('gin', table.attributes), - index('idx_spans_active_events').using('gin', table.events), + index('idx_spans_attributes').using('gin', table.attributes), + index('idx_spans_events').using('gin', table.events), // Constraints - check('spans_active_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), - check('spans_active_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), + check('spans_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), + check('spans_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), ], ) @@ -202,9 +202,7 @@ export default function createSchema(schemaName: string) { concurrency_limit: integer('concurrency_limit').notNull(), concurrency_step_limit: integer('concurrency_step_limit').notNull(), created_at: 
timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), - updated_at: timestamp('updated_at', { withTimezone: true }) - .notNull() - .defaultNow(), + updated_at: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(), }, (table) => [ // Lookup indexes @@ -227,7 +225,9 @@ export default function createSchema(schemaName: string) { 'job_steps_archive', { id: uuid('id').primaryKey(), - job_id: uuid('job_id').notNull(), + job_id: uuid('job_id') + .notNull() + .references(() => jobsArchiveTable.id, { onDelete: 'cascade' }), parent_step_id: uuid('parent_step_id'), parallel: boolean('branch').notNull().default(false), name: text('name').notNull(), @@ -246,9 +246,7 @@ export default function createSchema(schemaName: string) { .notNull() .default({}), created_at: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), - updated_at: timestamp('updated_at', { withTimezone: true }) - .notNull() - .defaultNow(), + updated_at: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(), // Denormalized for easier time-based pruning job_finished_at: timestamp('job_finished_at', { withTimezone: true }), }, @@ -264,45 +262,12 @@ export default function createSchema(schemaName: string) { ], ) - const spansArchiveTable = schema.table( - 'spans_archive', - { - id: bigserial('id', { mode: 'number' }).primaryKey(), - trace_id: text('trace_id').notNull(), - span_id: text('span_id').notNull(), - parent_span_id: text('parent_span_id'), - job_id: uuid('job_id'), - step_id: uuid('step_id'), - name: text('name').notNull(), - kind: integer('kind').notNull().default(0), - start_time_unix_nano: bigint('start_time_unix_nano', { mode: 'bigint' }).notNull(), - end_time_unix_nano: bigint('end_time_unix_nano', { mode: 'bigint' }), - status_code: integer('status_code').notNull().default(0), - status_message: text('status_message'), - attributes: jsonb('attributes').$type>().notNull().default({}), - events: jsonb('events') - .$type }>>() - 
.notNull() - .default([]), - }, - (table) => [ - // Minimal indexes - index('idx_spans_archive_trace_id').on(table.trace_id), - index('idx_spans_archive_job_id').on(table.job_id), - index('idx_spans_archive_step_id').on(table.step_id), - // Constraints - check('spans_archive_kind_check', sql`${table.kind} IN (0, 1, 2, 3, 4)`), - check('spans_archive_status_code_check', sql`${table.status_code} IN (0, 1, 2)`), - ], - ) - return { schema, jobsActiveTable, jobsArchiveTable, jobStepsActiveTable, jobStepsArchiveTable, - spansActiveTable, - spansArchiveTable, + spansTable, } } diff --git a/packages/duron/src/client.ts b/packages/duron/src/client.ts index 64bf5b1..f7e9693 100644 --- a/packages/duron/src/client.ts +++ b/packages/duron/src/client.ts @@ -11,6 +11,7 @@ import type { Action, ConcurrencyHandlerContext } from './action.js' import { ActionManager } from './action-manager.js' import type { Adapter, + ArchiveStats, GetActionsResult, GetJobStepsOptions, GetJobStepsResult, @@ -20,6 +21,7 @@ import type { GetSpansResult, Job, JobStep, + PruneArchiveOptions, } from './adapters/adapter.js' import type { JobStatusResult, JobStepStatusResult } from './adapters/schemas.js' import { JOB_STATUS_CANCELLED, JOB_STATUS_COMPLETED, JOB_STATUS_FAILED, type JobStatus } from './constants.js' @@ -1072,6 +1074,42 @@ export class Client< }) } + // ============================================================================ + // Archive Methods + // ============================================================================ + + /** + * Get archive statistics including counts and oldest job date. + * + * @returns Promise resolving to archive statistics + */ + async getArchiveStats(): Promise { + await this.start() + return this.#database.getArchiveStats() + } + + /** + * Prune archived jobs older than the specified threshold. 
+ * + * @param options - Prune options including olderThan, batchSize, maxBatches + * @returns Promise resolving to number of deleted jobs + */ + async pruneArchive(options: PruneArchiveOptions): Promise { + await this.start() + return this.#database.pruneArchive(options) + } + + /** + * Truncate all archive data (jobs, steps, spans). + * This is a destructive operation - use with caution. + * + * @returns Promise resolving when complete + */ + async truncateArchive(): Promise { + await this.start() + return this.#database.truncateArchive() + } + // ============================================================================ // Lifecycle Methods // ============================================================================ diff --git a/packages/duron/src/server.ts b/packages/duron/src/server.ts index 780608b..3d5ce6e 100644 --- a/packages/duron/src/server.ts +++ b/packages/duron/src/server.ts @@ -668,6 +668,76 @@ export function createServer

({ client, prefix, login, spansEna auth: true, }, ) + .get( + '/archive/stats', + async () => { + return client.getArchiveStats() + }, + { + response: { + 200: z.object({ + jobsCount: z.number(), + stepsCount: z.number(), + spansCount: z.number(), + oldestJobDate: z.date().nullable(), + totalSizeBytes: z.number().nullable(), + lastPrunedAt: z.date().nullable(), + }), + 400: ErrorResponseSchema, + 500: ErrorResponseSchema, + 401: ErrorResponseSchema, + }, + auth: true, + }, + ) + .post( + '/archive/prune', + async ({ body }) => { + const deleted = await client.pruneArchive(body) + return { deletedJobs: deleted } + }, + { + body: z.object({ + olderThan: z.union([z.string(), z.coerce.date(), z.number()]), + batchSize: z.number().optional(), + maxBatches: z.number().optional(), + }), + response: { + 200: z.object({ + deletedJobs: z.number(), + }), + 400: ErrorResponseSchema, + 500: ErrorResponseSchema, + 401: ErrorResponseSchema, + }, + auth: true, + }, + ) + .post( + '/archive/truncate', + async ({ body }) => { + const { confirm } = body + if (!confirm) { + throw new Error('Confirmation required. Set confirm: true to truncate all archive data.') + } + await client.truncateArchive() + return { success: true } + }, + { + body: z.object({ + confirm: z.boolean(), + }), + response: { + 200: z.object({ + success: z.boolean(), + }), + 400: ErrorResponseSchema, + 500: ErrorResponseSchema, + 401: ErrorResponseSchema, + }, + auth: true, + }, + ) .get( '/config', async () => { diff --git a/packages/duron/src/step-manager.ts b/packages/duron/src/step-manager.ts index 3d06b84..bb0eaab 100644 --- a/packages/duron/src/step-manager.ts +++ b/packages/duron/src/step-manager.ts @@ -75,6 +75,10 @@ function injectParentSpan(ctx: Context, parentSpan: Span | null): Context { * to the current job/step trace hierarchy. 
*/ function createContextAwareTracer(tracer: Tracer, parentSpan: Span | null): Tracer { + // Extract duron.job.id and duron.step.id from parent span attributes for propagation + const parentJobId = parentSpan ? (parentSpan as any).attributes?.['duron.job.id'] : undefined + const parentStepId = parentSpan ? (parentSpan as any).attributes?.['duron.step.id'] : undefined + return { startSpan(name: string, options?: SpanOptions, ctx?: Context): Span { // Always inject our parent span into the context, regardless of what context is passed. @@ -83,7 +87,13 @@ function createContextAwareTracer(tracer: Tracer, parentSpan: Span | null): Trac // would otherwise create orphan spans. const baseContext = ctx ?? context.active() const effectiveContext = injectParentSpan(baseContext, parentSpan) - return tracer.startSpan(name, options, effectiveContext) + // Propagate duron.job.id and duron.step.id so spans can be queried by job + const attributes = { + ...(parentJobId ? { 'duron.job.id': parentJobId } : {}), + ...(parentStepId ? { 'duron.step.id': parentStepId } : {}), + ...options?.attributes, + } + return tracer.startSpan(name, { ...options, attributes }, effectiveContext) }, // startActiveSpan has multiple overloads, we need to handle them all startActiveSpan unknown>( @@ -123,6 +133,10 @@ function createContextAwareTracer(tracer: Tracer, parentSpan: Span | null): Trac * Create a TelemetryContext that wraps an OTel span. */ function createTelemetryContext(span: Span | null, tracer: Tracer): TelemetryContext { + // Extract duron.job.id and duron.step.id from parent span attributes for propagation + const parentJobId = span ? (span as any).attributes?.['duron.job.id'] : undefined + const parentStepId = span ? (span as any).attributes?.['duron.step.id'] : undefined + return { getActiveSpan(): Span | undefined { return span ?? 
undefined @@ -133,8 +147,14 @@ function createTelemetryContext(span: Span | null, tracer: Tracer): TelemetryCon }, startSpan(name: string, options?: { attributes?: Record }): Span { // Create a child span linked to the current span (job or step) + // Propagate duron.job.id and duron.step.id from parent so spans can be queried by job + const attributes = { + ...(parentJobId ? { 'duron.job.id': parentJobId } : {}), + ...(parentStepId ? { 'duron.step.id': parentStepId } : {}), + ...options?.attributes, + } const parentContext = span ? trace.setSpan(context.active(), span) : context.active() - return tracer.startSpan(name, { attributes: options?.attributes }, parentContext) + return tracer.startSpan(name, { attributes }, parentContext) }, recordMetric(name: string, value: number, attributes?: Record): void { if (span) { diff --git a/packages/duron/test/archive.test.ts b/packages/duron/test/archive.test.ts new file mode 100644 index 0000000..6cdf493 --- /dev/null +++ b/packages/duron/test/archive.test.ts @@ -0,0 +1,321 @@ +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' + +import { + JOB_STATUS_ACTIVE, + JOB_STATUS_CANCELLED, + JOB_STATUS_COMPLETED, + JOB_STATUS_CREATED, + JOB_STATUS_FAILED, +} from '../src/constants.js' +import { type Adapter, type AdapterFactory, pgliteFactory, postgresFactory } from './adapters.js' +import { expectToBeDefined } from './asserts.js' + +function runArchiveTests(adapterFactory: AdapterFactory) { + describe(`Archive Tests with ${adapterFactory.name}`, () => { + let adapter: Adapter + let deleteDb: () => Promise + + beforeEach( + async () => { + const adapterInstance = await adapterFactory.create() + adapter = adapterInstance.adapter + deleteDb = adapterInstance.deleteDb + adapter.setId('test-adapter') + await adapter.start() + }, + { + timeout: 60_000, + }, + ) + + afterEach(async () => { + if (adapter) { + await adapter.stop() + } + if (deleteDb) { + await deleteDb() + } + }) + + it('should archive completed job 
and query it', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: { value: 42 }, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + // Fetch to activate + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBe(1) + expect(fetched[0]?.status).toBe(JOB_STATUS_ACTIVE) + + // Complete the job + const completed = await adapter.completeJob({ jobId, output: { result: 'done' } }) + expect(completed).toBe(true) + + // Should find in archive via getJobById + const job = await adapter.getJobById(jobId) + expectToBeDefined(job) + expect(job.status).toBe(JOB_STATUS_COMPLETED) + expect(job.output).toEqual({ result: 'done' }) + }) + + it('should archive failed job', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBe(1) + + const failed = await adapter.failJob({ + jobId, + error: { name: 'Error', message: 'Test failure', stack: '' }, + }) + expect(failed).toBe(true) + + const job = await adapter.getJobById(jobId) + expectToBeDefined(job) + expect(job.status).toBe(JOB_STATUS_FAILED) + }) + + it('should archive cancelled job', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const cancelled = await adapter.cancelJob({ jobId }) + expect(cancelled).toBe(true) + + const job = await adapter.getJobById(jobId) + expectToBeDefined(job) + expect(job.status).toBe(JOB_STATUS_CANCELLED) + }) + + it('should get archive stats', async () => { + // Create and 
complete 2 jobs + for (let i = 0; i < 2; i++) { + const jobId = await adapter.createJob({ + queue: `test-action-${i}`, + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: `abc${i}`, + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBeGreaterThan(0) + + await adapter.completeJob({ jobId, output: {} }) + } + + const stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(2) + expect(stats.oldestJobDate).not.toBeNull() + }) + + it('should prune old jobs', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBeGreaterThan(0) + + await adapter.completeJob({ jobId, output: {} }) + + // Verify in archive + let stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(1) + + // Wait a tiny bit + await new Promise((resolve) => setTimeout(resolve, 50)) + + // Prune with old threshold + const deleted = await adapter.pruneArchive({ + olderThan: '1ms', + batchSize: 100, + maxBatches: 1, + }) + expect(deleted).toBe(1) + + stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(0) + }) + + it('should not prune recent jobs', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBeGreaterThan(0) + + await adapter.completeJob({ jobId, output: {} }) + + const deleted = await adapter.pruneArchive({ + olderThan: '7d', + batchSize: 100, + maxBatches: 1, + }) + 
expect(deleted).toBe(0) + + const stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(1) + }) + + it('should truncate archive', async () => { + for (let i = 0; i < 3; i++) { + const jobId = await adapter.createJob({ + queue: `test-action-${i}`, + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: `abc${i}`, + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBeGreaterThan(0) + + await adapter.completeJob({ jobId, output: {} }) + } + + let stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(3) + + await adapter.truncateArchive() + + stats = await adapter.getArchiveStats() + expect(stats.jobsCount).toBe(0) + expect(stats.stepsCount).toBe(0) + expect(stats.spansCount).toBe(0) + }) + + it('should query archived job by status filter', async () => { + const activeJobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'active', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(activeJobId) + + const completedJobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'completed', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(completedJobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBe(2) + + await adapter.completeJob({ jobId: completedJobId, output: {} }) + + const activeJobs = await adapter.getJobs({ + filters: { status: JOB_STATUS_ACTIVE }, + }) + expect(activeJobs.jobs.length).toBe(1) + expect(activeJobs.jobs[0]?.id).toBe(activeJobId) + + const completedJobs = await adapter.getJobs({ + filters: { status: JOB_STATUS_COMPLETED }, + }) + expect(completedJobs.jobs.length).toBe(1) + expect(completedJobs.jobs[0]?.id).toBe(completedJobId) + }) + + it('should restore 
archived job for time travel', async () => { + const jobId = await adapter.createJob({ + queue: 'test-action', + groupKey: 'test-group', + input: {}, + timeoutMs: 10000, + checksum: 'abc123', + concurrencyLimit: 10, + concurrencyStepLimit: 10, + }) + expectToBeDefined(jobId) + + const fetched = await adapter.fetch({ batch: 10 }) + expect(fetched.length).toBeGreaterThan(0) + + const step = await adapter.createOrRecoverJobStep({ + jobId, + name: 'test-step', + timeoutMs: 10000, + retriesLimit: 0, + }) + expectToBeDefined(step) + + await adapter.completeJobStep({ + stepId: step.id, + output: { done: true }, + }) + + await adapter.completeJob({ jobId, output: { result: 'done' } }) + + // Verify archived + let job = await adapter.getJobById(jobId) + expect(job?.status).toBe(JOB_STATUS_COMPLETED) + + // Time travel + const success = await adapter.timeTravelJob({ jobId, stepId: step.id }) + expect(success).toBe(true) + + // Should be restored + job = await adapter.getJobById(jobId) + expectToBeDefined(job) + expect(job.status).toBe(JOB_STATUS_CREATED) + }) + }) +} + +runArchiveTests(pgliteFactory) +runArchiveTests(postgresFactory) diff --git a/packages/shared-actions/package.json b/packages/shared-actions/package.json index f4f138f..1858bf9 100644 --- a/packages/shared-actions/package.json +++ b/packages/shared-actions/package.json @@ -8,7 +8,8 @@ "zod": "^4.1.12" }, "devDependencies": { - "@types/bun": "latest" + "@types/bun": "latest", + "duron": "workspace:*" }, "peerDependencies": { "typescript": "^5" diff --git a/packages/shared-actions/test/process-order.test.ts b/packages/shared-actions/test/process-order.test.ts new file mode 100644 index 0000000..67055c9 --- /dev/null +++ b/packages/shared-actions/test/process-order.test.ts @@ -0,0 +1,385 @@ +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' + +import { Client } from 'duron' +import { defineAction } from 'duron/action' +import { JOB_STATUS_COMPLETED } from 'duron/constants' +import { z } from 
'zod' + +import { pgliteFactory } from '../node_modules/duron/test/adapters.js' + +// Test version of processOrder without AI dependency +const testProcessOrder = defineAction()({ + name: 'processOrder', + input: z.object({ + orderId: z.string().min(1), + customerId: z.string().min(1), + items: z + .array( + z.object({ + productId: z.string(), + quantity: z.number().min(1), + price: z.number().min(0), + }), + ) + .min(1), + paymentMethod: z.enum(['credit_card', 'paypal', 'bank_transfer']).default('credit_card'), + shippingAddress: z.object({ + street: z.string(), + city: z.string(), + country: z.string(), + postalCode: z.string(), + }), + }), + output: z.object({ + orderId: z.string(), + status: z.enum(['completed', 'failed']), + transactionId: z.string().nullable(), + shipmentId: z.string().nullable(), + timeline: z.array( + z.object({ + step: z.string(), + status: z.enum(['success', 'failed']), + timestamp: z.string(), + details: z.string().optional(), + }), + ), + }), + steps: { + concurrency: 10, + retry: { + limit: 1, + }, + }, + handler: async (ctx) => { + const { orderId, customerId, items, shippingAddress } = ctx.input + const timeline: Array<{ + step: string + status: 'success' | 'failed' + timestamp: string + details?: string + }> = [] + const totalAmount = items.reduce((sum, item) => sum + item.price * item.quantity, 0) + + const addTimeline = (step: string, status: 'success' | 'failed', details?: string) => { + timeline.push({ step, status, timestamp: new Date().toISOString(), details }) + } + + // Step 1: Validate Order + const validation = await ctx.step('validate-order', async ({ step: nestedStep }) => { + const inventoryCheck = await nestedStep('check-inventory', async () => { + const allInStock = items.every((item) => item.quantity <= 10) + addTimeline('check-inventory', allInStock ? 
'success' : 'failed', `Checked ${items.length} items`) + return { allInStock, checkedItems: items.length } + }) + + const customerVerification = await nestedStep('verify-customer', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const isValid = customerId.length > 0 + addTimeline('verify-customer', isValid ? 'success' : 'failed', `Customer: ${customerId}`) + return { isValid, customerId } + }) + + addTimeline( + 'validate-order', + inventoryCheck.allInStock && customerVerification.isValid ? 'success' : 'failed', + `Inventory: ${inventoryCheck.allInStock}, Customer: ${customerVerification.isValid}`, + ) + + return { + isValid: inventoryCheck.allInStock && customerVerification.isValid, + inventoryCheck, + customerVerification, + } + }) + + if (!validation.isValid) { + return { + orderId, + status: 'failed' as const, + transactionId: null, + shipmentId: null, + timeline, + } + } + + // Step 2: Process Payment + const payment = await ctx.step( + 'process-payment', + async ({ step: paymentStep }) => { + const authorization = await paymentStep('authorize-payment', async ({ step: authStep }) => { + const fraudCheck = await authStep('fraud-check', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const isSafe = totalAmount < 10000 + addTimeline('fraud-check', isSafe ? 'success' : 'failed', `Amount: $${totalAmount.toFixed(2)}`) + return { isSafe, riskScore: isSafe ? 
0.1 : 0.9 } + }) + + if (!fraudCheck.isSafe) { + addTimeline('authorize-payment', 'failed', 'Fraud check failed') + return { authorized: false, authCode: null, fraudCheck } + } + + await new Promise((resolve) => setTimeout(resolve, 50)) + const authCode = `AUTH-${Date.now()}` + addTimeline('authorize-payment', 'success', `Auth code: ${authCode}`) + return { authorized: true, authCode, fraudCheck } + }) + + if (!authorization.authorized) { + addTimeline('process-payment', 'failed', 'Authorization failed') + return { success: false, transactionId: null, authorization } + } + + const capture = await paymentStep('capture-payment', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const transactionId = `TXN-${Date.now()}` + addTimeline('capture-payment', 'success', `Transaction: ${transactionId}`) + return { captured: true, transactionId } + }) + + addTimeline('process-payment', 'success', `Transaction ID: ${capture.transactionId}`) + return { + success: true, + transactionId: capture.transactionId, + authorization, + } + }, + { expire: 60_000 }, + ) + + if (!payment.success) { + return { + orderId, + status: 'failed' as const, + transactionId: null, + shipmentId: null, + timeline, + } + } + + // Step 3: Fulfill Order + const fulfillment = await ctx.step('fulfill-order', async ({ step: fulfillStep }) => { + const reservation = await fulfillStep('reserve-inventory', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const reservationId = `RES-${Date.now()}` + addTimeline('reserve-inventory', 'success', `Reserved ${items.length} items`) + return { reserved: true, reservationId } + }) + + const shipment = await fulfillStep('create-shipment', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const shipmentId = `SHIP-${Date.now()}` + addTimeline('create-shipment', 'success', `Shipment to ${shippingAddress.city}`) + return { shipmentId, carrier: 'FastShip', estimatedDays: 3 } + }) + + 
addTimeline('fulfill-order', 'success', `Shipment: ${shipment.shipmentId}`) + return { reservation, shipment } + }) + + // Step 4: Send Notifications + await ctx.step('send-notifications', async ({ step: notifyStep }) => { + const [emailResult, smsResult] = await Promise.all([ + notifyStep('email-confirmation', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('email-confirmation', 'success', `Sent to customer ${customerId}`) + return { sent: true, type: 'email' } + }), + notifyStep('sms-notification', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sms-notification', 'success', 'Order confirmation SMS sent') + return { sent: true, type: 'sms' } + }), + ]) + + addTimeline('send-notifications', 'success', `Email: ${emailResult.sent}, SMS: ${smsResult.sent}`) + return { email: emailResult, sms: smsResult } + }) + + // Step 5: Post-Order Processing (Promise.all of steps) + await ctx.step('post-order-processing', async (ctx) => { + await Promise.all([ + ctx.step( + 'analytics-tracking', + async ({ step: analyticsStep }) => { + const purchase = await analyticsStep('track-purchase', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('track-purchase', 'success', `Tracked order ${orderId}`) + return { eventId: `EVT-${Date.now()}`, type: 'purchase' } + }) + + const recommendations = await analyticsStep('update-recommendations', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('update-recommendations', 'success', `Updated for ${items.length} products`) + return { updated: true, productsAnalyzed: items.length } + }) + + addTimeline('analytics-tracking', 'success', 'Analytics updated') + return { purchase, recommendations } + }, + { parallel: true }, + ), + + ctx.step( + 'loyalty-update', + async ({ step: loyaltyStep }) => { + const points = await loyaltyStep('calculate-points', async () => { + await new Promise((resolve) => 
setTimeout(resolve, 50)) + const earnedPoints = Math.floor(totalAmount * 10) + addTimeline('calculate-points', 'success', `Earned ${earnedPoints} points`) + return { earnedPoints, multiplier: 1.0 } + }) + + const tier = await loyaltyStep('update-tier', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const newTier = totalAmount > 500 ? 'gold' : totalAmount > 100 ? 'silver' : 'bronze' + addTimeline('update-tier', 'success', `Tier: ${newTier}`) + return { tier: newTier, upgraded: totalAmount > 500 } + }) + + addTimeline('loyalty-update', 'success', `${points.earnedPoints} points, tier: ${tier.tier}`) + return { points, tier } + }, + { parallel: true }, + ), + + ctx.step( + 'partner-sync', + async ({ step: syncStep }) => { + const supplier = await syncStep('sync-supplier', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sync-supplier', 'success', 'Supplier inventory updated') + return { synced: true, supplierId: 'SUP-001' } + }) + + const warehouse = await syncStep('sync-warehouse', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sync-warehouse', 'success', 'Warehouse notified for picking') + return { synced: true, warehouseId: 'WH-MAIN' } + }) + + addTimeline('partner-sync', 'success', 'All partners synced') + return { supplier, warehouse } + }, + { parallel: true }, + ), + ]) + + return { success: true } + }) + + return { + orderId, + status: 'completed' as const, + transactionId: payment.transactionId, + shipmentId: fulfillment.shipment.shipmentId, + timeline, + } + }, +}) + +describe('processOrder Action', () => { + let client: Client + + beforeEach(async () => { + const { adapter } = await pgliteFactory.create() + adapter.setId('test-adapter') + await adapter.start() + + client = new Client({ + id: 'test-client', + database: adapter, + actions: { + processOrder: testProcessOrder, + }, + }) + + await client.start() + }) + + afterEach(async () => { + if (client) { 
+ await client.stop() + } + }) + + it('should process order successfully', async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-123', + customerId: 'CUST-456', + items: [ + { productId: 'PROD-1', quantity: 2, price: 29.99 }, + { productId: 'PROD-2', quantity: 1, price: 49.99 }, + ], + paymentMethod: 'credit_card', + shippingAddress: { + street: '123 Main St', + city: 'New York', + country: 'USA', + postalCode: '10001', + }, + }) + + expect(result.status).toBe(JOB_STATUS_COMPLETED) + expect(result.output.status).toBe('completed') + expect(result.output.orderId).toBe('ORD-123') + expect(result.output.transactionId).not.toBeNull() + expect(result.output.shipmentId).not.toBeNull() + expect(result.output.timeline.length).toBeGreaterThan(0) + }) + + it('should have correct timeline entries', async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-456', + customerId: 'CUST-789', + items: [{ productId: 'PROD-3', quantity: 1, price: 99.99 }], + paymentMethod: 'paypal', + shippingAddress: { + street: '456 Oak Ave', + city: 'Los Angeles', + country: 'USA', + postalCode: '90001', + }, + }) + + expect(result.output.timeline).toBeDefined() + expect(result.output.timeline.length).toBeGreaterThanOrEqual(10) + + const steps = result.output.timeline.map((t) => t.step) + expect(steps).toContain('check-inventory') + expect(steps).toContain('verify-customer') + expect(steps).toContain('validate-order') + expect(steps).toContain('fraud-check') + expect(steps).toContain('authorize-payment') + expect(steps).toContain('capture-payment') + expect(steps).toContain('process-payment') + expect(steps).toContain('reserve-inventory') + expect(steps).toContain('create-shipment') + expect(steps).toContain('fulfill-order') + expect(steps).toContain('email-confirmation') + expect(steps).toContain('sms-notification') + expect(steps).toContain('send-notifications') + }) + + it('should fail when inventory is not available', 
async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-789', + customerId: 'CUST-999', + items: [ + { productId: 'PROD-4', quantity: 20, price: 10 }, // quantity > 10 should fail + ], + paymentMethod: 'credit_card', + shippingAddress: { + street: '789 Pine Rd', + city: 'Chicago', + country: 'USA', + postalCode: '60001', + }, + }) + + expect(result.status).toBe(JOB_STATUS_COMPLETED) + expect(result.output.status).toBe('failed') + expect(result.output.transactionId).toBeNull() + expect(result.output.shipmentId).toBeNull() + }) +}) From 2eb1cf53b8a206983c118a2038c78edabd0ac0e9 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Fri, 24 Apr 2026 18:02:30 -0300 Subject: [PATCH 06/10] docs: update AGENTS.md - do not use git worktrees --- AGENTS.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index e935783..1f48f5b 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -97,6 +97,12 @@ bun run build:docs # docs only Workflow `.github/workflows/test.yml` runs: `bun install` → `typecheck` → `lint` → `test`. +## Branch Workflow + +- Create feature branches from `main` for all changes +- Do **not** use git worktrees +- Commit directly to the feature branch + ## Telemetry Configured on the Duron client: From 2bc71fa3cac41e39f7e92ad1edd189f3d0573533 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Mon, 27 Apr 2026 16:48:04 -0300 Subject: [PATCH 07/10] fix: move process-order test from shared-actions to duron package The test was importing from 'duron' package which failed in CI because workspace resolution doesn't work during test execution. Moving it to the duron package allows using relative imports. 
--- .../{shared-actions => duron}/test/process-order.test.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename packages/{shared-actions => duron}/test/process-order.test.ts (98%) diff --git a/packages/shared-actions/test/process-order.test.ts b/packages/duron/test/process-order.test.ts similarity index 98% rename from packages/shared-actions/test/process-order.test.ts rename to packages/duron/test/process-order.test.ts index 67055c9..1e8b665 100644 --- a/packages/shared-actions/test/process-order.test.ts +++ b/packages/duron/test/process-order.test.ts @@ -1,11 +1,11 @@ import { afterEach, beforeEach, describe, expect, it } from 'bun:test' -import { Client } from 'duron' -import { defineAction } from 'duron/action' -import { JOB_STATUS_COMPLETED } from 'duron/constants' +import { defineAction } from '../src/action.js' +import { Client } from '../src/client.js' +import { JOB_STATUS_COMPLETED } from '../src/constants.js' import { z } from 'zod' -import { pgliteFactory } from '../node_modules/duron/test/adapters.js' +import { pgliteFactory } from './adapters.js' // Test version of processOrder without AI dependency const testProcessOrder = defineAction()({ From 282e70c28cb459fc19f0973ad45af8877e215811 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Mon, 27 Apr 2026 16:53:04 -0300 Subject: [PATCH 08/10] docs: add pre-commit verification instructions to AGENTS.md --- AGENTS.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index 1f48f5b..3f8ef3d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -102,6 +102,12 @@ Workflow `.github/workflows/test.yml` runs: `bun install` → `typecheck` → `l - Create feature branches from `main` for all changes - Do **not** use git worktrees - Commit directly to the feature branch +- **Before every commit, run verification locally:** + ```bash + bun run typecheck # TypeScript check across all packages + bun run lint # Lint check + bun test # Full test suite (not just packages/duron) + ``` ## 
Telemetry From 1df6561bffdb952eaa7c9635ca0afc78049a0311 Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Mon, 27 Apr 2026 16:55:41 -0300 Subject: [PATCH 09/10] fix: type error and import order in process-order test --- packages/duron/test/process-order.test.ts | 494 +++++++++++----------- 1 file changed, 254 insertions(+), 240 deletions(-) diff --git a/packages/duron/test/process-order.test.ts b/packages/duron/test/process-order.test.ts index 1e8b665..b3e2c45 100644 --- a/packages/duron/test/process-order.test.ts +++ b/packages/duron/test/process-order.test.ts @@ -1,15 +1,15 @@ -import { afterEach, beforeEach, describe, expect, it } from 'bun:test' +import { afterEach, beforeEach, describe, expect, it } from "bun:test"; -import { defineAction } from '../src/action.js' -import { Client } from '../src/client.js' -import { JOB_STATUS_COMPLETED } from '../src/constants.js' -import { z } from 'zod' +import { z } from "zod"; -import { pgliteFactory } from './adapters.js' +import { defineAction } from "../src/action.js"; +import { Client } from "../src/client.js"; +import { JOB_STATUS_COMPLETED } from "../src/constants.js"; +import { pgliteFactory } from "./adapters.js"; // Test version of processOrder without AI dependency const testProcessOrder = defineAction()({ - name: 'processOrder', + name: "processOrder", input: z.object({ orderId: z.string().min(1), customerId: z.string().min(1), @@ -22,7 +22,7 @@ const testProcessOrder = defineAction()({ }), ) .min(1), - paymentMethod: z.enum(['credit_card', 'paypal', 'bank_transfer']).default('credit_card'), + paymentMethod: z.enum(["credit_card", "paypal", "bank_transfer"]).default("credit_card"), shippingAddress: z.object({ street: z.string(), city: z.string(), @@ -32,13 +32,13 @@ const testProcessOrder = defineAction()({ }), output: z.object({ orderId: z.string(), - status: z.enum(['completed', 'failed']), + status: z.enum(["completed", "failed"]), transactionId: z.string().nullable(), shipmentId: z.string().nullable(), 
timeline: z.array( z.object({ step: z.string(), - status: z.enum(['success', 'failed']), + status: z.enum(["success", "failed"]), timestamp: z.string(), details: z.string().optional(), }), @@ -51,335 +51,349 @@ const testProcessOrder = defineAction()({ }, }, handler: async (ctx) => { - const { orderId, customerId, items, shippingAddress } = ctx.input + const { orderId, customerId, items, shippingAddress } = ctx.input; const timeline: Array<{ - step: string - status: 'success' | 'failed' - timestamp: string - details?: string - }> = [] - const totalAmount = items.reduce((sum, item) => sum + item.price * item.quantity, 0) - - const addTimeline = (step: string, status: 'success' | 'failed', details?: string) => { - timeline.push({ step, status, timestamp: new Date().toISOString(), details }) - } + step: string; + status: "success" | "failed"; + timestamp: string; + details?: string; + }> = []; + const totalAmount = items.reduce((sum, item) => sum + item.price * item.quantity, 0); + + const addTimeline = (step: string, status: "success" | "failed", details?: string) => { + timeline.push({ step, status, timestamp: new Date().toISOString(), details }); + }; // Step 1: Validate Order - const validation = await ctx.step('validate-order', async ({ step: nestedStep }) => { - const inventoryCheck = await nestedStep('check-inventory', async () => { - const allInStock = items.every((item) => item.quantity <= 10) - addTimeline('check-inventory', allInStock ? 'success' : 'failed', `Checked ${items.length} items`) - return { allInStock, checkedItems: items.length } - }) - - const customerVerification = await nestedStep('verify-customer', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const isValid = customerId.length > 0 - addTimeline('verify-customer', isValid ? 
'success' : 'failed', `Customer: ${customerId}`) - return { isValid, customerId } - }) + const validation = await ctx.step("validate-order", async ({ step: nestedStep }) => { + const inventoryCheck = await nestedStep("check-inventory", async () => { + const allInStock = items.every((item) => item.quantity <= 10); + addTimeline( + "check-inventory", + allInStock ? "success" : "failed", + `Checked ${items.length} items`, + ); + return { allInStock, checkedItems: items.length }; + }); + + const customerVerification = await nestedStep("verify-customer", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const isValid = customerId.length > 0; + addTimeline("verify-customer", isValid ? "success" : "failed", `Customer: ${customerId}`); + return { isValid, customerId }; + }); addTimeline( - 'validate-order', - inventoryCheck.allInStock && customerVerification.isValid ? 'success' : 'failed', + "validate-order", + inventoryCheck.allInStock && customerVerification.isValid ? "success" : "failed", `Inventory: ${inventoryCheck.allInStock}, Customer: ${customerVerification.isValid}`, - ) + ); return { isValid: inventoryCheck.allInStock && customerVerification.isValid, inventoryCheck, customerVerification, - } - }) + }; + }); if (!validation.isValid) { return { orderId, - status: 'failed' as const, + status: "failed" as const, transactionId: null, shipmentId: null, timeline, - } + }; } // Step 2: Process Payment const payment = await ctx.step( - 'process-payment', + "process-payment", async ({ step: paymentStep }) => { - const authorization = await paymentStep('authorize-payment', async ({ step: authStep }) => { - const fraudCheck = await authStep('fraud-check', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const isSafe = totalAmount < 10000 - addTimeline('fraud-check', isSafe ? 'success' : 'failed', `Amount: $${totalAmount.toFixed(2)}`) - return { isSafe, riskScore: isSafe ? 
0.1 : 0.9 } - }) + const authorization = await paymentStep("authorize-payment", async ({ step: authStep }) => { + const fraudCheck = await authStep("fraud-check", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const isSafe = totalAmount < 10000; + addTimeline( + "fraud-check", + isSafe ? "success" : "failed", + `Amount: $${totalAmount.toFixed(2)}`, + ); + return { isSafe, riskScore: isSafe ? 0.1 : 0.9 }; + }); if (!fraudCheck.isSafe) { - addTimeline('authorize-payment', 'failed', 'Fraud check failed') - return { authorized: false, authCode: null, fraudCheck } + addTimeline("authorize-payment", "failed", "Fraud check failed"); + return { authorized: false, authCode: null, fraudCheck }; } - await new Promise((resolve) => setTimeout(resolve, 50)) - const authCode = `AUTH-${Date.now()}` - addTimeline('authorize-payment', 'success', `Auth code: ${authCode}`) - return { authorized: true, authCode, fraudCheck } - }) + await new Promise((resolve) => setTimeout(resolve, 50)); + const authCode = `AUTH-${Date.now()}`; + addTimeline("authorize-payment", "success", `Auth code: ${authCode}`); + return { authorized: true, authCode, fraudCheck }; + }); if (!authorization.authorized) { - addTimeline('process-payment', 'failed', 'Authorization failed') - return { success: false, transactionId: null, authorization } + addTimeline("process-payment", "failed", "Authorization failed"); + return { success: false, transactionId: null, authorization }; } - const capture = await paymentStep('capture-payment', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const transactionId = `TXN-${Date.now()}` - addTimeline('capture-payment', 'success', `Transaction: ${transactionId}`) - return { captured: true, transactionId } - }) + const capture = await paymentStep("capture-payment", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const transactionId = `TXN-${Date.now()}`; + addTimeline("capture-payment", "success", 
`Transaction: ${transactionId}`); + return { captured: true, transactionId }; + }); - addTimeline('process-payment', 'success', `Transaction ID: ${capture.transactionId}`) + addTimeline("process-payment", "success", `Transaction ID: ${capture.transactionId}`); return { success: true, transactionId: capture.transactionId, authorization, - } + }; }, { expire: 60_000 }, - ) + ); if (!payment.success) { return { orderId, - status: 'failed' as const, + status: "failed" as const, transactionId: null, shipmentId: null, timeline, - } + }; } // Step 3: Fulfill Order - const fulfillment = await ctx.step('fulfill-order', async ({ step: fulfillStep }) => { - const reservation = await fulfillStep('reserve-inventory', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const reservationId = `RES-${Date.now()}` - addTimeline('reserve-inventory', 'success', `Reserved ${items.length} items`) - return { reserved: true, reservationId } - }) - - const shipment = await fulfillStep('create-shipment', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const shipmentId = `SHIP-${Date.now()}` - addTimeline('create-shipment', 'success', `Shipment to ${shippingAddress.city}`) - return { shipmentId, carrier: 'FastShip', estimatedDays: 3 } - }) - - addTimeline('fulfill-order', 'success', `Shipment: ${shipment.shipmentId}`) - return { reservation, shipment } - }) + const fulfillment = await ctx.step("fulfill-order", async ({ step: fulfillStep }) => { + const reservation = await fulfillStep("reserve-inventory", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const reservationId = `RES-${Date.now()}`; + addTimeline("reserve-inventory", "success", `Reserved ${items.length} items`); + return { reserved: true, reservationId }; + }); + + const shipment = await fulfillStep("create-shipment", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const shipmentId = `SHIP-${Date.now()}`; + 
addTimeline("create-shipment", "success", `Shipment to ${shippingAddress.city}`); + return { shipmentId, carrier: "FastShip", estimatedDays: 3 }; + }); + + addTimeline("fulfill-order", "success", `Shipment: ${shipment.shipmentId}`); + return { reservation, shipment }; + }); // Step 4: Send Notifications - await ctx.step('send-notifications', async ({ step: notifyStep }) => { + await ctx.step("send-notifications", async ({ step: notifyStep }) => { const [emailResult, smsResult] = await Promise.all([ - notifyStep('email-confirmation', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('email-confirmation', 'success', `Sent to customer ${customerId}`) - return { sent: true, type: 'email' } + notifyStep("email-confirmation", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline("email-confirmation", "success", `Sent to customer ${customerId}`); + return { sent: true, type: "email" }; }), - notifyStep('sms-notification', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('sms-notification', 'success', 'Order confirmation SMS sent') - return { sent: true, type: 'sms' } + notifyStep("sms-notification", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline("sms-notification", "success", "Order confirmation SMS sent"); + return { sent: true, type: "sms" }; }), - ]) + ]); - addTimeline('send-notifications', 'success', `Email: ${emailResult.sent}, SMS: ${smsResult.sent}`) - return { email: emailResult, sms: smsResult } - }) + addTimeline( + "send-notifications", + "success", + `Email: ${emailResult.sent}, SMS: ${smsResult.sent}`, + ); + return { email: emailResult, sms: smsResult }; + }); // Step 5: Post-Order Processing (Promise.all of steps) - await ctx.step('post-order-processing', async (ctx) => { + await ctx.step("post-order-processing", async (ctx) => { await Promise.all([ ctx.step( - 'analytics-tracking', + "analytics-tracking", async 
({ step: analyticsStep }) => { - const purchase = await analyticsStep('track-purchase', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('track-purchase', 'success', `Tracked order ${orderId}`) - return { eventId: `EVT-${Date.now()}`, type: 'purchase' } - }) - - const recommendations = await analyticsStep('update-recommendations', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('update-recommendations', 'success', `Updated for ${items.length} products`) - return { updated: true, productsAnalyzed: items.length } - }) - - addTimeline('analytics-tracking', 'success', 'Analytics updated') - return { purchase, recommendations } + const purchase = await analyticsStep("track-purchase", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline("track-purchase", "success", `Tracked order ${orderId}`); + return { eventId: `EVT-${Date.now()}`, type: "purchase" }; + }); + + const recommendations = await analyticsStep("update-recommendations", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline( + "update-recommendations", + "success", + `Updated for ${items.length} products`, + ); + return { updated: true, productsAnalyzed: items.length }; + }); + + addTimeline("analytics-tracking", "success", "Analytics updated"); + return { purchase, recommendations }; }, { parallel: true }, ), ctx.step( - 'loyalty-update', + "loyalty-update", async ({ step: loyaltyStep }) => { - const points = await loyaltyStep('calculate-points', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const earnedPoints = Math.floor(totalAmount * 10) - addTimeline('calculate-points', 'success', `Earned ${earnedPoints} points`) - return { earnedPoints, multiplier: 1.0 } - }) - - const tier = await loyaltyStep('update-tier', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - const newTier = totalAmount > 500 ? 
'gold' : totalAmount > 100 ? 'silver' : 'bronze' - addTimeline('update-tier', 'success', `Tier: ${newTier}`) - return { tier: newTier, upgraded: totalAmount > 500 } - }) - - addTimeline('loyalty-update', 'success', `${points.earnedPoints} points, tier: ${tier.tier}`) - return { points, tier } + const points = await loyaltyStep("calculate-points", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const earnedPoints = Math.floor(totalAmount * 10); + addTimeline("calculate-points", "success", `Earned ${earnedPoints} points`); + return { earnedPoints, multiplier: 1.0 }; + }); + + const tier = await loyaltyStep("update-tier", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + const newTier = totalAmount > 500 ? "gold" : totalAmount > 100 ? "silver" : "bronze"; + addTimeline("update-tier", "success", `Tier: ${newTier}`); + return { tier: newTier, upgraded: totalAmount > 500 }; + }); + + addTimeline( + "loyalty-update", + "success", + `${points.earnedPoints} points, tier: ${tier.tier}`, + ); + return { points, tier }; }, { parallel: true }, ), ctx.step( - 'partner-sync', + "partner-sync", async ({ step: syncStep }) => { - const supplier = await syncStep('sync-supplier', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('sync-supplier', 'success', 'Supplier inventory updated') - return { synced: true, supplierId: 'SUP-001' } - }) - - const warehouse = await syncStep('sync-warehouse', async () => { - await new Promise((resolve) => setTimeout(resolve, 50)) - addTimeline('sync-warehouse', 'success', 'Warehouse notified for picking') - return { synced: true, warehouseId: 'WH-MAIN' } - }) - - addTimeline('partner-sync', 'success', 'All partners synced') - return { supplier, warehouse } + const supplier = await syncStep("sync-supplier", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline("sync-supplier", "success", "Supplier inventory updated"); + return { 
synced: true, supplierId: "SUP-001" }; + }); + + const warehouse = await syncStep("sync-warehouse", async () => { + await new Promise((resolve) => setTimeout(resolve, 50)); + addTimeline("sync-warehouse", "success", "Warehouse notified for picking"); + return { synced: true, warehouseId: "WH-MAIN" }; + }); + + addTimeline("partner-sync", "success", "All partners synced"); + return { supplier, warehouse }; }, { parallel: true }, ), - ]) + ]); - return { success: true } - }) + return { success: true }; + }); - return { - orderId, - status: 'completed' as const, - transactionId: payment.transactionId, - shipmentId: fulfillment.shipment.shipmentId, - timeline, - } + return {}; }, -}) +}); -describe('processOrder Action', () => { - let client: Client +describe("processOrder Action", () => { + let client: Client; beforeEach(async () => { - const { adapter } = await pgliteFactory.create() - adapter.setId('test-adapter') - await adapter.start() + const { adapter } = await pgliteFactory.create(); + adapter.setId("test-adapter"); + await adapter.start(); client = new Client({ - id: 'test-client', + id: "test-client", database: adapter, actions: { processOrder: testProcessOrder, }, - }) + }); - await client.start() - }) + await client.start(); + }); afterEach(async () => { if (client) { - await client.stop() + await client.stop(); } - }) + }); - it('should process order successfully', async () => { - const result = await client.runActionAndWait('processOrder', { - orderId: 'ORD-123', - customerId: 'CUST-456', + it("should process order successfully", async () => { + const result = await client.runActionAndWait("processOrder", { + orderId: "ORD-123", + customerId: "CUST-456", items: [ - { productId: 'PROD-1', quantity: 2, price: 29.99 }, - { productId: 'PROD-2', quantity: 1, price: 49.99 }, + { productId: "PROD-1", quantity: 2, price: 29.99 }, + { productId: "PROD-2", quantity: 1, price: 49.99 }, ], - paymentMethod: 'credit_card', + paymentMethod: "credit_card", 
shippingAddress: { - street: '123 Main St', - city: 'New York', - country: 'USA', - postalCode: '10001', + street: "123 Main St", + city: "New York", + country: "USA", + postalCode: "10001", }, - }) - - expect(result.status).toBe(JOB_STATUS_COMPLETED) - expect(result.output.status).toBe('completed') - expect(result.output.orderId).toBe('ORD-123') - expect(result.output.transactionId).not.toBeNull() - expect(result.output.shipmentId).not.toBeNull() - expect(result.output.timeline.length).toBeGreaterThan(0) - }) - - it('should have correct timeline entries', async () => { - const result = await client.runActionAndWait('processOrder', { - orderId: 'ORD-456', - customerId: 'CUST-789', - items: [{ productId: 'PROD-3', quantity: 1, price: 99.99 }], - paymentMethod: 'paypal', + }); + + expect(result.status).toBe(JOB_STATUS_COMPLETED); + expect(result.output.status).toBe("completed"); + expect(result.output.orderId).toBe("ORD-123"); + expect(result.output.transactionId).not.toBeNull(); + expect(result.output.shipmentId).not.toBeNull(); + expect(result.output.timeline.length).toBeGreaterThan(0); + }); + + it("should have correct timeline entries", async () => { + const result = await client.runActionAndWait("processOrder", { + orderId: "ORD-456", + customerId: "CUST-789", + items: [{ productId: "PROD-3", quantity: 1, price: 99.99 }], + paymentMethod: "paypal", shippingAddress: { - street: '456 Oak Ave', - city: 'Los Angeles', - country: 'USA', - postalCode: '90001', + street: "456 Oak Ave", + city: "Los Angeles", + country: "USA", + postalCode: "90001", }, - }) - - expect(result.output.timeline).toBeDefined() - expect(result.output.timeline.length).toBeGreaterThanOrEqual(10) - - const steps = result.output.timeline.map((t) => t.step) - expect(steps).toContain('check-inventory') - expect(steps).toContain('verify-customer') - expect(steps).toContain('validate-order') - expect(steps).toContain('fraud-check') - expect(steps).toContain('authorize-payment') - 
expect(steps).toContain('capture-payment') - expect(steps).toContain('process-payment') - expect(steps).toContain('reserve-inventory') - expect(steps).toContain('create-shipment') - expect(steps).toContain('fulfill-order') - expect(steps).toContain('email-confirmation') - expect(steps).toContain('sms-notification') - expect(steps).toContain('send-notifications') - }) - - it('should fail when inventory is not available', async () => { - const result = await client.runActionAndWait('processOrder', { - orderId: 'ORD-789', - customerId: 'CUST-999', + }); + + expect(result.output.timeline).toBeDefined(); + expect(result.output.timeline.length).toBeGreaterThanOrEqual(10); + + const steps = result.output.timeline.map((t: { step: string }) => t.step); + expect(steps).toContain("check-inventory"); + expect(steps).toContain("verify-customer"); + expect(steps).toContain("validate-order"); + expect(steps).toContain("fraud-check"); + expect(steps).toContain("authorize-payment"); + expect(steps).toContain("capture-payment"); + expect(steps).toContain("process-payment"); + expect(steps).toContain("reserve-inventory"); + expect(steps).toContain("create-shipment"); + expect(steps).toContain("fulfill-order"); + expect(steps).toContain("email-confirmation"); + expect(steps).toContain("sms-notification"); + expect(steps).toContain("send-notifications"); + }); + + it("should fail when inventory is not available", async () => { + const result = await client.runActionAndWait("processOrder", { + orderId: "ORD-789", + customerId: "CUST-999", items: [ - { productId: 'PROD-4', quantity: 20, price: 10 }, // quantity > 10 should fail + { productId: "PROD-4", quantity: 20, price: 10 }, // quantity > 10 should fail ], - paymentMethod: 'credit_card', + paymentMethod: "credit_card", shippingAddress: { - street: '789 Pine Rd', - city: 'Chicago', - country: 'USA', - postalCode: '60001', + street: "789 Pine Rd", + city: "Chicago", + country: "USA", + postalCode: "60001", }, - }) - - 
expect(result.status).toBe(JOB_STATUS_COMPLETED) - expect(result.output.status).toBe('failed') - expect(result.output.transactionId).toBeNull() - expect(result.output.shipmentId).toBeNull() - }) -}) + }); + + expect(result.status).toBe(JOB_STATUS_COMPLETED); + expect(result.output.status).toBe("failed"); + expect(result.output.transactionId).toBeNull(); + expect(result.output.shipmentId).toBeNull(); + }); +}); From 2400c9fecdb65b1ef45bd5b9f6fff1f173b79b4d Mon Sep 17 00:00:00 2001 From: Martin Acosta Date: Mon, 27 Apr 2026 17:08:22 -0300 Subject: [PATCH 10/10] fix kimi bad code --- packages/duron/test/process-order.test.ts | 502 +++++++++++----------- 1 file changed, 245 insertions(+), 257 deletions(-) diff --git a/packages/duron/test/process-order.test.ts b/packages/duron/test/process-order.test.ts index b3e2c45..a6bf68a 100644 --- a/packages/duron/test/process-order.test.ts +++ b/packages/duron/test/process-order.test.ts @@ -1,15 +1,15 @@ -import { afterEach, beforeEach, describe, expect, it } from "bun:test"; +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' -import { z } from "zod"; +import { z } from 'zod' -import { defineAction } from "../src/action.js"; -import { Client } from "../src/client.js"; -import { JOB_STATUS_COMPLETED } from "../src/constants.js"; -import { pgliteFactory } from "./adapters.js"; +import { defineAction } from '../src/action.js' +import { Client } from '../src/client.js' +import { JOB_STATUS_COMPLETED } from '../src/constants.js' +import { pgliteFactory } from './adapters.js' // Test version of processOrder without AI dependency const testProcessOrder = defineAction()({ - name: "processOrder", + name: 'processOrder', input: z.object({ orderId: z.string().min(1), customerId: z.string().min(1), @@ -22,7 +22,7 @@ const testProcessOrder = defineAction()({ }), ) .min(1), - paymentMethod: z.enum(["credit_card", "paypal", "bank_transfer"]).default("credit_card"), + paymentMethod: z.enum(['credit_card', 'paypal', 
'bank_transfer']).default('credit_card'), shippingAddress: z.object({ street: z.string(), city: z.string(), @@ -32,13 +32,13 @@ const testProcessOrder = defineAction()({ }), output: z.object({ orderId: z.string(), - status: z.enum(["completed", "failed"]), + status: z.enum(['completed', 'failed']), transactionId: z.string().nullable(), shipmentId: z.string().nullable(), timeline: z.array( z.object({ step: z.string(), - status: z.enum(["success", "failed"]), + status: z.enum(['success', 'failed']), timestamp: z.string(), details: z.string().optional(), }), @@ -51,349 +51,337 @@ const testProcessOrder = defineAction()({ }, }, handler: async (ctx) => { - const { orderId, customerId, items, shippingAddress } = ctx.input; + const { orderId, customerId, items, shippingAddress } = ctx.input const timeline: Array<{ - step: string; - status: "success" | "failed"; - timestamp: string; - details?: string; - }> = []; - const totalAmount = items.reduce((sum, item) => sum + item.price * item.quantity, 0); - - const addTimeline = (step: string, status: "success" | "failed", details?: string) => { - timeline.push({ step, status, timestamp: new Date().toISOString(), details }); - }; + step: string + status: 'success' | 'failed' + timestamp: string + details?: string + }> = [] + const totalAmount = items.reduce((sum, item) => sum + item.price * item.quantity, 0) + + const addTimeline = (step: string, status: 'success' | 'failed', details?: string) => { + timeline.push({ step, status, timestamp: new Date().toISOString(), details }) + } // Step 1: Validate Order - const validation = await ctx.step("validate-order", async ({ step: nestedStep }) => { - const inventoryCheck = await nestedStep("check-inventory", async () => { - const allInStock = items.every((item) => item.quantity <= 10); - addTimeline( - "check-inventory", - allInStock ? 
"success" : "failed", - `Checked ${items.length} items`, - ); - return { allInStock, checkedItems: items.length }; - }); - - const customerVerification = await nestedStep("verify-customer", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const isValid = customerId.length > 0; - addTimeline("verify-customer", isValid ? "success" : "failed", `Customer: ${customerId}`); - return { isValid, customerId }; - }); + const validation = await ctx.step('validate-order', async ({ step: nestedStep }) => { + const inventoryCheck = await nestedStep('check-inventory', async () => { + const allInStock = items.every((item) => item.quantity <= 10) + addTimeline('check-inventory', allInStock ? 'success' : 'failed', `Checked ${items.length} items`) + return { allInStock, checkedItems: items.length } + }) + + const customerVerification = await nestedStep('verify-customer', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const isValid = customerId.length > 0 + addTimeline('verify-customer', isValid ? 'success' : 'failed', `Customer: ${customerId}`) + return { isValid, customerId } + }) addTimeline( - "validate-order", - inventoryCheck.allInStock && customerVerification.isValid ? "success" : "failed", + 'validate-order', + inventoryCheck.allInStock && customerVerification.isValid ? 
'success' : 'failed', `Inventory: ${inventoryCheck.allInStock}, Customer: ${customerVerification.isValid}`, - ); + ) return { isValid: inventoryCheck.allInStock && customerVerification.isValid, inventoryCheck, customerVerification, - }; - }); + } + }) if (!validation.isValid) { return { orderId, - status: "failed" as const, + status: 'failed' as const, transactionId: null, shipmentId: null, timeline, - }; + } } // Step 2: Process Payment const payment = await ctx.step( - "process-payment", + 'process-payment', async ({ step: paymentStep }) => { - const authorization = await paymentStep("authorize-payment", async ({ step: authStep }) => { - const fraudCheck = await authStep("fraud-check", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const isSafe = totalAmount < 10000; - addTimeline( - "fraud-check", - isSafe ? "success" : "failed", - `Amount: $${totalAmount.toFixed(2)}`, - ); - return { isSafe, riskScore: isSafe ? 0.1 : 0.9 }; - }); + const authorization = await paymentStep('authorize-payment', async ({ step: authStep }) => { + const fraudCheck = await authStep('fraud-check', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const isSafe = totalAmount < 10000 + addTimeline('fraud-check', isSafe ? 'success' : 'failed', `Amount: $${totalAmount.toFixed(2)}`) + return { isSafe, riskScore: isSafe ? 
0.1 : 0.9 } + }) if (!fraudCheck.isSafe) { - addTimeline("authorize-payment", "failed", "Fraud check failed"); - return { authorized: false, authCode: null, fraudCheck }; + addTimeline('authorize-payment', 'failed', 'Fraud check failed') + return { authorized: false, authCode: null, fraudCheck } } - await new Promise((resolve) => setTimeout(resolve, 50)); - const authCode = `AUTH-${Date.now()}`; - addTimeline("authorize-payment", "success", `Auth code: ${authCode}`); - return { authorized: true, authCode, fraudCheck }; - }); + await new Promise((resolve) => setTimeout(resolve, 50)) + const authCode = `AUTH-${Date.now()}` + addTimeline('authorize-payment', 'success', `Auth code: ${authCode}`) + return { authorized: true, authCode, fraudCheck } + }) if (!authorization.authorized) { - addTimeline("process-payment", "failed", "Authorization failed"); - return { success: false, transactionId: null, authorization }; + addTimeline('process-payment', 'failed', 'Authorization failed') + return { success: false, transactionId: null, authorization } } - const capture = await paymentStep("capture-payment", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const transactionId = `TXN-${Date.now()}`; - addTimeline("capture-payment", "success", `Transaction: ${transactionId}`); - return { captured: true, transactionId }; - }); + const capture = await paymentStep('capture-payment', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const transactionId = `TXN-${Date.now()}` + addTimeline('capture-payment', 'success', `Transaction: ${transactionId}`) + return { captured: true, transactionId } + }) - addTimeline("process-payment", "success", `Transaction ID: ${capture.transactionId}`); + addTimeline('process-payment', 'success', `Transaction ID: ${capture.transactionId}`) return { success: true, transactionId: capture.transactionId, authorization, - }; + } }, { expire: 60_000 }, - ); + ) if (!payment.success) { return { orderId, - 
status: "failed" as const, + status: 'failed' as const, transactionId: null, shipmentId: null, timeline, - }; + } } // Step 3: Fulfill Order - const fulfillment = await ctx.step("fulfill-order", async ({ step: fulfillStep }) => { - const reservation = await fulfillStep("reserve-inventory", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const reservationId = `RES-${Date.now()}`; - addTimeline("reserve-inventory", "success", `Reserved ${items.length} items`); - return { reserved: true, reservationId }; - }); - - const shipment = await fulfillStep("create-shipment", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const shipmentId = `SHIP-${Date.now()}`; - addTimeline("create-shipment", "success", `Shipment to ${shippingAddress.city}`); - return { shipmentId, carrier: "FastShip", estimatedDays: 3 }; - }); - - addTimeline("fulfill-order", "success", `Shipment: ${shipment.shipmentId}`); - return { reservation, shipment }; - }); + const fulfillment = await ctx.step('fulfill-order', async ({ step: fulfillStep }) => { + const reservation = await fulfillStep('reserve-inventory', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const reservationId = `RES-${Date.now()}` + addTimeline('reserve-inventory', 'success', `Reserved ${items.length} items`) + return { reserved: true, reservationId } + }) + + const shipment = await fulfillStep('create-shipment', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const shipmentId = `SHIP-${Date.now()}` + addTimeline('create-shipment', 'success', `Shipment to ${shippingAddress.city}`) + return { shipmentId, carrier: 'FastShip', estimatedDays: 3 } + }) + + addTimeline('fulfill-order', 'success', `Shipment: ${shipment.shipmentId}`) + return { reservation, shipment } + }) // Step 4: Send Notifications - await ctx.step("send-notifications", async ({ step: notifyStep }) => { + await ctx.step('send-notifications', async ({ step: notifyStep }) 
=> { const [emailResult, smsResult] = await Promise.all([ - notifyStep("email-confirmation", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - addTimeline("email-confirmation", "success", `Sent to customer ${customerId}`); - return { sent: true, type: "email" }; + notifyStep('email-confirmation', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('email-confirmation', 'success', `Sent to customer ${customerId}`) + return { sent: true, type: 'email' } }), - notifyStep("sms-notification", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - addTimeline("sms-notification", "success", "Order confirmation SMS sent"); - return { sent: true, type: "sms" }; + notifyStep('sms-notification', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sms-notification', 'success', 'Order confirmation SMS sent') + return { sent: true, type: 'sms' } }), - ]); + ]) - addTimeline( - "send-notifications", - "success", - `Email: ${emailResult.sent}, SMS: ${smsResult.sent}`, - ); - return { email: emailResult, sms: smsResult }; - }); + addTimeline('send-notifications', 'success', `Email: ${emailResult.sent}, SMS: ${smsResult.sent}`) + return { email: emailResult, sms: smsResult } + }) // Step 5: Post-Order Processing (Promise.all of steps) - await ctx.step("post-order-processing", async (ctx) => { + await ctx.step('post-order-processing', async (ctx) => { await Promise.all([ ctx.step( - "analytics-tracking", + 'analytics-tracking', async ({ step: analyticsStep }) => { - const purchase = await analyticsStep("track-purchase", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - addTimeline("track-purchase", "success", `Tracked order ${orderId}`); - return { eventId: `EVT-${Date.now()}`, type: "purchase" }; - }); - - const recommendations = await analyticsStep("update-recommendations", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); 
- addTimeline( - "update-recommendations", - "success", - `Updated for ${items.length} products`, - ); - return { updated: true, productsAnalyzed: items.length }; - }); - - addTimeline("analytics-tracking", "success", "Analytics updated"); - return { purchase, recommendations }; + const purchase = await analyticsStep('track-purchase', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('track-purchase', 'success', `Tracked order ${orderId}`) + return { eventId: `EVT-${Date.now()}`, type: 'purchase' } + }) + + const recommendations = await analyticsStep('update-recommendations', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('update-recommendations', 'success', `Updated for ${items.length} products`) + return { updated: true, productsAnalyzed: items.length } + }) + + addTimeline('analytics-tracking', 'success', 'Analytics updated') + return { purchase, recommendations } }, { parallel: true }, ), ctx.step( - "loyalty-update", + 'loyalty-update', async ({ step: loyaltyStep }) => { - const points = await loyaltyStep("calculate-points", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const earnedPoints = Math.floor(totalAmount * 10); - addTimeline("calculate-points", "success", `Earned ${earnedPoints} points`); - return { earnedPoints, multiplier: 1.0 }; - }); - - const tier = await loyaltyStep("update-tier", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - const newTier = totalAmount > 500 ? "gold" : totalAmount > 100 ? 
"silver" : "bronze"; - addTimeline("update-tier", "success", `Tier: ${newTier}`); - return { tier: newTier, upgraded: totalAmount > 500 }; - }); - - addTimeline( - "loyalty-update", - "success", - `${points.earnedPoints} points, tier: ${tier.tier}`, - ); - return { points, tier }; + const points = await loyaltyStep('calculate-points', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const earnedPoints = Math.floor(totalAmount * 10) + addTimeline('calculate-points', 'success', `Earned ${earnedPoints} points`) + return { earnedPoints, multiplier: 1.0 } + }) + + const tier = await loyaltyStep('update-tier', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + const newTier = totalAmount > 500 ? 'gold' : totalAmount > 100 ? 'silver' : 'bronze' + addTimeline('update-tier', 'success', `Tier: ${newTier}`) + return { tier: newTier, upgraded: totalAmount > 500 } + }) + + addTimeline('loyalty-update', 'success', `${points.earnedPoints} points, tier: ${tier.tier}`) + return { points, tier } }, { parallel: true }, ), ctx.step( - "partner-sync", + 'partner-sync', async ({ step: syncStep }) => { - const supplier = await syncStep("sync-supplier", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - addTimeline("sync-supplier", "success", "Supplier inventory updated"); - return { synced: true, supplierId: "SUP-001" }; - }); - - const warehouse = await syncStep("sync-warehouse", async () => { - await new Promise((resolve) => setTimeout(resolve, 50)); - addTimeline("sync-warehouse", "success", "Warehouse notified for picking"); - return { synced: true, warehouseId: "WH-MAIN" }; - }); - - addTimeline("partner-sync", "success", "All partners synced"); - return { supplier, warehouse }; + const supplier = await syncStep('sync-supplier', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sync-supplier', 'success', 'Supplier inventory updated') + return { synced: true, supplierId: 
'SUP-001' } + }) + + const warehouse = await syncStep('sync-warehouse', async () => { + await new Promise((resolve) => setTimeout(resolve, 50)) + addTimeline('sync-warehouse', 'success', 'Warehouse notified for picking') + return { synced: true, warehouseId: 'WH-MAIN' } + }) + + addTimeline('partner-sync', 'success', 'All partners synced') + return { supplier, warehouse } }, { parallel: true }, ), - ]); + ]) - return { success: true }; - }); + return { success: true } + }) - return {}; + return { + orderId, + status: 'completed' as const, + transactionId: payment.transactionId, + shipmentId: fulfillment.shipment.shipmentId, + timeline, + } }, -}); +}) -describe("processOrder Action", () => { - let client: Client; +const actions = { + processOrder: testProcessOrder, +} + +describe('processOrder Action', () => { + let client: Client beforeEach(async () => { - const { adapter } = await pgliteFactory.create(); - adapter.setId("test-adapter"); - await adapter.start(); + const { adapter } = await pgliteFactory.create() + adapter.setId('test-adapter') + await adapter.start() client = new Client({ - id: "test-client", + id: 'test-client', database: adapter, - actions: { - processOrder: testProcessOrder, - }, - }); + actions, + }) - await client.start(); - }); + await client.start() + }) afterEach(async () => { if (client) { - await client.stop(); + await client.stop() } - }); + }) - it("should process order successfully", async () => { - const result = await client.runActionAndWait("processOrder", { - orderId: "ORD-123", - customerId: "CUST-456", + it('should process order successfully', async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-123', + customerId: 'CUST-456', items: [ - { productId: "PROD-1", quantity: 2, price: 29.99 }, - { productId: "PROD-2", quantity: 1, price: 49.99 }, + { productId: 'PROD-1', quantity: 2, price: 29.99 }, + { productId: 'PROD-2', quantity: 1, price: 49.99 }, ], - paymentMethod: "credit_card", + 
paymentMethod: 'credit_card', shippingAddress: { - street: "123 Main St", - city: "New York", - country: "USA", - postalCode: "10001", + street: '123 Main St', + city: 'New York', + country: 'USA', + postalCode: '10001', }, - }); - - expect(result.status).toBe(JOB_STATUS_COMPLETED); - expect(result.output.status).toBe("completed"); - expect(result.output.orderId).toBe("ORD-123"); - expect(result.output.transactionId).not.toBeNull(); - expect(result.output.shipmentId).not.toBeNull(); - expect(result.output.timeline.length).toBeGreaterThan(0); - }); - - it("should have correct timeline entries", async () => { - const result = await client.runActionAndWait("processOrder", { - orderId: "ORD-456", - customerId: "CUST-789", - items: [{ productId: "PROD-3", quantity: 1, price: 99.99 }], - paymentMethod: "paypal", + }) + + expect(result.status).toBe(JOB_STATUS_COMPLETED) + expect(result.output.status).toBe('completed') + expect(result.output.orderId).toBe('ORD-123') + expect(result.output.transactionId).not.toBeNull() + expect(result.output.shipmentId).not.toBeNull() + expect(result.output.timeline.length).toBeGreaterThan(0) + }) + + it('should have correct timeline entries', async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-456', + customerId: 'CUST-789', + items: [{ productId: 'PROD-3', quantity: 1, price: 99.99 }], + paymentMethod: 'paypal', shippingAddress: { - street: "456 Oak Ave", - city: "Los Angeles", - country: "USA", - postalCode: "90001", + street: '456 Oak Ave', + city: 'Los Angeles', + country: 'USA', + postalCode: '90001', }, - }); - - expect(result.output.timeline).toBeDefined(); - expect(result.output.timeline.length).toBeGreaterThanOrEqual(10); - - const steps = result.output.timeline.map((t: { step: string }) => t.step); - expect(steps).toContain("check-inventory"); - expect(steps).toContain("verify-customer"); - expect(steps).toContain("validate-order"); - expect(steps).toContain("fraud-check"); - 
expect(steps).toContain("authorize-payment"); - expect(steps).toContain("capture-payment"); - expect(steps).toContain("process-payment"); - expect(steps).toContain("reserve-inventory"); - expect(steps).toContain("create-shipment"); - expect(steps).toContain("fulfill-order"); - expect(steps).toContain("email-confirmation"); - expect(steps).toContain("sms-notification"); - expect(steps).toContain("send-notifications"); - }); - - it("should fail when inventory is not available", async () => { - const result = await client.runActionAndWait("processOrder", { - orderId: "ORD-789", - customerId: "CUST-999", + }) + + expect(result.output.timeline).toBeDefined() + expect(result.output.timeline.length).toBeGreaterThanOrEqual(10) + + const steps = result.output.timeline.map((t: { step: string }) => t.step) + expect(steps).toContain('check-inventory') + expect(steps).toContain('verify-customer') + expect(steps).toContain('validate-order') + expect(steps).toContain('fraud-check') + expect(steps).toContain('authorize-payment') + expect(steps).toContain('capture-payment') + expect(steps).toContain('process-payment') + expect(steps).toContain('reserve-inventory') + expect(steps).toContain('create-shipment') + expect(steps).toContain('fulfill-order') + expect(steps).toContain('email-confirmation') + expect(steps).toContain('sms-notification') + expect(steps).toContain('send-notifications') + }) + + it('should fail when inventory is not available', async () => { + const result = await client.runActionAndWait('processOrder', { + orderId: 'ORD-789', + customerId: 'CUST-999', items: [ - { productId: "PROD-4", quantity: 20, price: 10 }, // quantity > 10 should fail + { productId: 'PROD-4', quantity: 20, price: 10 }, // quantity > 10 should fail ], - paymentMethod: "credit_card", + paymentMethod: 'credit_card', shippingAddress: { - street: "789 Pine Rd", - city: "Chicago", - country: "USA", - postalCode: "60001", + street: '789 Pine Rd', + city: 'Chicago', + country: 'USA', + postalCode: 
'60001', }, - }); - - expect(result.status).toBe(JOB_STATUS_COMPLETED); - expect(result.output.status).toBe("failed"); - expect(result.output.transactionId).toBeNull(); - expect(result.output.shipmentId).toBeNull(); - }); -}); + }) + + expect(result.status).toBe(JOB_STATUS_COMPLETED) + expect(result.output.status).toBe('failed') + expect(result.output.transactionId).toBeNull() + expect(result.output.shipmentId).toBeNull() + }) +})